xref: /freebsd/contrib/ntp/ntpd/ntp_request.c (revision f5f40dd63bc7acbb5312b26ac1ea1103c12352a6)
1 /*
2  * ntp_request.c - respond to information requests
3  */
4 
5 #ifdef HAVE_CONFIG_H
6 # include <config.h>
7 #endif
8 
9 #include "ntpd.h"
10 #include "ntp_io.h"
11 #include "ntp_request.h"
12 #include "ntp_control.h"
13 #include "ntp_refclock.h"
14 #include "ntp_if.h"
15 #include "ntp_stdlib.h"
16 #include "ntp_assert.h"
17 
18 #include <stdio.h>
19 #include <stddef.h>
20 #include <signal.h>
21 #ifdef HAVE_NETINET_IN_H
22 #include <netinet/in.h>
23 #endif
24 #include <arpa/inet.h>
25 
26 #include "recvbuff.h"
27 
28 #ifdef KERNEL_PLL
29 #include "ntp_syscall.h"
30 #endif /* KERNEL_PLL */
31 
32 /*
33  * Structure to hold request procedure information
34  */
35 #define	NOAUTH	0
36 #define	AUTH	1
37 
38 #define	NO_REQUEST	(-1)
39 /*
40  * Because we now have v6 addresses in the messages, we need to compensate
41  * for the larger size.  Therefore, we introduce the alternate size to
42  * keep us friendly with older implementations.  A little ugly.
43  */
44 static int client_v6_capable = 0;   /* the client can handle longer messages */
45 
46 #define v6sizeof(type)	(client_v6_capable ? sizeof(type) : v4sizeof(type))
47 
48 struct req_proc {
49 	short request_code;	/* defined request code */
50 	short needs_auth;	/* true when authentication needed */
51 	short sizeofitem;	/* size of request data item (older size)*/
52 	short v6_sizeofitem;	/* size of request data item (new size)*/
53 	void (*handler) (sockaddr_u *, endpt *,
54 			   struct req_pkt *);	/* routine to handle request */
55 };
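/*
 * Editor's note (not in the original source): a sizeofitem /
 * v6_sizeofitem of 0 means the request carries no per-item payload,
 * so process_private() skips its payload-length check for that entry;
 * the tables below are scanned linearly until the NO_REQUEST sentinel.
 */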
56 
57 /*
58  * Universal request codes
59  */
60 static const struct req_proc univ_codes[] = {
61 	{ NO_REQUEST,		NOAUTH,	 0,	0, NULL }
62 };
63 
64 static	void	req_ack	(sockaddr_u *, endpt *, struct req_pkt *, int);
65 static	void *	prepare_pkt	(sockaddr_u *, endpt *,
66 				 struct req_pkt *, size_t);
67 static	void *	more_pkt	(void);
68 static	void	flush_pkt	(void);
69 static	void	list_peers	(sockaddr_u *, endpt *, struct req_pkt *);
70 static	void	list_peers_sum	(sockaddr_u *, endpt *, struct req_pkt *);
71 static	void	peer_info	(sockaddr_u *, endpt *, struct req_pkt *);
72 static	void	peer_stats	(sockaddr_u *, endpt *, struct req_pkt *);
73 static	void	sys_info	(sockaddr_u *, endpt *, struct req_pkt *);
74 static	void	sys_stats	(sockaddr_u *, endpt *, struct req_pkt *);
75 static	void	mem_stats	(sockaddr_u *, endpt *, struct req_pkt *);
76 static	void	io_stats	(sockaddr_u *, endpt *, struct req_pkt *);
77 static	void	timer_stats	(sockaddr_u *, endpt *, struct req_pkt *);
78 static	void	loop_info	(sockaddr_u *, endpt *, struct req_pkt *);
79 static	void	do_conf		(sockaddr_u *, endpt *, struct req_pkt *);
80 static	void	do_unconf	(sockaddr_u *, endpt *, struct req_pkt *);
81 static	void	set_sys_flag	(sockaddr_u *, endpt *, struct req_pkt *);
82 static	void	clr_sys_flag	(sockaddr_u *, endpt *, struct req_pkt *);
83 static	void	setclr_flags	(sockaddr_u *, endpt *, struct req_pkt *, u_long);
84 static	void	list_restrict4	(const restrict_u *, struct info_restrict **);
85 static	void	list_restrict6	(const restrict_u *, struct info_restrict **);
86 static	void	list_restrict	(sockaddr_u *, endpt *, struct req_pkt *);
87 static	void	do_resaddflags	(sockaddr_u *, endpt *, struct req_pkt *);
88 static	void	do_ressubflags	(sockaddr_u *, endpt *, struct req_pkt *);
89 static	void	do_unrestrict	(sockaddr_u *, endpt *, struct req_pkt *);
90 static	void	do_restrict	(sockaddr_u *, endpt *, struct req_pkt *, restrict_op);
91 static	void	mon_getlist	(sockaddr_u *, endpt *, struct req_pkt *);
92 static	void	reset_stats	(sockaddr_u *, endpt *, struct req_pkt *);
93 static	void	reset_peer	(sockaddr_u *, endpt *, struct req_pkt *);
94 static	void	do_key_reread	(sockaddr_u *, endpt *, struct req_pkt *);
95 static	void	trust_key	(sockaddr_u *, endpt *, struct req_pkt *);
96 static	void	untrust_key	(sockaddr_u *, endpt *, struct req_pkt *);
97 static	void	do_trustkey	(sockaddr_u *, endpt *, struct req_pkt *, u_long);
98 static	void	get_auth_info	(sockaddr_u *, endpt *, struct req_pkt *);
99 static	void	req_get_traps	(sockaddr_u *, endpt *, struct req_pkt *);
100 static	void	req_set_trap	(sockaddr_u *, endpt *, struct req_pkt *);
101 static	void	req_clr_trap	(sockaddr_u *, endpt *, struct req_pkt *);
102 static	void	do_setclr_trap	(sockaddr_u *, endpt *, struct req_pkt *, int);
103 static	void	set_request_keyid (sockaddr_u *, endpt *, struct req_pkt *);
104 static	void	set_control_keyid (sockaddr_u *, endpt *, struct req_pkt *);
105 static	void	get_ctl_stats   (sockaddr_u *, endpt *, struct req_pkt *);
106 static	void	get_if_stats    (sockaddr_u *, endpt *, struct req_pkt *);
107 static	void	do_if_reload    (sockaddr_u *, endpt *, struct req_pkt *);
108 #ifdef KERNEL_PLL
109 static	void	get_kernel_info (sockaddr_u *, endpt *, struct req_pkt *);
110 #endif /* KERNEL_PLL */
111 #ifdef REFCLOCK
112 static	void	get_clock_info (sockaddr_u *, endpt *, struct req_pkt *);
113 static	void	set_clock_fudge (sockaddr_u *, endpt *, struct req_pkt *);
114 #endif	/* REFCLOCK */
115 #ifdef REFCLOCK
116 static	void	get_clkbug_info (sockaddr_u *, endpt *, struct req_pkt *);
117 #endif	/* REFCLOCK */
118 
119 /*
120  * ntpd request codes
121  */
122 static const struct req_proc ntp_codes[] = {
123 	{ REQ_PEER_LIST,	NOAUTH,	0, 0,	list_peers },
124 	{ REQ_PEER_LIST_SUM,	NOAUTH,	0, 0,	list_peers_sum },
125 	{ REQ_PEER_INFO,    NOAUTH, v4sizeof(struct info_peer_list),
126 				sizeof(struct info_peer_list), peer_info},
127 	{ REQ_PEER_STATS,   NOAUTH, v4sizeof(struct info_peer_list),
128 				sizeof(struct info_peer_list), peer_stats},
129 	{ REQ_SYS_INFO,		NOAUTH,	0, 0,	sys_info },
130 	{ REQ_SYS_STATS,	NOAUTH,	0, 0,	sys_stats },
131 	{ REQ_IO_STATS,		NOAUTH,	0, 0,	io_stats },
132 	{ REQ_MEM_STATS,	NOAUTH,	0, 0,	mem_stats },
133 	{ REQ_LOOP_INFO,	NOAUTH,	0, 0,	loop_info },
134 	{ REQ_TIMER_STATS,	NOAUTH,	0, 0,	timer_stats },
135 	{ REQ_CONFIG,	    AUTH, v4sizeof(struct conf_peer),
136 				sizeof(struct conf_peer), do_conf },
137 	{ REQ_UNCONFIG,	    AUTH, v4sizeof(struct conf_unpeer),
138 				sizeof(struct conf_unpeer), do_unconf },
139 	{ REQ_SET_SYS_FLAG, AUTH, sizeof(struct conf_sys_flags),
140 				sizeof(struct conf_sys_flags), set_sys_flag },
141 	{ REQ_CLR_SYS_FLAG, AUTH, sizeof(struct conf_sys_flags),
142 				sizeof(struct conf_sys_flags),  clr_sys_flag },
143 	{ REQ_GET_RESTRICT,	NOAUTH,	0, 0,	list_restrict },
144 	{ REQ_RESADDFLAGS, AUTH, v4sizeof(struct conf_restrict),
145 				sizeof(struct conf_restrict), do_resaddflags },
146 	{ REQ_RESSUBFLAGS, AUTH, v4sizeof(struct conf_restrict),
147 				sizeof(struct conf_restrict), do_ressubflags },
148 	{ REQ_UNRESTRICT, AUTH, v4sizeof(struct conf_restrict),
149 				sizeof(struct conf_restrict), do_unrestrict },
150 	{ REQ_MON_GETLIST,	NOAUTH,	0, 0,	mon_getlist },
151 	{ REQ_MON_GETLIST_1,	NOAUTH,	0, 0,	mon_getlist },
152 	{ REQ_RESET_STATS, AUTH, sizeof(struct reset_flags), 0, reset_stats },
153 	{ REQ_RESET_PEER,  AUTH, v4sizeof(struct conf_unpeer),
154 				sizeof(struct conf_unpeer), reset_peer },
155 	{ REQ_REREAD_KEYS,	AUTH,	0, 0,	do_key_reread },
156 	{ REQ_TRUSTKEY,   AUTH, sizeof(u_long), sizeof(u_long), trust_key },
157 	{ REQ_UNTRUSTKEY, AUTH, sizeof(u_long), sizeof(u_long), untrust_key },
158 	{ REQ_AUTHINFO,		NOAUTH,	0, 0,	get_auth_info },
159 	{ REQ_TRAPS,		NOAUTH, 0, 0,	req_get_traps },
160 	{ REQ_ADD_TRAP,	AUTH, v4sizeof(struct conf_trap),
161 				sizeof(struct conf_trap), req_set_trap },
162 	{ REQ_CLR_TRAP,	AUTH, v4sizeof(struct conf_trap),
163 				sizeof(struct conf_trap), req_clr_trap },
164 	{ REQ_REQUEST_KEY, AUTH, sizeof(u_long), sizeof(u_long),
165 				set_request_keyid },
166 	{ REQ_CONTROL_KEY, AUTH, sizeof(u_long), sizeof(u_long),
167 				set_control_keyid },
168 	{ REQ_GET_CTLSTATS,	NOAUTH,	0, 0,	get_ctl_stats },
169 #ifdef KERNEL_PLL
170 	{ REQ_GET_KERNEL,	NOAUTH,	0, 0,	get_kernel_info },
171 #endif
172 #ifdef REFCLOCK
173 	{ REQ_GET_CLOCKINFO, NOAUTH, sizeof(u_int32), sizeof(u_int32),
174 				get_clock_info },
175 	{ REQ_SET_CLKFUDGE, AUTH, sizeof(struct conf_fudge),
176 				sizeof(struct conf_fudge), set_clock_fudge },
177 	{ REQ_GET_CLKBUGINFO, NOAUTH, sizeof(u_int32), sizeof(u_int32),
178 				get_clkbug_info },
179 #endif
180 	{ REQ_IF_STATS,		AUTH, 0, 0,	get_if_stats },
181 	{ REQ_IF_RELOAD,	AUTH, 0, 0,	do_if_reload },
182 
183 	{ NO_REQUEST,		NOAUTH,	0, 0,	0 }
184 };
185 
186 
187 /*
188  * Authentication keyid used to authenticate requests.  Zero means we
189  * don't allow writing anything.
190  */
191 keyid_t info_auth_keyid;
192 
193 /*
194  * Statistic counters to keep track of requests and responses.
195  */
196 u_long numrequests;		/* number of requests we've received */
197 u_long numresppkts;		/* number of resp packets sent with data */
198 
199 /*
200  * lazy way to count errors, indexed by the error code
201  */
202 u_long errorcounter[MAX_INFO_ERR + 1];
203 
204 /*
205  * A hack.  To keep the authentication module clear of ntp-isms, we
206  * include a time reset variable for its stats here.
207  */
208 u_long auth_timereset;
209 
210 /*
211  * Response packet used by these routines.  Also some state information
212  * so that we can handle packet formatting within a common set of
213  * subroutines.  Note we try to enter data in place whenever possible,
214  * but the need to set the more bit correctly means we occasionally
215  * use the extra buffer and copy.
216  */
217 static struct resp_pkt rpkt;
218 static int reqver;
219 static int seqno;
220 static int nitems;
221 static int itemsize;
222 static int databytes;
223 static char exbuf[RESP_DATA_SIZE];
224 static int usingexbuf;
225 static sockaddr_u *toaddr;
226 static endpt *frominter;
227 
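/*
 * Editor's sketch (not in the original file): the canonical way the
 * handlers below use prepare_pkt()/more_pkt()/flush_pkt().  The names
 * info_foo, have_more_entries() and fill_in() are placeholders.
 */
#if 0	/* illustrative only, never compiled */
	struct info_foo *item;

	item = (struct info_foo *)prepare_pkt(srcadr, inter, inpkt,
					      v6sizeof(struct info_foo));
	while (have_more_entries() && item != NULL) {
		fill_in(item);		/* write the reply fields in place */
		item = (struct info_foo *)more_pkt();
	}
	flush_pkt();			/* send the final (or only) packet */
#endif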
228 /*
229  * init_request - initialize request data
230  */
231 void
232 init_request (void)
233 {
234 	size_t i;
235 
236 	numrequests = 0;
237 	numresppkts = 0;
238 	auth_timereset = 0;
239 	info_auth_keyid = 0;	/* by default, can't do this */
240 
241 	for (i = 0; i < sizeof(errorcounter)/sizeof(errorcounter[0]); i++)
242 	    errorcounter[i] = 0;
243 }
244 
245 
246 /*
247  * req_ack - acknowledge request with no data
248  */
249 static void
250 req_ack(
251 	sockaddr_u *srcadr,
252 	endpt *inter,
253 	struct req_pkt *inpkt,
254 	int errcode
255 	)
256 {
257 	/*
258 	 * fill in the fields
259 	 */
260 	rpkt.rm_vn_mode = RM_VN_MODE(RESP_BIT, 0, reqver);
261 	rpkt.auth_seq = AUTH_SEQ(0, 0);
262 	rpkt.implementation = inpkt->implementation;
263 	rpkt.request = inpkt->request;
264 	rpkt.err_nitems = ERR_NITEMS(errcode, 0);
265 	rpkt.mbz_itemsize = MBZ_ITEMSIZE(0);
266 
267 	/*
268 	 * send packet and bump counters
269 	 */
270 	sendpkt(srcadr, inter, -1, (struct pkt *)&rpkt, RESP_HEADER_SIZE);
271 	errorcounter[errcode]++;
272 }
273 
274 
275 /*
276  * prepare_pkt - prepare response packet for transmission, return pointer
277  *		 to storage for data item.
278  */
279 static void *
280 prepare_pkt(
281 	sockaddr_u *srcadr,
282 	endpt *inter,
283 	struct req_pkt *pkt,
284 	size_t structsize
285 	)
286 {
287 	DPRINTF(4, ("request: preparing pkt\n"));
288 
289 	/*
290 	 * Fill in the implementation, request and itemsize fields
291 	 * since these won't change.
292 	 */
293 	rpkt.implementation = pkt->implementation;
294 	rpkt.request = pkt->request;
295 	rpkt.mbz_itemsize = MBZ_ITEMSIZE(structsize);
296 
297 	/*
298 	 * Compute the static data needed to carry on.
299 	 */
300 	toaddr = srcadr;
301 	frominter = inter;
302 	seqno = 0;
303 	nitems = 0;
304 	itemsize = structsize;
305 	databytes = 0;
306 	usingexbuf = 0;
307 
308 	/*
309 	 * return the beginning of the packet buffer.
310 	 */
311 	return &rpkt.u;
312 }
313 
314 
315 /*
316  * more_pkt - return a data pointer for a new item.
317  */
318 static void *
319 more_pkt(void)
320 {
321 	/*
322 	 * If we were using the extra buffer, send the packet.
323 	 */
324 	if (usingexbuf) {
325 		DPRINTF(3, ("request: sending pkt\n"));
326 		rpkt.rm_vn_mode = RM_VN_MODE(RESP_BIT, MORE_BIT, reqver);
327 		rpkt.auth_seq = AUTH_SEQ(0, seqno);
328 		rpkt.err_nitems = htons((u_short)nitems);
329 		sendpkt(toaddr, frominter, -1, (struct pkt *)&rpkt,
330 			RESP_HEADER_SIZE + databytes);
331 		numresppkts++;
332 
333 		/*
334 		 * Copy data out of exbuf into the packet.
335 		 */
336 		memcpy(&rpkt.u.data[0], exbuf, (unsigned)itemsize);
337 		seqno++;
338 		databytes = 0;
339 		nitems = 0;
340 		usingexbuf = 0;
341 	}
342 
343 	databytes += itemsize;
344 	nitems++;
345 	if (databytes + itemsize <= RESP_DATA_SIZE) {
346 		DPRINTF(4, ("request: giving him more data\n"));
347 		/*
348 		 * More room in packet.  Give him the
349 		 * next address.
350 		 */
351 		return &rpkt.u.data[databytes];
352 	} else {
353 		/*
354 		 * No room in packet.  Give him the extra
355 		 * buffer unless this was the last in the sequence.
356 		 */
357 		DPRINTF(4, ("request: into extra buffer\n"));
358 		if (seqno == MAXSEQ)
359 			return NULL;
360 		else {
361 			usingexbuf = 1;
362 			return exbuf;
363 		}
364 	}
365 }
366 
367 
368 /*
369  * flush_pkt - we're done, send any remaining information.
370  */
371 static void
372 flush_pkt(void)
373 {
374 	DPRINTF(3, ("request: flushing packet, %d items\n", nitems));
375 	/*
376 	 * Must send the last packet.  If nothing in here and nothing
377 	 * has been sent, send an error saying no data to be found.
378 	 */
379 	if (seqno == 0 && nitems == 0)
380 		req_ack(toaddr, frominter, (struct req_pkt *)&rpkt,
381 			INFO_ERR_NODATA);
382 	else {
383 		rpkt.rm_vn_mode = RM_VN_MODE(RESP_BIT, 0, reqver);
384 		rpkt.auth_seq = AUTH_SEQ(0, seqno);
385 		rpkt.err_nitems = htons((u_short)nitems);
386 		sendpkt(toaddr, frominter, -1, (struct pkt *)&rpkt,
387 			RESP_HEADER_SIZE+databytes);
388 		numresppkts++;
389 	}
390 }
391 
392 
393 
394 /*
395  * Given a buffer, return the packet mode
396  */
397 int
398 get_packet_mode(struct recvbuf *rbufp)
399 {
400 	struct req_pkt *inpkt = (struct req_pkt *)&rbufp->recv_pkt;
401 	return (INFO_MODE(inpkt->rm_vn_mode));
402 }
403 
404 
405 /*
406  * process_private - process private mode (7) packets
407  */
408 void
409 process_private(
410 	struct recvbuf *rbufp,
411 	int mod_okay
412 	)
413 {
414 	static u_long quiet_until;
415 	struct req_pkt *inpkt;
416 	struct req_pkt_tail *tailinpkt;
417 	sockaddr_u *srcadr;
418 	endpt *inter;
419 	const struct req_proc *proc;
420 	int ec;
421 	short temp_size;
422 	l_fp ftmp;
423 	double dtemp;
424 	size_t recv_len;
425 	size_t noslop_len;
426 	size_t mac_len;
427 
428 	/*
429 	 * Initialize pointers, for convenience
430 	 */
431 	recv_len = rbufp->recv_length;
432 	inpkt = (struct req_pkt *)&rbufp->recv_pkt;
433 	srcadr = &rbufp->recv_srcadr;
434 	inter = rbufp->dstadr;
435 
436 	DPRINTF(3, ("process_private: impl %d req %d\n",
437 		    inpkt->implementation, inpkt->request));
438 
439 	/*
440 	 * Do some sanity checks on the packet.  Return a format
441 	 * error if it fails.
442 	 */
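	/*
	 * Editor's note (not in the original): each "++ec," below bumps
	 * the counter before its test runs, so when one of the
	 * short-circuited checks fires, ec identifies which drop test
	 * failed in the log message that follows.
	 */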
443 	ec = 0;
444 	if (   (++ec, ISRESPONSE(inpkt->rm_vn_mode))
445 	    || (++ec, ISMORE(inpkt->rm_vn_mode))
446 	    || (++ec, INFO_VERSION(inpkt->rm_vn_mode) > NTP_VERSION)
447 	    || (++ec, INFO_VERSION(inpkt->rm_vn_mode) < NTP_OLDVERSION)
448 	    || (++ec, INFO_SEQ(inpkt->auth_seq) != 0)
449 	    || (++ec, INFO_ERR(inpkt->err_nitems) != 0)
450 	    || (++ec, INFO_MBZ(inpkt->mbz_itemsize) != 0)
451 	    || (++ec, rbufp->recv_length < (int)REQ_LEN_HDR)
452 		) {
453 		NLOG(NLOG_SYSEVENT)
454 			if (current_time >= quiet_until) {
455 				msyslog(LOG_ERR,
456 					"process_private: drop test %d"
457 					" failed, pkt from %s",
458 					ec, stoa(srcadr));
459 				quiet_until = current_time + 60;
460 			}
461 		return;
462 	}
463 
464 	reqver = INFO_VERSION(inpkt->rm_vn_mode);
465 
466 	/*
467 	 * Get the appropriate procedure list to search.
468 	 */
469 	if (inpkt->implementation == IMPL_UNIV)
470 		proc = univ_codes;
471 	else if ((inpkt->implementation == IMPL_XNTPD) ||
472 		 (inpkt->implementation == IMPL_XNTPD_OLD))
473 		proc = ntp_codes;
474 	else {
475 		req_ack(srcadr, inter, inpkt, INFO_ERR_IMPL);
476 		return;
477 	}
478 
479 	/*
480 	 * Search the list for the request codes.  If it isn't one
481 	 * we know, return an error.
482 	 */
483 	while (proc->request_code != NO_REQUEST) {
484 		if (proc->request_code == (short) inpkt->request)
485 			break;
486 		proc++;
487 	}
488 	if (proc->request_code == NO_REQUEST) {
489 		req_ack(srcadr, inter, inpkt, INFO_ERR_REQ);
490 		return;
491 	}
492 
493 	DPRINTF(4, ("found request in tables\n"));
494 
495 	/*
496 	 * If we need data, check to see if we have some.  If we
497 	 * don't, check to see that there is none (picky, picky).
498 	 */
499 
500 	/* This part is a bit tricky: we want to be sure that the item
501 	 * size in the request is either the old or the new size.  We can
502 	 * also find out whether the client accepts both types of messages this way.
503 	 *
504 	 * Handle the exception of REQ_CONFIG: it can have two data sizes.
505 	 */
506 	temp_size = INFO_ITEMSIZE(inpkt->mbz_itemsize);
507 	if ((temp_size != proc->sizeofitem &&
508 	     temp_size != proc->v6_sizeofitem) &&
509 	    !(inpkt->implementation == IMPL_XNTPD &&
510 	      inpkt->request == REQ_CONFIG &&
511 	      temp_size == sizeof(struct old_conf_peer))) {
512 		DPRINTF(3, ("process_private: wrong item size, received %d, should be %d or %d\n",
513 			    temp_size, proc->sizeofitem, proc->v6_sizeofitem));
514 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
515 		return;
516 	}
517 	if ((proc->sizeofitem != 0) &&
518 	    ((size_t)(temp_size * INFO_NITEMS(inpkt->err_nitems)) >
519 	     (recv_len - REQ_LEN_HDR))) {
520 		DPRINTF(3, ("process_private: not enough data\n"));
521 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
522 		return;
523 	}
524 
525 	switch (inpkt->implementation) {
526 	case IMPL_XNTPD:
527 		client_v6_capable = 1;
528 		break;
529 	case IMPL_XNTPD_OLD:
530 		client_v6_capable = 0;
531 		break;
532 	default:
533 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
534 		return;
535 	}
536 
537 	/*
538 	 * If we need to authenticate, do so.  Note that an
539 	 * authenticatable packet must include a mac field, must
540 	 * have used key info_auth_keyid and must have included
541 	 * a time stamp in the appropriate field.  The time stamp
542 	 * must be within INFO_TS_MAXSKEW of the receive
543 	 * time stamp.
544 	 */
545 	if (proc->needs_auth && sys_authenticate) {
546 
547 		if (recv_len < (REQ_LEN_HDR +
548 		    (INFO_ITEMSIZE(inpkt->mbz_itemsize) *
549 		    INFO_NITEMS(inpkt->err_nitems)) +
550 		    REQ_TAIL_MIN)) {
551 			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
552 			return;
553 		}
554 
555 		/*
556 		 * For 16-octet digests, regardless of itemsize and
557 		 * nitems, authenticated requests are a fixed size
558 		 * with the timestamp, key ID, and digest located
559 		 * at the end of the packet.  Because the key ID
560 		 * determining the digest size precedes the digest,
561 		 * for larger digests the fixed size request scheme
562 		 * is abandoned and the timestamp, key ID, and digest
563 		 * are located relative to the start of the packet,
564 		 * with the digest size determined by the packet size.
565 		 */
566 		noslop_len = REQ_LEN_HDR
567 			     + INFO_ITEMSIZE(inpkt->mbz_itemsize) *
568 			       INFO_NITEMS(inpkt->err_nitems)
569 			     + sizeof(inpkt->tstamp);
570 		/* 32-bit alignment */
571 		noslop_len = (noslop_len + 3) & ~3;
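		/*
		 * Editor's note (not in the original): the 20 octets
		 * assumed here presumably correspond to the classic MAC
		 * layout of a 4-octet key ID plus a 16-octet (MD5)
		 * digest.
		 */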
572 		if (recv_len > (noslop_len + MAX_MAC_LEN))
573 			mac_len = 20;
574 		else
575 			mac_len = recv_len - noslop_len;
576 
577 		tailinpkt = (void *)((char *)inpkt + recv_len -
578 			    (mac_len + sizeof(inpkt->tstamp)));
579 
580 		/*
581 		 * If this guy is restricted from doing this, don't let
582 		 * him.  If the wrong key was used, or the packet doesn't
583 		 * have a MAC, return.
584 		 */
585 		/* XXX: Use authistrustedip(), or equivalent. */
586 		if (!INFO_IS_AUTH(inpkt->auth_seq) || !info_auth_keyid
587 		    || ntohl(tailinpkt->keyid) != info_auth_keyid) {
588 			DPRINTF(5, ("failed auth %d info_auth_keyid %u pkt keyid %u maclen %lu\n",
589 				    INFO_IS_AUTH(inpkt->auth_seq),
590 				    info_auth_keyid,
591 				    ntohl(tailinpkt->keyid), (u_long)mac_len));
592 #ifdef DEBUG
593 			msyslog(LOG_DEBUG,
594 				"process_private: failed auth %d info_auth_keyid %u pkt keyid %u maclen %lu\n",
595 				INFO_IS_AUTH(inpkt->auth_seq),
596 				info_auth_keyid,
597 				ntohl(tailinpkt->keyid), (u_long)mac_len);
598 #endif
599 			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
600 			return;
601 		}
602 		if (recv_len > REQ_LEN_NOMAC + MAX_MAC_LEN) {
603 			DPRINTF(5, ("bad pkt length %zu\n", recv_len));
604 			msyslog(LOG_ERR,
605 				"process_private: bad pkt length %zu",
606 				recv_len);
607 			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
608 			return;
609 		}
610 		if (!mod_okay || !authhavekey(info_auth_keyid)) {
611 			DPRINTF(5, ("failed auth mod_okay %d\n",
612 				    mod_okay));
613 #ifdef DEBUG
614 			msyslog(LOG_DEBUG,
615 				"process_private: failed auth mod_okay %d\n",
616 				mod_okay);
617 #endif
618 			if (!mod_okay) {
619 				sys_restricted++;
620 			}
621 			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
622 			return;
623 		}
624 
625 		/*
626 		 * calculate absolute time difference between xmit time stamp
627 		 * and receive time stamp.  If too large, too bad.
628 		 */
629 		NTOHL_FP(&tailinpkt->tstamp, &ftmp);
630 		L_SUB(&ftmp, &rbufp->recv_time);
631 		LFPTOD(&ftmp, dtemp);
632 		if (fabs(dtemp) > INFO_TS_MAXSKEW) {
633 			/*
634 			 * He's a loser.  Tell him.
635 			 */
636 			DPRINTF(5, ("xmit/rcv timestamp delta %g > INFO_TS_MAXSKEW %g\n",
637 				    dtemp, INFO_TS_MAXSKEW));
638 			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
639 			return;
640 		}
641 
642 		/*
643 		 * So far so good.  See if decryption works out okay.
644 		 */
645 		if (!authdecrypt(info_auth_keyid, (u_int32 *)inpkt,
646 				 recv_len - mac_len, mac_len)) {
647 			DPRINTF(5, ("authdecrypt failed\n"));
648 			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
649 			return;
650 		}
651 	}
652 
653 	DPRINTF(3, ("process_private: all okay, into handler\n"));
654 	/*
655 	 * Packet is okay.  Call the handler to send him data.
656 	 */
657 	(proc->handler)(srcadr, inter, inpkt);
658 }
659 
660 
661 /*
662  * list_peers - send a list of the peers
663  */
664 static void
665 list_peers(
666 	sockaddr_u *srcadr,
667 	endpt *inter,
668 	struct req_pkt *inpkt
669 	)
670 {
671 	struct info_peer_list *	ip;
672 	const struct peer *	pp;
673 
674 	ip = (struct info_peer_list *)prepare_pkt(srcadr, inter, inpkt,
675 	    v6sizeof(struct info_peer_list));
676 	for (pp = peer_list; pp != NULL && ip != NULL; pp = pp->p_link) {
677 		if (IS_IPV6(&pp->srcadr)) {
678 			if (!client_v6_capable)
679 				continue;
680 			ip->addr6 = SOCK_ADDR6(&pp->srcadr);
681 			ip->v6_flag = 1;
682 		} else {
683 			ip->addr = NSRCADR(&pp->srcadr);
684 			if (client_v6_capable)
685 				ip->v6_flag = 0;
686 		}
687 
688 		ip->port = NSRCPORT(&pp->srcadr);
689 		ip->hmode = pp->hmode;
690 		ip->flags = 0;
691 		if (pp->flags & FLAG_CONFIG)
692 			ip->flags |= INFO_FLAG_CONFIG;
693 		if (pp == sys_peer)
694 			ip->flags |= INFO_FLAG_SYSPEER;
695 		if (pp->status == CTL_PST_SEL_SYNCCAND)
696 			ip->flags |= INFO_FLAG_SEL_CANDIDATE;
697 		if (pp->status >= CTL_PST_SEL_SYSPEER)
698 			ip->flags |= INFO_FLAG_SHORTLIST;
699 		ip = (struct info_peer_list *)more_pkt();
700 	}	/* for pp */
701 
702 	flush_pkt();
703 }
704 
705 
706 /*
707  * list_peers_sum - return extended peer list
708  */
709 static void
710 list_peers_sum(
711 	sockaddr_u *srcadr,
712 	endpt *inter,
713 	struct req_pkt *inpkt
714 	)
715 {
716 	struct info_peer_summary *	ips;
717 	const struct peer *		pp;
718 	l_fp 				ltmp;
719 
720 	DPRINTF(3, ("wants peer list summary\n"));
721 
722 	ips = (struct info_peer_summary *)prepare_pkt(srcadr, inter, inpkt,
723 	    v6sizeof(struct info_peer_summary));
724 	for (pp = peer_list; pp != NULL && ips != NULL; pp = pp->p_link) {
725 		DPRINTF(4, ("sum: got one\n"));
726 		/*
727 		 * Be careful here not to return v6 peers when we
728 		 * want only v4.
729 		 */
730 		if (IS_IPV6(&pp->srcadr)) {
731 			if (!client_v6_capable)
732 				continue;
733 			ips->srcadr6 = SOCK_ADDR6(&pp->srcadr);
734 			ips->v6_flag = 1;
735 			if (pp->dstadr)
736 				ips->dstadr6 = SOCK_ADDR6(&pp->dstadr->sin);
737 			else
738 				ZERO(ips->dstadr6);
739 		} else {
740 			ips->srcadr = NSRCADR(&pp->srcadr);
741 			if (client_v6_capable)
742 				ips->v6_flag = 0;
743 
744 			if (pp->dstadr) {
745 				if (!pp->processed)
746 					ips->dstadr = NSRCADR(&pp->dstadr->sin);
747 				else {
748 					if (MDF_BCAST == pp->cast_flags)
749 						ips->dstadr = NSRCADR(&pp->dstadr->bcast);
750 					else if (pp->cast_flags) {
751 						ips->dstadr = NSRCADR(&pp->dstadr->sin);
752 						if (!ips->dstadr)
753 							ips->dstadr = NSRCADR(&pp->dstadr->bcast);
754 					}
755 				}
756 			} else {
757 				ips->dstadr = 0;
758 			}
759 		}
760 
761 		ips->srcport = NSRCPORT(&pp->srcadr);
762 		ips->stratum = pp->stratum;
763 		ips->hpoll = pp->hpoll;
764 		ips->ppoll = pp->ppoll;
765 		ips->reach = pp->reach;
766 		ips->flags = 0;
767 		if (pp == sys_peer)
768 			ips->flags |= INFO_FLAG_SYSPEER;
769 		if (pp->flags & FLAG_CONFIG)
770 			ips->flags |= INFO_FLAG_CONFIG;
771 		if (pp->flags & FLAG_REFCLOCK)
772 			ips->flags |= INFO_FLAG_REFCLOCK;
773 		if (pp->flags & FLAG_PREFER)
774 			ips->flags |= INFO_FLAG_PREFER;
775 		if (pp->flags & FLAG_BURST)
776 			ips->flags |= INFO_FLAG_BURST;
777 		if (pp->status == CTL_PST_SEL_SYNCCAND)
778 			ips->flags |= INFO_FLAG_SEL_CANDIDATE;
779 		if (pp->status >= CTL_PST_SEL_SYSPEER)
780 			ips->flags |= INFO_FLAG_SHORTLIST;
781 		ips->hmode = pp->hmode;
782 		ips->delay = HTONS_FP(DTOFP(pp->delay));
783 		DTOLFP(pp->offset, &ltmp);
784 		HTONL_FP(&ltmp, &ips->offset);
785 		ips->dispersion = HTONS_FP(DTOUFP(SQRT(pp->disp)));
786 
787 		ips = (struct info_peer_summary *)more_pkt();
788 	}	/* for pp */
789 
790 	flush_pkt();
791 }
792 
793 
794 /*
795  * peer_info - send information for one or more peers
796  */
797 static void
798 peer_info (
799 	sockaddr_u *srcadr,
800 	endpt *inter,
801 	struct req_pkt *inpkt
802 	)
803 {
804 	u_short			items;
805 	size_t			item_sz;
806 	char *			datap;
807 	struct info_peer_list	ipl;
808 	struct peer *		pp;
809 	struct info_peer *	ip;
810 	int			i;
811 	int			j;
812 	sockaddr_u		addr;
813 	l_fp			ltmp;
814 
815 	items = INFO_NITEMS(inpkt->err_nitems);
816 	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
817 	datap = inpkt->u.data;
818 	if (item_sz != sizeof(ipl)) {
819 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
820 		return;
821 	}
822 	ip = prepare_pkt(srcadr, inter, inpkt,
823 			 v6sizeof(struct info_peer));
824 	while (items-- > 0 && ip != NULL) {
825 		ZERO(ipl);
826 		memcpy(&ipl, datap, item_sz);
827 		ZERO_SOCK(&addr);
828 		NSRCPORT(&addr) = ipl.port;
829 		if (client_v6_capable && ipl.v6_flag) {
830 			AF(&addr) = AF_INET6;
831 			SOCK_ADDR6(&addr) = ipl.addr6;
832 		} else {
833 			AF(&addr) = AF_INET;
834 			NSRCADR(&addr) = ipl.addr;
835 		}
836 #ifdef ISC_PLATFORM_HAVESALEN
837 		addr.sa.sa_len = SOCKLEN(&addr);
838 #endif
839 		datap += item_sz;
840 
841 		pp = findexistingpeer(&addr, NULL, NULL, -1, 0, NULL);
842 		if (NULL == pp)
843 			continue;
844 		if (IS_IPV6(&pp->srcadr)) {
845 			if (pp->dstadr)
846 				ip->dstadr6 =
847 				    (MDF_BCAST == pp->cast_flags)
848 					? SOCK_ADDR6(&pp->dstadr->bcast)
849 					: SOCK_ADDR6(&pp->dstadr->sin);
850 			else
851 				ZERO(ip->dstadr6);
852 
853 			ip->srcadr6 = SOCK_ADDR6(&pp->srcadr);
854 			ip->v6_flag = 1;
855 		} else {
856 			if (pp->dstadr) {
857 				if (!pp->processed)
858 					ip->dstadr = NSRCADR(&pp->dstadr->sin);
859 				else {
860 					if (MDF_BCAST == pp->cast_flags)
861 						ip->dstadr = NSRCADR(&pp->dstadr->bcast);
862 					else if (pp->cast_flags) {
863 						ip->dstadr = NSRCADR(&pp->dstadr->sin);
864 						if (!ip->dstadr)
865 							ip->dstadr = NSRCADR(&pp->dstadr->bcast);
866 					}
867 				}
868 			} else
869 				ip->dstadr = 0;
870 
871 			ip->srcadr = NSRCADR(&pp->srcadr);
872 			if (client_v6_capable)
873 				ip->v6_flag = 0;
874 		}
875 		ip->srcport = NSRCPORT(&pp->srcadr);
876 		ip->flags = 0;
877 		if (pp == sys_peer)
878 			ip->flags |= INFO_FLAG_SYSPEER;
879 		if (pp->flags & FLAG_CONFIG)
880 			ip->flags |= INFO_FLAG_CONFIG;
881 		if (pp->flags & FLAG_REFCLOCK)
882 			ip->flags |= INFO_FLAG_REFCLOCK;
883 		if (pp->flags & FLAG_PREFER)
884 			ip->flags |= INFO_FLAG_PREFER;
885 		if (pp->flags & FLAG_BURST)
886 			ip->flags |= INFO_FLAG_BURST;
887 		if (pp->status == CTL_PST_SEL_SYNCCAND)
888 			ip->flags |= INFO_FLAG_SEL_CANDIDATE;
889 		if (pp->status >= CTL_PST_SEL_SYSPEER)
890 			ip->flags |= INFO_FLAG_SHORTLIST;
891 		ip->leap = pp->leap;
892 		ip->hmode = pp->hmode;
893 		ip->pmode = pp->pmode;
894 		ip->keyid = pp->keyid;
895 		ip->stratum = pp->stratum;
896 		ip->ppoll = pp->ppoll;
897 		ip->hpoll = pp->hpoll;
898 		ip->precision = pp->precision;
899 		ip->version = pp->version;
900 		ip->reach = pp->reach;
901 		ip->unreach = (u_char)pp->unreach;
902 		ip->flash = (u_char)pp->flash;
903 		ip->flash2 = (u_short)pp->flash;
904 		ip->estbdelay = HTONS_FP(DTOFP(pp->delay));
905 		ip->ttl = (u_char)pp->ttl;
906 		ip->associd = htons(pp->associd);
907 		ip->rootdelay = HTONS_FP(DTOUFP(pp->rootdelay));
908 		ip->rootdispersion = HTONS_FP(DTOUFP(pp->rootdisp));
909 		ip->refid = pp->refid;
910 		HTONL_FP(&pp->reftime, &ip->reftime);
911 		HTONL_FP(&pp->aorg, &ip->org);
912 		HTONL_FP(&pp->rec, &ip->rec);
913 		HTONL_FP(&pp->xmt, &ip->xmt);
914 		j = pp->filter_nextpt - 1;
915 		for (i = 0; i < NTP_SHIFT; i++, j--) {
916 			if (j < 0)
917 				j = NTP_SHIFT-1;
918 			ip->filtdelay[i] = HTONS_FP(DTOFP(pp->filter_delay[j]));
919 			DTOLFP(pp->filter_offset[j], &ltmp);
920 			HTONL_FP(&ltmp, &ip->filtoffset[i]);
921 			ip->order[i] = (u_char)((pp->filter_nextpt +
922 						 NTP_SHIFT - 1) -
923 						pp->filter_order[i]);
924 			if (ip->order[i] >= NTP_SHIFT)
925 				ip->order[i] -= NTP_SHIFT;
926 		}
927 		DTOLFP(pp->offset, &ltmp);
928 		HTONL_FP(&ltmp, &ip->offset);
929 		ip->delay = HTONS_FP(DTOFP(pp->delay));
930 		ip->dispersion = HTONS_FP(DTOUFP(SQRT(pp->disp)));
931 		ip->selectdisp = HTONS_FP(DTOUFP(SQRT(pp->jitter)));
932 		ip = more_pkt();
933 	}
934 	flush_pkt();
935 }
936 
937 
938 /*
939  * peer_stats - send statistics for one or more peers
940  */
941 static void
942 peer_stats (
943 	sockaddr_u *srcadr,
944 	endpt *inter,
945 	struct req_pkt *inpkt
946 	)
947 {
948 	u_short			items;
949 	size_t			item_sz;
950 	char *			datap;
951 	struct info_peer_list	ipl;
952 	struct peer *		pp;
953 	struct info_peer_stats *ip;
954 	sockaddr_u addr;
955 
956 	DPRINTF(1, ("peer_stats: called\n"));
957 	items = INFO_NITEMS(inpkt->err_nitems);
958 	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
959 	datap = inpkt->u.data;
960 	if (item_sz > sizeof(ipl)) {
961 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
962 		return;
963 	}
964 	ip = prepare_pkt(srcadr, inter, inpkt,
965 			 v6sizeof(struct info_peer_stats));
966 	while (items-- > 0 && ip != NULL) {
967 		ZERO(ipl);
968 		memcpy(&ipl, datap, item_sz);
969 		ZERO(addr);
970 		NSRCPORT(&addr) = ipl.port;
971 		if (client_v6_capable && ipl.v6_flag) {
972 			AF(&addr) = AF_INET6;
973 			SOCK_ADDR6(&addr) = ipl.addr6;
974 		} else {
975 			AF(&addr) = AF_INET;
976 			NSRCADR(&addr) = ipl.addr;
977 		}
978 #ifdef ISC_PLATFORM_HAVESALEN
979 		addr.sa.sa_len = SOCKLEN(&addr);
980 #endif
981 		DPRINTF(1, ("peer_stats: looking for %s, %d, %d\n",
982 			    stoa(&addr), ipl.port, NSRCPORT(&addr)));
983 
984 		datap += item_sz;
985 
986 		pp = findexistingpeer(&addr, NULL, NULL, -1, 0, NULL);
987 		if (NULL == pp)
988 			continue;
989 
990 		DPRINTF(1, ("peer_stats: found %s\n", stoa(&addr)));
991 
992 		if (IS_IPV4(&pp->srcadr)) {
993 			if (pp->dstadr) {
994 				if (!pp->processed)
995 					ip->dstadr = NSRCADR(&pp->dstadr->sin);
996 				else {
997 					if (MDF_BCAST == pp->cast_flags)
998 						ip->dstadr = NSRCADR(&pp->dstadr->bcast);
999 					else if (pp->cast_flags) {
1000 						ip->dstadr = NSRCADR(&pp->dstadr->sin);
1001 						if (!ip->dstadr)
1002 							ip->dstadr = NSRCADR(&pp->dstadr->bcast);
1003 					}
1004 				}
1005 			} else
1006 				ip->dstadr = 0;
1007 
1008 			ip->srcadr = NSRCADR(&pp->srcadr);
1009 			if (client_v6_capable)
1010 				ip->v6_flag = 0;
1011 		} else {
1012 			if (pp->dstadr)
1013 				ip->dstadr6 =
1014 				    (MDF_BCAST == pp->cast_flags)
1015 					? SOCK_ADDR6(&pp->dstadr->bcast)
1016 					: SOCK_ADDR6(&pp->dstadr->sin);
1017 			else
1018 				ZERO(ip->dstadr6);
1019 
1020 			ip->srcadr6 = SOCK_ADDR6(&pp->srcadr);
1021 			ip->v6_flag = 1;
1022 		}
1023 		ip->srcport = NSRCPORT(&pp->srcadr);
1024 		ip->flags = 0;
1025 		if (pp == sys_peer)
1026 		    ip->flags |= INFO_FLAG_SYSPEER;
1027 		if (pp->flags & FLAG_CONFIG)
1028 		    ip->flags |= INFO_FLAG_CONFIG;
1029 		if (pp->flags & FLAG_REFCLOCK)
1030 		    ip->flags |= INFO_FLAG_REFCLOCK;
1031 		if (pp->flags & FLAG_PREFER)
1032 		    ip->flags |= INFO_FLAG_PREFER;
1033 		if (pp->flags & FLAG_BURST)
1034 		    ip->flags |= INFO_FLAG_BURST;
1035 		if (pp->flags & FLAG_IBURST)
1036 		    ip->flags |= INFO_FLAG_IBURST;
1037 		if (pp->status == CTL_PST_SEL_SYNCCAND)
1038 		    ip->flags |= INFO_FLAG_SEL_CANDIDATE;
1039 		if (pp->status >= CTL_PST_SEL_SYSPEER)
1040 		    ip->flags |= INFO_FLAG_SHORTLIST;
1041 		ip->flags = htons(ip->flags);
1042 		ip->timereceived = htonl((u_int32)(current_time - pp->timereceived));
1043 		ip->timetosend = htonl(pp->nextdate - current_time);
1044 		ip->timereachable = htonl((u_int32)(current_time - pp->timereachable));
1045 		ip->sent = htonl((u_int32)(pp->sent));
1046 		ip->processed = htonl((u_int32)(pp->processed));
1047 		ip->badauth = htonl((u_int32)(pp->badauth));
1048 		ip->bogusorg = htonl((u_int32)(pp->bogusorg));
1049 		ip->oldpkt = htonl((u_int32)(pp->oldpkt));
1050 		ip->seldisp = htonl((u_int32)(pp->seldisptoolarge));
1051 		ip->selbroken = htonl((u_int32)(pp->selbroken));
1052 		ip->candidate = pp->status;
1053 		ip = (struct info_peer_stats *)more_pkt();
1054 	}
1055 	flush_pkt();
1056 }
1057 
1058 
1059 /*
1060  * sys_info - return system info
1061  */
1062 static void
1063 sys_info(
1064 	sockaddr_u *srcadr,
1065 	endpt *inter,
1066 	struct req_pkt *inpkt
1067 	)
1068 {
1069 	register struct info_sys *is;
1070 
1071 	is = (struct info_sys *)prepare_pkt(srcadr, inter, inpkt,
1072 	    v6sizeof(struct info_sys));
1073 
1074 	if (sys_peer) {
1075 		if (IS_IPV4(&sys_peer->srcadr)) {
1076 			is->peer = NSRCADR(&sys_peer->srcadr);
1077 			if (client_v6_capable)
1078 				is->v6_flag = 0;
1079 		} else if (client_v6_capable) {
1080 			is->peer6 = SOCK_ADDR6(&sys_peer->srcadr);
1081 			is->v6_flag = 1;
1082 		}
1083 		is->peer_mode = sys_peer->hmode;
1084 	} else {
1085 		is->peer = 0;
1086 		if (client_v6_capable) {
1087 			is->v6_flag = 0;
1088 		}
1089 		is->peer_mode = 0;
1090 	}
1091 
1092 	is->leap = sys_leap;
1093 	is->stratum = sys_stratum;
1094 	is->precision = sys_precision;
1095 	is->rootdelay = htonl(DTOFP(sys_rootdelay));
1096 	is->rootdispersion = htonl(DTOUFP(sys_rootdisp));
1097 	is->frequency = htonl(DTOFP(sys_jitter));
1098 	is->stability = htonl(DTOUFP(clock_stability * 1e6));
1099 	is->refid = sys_refid;
1100 	HTONL_FP(&sys_reftime, &is->reftime);
1101 
1102 	is->poll = sys_poll;
1103 
1104 	is->flags = 0;
1105 	if (sys_authenticate)
1106 		is->flags |= INFO_FLAG_AUTHENTICATE;
1107 	if (sys_bclient || sys_mclient)
1108 		is->flags |= INFO_FLAG_BCLIENT;
1109 #ifdef REFCLOCK
1110 	if (cal_enable)
1111 		is->flags |= INFO_FLAG_CAL;
1112 #endif /* REFCLOCK */
1113 	if (kern_enable)
1114 		is->flags |= INFO_FLAG_KERNEL;
1115 	if (mon_enabled != MON_OFF)
1116 		is->flags |= INFO_FLAG_MONITOR;
1117 	if (ntp_enable)
1118 		is->flags |= INFO_FLAG_NTP;
1119 	if (hardpps_enable)
1120 		is->flags |= INFO_FLAG_PPS_SYNC;
1121 	if (stats_control)
1122 		is->flags |= INFO_FLAG_FILEGEN;
1123 	is->bdelay = HTONS_FP(DTOFP(sys_bdelay));
1124 	HTONL_UF(sys_authdelay.l_uf, &is->authdelay);
1125 	(void) more_pkt();
1126 	flush_pkt();
1127 }
1128 
1129 
1130 /*
1131  * sys_stats - return system statistics
1132  */
1133 static void
1134 sys_stats(
1135 	sockaddr_u *srcadr,
1136 	endpt *inter,
1137 	struct req_pkt *inpkt
1138 	)
1139 {
1140 	register struct info_sys_stats *ss;
1141 
1142 	ss = (struct info_sys_stats *)prepare_pkt(srcadr, inter, inpkt,
1143 		sizeof(struct info_sys_stats));
1144 	ss->timeup = htonl((u_int32)current_time);
1145 	ss->timereset = htonl((u_int32)(current_time - sys_stattime));
1146 	ss->denied = htonl((u_int32)sys_restricted);
1147 	ss->oldversionpkt = htonl((u_int32)sys_oldversion);
1148 	ss->newversionpkt = htonl((u_int32)sys_newversion);
1149 	ss->unknownversion = htonl((u_int32)sys_declined);
1150 	ss->badlength = htonl((u_int32)sys_badlength);
1151 	ss->processed = htonl((u_int32)sys_processed);
1152 	ss->badauth = htonl((u_int32)sys_badauth);
1153 	ss->limitrejected = htonl((u_int32)sys_limitrejected);
1154 	ss->received = htonl((u_int32)sys_received);
1155 	ss->lamport = htonl((u_int32)sys_lamport);
1156 	ss->tsrounding = htonl((u_int32)sys_tsrounding);
1157 	(void) more_pkt();
1158 	flush_pkt();
1159 }
1160 
1161 
1162 /*
1163  * mem_stats - return memory statistics
1164  */
1165 static void
1166 mem_stats(
1167 	sockaddr_u *srcadr,
1168 	endpt *inter,
1169 	struct req_pkt *inpkt
1170 	)
1171 {
1172 	register struct info_mem_stats *ms;
1173 	register int i;
1174 
1175 	ms = (struct info_mem_stats *)prepare_pkt(srcadr, inter, inpkt,
1176 						  sizeof(struct info_mem_stats));
1177 
1178 	ms->timereset = htonl((u_int32)(current_time - peer_timereset));
1179 	ms->totalpeermem = htons((u_short)total_peer_structs);
1180 	ms->freepeermem = htons((u_short)peer_free_count);
1181 	ms->findpeer_calls = htonl((u_int32)findpeer_calls);
1182 	ms->allocations = htonl((u_int32)peer_allocations);
1183 	ms->demobilizations = htonl((u_int32)peer_demobilizations);
1184 
1185 	for (i = 0; i < NTP_HASH_SIZE; i++)
1186 		ms->hashcount[i] = (u_char)
1187 		    min((u_int)peer_hash_count[i], UCHAR_MAX);
1188 
1189 	(void) more_pkt();
1190 	flush_pkt();
1191 }
1192 
1193 
1194 /*
1195  * io_stats - return io statistics
1196  */
1197 static void
1198 io_stats(
1199 	sockaddr_u *srcadr,
1200 	endpt *inter,
1201 	struct req_pkt *inpkt
1202 	)
1203 {
1204 	struct info_io_stats *io;
1205 
1206 	io = (struct info_io_stats *)prepare_pkt(srcadr, inter, inpkt,
1207 						 sizeof(struct info_io_stats));
1208 
1209 	io->timereset = htonl((u_int32)(current_time - io_timereset));
1210 	io->totalrecvbufs = htons((u_short) total_recvbuffs());
1211 	io->freerecvbufs = htons((u_short) free_recvbuffs());
1212 	io->fullrecvbufs = htons((u_short) full_recvbuffs());
1213 	io->lowwater = htons((u_short) lowater_additions());
1214 	io->dropped = htonl((u_int32)packets_dropped);
1215 	io->ignored = htonl((u_int32)packets_ignored);
1216 	io->received = htonl((u_int32)packets_received);
1217 	io->sent = htonl((u_int32)packets_sent);
1218 	io->notsent = htonl((u_int32)packets_notsent);
1219 	io->interrupts = htonl((u_int32)handler_calls);
1220 	io->int_received = htonl((u_int32)handler_pkts);
1221 
1222 	(void) more_pkt();
1223 	flush_pkt();
1224 }
1225 
1226 
1227 /*
1228  * timer_stats - return timer statistics
1229  */
1230 static void
1231 timer_stats(
1232 	sockaddr_u *		srcadr,
1233 	endpt *			inter,
1234 	struct req_pkt *	inpkt
1235 	)
1236 {
1237 	struct info_timer_stats *	ts;
1238 	u_long				sincereset;
1239 
1240 	ts = (struct info_timer_stats *)prepare_pkt(srcadr, inter,
1241 						    inpkt, sizeof(*ts));
1242 
1243 	sincereset = current_time - timer_timereset;
1244 	ts->timereset = htonl((u_int32)sincereset);
1245 	ts->alarms = ts->timereset;
1246 	ts->overflows = htonl((u_int32)alarm_overflow);
1247 	ts->xmtcalls = htonl((u_int32)timer_xmtcalls);
1248 
1249 	(void) more_pkt();
1250 	flush_pkt();
1251 }
1252 
1253 
1254 /*
1255  * loop_info - return the current state of the loop filter
1256  */
1257 static void
1258 loop_info(
1259 	sockaddr_u *srcadr,
1260 	endpt *inter,
1261 	struct req_pkt *inpkt
1262 	)
1263 {
1264 	struct info_loop *li;
1265 	l_fp ltmp;
1266 
1267 	li = (struct info_loop *)prepare_pkt(srcadr, inter, inpkt,
1268 	    sizeof(struct info_loop));
1269 
1270 	DTOLFP(last_offset, &ltmp);
1271 	HTONL_FP(&ltmp, &li->last_offset);
1272 	DTOLFP(drift_comp * 1e6, &ltmp);
1273 	HTONL_FP(&ltmp, &li->drift_comp);
1274 	li->compliance = htonl((u_int32)(tc_counter));
1275 	li->watchdog_timer = htonl((u_int32)(current_time - sys_epoch));
1276 
1277 	(void) more_pkt();
1278 	flush_pkt();
1279 }
1280 
1281 
1282 /*
1283  * do_conf - add a peer to the configuration list
1284  */
1285 static void
1286 do_conf(
1287 	sockaddr_u *srcadr,
1288 	endpt *inter,
1289 	struct req_pkt *inpkt
1290 	)
1291 {
1292 	u_short			items;
1293 	size_t			item_sz;
1294 	u_int			fl;
1295 	char *			datap;
1296 	struct conf_peer	temp_cp;
1297 	sockaddr_u		peeraddr;
1298 
1299 	/*
1300 	 * Do a check of everything to see that it looks
1301 	 * okay.  If not, complain about it.  Note we are
1302 	 * very picky here.
1303 	 */
1304 	items = INFO_NITEMS(inpkt->err_nitems);
1305 	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
1306 	datap = inpkt->u.data;
1307 	if (item_sz > sizeof(temp_cp)) {
1308 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1309 		return;
1310 	}
1311 
1312 	while (items-- > 0) {
1313 		ZERO(temp_cp);
1314 		memcpy(&temp_cp, datap, item_sz);
1315 		ZERO_SOCK(&peeraddr);
1316 
1317 		fl = 0;
1318 		if (temp_cp.flags & CONF_FLAG_PREFER)
1319 			fl |= FLAG_PREFER;
1320 		if (temp_cp.flags & CONF_FLAG_BURST)
1321 			fl |= FLAG_BURST;
1322 		if (temp_cp.flags & CONF_FLAG_IBURST)
1323 			fl |= FLAG_IBURST;
1324 #ifdef AUTOKEY
1325 		if (temp_cp.flags & CONF_FLAG_SKEY)
1326 			fl |= FLAG_SKEY;
1327 #endif	/* AUTOKEY */
1328 		if (client_v6_capable && temp_cp.v6_flag) {
1329 			AF(&peeraddr) = AF_INET6;
1330 			SOCK_ADDR6(&peeraddr) = temp_cp.peeraddr6;
1331 		} else {
1332 			AF(&peeraddr) = AF_INET;
1333 			NSRCADR(&peeraddr) = temp_cp.peeraddr;
1334 			/*
1335 			 * Make sure the address is valid
1336 			 */
1337 			if (!ISREFCLOCKADR(&peeraddr) &&
1338 			    ISBADADR(&peeraddr)) {
1339 				req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1340 				return;
1341 			}
1342 
1343 		}
1344 		NSRCPORT(&peeraddr) = htons(NTP_PORT);
1345 #ifdef ISC_PLATFORM_HAVESALEN
1346 		peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
1347 #endif
1348 
1349 		/* check mode value: 0 <= hmode <= 6
1350 		 *
1351 		 * There's no good global define for that limit, and
1352 		 * using a magic define is as good (or bad, actually) as
1353 		 * a magic number. So we use the highest possible peer
1354 		 * mode, and that is MODE_BCLIENT.
1355 		 *
1356 		 * [Bug 3009] claims that a problem occurs for hmode > 7,
1357 		 * but the code in ntp_peer.c indicates trouble for any
1358 		 * hmode > 6 ( --> MODE_BCLIENT).
1359 		 */
1360 		if (temp_cp.hmode > MODE_BCLIENT) {
1361 			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1362 			return;
1363 		}
1364 
1365 		/* Any more checks on the values? Unchecked at this
1366 		 * point:
1367 		 *   - version
1368 		 *   - ttl
1369 		 *   - keyid
1370 		 *
1371 		 *   - minpoll/maxpoll, but they are treated properly
1372 		 *     for all cases internally. Checking not necessary.
1373 		 *
1374 		 * Note that we ignore any previously-specified ippeerlimit.
1375 		 * If we're told to create the peer, we create the peer.
1376 		 */
1377 
1378 		/* finally create the peer */
1379 		if (peer_config(&peeraddr, NULL, NULL, -1,
1380 		    temp_cp.hmode, temp_cp.version, temp_cp.minpoll,
1381 		    temp_cp.maxpoll, fl, temp_cp.ttl, temp_cp.keyid,
1382 		    NULL) == 0)
1383 		{
1384 			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
1385 			return;
1386 		}
1387 
1388 		datap += item_sz;
1389 	}
1390 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1391 }
1392 
1393 
1394 /*
1395  * do_unconf - remove a peer from the configuration list
1396  */
1397 static void
1398 do_unconf(
1399 	sockaddr_u *	srcadr,
1400 	endpt *		inter,
1401 	struct req_pkt *inpkt
1402 	)
1403 {
1404 	u_short			items;
1405 	size_t			item_sz;
1406 	char *			datap;
1407 	struct conf_unpeer	temp_cp;
1408 	struct peer *		p;
1409 	sockaddr_u		peeraddr;
1410 	int			loops;
1411 
1412 	/*
1413 	 * This is a bit unstructured, but I like to be careful.
1414 	 * We check to see that every peer exists and is actually
1415 	 * configured.  If so, we remove them.  If not, we return
1416 	 * an error.
1417 	 *
1418 	 * [Bug 3011] Even if we checked all peers given in the request
1419 	 * in a dry run, there's still a chance that the caller played
1420 	 * unfair and gave the same peer multiple times. So we still
1421 	 * have to be prepared for nasty surprises in the second run ;)
1422 	 */
1423 
1424 	/* basic consistency checks */
1425 	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
1426 	if (item_sz > sizeof(temp_cp)) {
1427 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1428 		return;
1429 	}
1430 
1431 	/* now do two runs: first a dry run, then a busy one */
1432 	for (loops = 0; loops != 2; ++loops) {
1433 		items = INFO_NITEMS(inpkt->err_nitems);
1434 		datap = inpkt->u.data;
1435 		while (items-- > 0) {
1436 			/* copy from request to local */
1437 			ZERO(temp_cp);
1438 			memcpy(&temp_cp, datap, item_sz);
1439 			/* get address structure */
1440 			ZERO_SOCK(&peeraddr);
1441 			if (client_v6_capable && temp_cp.v6_flag) {
1442 				AF(&peeraddr) = AF_INET6;
1443 				SOCK_ADDR6(&peeraddr) = temp_cp.peeraddr6;
1444 			} else {
1445 				AF(&peeraddr) = AF_INET;
1446 				NSRCADR(&peeraddr) = temp_cp.peeraddr;
1447 			}
1448 			SET_PORT(&peeraddr, NTP_PORT);
1449 #ifdef ISC_PLATFORM_HAVESALEN
1450 			peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
1451 #endif
1452 			DPRINTF(1, ("searching for %s\n",
1453 				    stoa(&peeraddr)));
1454 
1455 			/* search for matching configured(!) peer */
1456 			p = NULL;
1457 			do {
1458 				p = findexistingpeer(
1459 					&peeraddr, NULL, p, -1, 0, NULL);
1460 			} while (p && !(FLAG_CONFIG & p->flags));
1461 
1462 			if (!loops && !p) {
1463 				/* Item not found in dry run -- bail! */
1464 				req_ack(srcadr, inter, inpkt,
1465 					INFO_ERR_NODATA);
1466 				return;
1467 			} else if (loops && p) {
1468 				/* Item found in busy run -- remove! */
1469 				peer_clear(p, "GONE");
1470 				unpeer(p);
1471 			}
1472 			datap += item_sz;
1473 		}
1474 	}
1475 
1476 	/* report success */
1477 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1478 }
1479 
1480 
1481 /*
1482  * set_sys_flag - set system flags
1483  */
1484 static void
1485 set_sys_flag(
1486 	sockaddr_u *srcadr,
1487 	endpt *inter,
1488 	struct req_pkt *inpkt
1489 	)
1490 {
1491 	setclr_flags(srcadr, inter, inpkt, 1);
1492 }
1493 
1494 
1495 /*
1496  * clr_sys_flag - clear system flags
1497  */
1498 static void
1499 clr_sys_flag(
1500 	sockaddr_u *srcadr,
1501 	endpt *inter,
1502 	struct req_pkt *inpkt
1503 	)
1504 {
1505 	setclr_flags(srcadr, inter, inpkt, 0);
1506 }
1507 
1508 
1509 /*
1510  * setclr_flags - do the grunge work of flag setting/clearing
1511  */
1512 static void
1513 setclr_flags(
1514 	sockaddr_u *srcadr,
1515 	endpt *inter,
1516 	struct req_pkt *inpkt,
1517 	u_long set
1518 	)
1519 {
1520 	struct conf_sys_flags *sf;
1521 	u_int32 flags;
1522 
1523 	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
1524 		msyslog(LOG_ERR, "setclr_flags: err_nitems > 1");
1525 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1526 		return;
1527 	}
1528 
1529 	sf = (struct conf_sys_flags *)&inpkt->u;
1530 	flags = ntohl(sf->flags);
1531 
1532 	if (flags & ~(SYS_FLAG_BCLIENT | SYS_FLAG_PPS |
1533 		      SYS_FLAG_NTP | SYS_FLAG_KERNEL | SYS_FLAG_MONITOR |
1534 		      SYS_FLAG_FILEGEN | SYS_FLAG_AUTH | SYS_FLAG_CAL)) {
1535 		msyslog(LOG_ERR, "setclr_flags: extra flags: %#x",
1536 			flags & ~(SYS_FLAG_BCLIENT | SYS_FLAG_PPS |
1537 				  SYS_FLAG_NTP | SYS_FLAG_KERNEL |
1538 				  SYS_FLAG_MONITOR | SYS_FLAG_FILEGEN |
1539 				  SYS_FLAG_AUTH | SYS_FLAG_CAL));
1540 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1541 		return;
1542 	}
1543 
1544 	if (flags & SYS_FLAG_BCLIENT)
1545 		proto_config(PROTO_BROADCLIENT, set, 0., NULL);
1546 	if (flags & SYS_FLAG_PPS)
1547 		proto_config(PROTO_PPS, set, 0., NULL);
1548 	if (flags & SYS_FLAG_NTP)
1549 		proto_config(PROTO_NTP, set, 0., NULL);
1550 	if (flags & SYS_FLAG_KERNEL)
1551 		proto_config(PROTO_KERNEL, set, 0., NULL);
1552 	if (flags & SYS_FLAG_MONITOR)
1553 		proto_config(PROTO_MONITOR, set, 0., NULL);
1554 	if (flags & SYS_FLAG_FILEGEN)
1555 		proto_config(PROTO_FILEGEN, set, 0., NULL);
1556 	if (flags & SYS_FLAG_AUTH)
1557 		proto_config(PROTO_AUTHENTICATE, set, 0., NULL);
1558 	if (flags & SYS_FLAG_CAL)
1559 		proto_config(PROTO_CAL, set, 0., NULL);
1560 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1561 }
1562 
1563 /* There have been some issues with the restrict list processing,
1564  * ranging from problems with deep recursion (resulting in stack
1565  * overflows) to overfull reply buffers.
1566  *
1567  * To avoid this trouble the list reversal is done iteratively using a
1568  * scratch pad.
1569  */
1570 typedef struct RestrictStack RestrictStackT;
1571 struct RestrictStack {
1572 	RestrictStackT   *link;
1573 	size_t            fcnt;
1574 	const restrict_u *pres[63];
1575 };
1576 
1577 static size_t
1578 getStackSheetSize(
1579 	RestrictStackT *sp
1580 	)
1581 {
1582 	if (sp)
1583 		return sizeof(sp->pres)/sizeof(sp->pres[0]);
1584 	return 0u;
1585 }
1586 
1587 static int/*BOOL*/
1588 pushRestriction(
1589 	RestrictStackT  **spp,
1590 	const restrict_u *ptr
1591 	)
1592 {
1593 	RestrictStackT *sp;
1594 
1595 	if (NULL == (sp = *spp) || 0 == sp->fcnt) {
1596 		/* need another sheet in the scratch pad */
1597 		sp = emalloc(sizeof(*sp));
1598 		sp->link = *spp;
1599 		sp->fcnt = getStackSheetSize(sp);
1600 		*spp = sp;
1601 	}
1602 	sp->pres[--sp->fcnt] = ptr;
1603 	return TRUE;
1604 }
1605 
1606 static int/*BOOL*/
1607 popRestriction(
1608 	RestrictStackT   **spp,
1609 	const restrict_u **opp
1610 	)
1611 {
1612 	RestrictStackT *sp;
1613 
1614 	if (NULL == (sp = *spp) || sp->fcnt >= getStackSheetSize(sp))
1615 		return FALSE;
1616 
1617 	*opp = sp->pres[sp->fcnt++];
1618 	if (sp->fcnt >= getStackSheetSize(sp)) {
1619 		/* discard sheet from scratch pad */
1620 		*spp = sp->link;
1621 		free(sp);
1622 	}
1623 	return TRUE;
1624 }
1625 
1626 static void
1627 flushRestrictionStack(
1628 	RestrictStackT **spp
1629 	)
1630 {
1631 	RestrictStackT *sp;
1632 
1633 	while (NULL != (sp = *spp)) {
1634 		*spp = sp->link;
1635 		free(sp);
1636 	}
1637 }
1638 
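/*
 * Editor's note (not in the original): pushRestriction() fills each
 * scratch-pad sheet from the top down and popRestriction() drains it
 * back in the opposite direction, so pushing while walking a restrict
 * list and then popping until the pad is empty visits the entries in
 * reverse order without recursion -- the pattern list_restrict4() and
 * list_restrict6() below rely on.
 */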
1639 /*
1640  * list_restrict4 - iterative helper for list_restrict; dumps the IPv4
1641  *		    restriction list in reverse order.
1642  */
1643 static void
1644 list_restrict4(
1645 	const restrict_u *	res,
1646 	struct info_restrict **	ppir
1647 	)
1648 {
1649 	RestrictStackT *	rpad;
1650 	struct info_restrict *	pir;
1651 
1652 	pir = *ppir;
1653 	for (rpad = NULL; res; res = res->link)
1654 		if (!pushRestriction(&rpad, res))
1655 			break;
1656 
1657 	while (pir && popRestriction(&rpad, &res)) {
1658 		pir->addr = htonl(res->u.v4.addr);
1659 		if (client_v6_capable)
1660 			pir->v6_flag = 0;
1661 		pir->mask = htonl(res->u.v4.mask);
1662 		pir->count = htonl(res->count);
1663 		pir->rflags = htons(res->rflags);
1664 		pir->mflags = htons(res->mflags);
1665 		pir = (struct info_restrict *)more_pkt();
1666 	}
1667 	flushRestrictionStack(&rpad);
1668 	*ppir = pir;
1669 }
1670 
1671 /*
1672  * list_restrict6 - iterative helper for list_restrict; dumps the IPv6
1673  *		    restriction list in reverse order.
1674  */
1675 static void
1676 list_restrict6(
1677 	const restrict_u *	res,
1678 	struct info_restrict **	ppir
1679 	)
1680 {
1681 	RestrictStackT *	rpad;
1682 	struct info_restrict *	pir;
1683 
1684 	pir = *ppir;
1685 	for (rpad = NULL; res; res = res->link)
1686 		if (!pushRestriction(&rpad, res))
1687 			break;
1688 
1689 	while (pir && popRestriction(&rpad, &res)) {
1690 		pir->addr6 = res->u.v6.addr;
1691 		pir->mask6 = res->u.v6.mask;
1692 		pir->v6_flag = 1;
1693 		pir->count = htonl(res->count);
1694 		pir->rflags = htons(res->rflags);
1695 		pir->mflags = htons(res->mflags);
1696 		pir = (struct info_restrict *)more_pkt();
1697 	}
1698 	flushRestrictionStack(&rpad);
1699 	*ppir = pir;
1700 }
1701 
1702 
1703 /*
1704  * list_restrict - return the restrict list
1705  */
1706 static void
1707 list_restrict(
1708 	sockaddr_u *srcadr,
1709 	endpt *inter,
1710 	struct req_pkt *inpkt
1711 	)
1712 {
1713 	struct info_restrict *ir;
1714 
1715 	DPRINTF(3, ("wants restrict list summary\n"));
1716 
1717 	ir = (struct info_restrict *)prepare_pkt(srcadr, inter, inpkt,
1718 	    v6sizeof(struct info_restrict));
1719 
1720 	/*
1721 	 * The restriction lists are kept sorted in the reverse of their
1722 	 * original order.  To preserve the output semantics, dump each
1723 	 * list in reverse order; the workers take care of that.
1724 	 */
1725 	list_restrict4(restrictlist4, &ir);
1726 	if (client_v6_capable)
1727 		list_restrict6(restrictlist6, &ir);
1728 	flush_pkt();
1729 }
1730 
1731 
1732 /*
1733  * do_resaddflags - add flags to a restrict entry (or create one)
1734  */
1735 static void
1736 do_resaddflags(
1737 	sockaddr_u *srcadr,
1738 	endpt *inter,
1739 	struct req_pkt *inpkt
1740 	)
1741 {
1742 	do_restrict(srcadr, inter, inpkt, RESTRICT_FLAGS);
1743 }
1744 
1745 
1746 
1747 /*
1748  * do_ressubflags - remove flags from a restrict entry
1749  */
1750 static void
1751 do_ressubflags(
1752 	sockaddr_u *srcadr,
1753 	endpt *inter,
1754 	struct req_pkt *inpkt
1755 	)
1756 {
1757 	do_restrict(srcadr, inter, inpkt, RESTRICT_UNFLAG);
1758 }
1759 
1760 
1761 /*
1762  * do_unrestrict - remove a restrict entry from the list
1763  */
1764 static void
1765 do_unrestrict(
1766 	sockaddr_u *srcadr,
1767 	endpt *inter,
1768 	struct req_pkt *inpkt
1769 	)
1770 {
1771 	do_restrict(srcadr, inter, inpkt, RESTRICT_REMOVE);
1772 }
1773 
1774 
1775 /*
1776  * do_restrict - do the dirty stuff of dealing with restrictions
1777  */
1778 static void
1779 do_restrict(
1780 	sockaddr_u *srcadr,
1781 	endpt *inter,
1782 	struct req_pkt *inpkt,
1783 	restrict_op op
1784 	)
1785 {
1786 	char *			datap;
1787 	struct conf_restrict	cr;
1788 	u_short			items;
1789 	size_t			item_sz;
1790 	sockaddr_u		matchaddr;
1791 	sockaddr_u		matchmask;
1792 	int			bad;
1793 	int/*BOOL*/		success;
1794 
1795 	switch(op) {
1796 	    case RESTRICT_FLAGS:
1797 	    case RESTRICT_UNFLAG:
1798 	    case RESTRICT_REMOVE:
1799 	    case RESTRICT_REMOVEIF:
1800 	    	break;
1801 
1802 	    default:
1803 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1804 		return;
1805 	}
1806 
1807 	/*
1808 	 * Do a check of the flags to make sure that only
1809 	 * the NTPPORT flag is set, if any.  If not, complain
1810 	 * about it.  Note we are very picky here.
1811 	 */
1812 	items = INFO_NITEMS(inpkt->err_nitems);
1813 	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
1814 	datap = inpkt->u.data;
1815 	if (item_sz > sizeof(cr)) {
1816 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1817 		return;
1818 	}
1819 
1820 	bad = 0;
1821 	while (items-- > 0 && !bad) {
1822 		memcpy(&cr, datap, item_sz);
1823 		cr.flags = ntohs(cr.flags);	/* XXX */
1824 		cr.mflags = ntohs(cr.mflags);
1825 		if (~RESM_NTPONLY & cr.mflags)
1826 			bad |= 1;
1827 		if (~RES_ALLFLAGS & cr.flags)
1828 			bad |= 2;
1829 		if (INADDR_ANY != cr.mask) {
1830 			if (client_v6_capable && cr.v6_flag) {
1831 				if (IN6_IS_ADDR_UNSPECIFIED(&cr.addr6))
1832 					bad |= 4;
1833 			} else {
1834 				if (INADDR_ANY == cr.addr)
1835 					bad |= 8;
1836 			}
1837 		}
1838 		datap += item_sz;
1839 	}
1840 
1841 	if (bad) {
1842 		msyslog(LOG_ERR, "%s: bad = 0x%x", __func__, bad);
1843 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1844 		return;
1845 	}
1846 
1847 	/*
1848 	 * Looks okay, try it out.  Needs to reload data pointer and
1849 	 * item counter. (Talos-CAN-0052)
1850 	 */
1851 	ZERO_SOCK(&matchaddr);
1852 	ZERO_SOCK(&matchmask);
1853 	items = INFO_NITEMS(inpkt->err_nitems);
1854 	datap = inpkt->u.data;
1855 
1856 	while (items-- > 0) {
1857 		memcpy(&cr, datap, item_sz);
1858 		cr.flags = ntohs(cr.flags);	/* XXX: size */
1859 		cr.mflags = ntohs(cr.mflags);
1860 		cr.ippeerlimit = ntohs(cr.ippeerlimit);
1861 		if (client_v6_capable && cr.v6_flag) {
1862 			AF(&matchaddr) = AF_INET6;
1863 			AF(&matchmask) = AF_INET6;
1864 			SOCK_ADDR6(&matchaddr) = cr.addr6;
1865 			SOCK_ADDR6(&matchmask) = cr.mask6;
1866 		} else {
1867 			AF(&matchaddr) = AF_INET;
1868 			AF(&matchmask) = AF_INET;
1869 			NSRCADR(&matchaddr) = cr.addr;
1870 			NSRCADR(&matchmask) = cr.mask;
1871 		}
1872 		success =  hack_restrict(op, &matchaddr, &matchmask,
1873 					 cr.ippeerlimit, cr.mflags,
1874 					 cr.flags, 0);
1875 		if (!success) {
1876 			DPRINTF(1, ("%s: %s %s mask %s ippeerlimit %hd %s %s failed\n",
1877 				    __func__, resop_str(op),
1878 				    stoa(&matchaddr), stoa(&matchmask),
1879 				    cr.ippeerlimit, mflags_str(cr.mflags),
1880 				    rflags_str(cr.flags)));
1881 		}
1882 		datap += item_sz;
1883 	}
1884 
1885 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1886 }
1887 
1888 
1889 /*
1890  * mon_getlist - monitor data is not served here; always answer INFO_ERR_NODATA
1891  */
1892 static void
1893 mon_getlist(
1894 	sockaddr_u *srcadr,
1895 	endpt *inter,
1896 	struct req_pkt *inpkt
1897 	)
1898 {
1899 	req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
1900 }
1901 
1902 
1903 /*
1904  * Module entry points and the flags they correspond with
1905  */
1906 struct reset_entry {
1907 	int flag;		/* flag this corresponds to */
1908 	void (*handler)(void);	/* routine to handle request */
1909 };
1910 
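/*
 * Table mapping each RESET_FLAG_* bit to the routine that clears the
 * corresponding counters; the zero-flag entry terminates the scan in
 * reset_stats().
 */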
1911 struct reset_entry reset_entries[] = {
1912 	{ RESET_FLAG_ALLPEERS,	peer_all_reset },
1913 	{ RESET_FLAG_IO,	io_clr_stats },
1914 	{ RESET_FLAG_SYS,	proto_clr_stats },
1915 	{ RESET_FLAG_MEM,	peer_clr_stats },
1916 	{ RESET_FLAG_TIMER,	timer_clr_stats },
1917 	{ RESET_FLAG_AUTH,	reset_auth_stats },
1918 	{ RESET_FLAG_CTL,	ctl_clr_stats },
1919 	{ 0,			0 }
1920 };
1921 
1922 /*
1923  * reset_stats - reset statistic counters here and there
1924  */
1925 static void
1926 reset_stats(
1927 	sockaddr_u *srcadr,
1928 	endpt *inter,
1929 	struct req_pkt *inpkt
1930 	)
1931 {
1932 	struct reset_flags *rflags;
1933 	u_long flags;
1934 	struct reset_entry *rent;
1935 
1936 	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
1937 		msyslog(LOG_ERR, "reset_stats: err_nitems > 1");
1938 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1939 		return;
1940 	}
1941 
1942 	rflags = (struct reset_flags *)&inpkt->u;
1943 	flags = ntohl(rflags->flags);
1944 
1945 	if (flags & ~RESET_ALLFLAGS) {
1946 		msyslog(LOG_ERR, "reset_stats: reset leaves %#lx",
1947 			flags & ~RESET_ALLFLAGS);
1948 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1949 		return;
1950 	}
1951 
1952 	for (rent = reset_entries; rent->flag != 0; rent++) {
1953 		if (flags & rent->flag)
1954 			(*rent->handler)();
1955 	}
1956 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1957 }
1958 
1959 
1960 /*
1961  * reset_peer - clear a peer's statistics
1962  */
1963 static void
1964 reset_peer(
1965 	sockaddr_u *srcadr,
1966 	endpt *inter,
1967 	struct req_pkt *inpkt
1968 	)
1969 {
1970 	u_short			items;
1971 	size_t			item_sz;
1972 	char *			datap;
1973 	struct conf_unpeer	cp;
1974 	struct peer *		p;
1975 	sockaddr_u		peeraddr;
1976 	int			bad;
1977 
1978 	/*
1979 	 * We check first to see that every peer exists.  If not,
1980 	 * we return an error.
1981 	 */
1982 
1983 	items = INFO_NITEMS(inpkt->err_nitems);
1984 	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
1985 	datap = inpkt->u.data;
1986 	if (item_sz > sizeof(cp)) {
1987 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1988 		return;
1989 	}
1990 
1991 	bad = FALSE;
1992 	while (items-- > 0 && !bad) {
1993 		ZERO(cp);
1994 		memcpy(&cp, datap, item_sz);
1995 		ZERO_SOCK(&peeraddr);
1996 		if (client_v6_capable && cp.v6_flag) {
1997 			AF(&peeraddr) = AF_INET6;
1998 			SOCK_ADDR6(&peeraddr) = cp.peeraddr6;
1999 		} else {
2000 			AF(&peeraddr) = AF_INET;
2001 			NSRCADR(&peeraddr) = cp.peeraddr;
2002 		}
2003 
2004 #ifdef ISC_PLATFORM_HAVESALEN
2005 		peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
2006 #endif
2007 		p = findexistingpeer(&peeraddr, NULL, NULL, -1, 0, NULL);
2008 		if (NULL == p)
2009 			bad++;
2010 		datap += item_sz;
2011 	}
2012 
2013 	if (bad) {
2014 		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2015 		return;
2016 	}
2017 
2018 	/*
2019 	 * Now do it in earnest.  The data pointer and item counter must be
2020 	 * reloaded for this second pass. (Talos-CAN-0052)
2021 	 */
2022 
2023 	items = INFO_NITEMS(inpkt->err_nitems);
2024 	datap = inpkt->u.data;
2025 	while (items-- > 0) {
2026 		ZERO(cp);
2027 		memcpy(&cp, datap, item_sz);
2028 		ZERO_SOCK(&peeraddr);
2029 		if (client_v6_capable && cp.v6_flag) {
2030 			AF(&peeraddr) = AF_INET6;
2031 			SOCK_ADDR6(&peeraddr) = cp.peeraddr6;
2032 		} else {
2033 			AF(&peeraddr) = AF_INET;
2034 			NSRCADR(&peeraddr) = cp.peeraddr;
2035 		}
2036 		SET_PORT(&peeraddr, NTP_PORT);
2037 #ifdef ISC_PLATFORM_HAVESALEN
2038 		peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
2039 #endif
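		/*
		 * More than one association may exist for this address;
		 * reset every one of them.
		 */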
2040 		p = findexistingpeer(&peeraddr, NULL, NULL, -1, 0, NULL);
2041 		while (p != NULL) {
2042 			peer_reset(p);
2043 			p = findexistingpeer(&peeraddr, NULL, p, -1, 0, NULL);
2044 		}
2045 		datap += item_sz;
2046 	}
2047 
2048 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2049 }
2050 
2051 
2052 /*
2053  * do_key_reread - reread the encryption key file
2054  */
2055 static void
2056 do_key_reread(
2057 	sockaddr_u *srcadr,
2058 	endpt *inter,
2059 	struct req_pkt *inpkt
2060 	)
2061 {
2062 	rereadkeys();
2063 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2064 }
2065 
2066 
2067 /*
2068  * trust_key - make one or more keys trusted
2069  */
2070 static void
2071 trust_key(
2072 	sockaddr_u *srcadr,
2073 	endpt *inter,
2074 	struct req_pkt *inpkt
2075 	)
2076 {
2077 	do_trustkey(srcadr, inter, inpkt, 1);
2078 }
2079 
2080 
2081 /*
2082  * untrust_key - make one or more keys untrusted
2083  */
2084 static void
2085 untrust_key(
2086 	sockaddr_u *srcadr,
2087 	endpt *inter,
2088 	struct req_pkt *inpkt
2089 	)
2090 {
2091 	do_trustkey(srcadr, inter, inpkt, 0);
2092 }
2093 
2094 
2095 /*
2096  * do_trustkey - make keys either trustable or untrustable
2097  */
2098 static void
2099 do_trustkey(
2100 	sockaddr_u *srcadr,
2101 	endpt *inter,
2102 	struct req_pkt *inpkt,
2103 	u_long trust
2104 	)
2105 {
2106 	register uint32_t *kp;
2107 	register int items;
2108 
2109 	items = INFO_NITEMS(inpkt->err_nitems);
2110 	kp = (uint32_t *)&inpkt->u;
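	/*
	 * The request payload is simply an array of 32-bit key ids;
	 * mark each one trusted or untrusted as requested.
	 */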
2111 	while (items-- > 0) {
2112 		authtrust(*kp, trust);
2113 		kp++;
2114 	}
2115 
2116 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2117 }
2118 
2119 
2120 /*
2121  * get_auth_info - return some stats concerning the authentication module
2122  */
2123 static void
2124 get_auth_info(
2125 	sockaddr_u *srcadr,
2126 	endpt *inter,
2127 	struct req_pkt *inpkt
2128 	)
2129 {
2130 	register struct info_auth *ia;
2131 
2132 	ia = (struct info_auth *)prepare_pkt(srcadr, inter, inpkt,
2133 					     sizeof(struct info_auth));
2134 
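	/*
	 * Export the counters in network byte order; timereset is the
	 * time elapsed since the statistics were last cleared.
	 */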
2135 	ia->numkeys = htonl((u_int32)authnumkeys);
2136 	ia->numfreekeys = htonl((u_int32)authnumfreekeys);
2137 	ia->keylookups = htonl((u_int32)authkeylookups);
2138 	ia->keynotfound = htonl((u_int32)authkeynotfound);
2139 	ia->encryptions = htonl((u_int32)authencryptions);
2140 	ia->decryptions = htonl((u_int32)authdecryptions);
2141 	ia->keyuncached = htonl((u_int32)authkeyuncached);
2142 	ia->expired = htonl((u_int32)authkeyexpired);
2143 	ia->timereset = htonl((u_int32)(current_time - auth_timereset));
2144 
2145 	(void) more_pkt();
2146 	flush_pkt();
2147 }
2148 
2149 
2150 
2151 /*
2152  * reset_auth_stats - reset the authentication stat counters.  Done here
2153  *		      to keep ntp-isms out of the authentication module
2154  */
2155 void
2156 reset_auth_stats(void)
2157 {
2158 	authkeylookups = 0;
2159 	authkeynotfound = 0;
2160 	authencryptions = 0;
2161 	authdecryptions = 0;
2162 	authkeyuncached = 0;
2163 	auth_timereset = current_time;
2164 }
2165 
2166 
2167 /*
2168  * req_get_traps - return information about current trap holders
2169  */
2170 static void
2171 req_get_traps(
2172 	sockaddr_u *srcadr,
2173 	endpt *inter,
2174 	struct req_pkt *inpkt
2175 	)
2176 {
2177 	struct info_trap *it;
2178 	struct ctl_trap *tr;
2179 	size_t i;
2180 
2181 	if (num_ctl_traps == 0) {
2182 		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2183 		return;
2184 	}
2185 
2186 	it = (struct info_trap *)prepare_pkt(srcadr, inter, inpkt,
2187 	    v6sizeof(struct info_trap));
2188 
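	/*
	 * Walk the fixed-size trap table, reporting only slots marked
	 * TRAP_INUSE; IPv6 trap holders are skipped for clients that
	 * cannot handle v6 addresses.
	 */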
2189 	for (i = 0, tr = ctl_traps; it && i < COUNTOF(ctl_traps); i++, tr++) {
2190 		if (tr->tr_flags & TRAP_INUSE) {
2191 			if (IS_IPV4(&tr->tr_addr)) {
2192 				if (tr->tr_localaddr == any_interface)
2193 					it->local_address = 0;
2194 				else
2195 					it->local_address
2196 					    = NSRCADR(&tr->tr_localaddr->sin);
2197 				it->trap_address = NSRCADR(&tr->tr_addr);
2198 				if (client_v6_capable)
2199 					it->v6_flag = 0;
2200 			} else {
2201 				if (!client_v6_capable)
2202 					continue;
2203 				it->local_address6
2204 				    = SOCK_ADDR6(&tr->tr_localaddr->sin);
2205 				it->trap_address6 = SOCK_ADDR6(&tr->tr_addr);
2206 				it->v6_flag = 1;
2207 			}
2208 			it->trap_port = NSRCPORT(&tr->tr_addr);
2209 			it->sequence = htons(tr->tr_sequence);
2210 			it->settime = htonl((u_int32)(current_time - tr->tr_settime));
2211 			it->origtime = htonl((u_int32)(current_time - tr->tr_origtime));
2212 			it->resets = htonl((u_int32)tr->tr_resets);
2213 			it->flags = htonl((u_int32)tr->tr_flags);
2214 			it = (struct info_trap *)more_pkt();
2215 		}
2216 	}
2217 	flush_pkt();
2218 }
2219 
2220 
2221 /*
2222  * req_set_trap - configure a trap
2223  */
2224 static void
2225 req_set_trap(
2226 	sockaddr_u *srcadr,
2227 	endpt *inter,
2228 	struct req_pkt *inpkt
2229 	)
2230 {
2231 	do_setclr_trap(srcadr, inter, inpkt, 1);
2232 }
2233 
2234 
2235 
2236 /*
2237  * req_clr_trap - unconfigure a trap
2238  */
2239 static void
2240 req_clr_trap(
2241 	sockaddr_u *srcadr,
2242 	endpt *inter,
2243 	struct req_pkt *inpkt
2244 	)
2245 {
2246 	do_setclr_trap(srcadr, inter, inpkt, 0);
2247 }
2248 
2249 
2250 
2251 /*
2252  * do_setclr_trap - do the grunge work of (un)configuring a trap
2253  */
2254 static void
2255 do_setclr_trap(
2256 	sockaddr_u *srcadr,
2257 	endpt *inter,
2258 	struct req_pkt *inpkt,
2259 	int set
2260 	)
2261 {
2262 	register struct conf_trap *ct;
2263 	register endpt *linter;
2264 	int res;
2265 	sockaddr_u laddr;
2266 
2267 	/*
2268 	 * Prepare sockaddr
2269 	 */
2270 	ZERO_SOCK(&laddr);
2271 	AF(&laddr) = AF(srcadr);
2272 	SET_PORT(&laddr, NTP_PORT);
2273 
2274 	/*
2275 	 * Restrict ourselves to one item only.  This avoids having to
2276 	 * report partial success for a multi-item request.
2277 	 */
2278 	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
2279 		msyslog(LOG_ERR, "do_setclr_trap: err_nitems > 1");
2280 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
2281 		return;
2282 	}
2283 	ct = (struct conf_trap *)&inpkt->u;
2284 
2285 	/*
2286 	 * Look for the local interface.  If none, use the default.
2287 	 */
2288 	if (ct->local_address == 0) {
2289 		linter = any_interface;
2290 	} else {
2291 		if (IS_IPV4(&laddr))
2292 			NSRCADR(&laddr) = ct->local_address;
2293 		else
2294 			SOCK_ADDR6(&laddr) = ct->local_address6;
2295 		linter = findinterface(&laddr);
2296 		if (NULL == linter) {
2297 			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2298 			return;
2299 		}
2300 	}
2301 
2302 	if (IS_IPV4(&laddr))
2303 		NSRCADR(&laddr) = ct->trap_address;
2304 	else
2305 		SOCK_ADDR6(&laddr) = ct->trap_address6;
2306 	if (ct->trap_port)
2307 		NSRCPORT(&laddr) = ct->trap_port;
2308 	else
2309 		SET_PORT(&laddr, TRAPPORT);
2310 
2311 	if (set) {
2312 		res = ctlsettrap(&laddr, linter, 0,
2313 				 INFO_VERSION(inpkt->rm_vn_mode));
2314 	} else {
2315 		res = ctlclrtrap(&laddr, linter, 0);
2316 	}
2317 
2318 	if (!res) {
2319 		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2320 	} else {
2321 		req_ack(srcadr, inter, inpkt, INFO_OKAY);
2322 	}
2323 	return;
2324 }
2325 
2326 /*
2327  * Validate a request packet for a new request or control key:
2328  *  - only one item allowed
2329  *  - key must be valid (that is, known, and not in the autokey range)
2330  */
2331 static void
2332 set_keyid_checked(
2333 	keyid_t        *into,
2334 	const char     *what,
2335 	sockaddr_u     *srcadr,
2336 	endpt          *inter,
2337 	struct req_pkt *inpkt
2338 	)
2339 {
2340 	keyid_t *pkeyid;
2341 	keyid_t  tmpkey;
2342 
2343 	/* restrict ourselves to one item only */
2344 	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
2345 		msyslog(LOG_ERR, "set_keyid_checked[%s]: err_nitems > 1",
2346 			what);
2347 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
2348 		return;
2349 	}
2350 
2351 	/* pull the new key id out of the packet */
2352 	pkeyid = (keyid_t *)&inpkt->u;
2353 	tmpkey = ntohl(*pkeyid);
2354 
2355 	/* validate the new key id, claim data error on failure */
2356 	if (tmpkey < 1 || tmpkey > NTP_MAXKEY || !auth_havekey(tmpkey)) {
2357 		msyslog(LOG_ERR, "set_keyid_checked[%s]: invalid key id: %ld",
2358 			what, (long)tmpkey);
2359 		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2360 		return;
2361 	}
2362 
2363 	/* if we arrive here, the key is good -- use it */
2364 	*into = tmpkey;
2365 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2366 }
2367 
2368 /*
2369  * set_request_keyid - set the keyid used to authenticate requests
2370  */
2371 static void
2372 set_request_keyid(
2373 	sockaddr_u *srcadr,
2374 	endpt *inter,
2375 	struct req_pkt *inpkt
2376 	)
2377 {
2378 	set_keyid_checked(&info_auth_keyid, "request",
2379 			  srcadr, inter, inpkt);
2380 }
2381 
2382 
2383 
2384 /*
2385  * set_control_keyid - set the keyid used to authenticate control messages
2386  */
2387 static void
2388 set_control_keyid(
2389 	sockaddr_u *srcadr,
2390 	endpt *inter,
2391 	struct req_pkt *inpkt
2392 	)
2393 {
2394 	set_keyid_checked(&ctl_auth_keyid, "control",
2395 			  srcadr, inter, inpkt);
2396 }
2397 
2398 
2399 
2400 /*
2401  * get_ctl_stats - return some stats concerning the control message module
2402  */
2403 static void
2404 get_ctl_stats(
2405 	sockaddr_u *srcadr,
2406 	endpt *inter,
2407 	struct req_pkt *inpkt
2408 	)
2409 {
2410 	register struct info_control *ic;
2411 
2412 	ic = (struct info_control *)prepare_pkt(srcadr, inter, inpkt,
2413 						sizeof(struct info_control));
2414 
2415 	ic->ctltimereset = htonl((u_int32)(current_time - ctltimereset));
2416 	ic->numctlreq = htonl((u_int32)numctlreq);
2417 	ic->numctlbadpkts = htonl((u_int32)numctlbadpkts);
2418 	ic->numctlresponses = htonl((u_int32)numctlresponses);
2419 	ic->numctlfrags = htonl((u_int32)numctlfrags);
2420 	ic->numctlerrors = htonl((u_int32)numctlerrors);
2421 	ic->numctltooshort = htonl((u_int32)numctltooshort);
2422 	ic->numctlinputresp = htonl((u_int32)numctlinputresp);
2423 	ic->numctlinputfrag = htonl((u_int32)numctlinputfrag);
2424 	ic->numctlinputerr = htonl((u_int32)numctlinputerr);
2425 	ic->numctlbadoffset = htonl((u_int32)numctlbadoffset);
2426 	ic->numctlbadversion = htonl((u_int32)numctlbadversion);
2427 	ic->numctldatatooshort = htonl((u_int32)numctldatatooshort);
2428 	ic->numctlbadop = htonl((u_int32)numctlbadop);
2429 	ic->numasyncmsgs = htonl((u_int32)numasyncmsgs);
2430 
2431 	(void) more_pkt();
2432 	flush_pkt();
2433 }
2434 
2435 
2436 #ifdef KERNEL_PLL
2437 /*
2438  * get_kernel_info - get kernel pll/pps information
2439  */
2440 static void
2441 get_kernel_info(
2442 	sockaddr_u *srcadr,
2443 	endpt *inter,
2444 	struct req_pkt *inpkt
2445 	)
2446 {
2447 	register struct info_kernel *ik;
2448 	struct timex ntx;
2449 
2450 	if (!pll_control) {
2451 		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2452 		return;
2453 	}
2454 
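	/*
	 * ntp_adjtime() with a zeroed timex (modes == 0) only reads back
	 * the current kernel PLL/PPS state.
	 */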
2455 	ZERO(ntx);
2456 	if (ntp_adjtime(&ntx) < 0)
2457 		msyslog(LOG_ERR, "get_kernel_info: ntp_adjtime() failed: %m");
2458 	ik = (struct info_kernel *)prepare_pkt(srcadr, inter, inpkt,
2459 	    sizeof(struct info_kernel));
2460 
2461 	/*
2462 	 * pll variables
2463 	 */
2464 	ik->offset = htonl((u_int32)ntx.offset);
2465 	ik->freq = htonl((u_int32)ntx.freq);
2466 	ik->maxerror = htonl((u_int32)ntx.maxerror);
2467 	ik->esterror = htonl((u_int32)ntx.esterror);
2468 	ik->status = htons(ntx.status);
2469 	ik->constant = htonl((u_int32)ntx.constant);
2470 	ik->precision = htonl((u_int32)ntx.precision);
2471 	ik->tolerance = htonl((u_int32)ntx.tolerance);
2472 
2473 	/*
2474 	 * pps variables
2475 	 */
2476 	ik->ppsfreq = htonl((u_int32)ntx.ppsfreq);
2477 	ik->jitter = htonl((u_int32)ntx.jitter);
2478 	ik->shift = htons(ntx.shift);
2479 	ik->stabil = htonl((u_int32)ntx.stabil);
2480 	ik->jitcnt = htonl((u_int32)ntx.jitcnt);
2481 	ik->calcnt = htonl((u_int32)ntx.calcnt);
2482 	ik->errcnt = htonl((u_int32)ntx.errcnt);
2483 	ik->stbcnt = htonl((u_int32)ntx.stbcnt);
2484 
2485 	(void) more_pkt();
2486 	flush_pkt();
2487 }
2488 #endif /* KERNEL_PLL */
2489 
2490 
2491 #ifdef REFCLOCK
2492 /*
2493  * get_clock_info - get info about a clock
2494  */
2495 static void
2496 get_clock_info(
2497 	sockaddr_u *srcadr,
2498 	endpt *inter,
2499 	struct req_pkt *inpkt
2500 	)
2501 {
2502 	register struct info_clock *ic;
2503 	register u_int32 *clkaddr;
2504 	register int items;
2505 	struct refclockstat clock_stat;
2506 	sockaddr_u addr;
2507 	l_fp ltmp;
2508 
2509 	ZERO_SOCK(&addr);
2510 	AF(&addr) = AF_INET;
2511 #ifdef ISC_PLATFORM_HAVESALEN
2512 	addr.sa.sa_len = SOCKLEN(&addr);
2513 #endif
2514 	SET_PORT(&addr, NTP_PORT);
2515 	items = INFO_NITEMS(inpkt->err_nitems);
2516 	clkaddr = &inpkt->u.u32[0];
2517 
2518 	ic = (struct info_clock *)prepare_pkt(srcadr, inter, inpkt,
2519 					      sizeof(struct info_clock));
2520 
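	/*
	 * Each request item is a refclock pseudo-address (127.127.t.u);
	 * reject anything that is not a configured reference clock peer.
	 */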
2521 	while (items-- > 0 && ic) {
2522 		NSRCADR(&addr) = *clkaddr++;
2523 		if (!ISREFCLOCKADR(&addr) || NULL ==
2524 		    findexistingpeer(&addr, NULL, NULL, -1, 0, NULL)) {
2525 			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2526 			return;
2527 		}
2528 
2529 		clock_stat.kv_list = (struct ctl_var *)0;
2530 
2531 		refclock_control(&addr, NULL, &clock_stat);
2532 
2533 		ic->clockadr = NSRCADR(&addr);
2534 		ic->type = clock_stat.type;
2535 		ic->flags = clock_stat.flags;
2536 		ic->lastevent = clock_stat.lastevent;
2537 		ic->currentstatus = clock_stat.currentstatus;
2538 		ic->polls = htonl((u_int32)clock_stat.polls);
2539 		ic->noresponse = htonl((u_int32)clock_stat.noresponse);
2540 		ic->badformat = htonl((u_int32)clock_stat.badformat);
2541 		ic->baddata = htonl((u_int32)clock_stat.baddata);
2542 		ic->timestarted = htonl((u_int32)clock_stat.timereset);
2543 		DTOLFP(clock_stat.fudgetime1, &ltmp);
2544 		HTONL_FP(&ltmp, &ic->fudgetime1);
2545 		DTOLFP(clock_stat.fudgetime2, &ltmp);
2546 		HTONL_FP(&ltmp, &ic->fudgetime2);
2547 		ic->fudgeval1 = htonl((u_int32)clock_stat.fudgeval1);
2548 		/* [Bug3527] Backward Incompatible: ic->fudgeval2 is
2549 		 * a string, instantiated via memcpy() so there is no
2550 		 * endian issue to correct.
2551 		 */
2552 #ifdef DISABLE_BUG3527_FIX
2553 		ic->fudgeval2 = htonl(clock_stat.fudgeval2);
2554 #else
2555 		ic->fudgeval2 = clock_stat.fudgeval2;
2556 #endif
2557 
2558 		free_varlist(clock_stat.kv_list);
2559 
2560 		ic = (struct info_clock *)more_pkt();
2561 	}
2562 	flush_pkt();
2563 }
2564 
2565 
2566 
2567 /*
2568  * set_clock_fudge - set a clock's fudge factors
2569  */
2570 static void
2571 set_clock_fudge(
2572 	sockaddr_u *srcadr,
2573 	endpt *inter,
2574 	struct req_pkt *inpkt
2575 	)
2576 {
2577 	register struct conf_fudge *cf;
2578 	register int items;
2579 	struct refclockstat clock_stat;
2580 	sockaddr_u addr;
2581 	l_fp ltmp;
2582 
2583 	ZERO(addr);
2584 	ZERO(clock_stat);
2585 	items = INFO_NITEMS(inpkt->err_nitems);
2586 	cf = (struct conf_fudge *)&inpkt->u;
2587 
2588 	while (items-- > 0) {
2589 		AF(&addr) = AF_INET;
2590 		NSRCADR(&addr) = cf->clockadr;
2591 #ifdef ISC_PLATFORM_HAVESALEN
2592 		addr.sa.sa_len = SOCKLEN(&addr);
2593 #endif
2594 		SET_PORT(&addr, NTP_PORT);
2595 		if (!ISREFCLOCKADR(&addr) || NULL ==
2596 		    findexistingpeer(&addr, NULL, NULL, -1, 0, NULL)) {
2597 			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2598 			return;
2599 		}
2600 
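		/*
		 * Decode which fudge field to change and set the matching
		 * CLK_HAVE* bit so refclock_control() applies only that field.
		 */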
2601 		switch(ntohl(cf->which)) {
2602 		    case FUDGE_TIME1:
2603 			NTOHL_FP(&cf->fudgetime, &ltmp);
2604 			LFPTOD(&ltmp, clock_stat.fudgetime1);
2605 			clock_stat.haveflags = CLK_HAVETIME1;
2606 			break;
2607 		    case FUDGE_TIME2:
2608 			NTOHL_FP(&cf->fudgetime, &ltmp);
2609 			LFPTOD(&ltmp, clock_stat.fudgetime2);
2610 			clock_stat.haveflags = CLK_HAVETIME2;
2611 			break;
2612 		    case FUDGE_VAL1:
2613 			clock_stat.fudgeval1 = ntohl(cf->fudgeval_flags);
2614 			clock_stat.haveflags = CLK_HAVEVAL1;
2615 			break;
2616 		    case FUDGE_VAL2:
2617 			clock_stat.fudgeval2 = ntohl(cf->fudgeval_flags);
2618 			clock_stat.haveflags = CLK_HAVEVAL2;
2619 			break;
2620 		    case FUDGE_FLAGS:
2621 			clock_stat.flags = (u_char) (ntohl(cf->fudgeval_flags) & 0xf);
2622 			clock_stat.haveflags =
2623 				(CLK_HAVEFLAG1|CLK_HAVEFLAG2|CLK_HAVEFLAG3|CLK_HAVEFLAG4);
2624 			break;
2625 		    default:
2626 			msyslog(LOG_ERR, "set_clock_fudge: default!");
2627 			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
2628 			return;
2629 		}
2630 
2631 		refclock_control(&addr, &clock_stat, (struct refclockstat *)0);
2632 	}
2633 
2634 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2635 }
2636 #endif
2637 
2638 #ifdef REFCLOCK
2639 /*
2640  * get_clkbug_info - get debugging info about a clock
2641  */
2642 static void
2643 get_clkbug_info(
2644 	sockaddr_u *srcadr,
2645 	endpt *inter,
2646 	struct req_pkt *inpkt
2647 	)
2648 {
2649 	register int i;
2650 	register struct info_clkbug *ic;
2651 	register u_int32 *clkaddr;
2652 	register int items;
2653 	struct refclockbug bug;
2654 	sockaddr_u addr;
2655 
2656 	ZERO_SOCK(&addr);
2657 	AF(&addr) = AF_INET;
2658 #ifdef ISC_PLATFORM_HAVESALEN
2659 	addr.sa.sa_len = SOCKLEN(&addr);
2660 #endif
2661 	SET_PORT(&addr, NTP_PORT);
2662 	items = INFO_NITEMS(inpkt->err_nitems);
2663 	clkaddr = (u_int32 *)&inpkt->u;
2664 
2665 	ic = (struct info_clkbug *)prepare_pkt(srcadr, inter, inpkt,
2666 					       sizeof(struct info_clkbug));
2667 
2668 	while (items-- > 0 && ic) {
2669 		NSRCADR(&addr) = *clkaddr++;
2670 		if (!ISREFCLOCKADR(&addr) || NULL ==
2671 		    findexistingpeer(&addr, NULL, NULL, -1, 0, NULL)) {
2672 			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2673 			return;
2674 		}
2675 
2676 		ZERO(bug);
2677 		refclock_buginfo(&addr, &bug);
2678 		if (bug.nvalues == 0 && bug.ntimes == 0) {
2679 			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2680 			return;
2681 		}
2682 
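		/*
		 * Copy at most NUMCBUGVALUES values and NUMCBUGTIMES times;
		 * the svalues bitmap is masked down to the entries actually
		 * returned.
		 */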
2683 		ic->clockadr = NSRCADR(&addr);
2684 		i = bug.nvalues;
2685 		if (i > NUMCBUGVALUES)
2686 		    i = NUMCBUGVALUES;
2687 		ic->nvalues = (u_char)i;
2688 		ic->svalues = htons((u_short) (bug.svalues & ((1<<i)-1)));
2689 		while (--i >= 0)
2690 		    ic->values[i] = htonl(bug.values[i]);
2691 
2692 		i = bug.ntimes;
2693 		if (i > NUMCBUGTIMES)
2694 		    i = NUMCBUGTIMES;
2695 		ic->ntimes = (u_char)i;
2696 		ic->stimes = htonl(bug.stimes);
2697 		while (--i >= 0) {
2698 			HTONL_FP(&bug.times[i], &ic->times[i]);
2699 		}
2700 
2701 		ic = (struct info_clkbug *)more_pkt();
2702 	}
2703 	flush_pkt();
2704 }
2705 #endif
2706 
2707 /*
2708  * fill_info_if_stats - per-endpoint receiver that fills in interface statistics
2709  */
2710 static void
2711 fill_info_if_stats(void *data, interface_info_t *interface_info)
2712 {
2713 	struct info_if_stats **ifsp = (struct info_if_stats **)data;
2714 	struct info_if_stats *ifs = *ifsp;
2715 	endpt *ep = interface_info->ep;
2716 
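	/*
	 * A NULL slot means an earlier call could not obtain another
	 * output entry (more_pkt() returned NULL); skip the remaining
	 * interfaces.
	 */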
2717 	if (NULL == ifs)
2718 		return;
2719 
2720 	ZERO(*ifs);
2721 
2722 	if (IS_IPV6(&ep->sin)) {
2723 		if (!client_v6_capable)
2724 			return;
2725 		ifs->v6_flag = 1;
2726 		ifs->unaddr.addr6 = SOCK_ADDR6(&ep->sin);
2727 		ifs->unbcast.addr6 = SOCK_ADDR6(&ep->bcast);
2728 		ifs->unmask.addr6 = SOCK_ADDR6(&ep->mask);
2729 	} else {
2730 		ifs->v6_flag = 0;
2731 		ifs->unaddr.addr = SOCK_ADDR4(&ep->sin);
2732 		ifs->unbcast.addr = SOCK_ADDR4(&ep->bcast);
2733 		ifs->unmask.addr = SOCK_ADDR4(&ep->mask);
2734 	}
2735 	ifs->v6_flag = htonl(ifs->v6_flag);
2736 	strlcpy(ifs->name, ep->name, sizeof(ifs->name));
2737 	ifs->family = htons(ep->family);
2738 	ifs->flags = htonl(ep->flags);
2739 	ifs->last_ttl = htonl(ep->last_ttl);
2740 	ifs->num_mcast = htonl(ep->num_mcast);
2741 	ifs->received = htonl(ep->received);
2742 	ifs->sent = htonl(ep->sent);
2743 	ifs->notsent = htonl(ep->notsent);
2744 	ifs->ifindex = htonl(ep->ifindex);
2745 	/* scope is no longer in endpt; it lives in in6_addr, so reuse ifindex */
2746 	ifs->scopeid = ifs->ifindex;
2747 	ifs->ifnum = htonl(ep->ifnum);
2748 	ifs->uptime = htonl(current_time - ep->starttime);
2749 	ifs->ignore_packets = ep->ignore_packets;
2750 	ifs->peercnt = htonl(ep->peercnt);
2751 	ifs->action = interface_info->action;
2752 
2753 	*ifsp = (struct info_if_stats *)more_pkt();
2754 }
2755 
2756 /*
2757  * get_if_stats - get interface statistics
2758  */
2759 static void
2760 get_if_stats(
2761 	sockaddr_u *srcadr,
2762 	endpt *inter,
2763 	struct req_pkt *inpkt
2764 	)
2765 {
2766 	struct info_if_stats *ifs;
2767 
2768 	DPRINTF(3, ("wants interface statistics\n"));
2769 
2770 	ifs = (struct info_if_stats *)prepare_pkt(srcadr, inter, inpkt,
2771 	    v6sizeof(struct info_if_stats));
2772 
2773 	interface_enumerate(fill_info_if_stats, &ifs);
2774 
2775 	flush_pkt();
2776 }
2777 
2778 static void
2779 do_if_reload(
2780 	sockaddr_u *srcadr,
2781 	endpt *inter,
2782 	struct req_pkt *inpkt
2783 	)
2784 {
2785 	struct info_if_stats *ifs;
2786 
2787 	DPRINTF(3, ("wants interface reload\n"));
2788 
2789 	ifs = (struct info_if_stats *)prepare_pkt(srcadr, inter, inpkt,
2790 	    v6sizeof(struct info_if_stats));
2791 
2792 	interface_update(fill_info_if_stats, &ifs);
2793 
2794 	flush_pkt();
2795 }
2796 
2797