/*
 * ntp_request.c - respond to information requests
 */

#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include "ntpd.h"
#include "ntp_io.h"
#include "ntp_request.h"
#include "ntp_control.h"
#include "ntp_refclock.h"
#include "ntp_if.h"
#include "ntp_stdlib.h"
#include "ntp_assert.h"

#include <stdio.h>
#include <stddef.h>
#include <signal.h>
#ifdef HAVE_NETINET_IN_H
#include <netinet/in.h>
#endif
#include <arpa/inet.h>

#include "recvbuff.h"

#ifdef KERNEL_PLL
#include "ntp_syscall.h"
#endif /* KERNEL_PLL */

/*
 * Structure to hold request procedure information
 */
#define NOAUTH	0
#define AUTH	1

#define NO_REQUEST	(-1)
/*
 * Because we now have v6 addresses in the messages, we need to compensate
 * for the larger size.  Therefore, we introduce the alternate size to
 * keep us friendly with older implementations.  A little ugly.
 */
static int client_v6_capable = 0;	/* the client can handle longer messages */

#define v6sizeof(type)	(client_v6_capable ? sizeof(type) : v4sizeof(type))

struct req_proc {
	short request_code;	/* defined request code */
	short needs_auth;	/* true when authentication needed */
	short sizeofitem;	/* size of request data item (older size) */
	short v6_sizeofitem;	/* size of request data item (new size) */
	void (*handler) (sockaddr_u *, endpt *,
			 struct req_pkt *);	/* routine to handle request */
};

/*
 * Universal request codes
 */
static const struct req_proc univ_codes[] = {
	{ NO_REQUEST, NOAUTH, 0, 0, NULL }
};

static void req_ack (sockaddr_u *, endpt *, struct req_pkt *, int);
static void * prepare_pkt (sockaddr_u *, endpt *,
			   struct req_pkt *, size_t);
static void * more_pkt (void);
static void flush_pkt (void);
static void list_peers (sockaddr_u *, endpt *, struct req_pkt *);
static void list_peers_sum (sockaddr_u *, endpt *, struct req_pkt *);
static void peer_info (sockaddr_u *, endpt *, struct req_pkt *);
static void peer_stats (sockaddr_u *, endpt *, struct req_pkt *);
static void sys_info (sockaddr_u *, endpt *, struct req_pkt *);
static void sys_stats (sockaddr_u *, endpt *, struct req_pkt *);
static void mem_stats (sockaddr_u *, endpt *, struct req_pkt *);
static void io_stats (sockaddr_u *, endpt *, struct req_pkt *);
static void timer_stats (sockaddr_u *, endpt *, struct req_pkt *);
static void loop_info (sockaddr_u *, endpt *, struct req_pkt *);
static void do_conf (sockaddr_u *, endpt *, struct req_pkt *);
static void do_unconf (sockaddr_u *, endpt *, struct req_pkt *);
static void set_sys_flag (sockaddr_u *, endpt *, struct req_pkt *);
static void clr_sys_flag (sockaddr_u *, endpt *, struct req_pkt *);
static void setclr_flags (sockaddr_u *, endpt *, struct req_pkt *, u_long);
static void list_restrict4 (const restrict_u *, struct info_restrict **);
static void list_restrict6 (const restrict_u *, struct info_restrict **);
static void list_restrict (sockaddr_u *, endpt *, struct req_pkt *);
static void do_resaddflags (sockaddr_u *, endpt *, struct req_pkt *);
static void do_ressubflags (sockaddr_u *, endpt *, struct req_pkt *);
static void do_unrestrict (sockaddr_u *, endpt *, struct req_pkt *);
static void do_restrict (sockaddr_u *, endpt *, struct req_pkt *, int);
static void mon_getlist (sockaddr_u *, endpt *, struct req_pkt *);
static void reset_stats (sockaddr_u *, endpt *, struct req_pkt *);
static void reset_peer (sockaddr_u *, endpt *, struct req_pkt *);
static void do_key_reread (sockaddr_u *, endpt *, struct req_pkt *);
static void trust_key (sockaddr_u *, endpt *, struct req_pkt *);
static void untrust_key (sockaddr_u *, endpt *, struct req_pkt *);
static void do_trustkey (sockaddr_u *, endpt *, struct req_pkt *, u_long);
static void get_auth_info (sockaddr_u *, endpt *, struct req_pkt *);
static void req_get_traps (sockaddr_u *, endpt *, struct req_pkt *);
static void req_set_trap (sockaddr_u *, endpt *, struct req_pkt *);
static void req_clr_trap (sockaddr_u *, endpt *, struct req_pkt *);
static void do_setclr_trap (sockaddr_u *, endpt *, struct req_pkt *, int);
static void set_request_keyid (sockaddr_u *, endpt *, struct req_pkt *);
static void set_control_keyid (sockaddr_u *, endpt *, struct req_pkt *);
static void get_ctl_stats (sockaddr_u *, endpt *, struct req_pkt *);
static void get_if_stats (sockaddr_u *, endpt *, struct req_pkt *);
static void do_if_reload (sockaddr_u *, endpt *, struct req_pkt *);
#ifdef KERNEL_PLL
static void get_kernel_info (sockaddr_u *, endpt *, struct req_pkt *);
#endif /* KERNEL_PLL */
#ifdef REFCLOCK
static void get_clock_info (sockaddr_u *, endpt *, struct req_pkt *);
static void set_clock_fudge (sockaddr_u *, endpt *, struct req_pkt *);
#endif /* REFCLOCK */
#ifdef REFCLOCK
static void get_clkbug_info (sockaddr_u *, endpt *, struct req_pkt *);
#endif /* REFCLOCK */

/*
 * ntpd request codes
 */
static const struct req_proc ntp_codes[] = {
	{ REQ_PEER_LIST, NOAUTH, 0, 0, list_peers },
	{ REQ_PEER_LIST_SUM, NOAUTH, 0, 0, list_peers_sum },
	{ REQ_PEER_INFO, NOAUTH, v4sizeof(struct info_peer_list),
	  sizeof(struct info_peer_list), peer_info},
	{ REQ_PEER_STATS, NOAUTH, v4sizeof(struct info_peer_list),
	  sizeof(struct info_peer_list), peer_stats},
	{ REQ_SYS_INFO, NOAUTH, 0, 0, sys_info },
	{ REQ_SYS_STATS, NOAUTH, 0, 0, sys_stats },
	{ REQ_IO_STATS, NOAUTH, 0, 0, io_stats },
	{ REQ_MEM_STATS, NOAUTH, 0, 0, mem_stats },
	{ REQ_LOOP_INFO, NOAUTH, 0, 0, loop_info },
	{ REQ_TIMER_STATS, NOAUTH, 0, 0, timer_stats },
	{ REQ_CONFIG, AUTH, v4sizeof(struct conf_peer),
	  sizeof(struct conf_peer), do_conf },
	{ REQ_UNCONFIG, AUTH, v4sizeof(struct conf_unpeer),
	  sizeof(struct conf_unpeer), do_unconf },
	{ REQ_SET_SYS_FLAG, AUTH, sizeof(struct conf_sys_flags),
	  sizeof(struct conf_sys_flags), set_sys_flag },
	{ REQ_CLR_SYS_FLAG, AUTH, sizeof(struct conf_sys_flags),
	  sizeof(struct conf_sys_flags), clr_sys_flag },
	{ REQ_GET_RESTRICT, NOAUTH, 0, 0, list_restrict },
	{ REQ_RESADDFLAGS, AUTH, v4sizeof(struct conf_restrict),
	  sizeof(struct conf_restrict), do_resaddflags },
	{ REQ_RESSUBFLAGS, AUTH, v4sizeof(struct conf_restrict),
	  sizeof(struct conf_restrict), do_ressubflags },
	{ REQ_UNRESTRICT, AUTH, v4sizeof(struct conf_restrict),
	  sizeof(struct conf_restrict), do_unrestrict },
	{ REQ_MON_GETLIST, NOAUTH, 0, 0, mon_getlist },
	{ REQ_MON_GETLIST_1, NOAUTH, 0, 0, mon_getlist },
	{ REQ_RESET_STATS, AUTH, sizeof(struct reset_flags), 0, reset_stats },
	{ REQ_RESET_PEER, AUTH, v4sizeof(struct conf_unpeer),
	  sizeof(struct conf_unpeer), reset_peer },
	{ REQ_REREAD_KEYS, AUTH, 0, 0, do_key_reread },
	{ REQ_TRUSTKEY, AUTH, sizeof(u_long), sizeof(u_long), trust_key },
	{ REQ_UNTRUSTKEY, AUTH, sizeof(u_long), sizeof(u_long), untrust_key },
	{ REQ_AUTHINFO,
	  NOAUTH, 0, 0, get_auth_info },
	{ REQ_TRAPS, NOAUTH, 0, 0, req_get_traps },
	{ REQ_ADD_TRAP, AUTH, v4sizeof(struct conf_trap),
	  sizeof(struct conf_trap), req_set_trap },
	{ REQ_CLR_TRAP, AUTH, v4sizeof(struct conf_trap),
	  sizeof(struct conf_trap), req_clr_trap },
	{ REQ_REQUEST_KEY, AUTH, sizeof(u_long), sizeof(u_long),
	  set_request_keyid },
	{ REQ_CONTROL_KEY, AUTH, sizeof(u_long), sizeof(u_long),
	  set_control_keyid },
	{ REQ_GET_CTLSTATS, NOAUTH, 0, 0, get_ctl_stats },
#ifdef KERNEL_PLL
	{ REQ_GET_KERNEL, NOAUTH, 0, 0, get_kernel_info },
#endif
#ifdef REFCLOCK
	{ REQ_GET_CLOCKINFO, NOAUTH, sizeof(u_int32), sizeof(u_int32),
	  get_clock_info },
	{ REQ_SET_CLKFUDGE, AUTH, sizeof(struct conf_fudge),
	  sizeof(struct conf_fudge), set_clock_fudge },
	{ REQ_GET_CLKBUGINFO, NOAUTH, sizeof(u_int32), sizeof(u_int32),
	  get_clkbug_info },
#endif
	{ REQ_IF_STATS, AUTH, 0, 0, get_if_stats },
	{ REQ_IF_RELOAD, AUTH, 0, 0, do_if_reload },

	{ NO_REQUEST, NOAUTH, 0, 0, 0 }
};


/*
 * Authentication keyid used to authenticate requests.  Zero means we
 * don't allow writing anything.
 */
keyid_t info_auth_keyid;

/*
 * Statistic counters to keep track of requests and responses.
 */
u_long numrequests;		/* number of requests we've received */
u_long numresppkts;		/* number of resp packets sent with data */

/*
 * lazy way to count errors, indexed by the error code
 */
u_long errorcounter[MAX_INFO_ERR + 1];

/*
 * A hack.  To keep the authentication module clear of ntp-ism's, we
 * include a time reset variable for its stats here.
 */
u_long auth_timereset;

/*
 * Response packet used by these routines.  Also some state information
 * so that we can handle packet formatting within a common set of
 * subroutines.  Note we try to enter data in place whenever possible,
 * but the need to set the more bit correctly means we occasionally
 * use the extra buffer and copy.
 */
static struct resp_pkt rpkt;
static int reqver;
static int seqno;
static int nitems;
static int itemsize;
static int databytes;
static char exbuf[RESP_DATA_SIZE];
static int usingexbuf;
static sockaddr_u *toaddr;
static endpt *frominter;

/*
 * init_request - initialize request data
 */
void
init_request (void)
{
	size_t i;

	numrequests = 0;
	numresppkts = 0;
	auth_timereset = 0;
	info_auth_keyid = 0;	/* by default, can't do this */

	for (i = 0; i < sizeof(errorcounter)/sizeof(errorcounter[0]); i++)
		errorcounter[i] = 0;
}


/*
 * req_ack - acknowledge request with no data
 */
static void
req_ack(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt,
	int errcode
	)
{
	/*
	 * fill in the fields
	 */
	rpkt.rm_vn_mode = RM_VN_MODE(RESP_BIT, 0, reqver);
	rpkt.auth_seq = AUTH_SEQ(0, 0);
	rpkt.implementation = inpkt->implementation;
	rpkt.request = inpkt->request;
	rpkt.err_nitems = ERR_NITEMS(errcode, 0);
	rpkt.mbz_itemsize = MBZ_ITEMSIZE(0);

	/*
	 * send packet and bump counters
	 */
	sendpkt(srcadr, inter, -1, (struct pkt *)&rpkt, RESP_HEADER_SIZE);
	errorcounter[errcode]++;
}


/*
 * prepare_pkt - prepare response packet for transmission, return pointer
 *		 to storage for data item.
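 *		 more_pkt() hands out space for each subsequent item and
 *		 sends intermediate packets as needed; flush_pkt() sends
 *		 the final one.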
 */
static void *
prepare_pkt(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *pkt,
	size_t structsize
	)
{
	DPRINTF(4, ("request: preparing pkt\n"));

	/*
	 * Fill in the implementation, request and itemsize fields
	 * since these won't change.
	 */
	rpkt.implementation = pkt->implementation;
	rpkt.request = pkt->request;
	rpkt.mbz_itemsize = MBZ_ITEMSIZE(structsize);

	/*
	 * Compute the static data needed to carry on.
	 */
	toaddr = srcadr;
	frominter = inter;
	seqno = 0;
	nitems = 0;
	itemsize = structsize;
	databytes = 0;
	usingexbuf = 0;

	/*
	 * return the beginning of the packet buffer.
	 */
	return &rpkt.u;
}


/*
 * more_pkt - return a data pointer for a new item.
 */
static void *
more_pkt(void)
{
	/*
	 * If we were using the extra buffer, send the packet.
	 */
	if (usingexbuf) {
		DPRINTF(3, ("request: sending pkt\n"));
		rpkt.rm_vn_mode = RM_VN_MODE(RESP_BIT, MORE_BIT, reqver);
		rpkt.auth_seq = AUTH_SEQ(0, seqno);
		rpkt.err_nitems = htons((u_short)nitems);
		sendpkt(toaddr, frominter, -1, (struct pkt *)&rpkt,
			RESP_HEADER_SIZE + databytes);
		numresppkts++;

		/*
		 * Copy data out of exbuf into the packet.
		 */
		memcpy(&rpkt.u.data[0], exbuf, (unsigned)itemsize);
		seqno++;
		databytes = 0;
		nitems = 0;
		usingexbuf = 0;
	}

	databytes += itemsize;
	nitems++;
	if (databytes + itemsize <= RESP_DATA_SIZE) {
		DPRINTF(4, ("request: giving him more data\n"));
		/*
		 * More room in packet.  Give him the
		 * next address.
		 */
		return &rpkt.u.data[databytes];
	} else {
		/*
		 * No room in packet.  Give him the extra
		 * buffer unless this was the last in the sequence.
		 */
		DPRINTF(4, ("request: into extra buffer\n"));
		if (seqno == MAXSEQ)
			return NULL;
		else {
			usingexbuf = 1;
			return exbuf;
		}
	}
}


/*
 * flush_pkt - we're done, return remaining information.
 */
static void
flush_pkt(void)
{
	DPRINTF(3, ("request: flushing packet, %d items\n", nitems));
	/*
	 * Must send the last packet.  If nothing in here and nothing
	 * has been sent, send an error saying no data to be found.
	 */
	if (seqno == 0 && nitems == 0)
		req_ack(toaddr, frominter, (struct req_pkt *)&rpkt,
			INFO_ERR_NODATA);
	else {
		rpkt.rm_vn_mode = RM_VN_MODE(RESP_BIT, 0, reqver);
		rpkt.auth_seq = AUTH_SEQ(0, seqno);
		rpkt.err_nitems = htons((u_short)nitems);
		sendpkt(toaddr, frominter, -1, (struct pkt *)&rpkt,
			RESP_HEADER_SIZE + databytes);
		numresppkts++;
	}
}


/*
 * Given a buffer, return the packet mode
 */
int
get_packet_mode(struct recvbuf *rbufp)
{
	struct req_pkt *inpkt = (struct req_pkt *)&rbufp->recv_pkt;
	return (INFO_MODE(inpkt->rm_vn_mode));
}


/*
 * process_private - process private mode (7) packets
 */
void
process_private(
	struct recvbuf *rbufp,
	int mod_okay
	)
{
	static u_long quiet_until;
	struct req_pkt *inpkt;
	struct req_pkt_tail *tailinpkt;
	sockaddr_u *srcadr;
	endpt *inter;
	const struct req_proc *proc;
	int ec;
	short temp_size;
	l_fp ftmp;
	double dtemp;
	size_t recv_len;
	size_t noslop_len;
	size_t mac_len;

	/*
	 * Initialize pointers, for convenience
	 */
	recv_len = rbufp->recv_length;
	inpkt = (struct req_pkt *)&rbufp->recv_pkt;
	srcadr = &rbufp->recv_srcadr;
	inter = rbufp->dstadr;

	DPRINTF(3, ("process_private: impl %d req %d\n",
		    inpkt->implementation, inpkt->request));

	/*
	 * Do some sanity checks on the packet.  Return a format
	 * error if it fails.
	 */
	ec = 0;
	if (   (++ec, ISRESPONSE(inpkt->rm_vn_mode))
	    || (++ec, ISMORE(inpkt->rm_vn_mode))
	    || (++ec, INFO_VERSION(inpkt->rm_vn_mode) > NTP_VERSION)
	    || (++ec, INFO_VERSION(inpkt->rm_vn_mode) < NTP_OLDVERSION)
	    || (++ec, INFO_SEQ(inpkt->auth_seq) != 0)
	    || (++ec, INFO_ERR(inpkt->err_nitems) != 0)
	    || (++ec, INFO_MBZ(inpkt->mbz_itemsize) != 0)
	    || (++ec, rbufp->recv_length < (int)REQ_LEN_HDR)
	   ) {
		NLOG(NLOG_SYSEVENT)
			if (current_time >= quiet_until) {
				msyslog(LOG_ERR,
					"process_private: drop test %d"
					" failed, pkt from %s",
					ec, stoa(srcadr));
				quiet_until = current_time + 60;
			}
		return;
	}

	reqver = INFO_VERSION(inpkt->rm_vn_mode);

	/*
	 * Get the appropriate procedure list to search.
	 */
	if (inpkt->implementation == IMPL_UNIV)
		proc = univ_codes;
	else if ((inpkt->implementation == IMPL_XNTPD) ||
		 (inpkt->implementation == IMPL_XNTPD_OLD))
		proc = ntp_codes;
	else {
		req_ack(srcadr, inter, inpkt, INFO_ERR_IMPL);
		return;
	}

	/*
	 * Search the list for the request codes.  If it isn't one
	 * we know, return an error.
	 */
	while (proc->request_code != NO_REQUEST) {
		if (proc->request_code == (short) inpkt->request)
			break;
		proc++;
	}
	if (proc->request_code == NO_REQUEST) {
		req_ack(srcadr, inter, inpkt, INFO_ERR_REQ);
		return;
	}

	DPRINTF(4, ("found request in tables\n"));

	/*
	 * If we need data, check to see if we have some.  If we
	 * don't, check to see that there is none (picky, picky).
	 */

	/* This part is a bit tricky, we want to be sure that the size
	 * returned is either the old or the new size.  We also can find
	 * out if the client can accept both types of messages this way.
	 *
	 * Handle the exception of REQ_CONFIG.  It can have two data sizes.
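	 * (struct old_conf_peer is an older layout of the REQ_CONFIG item,
	 * still accepted here for compatibility.)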
	 */
	temp_size = INFO_ITEMSIZE(inpkt->mbz_itemsize);
	if ((temp_size != proc->sizeofitem &&
	     temp_size != proc->v6_sizeofitem) &&
	    !(inpkt->implementation == IMPL_XNTPD &&
	      inpkt->request == REQ_CONFIG &&
	      temp_size == sizeof(struct old_conf_peer))) {
		DPRINTF(3, ("process_private: wrong item size, received %d, should be %d or %d\n",
			    temp_size, proc->sizeofitem, proc->v6_sizeofitem));
		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
		return;
	}
	if ((proc->sizeofitem != 0) &&
	    ((size_t)(temp_size * INFO_NITEMS(inpkt->err_nitems)) >
	     (recv_len - REQ_LEN_HDR))) {
		DPRINTF(3, ("process_private: not enough data\n"));
		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
		return;
	}

	switch (inpkt->implementation) {
	case IMPL_XNTPD:
		client_v6_capable = 1;
		break;
	case IMPL_XNTPD_OLD:
		client_v6_capable = 0;
		break;
	default:
		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
		return;
	}

	/*
	 * If we need to authenticate, do so.  Note that an
	 * authenticatable packet must include a mac field, must
	 * have used key info_auth_keyid and must have included
	 * a time stamp in the appropriate field.  The time stamp
	 * must be within INFO_TS_MAXSKEW of the receive
	 * time stamp.
	 */
	if (proc->needs_auth && sys_authenticate) {

		if (recv_len < (REQ_LEN_HDR +
		    (INFO_ITEMSIZE(inpkt->mbz_itemsize) *
		     INFO_NITEMS(inpkt->err_nitems)) +
		    REQ_TAIL_MIN)) {
			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
			return;
		}

		/*
		 * For 16-octet digests, regardless of itemsize and
		 * nitems, authenticated requests are a fixed size
		 * with the timestamp, key ID, and digest located
		 * at the end of the packet.  Because the key ID
		 * determining the digest size precedes the digest,
		 * for larger digests the fixed size request scheme
		 * is abandoned and the timestamp, key ID, and digest
		 * are located relative to the start of the packet,
		 * with the digest size determined by the packet size.
		 */
		noslop_len = REQ_LEN_HDR
			     + INFO_ITEMSIZE(inpkt->mbz_itemsize) *
			       INFO_NITEMS(inpkt->err_nitems)
			     + sizeof(inpkt->tstamp);
		/* 32-bit alignment */
		noslop_len = (noslop_len + 3) & ~3;
		if (recv_len > (noslop_len + MAX_MAC_LEN))
			mac_len = 20;
		else
			mac_len = recv_len - noslop_len;

		tailinpkt = (void *)((char *)inpkt + recv_len -
			    (mac_len + sizeof(inpkt->tstamp)));

		/*
		 * If this guy is restricted from doing this, don't let
		 * him.  If the wrong key was used, or packet doesn't
		 * have mac, return.
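		 * (The MAC itself is verified further below by
		 * authdecrypt(); this test only checks the auth bit
		 * and the key ID.)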
		 */
		if (!INFO_IS_AUTH(inpkt->auth_seq) || !info_auth_keyid
		    || ntohl(tailinpkt->keyid) != info_auth_keyid) {
			DPRINTF(5, ("failed auth %d info_auth_keyid %u pkt keyid %u maclen %lu\n",
				    INFO_IS_AUTH(inpkt->auth_seq),
				    info_auth_keyid,
				    ntohl(tailinpkt->keyid), (u_long)mac_len));
#ifdef DEBUG
			msyslog(LOG_DEBUG,
				"process_private: failed auth %d info_auth_keyid %u pkt keyid %u maclen %lu\n",
				INFO_IS_AUTH(inpkt->auth_seq),
				info_auth_keyid,
				ntohl(tailinpkt->keyid), (u_long)mac_len);
#endif
			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
			return;
		}
		if (recv_len > REQ_LEN_NOMAC + MAX_MAC_LEN) {
			DPRINTF(5, ("bad pkt length %zu\n", recv_len));
			msyslog(LOG_ERR,
				"process_private: bad pkt length %zu",
				recv_len);
			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
			return;
		}
		if (!mod_okay || !authhavekey(info_auth_keyid)) {
			DPRINTF(5, ("failed auth mod_okay %d\n",
				    mod_okay));
#ifdef DEBUG
			msyslog(LOG_DEBUG,
				"process_private: failed auth mod_okay %d\n",
				mod_okay);
#endif
			if (!mod_okay) {
				sys_restricted++;
			}
			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
			return;
		}

		/*
		 * calculate absolute time difference between xmit time stamp
		 * and receive time stamp.  If too large, too bad.
		 */
		NTOHL_FP(&tailinpkt->tstamp, &ftmp);
		L_SUB(&ftmp, &rbufp->recv_time);
		LFPTOD(&ftmp, dtemp);
		if (fabs(dtemp) > INFO_TS_MAXSKEW) {
			/*
			 * He's a loser.  Tell him.
			 */
			DPRINTF(5, ("xmit/rcv timestamp delta %g > INFO_TS_MAXSKEW %g\n",
				    dtemp, INFO_TS_MAXSKEW));
			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
			return;
		}

		/*
		 * So far so good.  See if decryption works out okay.
		 */
		if (!authdecrypt(info_auth_keyid, (u_int32 *)inpkt,
				 recv_len - mac_len, mac_len)) {
			DPRINTF(5, ("authdecrypt failed\n"));
			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
			return;
		}
	}

	DPRINTF(3, ("process_private: all okay, into handler\n"));
	/*
	 * Packet is okay.  Call the handler to send him data.
	 */
	(proc->handler)(srcadr, inter, inpkt);
}


/*
 * list_peers - send a list of the peers
 */
static void
list_peers(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	struct info_peer_list *ip;
	const struct peer *pp;

	ip = (struct info_peer_list *)prepare_pkt(srcadr, inter, inpkt,
	    v6sizeof(struct info_peer_list));
	for (pp = peer_list; pp != NULL && ip != NULL; pp = pp->p_link) {
		if (IS_IPV6(&pp->srcadr)) {
			if (!client_v6_capable)
				continue;
			ip->addr6 = SOCK_ADDR6(&pp->srcadr);
			ip->v6_flag = 1;
		} else {
			ip->addr = NSRCADR(&pp->srcadr);
			if (client_v6_capable)
				ip->v6_flag = 0;
		}

		ip->port = NSRCPORT(&pp->srcadr);
		ip->hmode = pp->hmode;
		ip->flags = 0;
		if (pp->flags & FLAG_CONFIG)
			ip->flags |= INFO_FLAG_CONFIG;
		if (pp == sys_peer)
			ip->flags |= INFO_FLAG_SYSPEER;
		if (pp->status == CTL_PST_SEL_SYNCCAND)
			ip->flags |= INFO_FLAG_SEL_CANDIDATE;
		if (pp->status >= CTL_PST_SEL_SYSPEER)
			ip->flags |= INFO_FLAG_SHORTLIST;
		ip = (struct info_peer_list *)more_pkt();
	}	/* for pp */

	flush_pkt();
}


/*
 * list_peers_sum - return extended peer list
 */
static void
list_peers_sum(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	struct info_peer_summary *ips;
	const struct peer *pp;
	l_fp ltmp;

	DPRINTF(3, ("wants peer list summary\n"));

	ips = (struct info_peer_summary *)prepare_pkt(srcadr, inter, inpkt,
	    v6sizeof(struct info_peer_summary));
	for (pp = peer_list; pp != NULL && ips != NULL; pp = pp->p_link) {
		DPRINTF(4, ("sum: got one\n"));
		/*
		 * Be careful here not to return v6 peers when we
		 * want only v4.
		 */
		if (IS_IPV6(&pp->srcadr)) {
			if (!client_v6_capable)
				continue;
			ips->srcadr6 = SOCK_ADDR6(&pp->srcadr);
			ips->v6_flag = 1;
			if (pp->dstadr)
				ips->dstadr6 = SOCK_ADDR6(&pp->dstadr->sin);
			else
				ZERO(ips->dstadr6);
		} else {
			ips->srcadr = NSRCADR(&pp->srcadr);
			if (client_v6_capable)
				ips->v6_flag = 0;

			if (pp->dstadr) {
				if (!pp->processed)
					ips->dstadr = NSRCADR(&pp->dstadr->sin);
				else {
					if (MDF_BCAST == pp->cast_flags)
						ips->dstadr = NSRCADR(&pp->dstadr->bcast);
					else if (pp->cast_flags) {
						ips->dstadr = NSRCADR(&pp->dstadr->sin);
						if (!ips->dstadr)
							ips->dstadr = NSRCADR(&pp->dstadr->bcast);
					}
				}
			} else {
				ips->dstadr = 0;
			}
		}

		ips->srcport = NSRCPORT(&pp->srcadr);
		ips->stratum = pp->stratum;
		ips->hpoll = pp->hpoll;
		ips->ppoll = pp->ppoll;
		ips->reach = pp->reach;
		ips->flags = 0;
		if (pp == sys_peer)
			ips->flags |= INFO_FLAG_SYSPEER;
		if (pp->flags & FLAG_CONFIG)
			ips->flags |= INFO_FLAG_CONFIG;
		if (pp->flags & FLAG_REFCLOCK)
			ips->flags |= INFO_FLAG_REFCLOCK;
		if (pp->flags & FLAG_PREFER)
			ips->flags |= INFO_FLAG_PREFER;
		if (pp->flags & FLAG_BURST)
			ips->flags |= INFO_FLAG_BURST;
		if (pp->status == CTL_PST_SEL_SYNCCAND)
			ips->flags |= INFO_FLAG_SEL_CANDIDATE;
		if (pp->status >= CTL_PST_SEL_SYSPEER)
			ips->flags |= INFO_FLAG_SHORTLIST;
		ips->hmode = pp->hmode;
		ips->delay = HTONS_FP(DTOFP(pp->delay));
		DTOLFP(pp->offset, &ltmp);
		HTONL_FP(&ltmp, &ips->offset);
		ips->dispersion = HTONS_FP(DTOUFP(SQRT(pp->disp)));

		ips = (struct info_peer_summary *)more_pkt();
	}	/* for pp */

	flush_pkt();
}


/*
 * peer_info - send information for one or more peers
 */
static void
peer_info (
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	u_short items;
	size_t item_sz;
	char *datap;
	struct info_peer_list ipl;
	struct peer *pp;
	struct info_peer *ip;
	int i;
	int j;
	sockaddr_u addr;
	l_fp ltmp;

	items = INFO_NITEMS(inpkt->err_nitems);
	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
	datap = inpkt->u.data;
	if (item_sz != sizeof(ipl)) {
		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
		return;
	}
	ip = prepare_pkt(srcadr, inter, inpkt,
			 v6sizeof(struct info_peer));
	while (items-- > 0 && ip != NULL) {
		ZERO(ipl);
		memcpy(&ipl, datap, item_sz);
		ZERO_SOCK(&addr);
		NSRCPORT(&addr) = ipl.port;
		if (client_v6_capable && ipl.v6_flag) {
			AF(&addr) = AF_INET6;
			SOCK_ADDR6(&addr) = ipl.addr6;
		} else {
			AF(&addr) = AF_INET;
			NSRCADR(&addr) = ipl.addr;
		}
#ifdef ISC_PLATFORM_HAVESALEN
		addr.sa.sa_len = SOCKLEN(&addr);
#endif
		datap += item_sz;

		pp = findexistingpeer(&addr, NULL, NULL, -1, 0);
		if (NULL == pp)
			continue;
		if (IS_IPV6(srcadr)) {
			if (pp->dstadr)
				ip->dstadr6 =
				    (MDF_BCAST == pp->cast_flags)
					?
					  SOCK_ADDR6(&pp->dstadr->bcast)
					: SOCK_ADDR6(&pp->dstadr->sin);
			else
				ZERO(ip->dstadr6);

			ip->srcadr6 = SOCK_ADDR6(&pp->srcadr);
			ip->v6_flag = 1;
		} else {
			if (pp->dstadr) {
				if (!pp->processed)
					ip->dstadr = NSRCADR(&pp->dstadr->sin);
				else {
					if (MDF_BCAST == pp->cast_flags)
						ip->dstadr = NSRCADR(&pp->dstadr->bcast);
					else if (pp->cast_flags) {
						ip->dstadr = NSRCADR(&pp->dstadr->sin);
						if (!ip->dstadr)
							ip->dstadr = NSRCADR(&pp->dstadr->bcast);
					}
				}
			} else
				ip->dstadr = 0;

			ip->srcadr = NSRCADR(&pp->srcadr);
			if (client_v6_capable)
				ip->v6_flag = 0;
		}
		ip->srcport = NSRCPORT(&pp->srcadr);
		ip->flags = 0;
		if (pp == sys_peer)
			ip->flags |= INFO_FLAG_SYSPEER;
		if (pp->flags & FLAG_CONFIG)
			ip->flags |= INFO_FLAG_CONFIG;
		if (pp->flags & FLAG_REFCLOCK)
			ip->flags |= INFO_FLAG_REFCLOCK;
		if (pp->flags & FLAG_PREFER)
			ip->flags |= INFO_FLAG_PREFER;
		if (pp->flags & FLAG_BURST)
			ip->flags |= INFO_FLAG_BURST;
		if (pp->status == CTL_PST_SEL_SYNCCAND)
			ip->flags |= INFO_FLAG_SEL_CANDIDATE;
		if (pp->status >= CTL_PST_SEL_SYSPEER)
			ip->flags |= INFO_FLAG_SHORTLIST;
		ip->leap = pp->leap;
		ip->hmode = pp->hmode;
		ip->keyid = pp->keyid;
		ip->stratum = pp->stratum;
		ip->ppoll = pp->ppoll;
		ip->hpoll = pp->hpoll;
		ip->precision = pp->precision;
		ip->version = pp->version;
		ip->reach = pp->reach;
		ip->unreach = (u_char)pp->unreach;
		ip->flash = (u_char)pp->flash;
		ip->flash2 = (u_short)pp->flash;
		ip->estbdelay = HTONS_FP(DTOFP(pp->delay));
		ip->ttl = (u_char)pp->ttl;
		ip->associd = htons(pp->associd);
		ip->rootdelay = HTONS_FP(DTOUFP(pp->rootdelay));
		ip->rootdispersion = HTONS_FP(DTOUFP(pp->rootdisp));
		ip->refid = pp->refid;
		HTONL_FP(&pp->reftime, &ip->reftime);
		HTONL_FP(&pp->aorg, &ip->org);
		HTONL_FP(&pp->rec, &ip->rec);
		HTONL_FP(&pp->xmt, &ip->xmt);
		j = pp->filter_nextpt - 1;
		for (i = 0; i < NTP_SHIFT; i++, j--) {
			if (j < 0)
				j = NTP_SHIFT - 1;
			ip->filtdelay[i] = HTONS_FP(DTOFP(pp->filter_delay[j]));
			DTOLFP(pp->filter_offset[j], &ltmp);
			HTONL_FP(&ltmp, &ip->filtoffset[i]);
			ip->order[i] = (u_char)((pp->filter_nextpt +
						 NTP_SHIFT - 1) -
						pp->filter_order[i]);
			if (ip->order[i] >= NTP_SHIFT)
				ip->order[i] -= NTP_SHIFT;
		}
		DTOLFP(pp->offset, &ltmp);
		HTONL_FP(&ltmp, &ip->offset);
		ip->delay = HTONS_FP(DTOFP(pp->delay));
		ip->dispersion = HTONS_FP(DTOUFP(SQRT(pp->disp)));
		ip->selectdisp = HTONS_FP(DTOUFP(SQRT(pp->jitter)));
		ip = more_pkt();
	}
	flush_pkt();
}


/*
 * peer_stats - send statistics for one or more peers
 */
static void
peer_stats (
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	u_short items;
	size_t item_sz;
	char *datap;
	struct info_peer_list ipl;
	struct peer *pp;
	struct info_peer_stats *ip;
	sockaddr_u addr;

	DPRINTF(1, ("peer_stats: called\n"));
	items = INFO_NITEMS(inpkt->err_nitems);
	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
	datap = inpkt->u.data;
	if (item_sz > sizeof(ipl)) {
		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
		return;
	}
	ip = prepare_pkt(srcadr, inter, inpkt,
			 v6sizeof(struct info_peer_stats));
	while (items-- > 0 && ip != NULL) {
		ZERO(ipl);
		memcpy(&ipl, datap, item_sz);
		ZERO(addr);
		NSRCPORT(&addr) = ipl.port;
		if (client_v6_capable && ipl.v6_flag) {
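			/* the request item carries an IPv6 address */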
			AF(&addr) = AF_INET6;
			SOCK_ADDR6(&addr) = ipl.addr6;
		} else {
			AF(&addr) = AF_INET;
			NSRCADR(&addr) = ipl.addr;
		}
#ifdef ISC_PLATFORM_HAVESALEN
		addr.sa.sa_len = SOCKLEN(&addr);
#endif
		DPRINTF(1, ("peer_stats: looking for %s, %d, %d\n",
			    stoa(&addr), ipl.port, NSRCPORT(&addr)));

		datap += item_sz;

		pp = findexistingpeer(&addr, NULL, NULL, -1, 0);
		if (NULL == pp)
			continue;

		DPRINTF(1, ("peer_stats: found %s\n", stoa(&addr)));

		if (IS_IPV4(&pp->srcadr)) {
			if (pp->dstadr) {
				if (!pp->processed)
					ip->dstadr = NSRCADR(&pp->dstadr->sin);
				else {
					if (MDF_BCAST == pp->cast_flags)
						ip->dstadr = NSRCADR(&pp->dstadr->bcast);
					else if (pp->cast_flags) {
						ip->dstadr = NSRCADR(&pp->dstadr->sin);
						if (!ip->dstadr)
							ip->dstadr = NSRCADR(&pp->dstadr->bcast);
					}
				}
			} else
				ip->dstadr = 0;

			ip->srcadr = NSRCADR(&pp->srcadr);
			if (client_v6_capable)
				ip->v6_flag = 0;
		} else {
			if (pp->dstadr)
				ip->dstadr6 =
				    (MDF_BCAST == pp->cast_flags)
					? SOCK_ADDR6(&pp->dstadr->bcast)
					: SOCK_ADDR6(&pp->dstadr->sin);
			else
				ZERO(ip->dstadr6);

			ip->srcadr6 = SOCK_ADDR6(&pp->srcadr);
			ip->v6_flag = 1;
		}
		ip->srcport = NSRCPORT(&pp->srcadr);
		ip->flags = 0;
		if (pp == sys_peer)
			ip->flags |= INFO_FLAG_SYSPEER;
		if (pp->flags & FLAG_CONFIG)
			ip->flags |= INFO_FLAG_CONFIG;
		if (pp->flags & FLAG_REFCLOCK)
			ip->flags |= INFO_FLAG_REFCLOCK;
		if (pp->flags & FLAG_PREFER)
			ip->flags |= INFO_FLAG_PREFER;
		if (pp->flags & FLAG_BURST)
			ip->flags |= INFO_FLAG_BURST;
		if (pp->flags & FLAG_IBURST)
			ip->flags |= INFO_FLAG_IBURST;
		if (pp->status == CTL_PST_SEL_SYNCCAND)
			ip->flags |= INFO_FLAG_SEL_CANDIDATE;
		if (pp->status >= CTL_PST_SEL_SYSPEER)
			ip->flags |= INFO_FLAG_SHORTLIST;
		ip->flags = htons(ip->flags);
		ip->timereceived = htonl((u_int32)(current_time - pp->timereceived));
		ip->timetosend = htonl(pp->nextdate - current_time);
		ip->timereachable = htonl((u_int32)(current_time - pp->timereachable));
		ip->sent = htonl((u_int32)(pp->sent));
		ip->processed = htonl((u_int32)(pp->processed));
		ip->badauth = htonl((u_int32)(pp->badauth));
		ip->bogusorg = htonl((u_int32)(pp->bogusorg));
		ip->oldpkt = htonl((u_int32)(pp->oldpkt));
		ip->seldisp = htonl((u_int32)(pp->seldisptoolarge));
		ip->selbroken = htonl((u_int32)(pp->selbroken));
		ip->candidate = pp->status;
		ip = (struct info_peer_stats *)more_pkt();
	}
	flush_pkt();
}


/*
 * sys_info - return system info
 */
static void
sys_info(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	register struct info_sys *is;

	is = (struct info_sys *)prepare_pkt(srcadr, inter, inpkt,
	    v6sizeof(struct info_sys));

	if (sys_peer) {
		if (IS_IPV4(&sys_peer->srcadr)) {
			is->peer = NSRCADR(&sys_peer->srcadr);
			if (client_v6_capable)
				is->v6_flag = 0;
		} else if (client_v6_capable) {
			is->peer6 = SOCK_ADDR6(&sys_peer->srcadr);
			is->v6_flag = 1;
		}
		is->peer_mode = sys_peer->hmode;
	} else {
		is->peer = 0;
		if (client_v6_capable) {
			is->v6_flag = 0;
		}
		is->peer_mode = 0;
	}

	is->leap = sys_leap;
	is->stratum = sys_stratum;
	is->precision = sys_precision;
	is->rootdelay = htonl(DTOFP(sys_rootdelay));
	is->rootdispersion = htonl(DTOUFP(sys_rootdisp));
	is->frequency = htonl(DTOFP(sys_jitter));
	is->stability = htonl(DTOUFP(clock_stability * 1e6));
	is->refid = sys_refid;
	HTONL_FP(&sys_reftime, &is->reftime);

	is->poll = sys_poll;

	is->flags = 0;
	if (sys_authenticate)
		is->flags |= INFO_FLAG_AUTHENTICATE;
	if (sys_bclient)
		is->flags |= INFO_FLAG_BCLIENT;
#ifdef REFCLOCK
	if (cal_enable)
		is->flags |= INFO_FLAG_CAL;
#endif /* REFCLOCK */
	if (kern_enable)
		is->flags |= INFO_FLAG_KERNEL;
	if (mon_enabled != MON_OFF)
		is->flags |= INFO_FLAG_MONITOR;
	if (ntp_enable)
		is->flags |= INFO_FLAG_NTP;
	if (hardpps_enable)
		is->flags |= INFO_FLAG_PPS_SYNC;
	if (stats_control)
		is->flags |= INFO_FLAG_FILEGEN;
	is->bdelay = HTONS_FP(DTOFP(sys_bdelay));
	HTONL_UF(sys_authdelay.l_uf, &is->authdelay);
	(void) more_pkt();
	flush_pkt();
}


/*
 * sys_stats - return system statistics
 */
static void
sys_stats(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	register struct info_sys_stats *ss;

	ss = (struct info_sys_stats *)prepare_pkt(srcadr, inter, inpkt,
		sizeof(struct info_sys_stats));
	ss->timeup = htonl((u_int32)current_time);
	ss->timereset = htonl((u_int32)(current_time - sys_stattime));
	ss->denied = htonl((u_int32)sys_restricted);
	ss->oldversionpkt = htonl((u_int32)sys_oldversion);
	ss->newversionpkt = htonl((u_int32)sys_newversion);
	ss->unknownversion = htonl((u_int32)sys_declined);
	ss->badlength = htonl((u_int32)sys_badlength);
	ss->processed = htonl((u_int32)sys_processed);
	ss->badauth = htonl((u_int32)sys_badauth);
	ss->limitrejected = htonl((u_int32)sys_limitrejected);
	ss->received = htonl((u_int32)sys_received);
	(void) more_pkt();
	flush_pkt();
}


/*
 * mem_stats - return memory statistics
 */
static void
mem_stats(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	register struct info_mem_stats *ms;
	register int i;

	ms = (struct info_mem_stats *)prepare_pkt(srcadr, inter, inpkt,
		sizeof(struct info_mem_stats));

	ms->timereset = htonl((u_int32)(current_time - peer_timereset));
	ms->totalpeermem = htons((u_short)total_peer_structs);
	ms->freepeermem = htons((u_short)peer_free_count);
	ms->findpeer_calls = htonl((u_int32)findpeer_calls);
	ms->allocations = htonl((u_int32)peer_allocations);
	ms->demobilizations = htonl((u_int32)peer_demobilizations);

	/* clamp each hash bucket count at UCHAR_MAX */
	for (i = 0; i < NTP_HASH_SIZE; i++)
		ms->hashcount[i] = (u_char)
		    min((u_int)peer_hash_count[i], UCHAR_MAX);

	(void) more_pkt();
	flush_pkt();
}


/*
 * io_stats - return io statistics
 */
static void
io_stats(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	struct info_io_stats *io;

	io = (struct info_io_stats *)prepare_pkt(srcadr, inter, inpkt,
		sizeof(struct info_io_stats));

	io->timereset = htonl((u_int32)(current_time - io_timereset));
	io->totalrecvbufs = htons((u_short) total_recvbuffs());
	io->freerecvbufs = htons((u_short) free_recvbuffs());
	io->fullrecvbufs = htons((u_short) full_recvbuffs());
	io->lowwater = htons((u_short) lowater_additions());
	io->dropped = htonl((u_int32)packets_dropped);
	io->ignored = htonl((u_int32)packets_ignored);
	io->received = htonl((u_int32)packets_received);
	io->sent = htonl((u_int32)packets_sent);
	io->notsent = htonl((u_int32)packets_notsent);
	io->interrupts = htonl((u_int32)handler_calls);
	io->int_received = htonl((u_int32)handler_pkts);

	(void) more_pkt();
	flush_pkt();
}


/*
 * timer_stats - return timer statistics
 */
static void
timer_stats(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	struct info_timer_stats *ts;
	u_long sincereset;

	ts = (struct info_timer_stats *)prepare_pkt(srcadr, inter,
						    inpkt, sizeof(*ts));

	sincereset = current_time - timer_timereset;
	ts->timereset = htonl((u_int32)sincereset);
	ts->alarms = ts->timereset;
	ts->overflows = htonl((u_int32)alarm_overflow);
	ts->xmtcalls = htonl((u_int32)timer_xmtcalls);

	(void) more_pkt();
	flush_pkt();
}


/*
 * loop_info - return the current state of the loop filter
 */
static void
loop_info(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	struct info_loop *li;
	l_fp ltmp;

	li = (struct info_loop *)prepare_pkt(srcadr, inter, inpkt,
	    sizeof(struct info_loop));

	DTOLFP(last_offset, &ltmp);
	HTONL_FP(&ltmp, &li->last_offset);
	DTOLFP(drift_comp * 1e6, &ltmp);
	HTONL_FP(&ltmp, &li->drift_comp);
	li->compliance = htonl((u_int32)(tc_counter));
	li->watchdog_timer = htonl((u_int32)(current_time - sys_epoch));

	(void) more_pkt();
	flush_pkt();
}


/*
 * do_conf - add a peer to the configuration list
 */
static void
do_conf(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	u_short items;
	size_t item_sz;
	u_int fl;
	char *datap;
	struct conf_peer temp_cp;
	sockaddr_u peeraddr;

	/*
	 * Do a check of everything to see that it looks
	 * okay.  If not, complain about it.  Note we are
	 * very picky here.
	 */
	items = INFO_NITEMS(inpkt->err_nitems);
	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
	datap = inpkt->u.data;
	if (item_sz > sizeof(temp_cp)) {
		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
		return;
	}

	while (items-- > 0) {
		ZERO(temp_cp);
		memcpy(&temp_cp, datap, item_sz);
		ZERO_SOCK(&peeraddr);

		fl = 0;
		if (temp_cp.flags & CONF_FLAG_PREFER)
			fl |= FLAG_PREFER;
		if (temp_cp.flags & CONF_FLAG_BURST)
			fl |= FLAG_BURST;
		if (temp_cp.flags & CONF_FLAG_IBURST)
			fl |= FLAG_IBURST;
#ifdef AUTOKEY
		if (temp_cp.flags & CONF_FLAG_SKEY)
			fl |= FLAG_SKEY;
#endif /* AUTOKEY */
		if (client_v6_capable && temp_cp.v6_flag) {
			AF(&peeraddr) = AF_INET6;
			SOCK_ADDR6(&peeraddr) = temp_cp.peeraddr6;
		} else {
			AF(&peeraddr) = AF_INET;
			NSRCADR(&peeraddr) = temp_cp.peeraddr;
			/*
			 * Make sure the address is valid
			 */
			if (!ISREFCLOCKADR(&peeraddr) &&
			    ISBADADR(&peeraddr)) {
				req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
				return;
			}

		}
		NSRCPORT(&peeraddr) = htons(NTP_PORT);
#ifdef ISC_PLATFORM_HAVESALEN
		peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
#endif

		/* XXX W2DO? minpoll/maxpoll arguments ???
		 */
		if (peer_config(&peeraddr, NULL, NULL,
		    temp_cp.hmode, temp_cp.version, temp_cp.minpoll,
		    temp_cp.maxpoll, fl, temp_cp.ttl, temp_cp.keyid,
		    NULL) == 0) {
			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
			return;
		}

		datap += item_sz;
	}
	req_ack(srcadr, inter, inpkt, INFO_OKAY);
}


/*
 * do_unconf - remove a peer from the configuration list
 */
static void
do_unconf(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	u_short items;
	size_t item_sz;
	char *datap;
	struct conf_unpeer temp_cp;
	struct peer *p;
	sockaddr_u peeraddr;
	int bad;
	int found;

	/*
	 * This is a bit unstructured, but I like to be careful.
	 * We check to see that every peer exists and is actually
	 * configured.  If so, we remove them.  If not, we return
	 * an error.
	 */
	items = INFO_NITEMS(inpkt->err_nitems);
	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
	datap = inpkt->u.data;
	if (item_sz > sizeof(temp_cp)) {
		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
		return;
	}

	bad = FALSE;
	while (items-- > 0 && !bad) {
		ZERO(temp_cp);
		memcpy(&temp_cp, datap, item_sz);
		ZERO_SOCK(&peeraddr);
		if (client_v6_capable && temp_cp.v6_flag) {
			AF(&peeraddr) = AF_INET6;
			SOCK_ADDR6(&peeraddr) = temp_cp.peeraddr6;
		} else {
			AF(&peeraddr) = AF_INET;
			NSRCADR(&peeraddr) = temp_cp.peeraddr;
		}
		SET_PORT(&peeraddr, NTP_PORT);
#ifdef ISC_PLATFORM_HAVESALEN
		peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
#endif
		found = FALSE;
		p = NULL;

		DPRINTF(1, ("searching for %s\n", stoa(&peeraddr)));

		while (!found) {
			p = findexistingpeer(&peeraddr, NULL, p, -1, 0);
			if (NULL == p)
				break;
			if (FLAG_CONFIG & p->flags)
				found = TRUE;
		}
		if (!found)
			bad = TRUE;

		datap += item_sz;
	}

	if (bad) {
		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
		return;
	}

	/*
	 * Now do it in earnest.
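	 * The item count and data pointer are reloaded because the
	 * validation pass above consumed them.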
	 */

	items = INFO_NITEMS(inpkt->err_nitems);
	datap = inpkt->u.data;

	while (items-- > 0) {
		ZERO(temp_cp);
		memcpy(&temp_cp, datap, item_sz);
		ZERO(peeraddr);
		if (client_v6_capable && temp_cp.v6_flag) {
			AF(&peeraddr) = AF_INET6;
			SOCK_ADDR6(&peeraddr) = temp_cp.peeraddr6;
		} else {
			AF(&peeraddr) = AF_INET;
			NSRCADR(&peeraddr) = temp_cp.peeraddr;
		}
		SET_PORT(&peeraddr, NTP_PORT);
#ifdef ISC_PLATFORM_HAVESALEN
		peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
#endif
		found = FALSE;
		p = NULL;

		while (!found) {
			p = findexistingpeer(&peeraddr, NULL, p, -1, 0);
			if (NULL == p)
				break;
			if (FLAG_CONFIG & p->flags)
				found = TRUE;
		}
		INSIST(found);
		INSIST(NULL != p);

		peer_clear(p, "GONE");
		unpeer(p);

		datap += item_sz;
	}

	req_ack(srcadr, inter, inpkt, INFO_OKAY);
}


/*
 * set_sys_flag - set system flags
 */
static void
set_sys_flag(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	setclr_flags(srcadr, inter, inpkt, 1);
}


/*
 * clr_sys_flag - clear system flags
 */
static void
clr_sys_flag(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	setclr_flags(srcadr, inter, inpkt, 0);
}


/*
 * setclr_flags - do the grunge work of flag setting/clearing
 */
static void
setclr_flags(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt,
	u_long set
	)
{
	struct conf_sys_flags *sf;
	u_int32 flags;

	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
		msyslog(LOG_ERR, "setclr_flags: err_nitems > 1");
		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
		return;
	}

	sf = (struct conf_sys_flags *)&inpkt->u;
	flags = ntohl(sf->flags);

	if (flags & ~(SYS_FLAG_BCLIENT | SYS_FLAG_PPS |
		      SYS_FLAG_NTP | SYS_FLAG_KERNEL | SYS_FLAG_MONITOR |
		      SYS_FLAG_FILEGEN | SYS_FLAG_AUTH | SYS_FLAG_CAL)) {
		msyslog(LOG_ERR, "setclr_flags: extra flags: %#x",
			flags & ~(SYS_FLAG_BCLIENT | SYS_FLAG_PPS |
				  SYS_FLAG_NTP | SYS_FLAG_KERNEL |
				  SYS_FLAG_MONITOR | SYS_FLAG_FILEGEN |
				  SYS_FLAG_AUTH | SYS_FLAG_CAL));
		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
		return;
	}

	if (flags & SYS_FLAG_BCLIENT)
		proto_config(PROTO_BROADCLIENT, set, 0., NULL);
	if (flags & SYS_FLAG_PPS)
		proto_config(PROTO_PPS, set, 0., NULL);
	if (flags & SYS_FLAG_NTP)
		proto_config(PROTO_NTP, set, 0., NULL);
	if (flags & SYS_FLAG_KERNEL)
		proto_config(PROTO_KERNEL, set, 0., NULL);
	if (flags & SYS_FLAG_MONITOR)
		proto_config(PROTO_MONITOR, set, 0., NULL);
	if (flags & SYS_FLAG_FILEGEN)
		proto_config(PROTO_FILEGEN, set, 0., NULL);
	if (flags & SYS_FLAG_AUTH)
		proto_config(PROTO_AUTHENTICATE, set, 0., NULL);
	if (flags & SYS_FLAG_CAL)
		proto_config(PROTO_CAL, set, 0., NULL);
	req_ack(srcadr, inter, inpkt, INFO_OKAY);
}

/* There have been some issues with the restrict list processing,
 * ranging from deep recursion (resulting in stack overflows) to
 * overfull reply buffers.
 *
 * To avoid this trouble the list reversal is done iteratively using a
 * scratch pad.
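 *
 * Each sheet of the pad holds up to 63 entries; sheets are chained
 * together and released as soon as they have been drained.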
 */
typedef struct RestrictStack RestrictStackT;
struct RestrictStack {
	RestrictStackT *link;
	size_t fcnt;
	const restrict_u *pres[63];
};

static size_t
getStackSheetSize(
	RestrictStackT *sp
	)
{
	if (sp)
		return sizeof(sp->pres)/sizeof(sp->pres[0]);
	return 0u;
}

static int/*BOOL*/
pushRestriction(
	RestrictStackT **spp,
	const restrict_u *ptr
	)
{
	RestrictStackT *sp;

	if (NULL == (sp = *spp) || 0 == sp->fcnt) {
		/* need another sheet in the scratch pad */
		sp = emalloc(sizeof(*sp));
		sp->link = *spp;
		sp->fcnt = getStackSheetSize(sp);
		*spp = sp;
	}
	sp->pres[--sp->fcnt] = ptr;
	return TRUE;
}

static int/*BOOL*/
popRestriction(
	RestrictStackT **spp,
	const restrict_u **opp
	)
{
	RestrictStackT *sp;

	if (NULL == (sp = *spp) || sp->fcnt >= getStackSheetSize(sp))
		return FALSE;

	*opp = sp->pres[sp->fcnt++];
	if (sp->fcnt >= getStackSheetSize(sp)) {
		/* discard sheet from scratch pad */
		*spp = sp->link;
		free(sp);
	}
	return TRUE;
}

static void
flushRestrictionStack(
	RestrictStackT **spp
	)
{
	RestrictStackT *sp;

	while (NULL != (sp = *spp)) {
		*spp = sp->link;
		free(sp);
	}
}

/*
 * list_restrict4 - iterative helper for list_restrict dumps IPv4
 *		    restriction list in reverse order.
 */
static void
list_restrict4(
	const restrict_u *res,
	struct info_restrict **ppir
	)
{
	RestrictStackT *rpad;
	struct info_restrict *pir;

	pir = *ppir;
	for (rpad = NULL; res; res = res->link)
		if (!pushRestriction(&rpad, res))
			break;

	while (pir && popRestriction(&rpad, &res)) {
		pir->addr = htonl(res->u.v4.addr);
		if (client_v6_capable)
			pir->v6_flag = 0;
		pir->mask = htonl(res->u.v4.mask);
		pir->count = htonl(res->count);
		pir->flags = htons(res->flags);
		pir->mflags = htons(res->mflags);
		pir = (struct info_restrict *)more_pkt();
	}
	flushRestrictionStack(&rpad);
	*ppir = pir;
}

/*
 * list_restrict6 - iterative helper for list_restrict dumps IPv6
 *		    restriction list in reverse order.
 */
static void
list_restrict6(
	const restrict_u *res,
	struct info_restrict **ppir
	)
{
	RestrictStackT *rpad;
	struct info_restrict *pir;

	pir = *ppir;
	for (rpad = NULL; res; res = res->link)
		if (!pushRestriction(&rpad, res))
			break;

	while (pir && popRestriction(&rpad, &res)) {
		pir->addr6 = res->u.v6.addr;
		pir->mask6 = res->u.v6.mask;
		pir->v6_flag = 1;
		pir->count = htonl(res->count);
		pir->flags = htons(res->flags);
		pir->mflags = htons(res->mflags);
		pir = (struct info_restrict *)more_pkt();
	}
	flushRestrictionStack(&rpad);
	*ppir = pir;
}


/*
 * list_restrict - return the restrict list
 */
static void
list_restrict(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	struct info_restrict *ir;

	DPRINTF(3, ("wants restrict list summary\n"));

	ir = (struct info_restrict *)prepare_pkt(srcadr, inter, inpkt,
	    v6sizeof(struct info_restrict));

	/*
	 * The restriction lists are kept sorted in the reverse of the
	 * order in which they were originally entered.
	 * To preserve the output semantics, dump each list in reverse
	 * order.  The workers take care of that.
	 */
	list_restrict4(restrictlist4, &ir);
	if (client_v6_capable)
		list_restrict6(restrictlist6, &ir);
	flush_pkt();
}


/*
 * do_resaddflags - add flags to a restrict entry (or create one)
 */
static void
do_resaddflags(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	do_restrict(srcadr, inter, inpkt, RESTRICT_FLAGS);
}


/*
 * do_ressubflags - remove flags from a restrict entry
 */
static void
do_ressubflags(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	do_restrict(srcadr, inter, inpkt, RESTRICT_UNFLAG);
}


/*
 * do_unrestrict - remove a restrict entry from the list
 */
static void
do_unrestrict(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	do_restrict(srcadr, inter, inpkt, RESTRICT_REMOVE);
}


/*
 * do_restrict - do the dirty stuff of dealing with restrictions
 */
static void
do_restrict(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt,
	int op
	)
{
	char *datap;
	struct conf_restrict cr;
	u_short items;
	size_t item_sz;
	sockaddr_u matchaddr;
	sockaddr_u matchmask;
	int bad;

	/*
	 * Do a check of the flags to make sure that only
	 * the NTPPORT flag is set, if any.  If not, complain
	 * about it.  Note we are very picky here.
	 */
	items = INFO_NITEMS(inpkt->err_nitems);
	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
	datap = inpkt->u.data;
	if (item_sz > sizeof(cr)) {
		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
		return;
	}

	bad = FALSE;
	while (items-- > 0 && !bad) {
		memcpy(&cr, datap, item_sz);
		cr.flags = ntohs(cr.flags);
		cr.mflags = ntohs(cr.mflags);
		if (~RESM_NTPONLY & cr.mflags)
			bad |= 1;
		if (~RES_ALLFLAGS & cr.flags)
			bad |= 2;
		if (INADDR_ANY != cr.mask) {
			if (client_v6_capable && cr.v6_flag) {
				if (IN6_IS_ADDR_UNSPECIFIED(&cr.addr6))
					bad |= 4;
			} else {
				if (INADDR_ANY == cr.addr)
					bad |= 8;
			}
		}
		datap += item_sz;
	}

	if (bad) {
		msyslog(LOG_ERR, "do_restrict: bad = %#x", bad);
		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
		return;
	}

	/*
	 * Looks okay, try it out.  Needs to reload data pointer and
	 * item counter.
	 * (Talos-CAN-0052)
	 */
	ZERO_SOCK(&matchaddr);
	ZERO_SOCK(&matchmask);
	items = INFO_NITEMS(inpkt->err_nitems);
	datap = inpkt->u.data;

	while (items-- > 0) {
		memcpy(&cr, datap, item_sz);
		cr.flags = ntohs(cr.flags);
		cr.mflags = ntohs(cr.mflags);
		if (client_v6_capable && cr.v6_flag) {
			AF(&matchaddr) = AF_INET6;
			AF(&matchmask) = AF_INET6;
			SOCK_ADDR6(&matchaddr) = cr.addr6;
			SOCK_ADDR6(&matchmask) = cr.mask6;
		} else {
			AF(&matchaddr) = AF_INET;
			AF(&matchmask) = AF_INET;
			NSRCADR(&matchaddr) = cr.addr;
			NSRCADR(&matchmask) = cr.mask;
		}
		hack_restrict(op, &matchaddr, &matchmask, cr.mflags,
			      cr.flags, 0);
		datap += item_sz;
	}

	req_ack(srcadr, inter, inpkt, INFO_OKAY);
}


/*
 * mon_getlist - return monitor data
 */
static void
mon_getlist(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
}


/*
 * Module entry points and the flags they correspond with
 */
struct reset_entry {
	int flag;		/* flag this corresponds to */
	void (*handler)(void);	/* routine to handle request */
};

struct reset_entry reset_entries[] = {
	{ RESET_FLAG_ALLPEERS,	peer_all_reset },
	{ RESET_FLAG_IO,	io_clr_stats },
	{ RESET_FLAG_SYS,	proto_clr_stats },
	{ RESET_FLAG_MEM,	peer_clr_stats },
	{ RESET_FLAG_TIMER,	timer_clr_stats },
	{ RESET_FLAG_AUTH,	reset_auth_stats },
	{ RESET_FLAG_CTL,	ctl_clr_stats },
	{ 0,			0 }
};

/*
 * reset_stats - reset statistic counters here and there
 */
static void
reset_stats(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	struct reset_flags *rflags;
	u_long flags;
	struct reset_entry *rent;

	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
		msyslog(LOG_ERR, "reset_stats: err_nitems > 1");
		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
		return;
	}

	rflags = (struct reset_flags *)&inpkt->u;
	flags = ntohl(rflags->flags);

	if (flags & ~RESET_ALLFLAGS) {
		msyslog(LOG_ERR, "reset_stats: reset leaves %#lx",
			flags & ~RESET_ALLFLAGS);
		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
		return;
	}

	for (rent = reset_entries; rent->flag != 0; rent++) {
		if (flags & rent->flag)
			(*rent->handler)();
	}
	req_ack(srcadr, inter, inpkt, INFO_OKAY);
}


/*
 * reset_peer - clear a peer's statistics
 */
static void
reset_peer(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	u_short items;
	size_t item_sz;
	char *datap;
	struct conf_unpeer cp;
	struct peer *p;
	sockaddr_u peeraddr;
	int bad;

	/*
	 * We check first to see that every peer exists.  If not,
	 * we return an error.
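	 * Each address is then looked up again and peer_reset() is
	 * applied to every matching association.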
	 */

	items = INFO_NITEMS(inpkt->err_nitems);
	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
	datap = inpkt->u.data;
	if (item_sz > sizeof(cp)) {
		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
		return;
	}

	bad = FALSE;
	while (items-- > 0 && !bad) {
		ZERO(cp);
		memcpy(&cp, datap, item_sz);
		ZERO_SOCK(&peeraddr);
		if (client_v6_capable && cp.v6_flag) {
			AF(&peeraddr) = AF_INET6;
			SOCK_ADDR6(&peeraddr) = cp.peeraddr6;
		} else {
			AF(&peeraddr) = AF_INET;
			NSRCADR(&peeraddr) = cp.peeraddr;
		}

#ifdef ISC_PLATFORM_HAVESALEN
		peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
#endif
		p = findexistingpeer(&peeraddr, NULL, NULL, -1, 0);
		if (NULL == p)
			bad++;
		datap += item_sz;
	}

	if (bad) {
		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
		return;
	}

	/*
	 * Now do it in earnest.  Needs to reload data pointer and item
	 * counter. (Talos-CAN-0052)
	 */

	items = INFO_NITEMS(inpkt->err_nitems);
	datap = inpkt->u.data;
	while (items-- > 0) {
		ZERO(cp);
		memcpy(&cp, datap, item_sz);
		ZERO_SOCK(&peeraddr);
		if (client_v6_capable && cp.v6_flag) {
			AF(&peeraddr) = AF_INET6;
			SOCK_ADDR6(&peeraddr) = cp.peeraddr6;
		} else {
			AF(&peeraddr) = AF_INET;
			NSRCADR(&peeraddr) = cp.peeraddr;
		}
		SET_PORT(&peeraddr, 123);
#ifdef ISC_PLATFORM_HAVESALEN
		peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
#endif
		p = findexistingpeer(&peeraddr, NULL, NULL, -1, 0);
		while (p != NULL) {
			peer_reset(p);
			p = findexistingpeer(&peeraddr, NULL, p, -1, 0);
		}
		datap += item_sz;
	}

	req_ack(srcadr, inter, inpkt, INFO_OKAY);
}


/*
 * do_key_reread - reread the encryption key file
 */
static void
do_key_reread(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	rereadkeys();
	req_ack(srcadr, inter, inpkt, INFO_OKAY);
}


/*
 * trust_key - make one or more keys trusted
 */
static void
trust_key(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	do_trustkey(srcadr, inter, inpkt, 1);
}


/*
 * untrust_key - make one or more keys untrusted
 */
static void
untrust_key(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	do_trustkey(srcadr, inter, inpkt, 0);
}


/*
 * do_trustkey - make keys either trustable or untrustable
 */
static void
do_trustkey(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt,
	u_long trust
	)
{
	register uint32_t *kp;
	register int items;

	items = INFO_NITEMS(inpkt->err_nitems);
	kp = (uint32_t *)&inpkt->u;
	while (items-- > 0) {
		authtrust(*kp, trust);
		kp++;
	}

	req_ack(srcadr, inter, inpkt, INFO_OKAY);
}


/*
 * get_auth_info - return some stats concerning the authentication module
 */
static void
get_auth_info(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	register struct info_auth *ia;

	ia = (struct info_auth *)prepare_pkt(srcadr, inter, inpkt,
					     sizeof(struct info_auth));

	ia->numkeys = htonl((u_int32)authnumkeys);
	ia->numfreekeys = htonl((u_int32)authnumfreekeys);
	ia->keylookups =


/*
 * get_auth_info - return some stats concerning the authentication module
 */
static void
get_auth_info(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	register struct info_auth *ia;

	ia = (struct info_auth *)prepare_pkt(srcadr, inter, inpkt,
					     sizeof(struct info_auth));

	ia->numkeys = htonl((u_int32)authnumkeys);
	ia->numfreekeys = htonl((u_int32)authnumfreekeys);
	ia->keylookups = htonl((u_int32)authkeylookups);
	ia->keynotfound = htonl((u_int32)authkeynotfound);
	ia->encryptions = htonl((u_int32)authencryptions);
	ia->decryptions = htonl((u_int32)authdecryptions);
	ia->keyuncached = htonl((u_int32)authkeyuncached);
	ia->expired = htonl((u_int32)authkeyexpired);
	ia->timereset = htonl((u_int32)(current_time - auth_timereset));

	(void) more_pkt();
	flush_pkt();
}


/*
 * reset_auth_stats - reset the authentication stat counters.  Done here
 * to keep ntp-isms out of the authentication module
 */
void
reset_auth_stats(void)
{
	authkeylookups = 0;
	authkeynotfound = 0;
	authencryptions = 0;
	authdecryptions = 0;
	authkeyuncached = 0;
	auth_timereset = current_time;
}


/*
 * req_get_traps - return information about current trap holders
 */
static void
req_get_traps(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	struct info_trap *it;
	struct ctl_trap *tr;
	size_t i;

	if (num_ctl_traps == 0) {
		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
		return;
	}

	it = (struct info_trap *)prepare_pkt(srcadr, inter, inpkt,
					     v6sizeof(struct info_trap));

	for (i = 0, tr = ctl_traps; it && i < COUNTOF(ctl_traps); i++, tr++) {
		if (tr->tr_flags & TRAP_INUSE) {
			if (IS_IPV4(&tr->tr_addr)) {
				if (tr->tr_localaddr == any_interface)
					it->local_address = 0;
				else
					it->local_address
					    = NSRCADR(&tr->tr_localaddr->sin);
				it->trap_address = NSRCADR(&tr->tr_addr);
				if (client_v6_capable)
					it->v6_flag = 0;
			} else {
				if (!client_v6_capable)
					continue;
				it->local_address6
				    = SOCK_ADDR6(&tr->tr_localaddr->sin);
				it->trap_address6 = SOCK_ADDR6(&tr->tr_addr);
				it->v6_flag = 1;
			}
			it->trap_port = NSRCPORT(&tr->tr_addr);
			it->sequence = htons(tr->tr_sequence);
			it->settime = htonl((u_int32)(current_time - tr->tr_settime));
			it->origtime = htonl((u_int32)(current_time - tr->tr_origtime));
			it->resets = htonl((u_int32)tr->tr_resets);
			it->flags = htonl((u_int32)tr->tr_flags);
			it = (struct info_trap *)more_pkt();
		}
	}
	flush_pkt();
}


/*
 * req_set_trap - configure a trap
 */
static void
req_set_trap(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	do_setclr_trap(srcadr, inter, inpkt, 1);
}


/*
 * req_clr_trap - unconfigure a trap
 */
static void
req_clr_trap(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	do_setclr_trap(srcadr, inter, inpkt, 0);
}


/*
 * do_setclr_trap - do the grunge work of (un)configuring a trap
 */
static void
do_setclr_trap(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt,
	int set
	)
{
	register struct conf_trap *ct;
	register endpt *linter;
	int res;
	sockaddr_u laddr;

	/*
	 * Prepare sockaddr
	 */
	ZERO_SOCK(&laddr);
	AF(&laddr) = AF(srcadr);
	SET_PORT(&laddr, NTP_PORT);

	/*
	 * Restrict ourselves to one item only.  This eliminates
	 * the error reporting problem.
	 */
	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
		msyslog(LOG_ERR, "do_setclr_trap: err_nitems > 1");
		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
		return;
	}
	ct = (struct conf_trap *)&inpkt->u;

	/*
	 * Look for the local interface.  If none, use the default.
	 */
	if (ct->local_address == 0) {
		linter = any_interface;
	} else {
		if (IS_IPV4(&laddr))
			NSRCADR(&laddr) = ct->local_address;
		else
			SOCK_ADDR6(&laddr) = ct->local_address6;
		linter = findinterface(&laddr);
		if (NULL == linter) {
			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
			return;
		}
	}

	if (IS_IPV4(&laddr))
		NSRCADR(&laddr) = ct->trap_address;
	else
		SOCK_ADDR6(&laddr) = ct->trap_address6;
	if (ct->trap_port)
		NSRCPORT(&laddr) = ct->trap_port;
	else
		SET_PORT(&laddr, TRAPPORT);

	if (set) {
		res = ctlsettrap(&laddr, linter, 0,
				 INFO_VERSION(inpkt->rm_vn_mode));
	} else {
		res = ctlclrtrap(&laddr, linter, 0);
	}

	if (!res) {
		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
	} else {
		req_ack(srcadr, inter, inpkt, INFO_OKAY);
	}
	return;
}


/*
 * set_request_keyid - set the keyid used to authenticate requests
 */
static void
set_request_keyid(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	keyid_t *pkeyid;

	/*
	 * Restrict ourselves to one item only.
	 */
	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
		msyslog(LOG_ERR, "set_request_keyid: err_nitems > 1");
		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
		return;
	}

	pkeyid = (keyid_t *)&inpkt->u;
	info_auth_keyid = ntohl(*pkeyid);
	req_ack(srcadr, inter, inpkt, INFO_OKAY);
}
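

/*
 * Note: info_auth_keyid guards mode-7 (ntpdc) requests and ctl_auth_keyid
 * guards mode-6 (ntpq) control messages; at startup they normally come
 * from the "requestkey" and "controlkey" configuration directives
 * (standard ntpd usage, noted here for orientation).
 */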


/*
 * set_control_keyid - set the keyid used to authenticate control messages
 */
static void
set_control_keyid(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	keyid_t *pkeyid;

	/*
	 * Restrict ourselves to one item only.
	 */
	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
		msyslog(LOG_ERR, "set_control_keyid: err_nitems > 1");
		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
		return;
	}

	pkeyid = (keyid_t *)&inpkt->u;
	ctl_auth_keyid = ntohl(*pkeyid);
	req_ack(srcadr, inter, inpkt, INFO_OKAY);
}


/*
 * get_ctl_stats - return some stats concerning the control message module
 */
static void
get_ctl_stats(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	register struct info_control *ic;

	ic = (struct info_control *)prepare_pkt(srcadr, inter, inpkt,
						sizeof(struct info_control));

	ic->ctltimereset = htonl((u_int32)(current_time - ctltimereset));
	ic->numctlreq = htonl((u_int32)numctlreq);
	ic->numctlbadpkts = htonl((u_int32)numctlbadpkts);
	ic->numctlresponses = htonl((u_int32)numctlresponses);
	ic->numctlfrags = htonl((u_int32)numctlfrags);
	ic->numctlerrors = htonl((u_int32)numctlerrors);
	ic->numctltooshort = htonl((u_int32)numctltooshort);
	ic->numctlinputresp = htonl((u_int32)numctlinputresp);
	ic->numctlinputfrag = htonl((u_int32)numctlinputfrag);
	ic->numctlinputerr = htonl((u_int32)numctlinputerr);
	ic->numctlbadoffset = htonl((u_int32)numctlbadoffset);
	ic->numctlbadversion = htonl((u_int32)numctlbadversion);
	ic->numctldatatooshort = htonl((u_int32)numctldatatooshort);
	ic->numctlbadop = htonl((u_int32)numctlbadop);
	ic->numasyncmsgs = htonl((u_int32)numasyncmsgs);

	(void) more_pkt();
	flush_pkt();
}


#ifdef KERNEL_PLL
/*
 * get_kernel_info - get kernel pll/pps information
 */
static void
get_kernel_info(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	register struct info_kernel *ik;
	struct timex ntx;

	if (!pll_control) {
		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
		return;
	}

	ZERO(ntx);
	if (ntp_adjtime(&ntx) < 0)
		msyslog(LOG_ERR, "get_kernel_info: ntp_adjtime() failed: %m");
	ik = (struct info_kernel *)prepare_pkt(srcadr, inter, inpkt,
					       sizeof(struct info_kernel));

	/*
	 * pll variables
	 */
	ik->offset = htonl((u_int32)ntx.offset);
	ik->freq = htonl((u_int32)ntx.freq);
	ik->maxerror = htonl((u_int32)ntx.maxerror);
	ik->esterror = htonl((u_int32)ntx.esterror);
	ik->status = htons(ntx.status);
	ik->constant = htonl((u_int32)ntx.constant);
	ik->precision = htonl((u_int32)ntx.precision);
	ik->tolerance = htonl((u_int32)ntx.tolerance);

	/*
	 * pps variables
	 */
	ik->ppsfreq = htonl((u_int32)ntx.ppsfreq);
	ik->jitter = htonl((u_int32)ntx.jitter);
	ik->shift = htons(ntx.shift);
	ik->stabil = htonl((u_int32)ntx.stabil);
	ik->jitcnt = htonl((u_int32)ntx.jitcnt);
	ik->calcnt = htonl((u_int32)ntx.calcnt);
	ik->errcnt = htonl((u_int32)ntx.errcnt);
	ik->stbcnt = htonl((u_int32)ntx.stbcnt);

	(void) more_pkt();
	flush_pkt();
}
#endif /* KERNEL_PLL */
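

/*
 * The refclock queries below address clocks by the usual reference-clock
 * pseudo-address convention (127.127.<type>.<unit>, e.g. 127.127.20.0 for
 * unit 0 of driver type 20); ISREFCLOCKADR() is what enforces that form
 * on each incoming item.
 */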


#ifdef REFCLOCK
/*
 * get_clock_info - get info about a clock
 */
static void
get_clock_info(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	register struct info_clock *ic;
	register u_int32 *clkaddr;
	register int items;
	struct refclockstat clock_stat;
	sockaddr_u addr;
	l_fp ltmp;

	ZERO_SOCK(&addr);
	AF(&addr) = AF_INET;
#ifdef ISC_PLATFORM_HAVESALEN
	addr.sa.sa_len = SOCKLEN(&addr);
#endif
	SET_PORT(&addr, NTP_PORT);
	items = INFO_NITEMS(inpkt->err_nitems);
	clkaddr = &inpkt->u.u32[0];

	ic = (struct info_clock *)prepare_pkt(srcadr, inter, inpkt,
					      sizeof(struct info_clock));

	while (items-- > 0 && ic) {
		NSRCADR(&addr) = *clkaddr++;
		if (!ISREFCLOCKADR(&addr) || NULL ==
		    findexistingpeer(&addr, NULL, NULL, -1, 0)) {
			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
			return;
		}

		clock_stat.kv_list = (struct ctl_var *)0;

		refclock_control(&addr, NULL, &clock_stat);

		ic->clockadr = NSRCADR(&addr);
		ic->type = clock_stat.type;
		ic->flags = clock_stat.flags;
		ic->lastevent = clock_stat.lastevent;
		ic->currentstatus = clock_stat.currentstatus;
		ic->polls = htonl((u_int32)clock_stat.polls);
		ic->noresponse = htonl((u_int32)clock_stat.noresponse);
		ic->badformat = htonl((u_int32)clock_stat.badformat);
		ic->baddata = htonl((u_int32)clock_stat.baddata);
		ic->timestarted = htonl((u_int32)clock_stat.timereset);
		DTOLFP(clock_stat.fudgetime1, &ltmp);
		HTONL_FP(&ltmp, &ic->fudgetime1);
		DTOLFP(clock_stat.fudgetime2, &ltmp);
		HTONL_FP(&ltmp, &ic->fudgetime2);
		ic->fudgeval1 = htonl((u_int32)clock_stat.fudgeval1);
		ic->fudgeval2 = htonl(clock_stat.fudgeval2);

		free_varlist(clock_stat.kv_list);

		ic = (struct info_clock *)more_pkt();
	}
	flush_pkt();
}


/*
 * set_clock_fudge - set a clock's fudge factors
 */
static void
set_clock_fudge(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	register struct conf_fudge *cf;
	register int items;
	struct refclockstat clock_stat;
	sockaddr_u addr;
	l_fp ltmp;

	ZERO(addr);
	ZERO(clock_stat);
	items = INFO_NITEMS(inpkt->err_nitems);
	cf = (struct conf_fudge *)&inpkt->u;

	while (items-- > 0) {
		AF(&addr) = AF_INET;
		NSRCADR(&addr) = cf->clockadr;
#ifdef ISC_PLATFORM_HAVESALEN
		addr.sa.sa_len = SOCKLEN(&addr);
#endif
		SET_PORT(&addr, NTP_PORT);
		if (!ISREFCLOCKADR(&addr) || NULL ==
		    findexistingpeer(&addr, NULL, NULL, -1, 0)) {
			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
			return;
		}

		switch(ntohl(cf->which)) {
		case FUDGE_TIME1:
			NTOHL_FP(&cf->fudgetime, &ltmp);
			LFPTOD(&ltmp, clock_stat.fudgetime1);
			clock_stat.haveflags = CLK_HAVETIME1;
			break;
		case FUDGE_TIME2:
			NTOHL_FP(&cf->fudgetime, &ltmp);
			LFPTOD(&ltmp, clock_stat.fudgetime2);
			clock_stat.haveflags = CLK_HAVETIME2;
			break;
		case FUDGE_VAL1:
			clock_stat.fudgeval1 = ntohl(cf->fudgeval_flags);
			clock_stat.haveflags = CLK_HAVEVAL1;
			break;
		case FUDGE_VAL2:
			clock_stat.fudgeval2 = ntohl(cf->fudgeval_flags);
			clock_stat.haveflags = CLK_HAVEVAL2;
			break;
		case FUDGE_FLAGS:
			clock_stat.flags = (u_char)(ntohl(cf->fudgeval_flags) & 0xf);
			clock_stat.haveflags =
				(CLK_HAVEFLAG1|CLK_HAVEFLAG2|CLK_HAVEFLAG3|CLK_HAVEFLAG4);
			break;
		default:
			msyslog(LOG_ERR, "set_clock_fudge: default!");
			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
			return;
		}

		refclock_control(&addr, &clock_stat, (struct refclockstat *)0);
	}

	req_ack(srcadr, inter, inpkt, INFO_OKAY);
}
#endif
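

/*
 * For orientation: a FUDGE_TIME1 item carries its offset as an l_fp in
 * network byte order, which set_clock_fudge() converts with NTOHL_FP()
 * and LFPTOD() before handing it to refclock_control(); the net effect
 * matches what a "fudge 127.127.t.u time1 <seconds>" line would set up
 * in the configuration file.
 */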

#ifdef REFCLOCK
/*
 * get_clkbug_info - get debugging info about a clock
 */
static void
get_clkbug_info(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	register int i;
	register struct info_clkbug *ic;
	register u_int32 *clkaddr;
	register int items;
	struct refclockbug bug;
	sockaddr_u addr;

	ZERO_SOCK(&addr);
	AF(&addr) = AF_INET;
#ifdef ISC_PLATFORM_HAVESALEN
	addr.sa.sa_len = SOCKLEN(&addr);
#endif
	SET_PORT(&addr, NTP_PORT);
	items = INFO_NITEMS(inpkt->err_nitems);
	clkaddr = (u_int32 *)&inpkt->u;

	ic = (struct info_clkbug *)prepare_pkt(srcadr, inter, inpkt,
					       sizeof(struct info_clkbug));

	while (items-- > 0 && ic) {
		NSRCADR(&addr) = *clkaddr++;
		if (!ISREFCLOCKADR(&addr) || NULL ==
		    findexistingpeer(&addr, NULL, NULL, -1, 0)) {
			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
			return;
		}

		ZERO(bug);
		refclock_buginfo(&addr, &bug);
		if (bug.nvalues == 0 && bug.ntimes == 0) {
			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
			return;
		}

		ic->clockadr = NSRCADR(&addr);
		i = bug.nvalues;
		if (i > NUMCBUGVALUES)
			i = NUMCBUGVALUES;
		ic->nvalues = (u_char)i;
		ic->svalues = htons((u_short)(bug.svalues & ((1 << i) - 1)));
		while (--i >= 0)
			ic->values[i] = htonl(bug.values[i]);

		i = bug.ntimes;
		if (i > NUMCBUGTIMES)
			i = NUMCBUGTIMES;
		ic->ntimes = (u_char)i;
		ic->stimes = htonl(bug.stimes);
		while (--i >= 0) {
			HTONL_FP(&bug.times[i], &ic->times[i]);
		}

		ic = (struct info_clkbug *)more_pkt();
	}
	flush_pkt();
}
#endif

/*
 * fill_info_if_stats - receiver of interface structures; copies one
 * endpoint's statistics into the current response entry
 */
static void
fill_info_if_stats(void *data, interface_info_t *interface_info)
{
	struct info_if_stats **ifsp = (struct info_if_stats **)data;
	struct info_if_stats *ifs = *ifsp;
	endpt *ep = interface_info->ep;

	if (NULL == ifs)
		return;

	ZERO(*ifs);

	if (IS_IPV6(&ep->sin)) {
		if (!client_v6_capable)
			return;
		ifs->v6_flag = 1;
		ifs->unaddr.addr6 = SOCK_ADDR6(&ep->sin);
		ifs->unbcast.addr6 = SOCK_ADDR6(&ep->bcast);
		ifs->unmask.addr6 = SOCK_ADDR6(&ep->mask);
	} else {
		ifs->v6_flag = 0;
		ifs->unaddr.addr = SOCK_ADDR4(&ep->sin);
		ifs->unbcast.addr = SOCK_ADDR4(&ep->bcast);
		ifs->unmask.addr = SOCK_ADDR4(&ep->mask);
	}
	ifs->v6_flag = htonl(ifs->v6_flag);
	strlcpy(ifs->name, ep->name, sizeof(ifs->name));
	ifs->family = htons(ep->family);
	ifs->flags = htonl(ep->flags);
	ifs->last_ttl = htonl(ep->last_ttl);
	ifs->num_mcast = htonl(ep->num_mcast);
	ifs->received = htonl(ep->received);
	ifs->sent = htonl(ep->sent);
	ifs->notsent = htonl(ep->notsent);
	ifs->ifindex = htonl(ep->ifindex);
	/* scope is no longer kept in endpt (it typically lives in the
	 * in6_addr), so report the ifindex in its place */
	ifs->scopeid = ifs->ifindex;
	ifs->ifnum = htonl(ep->ifnum);
	ifs->uptime = htonl(current_time - ep->starttime);
	ifs->ignore_packets = ep->ignore_packets;
	ifs->peercnt = htonl(ep->peercnt);
	ifs->action = interface_info->action;

	*ifsp = (struct info_if_stats *)more_pkt();
}

/*
 * get_if_stats - get interface statistics
 */
static void
get_if_stats(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	struct info_if_stats *ifs;
statistics\n")); 2719 2720 ifs = (struct info_if_stats *)prepare_pkt(srcadr, inter, inpkt, 2721 v6sizeof(struct info_if_stats)); 2722 2723 interface_enumerate(fill_info_if_stats, &ifs); 2724 2725 flush_pkt(); 2726 } 2727 2728 static void 2729 do_if_reload( 2730 sockaddr_u *srcadr, 2731 endpt *inter, 2732 struct req_pkt *inpkt 2733 ) 2734 { 2735 struct info_if_stats *ifs; 2736 2737 DPRINTF(3, ("wants interface reload\n")); 2738 2739 ifs = (struct info_if_stats *)prepare_pkt(srcadr, inter, inpkt, 2740 v6sizeof(struct info_if_stats)); 2741 2742 interface_update(fill_info_if_stats, &ifs); 2743 2744 flush_pkt(); 2745 } 2746 2747