/*
 * ntp_restrict.c - determine host restrictions
 */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <sys/types.h>

#include "ntpd.h"
#include "ntp_if.h"
#include "ntp_lists.h"
#include "ntp_stdlib.h"
#include "ntp_assert.h"

/*
 * This code keeps a simple address-and-mask list of hosts we want
 * to place restrictions on (or remove them from).  The restrictions
 * are implemented as a set of flags which tell you what the host
 * can't do.  There is a subroutine entry to return the flags.  The
 * list is kept sorted to reduce the average number of comparisons
 * and make sure you get the set of restrictions most specific to
 * the address.
 *
 * The algorithm is that, when looking up a host, it is first assumed
 * that the default set of restrictions will apply.  It then searches
 * down through the list.  Whenever it finds a match it adopts the
 * match's flags instead.  When you hit the point where the sorted
 * address is greater than the target, you return with the last set of
 * flags you found.  Because of the ordering of the list, the most
 * specific match will provide the final set of flags.
 *
 * This was originally intended to restrict you from sync'ing to your
 * own broadcasts when you are doing that, by restricting yourself from
 * your own interfaces.  It was also thought it would sometimes be
 * useful to keep a misbehaving host or two from abusing your primary
 * clock.  It has been expanded, however, to suit the needs of those
 * with more restrictive access policies.
 */
/*
 * We will use two lists, one for IPv4 addresses and one for IPv6
 * addresses.  This is not protocol-independent, but for now I can't
 * find a way around it.  We'll revisit this later... JFB 07/2001
 */
#define MASK_IPV6_ADDR(dst, src, msk)					\
	do {								\
		int idx;						\
		for (idx = 0; idx < (int)COUNTOF((dst)->s6_addr); idx++) { \
			(dst)->s6_addr[idx] = (src)->s6_addr[idx]	\
					      & (msk)->s6_addr[idx];	\
		}							\
	} while (0)
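/*
 * A minimal sketch (kept out of the build) of how MASK_IPV6_ADDR is
 * used: reduce a source address to its network prefix before comparing
 * it byte-for-byte with a restrict entry.  The /32 prefix length and
 * the helper name are hypothetical.
 */
#if 0
void
mask_example(const struct in6_addr *src)
{
	struct in6_addr mask;	/* ffff:ffff:: for a /32 */
	struct in6_addr net;

	memset(&mask, 0, sizeof(mask));
	mask.s6_addr[0] = 0xff;
	mask.s6_addr[1] = 0xff;
	mask.s6_addr[2] = 0xff;
	mask.s6_addr[3] = 0xff;

	MASK_IPV6_ADDR(&net, src, &mask);
	/* net now holds the /32 prefix of *src and can be compared
	 * (e.g. via memcmp) against an entry's u.v6.addr. */
}
#endif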
/*
 * We allocate INC_RESLIST{4|6} entries to the free list whenever empty.
 * Auto-tune these to be just less than 1KB (leaving at least 16 bytes
 * for allocator overhead).
 */
#define INC_RESLIST4	((1024 - 16) / V4_SIZEOF_RESTRICT_U)
#define INC_RESLIST6	((1024 - 16) / V6_SIZEOF_RESTRICT_U)

/*
 * The restriction list
 */
restrict_u *restrictlist4;
restrict_u *restrictlist6;
static int restrictcount;	/* count in the restrict lists */

/*
 * The free list and associated counters.  Also some uninteresting
 * stat counters.
 */
static restrict_u *resfree4;	/* available entries (free list) */
static restrict_u *resfree6;

static u_long res_calls;
static u_long res_found;
static u_long res_not_found;

/*
 * Count of restriction entries referring to RES_LIMITED, used to
 * control implicit activation/deactivation of the MRU monlist.
 */
static u_long res_limited_refcnt;

/*
 * Our default entries.
 */
static restrict_u restrict_def4;
static restrict_u restrict_def6;

/*
 * "restrict source ..." enabled knob and restriction bits.
 */
static int	restrict_source_enabled;
static u_short	restrict_source_flags;
static u_short	restrict_source_mflags;

/*
 * private functions
 */
static restrict_u *	alloc_res4(void);
static restrict_u *	alloc_res6(void);
static void		free_res(restrict_u *, int);
static void		inc_res_limited(void);
static void		dec_res_limited(void);
static restrict_u *	match_restrict4_addr(u_int32, u_short);
static restrict_u *	match_restrict6_addr(const struct in6_addr *,
					     u_short);
static restrict_u *	match_restrict_entry(const restrict_u *, int);
static int		res_sorts_before4(restrict_u *, restrict_u *);
static int		res_sorts_before6(restrict_u *, restrict_u *);


/*
 * init_restrict - initialize the restriction data structures
 */
void
init_restrict(void)
{
	/*
	 * The restriction lists begin with a default entry with address
	 * and mask 0, which will match any entry.  The lists are kept
	 * sorted by descending address followed by descending mask:
	 *
	 *	address		mask
	 *	192.168.0.0	255.255.255.0	kod limited noquery nopeer
	 *	192.168.0.0	255.255.0.0	kod limited
	 *	0.0.0.0		0.0.0.0		kod limited noquery
	 *
	 * The first entry which matches an address is used.  With the
	 * example restrictions above, 192.168.0.0/24 matches the first
	 * entry, the rest of 192.168.0.0/16 matches the second, and
	 * everything else matches the third (default).
	 *
	 * Note this achieves the same result a little more efficiently
	 * than the documented behavior, which is to keep the lists
	 * sorted by ascending address followed by ascending mask, with
	 * the _last_ matching entry used.
	 *
	 * An additional wrinkle is we may have multiple entries with
	 * the same address and mask but differing match flags (mflags).
	 * At present there is only one, RESM_NTPONLY.  Entries with
	 * RESM_NTPONLY are sorted earlier so they take precedence over
	 * any otherwise similar entry without it.  Again, this is the
	 * documented behavior with the implementation reversed.
	 */
	LINK_SLIST(restrictlist4, &restrict_def4, link);
	LINK_SLIST(restrictlist6, &restrict_def6, link);
	restrictcount = 2;
}
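/*
 * Worked example (kept out of the build) of how the ordering above
 * resolves a lookup, assuming the three entries from the comment in
 * init_restrict() have been installed.  Addresses and the helper name
 * are illustrative.
 */
#if 0
u_short
lookup_example(sockaddr_u *srcadr)
{
	/*
	 * 192.168.0.5 -> 192.168.0.0/255.255.255.0 entry:
	 *                kod limited noquery nopeer
	 * 192.168.5.1 -> 192.168.0.0/255.255.0.0 entry:
	 *                kod limited
	 * 10.1.2.3    -> 0.0.0.0/0.0.0.0 default entry:
	 *                kod limited noquery
	 */
	return restrictions(srcadr);	/* first match wins */
}
#endif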
static restrict_u *
alloc_res4(void)
{
	const size_t	cb = V4_SIZEOF_RESTRICT_U;
	const size_t	count = INC_RESLIST4;
	restrict_u *	rl;
	restrict_u *	res;
	size_t		i;

	UNLINK_HEAD_SLIST(res, resfree4, link);
	if (res != NULL)
		return res;

	rl = eallocarray(count, cb);
	/* link all but the first onto free list */
	res = (void *)((char *)rl + (count - 1) * cb);
	for (i = count - 1; i > 0; i--) {
		LINK_SLIST(resfree4, res, link);
		res = (void *)((char *)res - cb);
	}
	INSIST(rl == res);
	/* allocate the first */
	return res;
}


static restrict_u *
alloc_res6(void)
{
	const size_t	cb = V6_SIZEOF_RESTRICT_U;
	const size_t	count = INC_RESLIST6;
	restrict_u *	rl;
	restrict_u *	res;
	size_t		i;

	UNLINK_HEAD_SLIST(res, resfree6, link);
	if (res != NULL)
		return res;

	rl = eallocarray(count, cb);
	/* link all but the first onto free list */
	res = (void *)((char *)rl + (count - 1) * cb);
	for (i = count - 1; i > 0; i--) {
		LINK_SLIST(resfree6, res, link);
		res = (void *)((char *)res - cb);
	}
	INSIST(rl == res);
	/* allocate the first */
	return res;
}


static void
free_res(
	restrict_u *	res,
	int		v6
	)
{
	restrict_u **	plisthead;
	restrict_u *	unlinked;

	restrictcount--;
	if (RES_LIMITED & res->flags)
		dec_res_limited();

	if (v6)
		plisthead = &restrictlist6;
	else
		plisthead = &restrictlist4;
	UNLINK_SLIST(unlinked, *plisthead, res, link, restrict_u);
	INSIST(unlinked == res);

	if (v6) {
		zero_mem(res, V6_SIZEOF_RESTRICT_U);
		plisthead = &resfree6;
	} else {
		zero_mem(res, V4_SIZEOF_RESTRICT_U);
		plisthead = &resfree4;
	}
	LINK_SLIST(*plisthead, res, link);
}


static void
inc_res_limited(void)
{
	if (!res_limited_refcnt)
		mon_start(MON_RES);
	res_limited_refcnt++;
}


static void
dec_res_limited(void)
{
	res_limited_refcnt--;
	if (!res_limited_refcnt)
		mon_stop(MON_RES);
}
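/*
 * Sketch (kept out of the build) of the entry life cycle implemented
 * above: alloc_res4() hands out recycled free-list entries when
 * available and only carves a fresh ~1KB block when the free list is
 * empty; free_res() unlinks, zeroes, and returns the entry, so steady
 * state allocates nothing.  The helper name is hypothetical.
 */
#if 0
void
lifecycle_example(void)
{
	restrict_u *res;

	res = alloc_res4();	/* from resfree4, else a new block */
	/* ... link into restrictlist4 and use ... */
	free_res(res, 0);	/* unlink, zero, return to resfree4 */
}
#endif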
static restrict_u *
match_restrict4_addr(
	u_int32		addr,
	u_short		port
	)
{
	const int	v6 = 0;
	restrict_u *	res;
	restrict_u *	next;

	for (res = restrictlist4; res != NULL; res = next) {
		next = res->link;
		if (res->expire &&
		    res->expire <= current_time) {
			free_res(res, v6);
			continue;	/* don't match the freed entry */
		}
		if (res->u.v4.addr == (addr & res->u.v4.mask)
		    && (!(RESM_NTPONLY & res->mflags)
			|| NTP_PORT == port))
			break;
	}
	return res;
}


static restrict_u *
match_restrict6_addr(
	const struct in6_addr *	addr,
	u_short			port
	)
{
	const int	v6 = 1;
	restrict_u *	res;
	restrict_u *	next;
	struct in6_addr	masked;

	for (res = restrictlist6; res != NULL; res = next) {
		next = res->link;
		INSIST(next != res);
		if (res->expire &&
		    res->expire <= current_time) {
			free_res(res, v6);
			continue;	/* don't match the freed entry */
		}
		MASK_IPV6_ADDR(&masked, addr, &res->u.v6.mask);
		if (ADDR6_EQ(&masked, &res->u.v6.addr)
		    && (!(RESM_NTPONLY & res->mflags)
			|| NTP_PORT == port))
			break;
	}
	return res;
}


/*
 * match_restrict_entry - find an exact match on a restrict list.
 *
 * Exact match is addr, mask, and mflags all equal.
 * In order to use more common code for IPv4 and IPv6, this routine
 * requires the caller to populate a restrict_u with mflags and either
 * the v4 or v6 address and mask as appropriate.  Other fields in the
 * input restrict_u are ignored.
 */
static restrict_u *
match_restrict_entry(
	const restrict_u *	pmatch,
	int			v6
	)
{
	restrict_u *res;
	restrict_u *rlist;
	size_t cb;

	if (v6) {
		rlist = restrictlist6;
		cb = sizeof(pmatch->u.v6);
	} else {
		rlist = restrictlist4;
		cb = sizeof(pmatch->u.v4);
	}

	for (res = rlist; res != NULL; res = res->link)
		if (res->mflags == pmatch->mflags &&
		    !memcmp(&res->u, &pmatch->u, cb))
			break;
	return res;
}


/*
 * res_sorts_before4 - compare two restrict4 entries
 *
 * Returns nonzero if r1 sorts before r2.  We sort by descending
 * address, then descending mask, then descending mflags, so sorting
 * before means having a higher value.
 */
static int
res_sorts_before4(
	restrict_u *r1,
	restrict_u *r2
	)
{
	int r1_before_r2;

	if (r1->u.v4.addr > r2->u.v4.addr)
		r1_before_r2 = 1;
	else if (r1->u.v4.addr < r2->u.v4.addr)
		r1_before_r2 = 0;
	else if (r1->u.v4.mask > r2->u.v4.mask)
		r1_before_r2 = 1;
	else if (r1->u.v4.mask < r2->u.v4.mask)
		r1_before_r2 = 0;
	else if (r1->mflags > r2->mflags)
		r1_before_r2 = 1;
	else
		r1_before_r2 = 0;

	return r1_before_r2;
}


/*
 * res_sorts_before6 - compare two restrict6 entries
 *
 * Returns nonzero if r1 sorts before r2.  We sort by descending
 * address, then descending mask, then descending mflags, so sorting
 * before means having a higher value.
 */
static int
res_sorts_before6(
	restrict_u *r1,
	restrict_u *r2
	)
{
	int r1_before_r2;
	int cmp;

	cmp = ADDR6_CMP(&r1->u.v6.addr, &r2->u.v6.addr);
	if (cmp > 0)		/* r1->addr > r2->addr */
		r1_before_r2 = 1;
	else if (cmp < 0)	/* r2->addr > r1->addr */
		r1_before_r2 = 0;
	else {
		cmp = ADDR6_CMP(&r1->u.v6.mask, &r2->u.v6.mask);
		if (cmp > 0)		/* r1->mask > r2->mask */
			r1_before_r2 = 1;
		else if (cmp < 0)	/* r2->mask > r1->mask */
			r1_before_r2 = 0;
		else if (r1->mflags > r2->mflags)
			r1_before_r2 = 1;
		else
			r1_before_r2 = 0;
	}

	return r1_before_r2;
}
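/*
 * A small sketch (kept out of the build) of the ordering the
 * comparators impose: with equal addresses, the numerically greater
 * (longer) mask sorts first, which keeps more specific entries ahead
 * in the list.  Values and the helper name are hypothetical.
 */
#if 0
void
sort_example(void)
{
	restrict_u a, b;

	ZERO(a);
	ZERO(b);
	a.u.v4.addr = 0xC0A80000;	/* 192.168.0.0 */
	a.u.v4.mask = 0xFFFFFF00;	/* /24 */
	b.u.v4.addr = 0xC0A80000;	/* 192.168.0.0 */
	b.u.v4.mask = 0xFFFF0000;	/* /16 */

	INSIST(res_sorts_before4(&a, &b));	/* /24 precedes /16 */
}
#endif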
/*
 * restrictions - return restrictions for this host
 */
u_short
restrictions(
	sockaddr_u *srcadr
	)
{
	restrict_u *match;
	struct in6_addr *pin6;
	u_short flags;

	res_calls++;
	flags = 0;
	/* IPv4 source address */
	if (IS_IPV4(srcadr)) {
		/*
		 * Ignore any packets with a multicast source address
		 * (this should be done early in the receive process,
		 * not later!)
		 */
		if (IN_CLASSD(SRCADR(srcadr)))
			return (u_short)RES_IGNORE;

		match = match_restrict4_addr(SRCADR(srcadr),
					     SRCPORT(srcadr));

		INSIST(match != NULL);

		match->count++;
		/*
		 * res_not_found counts only use of the final default
		 * entry, not any "restrict default ntpport ...", which
		 * would be just before the final default.
		 */
		if (&restrict_def4 == match)
			res_not_found++;
		else
			res_found++;
		flags = match->flags;
	}

	/* IPv6 source address */
	if (IS_IPV6(srcadr)) {
		pin6 = PSOCK_ADDR6(srcadr);

		/*
		 * Ignore any packets with a multicast source address
		 * (this should be done early in the receive process,
		 * not later!)
		 */
		if (IN6_IS_ADDR_MULTICAST(pin6))
			return (u_short)RES_IGNORE;

		match = match_restrict6_addr(pin6, SRCPORT(srcadr));
		INSIST(match != NULL);
		match->count++;
		if (&restrict_def6 == match)
			res_not_found++;
		else
			res_found++;
		flags = match->flags;
	}
	return (flags);
}


/*
 * hack_restrict - add/subtract/manipulate entries on the restrict list
 */
void
hack_restrict(
	int		op,
	sockaddr_u *	resaddr,
	sockaddr_u *	resmask,
	u_short		mflags,
	u_short		flags,
	u_long		expire
	)
{
	int		v6;
	restrict_u	match;
	restrict_u *	res;
	restrict_u **	plisthead;

	DPRINTF(1, ("restrict: op %d addr %s mask %s mflags %08x flags %08x\n",
		    op, stoa(resaddr), stoa(resmask), mflags, flags));

	if (NULL == resaddr) {
		REQUIRE(NULL == resmask);
		REQUIRE(RESTRICT_FLAGS == op);
		restrict_source_flags = flags;
		restrict_source_mflags = mflags;
		restrict_source_enabled = 1;
		return;
	}

	ZERO(match);

#if 0
	/* silence VC9 potentially uninit warnings */
	// HMS: let's use a compiler-specific "enable" for this.
	res = NULL;
	v6 = 0;
#endif

	if (IS_IPV4(resaddr)) {
		v6 = 0;
		/*
		 * Get address and mask in host byte order for easy
		 * comparison as u_int32
		 */
		match.u.v4.addr = SRCADR(resaddr);
		match.u.v4.mask = SRCADR(resmask);
		match.u.v4.addr &= match.u.v4.mask;

	} else if (IS_IPV6(resaddr)) {
		v6 = 1;
		/*
		 * Get address and mask in network byte order for easy
		 * comparison as byte sequences (e.g. memcmp())
		 */
		match.u.v6.mask = SOCK_ADDR6(resmask);
		MASK_IPV6_ADDR(&match.u.v6.addr, PSOCK_ADDR6(resaddr),
			       &match.u.v6.mask);

	} else	/* neither IPv4 nor IPv6 */
		REQUIRE(0);

	match.flags = flags;
	match.mflags = mflags;
	match.expire = expire;
	res = match_restrict_entry(&match, v6);

	switch (op) {

	case RESTRICT_FLAGS:
		/*
		 * Here we add bits to the flags.  If this is a
		 * new restriction add it.
		 */
		if (NULL == res) {
			if (v6) {
				res = alloc_res6();
				memcpy(res, &match,
				       V6_SIZEOF_RESTRICT_U);
				plisthead = &restrictlist6;
			} else {
				res = alloc_res4();
				memcpy(res, &match,
				       V4_SIZEOF_RESTRICT_U);
				plisthead = &restrictlist4;
			}
			LINK_SORT_SLIST(
				*plisthead, res,
				(v6)
				    ? res_sorts_before6(res, L_S_S_CUR())
				    : res_sorts_before4(res, L_S_S_CUR()),
				link, restrict_u);
			restrictcount++;
			if (RES_LIMITED & flags)
				inc_res_limited();
		} else {
			if ((RES_LIMITED & flags) &&
			    !(RES_LIMITED & res->flags))
				inc_res_limited();
			res->flags |= flags;
		}
		break;

	case RESTRICT_UNFLAG:
		/*
		 * Remove some bits from the flags.  If we didn't
		 * find this one, just return.
		 */
		if (res != NULL) {
			if ((RES_LIMITED & res->flags)
			    && (RES_LIMITED & flags))
				dec_res_limited();
			res->flags &= ~flags;
		}
		break;

	case RESTRICT_REMOVE:
	case RESTRICT_REMOVEIF:
		/*
		 * Remove an entry from the table entirely if we
		 * found one.  Don't remove the default entry and
		 * don't remove an interface entry.
		 */
		if (res != NULL
		    && (RESTRICT_REMOVEIF == op
			|| !(RESM_INTERFACE & res->mflags))
		    && res != &restrict_def4
		    && res != &restrict_def6)
			free_res(res, v6);
		break;

	default:	/* unknown op */
		INSIST(0);
		break;
	}
}
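/*
 * Sketch (kept out of the build) of driving hack_restrict() the way
 * the config parser does for "restrict" lines.  The address setup is
 * elided; the flag combination and helper name are hypothetical.
 */
#if 0
void
hack_example(sockaddr_u *addr, sockaddr_u *mask)
{
	/* add: install an entry or widen an existing entry's flags */
	hack_restrict(RESTRICT_FLAGS, addr, mask, 0,
		      RES_KOD | RES_LIMITED, 0);

	/* subtract: clear just the KoD bit, keep the entry */
	hack_restrict(RESTRICT_UNFLAG, addr, mask, 0, RES_KOD, 0);

	/* remove the entry outright (defaults are protected) */
	hack_restrict(RESTRICT_REMOVE, addr, mask, 0, 0, 0);
}
#endif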
/*
 * restrict_source - maintains dynamic "restrict source ..." entries as
 * peers come and go.
 */
void
restrict_source(
	sockaddr_u *	addr,
	int		farewell,	/* 0 to add, 1 to remove */
	u_long		expire		/* 0 is infinite, valid until */
	)
{
	sockaddr_u	onesmask;
	restrict_u *	res;
	int		found_specific;

	if (!restrict_source_enabled || SOCK_UNSPEC(addr) ||
	    IS_MCAST(addr) || ISREFCLOCKADR(addr))
		return;

	REQUIRE(AF_INET == AF(addr) || AF_INET6 == AF(addr));

	SET_HOSTMASK(&onesmask, AF(addr));
	if (farewell) {
		hack_restrict(RESTRICT_REMOVE, addr, &onesmask,
			      0, 0, 0);
		DPRINTF(1, ("restrict_source: %s removed\n",
			    stoa(addr)));
		return;
	}

	/*
	 * If there is a specific entry for this address, hands
	 * off, as it is considered more specific than "restrict
	 * server ...".
	 * However, if the specific entry found is a fleeting one
	 * added by pool_xmit() before soliciting, replace it
	 * immediately regardless of the expire value to make way
	 * for the more persistent entry.
	 */
	if (IS_IPV4(addr)) {
		res = match_restrict4_addr(SRCADR(addr), SRCPORT(addr));
		INSIST(res != NULL);
		found_specific = (SRCADR(&onesmask) == res->u.v4.mask);
	} else {
		res = match_restrict6_addr(&SOCK_ADDR6(addr),
					   SRCPORT(addr));
		INSIST(res != NULL);
		found_specific = ADDR6_EQ(&res->u.v6.mask,
					  &SOCK_ADDR6(&onesmask));
	}
	if (!expire && found_specific && res->expire) {
		found_specific = 0;
		free_res(res, IS_IPV6(addr));
	}
	if (found_specific)
		return;

	hack_restrict(RESTRICT_FLAGS, addr, &onesmask,
		      restrict_source_mflags, restrict_source_flags,
		      expire);
	DPRINTF(1, ("restrict_source: %s host restriction added\n",
		    stoa(addr)));
}
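/*
 * Sketch (kept out of the build) of the expected restrict_source()
 * call pattern as associations come and go; "peer" and the helper
 * name are hypothetical.
 */
#if 0
void
source_example(struct peer *peer)
{
	/* mobilize: add a host-specific entry with no expiry */
	restrict_source(&peer->srcadr, 0, 0);

	/* demobilize: drop it again */
	restrict_source(&peer->srcadr, 1, 0);
}
#endif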