1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1983, 1988, 1993 5 * The Regents of the University of California. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. Neither the name of the University nor the names of its contributors 16 * may be used to endorse or promote products derived from this software 17 * without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 
 */

#include "defs.h"

__RCSID("$FreeBSD$");

/* forward declarations for helpers defined later in this file */
static struct rt_spare *rts_better(struct rt_entry *);
/* all-zero spare route; its metric field is HOPCNT_INFINITY (unreachable) */
static struct rt_spare rts_empty = {0,0,0,HOPCNT_INFINITY,0,0,0};
static void set_need_flash(void);
#ifdef _HAVE_SIN_LEN
static void masktrim(struct sockaddr_in *ap);
#else
static void masktrim(struct sockaddr_in_new *ap);
#endif
static void rtbad(struct rt_entry *);


struct radix_node_head *rhead;		/* root of the radix tree */

int	need_flash = 1;			/* flash update needed
					 * start =1 to suppress the 1st
					 */

struct timeval age_timer;		/* next check of old routes */
struct timeval need_kern = {		/* need to update kernel table */
	EPOCH+MIN_WAITTIME-1, 0
};

int	stopint;			/* set on shutdown signal -- NOTE(review):
					 * definition only; set elsewhere */

int	total_routes;			/* count of routes in the daemon table */

/* zap any old routes through this gateway */
static naddr age_bad_gate;


/* It is desirable to "aggregate" routes, to combine differing routes of
 * the same metric and next hop into a common route with a smaller netmask
 * or to suppress redundant routes, routes that add no information to
 * routes with smaller netmasks.
 *
 * A route is redundant if and only if any and all routes with smaller
 * but matching netmasks and nets are the same.  Since routes are
 * kept sorted in the radix tree, redundant routes always come second.
 *
 * There are two kinds of aggregations.  First, two routes of the same bit
 * mask and differing only in the least significant bit of the network
 * number can be combined into a single route with a coarser mask.
 *
 * Second, a route can be suppressed in favor of another route with a more
 * coarse mask provided no incompatible routes with intermediate masks
 * are present.  The second kind of aggregation involves suppressing routes.
 * A route must not be suppressed if an incompatible route exists with
 * an intermediate mask, since the suppressed route would be covered
 * by the intermediate.
 *
 * This code relies on the radix tree walk encountering routes
 * sorted first by address, with the smallest address first.
 */

/* aggregation table: a fixed pool of slots kept on a doubly linked list
 * ordered from coarsest (ag_corsest) to finest (ag_finest) mask; unused
 * slots are chained through ag_fine on ag_avail
 */
static struct ag_info ag_slots[NUM_AG_SLOTS], *ag_avail, *ag_corsest, *ag_finest;

/* #define DEBUG_AG */
#ifdef DEBUG_AG
/* sanity check: every slot is on exactly one of the two lists */
#define CHECK_AG() {int acnt = 0; struct ag_info *cag; \
	for (cag = ag_avail; cag != NULL; cag = cag->ag_fine) \
		acnt++; \
	for (cag = ag_corsest; cag != NULL; cag = cag->ag_fine) \
		acnt++; \
	if (acnt != NUM_AG_SLOTS) { \
		(void)fflush(stderr); \
		abort(); \
	} \
}
#else
#define CHECK_AG()
#endif


/* Output the contents of an aggregation table slot.
 *	This function must always be immediately followed with the deletion
 *	of the target slot.
 */
static void
ag_out(struct ag_info *ag,
	 void (*out)(struct ag_info *))
{
	struct ag_info *ag_cors;
	naddr bit;


	/* Forget it if this route should not be output for split-horizon. */
	if (ag->ag_state & AGS_SPLIT_HZ)
		return;

	/* If we output both the even and odd twins, then the immediate parent,
	 * if it is present, is redundant, unless the parent manages to
	 * aggregate into something coarser.
	 * On successive calls, this code detects the even and odd twins,
	 * and marks the parent.
	 *
	 * Note that the order in which the radix tree code emits routes
	 * ensures that the twins are seen before the parent is emitted.
	 */
	ag_cors = ag->ag_cors;
	if (ag_cors != NULL
	    && ag_cors->ag_mask == ag->ag_mask<<1
	    && ag_cors->ag_dst_h == (ag->ag_dst_h & ag_cors->ag_mask)) {
		ag_cors->ag_state |= ((ag_cors->ag_dst_h == ag->ag_dst_h)
				      ? AGS_REDUN0
				      : AGS_REDUN1);
	}

	/* Skip it if this route is itself redundant.
	 *
	 * It is ok to change the contents of the slot here, since it is
	 * always deleted next.
	 */
	if (ag->ag_state & AGS_REDUN0) {
		if (ag->ag_state & AGS_REDUN1)
			return;		/* quit if fully redundant */
		/* make it finer if it is half-redundant:
		 * (-mask)>>1 is the host bit just below the current mask
		 */
		bit = (-ag->ag_mask) >> 1;
		ag->ag_dst_h |= bit;
		ag->ag_mask |= bit;

	} else if (ag->ag_state & AGS_REDUN1) {
		/* make it finer if it is half-redundant */
		bit = (-ag->ag_mask) >> 1;
		ag->ag_mask |= bit;
	}
	out(ag);
}


/* Unlink a slot from the coarse-to-fine list and return it to the
 * free list (ag_avail).
 */
static void
ag_del(struct ag_info *ag)
{
	CHECK_AG();

	if (ag->ag_cors == NULL)
		ag_corsest = ag->ag_fine;
	else
		ag->ag_cors->ag_fine = ag->ag_fine;

	if (ag->ag_fine == NULL)
		ag_finest = ag->ag_cors;
	else
		ag->ag_fine->ag_cors = ag->ag_cors;

	ag->ag_fine = ag_avail;
	ag_avail = ag;

	CHECK_AG();
}


/* Flush routes waiting for aggregation.
 *	This must not suppress a route unless it is known that among all
 *	routes with coarser masks that match it, the one with the longest
 *	mask is appropriate.  This is ensured by scanning the routes
 *	in lexical order, and with the most restrictive mask first
 *	among routes to the same destination.
 */
void
ag_flush(naddr lim_dst_h,		/* flush routes to here */
	 naddr lim_mask,		/* matching this mask */
	 void (*out)(struct ag_info *))
{
	struct ag_info *ag, *ag_cors;
	naddr dst_h;


	/* scan from the finest mask toward coarser masks */
	for (ag = ag_finest;
	     ag != NULL && ag->ag_mask >= lim_mask;
	     ag = ag_cors) {
		ag_cors = ag->ag_cors;

		/* work on only the specified routes */
		dst_h = ag->ag_dst_h;
		if ((dst_h & lim_mask) != lim_dst_h)
			continue;

		if (!(ag->ag_state & AGS_SUPPRESS))
			ag_out(ag, out);

		else for ( ; ; ag_cors = ag_cors->ag_cors) {
			/* Look for a route that can suppress the
			 * current route */
			if (ag_cors == NULL) {
				/* failed, so output it and look for
				 * another route to work on
				 */
				ag_out(ag, out);
				break;
			}

			if ((dst_h & ag_cors->ag_mask) == ag_cors->ag_dst_h) {
				/* We found a route with a coarser mask that
				 * aggregates the current target.
				 *
				 * If it has a different next hop, it
				 * cannot replace the target, so output
				 * the target.
				 */
				if (ag->ag_gate != ag_cors->ag_gate
				    && !(ag->ag_state & AGS_FINE_GATE)
				    && !(ag_cors->ag_state & AGS_CORS_GATE)) {
					ag_out(ag, out);
					break;
				}

				/* If the coarse route has a good enough
				 * metric, it suppresses the target.
				 * If the suppressed target was redundant,
				 * then mark the suppressor redundant.
				 */
				if (ag_cors->ag_pref <= ag->ag_pref) {
					if (AG_IS_REDUN(ag->ag_state)
					    && ag_cors->ag_mask==ag->ag_mask<<1) {
						if (ag_cors->ag_dst_h == dst_h)
							ag_cors->ag_state |= AGS_REDUN0;
						else
							ag_cors->ag_state |= AGS_REDUN1;
					}
					/* a suppressor only keeps tag/nhop
					 * that are common to all it covers
					 */
					if (ag->ag_tag != ag_cors->ag_tag)
						ag_cors->ag_tag = 0;
					if (ag->ag_nhop != ag_cors->ag_nhop)
						ag_cors->ag_nhop = 0;
					break;
				}
			}
		}

		/* That route has either been output or suppressed */
		ag_cors = ag->ag_cors;
		ag_del(ag);
	}

	CHECK_AG();
}


/* Try to aggregate a route with previous routes.
 */
void
ag_check(naddr	dst,
	 naddr	mask,
	 naddr	gate,
	 naddr	nhop,
	 char	metric,
	 char	pref,
	 u_int	new_seqno,
	 u_short tag,
	 u_short state,
	 void	(*out)(struct ag_info *))	/* output using this */
{
	struct ag_info *ag, *nag, *ag_cors;
	naddr xaddr;
	int x;

	/* work with the destination in host byte order */
	dst = ntohl(dst);

	/* Punt non-contiguous subnet masks.
	 *
	 * (X & -X) contains a single bit if and only if X is a power of 2.
	 * (X + (X & -X)) == 0 if and only if X is a power of 2.
	 */
	if ((mask & -mask) + mask != 0) {
		struct ag_info nc_ag;

		/* non-aggregatable: output it immediately through a
		 * temporary slot and do not enter it in the table
		 */
		nc_ag.ag_dst_h = dst;
		nc_ag.ag_mask = mask;
		nc_ag.ag_gate = gate;
		nc_ag.ag_nhop = nhop;
		nc_ag.ag_metric = metric;
		nc_ag.ag_pref = pref;
		nc_ag.ag_tag = tag;
		nc_ag.ag_state = state;
		nc_ag.ag_seqno = new_seqno;
		out(&nc_ag);
		return;
	}

	/* Search for the right slot in the aggregation table.
	 */
	ag_cors = NULL;
	ag = ag_corsest;
	while (ag != NULL) {
		if (ag->ag_mask >= mask)
			break;

		/* Suppress old routes (i.e. combine with compatible routes
		 * with coarser masks) as we look for the right slot in the
		 * aggregation table for the new route.
		 * A route to an address less than the current destination
		 * will not be affected by the current route or any route
		 * seen hereafter.
That means it is safe to suppress it.
		 * This check keeps poor routes (e.g. with large hop counts)
		 * from preventing suppression of finer routes.
		 */
		if (ag_cors != NULL
		    && ag->ag_dst_h < dst
		    && (ag->ag_state & AGS_SUPPRESS)
		    && ag_cors->ag_pref <= ag->ag_pref
		    && (ag->ag_dst_h & ag_cors->ag_mask) == ag_cors->ag_dst_h
		    && (ag_cors->ag_gate == ag->ag_gate
			|| (ag->ag_state & AGS_FINE_GATE)
			|| (ag_cors->ag_state & AGS_CORS_GATE))) {
			/* If the suppressed target was redundant,
			 * then mark the suppressor redundant.
			 */
			if (AG_IS_REDUN(ag->ag_state)
			    && ag_cors->ag_mask == ag->ag_mask<<1) {
				if (ag_cors->ag_dst_h == dst)
					ag_cors->ag_state |= AGS_REDUN0;
				else
					ag_cors->ag_state |= AGS_REDUN1;
			}
			if (ag->ag_tag != ag_cors->ag_tag)
				ag_cors->ag_tag = 0;
			if (ag->ag_nhop != ag_cors->ag_nhop)
				ag_cors->ag_nhop = 0;
			ag_del(ag);
			CHECK_AG();
		} else {
			ag_cors = ag;
		}
		ag = ag_cors->ag_fine;
	}

	/* If we find the even/odd twin of the new route, and if the
	 * masks and so forth are equal, we can aggregate them.
	 * We can probably promote one of the pair.
	 *
	 * Since the routes are encountered in lexical order,
	 * the new route must be odd.  However, the second or later
	 * times around this loop, it could be the even twin promoted
	 * from the even/odd pair of twins of the finer route.
	 */
	while (ag != NULL
	       && ag->ag_mask == mask
	       && ((ag->ag_dst_h ^ dst) & (mask<<1)) == 0) {

		/* Here we know the target route and the route in the current
		 * slot have the same netmasks and differ by at most the
		 * last bit.  They are either for the same destination, or
		 * for an even/odd pair of destinations.
		 */
		if (ag->ag_dst_h == dst) {
			/* We have two routes to the same destination.
			 * Routes are encountered in lexical order, so a
			 * route is never promoted until the parent route is
			 * already present.  So we know that the new route is
			 * a promoted (or aggregated) pair and the route
			 * already in the slot is the explicit route.
			 *
			 * Prefer the best route if their metrics differ,
			 * or the aggregated one if not, following a sort
			 * of longest-match rule.
			 */
			if (pref <= ag->ag_pref) {
				/* adopt the new route's attributes; swap
				 * state so the merge below sees the loser's
				 * flags in `state`
				 */
				ag->ag_gate = gate;
				ag->ag_nhop = nhop;
				ag->ag_tag = tag;
				ag->ag_metric = metric;
				ag->ag_pref = pref;
				if (ag->ag_seqno < new_seqno)
					ag->ag_seqno = new_seqno;
				x = ag->ag_state;
				ag->ag_state = state;
				state = x;
			}

			/* Some bits are set if they are set on either route,
			 * except when the route is for an interface.
			 */
			if (!(ag->ag_state & AGS_IF))
				ag->ag_state |= (state & (AGS_AGGREGATE_EITHER
							| AGS_REDUN0
							| AGS_REDUN1));
			return;
		}

		/* If one of the routes can be promoted and the other can
		 * be suppressed, it may be possible to combine them or
		 * worthwhile to promote one.
		 *
		 * Any route that can be promoted is always
		 * marked to be eligible to be suppressed.
		 */
		if (!((state & AGS_AGGREGATE)
		      && (ag->ag_state & AGS_SUPPRESS))
		    && !((ag->ag_state & AGS_AGGREGATE)
			 && (state & AGS_SUPPRESS)))
			break;

		/* A pair of even/odd twin routes can be combined
		 * if either is redundant, or if they are via the
		 * same gateway and have the same metric.
		 */
		if (AG_IS_REDUN(ag->ag_state)
		    || AG_IS_REDUN(state)
		    || (ag->ag_gate == gate
			&& ag->ag_pref == pref
			&& (state & ag->ag_state & AGS_AGGREGATE) != 0)) {

			/* We have both the even and odd pairs.
			 * Since the routes are encountered in order,
			 * the route in the slot must be the even twin.
			 *
			 * Combine and promote (aggregate) the pair of routes.
			 */
			if (new_seqno < ag->ag_seqno)
				new_seqno = ag->ag_seqno;
			if (!AG_IS_REDUN(state))
				state &= ~AGS_REDUN1;
			if (AG_IS_REDUN(ag->ag_state))
				state |= AGS_REDUN0;
			else
				state &= ~AGS_REDUN0;
			state |= (ag->ag_state & AGS_AGGREGATE_EITHER);
			/* keep tag/nhop only if common to both twins */
			if (ag->ag_tag != tag)
				tag = 0;
			if (ag->ag_nhop != nhop)
				nhop = 0;

			/* Get rid of the even twin that was already
			 * in the slot.
			 */
			ag_del(ag);

		} else if (ag->ag_pref >= pref
			   && (ag->ag_state & AGS_AGGREGATE)) {
			/* If we cannot combine the pair, maybe the route
			 * with the worse metric can be promoted.
			 *
			 * Promote the old, even twin, by giving its slot
			 * in the table to the new, odd twin.
			 */
			ag->ag_dst_h = dst;

			xaddr = ag->ag_gate;
			ag->ag_gate = gate;
			gate = xaddr;

			xaddr = ag->ag_nhop;
			ag->ag_nhop = nhop;
			nhop = xaddr;

			x = ag->ag_tag;
			ag->ag_tag = tag;
			tag = x;

			/* The promoted route is even-redundant only if the
			 * even twin was fully redundant.  It is not
			 * odd-redundant because the odd-twin will still be
			 * in the table.
			 */
			x = ag->ag_state;
			if (!AG_IS_REDUN(x))
				x &= ~AGS_REDUN0;
			x &= ~AGS_REDUN1;
			ag->ag_state = state;
			state = x;

			x = ag->ag_metric;
			ag->ag_metric = metric;
			metric = x;

			x = ag->ag_pref;
			ag->ag_pref = pref;
			pref = x;

			/* take the newest sequence number */
			if (new_seqno <= ag->ag_seqno)
				new_seqno = ag->ag_seqno;
			else
				ag->ag_seqno = new_seqno;

		} else {
			if (!(state & AGS_AGGREGATE))
				break;	/* cannot promote either twin */

			/* Promote the new, odd twin by shaving its
			 * mask and address.
			 * The promoted route is odd-redundant only if the
			 * odd twin was fully redundant.  It is not
			 * even-redundant because the even twin is still in
			 * the table.
			 */
			if (!AG_IS_REDUN(state))
				state &= ~AGS_REDUN1;
			state &= ~AGS_REDUN0;
			if (new_seqno < ag->ag_seqno)
				new_seqno = ag->ag_seqno;
			else
				ag->ag_seqno = new_seqno;
		}

		/* coarsen the promoted route and continue the search
		 * one level up the list
		 */
		mask <<= 1;
		dst &= mask;

		if (ag_cors == NULL) {
			ag = ag_corsest;
			break;
		}
		ag = ag_cors;
		ag_cors = ag->ag_cors;
	}

	/* When we can no longer promote and combine routes,
	 * flush the old route in the target slot.  Also flush
	 * any finer routes that we know will never be aggregated by
	 * the new route.
	 *
	 * In case we moved toward coarser masks,
	 * get back where we belong
	 */
	if (ag != NULL
	    && ag->ag_mask < mask) {
		ag_cors = ag;
		ag = ag->ag_fine;
	}

	/* Empty the target slot
	 */
	if (ag != NULL && ag->ag_mask == mask) {
		ag_flush(ag->ag_dst_h, ag->ag_mask, out);
		ag = (ag_cors == NULL) ? ag_corsest : ag_cors->ag_fine;
	}

#ifdef DEBUG_AG
	/* verify list invariants around the insertion point */
	(void)fflush(stderr);
	if (ag == NULL && ag_cors != ag_finest)
		abort();
	if (ag_cors == NULL && ag != ag_corsest)
		abort();
	if (ag != NULL && ag->ag_cors != ag_cors)
		abort();
	if (ag_cors != NULL && ag_cors->ag_fine != ag)
		abort();
	CHECK_AG();
#endif

	/* Save the new route on the end of the table.
	 */
	nag = ag_avail;
	ag_avail = nag->ag_fine;

	nag->ag_dst_h = dst;
	nag->ag_mask = mask;
	nag->ag_gate = gate;
	nag->ag_nhop = nhop;
	nag->ag_metric = metric;
	nag->ag_pref = pref;
	nag->ag_tag = tag;
	nag->ag_state = state;
	nag->ag_seqno = new_seqno;

	/* link the new slot between ag_cors and ag */
	nag->ag_fine = ag;
	if (ag != NULL)
		ag->ag_cors = nag;
	else
		ag_finest = nag;
	nag->ag_cors = ag_cors;
	if (ag_cors == NULL)
		ag_corsest = nag;
	else
		ag_cors->ag_fine = nag;
	CHECK_AG();
}

/* Return a printable name for a routing-socket message type.
 * Unknown or zero types fall back to a static "RTM type %#x" buffer,
 * which is overwritten on each such call.
 */
static const char *
rtm_type_name(u_char type)
{
	static const char * const rtm_types[] = {
		"RTM_ADD",
		"RTM_DELETE",
		"RTM_CHANGE",
		"RTM_GET",
		"RTM_LOSING",
		"RTM_REDIRECT",
		"RTM_MISS",
		"RTM_LOCK",
		"RTM_OLDADD",
		"RTM_OLDDEL",
		"RTM_RESOLVE",
		"RTM_NEWADDR",
		"RTM_DELADDR",
#ifdef RTM_OIFINFO
		"RTM_OIFINFO",
#endif
		"RTM_IFINFO",
		"RTM_NEWMADDR",
		"RTM_DELMADDR"
	};
#define NEW_RTM_PAT "RTM type %#x"
	static char name0[sizeof(NEW_RTM_PAT)+2];


	/* table is 1-based: RTM_ADD == 1 maps to rtm_types[0] */
	if (type > sizeof(rtm_types)/sizeof(rtm_types[0])
	    || type == 0) {
		snprintf(name0, sizeof(name0), NEW_RTM_PAT, type);
		return name0;
	} else {
		return rtm_types[type-1];
	}
#undef NEW_RTM_PAT
}


/* Trim a mask in a sockaddr
 *	Produce a length of 0 for an address of 0.
 *	Otherwise produce the index of the first zero byte.
 */
void
#ifdef _HAVE_SIN_LEN
masktrim(struct sockaddr_in *ap)
#else
masktrim(struct sockaddr_in_new *ap)
#endif
{
	char *cp;

	if (ap->sin_addr.s_addr == 0) {
		ap->sin_len = 0;
		return;
	}
	/* scan backward from one past the end of s_addr to the last
	 * non-zero byte; sin_len becomes the offset of that byte + 1
	 */
	cp = (char *)(&ap->sin_addr.s_addr+1);
	while (*--cp == 0)
		continue;
	ap->sin_len = cp - (char*)ap + 1;
}


/* Tell the kernel to add, delete or change a route
 */
static void
rtioctl(int action,			/* RTM_DELETE, etc */
	naddr dst,
	naddr gate,
	naddr mask,
	int metric,
	int flags)
{
	/* routing-socket message: header followed by the sockaddrs
	 * announced in rtm_addrs, in RTA_* order
	 */
	struct {
		struct rt_msghdr w_rtm;
		struct sockaddr_in w_dst;
		struct sockaddr_in w_gate;
#ifdef _HAVE_SA_LEN
		struct sockaddr_in w_mask;
#else
		struct sockaddr_in_new w_mask;
#endif
	} w;
	long cc;
#   define PAT " %-10s %s metric=%d flags=%#x"
#   define ARGS rtm_type_name(action), rtname(dst,mask,gate), metric, flags

again:
	memset(&w, 0, sizeof(w));
	w.w_rtm.rtm_msglen = sizeof(w);
	w.w_rtm.rtm_version = RTM_VERSION;
	w.w_rtm.rtm_type = action;
	w.w_rtm.rtm_flags = flags;
	w.w_rtm.rtm_seq = ++rt_sock_seqno;
	w.w_rtm.rtm_addrs = RTA_DST|RTA_GATEWAY;
	if (metric != 0 || action == RTM_CHANGE) {
		w.w_rtm.rtm_rmx.rmx_hopcount = metric;
		w.w_rtm.rtm_inits |= RTV_HOPCOUNT;
	}
	w.w_dst.sin_family = AF_INET;
	w.w_dst.sin_addr.s_addr = dst;
	w.w_gate.sin_family = AF_INET;
	w.w_gate.sin_addr.s_addr = gate;
#ifdef _HAVE_SA_LEN
	w.w_dst.sin_len = sizeof(w.w_dst);
	w.w_gate.sin_len = sizeof(w.w_gate);
#endif
	if (mask == HOST_MASK) {
		/* host route: omit the netmask sockaddr entirely */
		w.w_rtm.rtm_flags |= RTF_HOST;
		w.w_rtm.rtm_msglen -= sizeof(w.w_mask);
	} else {
		w.w_rtm.rtm_addrs |= RTA_NETMASK;
		w.w_mask.sin_addr.s_addr = htonl(mask);
#ifdef _HAVE_SA_LEN
		/* shrink the message by the trailing zero bytes of the mask */
		masktrim(&w.w_mask);
		if (w.w_mask.sin_len == 0)
			w.w_mask.sin_len = sizeof(long);
		w.w_rtm.rtm_msglen -= (sizeof(w.w_mask) - w.w_mask.sin_len);
#endif
	}

#ifndef NO_INSTALL
	cc = write(rt_sock, &w, w.w_rtm.rtm_msglen);
	if (cc < 0) {
		if (errno == ESRCH
		    && (action == RTM_CHANGE || action == RTM_DELETE)) {
			trace_act("route disappeared before" PAT, ARGS);
			/* the route vanished; retry a CHANGE as an ADD */
			if (action == RTM_CHANGE) {
				action = RTM_ADD;
				goto again;
			}
			return;
		}
		msglog("write(rt_sock)" PAT ": %s", ARGS, strerror(errno));
		return;
	} else if (cc != w.w_rtm.rtm_msglen) {
		msglog("write(rt_sock) wrote %ld instead of %d for" PAT,
		       cc, w.w_rtm.rtm_msglen, ARGS);
		return;
	}
#endif
	if (TRACEKERNEL)
		trace_misc("write kernel" PAT, ARGS);
#undef PAT
#undef ARGS
}


/* hash table mirroring the kernel's routing table, keyed by (dst ^ mask) */
#define KHASH_SIZE 71			/* should be prime */
#define KHASH(a,m) khash_bins[((a) ^ (m)) % KHASH_SIZE]
static struct khash {
	struct khash *k_next;
	naddr	k_dst;
	naddr	k_mask;
	naddr	k_gate;
	short	k_metric;
	u_short	k_state;
#define	    KS_NEW	0x001
#define	    KS_DELETE	0x002		/* need to delete the route */
#define	    KS_ADD	0x004		/* add to the kernel */
#define	    KS_CHANGE	0x008		/* tell kernel to change the route */
#define	    KS_DEL_ADD	0x010		/* delete & add to change the kernel */
#define	    KS_STATIC	0x020		/* Static flag in kernel */
#define	    KS_GATEWAY	0x040		/* G flag in kernel */
#define	    KS_DYNAMIC	0x080		/* result of redirect */
#define	    KS_DELETED	0x100		/* already deleted from kernel */
#define	    KS_CHECK	0x200
	time_t	k_keep;
#define	    K_KEEP_LIM	30
	time_t	k_redirect_time;	/* when redirected route 1st seen */
} *khash_bins[KHASH_SIZE];


/* Look up the hash entry for dst/mask.
 * If ppk is not NULL it also returns the address of the link that
 * points at the entry (or at the NULL chain end), for insertion.
 */
static struct khash*
kern_find(naddr dst, naddr mask, struct khash ***ppk)
{
	struct khash *k, **pk;

	for (pk = &KHASH(dst,mask); (k = *pk) != NULL; pk = &k->k_next) {
		if (k->k_dst == dst && k->k_mask == mask)
			break;
	}
	if (ppk != NULL)
		*ppk = pk;
	return k;
}


/* Find or create the hash entry for dst/mask; new entries start in KS_NEW.
 */
static struct khash*
kern_add(naddr dst, naddr mask)
{
	struct khash
*k, **pk;

	k = kern_find(dst, mask, &pk);
	if (k != NULL)
		return k;

	k = (struct khash *)rtmalloc(sizeof(*k), "kern_add");

	memset(k, 0, sizeof(*k));
	k->k_dst = dst;
	k->k_mask = mask;
	k->k_state = KS_NEW;
	k->k_keep = now.tv_sec;
	/* pk still points at the chain end found by kern_find */
	*pk = k;

	return k;
}


/* If a kernel route has a non-zero metric, check that it is still in the
 *	daemon table, and not deleted by interfaces coming and going.
 */
static void
kern_check_static(struct khash *k,
		  struct interface *ifp)
{
	struct rt_entry *rt;
	struct rt_spare new;

	if (k->k_metric == 0)
		return;

	/* build a spare describing the kernel's view of the route */
	memset(&new, 0, sizeof(new));
	new.rts_ifp = ifp;
	new.rts_gate = k->k_gate;
	new.rts_router = (ifp != NULL) ? ifp->int_addr : loopaddr;
	new.rts_metric = k->k_metric;
	new.rts_time = now.tv_sec;

	rt = rtget(k->k_dst, k->k_mask);
	if (rt != NULL) {
		if (!(rt->rt_state & RS_STATIC))
			rtchange(rt, rt->rt_state | RS_STATIC, &new, 0);
	} else {
		rtadd(k->k_dst, k->k_mask, RS_STATIC, &new);
	}
}


/* operate on a kernel entry
 */
static void
kern_ioctl(struct khash *k,
	   int action,			/* RTM_DELETE, etc */
	   int flags)

{
	/* KS_DELETED tracking avoids redundant deletes; a CHANGE of an
	 * already-deleted route must be sent as an ADD instead
	 */
	switch (action) {
	case RTM_DELETE:
		k->k_state &= ~KS_DYNAMIC;
		if (k->k_state & KS_DELETED)
			return;
		k->k_state |= KS_DELETED;
		break;
	case RTM_ADD:
		k->k_state &= ~KS_DELETED;
		break;
	case RTM_CHANGE:
		if (k->k_state & KS_DELETED) {
			action = RTM_ADD;
			k->k_state &= ~KS_DELETED;
		}
		break;
	}

	rtioctl(action, k->k_dst, k->k_gate, k->k_mask, k->k_metric, flags);
}


/* add a route the kernel told us
 */
static void
rtm_add(struct rt_msghdr *rtm,
	struct rt_addrinfo *info,
	time_t keep)
{
	struct khash *k;
	struct interface *ifp;
	naddr mask;


	if (rtm->rtm_flags & RTF_HOST) {
		mask = HOST_MASK;
	} else if (INFO_MASK(info) != 0) {
		mask = ntohl(S_ADDR(INFO_MASK(info)));
	} else {
		msglog("ignore %s without mask", rtm_type_name(rtm->rtm_type));
		return;
	}

	k = kern_add(S_ADDR(INFO_DST(info)), mask);
	if (k->k_state & KS_NEW)
		k->k_keep = now.tv_sec+keep;
	if (INFO_GATE(info) == 0) {
		trace_act("note %s without gateway",
			  rtm_type_name(rtm->rtm_type));
		k->k_metric = HOPCNT_INFINITY;
	} else if (INFO_GATE(info)->sa_family != AF_INET) {
		trace_act("note %s with gateway AF=%d",
			  rtm_type_name(rtm->rtm_type),
			  INFO_GATE(info)->sa_family);
		k->k_metric = HOPCNT_INFINITY;
	} else {
		k->k_gate = S_ADDR(INFO_GATE(info));
		/* clamp the kernel's hop count into [0, HOPCNT_INFINITY-1] */
		k->k_metric = rtm->rtm_rmx.rmx_hopcount;
		if (k->k_metric < 0)
			k->k_metric = 0;
		else if (k->k_metric > HOPCNT_INFINITY-1)
			k->k_metric = HOPCNT_INFINITY-1;
	}
	k->k_state &= ~(KS_DELETE | KS_ADD | KS_CHANGE | KS_DEL_ADD
			| KS_DELETED | KS_GATEWAY | KS_STATIC
			| KS_NEW | KS_CHECK);
	if (rtm->rtm_flags & RTF_GATEWAY)
		k->k_state |= KS_GATEWAY;
	if (rtm->rtm_flags & RTF_STATIC)
		k->k_state |= KS_STATIC;

	if (0 != (rtm->rtm_flags & (RTF_DYNAMIC | RTF_MODIFIED))) {
		/* the route came from an ICMP redirect */
		if (INFO_AUTHOR(info) != 0
		    && INFO_AUTHOR(info)->sa_family == AF_INET)
			ifp = iflookup(S_ADDR(INFO_AUTHOR(info)));
		else
			ifp = NULL;
		if (supplier
		    && (ifp == NULL || !(ifp->int_state & IS_REDIRECT_OK))) {
			/* Routers are not supposed to listen to redirects,
			 * so delete it if it came via an unknown interface
			 * or the interface does not have special permission.
			 */
			k->k_state &= ~KS_DYNAMIC;
			k->k_state |= KS_DELETE;
			LIM_SEC(need_kern, 0);
			trace_act("mark for deletion redirected %s --> %s"
				  " via %s",
				  addrname(k->k_dst, k->k_mask, 0),
				  naddr_ntoa(k->k_gate),
				  ifp ? ifp->int_name : "unknown interface");
		} else {
			k->k_state |= KS_DYNAMIC;
			k->k_redirect_time = now.tv_sec;
			trace_act("accept redirected %s --> %s via %s",
				  addrname(k->k_dst, k->k_mask, 0),
				  naddr_ntoa(k->k_gate),
				  ifp ? ifp->int_name : "unknown interface");
		}
		return;
	}

	/* If it is not a static route, quit until the next comparison
	 * between the kernel and daemon tables, when it will be deleted.
	 */
	if (!(k->k_state & KS_STATIC)) {
		k->k_state |= KS_DELETE;
		LIM_SEC(need_kern, k->k_keep);
		return;
	}

	/* Put static routes with real metrics into the daemon table so
	 * they can be advertised.
	 *
	 * Find the interface toward the gateway.
	 */
	ifp = iflookup(k->k_gate);
	if (ifp == NULL)
		msglog("static route %s --> %s impossibly lacks ifp",
		       addrname(S_ADDR(INFO_DST(info)), mask, 0),
		       naddr_ntoa(k->k_gate));

	kern_check_static(k, ifp);
}


/* deal with packet loss
 */
static void
rtm_lose(struct rt_msghdr *rtm,
	 struct rt_addrinfo *info)
{
	if (INFO_GATE(info) == 0
	    || INFO_GATE(info)->sa_family != AF_INET) {
		trace_act("ignore %s without gateway",
			  rtm_type_name(rtm->rtm_type));
		return;
	}

	/* age routes through the losing gateway */
	if (rdisc_ok)
		rdisc_age(S_ADDR(INFO_GATE(info)));
	age(S_ADDR(INFO_GATE(info)));
}


/* Make the gateway slot of an info structure point to something
 * useful.  If it is not already useful, but it specifies an interface,
 * then fill in the sockaddr_in provided and point it there.
 */
static int
get_info_gate(struct sockaddr **sap,
	      struct sockaddr_in *rsin)
{
	struct sockaddr_dl *sdl = (struct sockaddr_dl *)*sap;
	struct interface *ifp;

	if (sdl == NULL)
		return 0;
	if ((sdl)->sdl_family == AF_INET)
		return 1;		/* already a usable IPv4 gateway */
	if ((sdl)->sdl_family != AF_LINK)
		return 0;

	/* AF_LINK gateway: substitute the interface's own address */
	ifp = ifwithindex(sdl->sdl_index, 1);
	if (ifp == NULL)
		return 0;

	rsin->sin_addr.s_addr = ifp->int_addr;
#ifdef _HAVE_SA_LEN
	rsin->sin_len = sizeof(*rsin);
#endif
	rsin->sin_family = AF_INET;
	*sap = (struct sockaddr*)rsin;

	return 1;
}


/* Clean the kernel table by copying it to the daemon image.
 * Eventually the daemon will delete any extra routes.
 */
void
flush_kern(void)
{
	static char *sysctl_buf;
	static size_t sysctl_buf_size = 0;
	size_t needed;
	int mib[6];
	char *next, *lim;
	struct rt_msghdr *rtm;
	struct sockaddr_in gate_sin;
	struct rt_addrinfo info;
	int i;
	struct khash *k;


	/* mark every known kernel route; survivors of the dump scan
	 * below are routes that disappeared from the kernel
	 */
	for (i = 0; i < KHASH_SIZE; i++) {
		for (k = khash_bins[i]; k != NULL; k = k->k_next) {
			k->k_state |= KS_CHECK;
		}
	}

	mib[0] = CTL_NET;
	mib[1] = PF_ROUTE;
	mib[2] = 0;		/* protocol */
	mib[3] = 0;		/* wildcard address family */
	mib[4] = NET_RT_DUMP;
	mib[5] = 0;		/* no flags */
	/* grow the buffer until the full dump fits */
	for (;;) {
		if ((needed = sysctl_buf_size) != 0) {
			if (sysctl(mib, 6, sysctl_buf,&needed, 0, 0) >= 0)
				break;
			if (errno != ENOMEM && errno != EFAULT)
				BADERR(1,"flush_kern: sysctl(RT_DUMP)");
			free(sysctl_buf);
			needed = 0;
		}
		if (sysctl(mib, 6, 0, &needed, 0, 0) < 0)
			BADERR(1,"flush_kern: sysctl(RT_DUMP) estimate");
		/* Kludge around the habit of some systems, such as
		 * BSD/OS 3.1, to not admit how many routes are in the
		 * kernel, or at least to be quite wrong.
		 */
		needed += 50*(sizeof(*rtm)+5*sizeof(struct sockaddr));
		sysctl_buf = rtmalloc(sysctl_buf_size = needed,
				      "flush_kern sysctl(RT_DUMP)");
	}

	lim = sysctl_buf + needed;
	for (next = sysctl_buf; next < lim; next += rtm->rtm_msglen) {
		rtm = (struct rt_msghdr *)next;
		if (rtm->rtm_msglen == 0) {
			msglog("zero length kernel route at "
			       " %#lx in buffer %#lx before %#lx",
			       (u_long)rtm, (u_long)sysctl_buf, (u_long)lim);
			break;
		}

		rt_xaddrs(&info,
			  (struct sockaddr *)(rtm+1),
			  (struct sockaddr *)(next + rtm->rtm_msglen),
			  rtm->rtm_addrs);

		if (INFO_DST(&info) == 0
		    || INFO_DST(&info)->sa_family != AF_INET)
			continue;

#if defined (RTF_LLINFO)
		/* ignore ARP table entries on systems with a merged route
		 * and ARP table.
		 */
		if (rtm->rtm_flags & RTF_LLINFO)
			continue;
#endif
#if defined(RTF_WASCLONED) && defined(__FreeBSD__)
		/* ignore cloned routes
		 */
		if (rtm->rtm_flags & RTF_WASCLONED)
			continue;
#endif

		/* ignore multicast addresses
		 */
		if (IN_MULTICAST(ntohl(S_ADDR(INFO_DST(&info)))))
			continue;

		if (!get_info_gate(&INFO_GATE(&info), &gate_sin))
			continue;

		/* Note static routes and interface routes, and also
		 * preload the image of the kernel table so that
		 * we can later clean it, as well as avoid making
		 * unneeded changes.  Keep the old kernel routes for a
		 * few seconds to allow a RIP or router-discovery
		 * response to be heard.
		 */
		rtm_add(rtm,&info,MIN_WAITTIME);
	}

	/* anything still marked KS_CHECK was not in the dump */
	for (i = 0; i < KHASH_SIZE; i++) {
		for (k = khash_bins[i]; k != NULL; k = k->k_next) {
			if (k->k_state & KS_CHECK) {
				msglog("%s --> %s disappeared from kernel",
				       addrname(k->k_dst, k->k_mask, 0),
				       naddr_ntoa(k->k_gate));
				del_static(k->k_dst, k->k_mask, k->k_gate, 1);
			}
		}
	}
}


/* Listen to announcements from the kernel
 */
void
read_rt(void)
{
	long cc;
	struct interface *ifp;
	struct sockaddr_in gate_sin;
	naddr mask, gate;
	union {
		struct {
			struct rt_msghdr rtm;
			struct sockaddr addrs[RTAX_MAX];
		} r;
		struct if_msghdr ifm;
	} m;
	char str[100], *strp;
	struct rt_addrinfo info;


	for (;;) {
		/* drain the routing socket; it is non-blocking, so EOF
		 * or EWOULDBLOCK ends the loop
		 */
		cc = read(rt_sock, &m, sizeof(m));
		if (cc <= 0) {
			if (cc < 0 && errno != EWOULDBLOCK)
				LOGERR("read(rt_sock)");
			return;
		}

		if (m.r.rtm.rtm_version != RTM_VERSION) {
			msglog("bogus routing message version %d",
			       m.r.rtm.rtm_version);
			continue;
		}

		/* Ignore our own results.
1178 */ 1179 if (m.r.rtm.rtm_type <= RTM_CHANGE 1180 && m.r.rtm.rtm_pid == mypid) { 1181 static int complained = 0; 1182 if (!complained) { 1183 msglog("receiving our own change messages"); 1184 complained = 1; 1185 } 1186 continue; 1187 } 1188 1189 if (m.r.rtm.rtm_type == RTM_IFINFO 1190 || m.r.rtm.rtm_type == RTM_NEWADDR 1191 || m.r.rtm.rtm_type == RTM_DELADDR) { 1192 ifp = ifwithindex(m.ifm.ifm_index, 1193 m.r.rtm.rtm_type != RTM_DELADDR); 1194 if (ifp == NULL) 1195 trace_act("note %s with flags %#x" 1196 " for unknown interface index #%d", 1197 rtm_type_name(m.r.rtm.rtm_type), 1198 m.ifm.ifm_flags, 1199 m.ifm.ifm_index); 1200 else 1201 trace_act("note %s with flags %#x for %s", 1202 rtm_type_name(m.r.rtm.rtm_type), 1203 m.ifm.ifm_flags, 1204 ifp->int_name); 1205 1206 /* After being informed of a change to an interface, 1207 * check them all now if the check would otherwise 1208 * be a long time from now, if the interface is 1209 * not known, or if the interface has been turned 1210 * off or on. 1211 */ 1212 if (ifinit_timer.tv_sec-now.tv_sec>=CHECK_BAD_INTERVAL 1213 || ifp == NULL 1214 || ((ifp->int_if_flags ^ m.ifm.ifm_flags) 1215 & IFF_UP) != 0) 1216 ifinit_timer.tv_sec = now.tv_sec; 1217 continue; 1218 } 1219 #ifdef RTM_OIFINFO 1220 if (m.r.rtm.rtm_type == RTM_OIFINFO) 1221 continue; /* ignore compat message */ 1222 #endif 1223 1224 strlcpy(str, rtm_type_name(m.r.rtm.rtm_type), sizeof(str)); 1225 strp = &str[strlen(str)]; 1226 if (m.r.rtm.rtm_type <= RTM_CHANGE) 1227 strp += sprintf(strp," from pid %d",m.r.rtm.rtm_pid); 1228 1229 /* 1230 * Only messages that use the struct rt_msghdr format are 1231 * allowed beyond this point. 
1232 */ 1233 if (m.r.rtm.rtm_type > RTM_RESOLVE) { 1234 trace_act("ignore %s", str); 1235 continue; 1236 } 1237 1238 rt_xaddrs(&info, m.r.addrs, &m.r.addrs[RTAX_MAX], 1239 m.r.rtm.rtm_addrs); 1240 1241 if (INFO_DST(&info) == 0) { 1242 trace_act("ignore %s without dst", str); 1243 continue; 1244 } 1245 1246 if (INFO_DST(&info)->sa_family != AF_INET) { 1247 trace_act("ignore %s for AF %d", str, 1248 INFO_DST(&info)->sa_family); 1249 continue; 1250 } 1251 1252 mask = ((INFO_MASK(&info) != 0) 1253 ? ntohl(S_ADDR(INFO_MASK(&info))) 1254 : (m.r.rtm.rtm_flags & RTF_HOST) 1255 ? HOST_MASK 1256 : std_mask(S_ADDR(INFO_DST(&info)))); 1257 1258 strp += sprintf(strp, ": %s", 1259 addrname(S_ADDR(INFO_DST(&info)), mask, 0)); 1260 1261 if (IN_MULTICAST(ntohl(S_ADDR(INFO_DST(&info))))) { 1262 trace_act("ignore multicast %s", str); 1263 continue; 1264 } 1265 1266 #if defined(RTF_LLINFO) 1267 if (m.r.rtm.rtm_flags & RTF_LLINFO) { 1268 trace_act("ignore ARP %s", str); 1269 continue; 1270 } 1271 #endif 1272 1273 #if defined(RTF_WASCLONED) && defined(__FreeBSD__) 1274 if (m.r.rtm.rtm_flags & RTF_WASCLONED) { 1275 trace_act("ignore cloned %s", str); 1276 continue; 1277 } 1278 #endif 1279 1280 if (get_info_gate(&INFO_GATE(&info), &gate_sin)) { 1281 gate = S_ADDR(INFO_GATE(&info)); 1282 strp += sprintf(strp, " --> %s", naddr_ntoa(gate)); 1283 } else { 1284 gate = 0; 1285 } 1286 1287 if (INFO_AUTHOR(&info) != 0) 1288 strp += sprintf(strp, " by authority of %s", 1289 saddr_ntoa(INFO_AUTHOR(&info))); 1290 1291 switch (m.r.rtm.rtm_type) { 1292 case RTM_ADD: 1293 case RTM_CHANGE: 1294 case RTM_REDIRECT: 1295 if (m.r.rtm.rtm_errno != 0) { 1296 trace_act("ignore %s with \"%s\" error", 1297 str, strerror(m.r.rtm.rtm_errno)); 1298 } else { 1299 trace_act("%s", str); 1300 rtm_add(&m.r.rtm,&info,0); 1301 } 1302 break; 1303 1304 case RTM_DELETE: 1305 if (m.r.rtm.rtm_errno != 0 1306 && m.r.rtm.rtm_errno != ESRCH) { 1307 trace_act("ignore %s with \"%s\" error", 1308 str, strerror(m.r.rtm.rtm_errno)); 
1309 } else { 1310 trace_act("%s", str); 1311 del_static(S_ADDR(INFO_DST(&info)), mask, 1312 gate, 1); 1313 } 1314 break; 1315 1316 case RTM_LOSING: 1317 trace_act("%s", str); 1318 rtm_lose(&m.r.rtm,&info); 1319 break; 1320 1321 default: 1322 trace_act("ignore %s", str); 1323 break; 1324 } 1325 } 1326 } 1327 1328 1329 /* after aggregating, note routes that belong in the kernel 1330 */ 1331 static void 1332 kern_out(struct ag_info *ag) 1333 { 1334 struct khash *k; 1335 1336 1337 /* Do not install bad routes if they are not already present. 1338 * This includes routes that had RS_NET_SYN for interfaces that 1339 * recently died. 1340 */ 1341 if (ag->ag_metric == HOPCNT_INFINITY) { 1342 k = kern_find(htonl(ag->ag_dst_h), ag->ag_mask, 0); 1343 if (k == NULL) 1344 return; 1345 } else { 1346 k = kern_add(htonl(ag->ag_dst_h), ag->ag_mask); 1347 } 1348 1349 if (k->k_state & KS_NEW) { 1350 /* will need to add new entry to the kernel table */ 1351 k->k_state = KS_ADD; 1352 if (ag->ag_state & AGS_GATEWAY) 1353 k->k_state |= KS_GATEWAY; 1354 k->k_gate = ag->ag_gate; 1355 k->k_metric = ag->ag_metric; 1356 return; 1357 } 1358 1359 if (k->k_state & KS_STATIC) 1360 return; 1361 1362 /* modify existing kernel entry if necessary */ 1363 if (k->k_gate != ag->ag_gate 1364 || k->k_metric != ag->ag_metric) { 1365 /* Must delete bad interface routes etc. to change them. */ 1366 if (k->k_metric == HOPCNT_INFINITY) 1367 k->k_state |= KS_DEL_ADD; 1368 k->k_gate = ag->ag_gate; 1369 k->k_metric = ag->ag_metric; 1370 k->k_state |= KS_CHANGE; 1371 } 1372 1373 /* If the daemon thinks the route should exist, forget 1374 * about any redirections. 1375 * If the daemon thinks the route should exist, eventually 1376 * override manual intervention by the operator. 
1377 */ 1378 if ((k->k_state & (KS_DYNAMIC | KS_DELETED)) != 0) { 1379 k->k_state &= ~KS_DYNAMIC; 1380 k->k_state |= (KS_ADD | KS_DEL_ADD); 1381 } 1382 1383 if ((k->k_state & KS_GATEWAY) 1384 && !(ag->ag_state & AGS_GATEWAY)) { 1385 k->k_state &= ~KS_GATEWAY; 1386 k->k_state |= (KS_ADD | KS_DEL_ADD); 1387 } else if (!(k->k_state & KS_GATEWAY) 1388 && (ag->ag_state & AGS_GATEWAY)) { 1389 k->k_state |= KS_GATEWAY; 1390 k->k_state |= (KS_ADD | KS_DEL_ADD); 1391 } 1392 1393 /* Deleting-and-adding is necessary to change aspects of a route. 1394 * Just delete instead of deleting and then adding a bad route. 1395 * Otherwise, we want to keep the route in the kernel. 1396 */ 1397 if (k->k_metric == HOPCNT_INFINITY 1398 && (k->k_state & KS_DEL_ADD)) 1399 k->k_state |= KS_DELETE; 1400 else 1401 k->k_state &= ~KS_DELETE; 1402 #undef RT 1403 } 1404 1405 1406 /* ARGSUSED */ 1407 static int 1408 walk_kern(struct radix_node *rn, 1409 struct walkarg *argp UNUSED) 1410 { 1411 #define RT ((struct rt_entry *)rn) 1412 char metric, pref; 1413 u_int ags = 0; 1414 1415 1416 /* Do not install synthetic routes */ 1417 if (RT->rt_state & RS_NET_SYN) 1418 return 0; 1419 1420 if (!(RT->rt_state & RS_IF)) { 1421 /* This is an ordinary route, not for an interface. 1422 */ 1423 1424 /* aggregate, ordinary good routes without regard to 1425 * their metric 1426 */ 1427 pref = 1; 1428 ags |= (AGS_GATEWAY | AGS_SUPPRESS | AGS_AGGREGATE); 1429 1430 /* Do not install host routes directly to hosts, to avoid 1431 * interfering with ARP entries in the kernel table. 1432 */ 1433 if (RT_ISHOST(RT) 1434 && ntohl(RT->rt_dst) == RT->rt_gate) 1435 return 0; 1436 1437 } else { 1438 /* This is an interface route. 1439 * Do not install routes for "external" remote interfaces. 1440 */ 1441 if (RT->rt_ifp != 0 && (RT->rt_ifp->int_state & IS_EXTERNAL)) 1442 return 0; 1443 1444 /* Interfaces should override received routes. 
		 */
		pref = 0;
		ags |= (AGS_IF | AGS_CORS_GATE);

		/* If it is not an interface, or an alias for an interface,
		 * it must be a "gateway."
		 *
		 * If it is a "remote" interface, it is also a "gateway" to
		 * the kernel if is not an alias.
		 */
		if (RT->rt_ifp == 0
		    || (RT->rt_ifp->int_state & IS_REMOTE))
			ags |= (AGS_GATEWAY | AGS_SUPPRESS | AGS_AGGREGATE);
	}

	/* If RIP is off and IRDP is on, let the route to the discovered
	 * route suppress any RIP routes.  Eventually the RIP routes
	 * will time-out and be deleted.  This reaches the steady-state
	 * quicker.
	 */
	if ((RT->rt_state & RS_RDISC) && rip_sock < 0)
		ags |= AGS_CORS_GATE;

	metric = RT->rt_metric;
	if (metric == HOPCNT_INFINITY) {
		/* the route is dead, so try hard to aggregate it away */
		pref = HOPCNT_INFINITY;
		ags |= (AGS_FINE_GATE | AGS_SUPPRESS);
		ags &= ~(AGS_IF | AGS_CORS_GATE);
	}

	ag_check(RT->rt_dst, RT->rt_mask, RT->rt_gate, 0,
		 metric,pref, 0, 0, ags, kern_out);
	return 0;
#undef RT
}


/* Update the kernel table to match the daemon table.
 */
static void
fix_kern(void)
{
	int i;
	struct khash *k, **pk;


	need_kern = age_timer;

	/* Walk daemon table, updating the copy of the kernel table.
	 */
	(void)rn_walktree(rhead, walk_kern, 0);
	ag_flush(0,0,kern_out);

	/* Replay the accumulated KS_* state bits as actual kernel
	 * ioctls.  pk is a pointer-to-pointer so entries can be
	 * unlinked from the hash chain in place.
	 */
	for (i = 0; i < KHASH_SIZE; i++) {
		for (pk = &khash_bins[i]; (k = *pk) != NULL; ) {
			/* Do not touch static routes */
			if (k->k_state & KS_STATIC) {
				kern_check_static(k,0);
				pk = &k->k_next;
				continue;
			}

			/* check hold on routes deleted by the operator */
			if (k->k_keep > now.tv_sec) {
				/* ensure we check when the hold is over */
				LIM_SEC(need_kern, k->k_keep);
				/* mark for the next cycle */
				k->k_state |= KS_DELETE;
				pk = &k->k_next;
				continue;
			}

			if ((k->k_state & KS_DELETE)
			    && !(k->k_state & KS_DYNAMIC)) {
				kern_ioctl(k, RTM_DELETE, 0);
				*pk = k->k_next;
				free(k);
				continue;
			}

			if (k->k_state & KS_DEL_ADD)
				kern_ioctl(k, RTM_DELETE, 0);

			if (k->k_state & KS_ADD) {
				kern_ioctl(k, RTM_ADD,
					   ((0 != (k->k_state & (KS_GATEWAY
							| KS_DYNAMIC)))
					    ? RTF_GATEWAY : 0));
			} else if (k->k_state & KS_CHANGE) {
				kern_ioctl(k, RTM_CHANGE,
					   ((0 != (k->k_state & (KS_GATEWAY
							| KS_DYNAMIC)))
					    ? RTF_GATEWAY : 0));
			}
			k->k_state &= ~(KS_ADD|KS_CHANGE|KS_DEL_ADD);

			/* Mark this route to be deleted in the next cycle.
			 * This deletes routes that disappear from the
			 * daemon table, since the normal aging code
			 * will clear the bit for routes that have not
			 * disappeared from the daemon table.
			 */
			k->k_state |= KS_DELETE;
			pk = &k->k_next;
		}
	}
}


/* Delete a static route in the image of the kernel table.
 * gate == 0 matches any gateway; gone != 0 means the route is already
 * out of the kernel, so start the keep-timer.
 */
void
del_static(naddr dst,
	   naddr mask,
	   naddr gate,
	   int gone)
{
	struct khash *k;
	struct rt_entry *rt;

	/* Just mark it in the table to be deleted next time the kernel
	 * table is updated.
	 * If it has already been deleted, mark it as such, and set its
	 * keep-timer so that it will not be deleted again for a while.
	 * This lets the operator delete a route added by the daemon
	 * and add a replacement.
	 */
	k = kern_find(dst, mask, 0);
	if (k != NULL && (gate == 0 || k->k_gate == gate)) {
		k->k_state &= ~(KS_STATIC | KS_DYNAMIC | KS_CHECK);
		k->k_state |= KS_DELETE;
		if (gone) {
			k->k_state |= KS_DELETED;
			k->k_keep = now.tv_sec + K_KEEP_LIM;
		}
	}

	/* also poison the matching daemon-table route, if static */
	rt = rtget(dst, mask);
	if (rt != NULL && (rt->rt_state & RS_STATIC))
		rtbad(rt);
}


/* Delete all routes generated from ICMP Redirects that use a given gateway,
 * as well as old redirected routes.
 */
void
del_redirects(naddr bad_gate,
	      time_t old)
{
	int i;
	struct khash *k;


	for (i = 0; i < KHASH_SIZE; i++) {
		for (k = khash_bins[i]; k != NULL; k = k->k_next) {
			if (!(k->k_state & KS_DYNAMIC)
			    || (k->k_state & KS_STATIC))
				continue;

			/* When acting as supplier, flush every dynamic
			 * route; otherwise only those via bad_gate or
			 * older than "old".
			 */
			if (k->k_gate != bad_gate
			    && k->k_redirect_time > old
			    && !supplier)
				continue;

			k->k_state |= KS_DELETE;
			k->k_state &= ~KS_DYNAMIC;
			need_kern.tv_sec = now.tv_sec;
			trace_act("mark redirected %s --> %s for deletion",
				  addrname(k->k_dst, k->k_mask, 0),
				  naddr_ntoa(k->k_gate));
		}
	}
}


/* Start the daemon tables.
 */
extern int max_keylen;

void
rtinit(void)
{
	int i;
	struct ag_info *ag;

	/* Initialize the radix trees */
	max_keylen = sizeof(struct sockaddr_in);
	rn_init();
	rn_inithead(&rhead, 32);

	/* mark all of the slots in the table free */
	ag_avail = ag_slots;
	for (ag = ag_slots, i = 1; i < NUM_AG_SLOTS; i++) {
		ag->ag_fine = ag+1;
		ag++;
	}
}


/* Scratch sockaddrs reused by the radix-tree lookup/insert/delete
 * helpers below.  mask_sock is modified in place by masktrim() before
 * each use, so these are not thread-safe (the daemon is single-threaded).
 */
#ifdef _HAVE_SIN_LEN
static struct sockaddr_in dst_sock = {sizeof(dst_sock), AF_INET, 0, {0}, {0}};
static struct sockaddr_in mask_sock = {sizeof(mask_sock), AF_INET, 0, {0}, {0}};
#else
static struct sockaddr_in_new dst_sock = {_SIN_ADDR_SIZE, AF_INET};
static struct sockaddr_in_new mask_sock = {_SIN_ADDR_SIZE, AF_INET};
#endif


/* Request a flash (triggered) update, delayed by MIN_WAITTIME so that
 * announcements from other routers can be batched into it.
 */
static void
set_need_flash(void)
{
	if (!need_flash) {
		need_flash = 1;
		/* Do not send the flash update immediately.  Wait a little
		 * while to hear from other routers.
		 */
		no_flash.tv_sec = now.tv_sec + MIN_WAITTIME;
	}
}


/* Get a particular routing table entry
 * Returns 0 unless both dst and mask match exactly.
 */
struct rt_entry *
rtget(naddr dst, naddr mask)
{
	struct rt_entry *rt;

	dst_sock.sin_addr.s_addr = dst;
	mask_sock.sin_addr.s_addr = htonl(mask);
	masktrim(&mask_sock);
	rt = (struct rt_entry *)rhead->rnh_lookup(&dst_sock,&mask_sock,rhead);
	if (!rt
	    || rt->rt_dst != dst
	    || rt->rt_mask != mask)
		return 0;

	return rt;
}


/* Find a route to dst as the kernel would.
1689 */ 1690 struct rt_entry * 1691 rtfind(naddr dst) 1692 { 1693 dst_sock.sin_addr.s_addr = dst; 1694 return (struct rt_entry *)rhead->rnh_matchaddr(&dst_sock, rhead); 1695 } 1696 1697 1698 /* add a route to the table 1699 */ 1700 void 1701 rtadd(naddr dst, 1702 naddr mask, 1703 u_int state, /* rt_state for the entry */ 1704 struct rt_spare *new) 1705 { 1706 struct rt_entry *rt; 1707 naddr smask; 1708 int i; 1709 struct rt_spare *rts; 1710 1711 rt = (struct rt_entry *)rtmalloc(sizeof (*rt), "rtadd"); 1712 memset(rt, 0, sizeof(*rt)); 1713 for (rts = rt->rt_spares, i = NUM_SPARES; i != 0; i--, rts++) 1714 rts->rts_metric = HOPCNT_INFINITY; 1715 1716 rt->rt_nodes->rn_key = (caddr_t)&rt->rt_dst_sock; 1717 rt->rt_dst = dst; 1718 rt->rt_dst_sock.sin_family = AF_INET; 1719 #ifdef _HAVE_SIN_LEN 1720 rt->rt_dst_sock.sin_len = dst_sock.sin_len; 1721 #endif 1722 if (mask != HOST_MASK) { 1723 smask = std_mask(dst); 1724 if ((smask & ~mask) == 0 && mask > smask) 1725 state |= RS_SUBNET; 1726 } 1727 mask_sock.sin_addr.s_addr = htonl(mask); 1728 masktrim(&mask_sock); 1729 rt->rt_mask = mask; 1730 rt->rt_state = state; 1731 rt->rt_spares[0] = *new; 1732 rt->rt_time = now.tv_sec; 1733 rt->rt_poison_metric = HOPCNT_INFINITY; 1734 rt->rt_seqno = update_seqno; 1735 1736 if (++total_routes == MAX_ROUTES) 1737 msglog("have maximum (%d) routes", total_routes); 1738 if (TRACEACTIONS) 1739 trace_add_del("Add", rt); 1740 1741 need_kern.tv_sec = now.tv_sec; 1742 set_need_flash(); 1743 1744 if (0 == rhead->rnh_addaddr(&rt->rt_dst_sock, &mask_sock, 1745 rhead, rt->rt_nodes)) { 1746 msglog("rnh_addaddr() failed for %s mask=%#lx", 1747 naddr_ntoa(dst), (u_long)mask); 1748 free(rt); 1749 } 1750 } 1751 1752 1753 /* notice a changed route 1754 */ 1755 void 1756 rtchange(struct rt_entry *rt, 1757 u_int state, /* new state bits */ 1758 struct rt_spare *new, 1759 char *label) 1760 { 1761 if (rt->rt_metric != new->rts_metric) { 1762 /* Fix the kernel immediately if it seems the route 1763 * has gone 
bad, since there may be a working route that 1764 * aggregates this route. 1765 */ 1766 if (new->rts_metric == HOPCNT_INFINITY) { 1767 need_kern.tv_sec = now.tv_sec; 1768 if (new->rts_time >= now.tv_sec - EXPIRE_TIME) 1769 new->rts_time = now.tv_sec - EXPIRE_TIME; 1770 } 1771 rt->rt_seqno = update_seqno; 1772 set_need_flash(); 1773 } 1774 1775 if (rt->rt_gate != new->rts_gate) { 1776 need_kern.tv_sec = now.tv_sec; 1777 rt->rt_seqno = update_seqno; 1778 set_need_flash(); 1779 } 1780 1781 state |= (rt->rt_state & RS_SUBNET); 1782 1783 /* Keep various things from deciding ageless routes are stale. 1784 */ 1785 if (!AGE_RT(state, new->rts_ifp)) 1786 new->rts_time = now.tv_sec; 1787 1788 if (TRACEACTIONS) 1789 trace_change(rt, state, new, 1790 label ? label : "Chg "); 1791 1792 rt->rt_state = state; 1793 rt->rt_spares[0] = *new; 1794 } 1795 1796 1797 /* check for a better route among the spares 1798 */ 1799 static struct rt_spare * 1800 rts_better(struct rt_entry *rt) 1801 { 1802 struct rt_spare *rts, *rts1; 1803 int i; 1804 1805 /* find the best alternative among the spares */ 1806 rts = rt->rt_spares+1; 1807 for (i = NUM_SPARES, rts1 = rts+1; i > 2; i--, rts1++) { 1808 if (BETTER_LINK(rt,rts1,rts)) 1809 rts = rts1; 1810 } 1811 1812 return rts; 1813 } 1814 1815 1816 /* switch to a backup route 1817 */ 1818 void 1819 rtswitch(struct rt_entry *rt, 1820 struct rt_spare *rts) 1821 { 1822 struct rt_spare swap; 1823 char label[10]; 1824 1825 1826 /* Do not change permanent routes */ 1827 if (0 != (rt->rt_state & (RS_MHOME | RS_STATIC | RS_RDISC 1828 | RS_NET_SYN | RS_IF))) 1829 return; 1830 1831 /* find the best alternative among the spares */ 1832 if (rts == NULL) 1833 rts = rts_better(rt); 1834 1835 /* Do not bother if it is not worthwhile. 
1836 */ 1837 if (!BETTER_LINK(rt, rts, rt->rt_spares)) 1838 return; 1839 1840 swap = rt->rt_spares[0]; 1841 (void)sprintf(label, "Use #%d", (int)(rts - rt->rt_spares)); 1842 rtchange(rt, rt->rt_state & ~(RS_NET_SYN | RS_RDISC), rts, label); 1843 if (swap.rts_metric == HOPCNT_INFINITY) { 1844 *rts = rts_empty; 1845 } else { 1846 *rts = swap; 1847 } 1848 } 1849 1850 1851 void 1852 rtdelete(struct rt_entry *rt) 1853 { 1854 struct khash *k; 1855 1856 1857 if (TRACEACTIONS) 1858 trace_add_del("Del", rt); 1859 1860 k = kern_find(rt->rt_dst, rt->rt_mask, 0); 1861 if (k != NULL) { 1862 k->k_state |= KS_DELETE; 1863 need_kern.tv_sec = now.tv_sec; 1864 } 1865 1866 dst_sock.sin_addr.s_addr = rt->rt_dst; 1867 mask_sock.sin_addr.s_addr = htonl(rt->rt_mask); 1868 masktrim(&mask_sock); 1869 if (rt != (struct rt_entry *)rhead->rnh_deladdr(&dst_sock, &mask_sock, 1870 rhead)) { 1871 msglog("rnh_deladdr() failed"); 1872 } else { 1873 free(rt); 1874 total_routes--; 1875 } 1876 } 1877 1878 1879 void 1880 rts_delete(struct rt_entry *rt, 1881 struct rt_spare *rts) 1882 { 1883 trace_upslot(rt, rts, &rts_empty); 1884 *rts = rts_empty; 1885 } 1886 1887 1888 /* Get rid of a bad route, and try to switch to a replacement. 1889 */ 1890 static void 1891 rtbad(struct rt_entry *rt) 1892 { 1893 struct rt_spare new; 1894 1895 /* Poison the route */ 1896 new = rt->rt_spares[0]; 1897 new.rts_metric = HOPCNT_INFINITY; 1898 rtchange(rt, rt->rt_state & ~(RS_IF | RS_LOCAL | RS_STATIC), &new, 0); 1899 rtswitch(rt, 0); 1900 } 1901 1902 1903 /* Junk a RS_NET_SYN or RS_LOCAL route, 1904 * unless it is needed by another interface. 1905 */ 1906 void 1907 rtbad_sub(struct rt_entry *rt) 1908 { 1909 struct interface *ifp, *ifp1; 1910 struct intnet *intnetp; 1911 u_int state; 1912 1913 1914 ifp1 = NULL; 1915 state = 0; 1916 1917 if (rt->rt_state & RS_LOCAL) { 1918 /* Is this the route through loopback for the interface? 
1919 * If so, see if it is used by any other interfaces, such 1920 * as a point-to-point interface with the same local address. 1921 */ 1922 LIST_FOREACH(ifp, &ifnet, int_list) { 1923 /* Retain it if another interface needs it. 1924 */ 1925 if (ifp->int_addr == rt->rt_ifp->int_addr) { 1926 state |= RS_LOCAL; 1927 ifp1 = ifp; 1928 break; 1929 } 1930 } 1931 1932 } 1933 1934 if (!(state & RS_LOCAL)) { 1935 /* Retain RIPv1 logical network route if there is another 1936 * interface that justifies it. 1937 */ 1938 if (rt->rt_state & RS_NET_SYN) { 1939 LIST_FOREACH(ifp, &ifnet, int_list) { 1940 if ((ifp->int_state & IS_NEED_NET_SYN) 1941 && rt->rt_mask == ifp->int_std_mask 1942 && rt->rt_dst == ifp->int_std_addr) { 1943 state |= RS_NET_SYN; 1944 ifp1 = ifp; 1945 break; 1946 } 1947 } 1948 } 1949 1950 /* or if there is an authority route that needs it. */ 1951 for (intnetp = intnets; 1952 intnetp != NULL; 1953 intnetp = intnetp->intnet_next) { 1954 if (intnetp->intnet_addr == rt->rt_dst 1955 && intnetp->intnet_mask == rt->rt_mask) { 1956 state |= (RS_NET_SYN | RS_NET_INT); 1957 break; 1958 } 1959 } 1960 } 1961 1962 if (ifp1 != NULL || (state & RS_NET_SYN)) { 1963 struct rt_spare new = rt->rt_spares[0]; 1964 new.rts_ifp = ifp1; 1965 rtchange(rt, ((rt->rt_state & ~(RS_NET_SYN|RS_LOCAL)) | state), 1966 &new, 0); 1967 } else { 1968 rtbad(rt); 1969 } 1970 } 1971 1972 1973 /* Called while walking the table looking for sick interfaces 1974 * or after a time change. 
 */
/* ARGSUSED */
int
walk_bad(struct radix_node *rn,
	 struct walkarg *argp UNUSED)
{
#define RT ((struct rt_entry *)rn)
	struct rt_spare *rts;
	int i;


	/* fix any spare routes through the interface
	 * (slots 1..NUM_SPARES-1; slot 0 is handled below)
	 */
	rts = RT->rt_spares;
	for (i = NUM_SPARES; i != 1; i--) {
		rts++;
		if (rts->rts_metric < HOPCNT_INFINITY
		    && (rts->rts_ifp == NULL
			|| (rts->rts_ifp->int_state & IS_BROKE)))
			rts_delete(RT, rts);
	}

	/* Deal with the main route
	 */
	/* finished if it has been handled before or if its interface is ok
	 */
	if (RT->rt_ifp == 0 || !(RT->rt_ifp->int_state & IS_BROKE))
		return 0;

	/* Bad routes for other than interfaces are easy.
	 */
	if (0 == (RT->rt_state & (RS_IF | RS_NET_SYN | RS_LOCAL))) {
		rtbad(RT);
		return 0;
	}

	rtbad_sub(RT);
	return 0;
#undef RT
}


/* Check the age of an individual route.
 */
/* ARGSUSED */
static int
walk_age(struct radix_node *rn,
	 struct walkarg *argp UNUSED)
{
#define RT ((struct rt_entry *)rn)
	struct interface *ifp;
	struct rt_spare *rts;
	int i;


	/* age all of the spare routes, including the primary route
	 * currently in use
	 */
	rts = RT->rt_spares;
	for (i = NUM_SPARES; i != 0; i--, rts++) {

		ifp = rts->rts_ifp;
		/* i == NUM_SPARES on the first pass, i.e. slot 0 */
		if (i == NUM_SPARES) {
			if (!AGE_RT(RT->rt_state, ifp)) {
				/* Keep various things from deciding ageless
				 * routes are stale
				 */
				rts->rts_time = now.tv_sec;
				continue;
			}

			/* forget RIP routes after RIP has been turned off.
			 */
			if (rip_sock < 0) {
				rtdelete(RT);
				return 0;
			}
		}

		/* age failing routes
		 * (penalize routes via the gateway being aged out)
		 */
		if (age_bad_gate == rts->rts_gate
		    && rts->rts_time >= now_stale) {
			rts->rts_time -= SUPPLY_INTERVAL;
		}

		/* trash the spare routes when they go bad;
		 * the primary (i == NUM_SPARES) is handled below instead
		 */
		if (rts->rts_metric < HOPCNT_INFINITY
		    && now_garbage > rts->rts_time
		    && i != NUM_SPARES)
			rts_delete(RT, rts);
	}


	/* finished if the active route is still fresh */
	if (now_stale <= RT->rt_time)
		return 0;

	/* try to switch to an alternative */
	rtswitch(RT, 0);

	/* Delete a dead route after it has been publicly mourned. */
	if (now_garbage > RT->rt_time) {
		rtdelete(RT);
		return 0;
	}

	/* Start poisoning a bad route before deleting it. */
	if (now.tv_sec - RT->rt_time > EXPIRE_TIME) {
		struct rt_spare new = RT->rt_spares[0];
		new.rts_metric = HOPCNT_INFINITY;
		rtchange(RT, RT->rt_state, &new, 0);
	}
	return 0;
	/* NOTE(review): no matching #undef RT here, unlike the other
	 * walkers; RT therefore stays defined for the rest of the file.
	 * Harmless as the code stands, but worth confirming upstream.
	 */
}


/* Watch for dead routes and interfaces.
 * bad_gate, if non-zero, identifies a gateway whose routes should be
 * aged aggressively (see walk_age via age_bad_gate).
 */
void
age(naddr bad_gate)
{
	struct interface *ifp;
	int need_query = 0;

	/* If not listening to RIP, there is no need to age the routes in
	 * the table.
	 */
	age_timer.tv_sec = (now.tv_sec
			    + ((rip_sock < 0) ? NEVER : SUPPLY_INTERVAL));

	/* Check for dead IS_REMOTE interfaces by timing their
	 * transmissions.
	 */
	LIST_FOREACH(ifp, &ifnet, int_list) {
		if (!(ifp->int_state & IS_REMOTE))
			continue;

		/* ignore unreachable remote interfaces */
		if (!check_remote(ifp))
			continue;

		/* Restore remote interface that has become reachable
		 */
		if (ifp->int_state & IS_BROKE)
			if_ok(ifp, "remote ");

		if (ifp->int_act_time != NEVER
		    && now.tv_sec - ifp->int_act_time > EXPIRE_TIME) {
			msglog("remote interface %s to %s timed out after"
			       " %ld:%ld",
			       ifp->int_name,
			       naddr_ntoa(ifp->int_dstaddr),
			       (long)(now.tv_sec - ifp->int_act_time)/60,
			       (long)(now.tv_sec - ifp->int_act_time)%60);
			if_sick(ifp);
		}

		/* If we have not heard from the other router
		 * recently, ask it.
		 */
		if (now.tv_sec >= ifp->int_query_time) {
			ifp->int_query_time = NEVER;
			need_query = 1;
		}
	}

	/* Age routes. */
	age_bad_gate = bad_gate;
	(void)rn_walktree(rhead, walk_age, 0);

	/* delete old redirected routes to keep the kernel table small
	 * and prevent blackholes
	 */
	del_redirects(bad_gate, now.tv_sec-STALE_TIME);

	/* Update the kernel routing table. */
	fix_kern();

	/* poke reticent remote gateways */
	if (need_query)
		rip_query();
}