/*
 * Copyright (c) 1983, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "defs.h"

#ifdef __NetBSD__
__RCSID("$NetBSD$");
#elif defined(__FreeBSD__)
__RCSID("$FreeBSD$");
#else
__RCSID("$Revision: 2.27 $");
#ident "$Revision: 2.27 $"
#endif


u_int update_seqno;


/* walk the tree of routes with this for output
 */
struct {
        struct sockaddr_in to;
        naddr to_mask;
        naddr to_net;
        naddr to_std_mask;
        naddr to_std_net;
        struct interface *ifp;          /* usually output interface */
        struct auth *a;
        char metric;                    /* adjust metrics by interface */
        int npackets;
        int gen_limit;
        u_int state;
#define WS_ST_FLASH     0x001           /* send only changed routes */
#define WS_ST_RIP2_ALL  0x002           /* send full featured RIPv2 */
#define WS_ST_AG        0x004           /* ok to aggregate subnets */
#define WS_ST_SUPER_AG  0x008           /* ok to aggregate networks */
#define WS_ST_QUERY     0x010           /* responding to a query */
#define WS_ST_TO_ON_NET 0x020           /* sending onto one of our nets */
#define WS_ST_DEFAULT   0x040           /* faking a default */
} ws;

/* A buffer for what can be heard by both RIPv1 and RIPv2 listeners */
struct ws_buf v12buf;
union pkt_buf ripv12_buf;

/* Another for only RIPv2 listeners */
struct ws_buf v2buf;
union pkt_buf rip_v2_buf;


void
bufinit(void)
{
        ripv12_buf.rip.rip_cmd = RIPCMD_RESPONSE;
        v12buf.buf = &ripv12_buf.rip;
        v12buf.base = &v12buf.buf->rip_nets[0];

        rip_v2_buf.rip.rip_cmd = RIPCMD_RESPONSE;
        rip_v2_buf.rip.rip_vers = RIPv2;
        v2buf.buf = &rip_v2_buf.rip;
        v2buf.base = &v2buf.buf->rip_nets[0];
}
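
/* bufinit() primes the two static response buffers above with the
 * RIPCMD_RESPONSE command byte (and the RIPv2 version byte for the
 * RIPv2-only buffer).  It is expected to run once at startup, before
 * supply() below fills and flushes those buffers.
 */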


/* Send the contents of the global buffer via the non-multicast socket
 */
int                                     /* <0 on failure */
output(enum output_type type,
       struct sockaddr_in *dst,         /* send to here */
       struct interface *ifp,
       struct rip *buf,
       int size)                        /* this many bytes */
{
        struct sockaddr_in osin;
        int flags;
        const char *msg;
        int res;
        int soc;
        int serrno;

        osin = *dst;
        if (osin.sin_port == 0)
                osin.sin_port = htons(RIP_PORT);
#ifdef _HAVE_SIN_LEN
        if (osin.sin_len == 0)
                osin.sin_len = sizeof(osin);
#endif

        soc = rip_sock;
        flags = 0;

        switch (type) {
        case OUT_QUERY:
                msg = "Answer Query";
                if (soc < 0)
                        soc = ifp->int_rip_sock;
                break;
        case OUT_UNICAST:
                msg = "Send";
                if (soc < 0)
                        soc = ifp->int_rip_sock;
                flags = MSG_DONTROUTE;
                break;
        case OUT_BROADCAST:
                if (ifp->int_if_flags & IFF_POINTOPOINT) {
                        msg = "Send";
                } else {
                        msg = "Send bcast";
                }
                flags = MSG_DONTROUTE;
                break;
        case OUT_MULTICAST:
                if (ifp->int_if_flags & IFF_POINTOPOINT) {
                        msg = "Send pt-to-pt";
                } else if (ifp->int_state & IS_DUP) {
                        trace_act("abort multicast output via %s"
                                  " with duplicate address",
                                  ifp->int_name);
                        return 0;
                } else {
                        msg = "Send mcast";
                        if (rip_sock_mcast != ifp) {
                                struct ip_mreqn mreqn;

                                memset(&mreqn, 0, sizeof(struct ip_mreqn));
                                mreqn.imr_ifindex = ifp->int_index;
                                if (0 > setsockopt(rip_sock,
                                                   IPPROTO_IP,
                                                   IP_MULTICAST_IF,
                                                   &mreqn,
                                                   sizeof(mreqn))) {
                                        serrno = errno;
                                        LOGERR("setsockopt(rip_sock, "
                                               "IP_MULTICAST_IF)");
                                        errno = serrno;
                                        ifp = 0;
                                        return -1;
                                }
                                rip_sock_mcast = ifp;
                        }
                        osin.sin_addr.s_addr = htonl(INADDR_RIP_GROUP);
                }
                break;

        case NO_OUT_MULTICAST:
        case NO_OUT_RIPV2:
        default:
#ifdef DEBUG
                abort();
#endif
                return -1;
        }

        trace_rip(msg, "to", &osin, ifp, buf, size);

        res = sendto(soc, buf, size, flags,
                     (struct sockaddr *)&osin, sizeof(osin));
        if (res < 0
            && (ifp == 0 || !(ifp->int_state & IS_BROKE))) {
                serrno = errno;
                msglog("%s sendto(%s%s%s.%d): %s", msg,
                       ifp != 0 ? ifp->int_name : "",
                       ifp != 0 ? ", " : "",
                       inet_ntoa(osin.sin_addr),
                       ntohs(osin.sin_port),
                       strerror(errno));
                errno = serrno;
        }

        return res;
}
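
/* As used below, callers hand output() a fully formed RIP packet:
 * supply_write() sends the shared response buffers, and rip_query()
 * sends a single-entry RIPCMD_REQUEST with
 *      output(type, &dst, ifp, &buf, sizeof(buf))
 * marking the interface sick if the send fails.
 */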


/* Find the first key for a packet to send.
 * Try for a key that is eligible and has not expired, but settle for
 * the last key if they have all expired.
 * If no key is ready yet, give up.
 */
struct auth *
find_auth(struct interface *ifp)
{
        struct auth *ap, *res;
        int i;


        if (ifp == 0)
                return 0;

        res = 0;
        ap = ifp->int_auth;
        for (i = 0; i < MAX_AUTH_KEYS; i++, ap++) {
                /* stop looking after the last key */
                if (ap->type == RIP_AUTH_NONE)
                        break;

                /* ignore keys that are not ready yet */
                if ((u_long)ap->start > (u_long)clk.tv_sec)
                        continue;

                if ((u_long)ap->end < (u_long)clk.tv_sec) {
                        /* note best expired password as a fall-back */
                        if (res == 0 || (u_long)ap->end > (u_long)res->end)
                                res = ap;
                        continue;
                }

                /* note key with the best future */
                if (res == 0 || (u_long)res->end < (u_long)ap->end)
                        res = ap;
        }
        return res;
}


void
clr_ws_buf(struct ws_buf *wb,
           struct auth *ap)
{
        struct netauth *na;

        wb->lim = wb->base + NETS_LEN;
        wb->n = wb->base;
        memset(wb->n, 0, NETS_LEN*sizeof(*wb->n));

        /* (start to) install authentication if appropriate
         */
        if (ap == 0)
                return;

        na = (struct netauth*)wb->n;
        if (ap->type == RIP_AUTH_PW) {
                na->a_family = RIP_AF_AUTH;
                na->a_type = RIP_AUTH_PW;
                memcpy(na->au.au_pw, ap->key, sizeof(na->au.au_pw));
                wb->n++;

        } else if (ap->type == RIP_AUTH_MD5) {
                na->a_family = RIP_AF_AUTH;
                na->a_type = RIP_AUTH_MD5;
                na->au.a_md5.md5_keyid = ap->keyid;
                na->au.a_md5.md5_auth_len = RIP_AUTH_MD5_KEY_LEN;
                na->au.a_md5.md5_seqno = htonl(clk.tv_sec);
                wb->n++;
                wb->lim--;              /* make room for trailer */
        }
}


void
end_md5_auth(struct ws_buf *wb,
             struct auth *ap)
{
        struct netauth *na, *na2;
        MD5_CTX md5_ctx;
        int len;


        na = (struct netauth*)wb->base;
        na2 = (struct netauth*)wb->n;
        len = (char *)na2-(char *)wb->buf;
        na2->a_family = RIP_AF_AUTH;
        na2->a_type = htons(1);
        na->au.a_md5.md5_pkt_len = htons(len);
        MD5Init(&md5_ctx);
        MD5Update(&md5_ctx, (u_char *)wb->buf, len + RIP_AUTH_MD5_HASH_XTRA);
        MD5Update(&md5_ctx, ap->key, RIP_AUTH_MD5_KEY_LEN);
        MD5Final(na2->au.au_pw, &md5_ctx);
        wb->n++;
}
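
/* With keyed MD5 the finished packet therefore looks like (in the style
 * of RFC 2082): the RIP header, then an authentication entry (family
 * RIP_AF_AUTH, type RIP_AUTH_MD5, key id, digest length and sequence
 * number) written by clr_ws_buf(), then up to NETS_LEN-2 route entries,
 * then a trailer entry (family RIP_AF_AUTH, type 1) followed by the
 * 16-byte digest of the packet and the secret key, appended here by
 * end_md5_auth().  clr_ws_buf() reserves the trailer slot by lowering
 * wb->lim.
 */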


/* Send the buffer
 */
static void
supply_write(struct ws_buf *wb)
{
        /* Output multicast only if legal.
         * If we would multicast and it would be illegal, then discard the
         * packet.
         */
        switch (wb->type) {
        case NO_OUT_MULTICAST:
                trace_pkt("skip multicast to %s because impossible",
                          naddr_ntoa(ws.to.sin_addr.s_addr));
                break;
        case NO_OUT_RIPV2:
                break;
        default:
                if (ws.a != 0 && ws.a->type == RIP_AUTH_MD5)
                        end_md5_auth(wb,ws.a);
                if (output(wb->type, &ws.to, ws.ifp, wb->buf,
                           ((char *)wb->n - (char*)wb->buf)) < 0
                    && ws.ifp != 0)
                        if_sick(ws.ifp);
                ws.npackets++;
                break;
        }

        clr_ws_buf(wb,ws.a);
}


/* put an entry into the packet
 */
static void
supply_out(struct ag_info *ag)
{
        int i;
        naddr mask, v1_mask, dst_h, ddst_h = 0;
        struct ws_buf *wb;


        /* Skip this route if doing a flash update and it and the routes
         * it aggregates have not changed recently.
         */
        if (ag->ag_seqno < update_seqno
            && (ws.state & WS_ST_FLASH))
                return;

        dst_h = ag->ag_dst_h;
        mask = ag->ag_mask;
        v1_mask = ripv1_mask_host(htonl(dst_h),
                                  (ws.state & WS_ST_TO_ON_NET) ? ws.ifp : 0);
        i = 0;

        /* If we are sending RIPv2 packets that cannot (or must not) be
         * heard by RIPv1 listeners, do not worry about sub- or supernets.
         * Subnets (from other networks) can only be sent via multicast.
         * A pair of subnet routes might have been promoted so that they
         * are legal to send by RIPv1.
         * If RIPv1 is off, use the multicast buffer.
         */
        if ((ws.state & WS_ST_RIP2_ALL)
            || ((ag->ag_state & AGS_RIPV2) && v1_mask != mask)) {
                /* use the RIPv2-only buffer */
                wb = &v2buf;

        } else {
                /* use the RIPv1-or-RIPv2 buffer */
                wb = &v12buf;

                /* Convert supernet route into corresponding set of network
                 * routes for RIPv1, but leave non-contiguous netmasks
                 * to ag_check().
                 */
                if (v1_mask > mask
                    && mask + (mask & -mask) == 0) {
                        ddst_h = v1_mask & -v1_mask;
                        i = (v1_mask & ~mask)/ddst_h;

                        if (i > ws.gen_limit) {
                                /* Punt if we would have to generate an
                                 * unreasonable number of routes.
                                 */
                                if (TRACECONTENTS)
                                        trace_misc("sending %s-->%s as 1"
                                                   " instead of %d routes",
                                                   addrname(htonl(dst_h),
                                                            mask, 1),
                                                   naddr_ntoa(ws.to.sin_addr
                                                              .s_addr),
                                                   i+1);
                                i = 0;

                        } else {
                                mask = v1_mask;
                                ws.gen_limit -= i;
                        }
                }
        }

        do {
                wb->n->n_family = RIP_AF_INET;
                wb->n->n_dst = htonl(dst_h);
                /* If the route is from router-discovery or we are
                 * shutting down, admit only a bad metric.
                 */
                wb->n->n_metric = ((stopint || ag->ag_metric < 1)
                                   ? HOPCNT_INFINITY
                                   : ag->ag_metric);
                wb->n->n_metric = htonl(wb->n->n_metric);
                /* Any non-zero bits in the supposedly unused RIPv1 fields
                 * cause the old `routed` to ignore the route.
                 * That means the mask and so forth cannot be sent
                 * in the hybrid RIPv1/RIPv2 mode.
                 */
                if (ws.state & WS_ST_RIP2_ALL) {
                        if (ag->ag_nhop != 0
                            && ((ws.state & WS_ST_QUERY)
                                || (ag->ag_nhop != ws.ifp->int_addr
                                    && on_net(ag->ag_nhop,
                                              ws.ifp->int_net,
                                              ws.ifp->int_mask))))
                                wb->n->n_nhop = ag->ag_nhop;
                        wb->n->n_mask = htonl(mask);
                        wb->n->n_tag = ag->ag_tag;
                }
                dst_h += ddst_h;

                if (++wb->n >= wb->lim)
                        supply_write(wb);
        } while (i-- != 0);
}
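
/* An example of the RIPv1 supernet expansion above: for an aggregated
 * route 130.44.0.0/15 (mask 0xfffe0000), if ripv1_mask_host() returns
 * the natural class B mask 0xffff0000, then ddst_h = 0x10000 and i = 1,
 * so the do/while loop emits two RIPv1-compatible entries, 130.44.0.0
 * and 130.45.0.0.
 */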


/* supply one route from the table
 */
/* ARGSUSED */
static int
walk_supply(struct radix_node *rn,
            struct walkarg *argp UNUSED)
{
#define RT ((struct rt_entry *)rn)
        u_short ags;
        char metric, pref;
        naddr dst, nhop;
        struct rt_spare *rts;
        int i;


        /* Do not advertise external remote interfaces or passive interfaces.
         */
        if ((RT->rt_state & RS_IF)
            && RT->rt_ifp != 0
            && (RT->rt_ifp->int_state & IS_PASSIVE)
            && !(RT->rt_state & RS_MHOME))
                return 0;

        /* If being quiet about our ability to forward, then
         * do not say anything unless responding to a query,
         * except about our main interface.
         */
        if (!supplier && !(ws.state & WS_ST_QUERY)
            && !(RT->rt_state & RS_MHOME))
                return 0;

        dst = RT->rt_dst;

        /* do not collide with the fake default route */
        if (dst == RIP_DEFAULT
            && (ws.state & WS_ST_DEFAULT))
                return 0;

        if (RT->rt_state & RS_NET_SYN) {
                if (RT->rt_state & RS_NET_INT) {
                        /* Do not send manual synthetic network routes
                         * into the subnet.
                         */
                        if (on_net(ws.to.sin_addr.s_addr,
                                   ntohl(dst), RT->rt_mask))
                                return 0;

                } else {
                        /* Do not send automatic synthetic network routes
                         * if they are not needed because no RIPv1 listeners
                         * can hear them.
                         */
                        if (ws.state & WS_ST_RIP2_ALL)
                                return 0;

                        /* Do not send automatic synthetic network routes to
                         * the real subnet.
                         */
                        if (on_net(ws.to.sin_addr.s_addr,
                                   ntohl(dst), RT->rt_mask))
                                return 0;
                }
                nhop = 0;

        } else {
                /* Advertise the next hop if this is not a route for one
                 * of our interfaces and the next hop is on the same
                 * network as the target.
                 * The final determination is made by supply_out().
                 */
                if (!(RT->rt_state & RS_IF)
                    && RT->rt_gate != myaddr
                    && RT->rt_gate != loopaddr)
                        nhop = RT->rt_gate;
                else
                        nhop = 0;
        }

        metric = RT->rt_metric;
        ags = 0;

        if (RT->rt_state & RS_MHOME) {
                /* retain host route of multi-homed servers */
                ;

        } else if (RT_ISHOST(RT)) {
                /* We should always suppress (into existing network routes)
                 * the host routes for the local end of our point-to-point
                 * links.
                 * If we are suppressing host routes in general, then do so.
                 * Avoid advertising host routes onto their own network,
                 * where they should be handled by proxy-ARP.
                 */
                if ((RT->rt_state & RS_LOCAL)
                    || ridhosts
                    || on_net(dst, ws.to_net, ws.to_mask))
                        ags |= AGS_SUPPRESS;

                /* Aggregate stray host routes into network routes if allowed.
                 * We cannot aggregate host routes into small network routes
                 * without confusing RIPv1 listeners into thinking the
                 * network routes are host routes.
                 */
                if ((ws.state & WS_ST_AG) && (ws.state & WS_ST_RIP2_ALL))
                        ags |= AGS_AGGREGATE;

        } else {
                /* Always suppress network routes into other, existing
                 * network routes
                 */
                ags |= AGS_SUPPRESS;

                /* Generate supernets if allowed.
                 * If we can be heard by RIPv1 systems, we will
                 * later convert back to ordinary nets.
                 * This unifies dealing with received supernets.
                 */
                if ((ws.state & WS_ST_AG)
                    && ((RT->rt_state & RS_SUBNET)
                        || (ws.state & WS_ST_SUPER_AG)))
                        ags |= AGS_AGGREGATE;
        }

        /* Do not send RIPv1 advertisements of subnets to other
         * networks.  If possible, multicast them by RIPv2.
         */
        if ((RT->rt_state & RS_SUBNET)
            && !(ws.state & WS_ST_RIP2_ALL)
            && !on_net(dst, ws.to_std_net, ws.to_std_mask))
                ags |= AGS_RIPV2 | AGS_AGGREGATE;


        /* Do not send a route back to where it came from, except in
         * response to a query.  This is "split-horizon".  That means not
         * advertising back to the same network and so via the same interface.
         *
         * We want to suppress routes that might have been fragmented
         * from this route by a RIPv1 router and sent back to us, and so we
         * cannot forget this route here.  Let the split-horizon route
         * suppress the fragmented routes and then itself be forgotten.
         *
         * Include the routes for both ends of point-to-point interfaces
         * among those suppressed by split-horizon, since the other side
         * should know them as well as we do.
         *
         * Notice spare routes with the same metric that we are about to
         * advertise, to split the horizon on redundant, inactive paths.
         *
         * Do not suppress advertisements of interface-related addresses on
         * non-point-to-point interfaces.  This ensures that we have something
         * to say every 30 seconds to help detect broken Ethernets or
         * other interfaces where one packet every 30 seconds costs nothing.
         */
        if (ws.ifp != 0
            && !(ws.state & WS_ST_QUERY)
            && (ws.state & WS_ST_TO_ON_NET)
            && (!(RT->rt_state & RS_IF)
                || ws.ifp->int_if_flags & IFF_POINTOPOINT)) {
                for (rts = RT->rt_spares, i = NUM_SPARES; i != 0; i--, rts++) {
                        if (rts->rts_metric > metric
                            || rts->rts_ifp != ws.ifp)
                                continue;

                        /* If we do not mark the route with AGS_SPLIT_HZ here,
                         * it will be poisoned-reverse, or advertised back
                         * toward its source with an infinite metric.
                         * If we have recently advertised the route with a
                         * better metric than we now have, then we should
                         * poison-reverse the route before suppressing it for
                         * split-horizon.
                         *
                         * In almost all cases, if there is no spare for the
                         * route then it is either old and dead or a brand
                         * new route.  If it is brand new, there is no need
                         * for poison-reverse.  If it is old and dead, it
                         * is already poisoned.
                         */
                        if (RT->rt_poison_time < now_expire
                            || RT->rt_poison_metric >= metric
                            || RT->rt_spares[1].rts_gate == 0) {
                                ags |= AGS_SPLIT_HZ;
                                ags &= ~AGS_SUPPRESS;
                        }
                        metric = HOPCNT_INFINITY;
                        break;
                }
        }

        /* Keep track of the best metric with which the
         * route has been advertised recently.
         */
        if (RT->rt_poison_metric >= metric
            || RT->rt_poison_time < now_expire) {
                RT->rt_poison_time = now.tv_sec;
                RT->rt_poison_metric = metric;
        }

        /* Adjust the outgoing metric by the cost of the link.
         * Avoid aggregation when a route is counting to infinity.
         */
        pref = RT->rt_poison_metric + ws.metric;
        metric += ws.metric;

        /* Do not advertise stable routes that will be ignored,
         * unless we are answering a query.
         * If the route recently was advertised with a metric that
         * would have been less than infinity through this interface,
         * we need to continue to advertise it in order to poison it.
         */
        if (metric >= HOPCNT_INFINITY) {
                if (!(ws.state & WS_ST_QUERY)
                    && (pref >= HOPCNT_INFINITY
                        || RT->rt_poison_time < now_garbage))
                        return 0;

                metric = HOPCNT_INFINITY;
        }

        ag_check(dst, RT->rt_mask, 0, nhop, metric, pref,
                 RT->rt_seqno, RT->rt_tag, ags, supply_out);
        return 0;
#undef RT
}
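
/* To summarize the split-horizon handling above: a route whose path goes
 * out the interface we are sending on has its metric forced to
 * HOPCNT_INFINITY.  Unless it was recently advertised with a better
 * metric (and so must be poison-reversed first), it is also tagged
 * AGS_SPLIT_HZ so that ag_check() can apply split-horizon and omit it
 * from the update.
 */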


/* Supply dst with the contents of the routing tables.
 * If this won't fit in one packet, chop it up into several.
 */
void
supply(struct sockaddr_in *dst,
       struct interface *ifp,           /* output interface */
       enum output_type type,
       int flash,                       /* 1=flash update */
       int vers,                        /* RIP version */
       int passwd_ok)                   /* OK to include cleartext password */
{
        struct rt_entry *rt;
        int def_metric;


        ws.state = 0;
        ws.gen_limit = 1024;

        ws.to = *dst;
        ws.to_std_mask = std_mask(ws.to.sin_addr.s_addr);
        ws.to_std_net = ntohl(ws.to.sin_addr.s_addr) & ws.to_std_mask;

        if (ifp != 0) {
                ws.to_mask = ifp->int_mask;
                ws.to_net = ifp->int_net;
                if (on_net(ws.to.sin_addr.s_addr, ws.to_net, ws.to_mask))
                        ws.state |= WS_ST_TO_ON_NET;

        } else {
                ws.to_mask = ripv1_mask_net(ws.to.sin_addr.s_addr, 0);
                ws.to_net = ntohl(ws.to.sin_addr.s_addr) & ws.to_mask;
                rt = rtfind(dst->sin_addr.s_addr);
                if (rt)
                        ifp = rt->rt_ifp;
        }

        ws.npackets = 0;
        if (flash)
                ws.state |= WS_ST_FLASH;

        if ((ws.ifp = ifp) == 0) {
                ws.metric = 1;
        } else {
                /* Adjust the advertised metric by the outgoing interface
                 * metric.
                 */
                ws.metric = ifp->int_metric + 1 + ifp->int_adj_outmetric;
        }

        ripv12_buf.rip.rip_vers = vers;

        switch (type) {
        case OUT_MULTICAST:
                if (ifp->int_if_flags & IFF_MULTICAST)
                        v2buf.type = OUT_MULTICAST;
                else
                        v2buf.type = NO_OUT_MULTICAST;
                v12buf.type = OUT_BROADCAST;
                break;

        case OUT_QUERY:
                ws.state |= WS_ST_QUERY;
                /* FALLTHROUGH */
        case OUT_BROADCAST:
        case OUT_UNICAST:
                v2buf.type = (vers == RIPv2) ? type : NO_OUT_RIPV2;
                v12buf.type = type;
                break;

        case NO_OUT_MULTICAST:
        case NO_OUT_RIPV2:
                break;                  /* no output */
        }

        if (vers == RIPv2) {
                /* full RIPv2 only if cannot be heard by RIPv1 listeners */
                if (type != OUT_BROADCAST)
                        ws.state |= WS_ST_RIP2_ALL;
                if ((ws.state & WS_ST_QUERY)
                    || !(ws.state & WS_ST_TO_ON_NET)) {
                        ws.state |= (WS_ST_AG | WS_ST_SUPER_AG);
                } else if (ifp == 0 || !(ifp->int_state & IS_NO_AG)) {
                        ws.state |= WS_ST_AG;
                        if (type != OUT_BROADCAST
                            && (ifp == 0
                                || !(ifp->int_state & IS_NO_SUPER_AG)))
                                ws.state |= WS_ST_SUPER_AG;
                }
        }

        ws.a = (vers == RIPv2) ? find_auth(ifp) : 0;
        if (!passwd_ok && ws.a != 0 && ws.a->type == RIP_AUTH_PW)
                ws.a = 0;
        clr_ws_buf(&v12buf,ws.a);
        clr_ws_buf(&v2buf,ws.a);

        /* Fake a default route if asked and if there is not already
         * a better, real default route.
         */
        if (supplier && (def_metric = ifp->int_d_metric) != 0) {
                if (0 == (rt = rtget(RIP_DEFAULT, 0))
                    || rt->rt_metric+ws.metric >= def_metric) {
                        ws.state |= WS_ST_DEFAULT;
                        ag_check(0, 0, 0, 0, def_metric, def_metric,
                                 0, 0, 0, supply_out);
                } else {
                        def_metric = rt->rt_metric+ws.metric;
                }

                /* If both RIPv2 and the poor-man's router discovery
                 * kludge are on, arrange to advertise an extra
                 * default route via RIPv1.
                 */
                if ((ws.state & WS_ST_RIP2_ALL)
                    && (ifp->int_state & IS_PM_RDISC)) {
                        ripv12_buf.rip.rip_vers = RIPv1;
                        v12buf.n->n_family = RIP_AF_INET;
                        v12buf.n->n_dst = htonl(RIP_DEFAULT);
                        v12buf.n->n_metric = htonl(def_metric);
                        v12buf.n++;
                }
        }

        (void)rn_walktree(rhead, walk_supply, 0);
        ag_flush(0,0,supply_out);

        /* Flush the packet buffers, provided they are not empty and
         * do not contain only the password.
         */
        if (v12buf.n != v12buf.base
            && (v12buf.n > v12buf.base+1
                || v12buf.base->n_family != RIP_AF_AUTH))
                supply_write(&v12buf);
        if (v2buf.n != v2buf.base
            && (v2buf.n > v2buf.base+1
                || v2buf.base->n_family != RIP_AF_AUTH))
                supply_write(&v2buf);

        /* If we sent nothing and this is an answer to a query, send
         * an empty buffer.
         */
        if (ws.npackets == 0
            && (ws.state & WS_ST_QUERY))
                supply_write(&v12buf);
}


/* send all of the routing table or just do a flash update
 */
void
rip_bcast(int flash)
{
#ifdef _HAVE_SIN_LEN
        static struct sockaddr_in dst = {sizeof(dst), AF_INET, 0, {0}, {0}};
#else
        static struct sockaddr_in dst = {AF_INET};
#endif
        struct interface *ifp;
        enum output_type type;
        int vers;
        struct timeval rtime;


        need_flash = 0;
        intvl_random(&rtime, MIN_WAITTIME, MAX_WAITTIME);
        no_flash = rtime;
        timevaladd(&no_flash, &now);

        if (rip_sock < 0)
                return;

        trace_act("send %s and inhibit dynamic updates for %.3f sec",
                  flash ? "dynamic update" : "all routes",
                  rtime.tv_sec + ((float)rtime.tv_usec)/1000000.0);

        for (ifp = ifnet; ifp != 0; ifp = ifp->int_next) {
                /* Skip interfaces not doing RIP.
                 * Do try broken interfaces to see if they have healed.
                 */
                if (IS_RIP_OUT_OFF(ifp->int_state))
                        continue;

                /* skip turned off interfaces */
                if (!iff_up(ifp->int_if_flags))
                        continue;

                vers = (ifp->int_state & IS_NO_RIPV1_OUT) ? RIPv2 : RIPv1;

                if (ifp->int_if_flags & IFF_BROADCAST) {
                        /* ordinary, hardware interface */
                        dst.sin_addr.s_addr = ifp->int_brdaddr;

                        if (vers == RIPv2
                            && !(ifp->int_state & IS_NO_RIP_MCAST)) {
                                type = OUT_MULTICAST;
                        } else {
                                type = OUT_BROADCAST;
                        }

                } else if (ifp->int_if_flags & IFF_POINTOPOINT) {
                        /* point-to-point hardware interface */
                        dst.sin_addr.s_addr = ifp->int_dstaddr;
                        type = OUT_UNICAST;

                } else if (ifp->int_state & IS_REMOTE) {
                        /* remote interface */
                        dst.sin_addr.s_addr = ifp->int_addr;
                        type = OUT_UNICAST;

                } else {
                        /* ATM, HIPPI, etc. */
                        continue;
                }

                supply(&dst, ifp, type, flash, vers, 1);
        }

        update_seqno++;                 /* all routes are up to date */
}
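
/* rip_bcast(0) supplies the complete routing table on every RIP
 * interface, while rip_bcast(1) sends a flash update in which
 * supply_out() skips aggregated routes whose sequence numbers predate
 * update_seqno.  Either way, further dynamic updates are inhibited
 * until the randomly chosen no_flash time.
 */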


/* Ask for routes
 * Do it only once to an interface, and not even after the interface
 * was broken and recovered.
 */
void
rip_query(void)
{
#ifdef _HAVE_SIN_LEN
        static struct sockaddr_in dst = {sizeof(dst), AF_INET, 0, {0}, {0}};
#else
        static struct sockaddr_in dst = {AF_INET};
#endif
        struct interface *ifp;
        struct rip buf;
        enum output_type type;


        if (rip_sock < 0)
                return;

        memset(&buf, 0, sizeof(buf));

        for (ifp = ifnet; ifp; ifp = ifp->int_next) {
                /* Skip interfaces that have already been queried.
                 * Do not ask via interfaces through which we don't
                 * accept input.  Do not ask via interfaces that cannot
                 * send RIP packets.
                 * Do try broken interfaces to see if they have healed.
                 */
                if (IS_RIP_IN_OFF(ifp->int_state)
                    || ifp->int_query_time != NEVER)
                        continue;

                /* skip turned off interfaces */
                if (!iff_up(ifp->int_if_flags))
                        continue;

                buf.rip_vers = (ifp->int_state&IS_NO_RIPV1_OUT)
                                ? RIPv2 : RIPv1;
                buf.rip_cmd = RIPCMD_REQUEST;
                buf.rip_nets[0].n_family = RIP_AF_UNSPEC;
                buf.rip_nets[0].n_metric = htonl(HOPCNT_INFINITY);

                /* Send a RIPv1 query only if allowed and if we will
                 * listen to RIPv1 routers.
                 */
                if ((ifp->int_state & IS_NO_RIPV1_OUT)
                    || (ifp->int_state & IS_NO_RIPV1_IN)) {
                        buf.rip_vers = RIPv2;
                } else {
                        buf.rip_vers = RIPv1;
                }

                if (ifp->int_if_flags & IFF_BROADCAST) {
                        /* ordinary, hardware interface */
                        dst.sin_addr.s_addr = ifp->int_brdaddr;

                        /* Broadcast RIPv1 queries and RIPv2 queries
                         * when the hardware cannot multicast.
                         */
                        if (buf.rip_vers == RIPv2
                            && (ifp->int_if_flags & IFF_MULTICAST)
                            && !(ifp->int_state & IS_NO_RIP_MCAST)) {
                                type = OUT_MULTICAST;
                        } else {
                                type = OUT_BROADCAST;
                        }

                } else if (ifp->int_if_flags & IFF_POINTOPOINT) {
                        /* point-to-point hardware interface */
                        dst.sin_addr.s_addr = ifp->int_dstaddr;
                        type = OUT_UNICAST;

                } else if (ifp->int_state & IS_REMOTE) {
                        /* remote interface */
                        dst.sin_addr.s_addr = ifp->int_addr;
                        type = OUT_UNICAST;

                } else {
                        /* ATM, HIPPI, etc. */
                        continue;
                }

                ifp->int_query_time = now.tv_sec+SUPPLY_INTERVAL;
                if (output(type, &dst, ifp, &buf, sizeof(buf)) < 0)
                        if_sick(ifp);
        }
}