/*
 * Copyright (c) 1983, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef lint
#if 0
static char sccsid[] = "@(#)output.c	8.1 (Berkeley) 6/5/93";
#endif
static const char rcsid[] =
  "$Id$";
#endif /* not lint */

#include "defs.h"

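/* Sequence number of the current routing-table image.  rip_bcast() advances
 * it after broadcasting the entire table, and flash updates skip aggregated
 * entries whose sequence numbers predate it (see supply_out()).
 */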
int update_seqno;


/* walk the tree of routes with this for output
 */
struct {
	struct sockaddr_in to;
	naddr	to_mask;
	naddr	to_net;
	naddr	to_std_mask;
	naddr	to_std_net;
	struct interface *ifp;		/* usually output interface */
	struct auth *a;
	char	metric;			/* adjust metrics by interface */
	int	npackets;
	int	gen_limit;
	u_int	state;
#define	    WS_ST_FLASH	    0x001	/* send only changed routes */
#define	    WS_ST_RIP2_ALL  0x002	/* send full featured RIPv2 */
#define	    WS_ST_AG	    0x004	/* ok to aggregate subnets */
#define	    WS_ST_SUPER_AG  0x008	/* ok to aggregate networks */
#define	    WS_ST_SUB_AG    0x010	/* aggregate subnets in odd case */
#define	    WS_ST_QUERY	    0x020	/* responding to a query */
#define	    WS_ST_TO_ON_NET 0x040	/* sending onto one of our nets */
#define	    WS_ST_DEFAULT   0x080	/* faking a default */
} ws;

/* A buffer for what can be heard by both RIPv1 and RIPv2 listeners */
struct ws_buf v12buf;
union pkt_buf ripv12_buf;

/* Another for only RIPv2 listeners */
struct ws_buf v2buf;
union pkt_buf rip_v2_buf;

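/* Prepare the output buffers: set the RIP command (and, for the RIPv2-only
 * buffer, the version) and point each buffer's base at its first route entry.
 */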
void
bufinit(void)
{
	ripv12_buf.rip.rip_cmd = RIPCMD_RESPONSE;
	v12buf.buf = &ripv12_buf.rip;
	v12buf.base = &v12buf.buf->rip_nets[0];

	rip_v2_buf.rip.rip_cmd = RIPCMD_RESPONSE;
	rip_v2_buf.rip.rip_vers = RIPv2;
	v2buf.buf = &rip_v2_buf.rip;
	v2buf.base = &v2buf.buf->rip_nets[0];
}


/* Send the contents of the global buffer via the non-multicast socket
 */
int					/* <0 on failure */
output(enum output_type type,
       struct sockaddr_in *dst,		/* send to here */
       struct interface *ifp,
       struct rip *buf,
       int size)			/* this many bytes */
{
	struct sockaddr_in sin;
	int flags;
	char *msg;
	int res;
	naddr tgt_mcast;
	int soc;
	int serrno;

	sin = *dst;
	if (sin.sin_port == 0)
		sin.sin_port = htons(RIP_PORT);
#ifdef _HAVE_SIN_LEN
	if (sin.sin_len == 0)
		sin.sin_len = sizeof(sin);
#endif

	soc = rip_sock;
	flags = 0;

	switch (type) {
	case OUT_QUERY:
		msg = "Answer Query";
		if (soc < 0)
			soc = ifp->int_rip_sock;
		break;
	case OUT_UNICAST:
		msg = "Send";
		if (soc < 0)
			soc = ifp->int_rip_sock;
		flags = MSG_DONTROUTE;
		break;
	case OUT_BROADCAST:
		if (ifp->int_if_flags & IFF_POINTOPOINT) {
			msg = "Send";
		} else {
			msg = "Send bcast";
		}
		flags = MSG_DONTROUTE;
		break;
	case OUT_MULTICAST:
		if (ifp->int_if_flags & IFF_POINTOPOINT) {
			msg = "Send pt-to-pt";
		} else if (ifp->int_state & IS_DUP) {
			trace_act("abort multicast output via %s"
				  " with duplicate address",
				  ifp->int_name);
			return 0;
		} else {
			msg = "Send mcast";
			if (rip_sock_mcast != ifp) {
#ifdef MCAST_PPP_BUG
				/* Do not specify the primary interface
				 * explicitly if we have the multicast
				 * point-to-point kernel bug, since the
				 * kernel will do the wrong thing if the
				 * local address of a point-to-point link
				 * is the same as the address of an ordinary
				 * interface.
				 */
				if (ifp->int_addr == myaddr) {
					tgt_mcast = 0;
				} else
#endif
				tgt_mcast = ifp->int_addr;
				if (0 > setsockopt(rip_sock,
						   IPPROTO_IP, IP_MULTICAST_IF,
						   &tgt_mcast,
						   sizeof(tgt_mcast))) {
					serrno = errno;
					LOGERR("setsockopt(rip_sock,"
					       "IP_MULTICAST_IF)");
					errno = serrno;
					ifp = 0;
					return -1;
				}
				rip_sock_mcast = ifp;
			}
			sin.sin_addr.s_addr = htonl(INADDR_RIP_GROUP);
		}
		break;

	case NO_OUT_MULTICAST:
	case NO_OUT_RIPV2:
	default:
#ifdef DEBUG
		abort();
#endif
		return -1;
	}

	trace_rip(msg, "to", &sin, ifp, buf, size);

	res = sendto(soc, buf, size, flags,
		     (struct sockaddr *)&sin, sizeof(sin));
	if (res < 0
	    && (ifp == 0 || !(ifp->int_state & IS_BROKE))) {
		serrno = errno;
		msglog("%s sendto(%s%s%s.%d): %s", msg,
		       ifp != 0 ? ifp->int_name : "",
		       ifp != 0 ? ", " : "",
		       inet_ntoa(sin.sin_addr),
		       ntohs(sin.sin_port),
		       strerror(errno));
		errno = serrno;
	}

	return res;
}


/* Find the first key for a packet to send.
 * Try for a key that is eligible and has not expired, but settle for
 * the last key if they have all expired.
 * If no key is ready yet, give up.
 */
struct auth *
find_auth(struct interface *ifp)
{
	struct auth *ap, *res;
	int i;


	if (ifp == 0)
		return 0;

	res = 0;
	ap = ifp->int_auth;
	for (i = 0; i < MAX_AUTH_KEYS; i++, ap++) {
		/* stop looking after the last key */
		if (ap->type == RIP_AUTH_NONE)
			break;

		/* ignore keys that are not ready yet */
		if ((u_long)ap->start > (u_long)clk.tv_sec)
			continue;

		if ((u_long)ap->end < (u_long)clk.tv_sec) {
			/* note best expired password as a fall-back */
			if (res == 0 || (u_long)ap->end > (u_long)res->end)
				res = ap;
			continue;
		}

		/* note key with the best future */
		if (res == 0 || (u_long)res->end < (u_long)ap->end)
			res = ap;
	}
	return res;
}

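/* Reset a buffer to an empty packet body.  When a key is supplied, the
 * first entry is reserved for authentication; an MD5 key also reserves
 * the last entry for the trailing digest.
 */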
void
clr_ws_buf(struct ws_buf *wb,
	   struct auth *ap)
{
	struct netauth *na;

	wb->lim = wb->base + NETS_LEN;
	wb->n = wb->base;
	bzero(wb->n, NETS_LEN*sizeof(*wb->n));

	/* install authentication if appropriate
	 */
	if (ap == 0)
		return;
	na = (struct netauth*)wb->n;
	if (ap->type == RIP_AUTH_PW) {
		na->a_family = RIP_AF_AUTH;
		na->a_type = RIP_AUTH_PW;
		bcopy(ap->key, na->au.au_pw, sizeof(na->au.au_pw));
		wb->n++;

	} else if (ap->type == RIP_AUTH_MD5) {
		na->a_family = RIP_AF_AUTH;
		na->a_type = RIP_AUTH_MD5;
		na->au.a_md5.md5_keyid = ap->keyid;
		na->au.a_md5.md5_auth_len = RIP_AUTH_PW_LEN;
		na->au.a_md5.md5_seqno = clk.tv_sec;
		wb->n++;
		wb->lim--;		/* make room for trailer */
	}
}

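/* Finish keyed-MD5 authentication: append a trailer entry (authentication
 * type 1) holding the secret, record in the leading authentication entry
 * how much routing data lies between it and the trailer, and then replace
 * the secret in the trailer with the MD5 digest computed over everything
 * from the leading entry through the trailer.
 */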
void
end_md5_auth(struct ws_buf *wb,
	     struct auth *ap)
{
	struct netauth *na, *na2;
	MD5_CTX md5_ctx;


	na = (struct netauth*)wb->base;
	na2 = (struct netauth*)wb->n;
	na2->a_family = RIP_AF_AUTH;
	na2->a_type = 1;
	bcopy(ap->key, na2->au.au_pw, sizeof(na2->au.au_pw));
	na->au.a_md5.md5_pkt_len = (char *)na2-(char *)(na+1);
	MD5Init(&md5_ctx);
	MD5Update(&md5_ctx, (u_char *)na,
		  (char *)(na2+1) - (char *)na);
	MD5Final(na2->au.au_pw, &md5_ctx);
	wb->n++;
}


/* Send the buffer
 */
static void
supply_write(struct ws_buf *wb)
{
	/* Output multicast only if legal.
	 * If we would multicast and it would be illegal, then discard the
	 * packet.
	 */
	switch (wb->type) {
	case NO_OUT_MULTICAST:
		trace_pkt("skip multicast to %s because impossible",
			  naddr_ntoa(ws.to.sin_addr.s_addr));
		break;
	case NO_OUT_RIPV2:
		break;
	default:
		if (ws.a != 0 && ws.a->type == RIP_AUTH_MD5)
			end_md5_auth(wb,ws.a);
		if (output(wb->type, &ws.to, ws.ifp, wb->buf,
			   ((char *)wb->n - (char*)wb->buf)) < 0
		    && ws.ifp != 0)
			if_sick(ws.ifp);
		ws.npackets++;
		break;
	}

	clr_ws_buf(wb,ws.a);
}


/* put an entry into the packet
 */
static void
supply_out(struct ag_info *ag)
{
	int i;
	naddr mask, v1_mask, dst_h, ddst_h = 0;
	struct ws_buf *wb;


	/* Skip this route if doing a flash update and it and the routes
	 * it aggregates have not changed recently.
	 */
	if (ag->ag_seqno < update_seqno
	    && (ws.state & WS_ST_FLASH))
		return;

	/* Skip this route if required by split-horizon.
	 */
	if (ag->ag_state & AGS_SPLIT_HZ)
		return;

	dst_h = ag->ag_dst_h;
	mask = ag->ag_mask;
	v1_mask = ripv1_mask_host(htonl(dst_h),
				  (ws.state & WS_ST_TO_ON_NET) ? ws.ifp : 0);
	i = 0;

	/* If we are sending RIPv2 packets that cannot (or must not) be
	 * heard by RIPv1 listeners, do not worry about sub- or supernets.
	 * Subnets (from other networks) can only be sent via multicast.
	 * A pair of subnet routes might have been promoted so that they
	 * are legal to send by RIPv1.
	 * If RIPv1 is off, use the multicast buffer.
	 */
	if ((ws.state & WS_ST_RIP2_ALL)
	    || ((ag->ag_state & AGS_RIPV2) && v1_mask != mask)) {
		/* use the RIPv2-only buffer */
		wb = &v2buf;

	} else {
		/* use the RIPv1-or-RIPv2 buffer */
		wb = &v12buf;

		/* Convert supernet route into corresponding set of network
		 * routes for RIPv1, but leave non-contiguous netmasks
		 * to ag_check().
		 */
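		/* For example, a /22 aggregate whose natural RIPv1 mask is
		 * /24 yields ddst_h = 256 and i = 3 below, so four
		 * consecutive network routes are emitted in its place.
		 */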
		if (v1_mask > mask
		    && mask + (mask & -mask) == 0) {
			ddst_h = v1_mask & -v1_mask;
			i = (v1_mask & ~mask)/ddst_h;

			if (i > ws.gen_limit) {
				/* Punt if we would have to generate an
				 * unreasonable number of routes.
				 */
#ifdef DEBUG
				msglog("sending %s to %s as 1 instead"
				       " of %d routes",
				       addrname(htonl(dst_h),mask,1),
				       naddr_ntoa(ws.to.sin_addr.s_addr),
				       i+1);
#endif
				i = 0;

			} else {
				mask = v1_mask;
				ws.gen_limit -= i;
			}
		}
	}

	do {
		wb->n->n_family = RIP_AF_INET;
		wb->n->n_dst = htonl(dst_h);
		/* If the route is from router-discovery or we are
		 * shutting down, admit only a bad metric.
		 */
		wb->n->n_metric = ((stopint || ag->ag_metric < 1)
				   ? HOPCNT_INFINITY
				   : ag->ag_metric);
		HTONL(wb->n->n_metric);
		/* Any non-zero bits in the supposedly unused RIPv1 fields
		 * cause the old routed to ignore the route.
		 * That means the mask and so forth cannot be sent
		 * in the hybrid RIPv1/RIPv2 mode.
		 */
		if (ws.state & WS_ST_RIP2_ALL) {
			if (ag->ag_nhop != 0
			    && ((ws.state & WS_ST_QUERY)
				|| (ag->ag_nhop != ws.ifp->int_addr
				    && on_net(ag->ag_nhop,
					      ws.ifp->int_net,
					      ws.ifp->int_mask))))
				wb->n->n_nhop = ag->ag_nhop;
			wb->n->n_mask = htonl(mask);
			wb->n->n_tag = ag->ag_tag;
		}
		dst_h += ddst_h;

		if (++wb->n >= wb->lim)
			supply_write(wb);
	} while (i-- != 0);
}


/* supply one route from the table
 */
/* ARGSUSED */
static int
walk_supply(struct radix_node *rn,
	    struct walkarg *w)
{
#define RT ((struct rt_entry *)rn)
	u_short ags;
	char metric, pref;
	naddr dst, nhop;


	/* Do not advertise external remote interfaces or passive interfaces.
	 */
	if ((RT->rt_state & RS_IF)
	    && RT->rt_ifp != 0
	    && (RT->rt_ifp->int_state & IS_PASSIVE)
	    && !(RT->rt_state & RS_MHOME))
		return 0;

	/* If being quiet about our ability to forward, then
	 * do not say anything unless responding to a query,
	 * except about our main interface.
	 */
	if (!supplier && !(ws.state & WS_ST_QUERY)
	    && !(RT->rt_state & RS_MHOME))
		return 0;

	dst = RT->rt_dst;

	/* do not collide with the fake default route */
	if (dst == RIP_DEFAULT
	    && (ws.state & WS_ST_DEFAULT))
		return 0;

	if (RT->rt_state & RS_NET_SYN) {
		if (RT->rt_state & RS_NET_INT) {
			/* Do not send manual synthetic network routes
			 * into the subnet.
			 */
			if (on_net(ws.to.sin_addr.s_addr,
				   ntohl(dst), RT->rt_mask))
				return 0;

		} else {
			/* Do not send automatic synthetic network routes
			 * if they are not needed because no RIPv1 listeners
			 * can hear them.
			 */
			if (ws.state & WS_ST_RIP2_ALL)
				return 0;

			/* Do not send automatic synthetic network routes to
			 * the real subnet.
			 */
			if (on_net(ws.to.sin_addr.s_addr,
				   ntohl(dst), RT->rt_mask))
				return 0;
		}
		nhop = 0;

	} else {
		/* Advertise the next hop if this is not a route for one
		 * of our interfaces and the next hop is on the same
		 * network as the target.
		 */
		if (!(RT->rt_state & RS_IF)
		    && RT->rt_gate != myaddr
		    && RT->rt_gate != loopaddr)
			nhop = RT->rt_gate;
		else
			nhop = 0;
	}

	metric = RT->rt_metric;
	ags = 0;

	if (RT->rt_state & RS_MHOME) {
		/* retain host route of multi-homed servers */
		;

	} else if (RT_ISHOST(RT)) {
		/* We should always aggregate the host routes
		 * for the local end of our point-to-point links.
		 * If we are suppressing host routes in general, then do so.
		 * Avoid advertising host routes onto their own network,
		 * where they should be handled by proxy-ARP.
		 */
		if ((RT->rt_state & RS_LOCAL)
		    || ridhosts
		    || (ws.state & WS_ST_SUPER_AG)
		    || on_net(dst, ws.to_net, ws.to_mask))
			ags |= AGS_SUPPRESS;

		if (ws.state & WS_ST_SUPER_AG)
			ags |= AGS_PROMOTE;

	} else if (ws.state & WS_ST_AG) {
		/* Aggregate network routes, if we are allowed.
		 */
		ags |= AGS_SUPPRESS;

		/* Generate supernets if allowed.
		 * If we can be heard by RIPv1 systems, we will
		 * later convert back to ordinary nets.
		 * This unifies dealing with received supernets.
		 */
		if ((RT->rt_state & RS_SUBNET)
		    || (ws.state & WS_ST_SUPER_AG))
			ags |= AGS_PROMOTE;

	}

	/* Do not send RIPv1 advertisements of subnets to other
	 * networks.  If possible, multicast them by RIPv2.
	 */
	if ((RT->rt_state & RS_SUBNET)
	    && !(ws.state & WS_ST_RIP2_ALL)
	    && !on_net(dst, ws.to_std_net, ws.to_std_mask)) {
		ags |= AGS_RIPV2 | AGS_PROMOTE;
		if (ws.state & WS_ST_SUB_AG)
			ags |= AGS_SUPPRESS;
	}

	/* Do not send a route back to where it came from, except in
	 * response to a query.  This is "split-horizon".  That means not
	 * advertising back to the same network and so via the same interface.
	 *
	 * We want to suppress routes that might have been fragmented
	 * from this route by a RIPv1 router and sent back to us, and so we
	 * cannot forget this route here.  Let the split-horizon route
	 * aggregate (suppress) the fragmented routes and then itself be
	 * forgotten.
	 *
	 * Include the routes for both ends of point-to-point interfaces
	 * among those suppressed by split-horizon, since the other side
	 * should know them as well as we do.
	 */
	if (RT->rt_ifp == ws.ifp && ws.ifp != 0
	    && !(ws.state & WS_ST_QUERY)
	    && (ws.state & WS_ST_TO_ON_NET)
	    && (!(RT->rt_state & RS_IF)
		|| ws.ifp->int_if_flags & IFF_POINTOPOINT)) {
		/* If we do not mark the route with AGS_SPLIT_HZ here,
		 * it will be poisoned-reverse, or advertised back toward
		 * its source with an infinite metric.  If we have recently
		 * advertised the route with a better metric than we now
		 * have, then we should poison-reverse the route before
		 * suppressing it for split-horizon.
		 *
		 * In almost all cases, if there is no spare for the route
		 * then it is either old and dead or a brand new route.
		 * If it is brand new, there is no need for poison-reverse.
		 * If it is old and dead, it is already poisoned.
		 */
		if (RT->rt_poison_time < now_expire
		    || RT->rt_poison_metric >= metric
		    || RT->rt_spares[1].rts_gate == 0) {
			ags |= AGS_SPLIT_HZ;
			ags &= ~(AGS_PROMOTE | AGS_SUPPRESS);
		}
		metric = HOPCNT_INFINITY;
	}

	/* Adjust the outgoing metric by the cost of the link.
	 */
	pref = metric + ws.metric;
	if (pref < HOPCNT_INFINITY) {
		/* Keep track of the best metric with which the
		 * route has been advertised recently.
		 */
		if (RT->rt_poison_metric >= metric
		    || RT->rt_poison_time < now_expire) {
			RT->rt_poison_time = now.tv_sec;
			RT->rt_poison_metric = metric;
		}
		metric = pref;

	} else {
		/* Do not advertise stable routes that will be ignored,
		 * unless we are answering a query.
		 * If the route recently was advertised with a metric that
		 * would have been less than infinity through this interface,
		 * we need to continue to advertise it in order to poison it.
		 */
		pref = RT->rt_poison_metric + ws.metric;
		if (!(ws.state & WS_ST_QUERY)
		    && (pref >= HOPCNT_INFINITY
			|| RT->rt_poison_time < now_garbage))
			return 0;

		metric = HOPCNT_INFINITY;
	}

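	/* Hand the route to the aggregator; entries that survive
	 * aggregation are emitted through supply_out().
	 */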
	ag_check(dst, RT->rt_mask, 0, nhop, metric, pref,
		 RT->rt_seqno, RT->rt_tag, ags, supply_out);
	return 0;
#undef RT
}


/* Supply dst with the contents of the routing tables.
 * If this won't fit in one packet, chop it up into several.
 */
void
supply(struct sockaddr_in *dst,
       struct interface *ifp,		/* output interface */
       enum output_type type,
       int flash,			/* 1=flash update */
       int vers,			/* RIP version */
       int passwd_ok)			/* OK to include cleartext password */
{
	struct rt_entry *rt;
	int def_metric;


	ws.state = 0;
	ws.gen_limit = 1024;

	ws.to = *dst;
	ws.to_std_mask = std_mask(ws.to.sin_addr.s_addr);
	ws.to_std_net = ntohl(ws.to.sin_addr.s_addr) & ws.to_std_mask;

	if (ifp != 0) {
		ws.to_mask = ifp->int_mask;
		ws.to_net = ifp->int_net;
		if (on_net(ws.to.sin_addr.s_addr, ws.to_net, ws.to_mask))
			ws.state |= WS_ST_TO_ON_NET;

	} else {
		ws.to_mask = ripv1_mask_net(ws.to.sin_addr.s_addr, 0);
		ws.to_net = ntohl(ws.to.sin_addr.s_addr) & ws.to_mask;
		rt = rtfind(dst->sin_addr.s_addr);
		if (rt)
			ifp = rt->rt_ifp;
	}

	ws.npackets = 0;
	if (flash)
		ws.state |= WS_ST_FLASH;
	if (type == OUT_QUERY)
		ws.state |= WS_ST_QUERY;

	if ((ws.ifp = ifp) == 0) {
		ws.metric = 1;
	} else {
		/* Adjust the advertised metric by the outgoing interface
		 * metric.
		 */
		ws.metric = ifp->int_metric+1;
	}

	ripv12_buf.rip.rip_vers = vers;

	switch (type) {
	case OUT_BROADCAST:
		v2buf.type = ((ifp != 0 && (ifp->int_if_flags & IFF_MULTICAST))
			      ? OUT_MULTICAST
			      : NO_OUT_MULTICAST);
		v12buf.type = OUT_BROADCAST;
		break;
	case OUT_MULTICAST:
		v2buf.type = ((ifp != 0 && (ifp->int_if_flags & IFF_MULTICAST))
			      ? OUT_MULTICAST
			      : NO_OUT_MULTICAST);
		v12buf.type = OUT_BROADCAST;
		break;
	case OUT_UNICAST:
	case OUT_QUERY:
		v2buf.type = (vers == RIPv2) ? type : NO_OUT_RIPV2;
		v12buf.type = type;
		break;
	default:
		v2buf.type = type;
		v12buf.type = type;
		break;
	}

	if (vers == RIPv2) {
		/* full RIPv2 only if cannot be heard by RIPv1 listeners */
		if (type != OUT_BROADCAST)
			ws.state |= WS_ST_RIP2_ALL;
		if (!(ws.state & WS_ST_TO_ON_NET)) {
			ws.state |= (WS_ST_AG | WS_ST_SUPER_AG);
		} else if (ifp == 0 || !(ifp->int_state & IS_NO_AG)) {
			ws.state |= WS_ST_AG;
			if (type != OUT_BROADCAST
			    && (ifp == 0 || !(ifp->int_state&IS_NO_SUPER_AG)))
				ws.state |= WS_ST_SUPER_AG;
		}

	} else if (ifp == 0 || !(ifp->int_state & IS_NO_AG)) {
		ws.state |= WS_ST_SUB_AG;
	}

	ws.a = (vers == RIPv2) ? find_auth(ifp) : 0;
	if (!passwd_ok && ws.a != 0 && ws.a->type == RIP_AUTH_PW)
		ws.a = 0;
	clr_ws_buf(&v12buf,ws.a);
	clr_ws_buf(&v2buf,ws.a);

	/* Fake a default route if asked and if there is not already
	 * a better, real default route.
	 */
	if (supplier && (def_metric = ifp->int_d_metric) != 0) {
		if (0 == (rt = rtget(RIP_DEFAULT, 0))
		    || rt->rt_metric+ws.metric >= def_metric) {
			ws.state |= WS_ST_DEFAULT;
			ag_check(0, 0, 0, 0, def_metric, def_metric,
				 0, 0, 0, supply_out);
		} else {
			def_metric = rt->rt_metric+ws.metric;
		}

		/* If both RIPv2 and the poor-man's router discovery
		 * kludge are on, arrange to advertise an extra
		 * default route via RIPv1.
		 */
		if ((ws.state & WS_ST_RIP2_ALL)
		    && (ifp->int_state & IS_PM_RDISC)) {
			ripv12_buf.rip.rip_vers = RIPv1;
			v12buf.n->n_family = RIP_AF_INET;
			v12buf.n->n_dst = htonl(RIP_DEFAULT);
			v12buf.n->n_metric = htonl(def_metric);
			v12buf.n++;
		}
	}

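	/* Walk the whole routing table, then flush anything still held by
	 * the aggregator through supply_out().
	 */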
	(void)rn_walktree(rhead, walk_supply, 0);
	ag_flush(0,0,supply_out);

	/* Flush the packet buffers, provided they are not empty and
	 * do not contain only the password.
	 */
	if (v12buf.n != v12buf.base
	    && (v12buf.n > v12buf.base+1
		|| v12buf.base->n_family != RIP_AF_AUTH))
		supply_write(&v12buf);
	if (v2buf.n != v2buf.base
	    && (v2buf.n > v2buf.base+1
		|| v2buf.base->n_family != RIP_AF_AUTH))
		supply_write(&v2buf);

	/* If we sent nothing and this is an answer to a query, send
	 * an empty buffer.
	 */
	if (ws.npackets == 0
	    && (ws.state & WS_ST_QUERY))
		supply_write(&v12buf);
}


/* send all of the routing table or just do a flash update
 */
void
rip_bcast(int flash)
{
#ifdef _HAVE_SIN_LEN
	static struct sockaddr_in dst = {sizeof(dst), AF_INET};
#else
	static struct sockaddr_in dst = {AF_INET};
#endif
	struct interface *ifp;
	enum output_type type;
	int vers;
	struct timeval rtime;


	need_flash = 0;
	intvl_random(&rtime, MIN_WAITTIME, MAX_WAITTIME);
	no_flash = rtime;
	timevaladd(&no_flash, &now);

	if (rip_sock < 0)
		return;

	trace_act("send %s and inhibit dynamic updates for %.3f sec",
		  flash ? "dynamic update" : "all routes",
		  rtime.tv_sec + ((float)rtime.tv_usec)/1000000.0);

	for (ifp = ifnet; ifp != 0; ifp = ifp->int_next) {
		/* Skip interfaces not doing RIP.
		 * Do try broken interfaces to see if they have healed.
		 */
		if (IS_RIP_OUT_OFF(ifp->int_state))
			continue;

		/* skip turned off interfaces */
		if (!iff_alive(ifp->int_if_flags))
			continue;

		vers = (ifp->int_state & IS_NO_RIPV1_OUT) ? RIPv2 : RIPv1;

		if (ifp->int_if_flags & IFF_BROADCAST) {
			/* ordinary, hardware interface */
			dst.sin_addr.s_addr = ifp->int_brdaddr;

			/* If RIPv1 is not turned off, then broadcast so
			 * that RIPv1 listeners can hear.
			 */
			if (vers == RIPv2
			    && (ifp->int_state & IS_NO_RIPV1_OUT)) {
				type = OUT_MULTICAST;
			} else {
				type = OUT_BROADCAST;
			}

		} else if (ifp->int_if_flags & IFF_POINTOPOINT) {
			/* point-to-point hardware interface */
			dst.sin_addr.s_addr = ifp->int_dstaddr;
			type = OUT_UNICAST;

		} else if (ifp->int_state & IS_REMOTE) {
			/* remote interface */
			dst.sin_addr.s_addr = ifp->int_addr;
			type = OUT_UNICAST;

		} else {
			/* ATM, HIPPI, etc. */
			continue;
		}

		supply(&dst, ifp, type, flash, vers, 1);
	}

	update_seqno++;			/* all routes are up to date */
}


/* Ask for routes
 * Do it only once to an interface, and not even after the interface
 * was broken and recovered.
 */
void
rip_query(void)
{
#ifdef _HAVE_SIN_LEN
	static struct sockaddr_in dst = {sizeof(dst), AF_INET};
#else
	static struct sockaddr_in dst = {AF_INET};
#endif
	struct interface *ifp;
	struct rip buf;
	enum output_type type;


	if (rip_sock < 0)
		return;

	bzero(&buf, sizeof(buf));

	for (ifp = ifnet; ifp; ifp = ifp->int_next) {
		/* Skip interfaces already queried.
		 * Do not ask via interfaces through which we don't
		 * accept input.  Do not ask via interfaces that cannot
		 * send RIP packets.
		 * Do try broken interfaces to see if they have healed.
		 */
		if (IS_RIP_IN_OFF(ifp->int_state)
		    || ifp->int_query_time != NEVER)
			continue;

		/* skip turned off interfaces */
		if (!iff_alive(ifp->int_if_flags))
			continue;

		buf.rip_vers = (ifp->int_state&IS_NO_RIPV1_OUT) ? RIPv2:RIPv1;
		buf.rip_cmd = RIPCMD_REQUEST;
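		/* A single entry with an unspecified address family and an
		 * infinite metric asks the listener for its entire table.
		 */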
		buf.rip_nets[0].n_family = RIP_AF_UNSPEC;
		buf.rip_nets[0].n_metric = htonl(HOPCNT_INFINITY);

		if (ifp->int_if_flags & IFF_BROADCAST) {
			/* ordinary, hardware interface */
			dst.sin_addr.s_addr = ifp->int_brdaddr;
			/* if RIPv1 is not turned off, then broadcast so
			 * that RIPv1 listeners can hear.
			 */
			if (buf.rip_vers == RIPv2
			    && (ifp->int_state & IS_NO_RIPV1_OUT)) {
				type = OUT_MULTICAST;
			} else {
				type = OUT_BROADCAST;
			}

		} else if (ifp->int_if_flags & IFF_POINTOPOINT) {
			/* point-to-point hardware interface */
			dst.sin_addr.s_addr = ifp->int_dstaddr;
			type = OUT_UNICAST;

		} else if (ifp->int_state & IS_REMOTE) {
			/* remote interface */
			dst.sin_addr.s_addr = ifp->int_addr;
			type = OUT_UNICAST;

		} else {
			/* ATM, HIPPI, etc. */
			continue;
		}

		ifp->int_query_time = now.tv_sec+SUPPLY_INTERVAL;
		if (output(type, &dst, ifp, &buf, sizeof(buf)) < 0)
			if_sick(ifp);
	}
}