/*
 * Copyright (c) 1983, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "defs.h"

#ifdef __NetBSD__
__RCSID("$NetBSD$");
#elif defined(__FreeBSD__)
__RCSID("$FreeBSD$");
#else
__RCSID("$Revision: 2.27 $");
#ident "$Revision: 2.27 $"
#endif


u_int update_seqno;


/* walk the tree of routes with this for output
 */
struct {
	struct sockaddr_in to;
	naddr	to_mask;
	naddr	to_net;
	naddr	to_std_mask;
	naddr	to_std_net;
	struct interface *ifp;		/* usually output interface */
	struct auth *a;
	char	metric;			/* adjust metrics by interface */
	int	npackets;
	int	gen_limit;
	u_int	state;
#define	    WS_ST_FLASH	    0x001	/* send only changed routes */
#define	    WS_ST_RIP2_ALL  0x002	/* send full featured RIPv2 */
#define	    WS_ST_AG	    0x004	/* ok to aggregate subnets */
#define	    WS_ST_SUPER_AG  0x008	/* ok to aggregate networks */
#define	    WS_ST_QUERY	    0x010	/* responding to a query */
#define	    WS_ST_TO_ON_NET 0x020	/* sending onto one of our nets */
#define	    WS_ST_DEFAULT   0x040	/* faking a default */
} ws;

/* A buffer for what can be heard by both RIPv1 and RIPv2 listeners */
struct ws_buf v12buf;
union pkt_buf ripv12_buf;

/* Another for only RIPv2 listeners */
struct ws_buf v2buf;
union pkt_buf rip_v2_buf;



void
bufinit(void)
{
	ripv12_buf.rip.rip_cmd = RIPCMD_RESPONSE;
	v12buf.buf = &ripv12_buf.rip;
	v12buf.base = &v12buf.buf->rip_nets[0];

	rip_v2_buf.rip.rip_cmd = RIPCMD_RESPONSE;
	rip_v2_buf.rip.rip_vers = RIPv2;
	v2buf.buf = &rip_v2_buf.rip;
	v2buf.base = &v2buf.buf->rip_nets[0];
}


/* Send the contents of the global buffer via the non-multicast socket
 */
int					/* <0 on failure */
output(enum output_type type,
       struct sockaddr_in *dst,		/* send to here */
       struct interface *ifp,
       struct rip *buf,
       int size)			/* this many bytes */
{
	struct sockaddr_in osin;
	int flags;
	const char *msg;
	int res;
	naddr tgt_mcast;
	int soc;
	int serrno;

	osin = *dst;
	if (osin.sin_port == 0)
		osin.sin_port = htons(RIP_PORT);
#ifdef _HAVE_SIN_LEN
	if (osin.sin_len == 0)
		osin.sin_len = sizeof(osin);
#endif

	soc = rip_sock;
	flags = 0;

	switch (type) {
	case OUT_QUERY:
		msg = "Answer Query";
		if (soc < 0)
			soc = ifp->int_rip_sock;
		break;
	case OUT_UNICAST:
		msg = "Send";
		if (soc < 0)
			soc = ifp->int_rip_sock;
		flags = MSG_DONTROUTE;
		break;
	case OUT_BROADCAST:
		if (ifp->int_if_flags & IFF_POINTOPOINT) {
			msg = "Send";
		} else {
			msg = "Send bcast";
		}
		flags = MSG_DONTROUTE;
		break;
	case OUT_MULTICAST:
		if (ifp->int_if_flags & IFF_POINTOPOINT) {
			msg = "Send pt-to-pt";
		} else if (ifp->int_state & IS_DUP) {
			trace_act("abort multicast output via %s"
				  " with duplicate address",
				  ifp->int_name);
			return 0;
		} else {
			msg = "Send mcast";
			if (rip_sock_mcast != ifp) {
#ifdef MCAST_IFINDEX
				/* specify ifindex */
				tgt_mcast = htonl(ifp->int_index);
#else
#ifdef MCAST_PPP_BUG
				/* Do not specify the primary interface
				 * explicitly if we have the multicast
				 * point-to-point kernel bug, since the
				 * kernel will do the wrong thing if the
				 * local address of a point-to-point link
				 * is the same as the address of an ordinary
				 * interface.
				 */
				if (ifp->int_addr == myaddr) {
					tgt_mcast = 0;
				} else
#endif
				tgt_mcast = ifp->int_addr;
#endif
				if (0 > setsockopt(rip_sock,
						   IPPROTO_IP, IP_MULTICAST_IF,
						   &tgt_mcast,
						   sizeof(tgt_mcast))) {
					serrno = errno;
					LOGERR("setsockopt(rip_sock,"
					       "IP_MULTICAST_IF)");
					errno = serrno;
					ifp = 0;
					return -1;
				}
				rip_sock_mcast = ifp;
			}
			osin.sin_addr.s_addr = htonl(INADDR_RIP_GROUP);
		}
		break;

	case NO_OUT_MULTICAST:
	case NO_OUT_RIPV2:
	default:
#ifdef DEBUG
		abort();
#endif
		return -1;
	}

	trace_rip(msg, "to", &osin, ifp, buf, size);

	res = sendto(soc, buf, size, flags,
		     (struct sockaddr *)&osin, sizeof(osin));
	if (res < 0
	    && (ifp == 0 || !(ifp->int_state & IS_BROKE))) {
		serrno = errno;
		msglog("%s sendto(%s%s%s.%d): %s", msg,
		       ifp != 0 ? ifp->int_name : "",
		       ifp != 0 ? ", " : "",
		       inet_ntoa(osin.sin_addr),
		       ntohs(osin.sin_port),
		       strerror(errno));
		errno = serrno;
	}

	return res;
}


/* Find the first key for a packet to send.
 * Try for a key that is eligible and has not expired, but settle for
 * the last key if they have all expired.
 * If no key is ready yet, give up.
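 * (A key is not yet ready while clk.tv_sec is before its start time,
 * and is treated as expired once clk.tv_sec is past its end time.)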
 */
struct auth *
find_auth(struct interface *ifp)
{
	struct auth *ap, *res;
	int i;


	if (ifp == 0)
		return 0;

	res = 0;
	ap = ifp->int_auth;
	for (i = 0; i < MAX_AUTH_KEYS; i++, ap++) {
		/* stop looking after the last key */
		if (ap->type == RIP_AUTH_NONE)
			break;

		/* ignore keys that are not ready yet */
		if ((u_long)ap->start > (u_long)clk.tv_sec)
			continue;

		if ((u_long)ap->end < (u_long)clk.tv_sec) {
			/* note best expired password as a fall-back */
			if (res == 0 || (u_long)ap->end > (u_long)res->end)
				res = ap;
			continue;
		}

		/* note key with the best future */
		if (res == 0 || (u_long)res->end < (u_long)ap->end)
			res = ap;
	}
	return res;
}


void
clr_ws_buf(struct ws_buf *wb,
	   struct auth *ap)
{
	struct netauth *na;

	wb->lim = wb->base + NETS_LEN;
	wb->n = wb->base;
	memset(wb->n, 0, NETS_LEN*sizeof(*wb->n));

	/* (start to) install authentication if appropriate
	 */
	if (ap == 0)
		return;

	na = (struct netauth*)wb->n;
	if (ap->type == RIP_AUTH_PW) {
		na->a_family = RIP_AF_AUTH;
		na->a_type = RIP_AUTH_PW;
		memcpy(na->au.au_pw, ap->key, sizeof(na->au.au_pw));
		wb->n++;

	} else if (ap->type == RIP_AUTH_MD5) {
		na->a_family = RIP_AF_AUTH;
		na->a_type = RIP_AUTH_MD5;
		na->au.a_md5.md5_keyid = ap->keyid;
		na->au.a_md5.md5_auth_len = RIP_AUTH_MD5_KEY_LEN;
		na->au.a_md5.md5_seqno = htonl(clk.tv_sec);
		wb->n++;
		wb->lim--;		/* make room for trailer */
	}
}


void
end_md5_auth(struct ws_buf *wb,
	     struct auth *ap)
{
	struct netauth *na, *na2;
	MD5_CTX md5_ctx;
	int len;


	na = (struct netauth*)wb->base;
	na2 = (struct netauth*)wb->n;
	len = (char *)na2-(char *)wb->buf;
	na2->a_family = RIP_AF_AUTH;
	na2->a_type = htons(1);
	na->au.a_md5.md5_pkt_len = htons(len);
	MD5Init(&md5_ctx);
	MD5Update(&md5_ctx, (u_char *)wb->buf, len + RIP_AUTH_MD5_HASH_XTRA);
	MD5Update(&md5_ctx, ap->key, RIP_AUTH_MD5_KEY_LEN);
	MD5Final(na2->au.au_pw, &md5_ctx);
	wb->n++;
}


/* Send the buffer
 */
static void
supply_write(struct ws_buf *wb)
{
	/* Output multicast only if legal.
	 * If we would multicast and it would be illegal, then discard the
	 * packet.
	 */
	switch (wb->type) {
	case NO_OUT_MULTICAST:
		trace_pkt("skip multicast to %s because impossible",
			  naddr_ntoa(ws.to.sin_addr.s_addr));
		break;
	case NO_OUT_RIPV2:
		break;
	default:
		if (ws.a != 0 && ws.a->type == RIP_AUTH_MD5)
			end_md5_auth(wb,ws.a);
		if (output(wb->type, &ws.to, ws.ifp, wb->buf,
			   ((char *)wb->n - (char*)wb->buf)) < 0
		    && ws.ifp != 0)
			if_sick(ws.ifp);
		ws.npackets++;
		break;
	}

	clr_ws_buf(wb,ws.a);
}


/* put an entry into the packet
 */
static void
supply_out(struct ag_info *ag)
{
	int i;
	naddr mask, v1_mask, dst_h, ddst_h = 0;
	struct ws_buf *wb;


	/* Skip this route if doing a flash update and it and the routes
	 * it aggregates have not changed recently.
	 */
	if (ag->ag_seqno < update_seqno
	    && (ws.state & WS_ST_FLASH))
		return;

	dst_h = ag->ag_dst_h;
	mask = ag->ag_mask;
	v1_mask = ripv1_mask_host(htonl(dst_h),
				  (ws.state & WS_ST_TO_ON_NET) ?
				  ws.ifp : 0);
	i = 0;

	/* If we are sending RIPv2 packets that cannot (or must not) be
	 * heard by RIPv1 listeners, do not worry about sub- or supernets.
	 * Subnets (from other networks) can only be sent via multicast.
	 * A pair of subnet routes might have been promoted so that they
	 * are legal to send by RIPv1.
	 * If RIPv1 is off, use the multicast buffer.
	 */
	if ((ws.state & WS_ST_RIP2_ALL)
	    || ((ag->ag_state & AGS_RIPV2) && v1_mask != mask)) {
		/* use the RIPv2-only buffer */
		wb = &v2buf;

	} else {
		/* use the RIPv1-or-RIPv2 buffer */
		wb = &v12buf;

		/* Convert supernet route into corresponding set of network
		 * routes for RIPv1, but leave non-contiguous netmasks
		 * to ag_check().
		 */
		if (v1_mask > mask
		    && mask + (mask & -mask) == 0) {
			ddst_h = v1_mask & -v1_mask;
			i = (v1_mask & ~mask)/ddst_h;

			if (i > ws.gen_limit) {
				/* Punt if we would have to generate an
				 * unreasonable number of routes.
				 */
				if (TRACECONTENTS)
					trace_misc("sending %s-->%s as 1"
						   " instead of %d routes",
						   addrname(htonl(dst_h), mask,
							    1),
						   naddr_ntoa(ws.to.sin_addr
							      .s_addr),
						   i+1);
				i = 0;

			} else {
				mask = v1_mask;
				ws.gen_limit -= i;
			}
		}
	}

	do {
		wb->n->n_family = RIP_AF_INET;
		wb->n->n_dst = htonl(dst_h);
		/* If the route is from router-discovery or we are
		 * shutting down, admit only a bad metric.
		 */
		wb->n->n_metric = ((stopint || ag->ag_metric < 1)
				   ? HOPCNT_INFINITY
				   : ag->ag_metric);
		wb->n->n_metric = htonl(wb->n->n_metric);
		/* Any non-zero bits in the supposedly unused RIPv1 fields
		 * cause the old `routed` to ignore the route.
		 * That means the mask and so forth cannot be sent
		 * in the hybrid RIPv1/RIPv2 mode.
		 */
		if (ws.state & WS_ST_RIP2_ALL) {
			if (ag->ag_nhop != 0
			    && ((ws.state & WS_ST_QUERY)
				|| (ag->ag_nhop != ws.ifp->int_addr
				    && on_net(ag->ag_nhop,
					      ws.ifp->int_net,
					      ws.ifp->int_mask))))
				wb->n->n_nhop = ag->ag_nhop;
			wb->n->n_mask = htonl(mask);
			wb->n->n_tag = ag->ag_tag;
		}
		dst_h += ddst_h;

		if (++wb->n >= wb->lim)
			supply_write(wb);
	} while (i-- != 0);
}


/* supply one route from the table
 */
/* ARGSUSED */
static int
walk_supply(struct radix_node *rn,
	    struct walkarg *argp UNUSED)
{
#define RT ((struct rt_entry *)rn)
	u_short ags;
	char metric, pref;
	naddr dst, nhop;
	struct rt_spare *rts;
	int i;


	/* Do not advertise external remote interfaces or passive interfaces.
	 */
	if ((RT->rt_state & RS_IF)
	    && RT->rt_ifp != 0
	    && (RT->rt_ifp->int_state & IS_PASSIVE)
	    && !(RT->rt_state & RS_MHOME))
		return 0;

	/* If being quiet about our ability to forward, then
	 * do not say anything unless responding to a query,
	 * except about our main interface.
	 */
	if (!supplier && !(ws.state & WS_ST_QUERY)
	    && !(RT->rt_state & RS_MHOME))
		return 0;

	dst = RT->rt_dst;

	/* do not collide with the fake default route */
	if (dst == RIP_DEFAULT
	    && (ws.state & WS_ST_DEFAULT))
		return 0;

	if (RT->rt_state & RS_NET_SYN) {
		if (RT->rt_state & RS_NET_INT) {
			/* Do not send manual synthetic network routes
			 * into the subnet.
			 */
			if (on_net(ws.to.sin_addr.s_addr,
				   ntohl(dst), RT->rt_mask))
				return 0;

		} else {
			/* Do not send automatic synthetic network routes
			 * if they are not needed because no RIPv1 listeners
			 * can hear them.
			 */
			if (ws.state & WS_ST_RIP2_ALL)
				return 0;

			/* Do not send automatic synthetic network routes to
			 * the real subnet.
			 */
			if (on_net(ws.to.sin_addr.s_addr,
				   ntohl(dst), RT->rt_mask))
				return 0;
		}
		nhop = 0;

	} else {
		/* Advertise the next hop if this is not a route for one
		 * of our interfaces and the next hop is on the same
		 * network as the target.
		 * The final determination is made by supply_out().
		 */
		if (!(RT->rt_state & RS_IF)
		    && RT->rt_gate != myaddr
		    && RT->rt_gate != loopaddr)
			nhop = RT->rt_gate;
		else
			nhop = 0;
	}

	metric = RT->rt_metric;
	ags = 0;

	if (RT->rt_state & RS_MHOME) {
		/* retain host route of multi-homed servers */
		;

	} else if (RT_ISHOST(RT)) {
		/* We should always suppress (into existing network routes)
		 * the host routes for the local end of our point-to-point
		 * links.
		 * If we are suppressing host routes in general, then do so.
		 * Avoid advertising host routes onto their own network,
		 * where they should be handled by proxy-ARP.
		 */
		if ((RT->rt_state & RS_LOCAL)
		    || ridhosts
		    || on_net(dst, ws.to_net, ws.to_mask))
			ags |= AGS_SUPPRESS;

		/* Aggregate stray host routes into network routes if allowed.
		 * We cannot aggregate host routes into small network routes
		 * without confusing RIPv1 listeners into thinking the
		 * network routes are host routes.
		 */
		if ((ws.state & WS_ST_AG) && (ws.state & WS_ST_RIP2_ALL))
			ags |= AGS_AGGREGATE;

	} else {
		/* Always suppress network routes into other, existing
		 * network routes
		 */
		ags |= AGS_SUPPRESS;

		/* Generate supernets if allowed.
		 * If we can be heard by RIPv1 systems, we will
		 * later convert back to ordinary nets.
		 * This unifies dealing with received supernets.
		 */
		if ((ws.state & WS_ST_AG)
		    && ((RT->rt_state & RS_SUBNET)
			|| (ws.state & WS_ST_SUPER_AG)))
			ags |= AGS_AGGREGATE;
	}

	/* Do not send RIPv1 advertisements of subnets to other
	 * networks.  If possible, multicast them by RIPv2.
	 */
	if ((RT->rt_state & RS_SUBNET)
	    && !(ws.state & WS_ST_RIP2_ALL)
	    && !on_net(dst, ws.to_std_net, ws.to_std_mask))
		ags |= AGS_RIPV2 | AGS_AGGREGATE;


	/* Do not send a route back to where it came from, except in
	 * response to a query.  This is "split-horizon".  That means not
	 * advertising back to the same network and so via the same interface.
	 *
	 * We want to suppress routes that might have been fragmented
	 * from this route by a RIPv1 router and sent back to us, and so we
	 * cannot forget this route here.  Let the split-horizon route
	 * suppress the fragmented routes and then itself be forgotten.
	 *
	 * Include the routes for both ends of point-to-point interfaces
	 * among those suppressed by split-horizon, since the other side
	 * should know them as well as we do.
	 *
	 * Notice spare routes with the same metric that we are about to
	 * advertise, to split the horizon on redundant, inactive paths.
	 *
	 * Do not suppress advertisements of interface-related addresses on
	 * non-point-to-point interfaces.
	 * This ensures that we have something
	 * to say every 30 seconds to help detect broken Ethernets or
	 * other interfaces where one packet every 30 seconds costs nothing.
	 */
	if (ws.ifp != 0
	    && !(ws.state & WS_ST_QUERY)
	    && (ws.state & WS_ST_TO_ON_NET)
	    && (!(RT->rt_state & RS_IF)
		|| ws.ifp->int_if_flags & IFF_POINTOPOINT)) {
		for (rts = RT->rt_spares, i = NUM_SPARES; i != 0; i--, rts++) {
			if (rts->rts_metric > metric
			    || rts->rts_ifp != ws.ifp)
				continue;

			/* If we do not mark the route with AGS_SPLIT_HZ here,
			 * it will be poisoned-reverse, or advertised back
			 * toward its source with an infinite metric.
			 * If we have recently advertised the route with a
			 * better metric than we now have, then we should
			 * poison-reverse the route before suppressing it for
			 * split-horizon.
			 *
			 * In almost all cases, if there is no spare for the
			 * route then it is either old and dead or a brand
			 * new route.  If it is brand new, there is no need
			 * for poison-reverse.  If it is old and dead, it
			 * is already poisoned.
			 */
			if (RT->rt_poison_time < now_expire
			    || RT->rt_poison_metric >= metric
			    || RT->rt_spares[1].rts_gate == 0) {
				ags |= AGS_SPLIT_HZ;
				ags &= ~AGS_SUPPRESS;
			}
			metric = HOPCNT_INFINITY;
			break;
		}
	}

	/* Keep track of the best metric with which the
	 * route has been advertised recently.
	 */
	if (RT->rt_poison_metric >= metric
	    || RT->rt_poison_time < now_expire) {
		RT->rt_poison_time = now.tv_sec;
		RT->rt_poison_metric = metric;
	}

	/* Adjust the outgoing metric by the cost of the link.
	 * Avoid aggregation when a route is counting to infinity.
	 */
	pref = RT->rt_poison_metric + ws.metric;
	metric += ws.metric;

	/* Do not advertise stable routes that will be ignored,
	 * unless we are answering a query.
	 * If the route recently was advertised with a metric that
	 * would have been less than infinity through this interface,
	 * we need to continue to advertise it in order to poison it.
	 */
	if (metric >= HOPCNT_INFINITY) {
		if (!(ws.state & WS_ST_QUERY)
		    && (pref >= HOPCNT_INFINITY
			|| RT->rt_poison_time < now_garbage))
			return 0;

		metric = HOPCNT_INFINITY;
	}

	ag_check(dst, RT->rt_mask, 0, nhop, metric, pref,
		 RT->rt_seqno, RT->rt_tag, ags, supply_out);
	return 0;
#undef RT
}


/* Supply dst with the contents of the routing tables.
 * If this won't fit in one packet, chop it up into several.
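 * (supply_write() transmits and then resets a buffer each time it fills.)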
 */
void
supply(struct sockaddr_in *dst,
       struct interface *ifp,		/* output interface */
       enum output_type type,
       int flash,			/* 1=flash update */
       int vers,			/* RIP version */
       int passwd_ok)			/* OK to include cleartext password */
{
	struct rt_entry *rt;
	int def_metric;


	ws.state = 0;
	ws.gen_limit = 1024;

	ws.to = *dst;
	ws.to_std_mask = std_mask(ws.to.sin_addr.s_addr);
	ws.to_std_net = ntohl(ws.to.sin_addr.s_addr) & ws.to_std_mask;

	if (ifp != 0) {
		ws.to_mask = ifp->int_mask;
		ws.to_net = ifp->int_net;
		if (on_net(ws.to.sin_addr.s_addr, ws.to_net, ws.to_mask))
			ws.state |= WS_ST_TO_ON_NET;

	} else {
		ws.to_mask = ripv1_mask_net(ws.to.sin_addr.s_addr, 0);
		ws.to_net = ntohl(ws.to.sin_addr.s_addr) & ws.to_mask;
		rt = rtfind(dst->sin_addr.s_addr);
		if (rt)
			ifp = rt->rt_ifp;
	}

	ws.npackets = 0;
	if (flash)
		ws.state |= WS_ST_FLASH;

	if ((ws.ifp = ifp) == 0) {
		ws.metric = 1;
	} else {
		/* Adjust the advertised metric by the outgoing interface
		 * metric.
		 */
		ws.metric = ifp->int_metric + 1 + ifp->int_adj_outmetric;
	}

	ripv12_buf.rip.rip_vers = vers;

	switch (type) {
	case OUT_MULTICAST:
		if (ifp->int_if_flags & IFF_MULTICAST)
			v2buf.type = OUT_MULTICAST;
		else
			v2buf.type = NO_OUT_MULTICAST;
		v12buf.type = OUT_BROADCAST;
		break;

	case OUT_QUERY:
		ws.state |= WS_ST_QUERY;
		/* FALLTHROUGH */
	case OUT_BROADCAST:
	case OUT_UNICAST:
		v2buf.type = (vers == RIPv2) ? type : NO_OUT_RIPV2;
		v12buf.type = type;
		break;

	case NO_OUT_MULTICAST:
	case NO_OUT_RIPV2:
		break;			/* no output */
	}

	if (vers == RIPv2) {
		/* full RIPv2 only if cannot be heard by RIPv1 listeners */
		if (type != OUT_BROADCAST)
			ws.state |= WS_ST_RIP2_ALL;
		if ((ws.state & WS_ST_QUERY)
		    || !(ws.state & WS_ST_TO_ON_NET)) {
			ws.state |= (WS_ST_AG | WS_ST_SUPER_AG);
		} else if (ifp == 0 || !(ifp->int_state & IS_NO_AG)) {
			ws.state |= WS_ST_AG;
			if (type != OUT_BROADCAST
			    && (ifp == 0
				|| !(ifp->int_state & IS_NO_SUPER_AG)))
				ws.state |= WS_ST_SUPER_AG;
		}
	}

	ws.a = (vers == RIPv2) ? find_auth(ifp) : 0;
	if (!passwd_ok && ws.a != 0 && ws.a->type == RIP_AUTH_PW)
		ws.a = 0;
	clr_ws_buf(&v12buf,ws.a);
	clr_ws_buf(&v2buf,ws.a);

	/* Fake a default route if asked and if there is not already
	 * a better, real default route.
	 */
	if (supplier && (def_metric = ifp->int_d_metric) != 0) {
		if (0 == (rt = rtget(RIP_DEFAULT, 0))
		    || rt->rt_metric+ws.metric >= def_metric) {
			ws.state |= WS_ST_DEFAULT;
			ag_check(0, 0, 0, 0, def_metric, def_metric,
				 0, 0, 0, supply_out);
		} else {
			def_metric = rt->rt_metric+ws.metric;
		}

		/* If both RIPv2 and the poor-man's router discovery
		 * kludge are on, arrange to advertise an extra
		 * default route via RIPv1.
		 */
		if ((ws.state & WS_ST_RIP2_ALL)
		    && (ifp->int_state & IS_PM_RDISC)) {
			ripv12_buf.rip.rip_vers = RIPv1;
			v12buf.n->n_family = RIP_AF_INET;
			v12buf.n->n_dst = htonl(RIP_DEFAULT);
			v12buf.n->n_metric = htonl(def_metric);
			v12buf.n++;
		}
	}

	(void)rn_walktree(rhead, walk_supply, 0);
	ag_flush(0,0,supply_out);

	/* Flush the packet buffers, provided they are not empty and
	 * do not contain only the password.
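	 * (A buffer holding only the authentication entry still has
	 * n == base+1 and base->n_family == RIP_AF_AUTH, so it is skipped.)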
	 */
	if (v12buf.n != v12buf.base
	    && (v12buf.n > v12buf.base+1
		|| v12buf.base->n_family != RIP_AF_AUTH))
		supply_write(&v12buf);
	if (v2buf.n != v2buf.base
	    && (v2buf.n > v2buf.base+1
		|| v2buf.base->n_family != RIP_AF_AUTH))
		supply_write(&v2buf);

	/* If we sent nothing and this is an answer to a query, send
	 * an empty buffer.
	 */
	if (ws.npackets == 0
	    && (ws.state & WS_ST_QUERY))
		supply_write(&v12buf);
}


/* send all of the routing table or just do a flash update
 */
void
rip_bcast(int flash)
{
#ifdef _HAVE_SIN_LEN
	static struct sockaddr_in dst = {sizeof(dst), AF_INET, 0, {0}, {0}};
#else
	static struct sockaddr_in dst = {AF_INET};
#endif
	struct interface *ifp;
	enum output_type type;
	int vers;
	struct timeval rtime;


	need_flash = 0;
	intvl_random(&rtime, MIN_WAITTIME, MAX_WAITTIME);
	no_flash = rtime;
	timevaladd(&no_flash, &now);

	if (rip_sock < 0)
		return;

	trace_act("send %s and inhibit dynamic updates for %.3f sec",
		  flash ? "dynamic update" : "all routes",
		  rtime.tv_sec + ((float)rtime.tv_usec)/1000000.0);

	for (ifp = ifnet; ifp != 0; ifp = ifp->int_next) {
		/* Skip interfaces not doing RIP.
		 * Do try broken interfaces to see if they have healed.
		 */
		if (IS_RIP_OUT_OFF(ifp->int_state))
			continue;

		/* skip turned off interfaces */
		if (!iff_up(ifp->int_if_flags))
			continue;

		vers = (ifp->int_state & IS_NO_RIPV1_OUT) ? RIPv2 : RIPv1;

		if (ifp->int_if_flags & IFF_BROADCAST) {
			/* ordinary, hardware interface */
			dst.sin_addr.s_addr = ifp->int_brdaddr;

			if (vers == RIPv2
			    && !(ifp->int_state & IS_NO_RIP_MCAST)) {
				type = OUT_MULTICAST;
			} else {
				type = OUT_BROADCAST;
			}

		} else if (ifp->int_if_flags & IFF_POINTOPOINT) {
			/* point-to-point hardware interface */
			dst.sin_addr.s_addr = ifp->int_dstaddr;
			type = OUT_UNICAST;

		} else if (ifp->int_state & IS_REMOTE) {
			/* remote interface */
			dst.sin_addr.s_addr = ifp->int_addr;
			type = OUT_UNICAST;

		} else {
			/* ATM, HIPPI, etc. */
			continue;
		}

		supply(&dst, ifp, type, flash, vers, 1);
	}

	update_seqno++;			/* all routes are up to date */
}


/* Ask for routes
 * Do it only once to an interface, and not even after the interface
 * was broken and recovered.
 */
void
rip_query(void)
{
#ifdef _HAVE_SIN_LEN
	static struct sockaddr_in dst = {sizeof(dst), AF_INET, 0, {0}, {0}};
#else
	static struct sockaddr_in dst = {AF_INET};
#endif
	struct interface *ifp;
	struct rip buf;
	enum output_type type;


	if (rip_sock < 0)
		return;

	memset(&buf, 0, sizeof(buf));

	for (ifp = ifnet; ifp; ifp = ifp->int_next) {
		/* Skip interfaces already queried.
		 * Do not ask via interfaces through which we don't
		 * accept input.  Do not ask via interfaces that cannot
		 * send RIP packets.
		 * Do try broken interfaces to see if they have healed.
		 */
		if (IS_RIP_IN_OFF(ifp->int_state)
		    || ifp->int_query_time != NEVER)
			continue;

		/* skip turned off interfaces */
		if (!iff_up(ifp->int_if_flags))
			continue;

		buf.rip_vers = (ifp->int_state&IS_NO_RIPV1_OUT) ?
			RIPv2 : RIPv1;
		buf.rip_cmd = RIPCMD_REQUEST;
		buf.rip_nets[0].n_family = RIP_AF_UNSPEC;
		buf.rip_nets[0].n_metric = htonl(HOPCNT_INFINITY);

		/* Send a RIPv1 query only if allowed and if we will
		 * listen to RIPv1 routers.
		 */
		if ((ifp->int_state & IS_NO_RIPV1_OUT)
		    || (ifp->int_state & IS_NO_RIPV1_IN)) {
			buf.rip_vers = RIPv2;
		} else {
			buf.rip_vers = RIPv1;
		}

		if (ifp->int_if_flags & IFF_BROADCAST) {
			/* ordinary, hardware interface */
			dst.sin_addr.s_addr = ifp->int_brdaddr;

			/* Broadcast RIPv1 queries and RIPv2 queries
			 * when the hardware cannot multicast.
			 */
			if (buf.rip_vers == RIPv2
			    && (ifp->int_if_flags & IFF_MULTICAST)
			    && !(ifp->int_state & IS_NO_RIP_MCAST)) {
				type = OUT_MULTICAST;
			} else {
				type = OUT_BROADCAST;
			}

		} else if (ifp->int_if_flags & IFF_POINTOPOINT) {
			/* point-to-point hardware interface */
			dst.sin_addr.s_addr = ifp->int_dstaddr;
			type = OUT_UNICAST;

		} else if (ifp->int_state & IS_REMOTE) {
			/* remote interface */
			dst.sin_addr.s_addr = ifp->int_addr;
			type = OUT_UNICAST;

		} else {
			/* ATM, HIPPI, etc. */
			continue;
		}

		ifp->int_query_time = now.tv_sec+SUPPLY_INTERVAL;
		if (output(type, &dst, ifp, &buf, sizeof(buf)) < 0)
			if_sick(ifp);
	}
}