1 /*
2 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
3 * Use is subject to license terms.
4 *
5 * Copyright (c) 1983, 1988, 1993
6 * The Regents of the University of California. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgment:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * $FreeBSD: src/sbin/routed/output.c,v 1.7 2000/08/11 08:24:38 sheldonh Exp $
37 */
38
39 #include "defs.h"
40 #include <md5.h>
41 #include <alloca.h>
42
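/*
 * Bumped by rip_bcast() once the entire table has been advertised.
 * supply_out() compares it with each aggregate's sequence number so
 * that flash updates carry only recently changed routes.
 */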
43 uint_t update_seqno;
44
45
46 /*
47 * walk the tree of routes with this for output
48 */
49 static struct {
50 struct sockaddr_in to;
51 in_addr_t to_mask;
52 in_addr_t to_net;
53 in_addr_t to_std_mask;
54 in_addr_t to_std_net;
55 struct interface *ifp; /* usually output interface */
56 struct auth *a;
57 uint8_t metric; /* adjust metrics by interface */
58 uint32_t npackets;
59 uint32_t gen_limit;
60 #define WS_GEN_LIMIT_MAX 1024
61 uint16_t state;
62 #define WS_ST_FLASH 0x001 /* send only changed routes */
63 #define WS_ST_RIP2_ALL 0x002 /* send full featured RIPv2 */
64 #define WS_ST_AG 0x004 /* ok to aggregate subnets */
65 #define WS_ST_SUPER_AG 0x008 /* ok to aggregate networks */
66 #define WS_ST_QUERY 0x010 /* responding to a query */
67 #define WS_ST_TO_ON_NET 0x020 /* sending onto one of our nets */
68 #define WS_ST_DEFAULT 0x040 /* faking a default */
69 } ws;
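
/*
 * supply() fills in ws for each destination before walking the routing
 * table; walk_supply() and supply_out() consult it while deciding what
 * to put into the outgoing buffers.
 */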
70
71 /* A buffer for what can be heard by both RIPv1 and RIPv2 listeners */
72 struct ws_buf v12buf;
73 static union pkt_buf ripv12_buf;
74
75 /* Another for only RIPv2 listeners */
76 static struct ws_buf v2buf;
77 static union pkt_buf rip_v2_buf;
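
/*
 * When both kinds of listener may hear us, supply_out() chooses between
 * the two: entries that RIPv1 listeners could misread (for example,
 * subnet routes whose mask differs from the natural RIPv1 mask) go only
 * into v2buf; everything else goes into the shared v12buf.
 */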
78
79
80
81 void
82 bufinit(void)
83 {
84 ripv12_buf.rip.rip_cmd = RIPCMD_RESPONSE;
85 v12buf.buf = &ripv12_buf.rip;
86 v12buf.base = &v12buf.buf->rip_nets[0];
87
88 rip_v2_buf.rip.rip_cmd = RIPCMD_RESPONSE;
89 rip_v2_buf.rip.rip_vers = RIPv2;
90 v2buf.buf = &rip_v2_buf.rip;
91 v2buf.base = &v2buf.buf->rip_nets[0];
92 }
93
94
95 /*
96 * Send the contents of the global buffer via the non-multicast socket
97 */
98 int /* <0 on failure */
99 output(enum output_type type,
100 struct sockaddr_in *dst, /* send to here */
101 struct interface *ifp,
102 struct rip *buf,
103 int size) /* this many bytes */
104 {
105 struct sockaddr_in sin;
106 int flags;
107 const char *msg;
108 int res;
109 int ifindex;
110 struct in_addr addr;
111
112 sin = *dst;
113 if (sin.sin_port == 0)
114 sin.sin_port = htons(RIP_PORT);
115
116 flags = 0;
117
118 if (ifp == NULL && type == OUT_MULTICAST) {
119 msglog("Cannot send RIP message to %s",
120 inet_ntoa(sin.sin_addr));
121 return (-1);
122 }
123
124 switch (type) {
125 case OUT_QUERY:
126 msg = "Answer Query";
127 break;
128 case OUT_UNICAST:
129 msg = "Send";
130 flags = MSG_DONTROUTE;
131 break;
132 case OUT_BROADCAST:
133 msg = "Send bcast";
134 break;
135 case OUT_MULTICAST:
136 msg = "Send mcast";
137 break;
138
139 case NO_OUT_MULTICAST:
140 case NO_OUT_RIPV2:
141 default:
142 #ifdef DEBUG
143 abort();
144 #endif
145 return (-1);
146 }
147
148 /*
149 * IP_PKTINFO overrides IP_MULTICAST_IF, so we don't set ifindex
150 * for multicast traffic.
151 */
152 ifindex = (type != OUT_MULTICAST && type != OUT_QUERY &&
153 ifp != NULL && ifp->int_phys != NULL) ?
154 ifp->int_phys->phyi_index : 0;
155
156 if (rip_sock_interface != ifp) {
157 /*
158 * For multicast, we have to choose the source
159 * address. This is either the local address
160 * (non-point-to-point) or the remote address.
161 */
162 if (ifp != NULL) {
163 addr.s_addr = (ifp->int_if_flags & IFF_POINTOPOINT) ?
164 ifp->int_dstaddr : ifp->int_addr;
165 if (type == OUT_MULTICAST &&
166 setsockopt(rip_sock, IPPROTO_IP,
167 IP_MULTICAST_IF, &addr, sizeof (addr)) == -1) {
168 LOGERR("setsockopt(rip_sock, IP_MULTICAST_IF)");
169 return (-1);
170 }
171 }
172 rip_sock_interface = ifp;
173 }
174
175 trace_rip(msg, "to", &sin, ifp, buf, size);
176
177 res = sendtoif(rip_sock, buf, size, flags, &sin, ifindex);
178 if (res < 0 && (ifp == NULL || !(ifp->int_state & IS_BROKE))) {
179 writelog(LOG_WARNING, "%s sendto(%s%s%s.%d): %s", msg,
180 ifp != NULL ? ifp->int_name : "",
181 ifp != NULL ? ", " : "",
182 inet_ntoa(sin.sin_addr),
183 ntohs(sin.sin_port),
184 rip_strerror(errno));
185 }
186
187 return (res);
188 }
189
190 /*
191 * Semantically identical to sendto(), but sends the message through a
192 * specific interface (if ifindex is non-zero) using IP_PKTINFO.
193 */
194 int
195 sendtoif(int fd, const void *buf, uint_t bufsize, uint_t flags,
196 struct sockaddr_in *sinp, uint_t ifindex)
197 {
198 struct iovec iov;
199 struct msghdr msg;
200 struct cmsghdr *cmsgp;
201 struct in_pktinfo *ipip;
202
203 iov.iov_base = (void *)buf;
204 iov.iov_len = bufsize;
205
206 (void) memset(&msg, 0, sizeof (struct msghdr));
207 msg.msg_name = (struct sockaddr *)sinp;
208 msg.msg_namelen = sizeof (struct sockaddr_in);
209 msg.msg_iov = &iov;
210 msg.msg_iovlen = 1;
211
212 if (ifindex != 0) {
213 /*
214 * We can't precisely predict the alignment padding we'll
215 * need, so we allocate the maximum alignment and then
216 * use CMSG_NXTHDR() to fix it up at the end.
217 */
218 msg.msg_controllen = sizeof (*cmsgp) + _MAX_ALIGNMENT +
219 sizeof (*ipip) + _MAX_ALIGNMENT + sizeof (*cmsgp);
220 msg.msg_control = alloca(msg.msg_controllen);
221
222 cmsgp = CMSG_FIRSTHDR(&msg);
223 ipip = (void *)CMSG_DATA(cmsgp);
224 (void) memset(ipip, 0, sizeof (struct in_pktinfo));
225 ipip->ipi_ifindex = ifindex;
226 cmsgp->cmsg_len = (caddr_t)(ipip + 1) - (caddr_t)cmsgp;
227 cmsgp->cmsg_type = IP_PKTINFO;
228 cmsgp->cmsg_level = IPPROTO_IP;
229
230 /*
231 * Correct the control message length.
232 */
233 cmsgp = CMSG_NXTHDR(&msg, cmsgp);
234 msg.msg_controllen = (caddr_t)cmsgp - (caddr_t)msg.msg_control;
235 }
236
237 return (sendmsg(fd, &msg, flags));
238 }
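
/*
 * An alternative sketch of the ancillary-data setup above, assuming the
 * CMSG_SPACE()/CMSG_LEN() macros are available (they are not used in
 * this file): size the buffer up front instead of correcting
 * msg_controllen afterwards.
 *
 *	union {
 *		struct cmsghdr align;
 *		char buf[CMSG_SPACE(sizeof (struct in_pktinfo))];
 *	} cbuf;
 *
 *	msg.msg_control = cbuf.buf;
 *	msg.msg_controllen = sizeof (cbuf.buf);
 *	cmsgp = CMSG_FIRSTHDR(&msg);
 *	cmsgp->cmsg_level = IPPROTO_IP;
 *	cmsgp->cmsg_type = IP_PKTINFO;
 *	cmsgp->cmsg_len = CMSG_LEN(sizeof (struct in_pktinfo));
 *	ipip = (struct in_pktinfo *)CMSG_DATA(cmsgp);
 *	(void) memset(ipip, 0, sizeof (*ipip));
 *	ipip->ipi_ifindex = ifindex;
 */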
239
240 /*
241 * Find the first key for a packet to send.
242 * Try for a key that is eligible and has not expired, but settle for
243 * the last key if they have all expired.
244 * If no key is ready yet, give up.
245 */
246 struct auth *
247 find_auth(struct interface *ifp)
248 {
249 struct auth *ap, *res = NULL;
250 int i;
251
252
253 if (ifp == NULL)
254 return (NULL);
255
256 if ((ap = ifp->int_auth) == NULL)
257 return (NULL);
258
259 for (i = 0; i < MAX_AUTH_KEYS; i++, ap++) {
260 /* stop looking after the last key */
261 if (ap->type == RIP_AUTH_NONE)
262 break;
263
264 /* ignore keys that are not ready yet */
265 if ((ulong_t)ap->start > (ulong_t)clk.tv_sec)
266 continue;
267
268 if ((ulong_t)ap->end < (ulong_t)clk.tv_sec) {
269 /* note best expired password as a fall-back */
270 if (res == NULL ||
271 (((ulong_t)ap->end > (ulong_t)res->end)) &&
272 ((ulong_t)res->end < (ulong_t)clk.tv_sec))
273 res = ap;
274 continue;
275 }
276
277 /* note key with the best future */
278 if (res == NULL || (ulong_t)res->end < (ulong_t)ap->end)
279 res = ap;
280 }
281 return (res);
282 }
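
/*
 * For example, with keys valid over [0,100], [50,200] and [300,400] and
 * clk.tv_sec == 250, the third key is not yet ready and the first two
 * have expired, so the key ending at 200 is returned as the best
 * expired fall-back; at clk.tv_sec == 60 the same key wins because it
 * has the longer remaining lifetime.
 */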
283
284
285 void
286 clr_ws_buf(struct ws_buf *wb, struct auth *ap)
287 {
288 struct netauth *na;
289
290 wb->lim = wb->base + NETS_LEN;
291 wb->n = wb->base;
292 (void) memset(wb->n, 0, NETS_LEN*sizeof (*wb->n));
293
294 /*
295 * (start to) install authentication if appropriate
296 */
297 if (ap == NULL)
298 return;
299
300 na = (struct netauth *)wb->n;
301 if (ap->type == RIP_AUTH_PW) {
302 na->a_family = RIP_AF_AUTH;
303 na->a_type = RIP_AUTH_PW;
304 (void) memcpy(na->au.au_pw, ap->key, sizeof (na->au.au_pw));
305 wb->n++;
306
307 } else if (ap->type == RIP_AUTH_MD5) {
308 na->a_family = RIP_AF_AUTH;
309 na->a_type = RIP_AUTH_MD5;
310 na->au.a_md5.md5_keyid = ap->keyid;
311 na->au.a_md5.md5_auth_len = RIP_AUTH_MD5_LEN;
312 na->au.a_md5.md5_seqno = htonl(clk.tv_sec);
313 wb->n++;
314 wb->lim--; /* make room for trailer */
315 }
316 }
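
/*
 * Layout after clearing, assuming the usual 25-entry RIP packet
 * (NETS_LEN == 25): with no authentication all 25 slots hold routes;
 * with a cleartext password the first slot holds the RIP_AF_AUTH entry,
 * leaving 24; with MD5 the first slot holds the auth header and the
 * last is reserved for the digest trailer written by end_md5_auth(),
 * leaving 23.
 */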
317
318
319 void
320 end_md5_auth(struct ws_buf *wb, struct auth *ap)
321 {
322 struct netauth *na, *na2;
323 MD5_CTX md5_ctx;
324 int len;
325
326 na = (struct netauth *)wb->base;
327 na2 = (struct netauth *)wb->n;
328 len = (char *)na2-(char *)wb->buf;
329 na2->a_family = RIP_AF_AUTH;
330 na2->a_type = RIP_AUTH_TRAILER;
331 na->au.a_md5.md5_pkt_len = htons(len);
332 MD5Init(&md5_ctx);
333 /* len+4 to include auth trailer's family/type in MD5 sum */
334 MD5Update(&md5_ctx, (uchar_t *)wb->buf, len + 4);
335 MD5Update(&md5_ctx, ap->key, RIP_AUTH_MD5_LEN);
336 MD5Final(na2->au.au_pw, &md5_ctx);
337 wb->n++;
338 }
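
/*
 * This matches the Keyed-MD5 scheme of RFC 2082: md5_pkt_len records
 * the offset of the trailer, the digest covers the packet through the
 * trailer's family/type words (hence len + 4), and the 16-byte secret
 * is mixed in with a second MD5Update() rather than being sent on the
 * wire; the finished digest is stored where a password would otherwise
 * go.
 */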
339
340
341 /*
342 * Send the buffer
343 */
344 static void
345 supply_write(struct ws_buf *wb)
346 {
347 /*
348 * Output multicast only if legal.
349 * If we would multicast and it would be illegal, then discard the
350 * packet.
351 */
352 switch (wb->type) {
353 case NO_OUT_MULTICAST:
354 trace_pkt("skip multicast to %s because impossible",
355 naddr_ntoa(ws.to.sin_addr.s_addr));
356 break;
357 case NO_OUT_RIPV2:
358 break;
359 default:
360 if (ws.a != NULL && ws.a->type == RIP_AUTH_MD5)
361 end_md5_auth(wb, ws.a);
362 if (output(wb->type, &ws.to, ws.ifp, wb->buf,
363 ((char *)wb->n - (char *)wb->buf)) < 0 && ws.ifp != NULL)
364 if_sick(ws.ifp, _B_FALSE);
365 ws.npackets++;
366 break;
367 }
368
369 clr_ws_buf(wb, ws.a);
370 }
371
372
373 /*
374 * Put an entry into the packet
375 */
376 static void
377 supply_out(struct ag_info *ag)
378 {
379 uint32_t dstcount;
380 in_addr_t mask, v1_mask, dst_h, ddst_h = 0;
381 struct ws_buf *wb;
382
383
384 /*
385 * Skip this route if doing a flash update and it and the routes
386 * it aggregates have not changed recently.
387 */
388 if (ag->ag_seqno < update_seqno && (ws.state & WS_ST_FLASH))
389 return;
390
391 dst_h = ag->ag_dst_h;
392 mask = ag->ag_mask;
393 v1_mask = ripv1_mask_host(htonl(dst_h),
394 (ws.state & WS_ST_TO_ON_NET) ? ws.ifp : NULL);
395 dstcount = 0;
396
397 /*
398 * If we are sending RIPv2 packets that cannot (or must not) be
399 * heard by RIPv1 listeners, do not worry about sub- or supernets.
400 * Subnets (from other networks) can only be sent via multicast.
401 * A pair of subnet routes might have been promoted so that they
402 * are legal to send by RIPv1.
403 * If RIPv1 is off, use the multicast buffer.
404 */
405 if ((ws.state & WS_ST_RIP2_ALL) ||
406 ((ag->ag_state & AGS_RIPV2) && v1_mask != mask)) {
407 /* use the RIPv2-only buffer */
408 wb = &v2buf;
409
410 } else {
411 /*
412 * use the RIPv1-or-RIPv2 buffer
413 */
414 wb = &v12buf;
415
416 /*
417 * Convert supernet route into corresponding set of network
418 * routes for RIPv1, but leave non-contiguous netmasks
419 * to ag_check().
420 */
421 if (v1_mask > mask &&
422 mask + (mask & -mask) == 0) {
423 ddst_h = v1_mask & -v1_mask;
424 dstcount = (v1_mask & ~mask)/ddst_h;
425
426 if (dstcount > ws.gen_limit) {
427 /*
428 * Punt if we would have to generate an
429 * unreasonable number of routes.
430 */
431 if (TRACECONTENTS)
432 trace_misc("sending %s-->%s as 1"
433 " instead of %d routes",
434 addrname(htonl(dst_h), mask, 1),
435 naddr_ntoa(ws.to.sin_addr.s_addr),
436 dstcount + 1);
437 dstcount = 0;
438
439 } else {
440 mask = v1_mask;
441 ws.gen_limit -= dstcount;
442 }
443 }
444 }
445
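	/*
	 * Example of the split above: advertising a supernet such as
	 * 192.168.0.0 with mask 0xfffffc00 (/22) to listeners whose
	 * RIPv1 natural mask is 0xffffff00 (/24) yields ddst_h = 0x100
	 * and dstcount = 3, so the loop below emits four /24 routes:
	 * 192.168.0.0, 192.168.1.0, 192.168.2.0 and 192.168.3.0.
	 */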
446 do {
447 wb->n->n_family = RIP_AF_INET;
448 wb->n->n_dst = htonl(dst_h);
449 /*
450 * If the route is from router-discovery or we are
451 * shutting down, or this is a broken/sick interface,
452 * admit only a bad metric.
453 */
454 wb->n->n_metric = ((stopint || ag->ag_metric < 1 ||
455 (ag->ag_ifp && (ag->ag_ifp->int_state &
456 (IS_BROKE|IS_SICK)))) ? HOPCNT_INFINITY : ag->ag_metric);
457 wb->n->n_metric = htonl(wb->n->n_metric);
458 /*
459 * Any non-zero bits in the supposedly unused RIPv1 fields
460 * cause the old `routed` to ignore the route.
461 * That means the mask and so forth cannot be sent
462 * in the hybrid RIPv1/RIPv2 mode.
463 */
464 if (ws.state & WS_ST_RIP2_ALL) {
465 if (ag->ag_nhop != 0 &&
466 ((ws.state & WS_ST_QUERY) ||
467 (ag->ag_nhop != ws.ifp->int_addr &&
468 on_net(ag->ag_nhop, ws.ifp->int_net,
469 ws.ifp->int_mask)) &&
470 ifwithaddr(ag->ag_nhop, _B_FALSE, _B_FALSE) ==
471 NULL))
472 wb->n->n_nhop = ag->ag_nhop;
473 wb->n->n_mask = htonl(mask);
474 wb->n->n_tag = ag->ag_tag;
475 }
476 dst_h += ddst_h;
477
478 if (++wb->n >= wb->lim)
479 supply_write(wb);
480 } while (dstcount-- > 0);
481 }
482
483
484 /*
485 * Supply one route from the table
486 */
487 /* ARGSUSED */
488 static int
489 walk_supply(struct radix_node *rn, void *argp)
490 {
491 #define RT ((struct rt_entry *)rn)
492 ushort_t ags;
493 uint8_t metric, pref;
494 in_addr_t dst, nhop;
495 struct rt_spare *rts;
496 uint_t sparecount;
497
498
499 /*
500 * Do not advertise external remote interfaces or passive interfaces.
501 */
502 if ((RT->rt_state & RS_IF) && RT->rt_ifp != NULL &&
503 (RT->rt_ifp->int_state & IS_PASSIVE) &&
504 !(RT->rt_state & RS_MHOME))
505 return (0);
506 /*
507 * Do not advertise routes learnt from /etc/gateways.
508 */
509 if (RT->rt_spares[0].rts_origin == RO_FILE)
510 return (0);
511
512 /*
513 * Do not advertise routes which would lead to forwarding on a
514 * non-forwarding interface.
515 */
516 if (RT->rt_state & RS_NOPROPAGATE)
517 return (0);
518
519 /*
520 * If being quiet about our ability to forward, then
521 * do not say anything unless responding to a query,
522 * except about our main interface.
523 */
524 if (!should_supply(NULL) && !(ws.state & WS_ST_QUERY) &&
525 !(RT->rt_state & RS_MHOME))
526 return (0);
527
528 dst = RT->rt_dst;
529
530 /*
531 * do not collide with the fake default route
532 */
533 if (dst == RIP_DEFAULT && (ws.state & WS_ST_DEFAULT))
534 return (0);
535
536 if (RT->rt_state & RS_NET_SYN) {
537 if (RT->rt_state & RS_NET_INT) {
538 /*
539 * Do not send manual synthetic network routes
540 * into the subnet.
541 */
542 if (on_net(ws.to.sin_addr.s_addr,
543 ntohl(dst), RT->rt_mask))
544 return (0);
545
546 } else {
547 /*
548 * Do not send automatic synthetic network routes
549 * if they are not needed because no RIPv1 listeners
550 * can hear them.
551 */
552 if (ws.state & WS_ST_RIP2_ALL)
553 return (0);
554
555 /*
556 * Do not send automatic synthetic network routes to
557 * the real subnet.
558 */
559 if (on_net(ws.to.sin_addr.s_addr,
560 ntohl(dst), RT->rt_mask))
561 return (0);
562 }
563 nhop = 0;
564
565 } else {
566 /*
567 * Advertise the next hop if this is not a route for one
568 * of our interfaces and the next hop is on the same
569 * network as the target.
570 * The final determination is made by supply_out().
571 */
572 if (!(RT->rt_state & RS_IF) && !(RT->rt_state & RS_MHOME) &&
573 RT->rt_gate != loopaddr)
574 nhop = RT->rt_gate;
575 else
576 nhop = 0;
577 }
578
579 metric = RT->rt_metric;
580 ags = 0;
581
582 if (!RT_ISHOST(RT)) {
583 /*
584 * Always suppress network routes into other, existing
585 * network routes
586 */
587 ags |= AGS_SUPPRESS;
588
589 /*
590 * Generate supernets if allowed.
591 * If we can be heard by RIPv1 systems, we will
592 * later convert back to ordinary nets.
593 * This unifies dealing with received supernets.
594 */
595 if ((ws.state & WS_ST_AG) && ((RT->rt_state & RS_SUBNET) ||
596 (ws.state & WS_ST_SUPER_AG)))
597 ags |= AGS_AGGREGATE;
598 } else if (!(RT->rt_state & RS_MHOME)) {
599 /*
600 * We should always suppress (into existing network routes)
601 * the host routes for the local end of our point-to-point
602 * links.
603 * If we are suppressing host routes in general, then do so.
604 * Avoid advertising host routes onto their own network,
605 * where they should be handled by proxy-ARP.
606 */
607 if ((RT->rt_state & RS_LOCAL) || ridhosts ||
608 on_net(dst, ws.to_net, ws.to_mask))
609 ags |= AGS_SUPPRESS;
610
611 /*
612 * Aggregate stray host routes into network routes if allowed.
613 * We cannot aggregate host routes into small network routes
614 * without confusing RIPv1 listeners into thinking the
615 * network routes are host routes.
616 */
617 if ((ws.state & WS_ST_AG) && (ws.state & WS_ST_RIP2_ALL))
618 ags |= AGS_AGGREGATE;
619 }
620
621 /*
622 * Do not send RIPv1 advertisements of subnets to other
623 * networks. If possible, multicast them by RIPv2.
624 */
625 if ((RT->rt_state & RS_SUBNET) && !(ws.state & WS_ST_RIP2_ALL) &&
626 !on_net(dst, ws.to_std_net, ws.to_std_mask))
627 ags |= AGS_RIPV2 | AGS_AGGREGATE;
628
629
630 /*
631 * Do not send a route back to where it came from, except in
632 * response to a query. This is "split-horizon". That means not
633 * advertising back to the same network and so via the same interface.
634 *
635 * We want to suppress routes that might have been fragmented
636 * from this route by a RIPv1 router and sent back to us, and so we
637 * cannot forget this route here. Let the split-horizon route
638 * suppress the fragmented routes and then itself be forgotten.
639 *
640 * Include the routes for both ends of point-to-point interfaces
641 * among those suppressed by split-horizon, since the other side
642 * should know them as well as we do.
643 *
644 * Notice spare routes with the same metric that we are about to
645 * advertise, to split the horizon on redundant, inactive paths.
646 */
647 if (ws.ifp != NULL && !(ws.state & WS_ST_QUERY) &&
648 (ws.state & WS_ST_TO_ON_NET) && (!(RT->rt_state & RS_IF) ||
649 (ws.ifp->int_if_flags & IFF_POINTOPOINT))) {
650 for (rts = RT->rt_spares, sparecount = 0;
651 sparecount < RT->rt_num_spares; sparecount++, rts++) {
652 if (rts->rts_metric > metric || rts->rts_ifp != ws.ifp)
653 continue;
654
655 /*
656 * If we do not mark the route with AGS_SPLIT_HZ here,
657 * it will be poisoned-reverse, or advertised back
658 * toward its source with an infinite metric.
659 * If we have recently advertised the route with a
660 * better metric than we now have, then we should
661 * poison-reverse the route before suppressing it for
662 * split-horizon.
663 *
664 * In almost all cases, if there is no spare for the
665 * route then it is either old and dead or a brand
666 * new route. If it is brand new, there is no need
667 * for poison-reverse. If it is old and dead, it
668 * is already poisoned.
669 */
670 if (RT->rt_poison_time < now_expire ||
671 RT->rt_poison_metric >= metric ||
672 RT->rt_spares[1].rts_gate == 0) {
673 ags |= AGS_SPLIT_HZ;
674 ags &= ~AGS_SUPPRESS;
675 }
676 metric = HOPCNT_INFINITY;
677 break;
678 }
679 }
680
681 /*
682 * Keep track of the best metric with which the
683 * route has been advertised recently.
684 */
685 if (RT->rt_poison_metric >= metric ||
686 RT->rt_poison_time < now_expire) {
687 RT->rt_poison_time = now.tv_sec;
688 RT->rt_poison_metric = metric;
689 }
690
691 /*
692 * Adjust the outgoing metric by the cost of the link.
693 * Avoid aggregation when a route is counting to infinity.
694 */
695 pref = RT->rt_poison_metric + ws.metric;
696 metric += ws.metric;
697
698 /*
699 * If this is a static route pointing to the same interface
700 * upon which we are sending out the RIP RESPONSE
701 * adjust the preference so that we don't aggregate into this
702 * route. Note that the maximum possible hop count on a route
703 * per RFC 2453 is 16 (HOPCNT_INFINITY)
704 */
705 if ((RT->rt_state & RS_STATIC) && (ws.ifp == RT->rt_ifp))
706 pref = (HOPCNT_INFINITY+1);
707
708 /*
709 * Do not advertise stable routes that will be ignored,
710 * unless we are answering a query.
711 * If the route recently was advertised with a metric that
712 * would have been less than infinity through this interface,
713 * we need to continue to advertise it in order to poison it.
714 */
715 if (metric >= HOPCNT_INFINITY) {
716 if (!(ws.state & WS_ST_QUERY) && (pref >= HOPCNT_INFINITY ||
717 RT->rt_poison_time < now_garbage))
718 return (0);
719
720 metric = HOPCNT_INFINITY;
721 }
722
723 /*
724 * supply this route out on the wire; we only care about dest/mask
725 * and so can ignore all rt_spares[i] with i > 0
726 */
727 ag_check(dst, RT->rt_mask, 0, RT->rt_ifp, nhop, metric, pref,
728 RT->rt_seqno, RT->rt_tag, ags, supply_out);
729 return (0);
730 #undef RT
731 }
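
/*
 * ag_check() may hold an entry while looking for routes it can
 * aggregate with; supply_out() is invoked as its callback once an entry
 * (or an aggregate covering it) is ready, and the ag_flush() call in
 * supply() drains anything still pending at the end of the walk.
 */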
732
733
734 /*
735 * Supply dst with the contents of the routing tables.
736 * If this won't fit in one packet, chop it up into several.
737 */
738 void
739 supply(struct sockaddr_in *dst,
740 struct interface *ifp, /* output interface */
741 enum output_type type,
742 int flash, /* 1=flash update */
743 int vers, /* RIP version */
744 boolean_t passwd_ok) /* OK to include cleartext password */
745 {
746 struct rt_entry *rt;
747 uint8_t def_metric;
748
749
750 ws.state = 0;
751 ws.gen_limit = WS_GEN_LIMIT_MAX;
752
753 ws.to = *dst;
754 ws.to_std_mask = std_mask(ws.to.sin_addr.s_addr);
755 ws.to_std_net = ntohl(ws.to.sin_addr.s_addr) & ws.to_std_mask;
756
757 if (ifp != NULL) {
758 ws.to_mask = ifp->int_mask;
759 ws.to_net = ifp->int_net;
760 if (on_net(ws.to.sin_addr.s_addr, ws.to_net, ws.to_mask) ||
761 type == OUT_MULTICAST)
762 ws.state |= WS_ST_TO_ON_NET;
763
764 } else {
765 ws.to_mask = ripv1_mask_net(ws.to.sin_addr.s_addr, NULL);
766 ws.to_net = ntohl(ws.to.sin_addr.s_addr) & ws.to_mask;
767 rt = rtfind(dst->sin_addr.s_addr);
768 if (rt != NULL)
769 ifp = rt->rt_ifp;
770 else
771 return;
772 }
773
774 ws.npackets = 0;
775 if (flash)
776 ws.state |= WS_ST_FLASH;
777
778 ws.ifp = ifp;
779
780 /*
781 * Routes in the table were already adjusted by their respective
782 * destination interface costs (which are zero by default) on
783 * input. The following is the value by which each route's metric
784 * will be bumped up on output.
785 */
786 ws.metric = 1;
787
788 ripv12_buf.rip.rip_vers = vers;
789
790 switch (type) {
791 case OUT_MULTICAST:
792 if (ifp->int_if_flags & IFF_MULTICAST)
793 v2buf.type = OUT_MULTICAST;
794 else
795 v2buf.type = NO_OUT_MULTICAST;
796 v12buf.type = OUT_BROADCAST;
797 break;
798
799 case OUT_QUERY:
800 ws.state |= WS_ST_QUERY;
801 /* FALLTHROUGH */
802 case OUT_BROADCAST:
803 case OUT_UNICAST:
804 v2buf.type = (vers == RIPv2) ? type : NO_OUT_RIPV2;
805 v12buf.type = type;
806 break;
807
808 case NO_OUT_MULTICAST:
809 case NO_OUT_RIPV2:
810 return; /* no output */
811 }
812
813 if (vers == RIPv2) {
814 /* full RIPv2 only if it cannot be heard by RIPv1 listeners */
815 if (type != OUT_BROADCAST)
816 ws.state |= WS_ST_RIP2_ALL;
817 if ((ws.state & WS_ST_QUERY) || !(ws.state & WS_ST_TO_ON_NET)) {
818 ws.state |= (WS_ST_AG | WS_ST_SUPER_AG);
819 } else if (ifp == NULL || !(ifp->int_state & IS_NO_AG)) {
820 ws.state |= WS_ST_AG;
821 if (type != OUT_BROADCAST && (ifp == NULL ||
822 !(ifp->int_state & IS_NO_SUPER_AG)))
823 ws.state |= WS_ST_SUPER_AG;
824 }
825
826 /* See if this packet needs authenticating */
827 ws.a = find_auth(ifp);
828 if (!passwd_ok && ws.a != NULL && ws.a->type == RIP_AUTH_PW)
829 ws.a = NULL;
830 if (ws.a != NULL && (ulong_t)ws.a->end < (ulong_t)clk.tv_sec &&
831 !ws.a->warnedflag) {
832 /*
833 * If the best key is an expired one, we may as
834 * well use it. Log this event.
835 */
836 writelog(LOG_WARNING,
837 "Using expired auth while transmitting to %s",
838 naddr_ntoa(ws.to.sin_addr.s_addr));
839 ws.a->warnedflag = 1;
840 }
841 } else {
842 ws.a = NULL;
843 }
844
845 clr_ws_buf(&v12buf, ws.a);
846 clr_ws_buf(&v2buf, ws.a);
847
848 /*
849 * Fake a default route if asked and if there is not already
850 * a better, real default route.
851 */
852 if (should_supply(NULL) && (def_metric = ifp->int_d_metric) != 0) {
853 if (NULL == (rt = rtget(RIP_DEFAULT, 0)) ||
854 rt->rt_metric+ws.metric >= def_metric) {
855 ws.state |= WS_ST_DEFAULT;
856 ag_check(0, 0, 0, NULL, 0, def_metric, def_metric,
857 0, 0, 0, supply_out);
858 } else {
859 def_metric = rt->rt_metric+ws.metric;
860 }
861
862 /*
863 * If both RIPv2 and the poor-man's router discovery
864 * kludge are on, arrange to advertise an extra
865 * default route via RIPv1.
866 */
867 if ((ws.state & WS_ST_RIP2_ALL) &&
868 (ifp->int_state & IS_PM_RDISC)) {
869 ripv12_buf.rip.rip_vers = RIPv1;
870 v12buf.n->n_family = RIP_AF_INET;
871 v12buf.n->n_dst = htonl(RIP_DEFAULT);
872 v12buf.n->n_metric = htonl(def_metric);
873 v12buf.n++;
874 }
875 }
876
877 (void) rn_walktree(rhead, walk_supply, NULL);
878 ag_flush(0, 0, supply_out);
879
880 /*
881 * Flush the packet buffers, provided they are not empty and
882 * do not contain only the password.
883 */
884 if (v12buf.n != v12buf.base &&
885 (v12buf.n > v12buf.base+1 ||
886 v12buf.base->n_family != RIP_AF_AUTH))
887 supply_write(&v12buf);
888 if (v2buf.n != v2buf.base && (v2buf.n > v2buf.base+1 ||
889 v2buf.base->n_family != RIP_AF_AUTH))
890 supply_write(&v2buf);
891
892 /*
893 * If we sent nothing and this is an answer to a query, send
894 * an empty buffer.
895 */
896 if (ws.npackets == 0 && (ws.state & WS_ST_QUERY)) {
897 supply_write(&v2buf);
898 if (ws.npackets == 0)
899 supply_write(&v12buf);
900 }
901 }
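
/*
 * supply() is driven by rip_bcast() below for periodic and flash
 * updates, and by callers answering queries with type == OUT_QUERY, in
 * which case an empty RESPONSE is sent even when no routes qualified
 * (handled just above).
 */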
902
903
904 /*
905 * send all of the routing table or just do a flash update
906 */
907 void
908 rip_bcast(int flash)
909 {
910 static struct sockaddr_in dst = {AF_INET};
911 struct interface *ifp;
912 enum output_type type;
913 int vers;
914 struct timeval rtime;
915
916
917 need_flash = _B_FALSE;
918 intvl_random(&rtime, MIN_WAITTIME, MAX_WAITTIME);
919 no_flash = rtime;
920 timevaladd(&no_flash, &now);
921
922 if (!rip_enabled)
923 return;
924
925 trace_act("send %s and inhibit dynamic updates for %.3f sec",
926 flash ? "dynamic update" : "all routes",
927 rtime.tv_sec + ((double)rtime.tv_usec)/1000000.0);
928
929 for (ifp = ifnet; ifp != NULL; ifp = ifp->int_next) {
930 /*
931 * Skip interfaces not doing RIP or for which IP
932 * forwarding isn't turned on. Skip duplicate
933 * interfaces, we don't want to generate duplicate
934 * packets. Do try broken interfaces to see if they
935 * have healed.
936 */
937 if (IS_RIP_OUT_OFF(ifp->int_state) ||
938 (ifp->int_state & IS_DUP) ||
939 !IS_IFF_ROUTING(ifp->int_if_flags))
940 continue;
941
942 /* skip turned off interfaces */
943 if (!IS_IFF_UP(ifp->int_if_flags))
944 continue;
945
946 /* skip interfaces we shouldn't use */
947 if (IS_IFF_QUIET(ifp->int_if_flags))
948 continue;
949
950 vers = (ifp->int_state & IS_NO_RIPV1_OUT) ? RIPv2 : RIPv1;
951 dst.sin_addr.s_addr = ifp->int_ripout_addr;
952
953 /*
954 * Ignore the interface if it's not broadcast,
955 * point-to-point, or remote. It must be non-broadcast
956 * multiaccess, and therefore unsupported.
957 */
958 if (!(ifp->int_if_flags & (IFF_BROADCAST | IFF_POINTOPOINT)) &&
959 !(ifp->int_state & IS_REMOTE))
960 continue;
961
962 type = (ifp->int_if_flags & IFF_BROADCAST) ?
963 OUT_BROADCAST : OUT_UNICAST;
964 if (vers == RIPv2 && (ifp->int_if_flags & IFF_MULTICAST) &&
965 !(ifp->int_state & IS_NO_RIP_MCAST))
966 type = OUT_MULTICAST;
967
968 supply(&dst, ifp, type, flash, vers, _B_TRUE);
969 }
970
971 update_seqno++; /* all routes are up to date */
972 }
973
974
975 /*
976 * Ask for routes
977 * Do it only once to an interface, and not even after the interface
978 * was broken and recovered.
979 */
980 void
981 rip_query(void)
982 {
983 static struct sockaddr_in dst = {AF_INET};
984 struct interface *ifp;
985 struct rip buf;
986 enum output_type type;
987
988
989 if (!rip_enabled)
990 return;
991
992 (void) memset(&buf, 0, sizeof (buf));
993
994 for (ifp = ifnet; ifp; ifp = ifp->int_next) {
995 /*
996 * Skip interfaces that have already been queried. Do not ask
997 * via interfaces through which we don't accept input.
998 * Do not ask via interfaces that cannot send RIP
999 * packets. Don't send queries on duplicate
1000 * interfaces, as that would generate duplicate packets
1001 * on the link. Do try broken interfaces to see if they
1002 * have healed.
1003 */
1004 if (IS_RIP_IN_OFF(ifp->int_state) ||
1005 (ifp->int_state & IS_DUP) ||
1006 ifp->int_query_time != NEVER)
1007 continue;
1008
1009 /* skip turned off interfaces */
1010 if (!IS_IFF_UP(ifp->int_if_flags))
1011 continue;
1012
1013 /* skip interfaces we shouldn't use */
1014 if (IS_IFF_QUIET(ifp->int_if_flags))
1015 continue;
1016
1017 /*
1018 * Ignore the interface if it's not broadcast,
1019 * point-to-point, or remote. It must be non-broadcast
1020 * multiaccess, and therefore unsupported.
1021 */
1022 if (!(ifp->int_if_flags & (IFF_BROADCAST | IFF_POINTOPOINT)) &&
1023 !(ifp->int_state & IS_REMOTE))
1024 continue;
1025
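		/*
		 * A request with a single entry of family RIP_AF_UNSPEC
		 * and an infinite metric asks the responder for its
		 * entire routing table (RFC 2453 section 3.9.1); that is
		 * what is built here.
		 */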
1026 buf.rip_cmd = RIPCMD_REQUEST;
1027 buf.rip_nets[0].n_family = RIP_AF_UNSPEC;
1028 buf.rip_nets[0].n_metric = htonl(HOPCNT_INFINITY);
1029
1030 /*
1031 * Send a RIPv1 query only if allowed and if we will
1032 * listen to RIPv1 routers.
1033 */
1034 if ((ifp->int_state & IS_NO_RIPV1_OUT) ||
1035 (ifp->int_state & IS_NO_RIPV1_IN)) {
1036 buf.rip_vers = RIPv2;
1037 } else {
1038 buf.rip_vers = RIPv1;
1039 }
1040
1041 dst.sin_addr.s_addr = ifp->int_ripout_addr;
1042
1043 type = (ifp->int_if_flags & IFF_BROADCAST) ?
1044 OUT_BROADCAST : OUT_UNICAST;
1045 if (buf.rip_vers == RIPv2 &&
1046 (ifp->int_if_flags & IFF_MULTICAST) &&
1047 !(ifp->int_state & IS_NO_RIP_MCAST))
1048 type = OUT_MULTICAST;
1049
1050 ifp->int_query_time = now.tv_sec+SUPPLY_INTERVAL;
1051 if (output(type, &dst, ifp, &buf, sizeof (buf)) < 0)
1052 if_sick(ifp, _B_FALSE);
1053 }
1054 }
1055