/*
 * Copyright (c) 1983, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgment:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "defs.h"

#ifdef __NetBSD__
__RCSID("$NetBSD$");
#elif defined(__FreeBSD__)
__RCSID("$FreeBSD$");
#else
__RCSID("$Revision: 2.27 $");
#ident "$Revision: 2.27 $"
#endif
#ident "$FreeBSD$"


u_int update_seqno;


/* walk the tree of routes with this for output
 */
struct {
	struct sockaddr_in to;
	naddr	to_mask;
	naddr	to_net;
	naddr	to_std_mask;
	naddr	to_std_net;
	struct interface *ifp;		/* usually output interface */
	struct auth *a;
	char	metric;			/* adjust metrics by interface */
	int	npackets;
	int	gen_limit;
	u_int	state;
#define	    WS_ST_FLASH	    0x001	/* send only changed routes */
#define	    WS_ST_RIP2_ALL  0x002	/* send full featured RIPv2 */
#define	    WS_ST_AG	    0x004	/* ok to aggregate subnets */
#define	    WS_ST_SUPER_AG  0x008	/* ok to aggregate networks */
#define	    WS_ST_QUERY	    0x010	/* responding to a query */
#define	    WS_ST_TO_ON_NET 0x020	/* sending onto one of our nets */
#define	    WS_ST_DEFAULT   0x040	/* faking a default */
} ws;

/* A buffer for what can be heard by both RIPv1 and RIPv2 listeners */
struct ws_buf v12buf;
union pkt_buf ripv12_buf;

/* Another for only RIPv2 listeners */
struct ws_buf v2buf;
union pkt_buf rip_v2_buf;


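/* Initialize the shared output buffers: one that both RIPv1 and RIPv2
 * listeners can parse, and one meant only for RIPv2 listeners.
 */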
void
bufinit(void)
{
	ripv12_buf.rip.rip_cmd = RIPCMD_RESPONSE;
	v12buf.buf = &ripv12_buf.rip;
	v12buf.base = &v12buf.buf->rip_nets[0];

	rip_v2_buf.rip.rip_cmd = RIPCMD_RESPONSE;
	rip_v2_buf.rip.rip_vers = RIPv2;
	v2buf.buf = &rip_v2_buf.rip;
	v2buf.base = &v2buf.buf->rip_nets[0];
}


/* Send the contents of the global buffer via the non-multicast socket
 */
int					/* <0 on failure */
output(enum output_type type,
       struct sockaddr_in *dst,		/* send to here */
       struct interface *ifp,
       struct rip *buf,
       int size)			/* this many bytes */
{
	struct sockaddr_in osin;
	int flags;
	const char *msg;
	int res;
	naddr tgt_mcast;
	int soc;
	int serrno;

	osin = *dst;
	if (osin.sin_port == 0)
		osin.sin_port = htons(RIP_PORT);
#ifdef _HAVE_SIN_LEN
	if (osin.sin_len == 0)
		osin.sin_len = sizeof(osin);
#endif

	soc = rip_sock;
	flags = 0;

	switch (type) {
	case OUT_QUERY:
		msg = "Answer Query";
		if (soc < 0)
			soc = ifp->int_rip_sock;
		break;
	case OUT_UNICAST:
		msg = "Send";
		if (soc < 0)
			soc = ifp->int_rip_sock;
		flags = MSG_DONTROUTE;
		break;
	case OUT_BROADCAST:
		if (ifp->int_if_flags & IFF_POINTOPOINT) {
			msg = "Send";
		} else {
			msg = "Send bcast";
		}
		flags = MSG_DONTROUTE;
		break;
	case OUT_MULTICAST:
		if (ifp->int_if_flags & IFF_POINTOPOINT) {
			msg = "Send pt-to-pt";
		} else if (ifp->int_state & IS_DUP) {
			trace_act("abort multicast output via %s"
				  " with duplicate address",
				  ifp->int_name);
			return 0;
		} else {
			msg = "Send mcast";
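			/* Tell the kernel which interface to use for
			 * multicast output only when it differs from the
			 * one used for the previous multicast, to avoid
			 * a needless setsockopt() per packet.
			 */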
			if (rip_sock_mcast != ifp) {
#ifdef MCAST_IFINDEX
				/* specify ifindex */
				tgt_mcast = htonl(ifp->int_index);
#else
#ifdef MCAST_PPP_BUG
				/* Do not specify the primary interface
				 * explicitly if we have the multicast
				 * point-to-point kernel bug, since the
				 * kernel will do the wrong thing if the
				 * local address of a point-to-point link
				 * is the same as the address of an ordinary
				 * interface.
				 */
				if (ifp->int_addr == myaddr) {
					tgt_mcast = 0;
				} else
#endif
				tgt_mcast = ifp->int_addr;
#endif
				if (0 > setsockopt(rip_sock,
						   IPPROTO_IP, IP_MULTICAST_IF,
						   &tgt_mcast,
						   sizeof(tgt_mcast))) {
					serrno = errno;
					LOGERR("setsockopt(rip_sock,"
					       "IP_MULTICAST_IF)");
					errno = serrno;
					ifp = 0;
					return -1;
				}
				rip_sock_mcast = ifp;
			}
			osin.sin_addr.s_addr = htonl(INADDR_RIP_GROUP);
		}
		break;

	case NO_OUT_MULTICAST:
	case NO_OUT_RIPV2:
	default:
#ifdef DEBUG
		abort();
#endif
		return -1;
	}

	trace_rip(msg, "to", &osin, ifp, buf, size);

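	/* Send the packet and complain about a failure, unless the
	 * interface is already known to be broken.
	 */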
	res = sendto(soc, buf, size, flags,
		     (struct sockaddr *)&osin, sizeof(osin));
	if (res < 0
	    && (ifp == 0 || !(ifp->int_state & IS_BROKE))) {
		serrno = errno;
		msglog("%s sendto(%s%s%s.%d): %s", msg,
		       ifp != 0 ? ifp->int_name : "",
		       ifp != 0 ? ", " : "",
		       inet_ntoa(osin.sin_addr),
		       ntohs(osin.sin_port),
		       strerror(errno));
		errno = serrno;
	}

	return res;
}


/* Find the first key for a packet to send.
 * Try for a key that is eligible and has not expired, but settle for
 * the last key if they have all expired.
 * If no key is ready yet, give up.
 */
struct auth *
find_auth(struct interface *ifp)
{
	struct auth *ap, *res;
	int i;


	if (ifp == 0)
		return 0;

	res = 0;
	ap = ifp->int_auth;
	for (i = 0; i < MAX_AUTH_KEYS; i++, ap++) {
		/* stop looking after the last key */
		if (ap->type == RIP_AUTH_NONE)
			break;

		/* ignore keys that are not ready yet */
		if ((u_long)ap->start > (u_long)clk.tv_sec)
			continue;

		if ((u_long)ap->end < (u_long)clk.tv_sec) {
			/* note best expired password as a fall-back */
			if (res == 0 || (u_long)ap->end > (u_long)res->end)
				res = ap;
			continue;
		}

		/* note key with the best future */
		if (res == 0 || (u_long)res->end < (u_long)ap->end)
			res = ap;
	}
	return res;
}


void
clr_ws_buf(struct ws_buf *wb,
	   struct auth *ap)
{
	struct netauth *na;

	wb->lim = wb->base + NETS_LEN;
	wb->n = wb->base;
	memset(wb->n, 0, NETS_LEN*sizeof(*wb->n));

	/* (start to) install authentication if appropriate
	 */
	if (ap == 0)
		return;

	na = (struct netauth*)wb->n;
	if (ap->type == RIP_AUTH_PW) {
		na->a_family = RIP_AF_AUTH;
		na->a_type = RIP_AUTH_PW;
		memcpy(na->au.au_pw, ap->key, sizeof(na->au.au_pw));
		wb->n++;

	} else if (ap->type ==  RIP_AUTH_MD5) {
		na->a_family = RIP_AF_AUTH;
		na->a_type = RIP_AUTH_MD5;
		na->au.a_md5.md5_keyid = ap->keyid;
		na->au.a_md5.md5_auth_len = RIP_AUTH_MD5_KEY_LEN;
		na->au.a_md5.md5_seqno = htonl(clk.tv_sec);
		wb->n++;
		wb->lim--;		/* make room for trailer */
	}
}


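/* Finish MD5 authentication: append a trailing pseudo-entry whose
 * password field carries the MD5 digest computed over the packet
 * contents and the secret key.
 */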
void
end_md5_auth(struct ws_buf *wb,
	     struct auth *ap)
{
	struct netauth *na, *na2;
	MD5_CTX md5_ctx;
	int len;


	na = (struct netauth*)wb->base;
	na2 = (struct netauth*)wb->n;
	len = (char *)na2-(char *)wb->buf;
	na2->a_family = RIP_AF_AUTH;
	na2->a_type = htons(1);
	na->au.a_md5.md5_pkt_len = htons(len);
	MD5Init(&md5_ctx);
	MD5Update(&md5_ctx, (u_char *)wb->buf, len + RIP_AUTH_MD5_HASH_XTRA);
	MD5Update(&md5_ctx, ap->key, RIP_AUTH_MD5_KEY_LEN);
	MD5Final(na2->au.au_pw, &md5_ctx);
	wb->n++;
}


/* Send the buffer
 */
static void
supply_write(struct ws_buf *wb)
{
	/* Output multicast only if legal.
	 * If we would multicast and it would be illegal, then discard the
	 * packet.
	 */
	switch (wb->type) {
	case NO_OUT_MULTICAST:
		trace_pkt("skip multicast to %s because impossible",
			  naddr_ntoa(ws.to.sin_addr.s_addr));
		break;
	case NO_OUT_RIPV2:
		break;
	default:
		if (ws.a != 0 && ws.a->type == RIP_AUTH_MD5)
			end_md5_auth(wb,ws.a);
		if (output(wb->type, &ws.to, ws.ifp, wb->buf,
			   ((char *)wb->n - (char*)wb->buf)) < 0
		    && ws.ifp != 0)
			if_sick(ws.ifp);
		ws.npackets++;
		break;
	}

	clr_ws_buf(wb,ws.a);
}


/* put an entry into the packet
 */
static void
supply_out(struct ag_info *ag)
{
	int i;
	naddr mask, v1_mask, dst_h, ddst_h = 0;
	struct ws_buf *wb;


	/* Skip this route if doing a flash update and it and the routes
	 * it aggregates have not changed recently.
	 */
	if (ag->ag_seqno < update_seqno
	    && (ws.state & WS_ST_FLASH))
		return;

	dst_h = ag->ag_dst_h;
	mask = ag->ag_mask;
	v1_mask = ripv1_mask_host(htonl(dst_h),
				  (ws.state & WS_ST_TO_ON_NET) ? ws.ifp : 0);
	i = 0;

	/* If we are sending RIPv2 packets that cannot (or must not) be
	 * heard by RIPv1 listeners, do not worry about sub- or supernets.
	 * Subnets (from other networks) can only be sent via multicast.
	 * A pair of subnet routes might have been promoted so that they
	 * are legal to send by RIPv1.
	 * If RIPv1 is off, use the multicast buffer.
	 */
	if ((ws.state & WS_ST_RIP2_ALL)
	    || ((ag->ag_state & AGS_RIPV2) && v1_mask != mask)) {
		/* use the RIPv2-only buffer */
		wb = &v2buf;

	} else {
		/* use the RIPv1-or-RIPv2 buffer */
		wb = &v12buf;

		/* Convert supernet route into corresponding set of network
		 * routes for RIPv1, but leave non-contiguous netmasks
		 * to ag_check().
		 */
		if (v1_mask > mask
		    && mask + (mask & -mask) == 0) {
			ddst_h = v1_mask & -v1_mask;
			i = (v1_mask & ~mask)/ddst_h;

			if (i > ws.gen_limit) {
				/* Punt if we would have to generate an
				 * unreasonable number of routes.
				 */
				if (TRACECONTENTS)
					trace_misc("sending %s-->%s as 1"
						   " instead of %d routes",
						   addrname(htonl(dst_h), mask,
							1),
						   naddr_ntoa(ws.to.sin_addr
							.s_addr),
						   i+1);
				i = 0;

			} else {
				mask = v1_mask;
				ws.gen_limit -= i;
			}
		}
	}

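	/* Emit the route itself, plus any additional RIPv1 network routes
	 * generated from a supernet above; dst_h steps by ddst_h for each
	 * extra route.
	 */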
	do {
		wb->n->n_family = RIP_AF_INET;
		wb->n->n_dst = htonl(dst_h);
		/* If the route is from router-discovery or we are
		 * shutting down, admit only a bad metric.
		 */
		wb->n->n_metric = ((stopint || ag->ag_metric < 1)
				   ? HOPCNT_INFINITY
				   : ag->ag_metric);
		wb->n->n_metric = htonl(wb->n->n_metric);
		/* Any non-zero bits in the supposedly unused RIPv1 fields
		 * cause the old `routed` to ignore the route.
		 * That means the mask and so forth cannot be sent
		 * in the hybrid RIPv1/RIPv2 mode.
		 */
		if (ws.state & WS_ST_RIP2_ALL) {
			if (ag->ag_nhop != 0
			    && ((ws.state & WS_ST_QUERY)
				|| (ag->ag_nhop != ws.ifp->int_addr
				    && on_net(ag->ag_nhop,
					      ws.ifp->int_net,
					      ws.ifp->int_mask))))
				wb->n->n_nhop = ag->ag_nhop;
			wb->n->n_mask = htonl(mask);
			wb->n->n_tag = ag->ag_tag;
		}
		dst_h += ddst_h;

		if (++wb->n >= wb->lim)
			supply_write(wb);
	} while (i-- != 0);
}


/* supply one route from the table
 */
/* ARGSUSED */
static int
walk_supply(struct radix_node *rn,
	    struct walkarg *argp UNUSED)
{
#define RT ((struct rt_entry *)rn)
	u_short ags;
	char metric, pref;
	naddr dst, nhop;
	struct rt_spare *rts;
	int i;


	/* Do not advertise external remote interfaces or passive interfaces.
	 */
	if ((RT->rt_state & RS_IF)
	    && RT->rt_ifp != 0
	    && (RT->rt_ifp->int_state & IS_PASSIVE)
	    && !(RT->rt_state & RS_MHOME))
		return 0;

	/* If being quiet about our ability to forward, then
	 * do not say anything unless responding to a query,
	 * except about our main interface.
	 */
	if (!supplier && !(ws.state & WS_ST_QUERY)
	    && !(RT->rt_state & RS_MHOME))
		return 0;

	dst = RT->rt_dst;

	/* do not collide with the fake default route */
	if (dst == RIP_DEFAULT
	    && (ws.state & WS_ST_DEFAULT))
		return 0;

	if (RT->rt_state & RS_NET_SYN) {
		if (RT->rt_state & RS_NET_INT) {
			/* Do not send manual synthetic network routes
			 * into the subnet.
			 */
			if (on_net(ws.to.sin_addr.s_addr,
				   ntohl(dst), RT->rt_mask))
				return 0;

		} else {
			/* Do not send automatic synthetic network routes
			 * if they are not needed because no RIPv1 listeners
			 * can hear them.
			 */
			if (ws.state & WS_ST_RIP2_ALL)
				return 0;

			/* Do not send automatic synthetic network routes to
			 * the real subnet.
			 */
			if (on_net(ws.to.sin_addr.s_addr,
				   ntohl(dst), RT->rt_mask))
				return 0;
		}
		nhop = 0;

	} else {
		/* Advertise the next hop if this is not a route for one
		 * of our interfaces and the next hop is on the same
		 * network as the target.
		 * The final determination is made by supply_out().
		 */
		if (!(RT->rt_state & RS_IF)
		    && RT->rt_gate != myaddr
		    && RT->rt_gate != loopaddr)
			nhop = RT->rt_gate;
		else
			nhop = 0;
	}

	metric = RT->rt_metric;
	ags = 0;

	if (RT->rt_state & RS_MHOME) {
		/* retain host route of multi-homed servers */
		;

	} else if (RT_ISHOST(RT)) {
		/* We should always suppress (into existing network routes)
		 * the host routes for the local end of our point-to-point
		 * links.
		 * If we are suppressing host routes in general, then do so.
		 * Avoid advertising host routes onto their own network,
		 * where they should be handled by proxy-ARP.
		 */
		if ((RT->rt_state & RS_LOCAL)
		    || ridhosts
		    || on_net(dst, ws.to_net, ws.to_mask))
			ags |= AGS_SUPPRESS;

		/* Aggregate stray host routes into network routes if allowed.
		 * We cannot aggregate host routes into small network routes
		 * without confusing RIPv1 listeners into thinking the
		 * network routes are host routes.
		 */
		if ((ws.state & WS_ST_AG) && (ws.state & WS_ST_RIP2_ALL))
			ags |= AGS_AGGREGATE;

	} else {
		/* Always suppress network routes into other, existing
		 * network routes
		 */
		ags |= AGS_SUPPRESS;

		/* Generate supernets if allowed.
		 * If we can be heard by RIPv1 systems, we will
		 * later convert back to ordinary nets.
		 * This unifies dealing with received supernets.
		 */
		if ((ws.state & WS_ST_AG)
		    && ((RT->rt_state & RS_SUBNET)
			|| (ws.state & WS_ST_SUPER_AG)))
			ags |= AGS_AGGREGATE;
	}

	/* Do not send RIPv1 advertisements of subnets to other
	 * networks. If possible, multicast them by RIPv2.
	 */
	if ((RT->rt_state & RS_SUBNET)
	    && !(ws.state & WS_ST_RIP2_ALL)
	    && !on_net(dst, ws.to_std_net, ws.to_std_mask))
		ags |= AGS_RIPV2 | AGS_AGGREGATE;


	/* Do not send a route back to where it came from, except in
	 * response to a query.  This is "split-horizon".  That means not
	 * advertising back to the same network and so via the same interface.
	 *
	 * We want to suppress routes that might have been fragmented
	 * from this route by a RIPv1 router and sent back to us, and so we
	 * cannot forget this route here.  Let the split-horizon route
	 * suppress the fragmented routes and then itself be forgotten.
	 *
	 * Include the routes for both ends of point-to-point interfaces
	 * among those suppressed by split-horizon, since the other side
	 * should know them as well as we do.
	 *
	 * Notice spare routes with the same metric that we are about to
	 * advertise, to split the horizon on redundant, inactive paths.
	 *
	 * Do not suppress advertisements of interface-related addresses on
	 * non-point-to-point interfaces.  This ensures that we have something
	 * to say every 30 seconds to help detect broken Ethernets or
	 * other interfaces where one packet every 30 seconds costs nothing.
	 */
	if (ws.ifp != 0
	    && !(ws.state & WS_ST_QUERY)
	    && (ws.state & WS_ST_TO_ON_NET)
	    && (!(RT->rt_state & RS_IF)
		|| ws.ifp->int_if_flags & IFF_POINTOPOINT)) {
		for (rts = RT->rt_spares, i = NUM_SPARES; i != 0; i--, rts++) {
			if (rts->rts_metric > metric
			    || rts->rts_ifp != ws.ifp)
				continue;

			/* If we do not mark the route with AGS_SPLIT_HZ here,
			 * it will be poisoned-reverse, or advertised back
			 * toward its source with an infinite metric.
			 * If we have recently advertised the route with a
			 * better metric than we now have, then we should
			 * poison-reverse the route before suppressing it for
			 * split-horizon.
			 *
			 * In almost all cases, if there is no spare for the
			 * route then it is either old and dead or a brand
			 * new route. If it is brand new, there is no need
			 * for poison-reverse. If it is old and dead, it
			 * is already poisoned.
			 */
			if (RT->rt_poison_time < now_expire
			    || RT->rt_poison_metric >= metric
			    || RT->rt_spares[1].rts_gate == 0) {
				ags |= AGS_SPLIT_HZ;
				ags &= ~AGS_SUPPRESS;
			}
			metric = HOPCNT_INFINITY;
			break;
		}
	}

	/* Keep track of the best metric with which the
	 * route has been advertised recently.
	 */
	if (RT->rt_poison_metric >= metric
	    || RT->rt_poison_time < now_expire) {
		RT->rt_poison_time = now.tv_sec;
		RT->rt_poison_metric = metric;
	}

	/* Adjust the outgoing metric by the cost of the link.
	 * Avoid aggregation when a route is counting to infinity.
	 */
	pref = RT->rt_poison_metric + ws.metric;
	metric += ws.metric;

	/* Do not advertise stable routes that will be ignored,
	 * unless we are answering a query.
	 * If the route recently was advertised with a metric that
	 * would have been less than infinity through this interface,
	 * we need to continue to advertise it in order to poison it.
	 */
	if (metric >= HOPCNT_INFINITY) {
		if (!(ws.state & WS_ST_QUERY)
		    && (pref >= HOPCNT_INFINITY
			|| RT->rt_poison_time < now_garbage))
			return 0;

		metric = HOPCNT_INFINITY;
	}

	ag_check(dst, RT->rt_mask, 0, nhop, metric, pref,
		 RT->rt_seqno, RT->rt_tag, ags, supply_out);
	return 0;
#undef RT
}


/* Supply dst with the contents of the routing tables.
 * If this won't fit in one packet, chop it up into several.
 */
void
supply(struct sockaddr_in *dst,
       struct interface *ifp,		/* output interface */
       enum output_type type,
       int flash,			/* 1=flash update */
       int vers,			/* RIP version */
       int passwd_ok)			/* OK to include cleartext password */
{
	struct rt_entry *rt;
	int def_metric;


	ws.state = 0;
	ws.gen_limit = 1024;

	ws.to = *dst;
	ws.to_std_mask = std_mask(ws.to.sin_addr.s_addr);
	ws.to_std_net = ntohl(ws.to.sin_addr.s_addr) & ws.to_std_mask;

	if (ifp != 0) {
		ws.to_mask = ifp->int_mask;
		ws.to_net = ifp->int_net;
		if (on_net(ws.to.sin_addr.s_addr, ws.to_net, ws.to_mask))
			ws.state |= WS_ST_TO_ON_NET;

	} else {
		ws.to_mask = ripv1_mask_net(ws.to.sin_addr.s_addr, 0);
		ws.to_net = ntohl(ws.to.sin_addr.s_addr) & ws.to_mask;
		rt = rtfind(dst->sin_addr.s_addr);
		if (rt)
			ifp = rt->rt_ifp;
	}

	ws.npackets = 0;
	if (flash)
		ws.state |= WS_ST_FLASH;

	if ((ws.ifp = ifp) == 0) {
		ws.metric = 1;
	} else {
		/* Adjust the advertised metric by the outgoing interface
		 * metric.
		 */
		ws.metric = ifp->int_metric + 1 + ifp->int_adj_outmetric;
	}

	ripv12_buf.rip.rip_vers = vers;

	switch (type) {
	case OUT_MULTICAST:
		if (ifp->int_if_flags & IFF_MULTICAST)
			v2buf.type = OUT_MULTICAST;
		else
			v2buf.type = NO_OUT_MULTICAST;
		v12buf.type = OUT_BROADCAST;
		break;

	case OUT_QUERY:
		ws.state |= WS_ST_QUERY;
		/* FALLTHROUGH */
	case OUT_BROADCAST:
	case OUT_UNICAST:
		v2buf.type = (vers == RIPv2) ? type : NO_OUT_RIPV2;
		v12buf.type = type;
		break;

	case NO_OUT_MULTICAST:
	case NO_OUT_RIPV2:
		break;			/* no output */
	}

	if (vers == RIPv2) {
		/* full RIPv2 only if cannot be heard by RIPv1 listeners */
		if (type != OUT_BROADCAST)
			ws.state |= WS_ST_RIP2_ALL;
		if ((ws.state & WS_ST_QUERY)
		    || !(ws.state & WS_ST_TO_ON_NET)) {
			ws.state |= (WS_ST_AG | WS_ST_SUPER_AG);
		} else if (ifp == 0 || !(ifp->int_state & IS_NO_AG)) {
			ws.state |= WS_ST_AG;
			if (type != OUT_BROADCAST
			    && (ifp == 0
				|| !(ifp->int_state & IS_NO_SUPER_AG)))
				ws.state |= WS_ST_SUPER_AG;
		}
	}

	ws.a = (vers == RIPv2) ? find_auth(ifp) : 0;
	if (!passwd_ok && ws.a != 0 && ws.a->type == RIP_AUTH_PW)
		ws.a = 0;
	clr_ws_buf(&v12buf,ws.a);
	clr_ws_buf(&v2buf,ws.a);

	/* Fake a default route if asked and if there is not already
	 * a better, real default route.
	 */
	if (supplier && (def_metric = ifp->int_d_metric) != 0) {
		if (0 == (rt = rtget(RIP_DEFAULT, 0))
		    || rt->rt_metric+ws.metric >= def_metric) {
			ws.state |= WS_ST_DEFAULT;
			ag_check(0, 0, 0, 0, def_metric, def_metric,
				 0, 0, 0, supply_out);
		} else {
			def_metric = rt->rt_metric+ws.metric;
		}

		/* If both RIPv2 and the poor-man's router discovery
		 * kludge are on, arrange to advertise an extra
		 * default route via RIPv1.
		 */
		if ((ws.state & WS_ST_RIP2_ALL)
		    && (ifp->int_state & IS_PM_RDISC)) {
			ripv12_buf.rip.rip_vers = RIPv1;
			v12buf.n->n_family = RIP_AF_INET;
			v12buf.n->n_dst = htonl(RIP_DEFAULT);
			v12buf.n->n_metric = htonl(def_metric);
			v12buf.n++;
		}
	}

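	/* Walk the whole routing table, pushing each route through the
	 * aggregation machinery, then flush any aggregates still pending.
	 */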
	(void)rn_walktree(rhead, walk_supply, 0);
	ag_flush(0,0,supply_out);

	/* Flush the packet buffers, provided they are not empty and
	 * do not contain only the password.
	 */
	if (v12buf.n != v12buf.base
	    && (v12buf.n > v12buf.base+1
		|| v12buf.base->n_family != RIP_AF_AUTH))
		supply_write(&v12buf);
	if (v2buf.n != v2buf.base
	    && (v2buf.n > v2buf.base+1
		|| v2buf.base->n_family != RIP_AF_AUTH))
		supply_write(&v2buf);

	/* If we sent nothing and this is an answer to a query, send
	 * an empty buffer.
	 */
	if (ws.npackets == 0
	    && (ws.state & WS_ST_QUERY))
		supply_write(&v12buf);
}


/* send all of the routing table or just do a flash update
 */
void
rip_bcast(int flash)
{
#ifdef _HAVE_SIN_LEN
	static struct sockaddr_in dst = {sizeof(dst), AF_INET, 0, {0}, {0}};
#else
	static struct sockaddr_in dst = {AF_INET};
#endif
	struct interface *ifp;
	enum output_type type;
	int vers;
	struct timeval rtime;


	need_flash = 0;
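	/* Pick a random interval during which further dynamic (flash)
	 * updates will be suppressed.
	 */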
	intvl_random(&rtime, MIN_WAITTIME, MAX_WAITTIME);
	no_flash = rtime;
	timevaladd(&no_flash, &now);

	if (rip_sock < 0)
		return;

	trace_act("send %s and inhibit dynamic updates for %.3f sec",
		  flash ? "dynamic update" : "all routes",
		  rtime.tv_sec + ((float)rtime.tv_usec)/1000000.0);

	for (ifp = ifnet; ifp != 0; ifp = ifp->int_next) {
		/* Skip interfaces not doing RIP.
		 * Do try broken interfaces to see if they have healed.
		 */
		if (IS_RIP_OUT_OFF(ifp->int_state))
			continue;

		/* skip turned off interfaces */
		if (!iff_up(ifp->int_if_flags))
			continue;

		vers = (ifp->int_state & IS_NO_RIPV1_OUT) ? RIPv2 : RIPv1;

		if (ifp->int_if_flags & IFF_BROADCAST) {
			/* ordinary, hardware interface */
			dst.sin_addr.s_addr = ifp->int_brdaddr;

			if (vers == RIPv2
			    && !(ifp->int_state  & IS_NO_RIP_MCAST)) {
				type = OUT_MULTICAST;
			} else {
				type = OUT_BROADCAST;
			}

		} else if (ifp->int_if_flags & IFF_POINTOPOINT) {
			/* point-to-point hardware interface */
			dst.sin_addr.s_addr = ifp->int_dstaddr;
			type = OUT_UNICAST;

		} else if (ifp->int_state & IS_REMOTE) {
			/* remote interface */
			dst.sin_addr.s_addr = ifp->int_addr;
			type = OUT_UNICAST;

		} else {
			/* ATM, HIPPI, etc. */
			continue;
		}

		supply(&dst, ifp, type, flash, vers, 1);
	}

	update_seqno++;			/* all routes are up to date */
}


/* Ask for routes
 * Do it only once to an interface, and not even after the interface
 * was broken and recovered.
 */
void
rip_query(void)
{
#ifdef _HAVE_SIN_LEN
	static struct sockaddr_in dst = {sizeof(dst), AF_INET, 0, {0}, {0}};
#else
	static struct sockaddr_in dst = {AF_INET};
#endif
	struct interface *ifp;
	struct rip buf;
	enum output_type type;


	if (rip_sock < 0)
		return;

	memset(&buf, 0, sizeof(buf));

	for (ifp = ifnet; ifp; ifp = ifp->int_next) {
		/* Skip interfaces that have already been queried.
		 * Do not ask via interfaces through which we don't
		 * accept input.  Do not ask via interfaces that cannot
		 * send RIP packets.
		 * Do try broken interfaces to see if they have healed.
		 */
		if (IS_RIP_IN_OFF(ifp->int_state)
		    || ifp->int_query_time != NEVER)
			continue;

		/* skip turned off interfaces */
		if (!iff_up(ifp->int_if_flags))
			continue;

		buf.rip_vers = (ifp->int_state&IS_NO_RIPV1_OUT) ? RIPv2:RIPv1;
		buf.rip_cmd = RIPCMD_REQUEST;
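		/* A single entry with family RIP_AF_UNSPEC and an infinite
		 * metric asks the receiver for its entire routing table.
		 */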
		buf.rip_nets[0].n_family = RIP_AF_UNSPEC;
		buf.rip_nets[0].n_metric = htonl(HOPCNT_INFINITY);

		/* Send a RIPv1 query only if allowed and if we will
		 * listen to RIPv1 routers.
		 */
		if ((ifp->int_state & IS_NO_RIPV1_OUT)
		    || (ifp->int_state & IS_NO_RIPV1_IN)) {
			buf.rip_vers = RIPv2;
		} else {
			buf.rip_vers = RIPv1;
		}

		if (ifp->int_if_flags & IFF_BROADCAST) {
			/* ordinary, hardware interface */
			dst.sin_addr.s_addr = ifp->int_brdaddr;

			/* Broadcast RIPv1 queries and RIPv2 queries
			 * when the hardware cannot multicast.
			 */
			if (buf.rip_vers == RIPv2
			    && (ifp->int_if_flags & IFF_MULTICAST)
			    && !(ifp->int_state  & IS_NO_RIP_MCAST)) {
				type = OUT_MULTICAST;
			} else {
				type = OUT_BROADCAST;
			}

		} else if (ifp->int_if_flags & IFF_POINTOPOINT) {
			/* point-to-point hardware interface */
			dst.sin_addr.s_addr = ifp->int_dstaddr;
			type = OUT_UNICAST;

		} else if (ifp->int_state & IS_REMOTE) {
			/* remote interface */
			dst.sin_addr.s_addr = ifp->int_addr;
			type = OUT_UNICAST;

		} else {
			/* ATM, HIPPI, etc. */
			continue;
		}

		ifp->int_query_time = now.tv_sec+SUPPLY_INTERVAL;
		if (output(type, &dst, ifp, &buf, sizeof(buf)) < 0)
			if_sick(ifp);
	}
}
988