1 /*
2  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  *
5  * Copyright (c) 1983, 1988, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgment:
18  *	This product includes software developed by the University of
19  *	California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  * $FreeBSD: src/sbin/routed/output.c,v 1.7 2000/08/11 08:24:38 sheldonh Exp $
37  */
38 
39 #pragma ident	"%Z%%M%	%I%	%E% SMI"
40 
41 #include "defs.h"
42 #include <md5.h>
43 #include <alloca.h>
44 
45 uint_t update_seqno;
46 
47 
48 /*
49  * walk the tree of routes with this for output
50  */
51 static	struct {
52 	struct sockaddr_in to;
53 	in_addr_t	to_mask;
54 	in_addr_t	to_net;
55 	in_addr_t	to_std_mask;
56 	in_addr_t	to_std_net;
57 	struct interface *ifp;		/* usually output interface */
58 	struct auth	*a;
59 	uint8_t		metric;		/* adjust metrics by interface */
60 	uint32_t	npackets;
61 	uint32_t	gen_limit;
62 #define	WS_GEN_LIMIT_MAX	1024
63 	uint16_t	state;
64 #define	WS_ST_FLASH	0x001	/* send only changed routes */
65 #define	WS_ST_RIP2_ALL	0x002	/* send full featured RIPv2 */
66 #define	WS_ST_AG	0x004	/* ok to aggregate subnets */
67 #define	WS_ST_SUPER_AG	0x008	/* ok to aggregate networks */
68 #define	WS_ST_QUERY	0x010	/* responding to a query */
69 #define	WS_ST_TO_ON_NET	0x020	/* sending onto one of our nets */
70 #define	WS_ST_DEFAULT	0x040	/* faking a default */
71 } ws;
72 
73 /* A buffer for what can be heard by both RIPv1 and RIPv2 listeners */
74 struct ws_buf v12buf;
75 static	union pkt_buf ripv12_buf;
76 
77 /* Another for only RIPv2 listeners */
78 static	struct ws_buf v2buf;
79 static	union pkt_buf rip_v2_buf;
80 
81 
82 
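/*
 * Set up the shared RIPv1/RIPv2 response buffer and the RIPv2-only
 * response buffer used when supplying routes.
 */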
83 void
84 bufinit(void)
85 {
86 	ripv12_buf.rip.rip_cmd = RIPCMD_RESPONSE;
87 	v12buf.buf = &ripv12_buf.rip;
88 	v12buf.base = &v12buf.buf->rip_nets[0];
89 
90 	rip_v2_buf.rip.rip_cmd = RIPCMD_RESPONSE;
91 	rip_v2_buf.rip.rip_vers = RIPv2;
92 	v2buf.buf = &rip_v2_buf.rip;
93 	v2buf.base = &v2buf.buf->rip_nets[0];
94 }
95 
96 
97 /*
98  * Send the contents of the global buffer via the non-multicast socket
99  */
100 int					/* <0 on failure */
101 output(enum output_type type,
102     struct sockaddr_in *dst,		/* send to here */
103     struct interface *ifp,
104     struct rip *buf,
105     int size)			/* this many bytes */
106 {
107 	struct sockaddr_in sin;
108 	int flags;
109 	const char *msg;
110 	int res;
111 	int ifindex;
112 	struct in_addr addr;
113 
114 	sin = *dst;
115 	if (sin.sin_port == 0)
116 		sin.sin_port = htons(RIP_PORT);
117 
118 	flags = 0;
119 
120 	if (ifp == NULL && type == OUT_MULTICAST) {
121 		msglog("Cannot send RIP message to %s",
122 		    inet_ntoa(sin.sin_addr));
123 		return (-1);
124 	}
125 
126 	switch (type) {
127 	case OUT_QUERY:
128 		msg = "Answer Query";
129 		break;
130 	case OUT_UNICAST:
131 		msg = "Send";
132 		flags = MSG_DONTROUTE;
133 		break;
134 	case OUT_BROADCAST:
135 		msg = "Send bcast";
136 		break;
137 	case OUT_MULTICAST:
138 		msg = "Send mcast";
139 		break;
140 
141 	case NO_OUT_MULTICAST:
142 	case NO_OUT_RIPV2:
143 	default:
144 #ifdef DEBUG
145 		abort();
146 #endif
147 		return (-1);
148 	}
149 
150 	/*
151 	 * IP_PKTINFO overrides IP_MULTICAST_IF, so we don't set ifindex
152 	 * for multicast traffic.
153 	 */
154 	ifindex = (type != OUT_MULTICAST && type != OUT_QUERY &&
155 	    ifp != NULL && ifp->int_phys != NULL) ?
156 	    ifp->int_phys->phyi_index : 0;
157 
158 	if (rip_sock_interface != ifp) {
159 		/*
160 		 * For multicast, we have to choose the source
161 		 * address.  This is either the local address
162 		 * (non-point-to-point) or the remote address.
163 		 */
164 		if (ifp != NULL) {
165 			addr.s_addr = (ifp->int_if_flags & IFF_POINTOPOINT) ?
166 			    ifp->int_dstaddr : ifp->int_addr;
167 			if (type == OUT_MULTICAST &&
168 			    setsockopt(rip_sock, IPPROTO_IP,
169 			    IP_MULTICAST_IF, &addr, sizeof (addr)) == -1) {
170 				LOGERR("setsockopt(rip_sock, IP_MULTICAST_IF)");
171 				return (-1);
172 			}
173 		}
174 		rip_sock_interface = ifp;
175 	}
176 
177 	trace_rip(msg, "to", &sin, ifp, buf, size);
178 
179 	res = sendtoif(rip_sock, buf, size, flags, &sin, ifindex);
180 	if (res < 0 && (ifp == NULL || !(ifp->int_state & IS_BROKE))) {
181 		writelog(LOG_WARNING, "%s sendto(%s%s%s.%d): %s", msg,
182 		    ifp != NULL ? ifp->int_name : "",
183 		    ifp != NULL ? ", " : "",
184 		    inet_ntoa(sin.sin_addr),
185 		    ntohs(sin.sin_port),
186 		    rip_strerror(errno));
187 	}
188 
189 	return (res);
190 }
191 
192 /*
193  * Semantically identical to sendto(), but sends the message through a
194  * specific interface (if ifindex is non-zero) using IP_PKTINFO.
195  */
196 int
197 sendtoif(int fd, const void *buf, uint_t bufsize, uint_t flags,
198     struct sockaddr_in *sinp, uint_t ifindex)
199 {
200 	struct iovec iov;
201 	struct msghdr msg;
202 	struct cmsghdr *cmsgp;
203 	struct in_pktinfo *ipip;
204 
205 	iov.iov_base = (void *)buf;
206 	iov.iov_len = bufsize;
207 
208 	(void) memset(&msg, 0, sizeof (struct msghdr));
209 	msg.msg_name = (struct sockaddr *)sinp;
210 	msg.msg_namelen = sizeof (struct sockaddr_in);
211 	msg.msg_iov = &iov;
212 	msg.msg_iovlen = 1;
213 
214 	if (ifindex != 0) {
215 		/*
216 		 * We can't precisely predict the alignment padding we'll
217 		 * need, so we allocate the maximum alignment and then
218 		 * use CMSG_NXTHDR() to fix it up at the end.
219 		 */
220 		msg.msg_controllen = sizeof (*cmsgp) + _MAX_ALIGNMENT +
221 		    sizeof (*ipip) + _MAX_ALIGNMENT + sizeof (*cmsgp);
222 		msg.msg_control = alloca(msg.msg_controllen);
223 
224 		cmsgp = CMSG_FIRSTHDR(&msg);
225 		ipip = (void *)CMSG_DATA(cmsgp);
226 		(void) memset(ipip, 0, sizeof (struct in_pktinfo));
227 		ipip->ipi_ifindex = ifindex;
228 		cmsgp->cmsg_len = (caddr_t)(ipip + 1) - (caddr_t)cmsgp;
229 		cmsgp->cmsg_type = IP_PKTINFO;
230 		cmsgp->cmsg_level = IPPROTO_IP;
231 
232 		/*
233 		 * Correct the control message length.
234 		 */
235 		cmsgp = CMSG_NXTHDR(&msg, cmsgp);
236 		msg.msg_controllen = (caddr_t)cmsgp - (caddr_t)msg.msg_control;
237 	}
238 
239 	return (sendmsg(fd, &msg, flags));
240 }
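/*
 * When ifindex is 0 no control data is attached, so the sendmsg() above
 * is equivalent to
 *
 *	sendto(fd, buf, bufsize, flags, (struct sockaddr *)sinp,
 *	    sizeof (struct sockaddr_in));
 *
 * which is how multicast packets and query replies are sent; see the
 * ifindex computation in output().
 */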
241 
242 /*
243  * Find the first key for a packet to send.
244  * Try for a key that is eligible and has not expired, but settle for
245  * the last key if they have all expired.
246  * If no key is ready yet, give up.
247  */
248 struct auth *
249 find_auth(struct interface *ifp)
250 {
251 	struct auth *ap, *res = NULL;
252 	int i;
253 
254 
255 	if (ifp == NULL)
256 		return (NULL);
257 
258 	if ((ap = ifp->int_auth) == NULL)
259 		return (NULL);
260 
261 	for (i = 0; i < MAX_AUTH_KEYS; i++, ap++) {
262 		/* stop looking after the last key */
263 		if (ap->type == RIP_AUTH_NONE)
264 			break;
265 
266 		/* ignore keys that are not ready yet */
267 		if ((ulong_t)ap->start > (ulong_t)clk.tv_sec)
268 			continue;
269 
270 		if ((ulong_t)ap->end < (ulong_t)clk.tv_sec) {
271 			/* note best expired password as a fall-back */
272 			if (res == NULL ||
273 			    (((ulong_t)ap->end > (ulong_t)res->end) &&
274 			    ((ulong_t)res->end < (ulong_t)clk.tv_sec)))
275 				res = ap;
276 			continue;
277 		}
278 
279 		/* note key with the best future */
280 		if (res == NULL || (ulong_t)res->end < (ulong_t)ap->end)
281 			res = ap;
282 	}
283 	return (res);
284 }
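/*
 * Example (illustrative): keys whose start time has not yet arrived are
 * skipped; among the rest, the unexpired key with the latest end time is
 * chosen.  Only when every usable key has expired does the key that
 * expired most recently serve as a fall-back, and supply() then logs a
 * warning about transmitting with an expired key.
 */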
285 
286 
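/*
 * Reset a walk-supply output buffer: rewind the entry pointer, zero the
 * entries, and, when a key is in use, install the leading authentication
 * entry (cleartext password, or MD5 header with the last slot reserved
 * for the trailer).
 */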
287 void
288 clr_ws_buf(struct ws_buf *wb, struct auth *ap)
289 {
290 	struct netauth *na;
291 
292 	wb->lim = wb->base + NETS_LEN;
293 	wb->n = wb->base;
294 	(void) memset(wb->n, 0, NETS_LEN*sizeof (*wb->n));
295 
296 	/*
297 	 * (start to) install authentication if appropriate
298 	 */
299 	if (ap == NULL)
300 		return;
301 
302 	na = (struct netauth *)wb->n;
303 	if (ap->type == RIP_AUTH_PW) {
304 		na->a_family = RIP_AF_AUTH;
305 		na->a_type = RIP_AUTH_PW;
306 		(void) memcpy(na->au.au_pw, ap->key, sizeof (na->au.au_pw));
307 		wb->n++;
308 
309 	} else if (ap->type ==  RIP_AUTH_MD5) {
310 		na->a_family = RIP_AF_AUTH;
311 		na->a_type = RIP_AUTH_MD5;
312 		na->au.a_md5.md5_keyid = ap->keyid;
313 		na->au.a_md5.md5_auth_len = RIP_AUTH_MD5_LEN;
314 		na->au.a_md5.md5_seqno = htonl(clk.tv_sec);
315 		wb->n++;
316 		wb->lim--;		/* make room for trailer */
317 	}
318 }
319 
320 
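/*
 * Finish keyed-MD5 authentication: record the length of the RIP data in
 * the leading authentication entry, append a trailer entry, and store in
 * it the MD5 digest computed over the packet (through the trailer's
 * family/type words) followed by the secret key.
 */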
321 void
322 end_md5_auth(struct ws_buf *wb, struct auth *ap)
323 {
324 	struct netauth *na, *na2;
325 	MD5_CTX md5_ctx;
326 	int len;
327 
328 	na = (struct netauth *)wb->base;
329 	na2 = (struct netauth *)wb->n;
330 	len = (char *)na2-(char *)wb->buf;
331 	na2->a_family = RIP_AF_AUTH;
332 	na2->a_type = RIP_AUTH_TRAILER;
333 	na->au.a_md5.md5_pkt_len = htons(len);
334 	MD5Init(&md5_ctx);
335 	/* len+4 to include auth trailer's family/type in MD5 sum */
336 	MD5Update(&md5_ctx, (uchar_t *)wb->buf, len + 4);
337 	MD5Update(&md5_ctx, ap->key, RIP_AUTH_MD5_LEN);
338 	MD5Final(na2->au.au_pw, &md5_ctx);
339 	wb->n++;
340 }
341 
342 
343 /*
344  * Send the buffer
345  */
346 static void
347 supply_write(struct ws_buf *wb)
348 {
349 	/*
350 	 * Output multicast only if legal.
351 	 * If we would multicast and it would be illegal, then discard the
352 	 * packet.
353 	 */
354 	switch (wb->type) {
355 	case NO_OUT_MULTICAST:
356 		trace_pkt("skip multicast to %s because impossible",
357 		    naddr_ntoa(ws.to.sin_addr.s_addr));
358 		break;
359 	case NO_OUT_RIPV2:
360 		break;
361 	default:
362 		if (ws.a != NULL && ws.a->type == RIP_AUTH_MD5)
363 			end_md5_auth(wb, ws.a);
364 		if (output(wb->type, &ws.to, ws.ifp, wb->buf,
365 		    ((char *)wb->n - (char *)wb->buf)) < 0 && ws.ifp != NULL)
366 			if_sick(ws.ifp, _B_FALSE);
367 		ws.npackets++;
368 		break;
369 	}
370 
371 	clr_ws_buf(wb, ws.a);
372 }
373 
374 
375 /*
376  * Put an entry into the packet
377  */
378 static void
379 supply_out(struct ag_info *ag)
380 {
381 	uint32_t dstcount;
382 	in_addr_t mask, v1_mask, dst_h, ddst_h = 0;
383 	struct ws_buf *wb;
384 
385 
386 	/*
387 	 * Skip this route if doing a flash update and it and the routes
388 	 * it aggregates have not changed recently.
389 	 */
390 	if (ag->ag_seqno < update_seqno && (ws.state & WS_ST_FLASH))
391 		return;
392 
393 	dst_h = ag->ag_dst_h;
394 	mask = ag->ag_mask;
395 	v1_mask = ripv1_mask_host(htonl(dst_h),
396 	    (ws.state & WS_ST_TO_ON_NET) ? ws.ifp : NULL);
397 	dstcount = 0;
398 
399 	/*
400 	 * If we are sending RIPv2 packets that cannot (or must not) be
401 	 * heard by RIPv1 listeners, do not worry about sub- or supernets.
402 	 * Subnets (from other networks) can only be sent via multicast.
403 	 * A pair of subnet routes might have been promoted so that they
404 	 * are legal to send by RIPv1.
405 	 * If RIPv1 is off, use the multicast buffer.
406 	 */
407 	if ((ws.state & WS_ST_RIP2_ALL) ||
408 	    ((ag->ag_state & AGS_RIPV2) && v1_mask != mask)) {
409 		/* use the RIPv2-only buffer */
410 		wb = &v2buf;
411 
412 	} else {
413 		/*
414 		 * use the RIPv1-or-RIPv2 buffer
415 		 */
416 		wb = &v12buf;
417 
418 		/*
419 		 * Convert supernet route into corresponding set of network
420 		 * routes for RIPv1, but leave non-contiguous netmasks
421 		 * to ag_check().
422 		 */
423 		if (v1_mask > mask &&
424 		    mask + (mask & -mask) == 0) {
425 			ddst_h = v1_mask & -v1_mask;
426 			dstcount = (v1_mask & ~mask)/ddst_h;
427 
428 			if (dstcount > ws.gen_limit) {
429 				/*
430 				 * Punt if we would have to generate an
431 				 * unreasonable number of routes.
432 				 */
433 				if (TRACECONTENTS)
434 					trace_misc("sending %s-->%s as 1"
435 					    " instead of %d routes",
436 					    addrname(htonl(dst_h), mask, 1),
437 					    naddr_ntoa(ws.to.sin_addr.s_addr),
438 					    dstcount + 1);
439 				dstcount = 0;
440 
441 			} else {
442 				mask = v1_mask;
443 				ws.gen_limit -= dstcount;
444 			}
445 		}
446 	}
447 
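	/*
	 * Worked example (illustrative): expanding an aggregated
	 * 192.168.0.0/22 for RIPv1 listeners whose natural mask is /24
	 * gives ddst_h = 0x100 and dstcount = 3, so the loop below
	 * emits 192.168.0.0 through 192.168.3.0 as four separate
	 * network routes.
	 */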
448 	do {
449 		wb->n->n_family = RIP_AF_INET;
450 		wb->n->n_dst = htonl(dst_h);
451 		/*
452 		 * If the route is from router-discovery or we are
453 		 * shutting down, or this is a broken/sick interface,
454 		 * admit only a bad metric.
455 		 */
456 		wb->n->n_metric = ((stopint || ag->ag_metric < 1 ||
457 		    (ag->ag_ifp && (ag->ag_ifp->int_state &
458 		    (IS_BROKE|IS_SICK)))) ? HOPCNT_INFINITY : ag->ag_metric);
459 		wb->n->n_metric = htonl(wb->n->n_metric);
460 		/*
461 		 * Any non-zero bits in the supposedly unused RIPv1 fields
462 		 * cause the old `routed` to ignore the route.
463 		 * That means the mask and so forth cannot be sent
464 		 * in the hybrid RIPv1/RIPv2 mode.
465 		 */
466 		if (ws.state & WS_ST_RIP2_ALL) {
467 			if (ag->ag_nhop != 0 &&
468 			    ((ws.state & WS_ST_QUERY) ||
469 			    (ag->ag_nhop != ws.ifp->int_addr &&
470 			    on_net(ag->ag_nhop, ws.ifp->int_net,
471 			    ws.ifp->int_mask)) &&
472 			    ifwithaddr(ag->ag_nhop, _B_FALSE, _B_FALSE) ==
473 			    NULL))
474 				wb->n->n_nhop = ag->ag_nhop;
475 			wb->n->n_mask = htonl(mask);
476 			wb->n->n_tag = ag->ag_tag;
477 		}
478 		dst_h += ddst_h;
479 
480 		if (++wb->n >= wb->lim)
481 			supply_write(wb);
482 	} while (dstcount-- > 0);
483 }
484 
485 
486 /*
487  * Supply one route from the table
488  */
489 /* ARGSUSED */
490 static int
491 walk_supply(struct radix_node *rn, void *argp)
492 {
493 #define	RT ((struct rt_entry *)rn)
494 	ushort_t ags;
495 	uint8_t metric, pref;
496 	in_addr_t dst, nhop;
497 	struct rt_spare *rts;
498 	uint_t sparecount;
499 
500 
501 	/*
502 	 * Do not advertise external remote interfaces or passive interfaces.
503 	 */
504 	if ((RT->rt_state & RS_IF) && RT->rt_ifp != NULL &&
505 	    (RT->rt_ifp->int_state & IS_PASSIVE) &&
506 	    !(RT->rt_state & RS_MHOME))
507 		return (0);
508 	/*
509 	 * Do not advertise routes learnt from /etc/gateways.
510 	 */
511 	if (RT->rt_spares[0].rts_origin == RO_FILE)
512 		return (0);
513 
514 	/*
515 	 * Do not advertise routes which would lead to forwarding on a
516 	 * non-forwarding interface.
517 	 */
518 	if (RT->rt_state & RS_NOPROPAGATE)
519 		return (0);
520 
521 	/*
522 	 * If being quiet about our ability to forward, then
523 	 * do not say anything unless responding to a query,
524 	 * except about our main interface.
525 	 */
526 	if (!should_supply(NULL) && !(ws.state & WS_ST_QUERY) &&
527 	    !(RT->rt_state & RS_MHOME))
528 		return (0);
529 
530 	dst = RT->rt_dst;
531 
532 	/*
533 	 * do not collide with the fake default route
534 	 */
535 	if (dst == RIP_DEFAULT && (ws.state & WS_ST_DEFAULT))
536 		return (0);
537 
538 	if (RT->rt_state & RS_NET_SYN) {
539 		if (RT->rt_state & RS_NET_INT) {
540 			/*
541 			 * Do not send manual synthetic network routes
542 			 * into the subnet.
543 			 */
544 			if (on_net(ws.to.sin_addr.s_addr,
545 			    ntohl(dst), RT->rt_mask))
546 				return (0);
547 
548 		} else {
549 			/*
550 			 * Do not send automatic synthetic network routes
551 			 * if they are not needed because no RIPv1 listeners
552 			 * can hear them.
553 			 */
554 			if (ws.state & WS_ST_RIP2_ALL)
555 				return (0);
556 
557 			/*
558 			 * Do not send automatic synthetic network routes to
559 			 * the real subnet.
560 			 */
561 			if (on_net(ws.to.sin_addr.s_addr,
562 			    ntohl(dst), RT->rt_mask))
563 				return (0);
564 		}
565 		nhop = 0;
566 
567 	} else {
568 		/*
569 		 * Advertise the next hop if this is not a route for one
570 		 * of our interfaces and the next hop is on the same
571 		 * network as the target.
572 		 * The final determination is made by supply_out().
573 		 */
574 		if (!(RT->rt_state & RS_IF) && !(RT->rt_state & RS_MHOME) &&
575 		    RT->rt_gate != loopaddr)
576 			nhop = RT->rt_gate;
577 		else
578 			nhop = 0;
579 	}
580 
581 	metric = RT->rt_metric;
582 	ags = 0;
583 
584 	if (!RT_ISHOST(RT)) {
585 		/*
586 		 * Always suppress network routes into other, existing
587 		 * network routes
588 		 */
589 		ags |= AGS_SUPPRESS;
590 
591 		/*
592 		 * Generate supernets if allowed.
593 		 * If we can be heard by RIPv1 systems, we will
594 		 * later convert back to ordinary nets.
595 		 * This unifies dealing with received supernets.
596 		 */
597 		if ((ws.state & WS_ST_AG) && ((RT->rt_state & RS_SUBNET) ||
598 		    (ws.state & WS_ST_SUPER_AG)))
599 			ags |= AGS_AGGREGATE;
600 	} else if (!(RT->rt_state & RS_MHOME)) {
601 		/*
602 		 * We should always suppress (into existing network routes)
603 		 * the host routes for the local end of our point-to-point
604 		 * links.
605 		 * If we are suppressing host routes in general, then do so.
606 		 * Avoid advertising host routes onto their own network,
607 		 * where they should be handled by proxy-ARP.
608 		 */
609 		if ((RT->rt_state & RS_LOCAL) || ridhosts ||
610 		    on_net(dst, ws.to_net, ws.to_mask))
611 			ags |= AGS_SUPPRESS;
612 
613 		/*
614 		 * Aggregate stray host routes into network routes if allowed.
615 		 * We cannot aggregate host routes into small network routes
616 		 * without confusing RIPv1 listeners into thinking the
617 		 * network routes are host routes.
618 		 */
619 		if ((ws.state & WS_ST_AG) && (ws.state & WS_ST_RIP2_ALL))
620 			ags |= AGS_AGGREGATE;
621 	}
622 
623 	/*
624 	 * Do not send RIPv1 advertisements of subnets to other
625 	 * networks. If possible, multicast them by RIPv2.
626 	 */
627 	if ((RT->rt_state & RS_SUBNET) && !(ws.state & WS_ST_RIP2_ALL) &&
628 	    !on_net(dst, ws.to_std_net, ws.to_std_mask))
629 		ags |= AGS_RIPV2 | AGS_AGGREGATE;
630 
631 
632 	/*
633 	 * Do not send a route back to where it came from, except in
634 	 * response to a query.  This is "split-horizon".  That means not
635 	 * advertising back to the same network and so via the same interface.
636 	 *
637 	 * We want to suppress routes that might have been fragmented
638 	 * from this route by a RIPv1 router and sent back to us, and so we
639 	 * cannot forget this route here.  Let the split-horizon route
640 	 * suppress the fragmented routes and then itself be forgotten.
641 	 *
642 	 * Include the routes for both ends of point-to-point interfaces
643 	 * among those suppressed by split-horizon, since the other side
644 	 * should know them as well as we do.
645 	 *
646 	 * Notice spare routes with the same metric that we are about to
647 	 * advertise, to split the horizon on redundant, inactive paths.
648 	 */
649 	if (ws.ifp != NULL && !(ws.state & WS_ST_QUERY) &&
650 	    (ws.state & WS_ST_TO_ON_NET) && (!(RT->rt_state & RS_IF) ||
651 	    (ws.ifp->int_if_flags & IFF_POINTOPOINT))) {
652 		for (rts = RT->rt_spares, sparecount = 0;
653 		    sparecount < RT->rt_num_spares; sparecount++, rts++) {
654 			if (rts->rts_metric > metric || rts->rts_ifp != ws.ifp)
655 				continue;
656 
657 			/*
658 			 * If we do not mark the route with AGS_SPLIT_HZ here,
659 			 * it will be poisoned-reverse, or advertised back
660 			 * toward its source with an infinite metric.
661 			 * If we have recently advertised the route with a
662 			 * better metric than we now have, then we should
663 			 * poison-reverse the route before suppressing it for
664 			 * split-horizon.
665 			 *
666 			 * In almost all cases, if there is no spare for the
667 			 * route then it is either old and dead or a brand
668 			 * new route. If it is brand new, there is no need
669 			 * for poison-reverse. If it is old and dead, it
670 			 * is already poisoned.
671 			 */
672 			if (RT->rt_poison_time < now_expire ||
673 			    RT->rt_poison_metric >= metric ||
674 			    RT->rt_spares[1].rts_gate == 0) {
675 				ags |= AGS_SPLIT_HZ;
676 				ags &= ~AGS_SUPPRESS;
677 			}
678 			metric = HOPCNT_INFINITY;
679 			break;
680 		}
681 	}
682 
683 	/*
684 	 * Keep track of the best metric with which the
685 	 * route has been advertised recently.
686 	 */
687 	if (RT->rt_poison_metric >= metric ||
688 	    RT->rt_poison_time < now_expire) {
689 		RT->rt_poison_time = now.tv_sec;
690 		RT->rt_poison_metric = metric;
691 	}
692 
693 	/*
694 	 * Adjust the outgoing metric by the cost of the link.
695 	 * Avoid aggregation when a route is counting to infinity.
696 	 */
697 	pref = RT->rt_poison_metric + ws.metric;
698 	metric += ws.metric;
699 
700 	/*
701 	 * If this is a static route pointing to the same interface
702 	 * upon which we are sending out the RIP RESPONSE,
703 	 * adjust the preference so that we don't aggregate into this
704 	 * route. Note that the maximum possible hop count on a route
705 	 * per RFC 2453 is 16 (HOPCNT_INFINITY)
706 	 */
707 	if ((RT->rt_state & RS_STATIC) && (ws.ifp == RT->rt_ifp))
708 		pref = (HOPCNT_INFINITY+1);
709 
710 	/*
711 	 * Do not advertise stable routes that will be ignored,
712 	 * unless we are answering a query.
713 	 * If the route recently was advertised with a metric that
714 	 * would have been less than infinity through this interface,
715 	 * we need to continue to advertise it in order to poison it.
716 	 */
717 	if (metric >= HOPCNT_INFINITY) {
718 		if (!(ws.state & WS_ST_QUERY) && (pref >= HOPCNT_INFINITY ||
719 		    RT->rt_poison_time < now_garbage))
720 			return (0);
721 
722 		metric = HOPCNT_INFINITY;
723 	}
724 
725 	/*
726 	 * supply this route out on the wire; we only care about dest/mask
727 	 * and so can ignore all rt_spares[i] with i > 0
728 	 */
729 	ag_check(dst, RT->rt_mask, 0, RT->rt_ifp, nhop, metric, pref,
730 	    RT->rt_seqno, RT->rt_tag, ags, supply_out);
731 	return (0);
732 #undef RT
733 }
734 
735 
736 /*
737  * Supply dst with the contents of the routing tables.
738  * If this won't fit in one packet, chop it up into several.
739  */
740 void
741 supply(struct sockaddr_in *dst,
742     struct interface *ifp,	/* output interface */
743     enum output_type type,
744     int flash,			/* 1=flash update */
745     int vers,			/* RIP version */
746     boolean_t passwd_ok)	/* OK to include cleartext password */
747 {
748 	struct rt_entry *rt;
749 	uint8_t def_metric;
750 
751 
752 	ws.state = 0;
753 	ws.gen_limit = WS_GEN_LIMIT_MAX;
754 
755 	ws.to = *dst;
756 	ws.to_std_mask = std_mask(ws.to.sin_addr.s_addr);
757 	ws.to_std_net = ntohl(ws.to.sin_addr.s_addr) & ws.to_std_mask;
758 
759 	if (ifp != NULL) {
760 		ws.to_mask = ifp->int_mask;
761 		ws.to_net = ifp->int_net;
762 		if (on_net(ws.to.sin_addr.s_addr, ws.to_net, ws.to_mask) ||
763 		    type == OUT_MULTICAST)
764 			ws.state |= WS_ST_TO_ON_NET;
765 
766 	} else {
767 		ws.to_mask = ripv1_mask_net(ws.to.sin_addr.s_addr, NULL);
768 		ws.to_net = ntohl(ws.to.sin_addr.s_addr) & ws.to_mask;
769 		rt = rtfind(dst->sin_addr.s_addr);
770 		if (rt != NULL)
771 			ifp = rt->rt_ifp;
772 		else
773 			return;
774 	}
775 
776 	ws.npackets = 0;
777 	if (flash)
778 		ws.state |= WS_ST_FLASH;
779 
780 	ws.ifp = ifp;
781 
782 	/*
783 	 * Routes in the table were already adjusted by their respective
784 	 * destination interface costs (which are zero by default) on
785 	 * input.  The following is the value by which each route's metric
786 	 * will be bumped up on output.
787 	 */
788 	ws.metric = 1;
789 
790 	ripv12_buf.rip.rip_vers = vers;
791 
792 	switch (type) {
793 	case OUT_MULTICAST:
794 		if (ifp->int_if_flags & IFF_MULTICAST)
795 			v2buf.type = OUT_MULTICAST;
796 		else
797 			v2buf.type = NO_OUT_MULTICAST;
798 		v12buf.type = OUT_BROADCAST;
799 		break;
800 
801 	case OUT_QUERY:
802 		ws.state |= WS_ST_QUERY;
803 		/* FALLTHROUGH */
804 	case OUT_BROADCAST:
805 	case OUT_UNICAST:
806 		v2buf.type = (vers == RIPv2) ? type : NO_OUT_RIPV2;
807 		v12buf.type = type;
808 		break;
809 
810 	case NO_OUT_MULTICAST:
811 	case NO_OUT_RIPV2:
812 		return;			/* no output */
813 	}
814 
815 	if (vers == RIPv2) {
816 		/* full RIPv2 only if it cannot be heard by RIPv1 listeners */
817 		if (type != OUT_BROADCAST)
818 			ws.state |= WS_ST_RIP2_ALL;
819 		if ((ws.state & WS_ST_QUERY) || !(ws.state & WS_ST_TO_ON_NET)) {
820 			ws.state |= (WS_ST_AG | WS_ST_SUPER_AG);
821 		} else if (ifp == NULL || !(ifp->int_state & IS_NO_AG)) {
822 			ws.state |= WS_ST_AG;
823 			if (type != OUT_BROADCAST && (ifp == NULL ||
824 			    !(ifp->int_state & IS_NO_SUPER_AG)))
825 				ws.state |= WS_ST_SUPER_AG;
826 		}
827 
828 		/* See if this packet needs authenticating */
829 		ws.a = find_auth(ifp);
830 		if (!passwd_ok && ws.a != NULL && ws.a->type == RIP_AUTH_PW)
831 			ws.a = NULL;
832 		if (ws.a != NULL && (ulong_t)ws.a->end < (ulong_t)clk.tv_sec &&
833 		    !ws.a->warnedflag) {
834 			/*
835 			 * If the best key is an expired one, we may as
836 			 * well use it.  Log this event.
837 			 */
838 			writelog(LOG_WARNING,
839 			    "Using expired auth while transmitting to %s",
840 			    naddr_ntoa(ws.to.sin_addr.s_addr));
841 			ws.a->warnedflag = 1;
842 		}
843 	} else {
844 		ws.a = NULL;
845 	}
846 
847 	clr_ws_buf(&v12buf, ws.a);
848 	clr_ws_buf(&v2buf, ws.a);
849 
850 	/*
851 	 * Fake a default route if asked and if there is not already
852 	 * a better, real default route.
853 	 */
854 	if (should_supply(NULL) && (def_metric = ifp->int_d_metric) != 0) {
855 		if (NULL == (rt = rtget(RIP_DEFAULT, 0)) ||
856 		    rt->rt_metric+ws.metric >= def_metric) {
857 			ws.state |= WS_ST_DEFAULT;
858 			ag_check(0, 0, 0, NULL, 0, def_metric, def_metric,
859 			    0, 0, 0, supply_out);
860 		} else {
861 			def_metric = rt->rt_metric+ws.metric;
862 		}
863 
864 		/*
865 		 * If both RIPv2 and the poor-man's router discovery
866 		 * kludge are on, arrange to advertise an extra
867 		 * default route via RIPv1.
868 		 */
869 		if ((ws.state & WS_ST_RIP2_ALL) &&
870 		    (ifp->int_state & IS_PM_RDISC)) {
871 			ripv12_buf.rip.rip_vers = RIPv1;
872 			v12buf.n->n_family = RIP_AF_INET;
873 			v12buf.n->n_dst = htonl(RIP_DEFAULT);
874 			v12buf.n->n_metric = htonl(def_metric);
875 			v12buf.n++;
876 		}
877 	}
878 
879 	(void) rn_walktree(rhead, walk_supply, NULL);
880 	ag_flush(0, 0, supply_out);
881 
882 	/*
883 	 * Flush the packet buffers, provided they are not empty and
884 	 * do not contain only the password.
885 	 */
886 	if (v12buf.n != v12buf.base &&
887 	    (v12buf.n > v12buf.base+1 ||
888 	    v12buf.base->n_family != RIP_AF_AUTH))
889 		supply_write(&v12buf);
890 	if (v2buf.n != v2buf.base && (v2buf.n > v2buf.base+1 ||
891 	    v2buf.base->n_family != RIP_AF_AUTH))
892 		supply_write(&v2buf);
893 
894 	/*
895 	 * If we sent nothing and this is an answer to a query, send
896 	 * an empty buffer.
897 	 */
898 	if (ws.npackets == 0 && (ws.state & WS_ST_QUERY)) {
899 		supply_write(&v2buf);
900 		if (ws.npackets == 0)
901 			supply_write(&v12buf);
902 	}
903 }
904 
905 
906 /*
907  * send all of the routing table or just do a flash update
908  */
909 void
910 rip_bcast(int flash)
911 {
912 	static struct sockaddr_in dst = {AF_INET};
913 	struct interface *ifp;
914 	enum output_type type;
915 	int vers;
916 	struct timeval rtime;
917 
918 
919 	need_flash = _B_FALSE;
920 	intvl_random(&rtime, MIN_WAITTIME, MAX_WAITTIME);
921 	no_flash = rtime;
922 	timevaladd(&no_flash, &now);
923 
924 	if (!rip_enabled)
925 		return;
926 
927 	trace_act("send %s and inhibit dynamic updates for %.3f sec",
928 	    flash ? "dynamic update" : "all routes",
929 	    rtime.tv_sec + ((double)rtime.tv_usec)/1000000.0);
930 
931 	for (ifp = ifnet; ifp != NULL; ifp = ifp->int_next) {
932 		/*
933 		 * Skip interfaces not doing RIP or for which IP
934 		 * forwarding isn't turned on.  Skip duplicate
935 		 * interfaces; we don't want to generate duplicate
936 		 * packets.  Do try broken interfaces to see if they
937 		 * have healed.
938 		 */
939 		if (IS_RIP_OUT_OFF(ifp->int_state) ||
940 		    (ifp->int_state & IS_DUP) ||
941 		    !IS_IFF_ROUTING(ifp->int_if_flags))
942 			continue;
943 
944 		/* skip turned off interfaces */
945 		if (!IS_IFF_UP(ifp->int_if_flags))
946 			continue;
947 
948 		/* skip interfaces we shouldn't use */
949 		if (IS_IFF_QUIET(ifp->int_if_flags))
950 			continue;
951 
952 		vers = (ifp->int_state & IS_NO_RIPV1_OUT) ? RIPv2 : RIPv1;
953 		dst.sin_addr.s_addr = ifp->int_ripout_addr;
954 
955 		/*
956 		 * Ignore the interface if it's not broadcast,
957 		 * point-to-point, or remote.  It must be non-broadcast
958 		 * multiaccess, and therefore unsupported.
959 		 */
960 		if (!(ifp->int_if_flags & (IFF_BROADCAST | IFF_POINTOPOINT)) &&
961 		    !(ifp->int_state & IS_REMOTE))
962 			continue;
963 
964 		type = (ifp->int_if_flags & IFF_BROADCAST) ?
965 		    OUT_BROADCAST : OUT_UNICAST;
966 		if (vers == RIPv2 && (ifp->int_if_flags & IFF_MULTICAST) &&
967 		    !(ifp->int_state & IS_NO_RIP_MCAST))
968 			type = OUT_MULTICAST;
969 
970 		supply(&dst, ifp, type, flash, vers, _B_TRUE);
971 	}
972 
973 	update_seqno++;			/* all routes are up to date */
974 }
975 
976 
977 /*
978  * Ask for routes
979  * Do it only once to an interface, and not even after the interface
980  * was broken and recovered.
981  */
982 void
983 rip_query(void)
984 {
985 	static struct sockaddr_in dst = {AF_INET};
986 	struct interface *ifp;
987 	struct rip buf;
988 	enum output_type type;
989 
990 
991 	if (!rip_enabled)
992 		return;
993 
994 	(void) memset(&buf, 0, sizeof (buf));
995 
996 	for (ifp = ifnet; ifp; ifp = ifp->int_next) {
997 		/*
998 		 * Skip interfaces that have already been queried.  Do not ask
999 		 * via interfaces through which we don't accept input.
1000 		 * Do not ask via interfaces that cannot send RIP
1001 		 * packets.  Don't send queries on duplicate
1002 		 * interfaces; that would generate duplicate packets
1003 		 * on link.  Do try broken interfaces to see if they
1004 		 * have healed.
1005 		 */
1006 		if (IS_RIP_IN_OFF(ifp->int_state) ||
1007 		    (ifp->int_state & IS_DUP) ||
1008 		    ifp->int_query_time != NEVER)
1009 			continue;
1010 
1011 		/* skip turned off interfaces */
1012 		if (!IS_IFF_UP(ifp->int_if_flags))
1013 			continue;
1014 
1015 		/* skip interfaces we shouldn't use */
1016 		if (IS_IFF_QUIET(ifp->int_if_flags))
1017 			continue;
1018 
1019 		/*
1020 		 * Ignore the interface if it's not broadcast,
1021 		 * point-to-point, or remote.  It must be non-broadcast
1022 		 * multiaccess, and therefore unsupported.
1023 		 */
1024 		if (!(ifp->int_if_flags & (IFF_BROADCAST | IFF_POINTOPOINT)) &&
1025 		    !(ifp->int_state & IS_REMOTE))
1026 			continue;
1027 
1028 		buf.rip_cmd = RIPCMD_REQUEST;
1029 		buf.rip_nets[0].n_family = RIP_AF_UNSPEC;
1030 		buf.rip_nets[0].n_metric = htonl(HOPCNT_INFINITY);
1031 
1032 		/*
1033 		 * Send a RIPv1 query only if allowed and if we will
1034 		 * listen to RIPv1 routers.
1035 		 */
1036 		if ((ifp->int_state & IS_NO_RIPV1_OUT) ||
1037 		    (ifp->int_state & IS_NO_RIPV1_IN)) {
1038 			buf.rip_vers = RIPv2;
1039 		} else {
1040 			buf.rip_vers = RIPv1;
1041 		}
1042 
1043 		dst.sin_addr.s_addr = ifp->int_ripout_addr;
1044 
1045 		type = (ifp->int_if_flags & IFF_BROADCAST) ?
1046 		    OUT_BROADCAST : OUT_UNICAST;
1047 		if (buf.rip_vers == RIPv2 &&
1048 		    (ifp->int_if_flags & IFF_MULTICAST) &&
1049 		    !(ifp->int_state & IS_NO_RIP_MCAST))
1050 			type = OUT_MULTICAST;
1051 
1052 		ifp->int_query_time = now.tv_sec+SUPPLY_INTERVAL;
1053 		if (output(type, &dst, ifp, &buf, sizeof (buf)) < 0)
1054 			if_sick(ifp, _B_FALSE);
1055 	}
1056 }
1057