/*-
 * Copyright 1994, 1995 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This code does two things necessary for the enhanced TCP metrics to
 * function in a useful manner:
 *  1) It marks all non-host routes as `cloning', thus ensuring that
 *     every actual reference to such a route actually gets turned
 *     into a reference to a host route to the specific destination
 *     requested.
 *  2) When such routes lose all their references, it arranges for them
 *     to be deleted in some random collection of circumstances, so that
 *     a large quantity of stale routing data is not kept in kernel memory
 *     indefinitely.  See in_rtqtimo() below for the exact mechanism.
 */
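
/*
 * In outline (a summary of the mechanism below, not additional
 * behaviour): a lookup through a cloning route creates a host route;
 * when the last reference to that host route is dropped, in_clsroute()
 * tags it RTPRF_OURS and stamps an expiration time; the in_rtqtimo()
 * callout then periodically walks the tree and deletes whatever has
 * expired.
 */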

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/callout.h>

#include <net/if.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip_var.h>

extern int	in_inithead(void **head, int off);
#ifdef VIMAGE
extern int	in_detachhead(void **head, int off);
#endif

#define RTPRF_OURS		RTF_PROTO3	/* set on routes we manage */

/*
 * Do what we need to do when inserting a route.
 */
static struct radix_node *
in_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
    struct radix_node *treenodes)
{
	struct rtentry *rt = (struct rtentry *)treenodes;
	struct sockaddr_in *sin = (struct sockaddr_in *)rt_key(rt);

	RADIX_NODE_HEAD_WLOCK_ASSERT(head);
	/*
	 * A little bit of help for both IP output and input:
	 *   For host routes, we make sure that RTF_BROADCAST
	 *   is set for anything that looks like a broadcast address.
	 *   This way, we can avoid an expensive call to in_broadcast()
	 *   in ip_output() most of the time (because the route passed
	 *   to ip_output() is almost always a host route).
	 *
	 *   We also do the same for local addresses, with the thought
	 *   that this might one day be used to speed up ip_input().
	 *
	 * We also mark routes to multicast addresses as such, because
	 * it's easy to do and might be useful (but this is much more
	 * dubious since it's so easy to inspect the address).
	 */
	if (rt->rt_flags & RTF_HOST) {
		if (in_broadcast(sin->sin_addr, rt->rt_ifp)) {
			rt->rt_flags |= RTF_BROADCAST;
		} else if (satosin(rt->rt_ifa->ifa_addr)->sin_addr.s_addr ==
		    sin->sin_addr.s_addr) {
			rt->rt_flags |= RTF_LOCAL;
		}
	}
	if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
		rt->rt_flags |= RTF_MULTICAST;

	if (!rt->rt_rmx.rmx_mtu && rt->rt_ifp)
		rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;

	return (rn_addroute(v_arg, n_arg, head, treenodes));
}
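
/*
 * Illustration (addresses made up): a cloned host route to 192.0.2.255
 * on an interface whose broadcast address that is gets RTF_BROADCAST
 * set by in_addroute() above, so ip_output() can test a flag instead
 * of calling in_broadcast() per packet.
 */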

/*
 * This code is the inverse of in_clsroute: on first reference, if we
 * were managing the route, stop doing so and set the expiration timer
 * back off again.
 */
static struct radix_node *
in_matroute(void *v_arg, struct radix_node_head *head)
{
	struct radix_node *rn = rn_match(v_arg, head);
	struct rtentry *rt = (struct rtentry *)rn;

	if (rt) {
		RT_LOCK(rt);
		if (rt->rt_flags & RTPRF_OURS) {
			rt->rt_flags &= ~RTPRF_OURS;
			rt->rt_rmx.rmx_expire = 0;
		}
		RT_UNLOCK(rt);
	}
	return (rn);
}

static VNET_DEFINE(int, rtq_reallyold) = 60*60; /* one hour is "really old" */
#define	V_rtq_reallyold		VNET(rtq_reallyold)
SYSCTL_VNET_INT(_net_inet_ip, IPCTL_RTEXPIRE, rtexpire, CTLFLAG_RW,
    &VNET_NAME(rtq_reallyold), 0,
    "Default expiration time on dynamically learned routes");

/* never automatically crank down to less */
static VNET_DEFINE(int, rtq_minreallyold) = 10;
#define	V_rtq_minreallyold	VNET(rtq_minreallyold)
SYSCTL_VNET_INT(_net_inet_ip, IPCTL_RTMINEXPIRE, rtminexpire, CTLFLAG_RW,
    &VNET_NAME(rtq_minreallyold), 0,
    "Minimum time to attempt to hold onto dynamically learned routes");

/* 128 cached routes is "too many" */
static VNET_DEFINE(int, rtq_toomany) = 128;
#define	V_rtq_toomany		VNET(rtq_toomany)
SYSCTL_VNET_INT(_net_inet_ip, IPCTL_RTMAXCACHE, rtmaxcache, CTLFLAG_RW,
    &VNET_NAME(rtq_toomany), 0,
    "Upper limit on dynamically learned routes");
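
/*
 * These knobs appear as net.inet.ip.rtexpire, net.inet.ip.rtminexpire
 * and net.inet.ip.rtmaxcache.  For example, an administrator who wants
 * cached routes to linger for at most five minutes could set, from
 * userland (value illustrative):
 *
 *	sysctl net.inet.ip.rtexpire=300
 */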

/*
 * On last reference drop, mark the route as belonging to us so that
 * it can be timed out.
 */
static void
in_clsroute(struct radix_node *rn, struct radix_node_head *head)
{
	struct rtentry *rt = (struct rtentry *)rn;

	RT_LOCK_ASSERT(rt);

	if (!(rt->rt_flags & RTF_UP))
		return;			/* prophylactic measures */

	if (rt->rt_flags & RTPRF_OURS)
		return;

	if (!(rt->rt_flags & RTF_DYNAMIC))
		return;

	/*
	 * If rtq_reallyold is 0, just delete the route without
	 * waiting for a timeout cycle to kill it.
	 */
	if (V_rtq_reallyold != 0) {
		rt->rt_flags |= RTPRF_OURS;
		rt->rt_rmx.rmx_expire = time_uptime + V_rtq_reallyold;
	} else {
		rtexpunge(rt);
	}
}
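
/*
 * Worked example with the defaults above: a dynamic route whose last
 * reference is dropped at uptime T gets rmx_expire = T + 3600 and thus
 * becomes eligible for deletion by in_rtqkill() an hour later; with
 * net.inet.ip.rtexpire set to 0 it is expunged immediately instead.
 */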

struct rtqk_arg {
	struct radix_node_head *rnh;
	int draining;
	int killed;
	int found;
	int updating;
	time_t nextstop;
};

/*
 * Get rid of old routes.  When draining, this deletes everything, even when
 * the timeout is not expired yet.  When updating, this makes sure that
 * nothing has a timeout longer than the current value of rtq_reallyold.
 */
static int
in_rtqkill(struct radix_node *rn, void *rock)
{
	struct rtqk_arg *ap = rock;
	struct rtentry *rt = (struct rtentry *)rn;
	int err;

	RADIX_NODE_HEAD_WLOCK_ASSERT(ap->rnh);

	if (rt->rt_flags & RTPRF_OURS) {
		ap->found++;

		if (ap->draining || rt->rt_rmx.rmx_expire <= time_uptime) {
			if (rt->rt_refcnt > 0)
				panic("rtqkill route really not free");

			err = in_rtrequest(RTM_DELETE,
					(struct sockaddr *)rt_key(rt),
					rt->rt_gateway, rt_mask(rt),
					rt->rt_flags | RTF_RNH_LOCKED, 0,
					rt->rt_fibnum);
			if (err) {
				log(LOG_WARNING, "in_rtqkill: error %d\n", err);
			} else {
				ap->killed++;
			}
		} else {
			if (ap->updating &&
			    (rt->rt_rmx.rmx_expire - time_uptime >
			     V_rtq_reallyold)) {
				rt->rt_rmx.rmx_expire =
				    time_uptime + V_rtq_reallyold;
			}
			ap->nextstop = lmin(ap->nextstop,
					    rt->rt_rmx.rmx_expire);
		}
	}

	return (0);
}
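
/*
 * Note that in_rtqkill() always returns 0: rnh_walktree() aborts the
 * walk as soon as the callback returns non-zero, and we always want to
 * visit every route in the table.
 */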

#define RTQ_TIMEOUT	60*10	/* run no less than once every ten minutes */
static VNET_DEFINE(int, rtq_timeout) = RTQ_TIMEOUT;
static VNET_DEFINE(struct callout, rtq_timer);

#define	V_rtq_timeout		VNET(rtq_timeout)
#define	V_rtq_timer		VNET(rtq_timer)

static void in_rtqtimo_one(void *rock);

static void
in_rtqtimo(void *rock)
{
	CURVNET_SET((struct vnet *) rock);
	int fibnum;
	void *newrock;
	struct timeval atv;

	for (fibnum = 0; fibnum < rt_numfibs; fibnum++) {
		newrock = rt_tables_get_rnh(fibnum, AF_INET);
		if (newrock != NULL)
			in_rtqtimo_one(newrock);
	}
	atv.tv_usec = 0;
	atv.tv_sec = V_rtq_timeout;
	callout_reset(&V_rtq_timer, tvtohz(&atv), in_rtqtimo, rock);
	CURVNET_RESTORE();
}

static void
in_rtqtimo_one(void *rock)
{
	struct radix_node_head *rnh = rock;
	struct rtqk_arg arg;
	static time_t last_adjusted_timeout = 0;

	arg.found = arg.killed = 0;
	arg.rnh = rnh;
	arg.nextstop = time_uptime + V_rtq_timeout;
	arg.draining = arg.updating = 0;
	RADIX_NODE_HEAD_LOCK(rnh);
	rnh->rnh_walktree(rnh, in_rtqkill, &arg);
	RADIX_NODE_HEAD_UNLOCK(rnh);

	/*
	 * Attempt to be somewhat dynamic about this:
	 * If there are ``too many'' routes sitting around taking up space,
	 * then crank down the timeout, and see if we can't make some more
	 * go away.  However, we make sure that we will never adjust more
	 * than once in rtq_timeout seconds, to keep from cranking down too
	 * hard.
	 */
	if ((arg.found - arg.killed > V_rtq_toomany) &&
	    (time_uptime - last_adjusted_timeout >= V_rtq_timeout) &&
	    V_rtq_reallyold > V_rtq_minreallyold) {
		V_rtq_reallyold = 2 * V_rtq_reallyold / 3;
		if (V_rtq_reallyold < V_rtq_minreallyold) {
			V_rtq_reallyold = V_rtq_minreallyold;
		}

		last_adjusted_timeout = time_uptime;
#ifdef DIAGNOSTIC
		log(LOG_DEBUG, "in_rtqtimo: adjusted rtq_reallyold to %d\n",
		    V_rtq_reallyold);
#endif
		arg.found = arg.killed = 0;
		arg.updating = 1;
		RADIX_NODE_HEAD_LOCK(rnh);
		rnh->rnh_walktree(rnh, in_rtqkill, &arg);
		RADIX_NODE_HEAD_UNLOCK(rnh);
	}
}
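
/*
 * Worked example of the back-off above: starting from the default
 * rtq_reallyold of 3600 seconds, successive overloaded passes yield
 * 3600 -> 2400 -> 1600 -> 1066 -> ..., clamped at rtq_minreallyold
 * (10 seconds by default), with at most one reduction taken per
 * rtq_timeout interval.
 */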

void
in_rtqdrain(void)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct radix_node_head *rnh;
	struct rtqk_arg arg;
	int fibnum;

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);

		for (fibnum = 0; fibnum < rt_numfibs; fibnum++) {
			rnh = rt_tables_get_rnh(fibnum, AF_INET);
			arg.found = arg.killed = 0;
			arg.rnh = rnh;
			arg.nextstop = 0;
			arg.draining = 1;
			arg.updating = 0;
			RADIX_NODE_HEAD_LOCK(rnh);
			rnh->rnh_walktree(rnh, in_rtqkill, &arg);
			RADIX_NODE_HEAD_UNLOCK(rnh);
		}
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}
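
/*
 * A drain pass (draining = 1) removes every route we manage
 * immediately, ignoring rmx_expire; see in_rtqkill() above.  Callers
 * outside this file (e.g. the protocols' low-memory drain hooks, not
 * shown here) use this when holding cached routes becomes a luxury.
 */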

void
in_setmatchfunc(struct radix_node_head *rnh, int val)
{

	rnh->rnh_matchaddr = (val != 0) ? rn_match : in_matroute;
}

static int _in_rt_was_here;
/*
 * Initialize our routing tree.
 */
int
in_inithead(void **head, int off)
{
	struct radix_node_head *rnh;

	/* XXX MRT
	 * This can be called from vfs_export.c too in which case 'off'
	 * will be 0. We know the correct value so just use that and
	 * return directly if it was 0.
	 * This is a hack that replaces an even worse hack on a bad hack
	 * on a bad design. After RELENG_7 this should be fixed but that
	 * will change the ABI, so for now do it this way.
	 */
	if (!rn_inithead(head, 32))
		return (0);

	if (off == 0)		/* XXX MRT  see above */
		return (1);	/* only do the rest for a real routing table */

	rnh = *head;
	rnh->rnh_addaddr = in_addroute;
	in_setmatchfunc(rnh, V_drop_redirect);
	rnh->rnh_close = in_clsroute;
	if (_in_rt_was_here == 0) {
		callout_init(&V_rtq_timer, CALLOUT_MPSAFE);
		callout_reset(&V_rtq_timer, 1, in_rtqtimo, curvnet);
		_in_rt_was_here = 1;
	}
	return (1);
}

#ifdef VIMAGE
int
in_detachhead(void **head, int off)
{

	callout_drain(&V_rtq_timer);
	return (1);
}
#endif

/*
 * This zaps old routes when the interface goes down or the interface
 * address is deleted.  In the latter case, it deletes static routes
 * that point to this address.  If we don't do this, we may end up
 * using the old address in the future.  The ones we always want to
 * get rid of are things like ARP entries, since the user might down
 * the interface, walk over to a completely different network, and
 * plug back in.
 */
struct in_ifadown_arg {
	struct ifaddr *ifa;
	int del;
};

static int
in_ifadownkill(struct radix_node *rn, void *xap)
{
	struct in_ifadown_arg *ap = xap;
	struct rtentry *rt = (struct rtentry *)rn;

	RT_LOCK(rt);
	if (rt->rt_ifa == ap->ifa &&
	    (ap->del || !(rt->rt_flags & RTF_STATIC))) {
		/*
		 * Acquire a reference so that it can later be freed,
		 * as the refcount would be 0 here in at least the
		 * ap->del case.
		 */
		RT_ADDREF(rt);
		/*
		 * Disconnect it from the tree and permit protocols
		 * to clean up.
		 */
		rtexpunge(rt);
		/*
		 * At this point it is an rttrash node, and in case
		 * the above is the only reference we must free it.
		 * If we do not, no one will have a pointer and the
		 * rtentry will be leaked forever.
		 * In case someone else holds a reference, we are
		 * fine as we only decrement the refcount.  In that
		 * case, if the other entity calls RT_REMREF, we
		 * will still be leaking but at least we tried.
		 */
		RTFREE_LOCKED(rt);
		return (0);
	}
	RT_UNLOCK(rt);
	return (0);
}

int
in_ifadown(struct ifaddr *ifa, int delete)
{
	struct in_ifadown_arg arg;
	struct radix_node_head *rnh;
	int fibnum;

	if (ifa->ifa_addr->sa_family != AF_INET)
		return (1);

	for (fibnum = 0; fibnum < rt_numfibs; fibnum++) {
		rnh = rt_tables_get_rnh(fibnum, AF_INET);
		arg.ifa = ifa;
		arg.del = delete;
		RADIX_NODE_HEAD_LOCK(rnh);
		rnh->rnh_walktree(rnh, in_ifadownkill, &arg);
		RADIX_NODE_HEAD_UNLOCK(rnh);
		ifa->ifa_flags &= ~IFA_ROUTE;		/* XXXlocking? */
	}
	return (0);
}

/*
 * inet versions of rt functions.  These have FIB extensions and for
 * now simply reference the _fib variants; eventually this order will
 * be reversed.
 */
void
in_rtalloc_ign(struct route *ro, u_long ignflags, u_int fibnum)
{
	rtalloc_ign_fib(ro, ignflags, fibnum);
}

int
in_rtrequest(int req,
	struct sockaddr *dst,
	struct sockaddr *gateway,
	struct sockaddr *netmask,
	int flags,
	struct rtentry **ret_nrt,
	u_int fibnum)
{
	return (rtrequest_fib(req, dst, gateway, netmask,
	    flags, ret_nrt, fibnum));
}

struct rtentry *
in_rtalloc1(struct sockaddr *dst, int report, u_long ignflags, u_int fibnum)
{
	return (rtalloc1_fib(dst, report, ignflags, fibnum));
}

void
in_rtredirect(struct sockaddr *dst,
	struct sockaddr *gateway,
	struct sockaddr *netmask,
	int flags,
	struct sockaddr *src,
	u_int fibnum)
{
	rtredirect_fib(dst, gateway, netmask, flags, src, fibnum);
}

void
in_rtalloc(struct route *ro, u_int fibnum)
{
	rtalloc_ign_fib(ro, 0UL, fibnum);
}
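
/*
 * Usage sketch (illustrative only, not taken from this file; dst_addr
 * and fibnum are assumed to be supplied by the caller): looking up a
 * route in a particular FIB and releasing it afterwards:
 *
 *	struct route ro;
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&ro.ro_dst;
 *
 *	bzero(&ro, sizeof(ro));
 *	sin->sin_family = AF_INET;
 *	sin->sin_len = sizeof(*sin);
 *	sin->sin_addr = dst_addr;
 *	in_rtalloc(&ro, fibnum);
 *	if (ro.ro_rt != NULL) {
 *		... use ro.ro_rt ...
 *		RTFREE(ro.ro_rt);
 *	}
 */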

#if 0
int	 in_rt_getifa(struct rt_addrinfo *, u_int fibnum);
int	 in_rtioctl(u_long, caddr_t, u_int);
int	 in_rtrequest1(int, struct rt_addrinfo *, struct rtentry **, u_int);
#endif
524