/*-
 * Copyright 1994, 1995 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This code does two things necessary for the enhanced TCP metrics to
 * function in a useful manner:
 *  1) It marks all non-host routes as `cloning', thus ensuring that
 *     every actual reference to such a route actually gets turned
 *     into a reference to a host route to the specific destination
 *     requested.
 *  2) When such routes lose all their references, it arranges for them
 *     to be deleted in some random collection of circumstances, so that
 *     a large quantity of stale routing data is not kept in kernel
 *     memory indefinitely.  See in_rtqtimo() below for the exact
 *     mechanism.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/callout.h>
#include <sys/vimage.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>

extern int	in_inithead(void **head, int off);

#define RTPRF_OURS		RTF_PROTO3	/* set on routes we manage */
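/*
 * Lifecycle of RTPRF_OURS, as implemented below: in_clsroute() sets it
 * (together with rmx_expire) when a cloned host route drops its last
 * reference, in_matroute() clears it again when the route is next
 * looked up, and in_rtqkill() deletes routes still carrying it once
 * their expiration time has passed.
 */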
/*
 * Do what we need to do when inserting a route.
 */
static struct radix_node *
in_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
    struct radix_node *treenodes)
{
	struct rtentry *rt = (struct rtentry *)treenodes;
	struct sockaddr_in *sin = (struct sockaddr_in *)rt_key(rt);
	struct radix_node *ret;

	/*
	 * A little bit of help for both IP output and input:
	 * For host routes, we make sure that RTF_BROADCAST
	 * is set for anything that looks like a broadcast address.
	 * This way, we can avoid an expensive call to in_broadcast()
	 * in ip_output() most of the time (because the route passed
	 * to ip_output() is almost always a host route).
	 *
	 * We also do the same for local addresses, with the thought
	 * that this might one day be used to speed up ip_input().
	 *
	 * We also mark routes to multicast addresses as such, because
	 * it's easy to do and might be useful (but this is much more
	 * dubious since it's so easy to inspect the address).
	 */
	if (rt->rt_flags & RTF_HOST) {
		if (in_broadcast(sin->sin_addr, rt->rt_ifp)) {
			rt->rt_flags |= RTF_BROADCAST;
		} else if (satosin(rt->rt_ifa->ifa_addr)->sin_addr.s_addr ==
		    sin->sin_addr.s_addr) {
			rt->rt_flags |= RTF_LOCAL;
		}
	}
	if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
		rt->rt_flags |= RTF_MULTICAST;

	if (!rt->rt_rmx.rmx_mtu && rt->rt_ifp)
		rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;

	ret = rn_addroute(v_arg, n_arg, head, treenodes);
	if (ret == NULL && rt->rt_flags & RTF_HOST) {
		struct rtentry *rt2;
		/*
		 * We are trying to add a host route, but can't.
		 * Find out if it is because of an
		 * ARP entry and delete it if so.
		 */
		rt2 = in_rtalloc1((struct sockaddr *)sin, 0,
		    RTF_CLONING, rt->rt_fibnum);
		if (rt2) {
			if (rt2->rt_flags & RTF_LLINFO &&
			    rt2->rt_flags & RTF_HOST &&
			    rt2->rt_gateway &&
			    rt2->rt_gateway->sa_family == AF_LINK) {
				rtexpunge(rt2);
				RTFREE_LOCKED(rt2);
				ret = rn_addroute(v_arg, n_arg, head,
				    treenodes);
			} else
				RTFREE_LOCKED(rt2);
		}
	}

	return ret;
}

/*
 * This code is the inverse of in_clsroute: on first reference, if we
 * were managing the route, stop doing so and set the expiration timer
 * back off again.
 */
static struct radix_node *
in_matroute(void *v_arg, struct radix_node_head *head)
{
	struct radix_node *rn = rn_match(v_arg, head);
	struct rtentry *rt = (struct rtentry *)rn;

	/* XXX locking? */
	if (rt && rt->rt_refcnt == 0) {		/* this is first reference */
		if (rt->rt_flags & RTPRF_OURS) {
			rt->rt_flags &= ~RTPRF_OURS;
			rt->rt_rmx.rmx_expire = 0;
		}
	}
	return rn;
}

static int rtq_reallyold = 60*60;	/* one hour is "really old" */
SYSCTL_INT(_net_inet_ip, IPCTL_RTEXPIRE, rtexpire, CTLFLAG_RW,
    &rtq_reallyold, 0,
    "Default expiration time on dynamically learned routes");

static int rtq_minreallyold = 10; /* never automatically crank down to less */
SYSCTL_INT(_net_inet_ip, IPCTL_RTMINEXPIRE, rtminexpire, CTLFLAG_RW,
    &rtq_minreallyold, 0,
    "Minimum time to attempt to hold onto dynamically learned routes");

static int rtq_toomany = 128;		/* 128 cached routes is "too many" */
SYSCTL_INT(_net_inet_ip, IPCTL_RTMAXCACHE, rtmaxcache, CTLFLAG_RW,
    &rtq_toomany, 0, "Upper limit on dynamically learned routes");
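/*
 * The knobs above appear under the net.inet.ip sysctl branch, so the
 * defaults can be overridden at runtime, e.g. (illustrative values, in
 * seconds for the first two and a route count for the third):
 *
 *	sysctl net.inet.ip.rtexpire=3600
 *	sysctl net.inet.ip.rtminexpire=10
 *	sysctl net.inet.ip.rtmaxcache=128
 */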
/*
 * On last reference drop, mark the route as belonging to us so that
 * it can be timed out.
 */
static void
in_clsroute(struct radix_node *rn, struct radix_node_head *head)
{
	struct rtentry *rt = (struct rtentry *)rn;

	RT_LOCK_ASSERT(rt);

	if (!(rt->rt_flags & RTF_UP))
		return;			/* prophylactic measures */

	if ((rt->rt_flags & (RTF_LLINFO | RTF_HOST)) != RTF_HOST)
		return;

	if (rt->rt_flags & RTPRF_OURS)
		return;

	if (!(rt->rt_flags & (RTF_WASCLONED | RTF_DYNAMIC)))
		return;

	/*
	 * If rtq_reallyold is 0, just delete the route without
	 * waiting for a timeout cycle to kill it.
	 */
	if (V_rtq_reallyold != 0) {
		rt->rt_flags |= RTPRF_OURS;
		rt->rt_rmx.rmx_expire = time_uptime + V_rtq_reallyold;
	} else {
		rtexpunge(rt);
	}
}

struct rtqk_arg {
	struct radix_node_head *rnh;
	int draining;
	int killed;
	int found;
	int updating;
	time_t nextstop;
};

/*
 * Get rid of old routes.  When draining, this deletes everything, even when
 * the timeout is not expired yet.  When updating, this makes sure that
 * nothing has a timeout longer than the current value of rtq_reallyold.
 */
static int
in_rtqkill(struct radix_node *rn, void *rock)
{
	struct rtqk_arg *ap = rock;
	struct rtentry *rt = (struct rtentry *)rn;
	int err;

	if (rt->rt_flags & RTPRF_OURS) {
		ap->found++;

		if (ap->draining || rt->rt_rmx.rmx_expire <= time_uptime) {
			if (rt->rt_refcnt > 0)
				panic("rtqkill route really not free");

			err = in_rtrequest(RTM_DELETE,
			    (struct sockaddr *)rt_key(rt),
			    rt->rt_gateway, rt_mask(rt),
			    rt->rt_flags, 0, rt->rt_fibnum);
			if (err) {
				log(LOG_WARNING, "in_rtqkill: error %d\n", err);
			} else {
				ap->killed++;
			}
		} else {
			if (ap->updating &&
			    (rt->rt_rmx.rmx_expire - time_uptime >
			     V_rtq_reallyold)) {
				rt->rt_rmx.rmx_expire =
				    time_uptime + V_rtq_reallyold;
			}
			ap->nextstop = lmin(ap->nextstop,
			    rt->rt_rmx.rmx_expire);
		}
	}

	return 0;
}

#define RTQ_TIMEOUT	60*10	/* run no less than once every ten minutes */
static int rtq_timeout = RTQ_TIMEOUT;
static struct callout rtq_timer;

static void in_rtqtimo_one(void *rock);

static void
in_rtqtimo(void *rock)
{
	int fibnum;
	void *newrock;
	struct timeval atv;

	KASSERT((rock == (void *)V_rt_tables[0][AF_INET]),
	    ("in_rtqtimo: unexpected arg"));
	for (fibnum = 0; fibnum < rt_numfibs; fibnum++) {
		if ((newrock = V_rt_tables[fibnum][AF_INET]) != NULL)
			in_rtqtimo_one(newrock);
	}
	atv.tv_usec = 0;
	atv.tv_sec = V_rtq_timeout;
	callout_reset(&V_rtq_timer, tvtohz(&atv), in_rtqtimo, rock);
}
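/*
 * The adjustment below cuts rtq_reallyold to 2/3 of its previous value
 * whenever the cache stays above rtq_toomany, so with the defaults the
 * expiration time shrinks 3600 -> 2400 -> 1600 -> ... until it bottoms
 * out at rtq_minreallyold (10 seconds).  The adjustment is made at most
 * once every rtq_timeout seconds.
 */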
static void
in_rtqtimo_one(void *rock)
{
	struct radix_node_head *rnh = rock;
	struct rtqk_arg arg;
	static time_t last_adjusted_timeout = 0;

	arg.found = arg.killed = 0;
	arg.rnh = rnh;
	arg.nextstop = time_uptime + V_rtq_timeout;
	arg.draining = arg.updating = 0;
	RADIX_NODE_HEAD_LOCK(rnh);
	rnh->rnh_walktree(rnh, in_rtqkill, &arg);
	RADIX_NODE_HEAD_UNLOCK(rnh);

	/*
	 * Attempt to be somewhat dynamic about this:
	 * If there are ``too many'' routes sitting around taking up space,
	 * then crank down the timeout, and see if we can't make some more
	 * go away.  However, we make sure that we will never adjust more
	 * than once in rtq_timeout seconds, to keep from cranking down too
	 * hard.
	 */
	if ((arg.found - arg.killed > V_rtq_toomany) &&
	    (time_uptime - last_adjusted_timeout >= V_rtq_timeout) &&
	    V_rtq_reallyold > V_rtq_minreallyold) {
		V_rtq_reallyold = 2 * V_rtq_reallyold / 3;
		if (V_rtq_reallyold < V_rtq_minreallyold) {
			V_rtq_reallyold = V_rtq_minreallyold;
		}

		last_adjusted_timeout = time_uptime;
#ifdef DIAGNOSTIC
		log(LOG_DEBUG, "in_rtqtimo: adjusted rtq_reallyold to %d\n",
		    V_rtq_reallyold);
#endif
		arg.found = arg.killed = 0;
		arg.updating = 1;
		RADIX_NODE_HEAD_LOCK(rnh);
		rnh->rnh_walktree(rnh, in_rtqkill, &arg);
		RADIX_NODE_HEAD_UNLOCK(rnh);
	}
}

void
in_rtqdrain(void)
{
	struct radix_node_head *rnh;
	struct rtqk_arg arg;
	int fibnum;

	for (fibnum = 0; fibnum < rt_numfibs; fibnum++) {
		rnh = V_rt_tables[fibnum][AF_INET];
		arg.found = arg.killed = 0;
		arg.rnh = rnh;
		arg.nextstop = 0;
		arg.draining = 1;
		arg.updating = 0;
		RADIX_NODE_HEAD_LOCK(rnh);
		rnh->rnh_walktree(rnh, in_rtqkill, &arg);
		RADIX_NODE_HEAD_UNLOCK(rnh);
	}
}
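/*
 * in_inithead() below is typically entered more than once (once per
 * routing table, and also from vfs_export.c with off == 0), so
 * _in_rt_was_here ensures the expiration callout is initialized and
 * armed only on the first call.
 */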
static int _in_rt_was_here;
/*
 * Initialize our routing tree.
 */
int
in_inithead(void **head, int off)
{
	struct radix_node_head *rnh;

	/* XXX MRT
	 * This can be called from vfs_export.c too, in which case 'off'
	 * will be 0.  We know the correct value so just use that and
	 * return directly if it was 0.
	 * This is a hack that replaces an even worse hack on a bad hack
	 * on a bad design.  After RELENG_7 this should be fixed, but that
	 * will change the ABI, so for now do it this way.
	 */
	if (!rn_inithead(head, 32))
		return 0;

	if (off == 0)		/* XXX MRT  see above */
		return 1;	/* only do the rest for a real routing table */

	rnh = *head;
	rnh->rnh_addaddr = in_addroute;
	rnh->rnh_matchaddr = in_matroute;
	rnh->rnh_close = in_clsroute;
	if (_in_rt_was_here == 0) {
		callout_init(&V_rtq_timer, CALLOUT_MPSAFE);
		in_rtqtimo(rnh);	/* kick off timeout first time */
		_in_rt_was_here = 1;
	}
	return 1;
}

/*
 * This zaps old routes when the interface goes down or an interface
 * address is deleted.  In the latter case, it deletes static routes
 * that point to this address.  If we don't do this, we may end up
 * using the old address in the future.  The ones we always want to
 * get rid of are things like ARP entries, since the user might down
 * the interface, walk over to a completely different network, and
 * plug back in.
 */
struct in_ifadown_arg {
	struct ifaddr *ifa;
	int del;
};

static int
in_ifadownkill(struct radix_node *rn, void *xap)
{
	struct in_ifadown_arg *ap = xap;
	struct rtentry *rt = (struct rtentry *)rn;

	RT_LOCK(rt);
	if (rt->rt_ifa == ap->ifa &&
	    (ap->del || !(rt->rt_flags & RTF_STATIC))) {
		/*
		 * We need to disable the automatic prune that happens
		 * in this case in rtrequest() because it will blow
		 * away the pointers that rn_walktree() needs in order
		 * to continue our descent.  We will end up deleting all
		 * the routes that rtrequest() would have in any case,
		 * so that behavior is not needed there.
		 */
		rt->rt_flags &= ~RTF_CLONING;
		rtexpunge(rt);
	}
	RT_UNLOCK(rt);
	return 0;
}

int
in_ifadown(struct ifaddr *ifa, int delete)
{
	struct in_ifadown_arg arg;
	struct radix_node_head *rnh;
	int fibnum;

	if (ifa->ifa_addr->sa_family != AF_INET)
		return 1;

	for (fibnum = 0; fibnum < rt_numfibs; fibnum++) {
		rnh = V_rt_tables[fibnum][AF_INET];
		arg.ifa = ifa;
		arg.del = delete;
		RADIX_NODE_HEAD_LOCK(rnh);
		rnh->rnh_walktree(rnh, in_ifadownkill, &arg);
		RADIX_NODE_HEAD_UNLOCK(rnh);
		ifa->ifa_flags &= ~IFA_ROUTE;		/* XXX locking? */
	}
	return 0;
}

/*
 * inet versions of rt functions.  These have fib extensions and
 * for now will just reference the _fib variants.
 * Eventually this order will be reversed.
 */
void
in_rtalloc_ign(struct route *ro, u_long ignflags, u_int fibnum)
{
	rtalloc_ign_fib(ro, ignflags, fibnum);
}

int
in_rtrequest(int req,
	struct sockaddr *dst,
	struct sockaddr *gateway,
	struct sockaddr *netmask,
	int flags,
	struct rtentry **ret_nrt,
	u_int fibnum)
{
	return (rtrequest_fib(req, dst, gateway, netmask,
	    flags, ret_nrt, fibnum));
}

struct rtentry *
in_rtalloc1(struct sockaddr *dst, int report, u_long ignflags, u_int fibnum)
{
	return (rtalloc1_fib(dst, report, ignflags, fibnum));
}

int
in_rt_check(struct rtentry **lrt, struct rtentry **lrt0,
    struct sockaddr *dst, u_int fibnum)
{
	return (rt_check_fib(lrt, lrt0, dst, fibnum));
}

void
in_rtredirect(struct sockaddr *dst,
	struct sockaddr *gateway,
	struct sockaddr *netmask,
	int flags,
	struct sockaddr *src,
	u_int fibnum)
{
	rtredirect_fib(dst, gateway, netmask, flags, src, fibnum);
}

void
in_rtalloc(struct route *ro, u_int fibnum)
{
	rtalloc_ign_fib(ro, 0UL, fibnum);
}

#if 0
int	 in_rt_getifa(struct rt_addrinfo *, u_int fibnum);
int	 in_rtioctl(u_long, caddr_t, u_int);
int	 in_rtrequest1(int, struct rt_addrinfo *, struct rtentry **, u_int);
#endif