/*-
 * Copyright 1994, 1995 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This code does two things necessary for the enhanced TCP metrics to
 * function in a useful manner:
 *  1) It marks all non-host routes as `cloning', thus ensuring that
 *     every actual reference to such a route actually gets turned
 *     into a reference to a host route to the specific destination
 *     requested.
 *  2) When such routes lose all their references, it arranges for them
 *     to be deleted in some random collection of circumstances, so that
 *     a large quantity of stale routing data is not kept in kernel memory
 *     indefinitely.  See in_rtqtimo() below for the exact mechanism.
 */
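
/*
 * Illustrative sketch (not upstream text): with a cloning network
 * route such as 10.0.0.0/8 installed, the first lookup for, say,
 * 10.0.0.5 clones a host route to that destination, and per-host
 * data (e.g. the rt_rmx metrics used below) can then be cached on
 * the clone.  in_clsroute() later arranges for the clone to expire
 * once its last reference is dropped.
 */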

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/callout.h>
#include <sys/vimage.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>

extern int in_inithead(void **head, int off);

#define RTPRF_OURS		RTF_PROTO3	/* set on routes we manage */

/*
 * Do what we need to do when inserting a route.
 */
static struct radix_node *
in_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
    struct radix_node *treenodes)
{
	struct rtentry *rt = (struct rtentry *)treenodes;
	struct sockaddr_in *sin = (struct sockaddr_in *)rt_key(rt);
	struct radix_node *ret;

	/*
	 * A little bit of help for both IP output and input:
	 * For host routes, we make sure that RTF_BROADCAST
	 * is set for anything that looks like a broadcast address.
	 * This way, we can avoid an expensive call to in_broadcast()
	 * in ip_output() most of the time (because the route passed
	 * to ip_output() is almost always a host route).
	 *
	 * We also do the same for local addresses, with the thought
	 * that this might one day be used to speed up ip_input().
	 *
	 * We also mark routes to multicast addresses as such, because
	 * it's easy to do and might be useful (but this is much more
	 * dubious since it's so easy to inspect the address).
	 */
	if (rt->rt_flags & RTF_HOST) {
		if (in_broadcast(sin->sin_addr, rt->rt_ifp)) {
			rt->rt_flags |= RTF_BROADCAST;
		} else if (satosin(rt->rt_ifa->ifa_addr)->sin_addr.s_addr ==
		    sin->sin_addr.s_addr) {
			rt->rt_flags |= RTF_LOCAL;
		}
	}
	if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
		rt->rt_flags |= RTF_MULTICAST;

	if (!rt->rt_rmx.rmx_mtu && rt->rt_ifp)
		rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;

	ret = rn_addroute(v_arg, n_arg, head, treenodes);
	if (ret == NULL && rt->rt_flags & RTF_HOST) {
		struct rtentry *rt2;
		/*
		 * We are trying to add a host route, but can't.
		 * Find out if it is because of an
		 * ARP entry and delete it if so.
		 */
		rt2 = in_rtalloc1((struct sockaddr *)sin, 0,
		    RTF_CLONING, rt->rt_fibnum);
		if (rt2) {
			if (rt2->rt_flags & RTF_LLINFO &&
			    rt2->rt_flags & RTF_HOST &&
			    rt2->rt_gateway &&
			    rt2->rt_gateway->sa_family == AF_LINK) {
				rtexpunge(rt2);
				RTFREE_LOCKED(rt2);
				ret = rn_addroute(v_arg, n_arg, head,
				    treenodes);
			} else
				RTFREE_LOCKED(rt2);
		}
	}

	return ret;
}

/*
 * This code is the inverse of in_clsroute: on first reference, if we
 * were managing the route, stop doing so and set the expiration timer
 * back off again.
 */
static struct radix_node *
in_matroute(void *v_arg, struct radix_node_head *head)
{
	struct radix_node *rn = rn_match(v_arg, head);
	struct rtentry *rt = (struct rtentry *)rn;

	/* XXX locking? */
	if (rt && rt->rt_refcnt == 0) {		/* this is first reference */
		if (rt->rt_flags & RTPRF_OURS) {
			rt->rt_flags &= ~RTPRF_OURS;
			rt->rt_rmx.rmx_expire = 0;
		}
	}
	return rn;
}

static int rtq_reallyold = 60*60;	/* one hour is "really old" */
SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_ip, IPCTL_RTEXPIRE, rtexpire,
    CTLFLAG_RW, rtq_reallyold, 0,
    "Default expiration time on dynamically learned routes");

static int rtq_minreallyold = 10;	/* never automatically crank down to less */
SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_ip, IPCTL_RTMINEXPIRE,
    rtminexpire, CTLFLAG_RW, rtq_minreallyold, 0,
    "Minimum time to attempt to hold onto dynamically learned routes");

static int rtq_toomany = 128;		/* 128 cached routes is "too many" */
SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_ip, IPCTL_RTMAXCACHE,
    rtmaxcache, CTLFLAG_RW, rtq_toomany, 0,
    "Upper limit on dynamically learned routes");
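
/*
 * Tuning example: the three knobs above are exported via sysctl(8)
 * under net.inet.ip as rtexpire, rtminexpire and rtmaxcache, e.g.
 *
 *	sysctl net.inet.ip.rtexpire=600
 *
 * shortens the default expiry to ten minutes.  rtexpire and
 * rtminexpire are in seconds; rtmaxcache is a route count.
 */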

/*
 * On last reference drop, mark the route as belonging to us so that
 * it can be timed out.
 */
static void
in_clsroute(struct radix_node *rn, struct radix_node_head *head)
{
	INIT_VNET_INET(curvnet);
	struct rtentry *rt = (struct rtentry *)rn;

	RT_LOCK_ASSERT(rt);

	if (!(rt->rt_flags & RTF_UP))
		return;			/* prophylactic measures */

	if ((rt->rt_flags & (RTF_LLINFO | RTF_HOST)) != RTF_HOST)
		return;

	if (rt->rt_flags & RTPRF_OURS)
		return;

	if (!(rt->rt_flags & (RTF_WASCLONED | RTF_DYNAMIC)))
		return;

	/*
	 * If rtq_reallyold is 0, just delete the route without
	 * waiting for a timeout cycle to kill it.
	 */
	if (V_rtq_reallyold != 0) {
		rt->rt_flags |= RTPRF_OURS;
		rt->rt_rmx.rmx_expire = time_uptime + V_rtq_reallyold;
	} else {
		rtexpunge(rt);
	}
}

struct rtqk_arg {
	struct radix_node_head *rnh;
	int draining;
	int killed;
	int found;
	int updating;
	time_t nextstop;
};

/*
 * Get rid of old routes.  When draining, this deletes everything, even
 * if the timeout has not expired yet.  When updating, this makes sure
 * that nothing has a timeout longer than the current value of
 * rtq_reallyold.
 */
static int
in_rtqkill(struct radix_node *rn, void *rock)
{
	INIT_VNET_INET(curvnet);
	struct rtqk_arg *ap = rock;
	struct rtentry *rt = (struct rtentry *)rn;
	int err;

	if (rt->rt_flags & RTPRF_OURS) {
		ap->found++;

		if (ap->draining || rt->rt_rmx.rmx_expire <= time_uptime) {
			if (rt->rt_refcnt > 0)
				panic("rtqkill route really not free");

			err = in_rtrequest(RTM_DELETE,
			    (struct sockaddr *)rt_key(rt),
			    rt->rt_gateway, rt_mask(rt),
			    rt->rt_flags, 0, rt->rt_fibnum);
			if (err) {
				log(LOG_WARNING, "in_rtqkill: error %d\n", err);
			} else {
				ap->killed++;
			}
		} else {
			if (ap->updating &&
			    (rt->rt_rmx.rmx_expire - time_uptime >
			     V_rtq_reallyold)) {
				rt->rt_rmx.rmx_expire =
				    time_uptime + V_rtq_reallyold;
			}
			ap->nextstop = lmin(ap->nextstop,
			    rt->rt_rmx.rmx_expire);
		}
	}

	return 0;
}
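
/*
 * Overview of the expiry machinery below: in_inithead() starts a
 * single callout firing in_rtqtimo(), which walks each FIB's AF_INET
 * tree with in_rtqkill() and then reschedules itself every
 * rtq_timeout seconds.  in_rtqdrain() reuses the same walker with
 * arg.draining set to flush all managed routes immediately.
 */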

#define RTQ_TIMEOUT	60*10	/* run no less than once every ten minutes */
static int rtq_timeout = RTQ_TIMEOUT;
static struct callout rtq_timer;

static void in_rtqtimo_one(void *rock);

static void
in_rtqtimo(void *rock)
{
	int fibnum;
	void *newrock;
	struct timeval atv;

	KASSERT((rock == (void *)V_rt_tables[0][AF_INET]),
	    ("in_rtqtimo: unexpected arg"));
	for (fibnum = 0; fibnum < rt_numfibs; fibnum++) {
		if ((newrock = V_rt_tables[fibnum][AF_INET]) != NULL)
			in_rtqtimo_one(newrock);
	}
	atv.tv_usec = 0;
	atv.tv_sec = V_rtq_timeout;
	callout_reset(&V_rtq_timer, tvtohz(&atv), in_rtqtimo, rock);
}

static void
in_rtqtimo_one(void *rock)
{
	struct radix_node_head *rnh = rock;
	struct rtqk_arg arg;
	static time_t last_adjusted_timeout = 0;

	arg.found = arg.killed = 0;
	arg.rnh = rnh;
	arg.nextstop = time_uptime + V_rtq_timeout;
	arg.draining = arg.updating = 0;
	RADIX_NODE_HEAD_LOCK(rnh);
	rnh->rnh_walktree(rnh, in_rtqkill, &arg);
	RADIX_NODE_HEAD_UNLOCK(rnh);

	/*
	 * Attempt to be somewhat dynamic about this:
	 * If there are ``too many'' routes sitting around taking up space,
	 * then crank down the timeout, and see if we can't make some more
	 * go away.  However, we make sure that we will never adjust more
	 * than once in rtq_timeout seconds, to keep from cranking down too
	 * hard.
	 */
	if ((arg.found - arg.killed > V_rtq_toomany) &&
	    (time_uptime - last_adjusted_timeout >= V_rtq_timeout) &&
	    V_rtq_reallyold > V_rtq_minreallyold) {
		V_rtq_reallyold = 2 * V_rtq_reallyold / 3;
		if (V_rtq_reallyold < V_rtq_minreallyold) {
			V_rtq_reallyold = V_rtq_minreallyold;
		}

		last_adjusted_timeout = time_uptime;
#ifdef DIAGNOSTIC
		log(LOG_DEBUG, "in_rtqtimo: adjusted rtq_reallyold to %d\n",
		    V_rtq_reallyold);
#endif
		arg.found = arg.killed = 0;
		arg.updating = 1;
		RADIX_NODE_HEAD_LOCK(rnh);
		rnh->rnh_walktree(rnh, in_rtqkill, &arg);
		RADIX_NODE_HEAD_UNLOCK(rnh);
	}
}
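
/*
 * Worked example of the adjustment above: each crowded pass scales
 * rtq_reallyold by 2/3 (integer arithmetic), so from the default of
 * 3600 seconds successive adjustments give 2400, 1600, 1066, ...,
 * never dropping below rtq_minreallyold (10 by default).
 */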

void
in_rtqdrain(void)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct radix_node_head *rnh;
	struct rtqk_arg arg;
	int fibnum;

	VNET_LIST_RLOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		INIT_VNET_NET(vnet_iter);
		for (fibnum = 0; fibnum < rt_numfibs; fibnum++) {
			rnh = V_rt_tables[fibnum][AF_INET];
			arg.found = arg.killed = 0;
			arg.rnh = rnh;
			arg.nextstop = 0;
			arg.draining = 1;
			arg.updating = 0;
			RADIX_NODE_HEAD_LOCK(rnh);
			rnh->rnh_walktree(rnh, in_rtqkill, &arg);
			RADIX_NODE_HEAD_UNLOCK(rnh);
		}
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK();
}

static int _in_rt_was_here;
/*
 * Initialize our routing tree.
 */
int
in_inithead(void **head, int off)
{
	INIT_VNET_INET(curvnet);
	struct radix_node_head *rnh;

	/* XXX MRT
	 * This can be called from vfs_export.c too, in which case 'off'
	 * will be 0.  We know the correct value so just use that and
	 * return directly if it was 0.
	 * This is a hack that replaces an even worse hack on a bad hack
	 * on a bad design.  After RELENG_7 this should be fixed but that
	 * will change the ABI, so for now do it this way.
	 */
	if (!rn_inithead(head, 32))
		return 0;

	if (off == 0)		/* XXX MRT  see above */
		return 1;	/* only do the rest for a real routing table */

	rnh = *head;
	rnh->rnh_addaddr = in_addroute;
	rnh->rnh_matchaddr = in_matroute;
	rnh->rnh_close = in_clsroute;
	if (_in_rt_was_here == 0) {
		callout_init(&V_rtq_timer, CALLOUT_MPSAFE);
		in_rtqtimo(rnh);	/* kick off timeout first time */
		_in_rt_was_here = 1;
	}
	return 1;
}

/*
 * This zaps old routes when the interface goes down or an interface
 * address is deleted.  In the latter case, it deletes static routes
 * that point to this address.  If we don't do this, we may end up
 * using the old address in the future.  The ones we always want to
 * get rid of are things like ARP entries, since the user might down
 * the interface, walk over to a completely different network, and
 * plug back in.
 */
struct in_ifadown_arg {
	struct ifaddr *ifa;
	int del;
};

static int
in_ifadownkill(struct radix_node *rn, void *xap)
{
	struct in_ifadown_arg *ap = xap;
	struct rtentry *rt = (struct rtentry *)rn;

	RT_LOCK(rt);
	if (rt->rt_ifa == ap->ifa &&
	    (ap->del || !(rt->rt_flags & RTF_STATIC))) {
		/*
		 * We need to disable the automatic prune that happens
		 * in this case in rtrequest() because it will blow
		 * away the pointers that rn_walktree() needs in order
		 * to continue our descent.  We will end up deleting all
		 * the routes that rtrequest() would have in any case,
		 * so that behavior is not needed there.
		 */
		rt->rt_flags &= ~RTF_CLONING;
		rtexpunge(rt);
	}
	RT_UNLOCK(rt);
	return 0;
}

int
in_ifadown(struct ifaddr *ifa, int delete)
{
	INIT_VNET_NET(curvnet);
	struct in_ifadown_arg arg;
	struct radix_node_head *rnh;
	int fibnum;

	if (ifa->ifa_addr->sa_family != AF_INET)
		return 1;

	for (fibnum = 0; fibnum < rt_numfibs; fibnum++) {
		rnh = V_rt_tables[fibnum][AF_INET];
		arg.ifa = ifa;
		arg.del = delete;
		RADIX_NODE_HEAD_LOCK(rnh);
		rnh->rnh_walktree(rnh, in_ifadownkill, &arg);
		RADIX_NODE_HEAD_UNLOCK(rnh);
		ifa->ifa_flags &= ~IFA_ROUTE;	/* XXX locking? */
	}
	return 0;
}

/*
 * inet versions of rt functions.  These have fib extensions and
 * for now will just reference the _fib variants.
 * Eventually this order will be reversed.
 */
void
in_rtalloc_ign(struct route *ro, u_long ignflags, u_int fibnum)
{
	rtalloc_ign_fib(ro, ignflags, fibnum);
}

int
in_rtrequest(int req,
	struct sockaddr *dst,
	struct sockaddr *gateway,
	struct sockaddr *netmask,
	int flags,
	struct rtentry **ret_nrt,
	u_int fibnum)
{
	return (rtrequest_fib(req, dst, gateway, netmask,
	    flags, ret_nrt, fibnum));
}

struct rtentry *
in_rtalloc1(struct sockaddr *dst, int report, u_long ignflags, u_int fibnum)
{
	return (rtalloc1_fib(dst, report, ignflags, fibnum));
}

void
in_rtredirect(struct sockaddr *dst,
	struct sockaddr *gateway,
	struct sockaddr *netmask,
	int flags,
	struct sockaddr *src,
	u_int fibnum)
{
	rtredirect_fib(dst, gateway, netmask, flags, src, fibnum);
}

void
in_rtalloc(struct route *ro, u_int fibnum)
{
	rtalloc_ign_fib(ro, 0UL, fibnum);
}

#if 0
int	in_rt_getifa(struct rt_addrinfo *, u_int fibnum);
int	in_rtioctl(u_long, caddr_t, u_int);
int	in_rtrequest1(int, struct rt_addrinfo *, struct rtentry **, u_int);
#endif