/*-
 * Copyright 1994, 1995 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This code does two things necessary for the enhanced TCP metrics to
 * function in a useful manner:
 * 1) It marks all non-host routes as `cloning', thus ensuring that
 *    every actual reference to such a route actually gets turned
 *    into a reference to a host route to the specific destination
 *    requested.
 * 2) When such routes lose all their references, it arranges for them
 *    to be deleted in some random collection of circumstances, so that
 *    a large quantity of stale routing data is not kept in kernel memory
 *    indefinitely.  See in_rtqtimo() below for the exact mechanism.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/callout.h>
#include <sys/vimage.h>

#include <net/if.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/vinet.h>

extern int	in_inithead(void **head, int off);

#define	RTPRF_OURS	RTF_PROTO3	/* set on routes we manage */

/*
 * Do what we need to do when inserting a route.
 */
static struct radix_node *
in_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
    struct radix_node *treenodes)
{
	struct rtentry *rt = (struct rtentry *)treenodes;
	struct sockaddr_in *sin = (struct sockaddr_in *)rt_key(rt);

	RADIX_NODE_HEAD_WLOCK_ASSERT(head);
	/*
	 * A little bit of help for both IP output and input:
	 * For host routes, we make sure that RTF_BROADCAST
	 * is set for anything that looks like a broadcast address.
	 * This way, we can avoid an expensive call to in_broadcast()
	 * in ip_output() most of the time (because the route passed
	 * to ip_output() is almost always a host route).
	 *
	 * We also do the same for local addresses, with the thought
	 * that this might one day be used to speed up ip_input().
	 *
	 * We also mark routes to multicast addresses as such, because
	 * it's easy to do and might be useful (but this is much more
	 * dubious since it's so easy to inspect the address).
	 */
	if (rt->rt_flags & RTF_HOST) {
		if (in_broadcast(sin->sin_addr, rt->rt_ifp)) {
			rt->rt_flags |= RTF_BROADCAST;
		} else if (satosin(rt->rt_ifa->ifa_addr)->sin_addr.s_addr ==
		    sin->sin_addr.s_addr) {
			rt->rt_flags |= RTF_LOCAL;
		}
	}
	if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
		rt->rt_flags |= RTF_MULTICAST;

	if (!rt->rt_rmx.rmx_mtu && rt->rt_ifp)
		rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;

	return (rn_addroute(v_arg, n_arg, head, treenodes));
}

/*
 * This code is the inverse of in_clsroute: on first reference, if we
 * were managing the route, stop doing so and set the expiration timer
 * back off again.
 */
static struct radix_node *
in_matroute(void *v_arg, struct radix_node_head *head)
{
	struct radix_node *rn = rn_match(v_arg, head);
	struct rtentry *rt = (struct rtentry *)rn;

	/* XXX locking? */
	if (rt && rt->rt_refcnt == 0) {		/* this is first reference */
		if (rt->rt_flags & RTPRF_OURS) {
			rt->rt_flags &= ~RTPRF_OURS;
			rt->rt_rmx.rmx_expire = 0;
		}
	}
	return rn;
}
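
/*
 * Tunables controlling how long unreferenced dynamic routes are cached;
 * exported via sysctl as net.inet.ip.rtexpire, net.inet.ip.rtminexpire
 * and net.inet.ip.rtmaxcache.
 */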
#ifdef VIMAGE_GLOBALS
static int rtq_reallyold;
static int rtq_minreallyold;
static int rtq_toomany;
#endif

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_ip, IPCTL_RTEXPIRE, rtexpire,
    CTLFLAG_RW, rtq_reallyold, 0,
    "Default expiration time on dynamically learned routes");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_ip, IPCTL_RTMINEXPIRE,
    rtminexpire, CTLFLAG_RW, rtq_minreallyold, 0,
    "Minimum time to attempt to hold onto dynamically learned routes");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_ip, IPCTL_RTMAXCACHE,
    rtmaxcache, CTLFLAG_RW, rtq_toomany, 0,
    "Upper limit on dynamically learned routes");

/*
 * On last reference drop, mark the route as belonging to us so that
 * it can be timed out.
 */
static void
in_clsroute(struct radix_node *rn, struct radix_node_head *head)
{
	INIT_VNET_INET(curvnet);
	struct rtentry *rt = (struct rtentry *)rn;

	RT_LOCK_ASSERT(rt);

	if (!(rt->rt_flags & RTF_UP))
		return;			/* prophylactic measures */

	if (rt->rt_flags & RTPRF_OURS)
		return;

	if (!(rt->rt_flags & RTF_DYNAMIC))
		return;

	/*
	 * If rtq_reallyold is 0, just delete the route without
	 * waiting for a timeout cycle to kill it.
	 */
	if (V_rtq_reallyold != 0) {
		rt->rt_flags |= RTPRF_OURS;
		rt->rt_rmx.rmx_expire = time_uptime + V_rtq_reallyold;
	} else {
		rtexpunge(rt);
	}
}
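
/*
 * Context passed through rnh_walktree() to in_rtqkill(); found and
 * killed are per-pass statistics, nextstop tracks the earliest
 * remaining expiry.
 */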
struct rtqk_arg {
	struct radix_node_head *rnh;
	int draining;
	int killed;
	int found;
	int updating;
	time_t nextstop;
};

/*
 * Get rid of old routes.  When draining, this deletes everything, even when
 * the timeout is not expired yet.  When updating, this makes sure that
 * nothing has a timeout longer than the current value of rtq_reallyold.
 */
static int
in_rtqkill(struct radix_node *rn, void *rock)
{
	INIT_VNET_INET(curvnet);
	struct rtqk_arg *ap = rock;
	struct rtentry *rt = (struct rtentry *)rn;
	int err;

	RADIX_NODE_HEAD_WLOCK_ASSERT(ap->rnh);

	if (rt->rt_flags & RTPRF_OURS) {
		ap->found++;

		if (ap->draining || rt->rt_rmx.rmx_expire <= time_uptime) {
			if (rt->rt_refcnt > 0)
				panic("rtqkill route really not free");

			err = in_rtrequest(RTM_DELETE,
			    (struct sockaddr *)rt_key(rt),
			    rt->rt_gateway, rt_mask(rt),
			    rt->rt_flags | RTF_RNH_LOCKED, 0,
			    rt->rt_fibnum);
			if (err) {
				log(LOG_WARNING, "in_rtqkill: error %d\n", err);
			} else {
				ap->killed++;
			}
		} else {
			if (ap->updating &&
			    (rt->rt_rmx.rmx_expire - time_uptime >
			    V_rtq_reallyold)) {
				rt->rt_rmx.rmx_expire =
				    time_uptime + V_rtq_reallyold;
			}
			ap->nextstop = lmin(ap->nextstop,
			    rt->rt_rmx.rmx_expire);
		}
	}

	return 0;
}

#define	RTQ_TIMEOUT	60*10	/* run no less than once every ten minutes */
#ifdef VIMAGE_GLOBALS
static int rtq_timeout;
static struct callout rtq_timer;
#endif

static void in_rtqtimo_one(void *rock);

/*
 * Timer callback: sweep every FIB's IPv4 tree for expired routes,
 * then reschedule ourselves to run again in rtq_timeout seconds.
 */
static void
in_rtqtimo(void *rock)
{
	int fibnum;
	void *newrock;
	struct timeval atv;

	KASSERT((rock == (void *)V_rt_tables[0][AF_INET]),
	    ("in_rtqtimo: unexpected arg"));
	for (fibnum = 0; fibnum < rt_numfibs; fibnum++) {
		if ((newrock = V_rt_tables[fibnum][AF_INET]) != NULL)
			in_rtqtimo_one(newrock);
	}
	atv.tv_usec = 0;
	atv.tv_sec = V_rtq_timeout;
	callout_reset(&V_rtq_timer, tvtohz(&atv), in_rtqtimo, rock);
}
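
/*
 * Sweep a single routing table for expired routes and, if ``too many''
 * are still left afterwards, crank down rtq_reallyold and sweep again.
 */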
static void
in_rtqtimo_one(void *rock)
{
	INIT_VNET_INET(curvnet);
	struct radix_node_head *rnh = rock;
	struct rtqk_arg arg;
	static time_t last_adjusted_timeout = 0;

	arg.found = arg.killed = 0;
	arg.rnh = rnh;
	arg.nextstop = time_uptime + V_rtq_timeout;
	arg.draining = arg.updating = 0;
	RADIX_NODE_HEAD_LOCK(rnh);
	rnh->rnh_walktree(rnh, in_rtqkill, &arg);
	RADIX_NODE_HEAD_UNLOCK(rnh);

	/*
	 * Attempt to be somewhat dynamic about this:
	 * If there are ``too many'' routes sitting around taking up space,
	 * then crank down the timeout, and see if we can't make some more
	 * go away.  However, we make sure that we will never adjust more
	 * than once in rtq_timeout seconds, to keep from cranking down too
	 * hard.
	 */
	if ((arg.found - arg.killed > V_rtq_toomany) &&
	    (time_uptime - last_adjusted_timeout >= V_rtq_timeout) &&
	    V_rtq_reallyold > V_rtq_minreallyold) {
		V_rtq_reallyold = 2 * V_rtq_reallyold / 3;
		if (V_rtq_reallyold < V_rtq_minreallyold) {
			V_rtq_reallyold = V_rtq_minreallyold;
		}

		last_adjusted_timeout = time_uptime;
#ifdef DIAGNOSTIC
		log(LOG_DEBUG, "in_rtqtimo: adjusted rtq_reallyold to %d\n",
		    V_rtq_reallyold);
#endif
		arg.found = arg.killed = 0;
		arg.updating = 1;
		RADIX_NODE_HEAD_LOCK(rnh);
		rnh->rnh_walktree(rnh, in_rtqkill, &arg);
		RADIX_NODE_HEAD_UNLOCK(rnh);
	}
}

void
in_rtqdrain(void)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct radix_node_head *rnh;
	struct rtqk_arg arg;
	int fibnum;

	VNET_LIST_RLOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		INIT_VNET_NET(vnet_iter);

		for (fibnum = 0; fibnum < rt_numfibs; fibnum++) {
			rnh = V_rt_tables[fibnum][AF_INET];
			arg.found = arg.killed = 0;
			arg.rnh = rnh;
			arg.nextstop = 0;
			arg.draining = 1;
			arg.updating = 0;
			RADIX_NODE_HEAD_LOCK(rnh);
			rnh->rnh_walktree(rnh, in_rtqkill, &arg);
			RADIX_NODE_HEAD_UNLOCK(rnh);
		}
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK();
}

static int _in_rt_was_here;
/*
 * Initialize our routing tree.
 */
int
in_inithead(void **head, int off)
{
	INIT_VNET_INET(curvnet);
	struct radix_node_head *rnh;

	/* XXX MRT
	 * This can be called from vfs_export.c too, in which case 'off'
	 * will be 0.  We know the correct value so just use that and
	 * return directly if it was 0.
	 * This is a hack that replaces an even worse hack on a bad hack
	 * on a bad design.  After RELENG_7 this should be fixed, but that
	 * will change the ABI, so for now do it this way.
	 */
	if (!rn_inithead(head, 32))
		return 0;

	if (off == 0)		/* XXX MRT see above */
		return 1;	/* only do the rest for a real routing table */

	V_rtq_reallyold = 60*60;	/* one hour is "really old" */
	V_rtq_minreallyold = 10;	/* never automatically crank down to less */
	V_rtq_toomany = 128;		/* 128 cached routes is "too many" */
	V_rtq_timeout = RTQ_TIMEOUT;

	rnh = *head;
	rnh->rnh_addaddr = in_addroute;
	rnh->rnh_matchaddr = in_matroute;
	rnh->rnh_close = in_clsroute;
	if (_in_rt_was_here == 0) {
		callout_init(&V_rtq_timer, CALLOUT_MPSAFE);
		in_rtqtimo(rnh);	/* kick off timeout first time */
		_in_rt_was_here = 1;
	}
	return 1;
}

/*
 * This zaps old routes when the interface goes down or the interface
 * address is deleted.  In the latter case, it deletes static routes
 * that point to this address.  If we don't do this, we may end up
 * using the old address in the future.  The ones we always want to
 * get rid of are things like ARP entries, since the user might down
 * the interface, walk over to a completely different network, and
 * plug back in.
 */
struct in_ifadown_arg {
	struct ifaddr *ifa;
	int del;
};
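
/*
 * rnh_walktree() callback: expunge any route using the ifaddr that is
 * being brought down (or, when deleting, any route at all using it).
 */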
static int
in_ifadownkill(struct radix_node *rn, void *xap)
{
	struct in_ifadown_arg *ap = xap;
	struct rtentry *rt = (struct rtentry *)rn;

	RT_LOCK(rt);
	if (rt->rt_ifa == ap->ifa &&
	    (ap->del || !(rt->rt_flags & RTF_STATIC))) {
		/*
		 * We need to disable the automatic prune that happens
		 * in this case in rtrequest() because it will blow
		 * away the pointers that rn_walktree() needs in order
		 * to continue our descent.  We will end up deleting all
		 * the routes that rtrequest() would have in any case,
		 * so that behavior is not needed there.
		 */
		rtexpunge(rt);
	}
	RT_UNLOCK(rt);
	return 0;
}

int
in_ifadown(struct ifaddr *ifa, int delete)
{
	INIT_VNET_NET(curvnet);
	struct in_ifadown_arg arg;
	struct radix_node_head *rnh;
	int fibnum;

	if (ifa->ifa_addr->sa_family != AF_INET)
		return 1;

	for (fibnum = 0; fibnum < rt_numfibs; fibnum++) {
		rnh = V_rt_tables[fibnum][AF_INET];
		arg.ifa = ifa;
		arg.del = delete;
		RADIX_NODE_HEAD_LOCK(rnh);
		rnh->rnh_walktree(rnh, in_ifadownkill, &arg);
		RADIX_NODE_HEAD_UNLOCK(rnh);
		ifa->ifa_flags &= ~IFA_ROUTE;	/* XXX locking? */
	}
	return 0;
}

/*
 * inet versions of rt functions.  These have fib extensions and for
 * now will just reference the _fib variants; eventually this order
 * will be reversed.
 */
void
in_rtalloc_ign(struct route *ro, u_long ignflags, u_int fibnum)
{
	rtalloc_ign_fib(ro, ignflags, fibnum);
}

int
in_rtrequest(int req,
	struct sockaddr *dst,
	struct sockaddr *gateway,
	struct sockaddr *netmask,
	int flags,
	struct rtentry **ret_nrt,
	u_int fibnum)
{
	return (rtrequest_fib(req, dst, gateway, netmask,
	    flags, ret_nrt, fibnum));
}

struct rtentry *
in_rtalloc1(struct sockaddr *dst, int report, u_long ignflags, u_int fibnum)
{
	return (rtalloc1_fib(dst, report, ignflags, fibnum));
}

void
in_rtredirect(struct sockaddr *dst,
	struct sockaddr *gateway,
	struct sockaddr *netmask,
	int flags,
	struct sockaddr *src,
	u_int fibnum)
{
	rtredirect_fib(dst, gateway, netmask, flags, src, fibnum);
}

void
in_rtalloc(struct route *ro, u_int fibnum)
{
	rtalloc_ign_fib(ro, 0UL, fibnum);
}

#if 0
int	in_rt_getifa(struct rt_addrinfo *, u_int fibnum);
int	in_rtioctl(u_long, caddr_t, u_int);
int	in_rtrequest1(int, struct rt_addrinfo *, struct rtentry **, u_int);
#endif