/*
 * Copyright 1994, 1995 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * This code does two things necessary for the enhanced TCP metrics to
 * function in a useful manner:
 *  1) It marks all non-host routes as `cloning', thus ensuring that
 *     every actual reference to such a route actually gets turned
 *     into a reference to a host route to the specific destination
 *     requested.
 *  2) When such routes lose all their references, it arranges for them
 *     to be deleted in some random collection of circumstances, so that
 *     a large quantity of stale routing data is not kept in kernel memory
 *     indefinitely.  See in_rtqtimo() below for the exact mechanism.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_var.h>

extern int	in_inithead __P((void **head, int off));

#define RTPRF_OURS		RTF_PROTO3	/* set on routes we manage */

/*
 * Do what we need to do when inserting a route.
 */
static struct radix_node *
in_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
	    struct radix_node *treenodes)
{
	struct rtentry *rt = (struct rtentry *)treenodes;
	struct sockaddr_in *sin = (struct sockaddr_in *)rt_key(rt);
	struct radix_node *ret;

	/*
	 * For IP, all unicast non-host routes are automatically cloning.
	 */
	if(IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
		rt->rt_flags |= RTF_MULTICAST;

	if(!(rt->rt_flags & (RTF_HOST | RTF_CLONING | RTF_MULTICAST))) {
		rt->rt_flags |= RTF_PRCLONING;
	}

	/*
	 * A little bit of help for both IP output and input:
	 * For host routes, we make sure that RTF_BROADCAST
	 * is set for anything that looks like a broadcast address.
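	 * (A ``broadcast address'' here is anything in_broadcast() would
	 * accept; the obvious cases are the all-ones limited broadcast
	 * address and a directed broadcast address of a network configured
	 * on the interface.)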
	 * This way, we can avoid an expensive call to in_broadcast()
	 * in ip_output() most of the time (because the route passed
	 * to ip_output() is almost always a host route).
	 *
	 * We also do the same for local addresses, with the thought
	 * that this might one day be used to speed up ip_input().
	 *
	 * We also mark routes to multicast addresses as such, because
	 * it's easy to do and might be useful (but this is much more
	 * dubious since it's so easy to inspect the address).  (This
	 * is done above.)
	 */
	if (rt->rt_flags & RTF_HOST) {
		if (in_broadcast(sin->sin_addr, rt->rt_ifp)) {
			rt->rt_flags |= RTF_BROADCAST;
		} else {
#define satosin(sa) ((struct sockaddr_in *)sa)
			if (satosin(rt->rt_ifa->ifa_addr)->sin_addr.s_addr
			    == sin->sin_addr.s_addr)
				rt->rt_flags |= RTF_LOCAL;
#undef satosin
		}
	}

	if (!rt->rt_rmx.rmx_mtu && !(rt->rt_rmx.rmx_locks & RTV_MTU)
	    && rt->rt_ifp)
		rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;

	ret = rn_addroute(v_arg, n_arg, head, treenodes);
	if (ret == NULL && rt->rt_flags & RTF_HOST) {
		struct rtentry *rt2;
		/*
		 * We are trying to add a host route, but can't.
		 * Find out if it is because of an
		 * ARP entry and delete it if so.
		 */
		rt2 = rtalloc1((struct sockaddr *)sin, 0,
		    RTF_CLONING | RTF_PRCLONING);
		if (rt2) {
			if (rt2->rt_flags & RTF_LLINFO &&
			    rt2->rt_flags & RTF_HOST &&
			    rt2->rt_gateway &&
			    rt2->rt_gateway->sa_family == AF_LINK) {
				rtrequest(RTM_DELETE,
				    (struct sockaddr *)rt_key(rt2),
				    rt2->rt_gateway, rt_mask(rt2),
				    rt2->rt_flags, 0);
				ret = rn_addroute(v_arg, n_arg, head,
				    treenodes);
			}
			RTFREE(rt2);
		}
	}
	return ret;
}

/*
 * This code is the inverse of in_clsroute: on first reference, if we
 * were managing the route, stop doing so and turn the expiration timer
 * off again.
 */
static struct radix_node *
in_matroute(void *v_arg, struct radix_node_head *head)
{
	struct radix_node *rn = rn_match(v_arg, head);
	struct rtentry *rt = (struct rtentry *)rn;

	if(rt && rt->rt_refcnt == 0) {		/* this is first reference */
		if(rt->rt_flags & RTPRF_OURS) {
			rt->rt_flags &= ~RTPRF_OURS;
			rt->rt_rmx.rmx_expire = 0;
		}
	}
	return rn;
}

static int rtq_reallyold = 60*60;
	/* one hour is ``really old'' */
SYSCTL_INT(_net_inet_ip, IPCTL_RTEXPIRE, rtexpire, CTLFLAG_RW,
    &rtq_reallyold, 0,
    "Default expiration time on dynamically learned routes");

static int rtq_minreallyold = 10;
	/* never automatically crank down to less */
SYSCTL_INT(_net_inet_ip, IPCTL_RTMINEXPIRE, rtminexpire, CTLFLAG_RW,
    &rtq_minreallyold, 0,
    "Minimum time to attempt to hold onto dynamically learned routes");

static int rtq_toomany = 128;
	/* 128 cached routes is ``too many'' */
SYSCTL_INT(_net_inet_ip, IPCTL_RTMAXCACHE, rtmaxcache, CTLFLAG_RW,
    &rtq_toomany, 0, "Upper limit on dynamically learned routes");

/*
 * On last reference drop, mark the route as belonging to us so that it
 * can be timed out.
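 * (``Marking'' a route means setting RTPRF_OURS on it and stamping
 * rt_rmx.rmx_expire; in_rtqkill(), run from in_rtqtimo() below, deletes
 * the route once that stamp is in the past.  If rtq_reallyold is zero
 * we skip the marking and delete the route immediately.)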
 */
static void
in_clsroute(struct radix_node *rn, struct radix_node_head *head)
{
	struct rtentry *rt = (struct rtentry *)rn;

	if(!(rt->rt_flags & RTF_UP))
		return;		/* prophylactic measures */

	if((rt->rt_flags & (RTF_LLINFO | RTF_HOST)) != RTF_HOST)
		return;

	if((rt->rt_flags & (RTF_WASCLONED | RTPRF_OURS))
	   != RTF_WASCLONED)
		return;

	/*
	 * As requested by David Greenman:
	 * If rtq_reallyold is 0, just delete the route without
	 * waiting for a timeout cycle to kill it.
	 */
	if(rtq_reallyold != 0) {
		rt->rt_flags |= RTPRF_OURS;
		rt->rt_rmx.rmx_expire = time_second + rtq_reallyold;
	} else {
		rtrequest(RTM_DELETE,
		    (struct sockaddr *)rt_key(rt),
		    rt->rt_gateway, rt_mask(rt),
		    rt->rt_flags, 0);
	}
}

struct rtqk_arg {
	struct radix_node_head *rnh;
	int draining;
	int killed;
	int found;
	int updating;
	time_t nextstop;
};

/*
 * Get rid of old routes.  When draining, this deletes everything, even
 * if the timeout has not expired yet.  When updating, this makes sure
 * that nothing has a timeout longer than the current value of
 * rtq_reallyold.
 */
static int
in_rtqkill(struct radix_node *rn, void *rock)
{
	struct rtqk_arg *ap = rock;
	struct rtentry *rt = (struct rtentry *)rn;
	int err;

	if(rt->rt_flags & RTPRF_OURS) {
		ap->found++;

		if(ap->draining || rt->rt_rmx.rmx_expire <= time_second) {
			if(rt->rt_refcnt > 0)
				panic("rtqkill route really not free");

			err = rtrequest(RTM_DELETE,
			    (struct sockaddr *)rt_key(rt),
			    rt->rt_gateway, rt_mask(rt),
			    rt->rt_flags, 0);
			if(err) {
				log(LOG_WARNING, "in_rtqkill: error %d\n", err);
			} else {
				ap->killed++;
			}
		} else {
			if(ap->updating
			   && (rt->rt_rmx.rmx_expire - time_second
			       > rtq_reallyold)) {
				rt->rt_rmx.rmx_expire = time_second
				    + rtq_reallyold;
			}
			ap->nextstop = lmin(ap->nextstop,
			    rt->rt_rmx.rmx_expire);
		}
	}

	return 0;
}

#define RTQ_TIMEOUT	60*10	/* run no less than once every ten minutes */
static int rtq_timeout = RTQ_TIMEOUT;

static void
in_rtqtimo(void *rock)
{
	struct radix_node_head *rnh = rock;
	struct rtqk_arg arg;
	struct timeval atv;
	static time_t last_adjusted_timeout = 0;
	int s;

	arg.found = arg.killed = 0;
	arg.rnh = rnh;
	arg.nextstop = time_second + rtq_timeout;
	arg.draining = arg.updating = 0;
	s = splnet();
	rnh->rnh_walktree(rnh, in_rtqkill, &arg);
	splx(s);

	/*
	 * Attempt to be somewhat dynamic about this:
	 * If there are ``too many'' routes sitting around taking up space,
	 * then crank down the timeout, and see if we can't make some more
	 * go away.  However, we make sure that we will never adjust more
	 * than once in rtq_timeout seconds, to keep from cranking down too
	 * hard.
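	 * For example, starting from the default rtq_reallyold of one
	 * hour, successive adjustments give 3600 -> 2400 -> 1600 ->
	 * 1066 -> ... seconds, never dropping below rtq_minreallyold.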
	 */
	if((arg.found - arg.killed > rtq_toomany)
	   && (time_second - last_adjusted_timeout >= rtq_timeout)
	   && rtq_reallyold > rtq_minreallyold) {
		rtq_reallyold = 2*rtq_reallyold / 3;
		if(rtq_reallyold < rtq_minreallyold) {
			rtq_reallyold = rtq_minreallyold;
		}

		last_adjusted_timeout = time_second;
#ifdef DIAGNOSTIC
		log(LOG_DEBUG, "in_rtqtimo: adjusted rtq_reallyold to %d\n",
		    rtq_reallyold);
#endif
		arg.found = arg.killed = 0;
		arg.updating = 1;
		s = splnet();
		rnh->rnh_walktree(rnh, in_rtqkill, &arg);
		splx(s);
	}

	atv.tv_usec = 0;
	atv.tv_sec = arg.nextstop - time_second;
	timeout(in_rtqtimo, rock, tvtohz(&atv));
}

void
in_rtqdrain(void)
{
	struct radix_node_head *rnh = rt_tables[AF_INET];
	struct rtqk_arg arg;
	int s;

	arg.found = arg.killed = 0;
	arg.rnh = rnh;
	arg.nextstop = 0;
	arg.draining = 1;
	arg.updating = 0;
	s = splnet();
	rnh->rnh_walktree(rnh, in_rtqkill, &arg);
	splx(s);
}

/*
 * Initialize our routing tree.
 */
int
in_inithead(void **head, int off)
{
	struct radix_node_head *rnh;

	if(!rn_inithead(head, off))
		return 0;

	if(head != (void **)&rt_tables[AF_INET])	/* BOGUS! */
		return 1;	/* only do this for the real routing table */

	rnh = *head;
	rnh->rnh_addaddr = in_addroute;
	rnh->rnh_matchaddr = in_matroute;
	rnh->rnh_close = in_clsroute;
	in_rtqtimo(rnh);	/* kick off timeout first time */
	return 1;
}

/*
 * This zaps old routes when the interface goes down.
 * Currently it doesn't delete static routes; there are
 * arguments one could make for both behaviors.  For the moment,
 * we will adopt the Principle of Least Surprise and leave them
 * alone (with the knowledge that this will not be enough for some
 * people).  The ones we really want to get rid of are things like ARP
 * entries, since the user might down the interface, walk over to a
 * completely different network, and plug back in.
 */
struct in_ifadown_arg {
	struct radix_node_head *rnh;
	struct ifaddr *ifa;
};

static int
in_ifadownkill(struct radix_node *rn, void *xap)
{
	struct in_ifadown_arg *ap = xap;
	struct rtentry *rt = (struct rtentry *)rn;
	int err;

	if (rt->rt_ifa == ap->ifa && !(rt->rt_flags & RTF_STATIC)) {
		/*
		 * We need to disable the automatic prune that happens
		 * in this case in rtrequest() because it will blow
		 * away the pointers that rn_walktree() needs in order
		 * to continue our descent.  We will end up deleting all
		 * the routes that rtrequest() would have in any case,
		 * so that behavior is not needed there.
		 */
		rt->rt_flags &= ~RTF_PRCLONING;
		err = rtrequest(RTM_DELETE, (struct sockaddr *)rt_key(rt),
		    rt->rt_gateway, rt_mask(rt), rt->rt_flags, 0);
		if (err) {
			log(LOG_WARNING, "in_ifadownkill: error %d\n", err);
		}
	}
	return 0;
}

int
in_ifadown(struct ifaddr *ifa)
{
	struct in_ifadown_arg arg;
	struct radix_node_head *rnh;

	if (ifa->ifa_addr->sa_family != AF_INET)
		return 1;

	arg.rnh = rnh = rt_tables[AF_INET];
	arg.ifa = ifa;
	rnh->rnh_walktree(rnh, in_ifadownkill, &arg);
	ifa->ifa_flags &= ~IFA_ROUTE;
	return 0;
}
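
/*
 * Usage note: the three knobs declared above are exported as read-write
 * sysctls under net.inet.ip, so the behaviour of the cloned-route cache
 * can be tuned at run time; for example (the values here are purely
 * illustrative):
 *
 *	sysctl -w net.inet.ip.rtexpire=300	# hold idle clones for 5 minutes
 *	sysctl -w net.inet.ip.rtmaxcache=256	# allow more clones before cranking down
 */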