/*-
 * Copyright (c) 1980, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)route.c	8.3.1.1 (Berkeley) 2/23/95
 * $FreeBSD$
 */
/************************************************************************
 * Note: In this file a 'fib' is a "forwarding information base",	*
 * which is the new name for an in-kernel routing (next hop) table.	*
 ***********************************************************************/

#include "opt_inet.h"
#include "opt_route.h"
#include "opt_mrouting.h"
#include "opt_mpath.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/domain.h>
#include <sys/kernel.h>
#include <sys/vimage.h>

#include <net/if.h>
#include <net/route.h>

#ifdef RADIX_MPATH
#include <net/radix_mpath.h>
#endif

#include <netinet/in.h>
#include <netinet/ip_mroute.h>

#include <vm/uma.h>

#ifndef	ROUTETABLES
#define RT_NUMFIBS 1
#define RT_MAXFIBS 1
#else
/* while we use 4 bits in the mbuf flags,
 * we are limited to 16
 */
#define RT_MAXFIBS 16
#if ROUTETABLES > RT_MAXFIBS
#define RT_NUMFIBS RT_MAXFIBS
#error "ROUTETABLES defined too big"
#else
#if ROUTETABLES == 0
#define RT_NUMFIBS 1
#else
#define RT_NUMFIBS ROUTETABLES
#endif
#endif
#endif

u_int rt_numfibs = RT_NUMFIBS;
SYSCTL_INT(_net, OID_AUTO, fibs, CTLFLAG_RD, &rt_numfibs, 0, "");
/*
 * Allow the boot code to configure fewer than RT_MAXFIBS tables.
 * We can't allow more because storage is statically allocated for now
 * (for compatibility reasons; this will change).
 */
TUNABLE_INT("net.fibs", &rt_numfibs);
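/*
 * Illustrative configuration notes (added, not part of the original source):
 * the number of FIBs is normally fixed at build time with the ROUTETABLES
 * kernel option and may be lowered at boot with the "net.fibs" loader
 * tunable declared above.  A process typically selects a non-default FIB
 * with the setfib(2) syscall implemented later in this file (or with the
 * setfib(1) utility that wraps it).
 */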
/*
 * By default add routes to all fibs for new interfaces.
 * Once this is set to 0 then only allocate routes on interface
 * changes for the FIB of the caller when adding a new set of addresses
 * to an interface.  XXX this is a shotgun approach to a problem that needs
 * a more fine-grained solution.. that will come.
 */
u_int rt_add_addr_allfibs = 1;
SYSCTL_INT(_net, OID_AUTO, add_addr_allfibs, CTLFLAG_RW,
    &rt_add_addr_allfibs, 0, "");
TUNABLE_INT("net.add_addr_allfibs", &rt_add_addr_allfibs);

static struct rtstat rtstat;

/* by default only the first 'row' of tables will be accessed. */
/*
 * XXXMRT When we fix netstat, and do this differently,
 * we can allocate this dynamically.  As long as we are keeping
 * things backwards compatible we need to allocate this
 * statically.
 */
struct radix_node_head *rt_tables[RT_MAXFIBS][AF_MAX+1];

static int	rttrash;		/* routes not in table but not freed */

static void rt_maskedcopy(struct sockaddr *,
	    struct sockaddr *, struct sockaddr *);

/* compare two sockaddr structures */
#define	sa_equal(a1, a2) (bcmp((a1), (a2), (a1)->sa_len) == 0)

/*
 * Convert a 'struct radix_node *' to a 'struct rtentry *'.
 * The operation can be done safely (in this code) because a
 * 'struct rtentry' starts with two 'struct radix_node''s, the first
 * one representing leaf nodes in the routing tree, which is
 * what the code in radix.c passes us as a 'struct radix_node'.
 *
 * But because there are a lot of assumptions in this conversion,
 * do not cast explicitly, but always use the macro below.
 */
#define RNTORT(p)	((struct rtentry *)(p))

static uma_zone_t rtzone;		/* Routing table UMA zone. */

#if 0
/* default fib for tunnels to use */
u_int tunnel_fib = 0;
SYSCTL_INT(_net, OID_AUTO, tunnelfib, CTLFLAG_RD, &tunnel_fib, 0, "");
#endif

/*
 * handler for net.my_fibnum
 */
static int
sysctl_my_fibnum(SYSCTL_HANDLER_ARGS)
{
	int fibnum;
	int error;

	fibnum = curthread->td_proc->p_fibnum;
	error = sysctl_handle_int(oidp, &fibnum, 0, req);
	return (error);
}

SYSCTL_PROC(_net, OID_AUTO, my_fibnum, CTLTYPE_INT|CTLFLAG_RD,
    NULL, 0, &sysctl_my_fibnum, "I", "default FIB of caller");
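/*
 * Descriptive note (added): route_init() runs once at boot, via the
 * SYSINIT() at the bottom of this file.  It clamps the "net.fibs" tunable
 * into a sane range, creates the UMA zone from which rtentries are
 * allocated, initializes the radix code and calls each domain's rtattach
 * hook to create the per-FIB radix trees (only AF_INET currently gets more
 * than one table).
 */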
static void
route_init(void)
{
	int table;
	struct domain *dom;
	int fam;

	/* whack the tunable ints into line. */
	if (rt_numfibs > RT_MAXFIBS)
		rt_numfibs = RT_MAXFIBS;
	if (rt_numfibs == 0)
		rt_numfibs = 1;
	rtzone = uma_zcreate("rtentry", sizeof(struct rtentry), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	rn_init();	/* initialize all zeroes, all ones, mask table */

	for (dom = domains; dom; dom = dom->dom_next) {
		if (dom->dom_rtattach) {
			for (table = 0; table < rt_numfibs; table++) {
				if ((fam = dom->dom_family) == AF_INET ||
				    table == 0) {
					/* for now only AF_INET has > 1 table */
					/* XXX MRT
					 * rtattach will be also called
					 * from vfs_export.c but the
					 * offset will be 0
					 * (only for AF_INET and AF_INET6
					 * which don't need it anyhow)
					 */
					dom->dom_rtattach(
					    (void **)&V_rt_tables[table][fam],
					    dom->dom_rtoffset);
				} else {
					break;
				}
			}
		}
	}
}

#ifndef _SYS_SYSPROTO_H_
struct setfib_args {
	int	fibnum;
};
#endif
int
setfib(struct thread *td, struct setfib_args *uap)
{
	if (uap->fibnum < 0 || uap->fibnum >= rt_numfibs)
		return EINVAL;
	td->td_proc->p_fibnum = uap->fibnum;
	return (0);
}

/*
 * Packet routing routines.
 */
void
rtalloc(struct route *ro)
{
	rtalloc_ign_fib(ro, 0UL, 0);
}

void
rtalloc_fib(struct route *ro, u_int fibnum)
{
	rtalloc_ign_fib(ro, 0UL, fibnum);
}

void
rtalloc_ign(struct route *ro, u_long ignore)
{
	struct rtentry *rt;

	if ((rt = ro->ro_rt) != NULL) {
		if (rt->rt_ifp != NULL && rt->rt_flags & RTF_UP)
			return;
		RTFREE(rt);
		ro->ro_rt = NULL;
	}
	ro->ro_rt = rtalloc1_fib(&ro->ro_dst, 1, ignore, 0);
	if (ro->ro_rt)
		RT_UNLOCK(ro->ro_rt);
}

void
rtalloc_ign_fib(struct route *ro, u_long ignore, u_int fibnum)
{
	struct rtentry *rt;

	if ((rt = ro->ro_rt) != NULL) {
		if (rt->rt_ifp != NULL && rt->rt_flags & RTF_UP)
			return;
		RTFREE(rt);
		ro->ro_rt = NULL;
	}
	ro->ro_rt = rtalloc1_fib(&ro->ro_dst, 1, ignore, fibnum);
	if (ro->ro_rt)
		RT_UNLOCK(ro->ro_rt);
}

/*
 * Look up the route that matches the address given, or at least try to.
 * Create a cloned route if needed.
 *
 * The returned route, if any, is locked.
 */
struct rtentry *
rtalloc1(struct sockaddr *dst, int report, u_long ignflags)
{
	return (rtalloc1_fib(dst, report, ignflags, 0));
}

struct rtentry *
rtalloc1_fib(struct sockaddr *dst, int report, u_long ignflags,
    u_int fibnum)
{
	struct radix_node_head *rnh;
	struct rtentry *rt;
	struct radix_node *rn;
	struct rtentry *newrt;
	struct rt_addrinfo info;
	u_long nflags;
	int err = 0, msgtype = RTM_MISS;

	KASSERT((fibnum < rt_numfibs), ("rtalloc1_fib: bad fibnum"));
	if (dst->sa_family != AF_INET)	/* Only INET supports > 1 fib now */
		fibnum = 0;
	rnh = V_rt_tables[fibnum][dst->sa_family];
	newrt = NULL;
	/*
	 * Look up the address in the table for that Address Family
	 */
	if (rnh == NULL) {
		V_rtstat.rts_unreach++;
		goto miss2;
	}
	RADIX_NODE_HEAD_LOCK(rnh);
	if ((rn = rnh->rnh_matchaddr(dst, rnh)) &&
	    (rn->rn_flags & RNF_ROOT) == 0) {
		/*
		 * If we find it and it's not the root node, then
		 * get a reference on the rtentry associated.
		 */
		newrt = rt = RNTORT(rn);
		nflags = rt->rt_flags & ~ignflags;
		if (report && (nflags & RTF_CLONING)) {
			/*
			 * We are apparently adding (report = 0 in delete).
			 * If it requires that it be cloned, do so.
			 * (This implies it wasn't a HOST route.)
			 */
			err = rtrequest_fib(RTM_RESOLVE, dst, NULL,
			    NULL, 0, &newrt, fibnum);
			if (err) {
				/*
				 * If the cloning didn't succeed, maybe
				 * what we have will do. Return that.
				 */
				newrt = rt;		/* existing route */
				RT_LOCK(newrt);
				RT_ADDREF(newrt);
				goto miss;
			}
			KASSERT(newrt, ("no route and no error"));
			RT_LOCK(newrt);
			if (newrt->rt_flags & RTF_XRESOLVE) {
				/*
				 * If the new route specifies it be
				 * externally resolved, then go do that.
				 */
				msgtype = RTM_RESOLVE;
				goto miss;
			}
			/* Inform listeners of the new route. */
			bzero(&info, sizeof(info));
			info.rti_info[RTAX_DST] = rt_key(newrt);
			info.rti_info[RTAX_NETMASK] = rt_mask(newrt);
			info.rti_info[RTAX_GATEWAY] = newrt->rt_gateway;
			if (newrt->rt_ifp != NULL) {
				info.rti_info[RTAX_IFP] =
				    newrt->rt_ifp->if_addr->ifa_addr;
				info.rti_info[RTAX_IFA] = newrt->rt_ifa->ifa_addr;
			}
			rt_missmsg(RTM_ADD, &info, newrt->rt_flags, 0);
		} else {
			RT_LOCK(newrt);
			RT_ADDREF(newrt);
		}
		RADIX_NODE_HEAD_UNLOCK(rnh);
	} else {
		/*
		 * Either we hit the root or couldn't find any match,
		 * which basically means:
		 * "can't get there from here".
		 */
		V_rtstat.rts_unreach++;
	miss:
		RADIX_NODE_HEAD_UNLOCK(rnh);
	miss2:	if (report) {
			/*
			 * If required, report the failure to the supervising
			 * authorities.
			 * For a delete, this is not an error (report == 0).
			 */
			bzero(&info, sizeof(info));
			info.rti_info[RTAX_DST] = dst;
			rt_missmsg(msgtype, &info, 0, err);
		}
	}
	if (newrt)
		RT_LOCK_ASSERT(newrt);
	return (newrt);
}

/*
 * Remove a reference count from an rtentry.
 * If the count gets low enough, take it out of the routing table.
 */
void
rtfree(struct rtentry *rt)
{
	struct radix_node_head *rnh;

	KASSERT(rt != NULL,("%s: NULL rt", __func__));
	rnh = V_rt_tables[rt->rt_fibnum][rt_key(rt)->sa_family];
	KASSERT(rnh != NULL,("%s: NULL rnh", __func__));

	RT_LOCK_ASSERT(rt);

	/*
	 * The callers should use RTFREE_LOCKED() or RTFREE(), so
	 * we should come here exactly with the last reference.
	 */
	RT_REMREF(rt);
	if (rt->rt_refcnt > 0) {
		printf("%s: %p has %lu refs\n", __func__, rt, rt->rt_refcnt);
		goto done;
	}

	/*
	 * On last reference give the "close method" a chance
	 * to cleanup private state.  This also permits (for
	 * IPv4 and IPv6) a chance to decide if the routing table
	 * entry should be purged immediately or at a later time.
	 * When an immediate purge is to happen the close routine
	 * typically calls rtexpunge which clears the RTF_UP flag
	 * on the entry so that the code below reclaims the storage.
	 */
	if (rt->rt_refcnt == 0 && rnh->rnh_close)
		rnh->rnh_close((struct radix_node *)rt, rnh);

	/*
	 * If we are no longer "up" (and ref == 0)
	 * then we can free the resources associated
	 * with the route.
	 */
	if ((rt->rt_flags & RTF_UP) == 0) {
		if (rt->rt_nodes->rn_flags & (RNF_ACTIVE | RNF_ROOT))
			panic("rtfree 2");
		/*
		 * the rtentry must have been removed from the routing table
		 * so it is represented in rttrash.. remove that now.
426 */ 427 V_rttrash--; 428 #ifdef DIAGNOSTIC 429 if (rt->rt_refcnt < 0) { 430 printf("rtfree: %p not freed (neg refs)\n", rt); 431 goto done; 432 } 433 #endif 434 /* 435 * release references on items we hold them on.. 436 * e.g other routes and ifaddrs. 437 */ 438 if (rt->rt_ifa) 439 IFAFREE(rt->rt_ifa); 440 rt->rt_parent = NULL; /* NB: no refcnt on parent */ 441 442 /* 443 * The key is separatly alloc'd so free it (see rt_setgate()). 444 * This also frees the gateway, as they are always malloc'd 445 * together. 446 */ 447 Free(rt_key(rt)); 448 449 /* 450 * and the rtentry itself of course 451 */ 452 RT_LOCK_DESTROY(rt); 453 uma_zfree(rtzone, rt); 454 return; 455 } 456 done: 457 RT_UNLOCK(rt); 458 } 459 460 461 /* 462 * Force a routing table entry to the specified 463 * destination to go through the given gateway. 464 * Normally called as a result of a routing redirect 465 * message from the network layer. 466 */ 467 void 468 rtredirect(struct sockaddr *dst, 469 struct sockaddr *gateway, 470 struct sockaddr *netmask, 471 int flags, 472 struct sockaddr *src) 473 { 474 rtredirect_fib(dst, gateway, netmask, flags, src, 0); 475 } 476 477 void 478 rtredirect_fib(struct sockaddr *dst, 479 struct sockaddr *gateway, 480 struct sockaddr *netmask, 481 int flags, 482 struct sockaddr *src, 483 u_int fibnum) 484 { 485 struct rtentry *rt, *rt0 = NULL; 486 int error = 0; 487 short *stat = NULL; 488 struct rt_addrinfo info; 489 struct ifaddr *ifa; 490 491 /* verify the gateway is directly reachable */ 492 if ((ifa = ifa_ifwithnet(gateway)) == NULL) { 493 error = ENETUNREACH; 494 goto out; 495 } 496 rt = rtalloc1_fib(dst, 0, 0UL, fibnum); /* NB: rt is locked */ 497 /* 498 * If the redirect isn't from our current router for this dst, 499 * it's either old or wrong. If it redirects us to ourselves, 500 * we have a routing loop, perhaps as a result of an interface 501 * going down recently. 502 */ 503 if (!(flags & RTF_DONE) && rt && 504 (!sa_equal(src, rt->rt_gateway) || rt->rt_ifa != ifa)) 505 error = EINVAL; 506 else if (ifa_ifwithaddr(gateway)) 507 error = EHOSTUNREACH; 508 if (error) 509 goto done; 510 /* 511 * Create a new entry if we just got back a wildcard entry 512 * or the the lookup failed. This is necessary for hosts 513 * which use routing redirects generated by smart gateways 514 * to dynamically build the routing tables. 515 */ 516 if (rt == NULL || (rt_mask(rt) && rt_mask(rt)->sa_len < 2)) 517 goto create; 518 /* 519 * Don't listen to the redirect if it's 520 * for a route to an interface. 521 */ 522 if (rt->rt_flags & RTF_GATEWAY) { 523 if (((rt->rt_flags & RTF_HOST) == 0) && (flags & RTF_HOST)) { 524 /* 525 * Changing from route to net => route to host. 526 * Create new route, rather than smashing route to net. 527 */ 528 create: 529 rt0 = rt; 530 rt = NULL; 531 532 flags |= RTF_GATEWAY | RTF_DYNAMIC; 533 bzero((caddr_t)&info, sizeof(info)); 534 info.rti_info[RTAX_DST] = dst; 535 info.rti_info[RTAX_GATEWAY] = gateway; 536 info.rti_info[RTAX_NETMASK] = netmask; 537 info.rti_ifa = ifa; 538 info.rti_flags = flags; 539 error = rtrequest1_fib(RTM_ADD, &info, &rt, fibnum); 540 if (rt != NULL) { 541 RT_LOCK(rt); 542 EVENTHANDLER_INVOKE(route_redirect_event, rt0, rt, dst); 543 flags = rt->rt_flags; 544 } 545 if (rt0) 546 RTFREE_LOCKED(rt0); 547 548 stat = &V_rtstat.rts_dynamic; 549 } else { 550 struct rtentry *gwrt; 551 552 /* 553 * Smash the current notion of the gateway to 554 * this destination. Should check about netmask!!! 
555 */ 556 rt->rt_flags |= RTF_MODIFIED; 557 flags |= RTF_MODIFIED; 558 stat = &V_rtstat.rts_newgateway; 559 /* 560 * add the key and gateway (in one malloc'd chunk). 561 */ 562 rt_setgate(rt, rt_key(rt), gateway); 563 gwrt = rtalloc1(gateway, 1, 0); 564 EVENTHANDLER_INVOKE(route_redirect_event, rt, gwrt, dst); 565 RTFREE_LOCKED(gwrt); 566 } 567 } else 568 error = EHOSTUNREACH; 569 done: 570 if (rt) 571 RTFREE_LOCKED(rt); 572 out: 573 if (error) 574 V_rtstat.rts_badredirect++; 575 else if (stat != NULL) 576 (*stat)++; 577 bzero((caddr_t)&info, sizeof(info)); 578 info.rti_info[RTAX_DST] = dst; 579 info.rti_info[RTAX_GATEWAY] = gateway; 580 info.rti_info[RTAX_NETMASK] = netmask; 581 info.rti_info[RTAX_AUTHOR] = src; 582 rt_missmsg(RTM_REDIRECT, &info, flags, error); 583 } 584 585 int 586 rtioctl(u_long req, caddr_t data) 587 { 588 return (rtioctl_fib(req, data, 0)); 589 } 590 591 /* 592 * Routing table ioctl interface. 593 */ 594 int 595 rtioctl_fib(u_long req, caddr_t data, u_int fibnum) 596 { 597 598 /* 599 * If more ioctl commands are added here, make sure the proper 600 * super-user checks are being performed because it is possible for 601 * prison-root to make it this far if raw sockets have been enabled 602 * in jails. 603 */ 604 #ifdef INET 605 /* Multicast goop, grrr... */ 606 return mrt_ioctl ? mrt_ioctl(req, data, fibnum) : EOPNOTSUPP; 607 #else /* INET */ 608 return ENXIO; 609 #endif /* INET */ 610 } 611 612 struct ifaddr * 613 ifa_ifwithroute(int flags, struct sockaddr *dst, struct sockaddr *gateway) 614 { 615 return (ifa_ifwithroute_fib(flags, dst, gateway, 0)); 616 } 617 618 struct ifaddr * 619 ifa_ifwithroute_fib(int flags, struct sockaddr *dst, struct sockaddr *gateway, 620 u_int fibnum) 621 { 622 register struct ifaddr *ifa; 623 int not_found = 0; 624 625 if ((flags & RTF_GATEWAY) == 0) { 626 /* 627 * If we are adding a route to an interface, 628 * and the interface is a pt to pt link 629 * we should search for the destination 630 * as our clue to the interface. Otherwise 631 * we can use the local address. 632 */ 633 ifa = NULL; 634 if (flags & RTF_HOST) 635 ifa = ifa_ifwithdstaddr(dst); 636 if (ifa == NULL) 637 ifa = ifa_ifwithaddr(gateway); 638 } else { 639 /* 640 * If we are adding a route to a remote net 641 * or host, the gateway may still be on the 642 * other end of a pt to pt link. 
643 */ 644 ifa = ifa_ifwithdstaddr(gateway); 645 } 646 if (ifa == NULL) 647 ifa = ifa_ifwithnet(gateway); 648 if (ifa == NULL) { 649 struct rtentry *rt = rtalloc1_fib(gateway, 0, 0UL, fibnum); 650 if (rt == NULL) 651 return (NULL); 652 /* 653 * dismiss a gateway that is reachable only 654 * through the default router 655 */ 656 switch (gateway->sa_family) { 657 case AF_INET: 658 if (satosin(rt_key(rt))->sin_addr.s_addr == INADDR_ANY) 659 not_found = 1; 660 break; 661 case AF_INET6: 662 if (IN6_IS_ADDR_UNSPECIFIED(&satosin6(rt_key(rt))->sin6_addr)) 663 not_found = 1; 664 break; 665 default: 666 break; 667 } 668 RT_REMREF(rt); 669 RT_UNLOCK(rt); 670 if (not_found) 671 return (NULL); 672 if ((ifa = rt->rt_ifa) == NULL) 673 return (NULL); 674 } 675 if (ifa->ifa_addr->sa_family != dst->sa_family) { 676 struct ifaddr *oifa = ifa; 677 ifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp); 678 if (ifa == NULL) 679 ifa = oifa; 680 } 681 return (ifa); 682 } 683 684 static walktree_f_t rt_fixdelete; 685 static walktree_f_t rt_fixchange; 686 687 struct rtfc_arg { 688 struct rtentry *rt0; 689 struct radix_node_head *rnh; 690 }; 691 692 /* 693 * Do appropriate manipulations of a routing tree given 694 * all the bits of info needed 695 */ 696 int 697 rtrequest(int req, 698 struct sockaddr *dst, 699 struct sockaddr *gateway, 700 struct sockaddr *netmask, 701 int flags, 702 struct rtentry **ret_nrt) 703 { 704 return (rtrequest_fib(req, dst, gateway, netmask, flags, ret_nrt, 0)); 705 } 706 707 int 708 rtrequest_fib(int req, 709 struct sockaddr *dst, 710 struct sockaddr *gateway, 711 struct sockaddr *netmask, 712 int flags, 713 struct rtentry **ret_nrt, 714 u_int fibnum) 715 { 716 struct rt_addrinfo info; 717 718 if (dst->sa_len == 0) 719 return(EINVAL); 720 721 bzero((caddr_t)&info, sizeof(info)); 722 info.rti_flags = flags; 723 info.rti_info[RTAX_DST] = dst; 724 info.rti_info[RTAX_GATEWAY] = gateway; 725 info.rti_info[RTAX_NETMASK] = netmask; 726 return rtrequest1_fib(req, &info, ret_nrt, fibnum); 727 } 728 729 /* 730 * These (questionable) definitions of apparent local variables apply 731 * to the next two functions. XXXXXX!!! 732 */ 733 #define dst info->rti_info[RTAX_DST] 734 #define gateway info->rti_info[RTAX_GATEWAY] 735 #define netmask info->rti_info[RTAX_NETMASK] 736 #define ifaaddr info->rti_info[RTAX_IFA] 737 #define ifpaddr info->rti_info[RTAX_IFP] 738 #define flags info->rti_flags 739 740 int 741 rt_getifa(struct rt_addrinfo *info) 742 { 743 return (rt_getifa_fib(info, 0)); 744 } 745 746 int 747 rt_getifa_fib(struct rt_addrinfo *info, u_int fibnum) 748 { 749 struct ifaddr *ifa; 750 int error = 0; 751 752 /* 753 * ifp may be specified by sockaddr_dl 754 * when protocol address is ambiguous. 755 */ 756 if (info->rti_ifp == NULL && ifpaddr != NULL && 757 ifpaddr->sa_family == AF_LINK && 758 (ifa = ifa_ifwithnet(ifpaddr)) != NULL) 759 info->rti_ifp = ifa->ifa_ifp; 760 if (info->rti_ifa == NULL && ifaaddr != NULL) 761 info->rti_ifa = ifa_ifwithaddr(ifaaddr); 762 if (info->rti_ifa == NULL) { 763 struct sockaddr *sa; 764 765 sa = ifaaddr != NULL ? ifaaddr : 766 (gateway != NULL ? 
/*
 * These (questionable) definitions of apparent local variables apply
 * to the next two functions.  XXXXXX!!!
 */
#define dst	info->rti_info[RTAX_DST]
#define gateway	info->rti_info[RTAX_GATEWAY]
#define netmask	info->rti_info[RTAX_NETMASK]
#define ifaaddr	info->rti_info[RTAX_IFA]
#define ifpaddr	info->rti_info[RTAX_IFP]
#define flags	info->rti_flags

int
rt_getifa(struct rt_addrinfo *info)
{
	return (rt_getifa_fib(info, 0));
}

int
rt_getifa_fib(struct rt_addrinfo *info, u_int fibnum)
{
	struct ifaddr *ifa;
	int error = 0;

	/*
	 * ifp may be specified by sockaddr_dl
	 * when protocol address is ambiguous.
	 */
	if (info->rti_ifp == NULL && ifpaddr != NULL &&
	    ifpaddr->sa_family == AF_LINK &&
	    (ifa = ifa_ifwithnet(ifpaddr)) != NULL)
		info->rti_ifp = ifa->ifa_ifp;
	if (info->rti_ifa == NULL && ifaaddr != NULL)
		info->rti_ifa = ifa_ifwithaddr(ifaaddr);
	if (info->rti_ifa == NULL) {
		struct sockaddr *sa;

		sa = ifaaddr != NULL ? ifaaddr :
		    (gateway != NULL ? gateway : dst);
		if (sa != NULL && info->rti_ifp != NULL)
			info->rti_ifa = ifaof_ifpforaddr(sa, info->rti_ifp);
		else if (dst != NULL && gateway != NULL)
			info->rti_ifa = ifa_ifwithroute_fib(flags, dst, gateway,
			    fibnum);
		else if (sa != NULL)
			info->rti_ifa = ifa_ifwithroute_fib(flags, sa, sa,
			    fibnum);
	}
	if ((ifa = info->rti_ifa) != NULL) {
		if (info->rti_ifp == NULL)
			info->rti_ifp = ifa->ifa_ifp;
	} else
		error = ENETUNREACH;
	return (error);
}

/*
 * Expunges references to a route that's about to be reclaimed.
 * The route must be locked.
 */
int
rtexpunge(struct rtentry *rt)
{
	struct radix_node *rn;
	struct radix_node_head *rnh;
	struct ifaddr *ifa;
	int error = 0;

	RT_LOCK_ASSERT(rt);
#if 0
	/*
	 * We cannot assume anything about the reference count
	 * because protocols call us in many situations; often
	 * before unwinding references to the table entry.
	 */
	KASSERT(rt->rt_refcnt <= 1, ("bogus refcnt %ld", rt->rt_refcnt));
#endif
	/*
	 * Find the correct routing tree to use for this Address Family
	 */
	rnh = V_rt_tables[rt->rt_fibnum][rt_key(rt)->sa_family];
	if (rnh == NULL)
		return (EAFNOSUPPORT);

	RADIX_NODE_HEAD_LOCK(rnh);

	/*
	 * Remove the item from the tree; it should be there,
	 * but when callers invoke us blindly it may not (sigh).
	 */
	rn = rnh->rnh_deladdr(rt_key(rt), rt_mask(rt), rnh);
	if (rn == NULL) {
		error = ESRCH;
		goto bad;
	}
	KASSERT((rn->rn_flags & (RNF_ACTIVE | RNF_ROOT)) == 0,
	    ("unexpected flags 0x%x", rn->rn_flags));
	KASSERT(rt == RNTORT(rn),
	    ("lookup mismatch, rt %p rn %p", rt, rn));

	rt->rt_flags &= ~RTF_UP;

	/*
	 * Now search what's left of the subtree for any cloned
	 * routes which might have been formed from this node.
	 */
	if ((rt->rt_flags & RTF_CLONING) && rt_mask(rt))
		rnh->rnh_walktree_from(rnh, rt_key(rt), rt_mask(rt),
		    rt_fixdelete, rt);

	/*
	 * Remove any external references we may have.
	 * This might result in another rtentry being freed if
	 * we held its last reference.
	 */
	if (rt->rt_gwroute) {
		RTFREE(rt->rt_gwroute);
		rt->rt_gwroute = NULL;
	}

	/*
	 * Give the protocol a chance to keep things in sync.
	 */
	if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest) {
		struct rt_addrinfo info;

		bzero((caddr_t)&info, sizeof(info));
		info.rti_flags = rt->rt_flags;
		info.rti_info[RTAX_DST] = rt_key(rt);
		info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
		info.rti_info[RTAX_NETMASK] = rt_mask(rt);
		ifa->ifa_rtrequest(RTM_DELETE, rt, &info);
	}

	/*
	 * one more rtentry floating around that is not
	 * linked to the routing table.
865 */ 866 V_rttrash++; 867 bad: 868 RADIX_NODE_HEAD_UNLOCK(rnh); 869 return (error); 870 } 871 872 int 873 rtrequest1(int req, struct rt_addrinfo *info, struct rtentry **ret_nrt) 874 { 875 return (rtrequest1_fib(req, info, ret_nrt, 0)); 876 } 877 878 int 879 rtrequest1_fib(int req, struct rt_addrinfo *info, struct rtentry **ret_nrt, 880 u_int fibnum) 881 { 882 int error = 0; 883 register struct rtentry *rt; 884 register struct radix_node *rn; 885 register struct radix_node_head *rnh; 886 struct ifaddr *ifa; 887 struct sockaddr *ndst; 888 #define senderr(x) { error = x ; goto bad; } 889 890 KASSERT((fibnum < rt_numfibs), ("rtrequest1_fib: bad fibnum")); 891 if (dst->sa_family != AF_INET) /* Only INET supports > 1 fib now */ 892 fibnum = 0; 893 /* 894 * Find the correct routing tree to use for this Address Family 895 */ 896 rnh = V_rt_tables[fibnum][dst->sa_family]; 897 if (rnh == NULL) 898 return (EAFNOSUPPORT); 899 RADIX_NODE_HEAD_LOCK(rnh); 900 /* 901 * If we are adding a host route then we don't want to put 902 * a netmask in the tree, nor do we want to clone it. 903 */ 904 if (flags & RTF_HOST) { 905 netmask = NULL; 906 flags &= ~RTF_CLONING; 907 } 908 switch (req) { 909 case RTM_DELETE: 910 #ifdef RADIX_MPATH 911 /* 912 * if we got multipath routes, we require users to specify 913 * a matching RTAX_GATEWAY. 914 */ 915 if (rn_mpath_capable(rnh)) { 916 struct rtentry *rto = NULL; 917 918 rn = rnh->rnh_matchaddr(dst, rnh); 919 if (rn == NULL) 920 senderr(ESRCH); 921 rto = rt = RNTORT(rn); 922 rt = rt_mpath_matchgate(rt, gateway); 923 if (!rt) 924 senderr(ESRCH); 925 /* 926 * this is the first entry in the chain 927 */ 928 if (rto == rt) { 929 rn = rn_mpath_next((struct radix_node *)rt); 930 /* 931 * there is another entry, now it's active 932 */ 933 if (rn) { 934 rto = RNTORT(rn); 935 RT_LOCK(rto); 936 rto->rt_flags |= RTF_UP; 937 RT_UNLOCK(rto); 938 } else if (rt->rt_flags & RTF_GATEWAY) { 939 /* 940 * For gateway routes, we need to 941 * make sure that we we are deleting 942 * the correct gateway. 943 * rt_mpath_matchgate() does not 944 * check the case when there is only 945 * one route in the chain. 946 */ 947 if (gateway && 948 (rt->rt_gateway->sa_len != gateway->sa_len || 949 memcmp(rt->rt_gateway, gateway, gateway->sa_len))) 950 senderr(ESRCH); 951 } 952 /* 953 * use the normal delete code to remove 954 * the first entry 955 */ 956 goto normal_rtdel; 957 } 958 /* 959 * if the entry is 2nd and on up 960 */ 961 if (!rt_mpath_deldup(rto, rt)) 962 panic ("rtrequest1: rt_mpath_deldup"); 963 RT_LOCK(rt); 964 RT_ADDREF(rt); 965 rt->rt_flags &= ~RTF_UP; 966 goto deldone; /* done with the RTM_DELETE command */ 967 } 968 969 normal_rtdel: 970 #endif 971 /* 972 * Remove the item from the tree and return it. 973 * Complain if it is not there and do no more processing. 974 */ 975 rn = rnh->rnh_deladdr(dst, netmask, rnh); 976 if (rn == NULL) 977 senderr(ESRCH); 978 if (rn->rn_flags & (RNF_ACTIVE | RNF_ROOT)) 979 panic ("rtrequest delete"); 980 rt = RNTORT(rn); 981 RT_LOCK(rt); 982 RT_ADDREF(rt); 983 rt->rt_flags &= ~RTF_UP; 984 985 /* 986 * Now search what's left of the subtree for any cloned 987 * routes which might have been formed from this node. 988 */ 989 if ((rt->rt_flags & RTF_CLONING) && 990 rt_mask(rt)) { 991 rnh->rnh_walktree_from(rnh, dst, rt_mask(rt), 992 rt_fixdelete, rt); 993 } 994 995 /* 996 * Remove any external references we may have. 997 * This might result in another rtentry being freed if 998 * we held its last reference. 
		 */
		if (rt->rt_gwroute) {
			RTFREE(rt->rt_gwroute);
			rt->rt_gwroute = NULL;
		}

		/*
		 * give the protocol a chance to keep things in sync.
		 */
		if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest)
			ifa->ifa_rtrequest(RTM_DELETE, rt, info);

#ifdef RADIX_MPATH
deldone:
#endif
		/*
		 * One more rtentry floating around that is not
		 * linked to the routing table. rttrash will be decremented
		 * when RTFREE(rt) is eventually called.
		 */
		V_rttrash++;

		/*
		 * If the caller wants it, then it can have it,
		 * but it's up to it to free the rtentry as we won't be
		 * doing it.
		 */
		if (ret_nrt) {
			*ret_nrt = rt;
			RT_UNLOCK(rt);
		} else
			RTFREE_LOCKED(rt);
		break;

	case RTM_RESOLVE:
		if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)
			senderr(EINVAL);
		ifa = rt->rt_ifa;
		/* XXX locking? */
		flags = rt->rt_flags &
		    ~(RTF_CLONING | RTF_STATIC);
		flags |= RTF_WASCLONED;
		gateway = rt->rt_gateway;
		if ((netmask = rt->rt_genmask) == NULL)
			flags |= RTF_HOST;
		goto makeroute;

	case RTM_ADD:
		if ((flags & RTF_GATEWAY) && !gateway)
			senderr(EINVAL);
		if (dst && gateway && (dst->sa_family != gateway->sa_family) &&
		    (gateway->sa_family != AF_UNSPEC) && (gateway->sa_family != AF_LINK))
			senderr(EINVAL);

		if (info->rti_ifa == NULL && (error = rt_getifa_fib(info, fibnum)))
			senderr(error);
		ifa = info->rti_ifa;

	makeroute:
		rt = uma_zalloc(rtzone, M_NOWAIT | M_ZERO);
		if (rt == NULL)
			senderr(ENOBUFS);
		RT_LOCK_INIT(rt);
		rt->rt_flags = RTF_UP | flags;
		rt->rt_fibnum = fibnum;
		/*
		 * Add the gateway, possibly re-malloc-ing the storage for it;
		 * also add the rt_gwroute if possible.
		 */
		RT_LOCK(rt);
		if ((error = rt_setgate(rt, dst, gateway)) != 0) {
			RT_LOCK_DESTROY(rt);
			uma_zfree(rtzone, rt);
			senderr(error);
		}

		/*
		 * point to the (possibly newly malloc'd) dest address.
		 */
		ndst = (struct sockaddr *)rt_key(rt);

		/*
		 * make sure it contains the value we want (masked if needed).
		 */
		if (netmask) {
			rt_maskedcopy(dst, ndst, netmask);
		} else
			bcopy(dst, ndst, dst->sa_len);

		/*
		 * Note that we now have a reference to the ifa.
		 * This moved from below so that rnh->rnh_addaddr() can
		 * examine the ifa and ifa->ifa_ifp if it so desires.
		 */
		IFAREF(ifa);
		rt->rt_ifa = ifa;
		rt->rt_ifp = ifa->ifa_ifp;

#ifdef RADIX_MPATH
		/* do not permit exactly the same dst/mask/gw pair */
		if (rn_mpath_capable(rnh) &&
		    rt_mpath_conflict(rnh, rt, netmask)) {
			if (rt->rt_gwroute)
				RTFREE(rt->rt_gwroute);
			if (rt->rt_ifa) {
				IFAFREE(rt->rt_ifa);
			}
			Free(rt_key(rt));
			RT_LOCK_DESTROY(rt);
			uma_zfree(rtzone, rt);
			senderr(EEXIST);
		}
#endif

		/* XXX mtu manipulation will be done in rnh_addaddr -- itojun */
		rn = rnh->rnh_addaddr(ndst, netmask, rnh, rt->rt_nodes);
		if (rn == NULL) {
			struct rtentry *rt2;
			/*
			 * Uh-oh, we already have one of these in the tree.
			 * We do a special hack: if the route that's already
			 * there was generated by the cloning mechanism
			 * then we just blow it away and retry the insertion
			 * of the new one.
			 */
			rt2 = rtalloc1_fib(dst, 0, 0, fibnum);
			if (rt2 && rt2->rt_parent) {
				rtexpunge(rt2);
				RT_UNLOCK(rt2);
				rn = rnh->rnh_addaddr(ndst, netmask,
				    rnh, rt->rt_nodes);
			} else if (rt2) {
				/* undo the extra ref we got */
				RTFREE_LOCKED(rt2);
			}
		}

		/*
		 * If it still failed to go into the tree,
		 * then un-make it (this should be a function)
		 */
		if (rn == NULL) {
			if (rt->rt_gwroute)
				RTFREE(rt->rt_gwroute);
			if (rt->rt_ifa)
				IFAFREE(rt->rt_ifa);
			Free(rt_key(rt));
			RT_LOCK_DESTROY(rt);
			uma_zfree(rtzone, rt);
			senderr(EEXIST);
		}

		rt->rt_parent = NULL;

		/*
		 * If we got here from RESOLVE, then we are cloning
		 * so clone the rest, and note that we
		 * are a clone (and increment the parent's references)
		 */
		if (req == RTM_RESOLVE) {
			KASSERT(ret_nrt && *ret_nrt,
			    ("no route to clone from"));
			rt->rt_rmx = (*ret_nrt)->rt_rmx; /* copy metrics */
			rt->rt_rmx.rmx_pksent = 0; /* reset packet counter */
			if ((*ret_nrt)->rt_flags & RTF_CLONING) {
				/*
				 * NB: We do not bump the refcnt on the parent
				 * entry under the assumption that it will
				 * remain so long as we do. This is
				 * important when deleting the parent route
				 * as this operation requires traversing
				 * the tree to delete all clones and futzing
				 * with refcnts requires us to double-lock
				 * parent through this back reference.
				 */
				rt->rt_parent = *ret_nrt;
			}
		}

		/*
		 * If this protocol has something to add to this then
		 * allow it to do that as well.
		 */
		if (ifa->ifa_rtrequest)
			ifa->ifa_rtrequest(req, rt, info);

		/*
		 * We repeat the same procedure from rt_setgate() here because
		 * it doesn't fire when we call it there because the node
		 * hasn't been added to the tree yet.
		 */
		if (req == RTM_ADD &&
		    !(rt->rt_flags & RTF_HOST) && rt_mask(rt) != NULL) {
			struct rtfc_arg arg;
			arg.rnh = rnh;
			arg.rt0 = rt;
			rnh->rnh_walktree_from(rnh, rt_key(rt), rt_mask(rt),
			    rt_fixchange, &arg);
		}

		/*
		 * actually return a resultant rtentry and
		 * give the caller a single reference.
		 */
		if (ret_nrt) {
			*ret_nrt = rt;
			RT_ADDREF(rt);
		}
		RT_UNLOCK(rt);
		break;
	default:
		error = EOPNOTSUPP;
	}
bad:
	RADIX_NODE_HEAD_UNLOCK(rnh);
	return (error);
#undef senderr
}

#undef dst
#undef gateway
#undef netmask
#undef ifaaddr
#undef ifpaddr
#undef flags

/*
 * Called from rtrequest(RTM_DELETE, ...) to fix up the route's ``family''
 * (i.e., the routes related to it by the operation of cloning).  This
 * routine is iterated over all potential former-child-routes by way of
 * rnh->rnh_walktree_from() above, and those that actually are children of
 * the late parent (passed in as VP here) are themselves deleted.
 */
static int
rt_fixdelete(struct radix_node *rn, void *vp)
{
	struct rtentry *rt = RNTORT(rn);
	struct rtentry *rt0 = vp;

	if (rt->rt_parent == rt0 &&
	    !(rt->rt_flags & (RTF_PINNED | RTF_CLONING))) {
		return rtrequest_fib(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
		    rt->rt_flags, NULL, rt->rt_fibnum);
	}
	return 0;
}

/*
 * This routine is called from rt_setgate() to do the analogous thing for
 * adds and changes.
 * There is the added complication in this case of a
 * middle insert; i.e., insertion of a new network route between an older
 * network route and (cloned) host routes.  For this reason, a simple check
 * of rt->rt_parent is insufficient; each candidate route must be tested
 * against the (mask, value) of the new route (passed as before in vp)
 * to see if the new route matches it.
 *
 * XXX - it may be possible to do fixdelete() for changes and reserve this
 * routine just for adds.  I'm not sure why I thought it was necessary to do
 * changes this way.
 */

static int
rt_fixchange(struct radix_node *rn, void *vp)
{
	struct rtentry *rt = RNTORT(rn);
	struct rtfc_arg *ap = vp;
	struct rtentry *rt0 = ap->rt0;
	struct radix_node_head *rnh = ap->rnh;
	u_char *xk1, *xm1, *xk2, *xmp;
	int i, len, mlen;

	/* make sure we have a parent, and route is not pinned or cloning */
	if (!rt->rt_parent ||
	    (rt->rt_flags & (RTF_PINNED | RTF_CLONING)))
		return 0;

	if (rt->rt_parent == rt0)	/* parent match */
		goto delete_rt;
	/*
	 * There probably is a function somewhere which does this...
	 * if not, there should be.
	 */
	len = imin(rt_key(rt0)->sa_len, rt_key(rt)->sa_len);

	xk1 = (u_char *)rt_key(rt0);
	xm1 = (u_char *)rt_mask(rt0);
	xk2 = (u_char *)rt_key(rt);

	/* avoid applying a less specific route */
	xmp = (u_char *)rt_mask(rt->rt_parent);
	mlen = rt_key(rt->rt_parent)->sa_len;
	if (mlen > rt_key(rt0)->sa_len)		/* less specific route */
		return 0;
	for (i = rnh->rnh_treetop->rn_offset; i < mlen; i++)
		if ((xmp[i] & ~(xmp[i] ^ xm1[i])) != xmp[i])
			return 0;	/* less specific route */

	for (i = rnh->rnh_treetop->rn_offset; i < len; i++)
		if ((xk2[i] & xm1[i]) != xk1[i])
			return 0;	/* no match */

	/*
	 * OK, this node is a clone, and matches the node currently being
	 * changed/added under the node's mask.  So, get rid of it.
	 */
delete_rt:
	return rtrequest_fib(RTM_DELETE, rt_key(rt), NULL,
	    rt_mask(rt), rt->rt_flags, NULL, rt->rt_fibnum);
}
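/*
 * Descriptive note (added): rt_setgate() stores a route's destination key
 * and its gateway in a single malloc'd chunk (the key first, then the
 * gateway), reallocating the chunk when the new gateway does not fit, and
 * for RTF_GATEWAY routes it also looks up and caches the route to the
 * gateway in rt_gwroute.
 */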
1352 */ 1353 if (!RT_TRYLOCK(rt)) { 1354 RTFREE_LOCKED(gwrt); 1355 RT_LOCK(rt); 1356 goto again; 1357 } 1358 /* 1359 * If there is already a gwroute, then drop it. If we 1360 * are asked to replace route with itself, then do 1361 * not leak its refcounter. 1362 */ 1363 if (rt->rt_gwroute != NULL) { 1364 if (rt->rt_gwroute == gwrt) { 1365 RT_REMREF(rt->rt_gwroute); 1366 } else 1367 RTFREE(rt->rt_gwroute); 1368 } 1369 1370 if ((rt->rt_gwroute = gwrt) != NULL) 1371 RT_UNLOCK(rt->rt_gwroute); 1372 } 1373 1374 /* 1375 * Prepare to store the gateway in rt->rt_gateway. 1376 * Both dst and gateway are stored one after the other in the same 1377 * malloc'd chunk. If we have room, we can reuse the old buffer, 1378 * rt_gateway already points to the right place. 1379 * Otherwise, malloc a new block and update the 'dst' address. 1380 */ 1381 if (rt->rt_gateway == NULL || glen > SA_SIZE(rt->rt_gateway)) { 1382 caddr_t new; 1383 1384 R_Malloc(new, caddr_t, dlen + glen); 1385 if (new == NULL) 1386 return ENOBUFS; 1387 /* 1388 * XXX note, we copy from *dst and not *rt_key(rt) because 1389 * rt_setgate() can be called to initialize a newly 1390 * allocated route entry, in which case rt_key(rt) == NULL 1391 * (and also rt->rt_gateway == NULL). 1392 * Free()/free() handle a NULL argument just fine. 1393 */ 1394 bcopy(dst, new, dlen); 1395 Free(rt_key(rt)); /* free old block, if any */ 1396 rt_key(rt) = (struct sockaddr *)new; 1397 rt->rt_gateway = (struct sockaddr *)(new + dlen); 1398 } 1399 1400 /* 1401 * Copy the new gateway value into the memory chunk. 1402 */ 1403 bcopy(gate, rt->rt_gateway, glen); 1404 1405 /* 1406 * This isn't going to do anything useful for host routes, so 1407 * don't bother. Also make sure we have a reasonable mask 1408 * (we don't yet have one during adds). 1409 */ 1410 if (!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != 0) { 1411 struct rtfc_arg arg; 1412 1413 arg.rnh = rnh; 1414 arg.rt0 = rt; 1415 RT_UNLOCK(rt); /* XXX workaround LOR */ 1416 RADIX_NODE_HEAD_LOCK(rnh); 1417 RT_LOCK(rt); 1418 rnh->rnh_walktree_from(rnh, rt_key(rt), rt_mask(rt), 1419 rt_fixchange, &arg); 1420 RADIX_NODE_HEAD_UNLOCK(rnh); 1421 } 1422 1423 return 0; 1424 } 1425 1426 static void 1427 rt_maskedcopy(struct sockaddr *src, struct sockaddr *dst, struct sockaddr *netmask) 1428 { 1429 register u_char *cp1 = (u_char *)src; 1430 register u_char *cp2 = (u_char *)dst; 1431 register u_char *cp3 = (u_char *)netmask; 1432 u_char *cplim = cp2 + *cp3; 1433 u_char *cplim2 = cp2 + *cp1; 1434 1435 *cp2++ = *cp1++; *cp2++ = *cp1++; /* copies sa_len & sa_family */ 1436 cp3 += 2; 1437 if (cplim > cplim2) 1438 cplim = cplim2; 1439 while (cp2 < cplim) 1440 *cp2++ = *cp1++ & *cp3++; 1441 if (cp2 < cplim2) 1442 bzero((caddr_t)cp2, (unsigned)(cplim2 - cp2)); 1443 } 1444 1445 /* 1446 * Set up a routing table entry, normally 1447 * for an interface. 1448 */ 1449 #define _SOCKADDR_TMPSIZE 128 /* Not too big.. 
/*
 * Set up a routing table entry, normally
 * for an interface.
 */
#define _SOCKADDR_TMPSIZE 128 /* Not too big.. kernel stack size is limited */
static inline int
rtinit1(struct ifaddr *ifa, int cmd, int flags, int fibnum)
{
	struct sockaddr *dst;
	struct sockaddr *netmask;
	struct rtentry *rt = NULL;
	struct rt_addrinfo info;
	int error = 0;
	int startfib, endfib;
	char tempbuf[_SOCKADDR_TMPSIZE];
	int didwork = 0;
	int a_failure = 0;

	if (flags & RTF_HOST) {
		dst = ifa->ifa_dstaddr;
		netmask = NULL;
	} else {
		dst = ifa->ifa_addr;
		netmask = ifa->ifa_netmask;
	}
	if (dst->sa_family != AF_INET)
		fibnum = 0;
	if (fibnum == -1) {
		if (rt_add_addr_allfibs == 0 && cmd == (int)RTM_ADD) {
			startfib = endfib = curthread->td_proc->p_fibnum;
		} else {
			startfib = 0;
			endfib = rt_numfibs - 1;
		}
	} else {
		KASSERT((fibnum < rt_numfibs), ("rtinit1: bad fibnum"));
		startfib = fibnum;
		endfib = fibnum;
	}
	if (dst->sa_len == 0)
		return(EINVAL);

	/*
	 * If it's a delete, check that if it exists,
	 * it's on the correct interface or we might scrub
	 * a route to another ifa which would
	 * be confusing at best and possibly worse.
	 */
	if (cmd == RTM_DELETE) {
		/*
		 * It's a delete, so it should already exist..
		 * If it's a net, mask off the host bits
		 * (Assuming we have a mask)
		 * XXX this is kinda inet specific..
		 */
		if (netmask != NULL) {
			rt_maskedcopy(dst, (struct sockaddr *)tempbuf, netmask);
			dst = (struct sockaddr *)tempbuf;
		}
	}
	/*
	 * Now go through all the requested tables (fibs) and do the
	 * requested action.  Realistically, this will either be fib 0
	 * for protocols that don't do multiple tables or all the
	 * tables for those that do.  XXX For this version only AF_INET.
	 * When that changes, the code should be refactored into
	 * protocol-independent and protocol-dependent parts.
	 */
	for (fibnum = startfib; fibnum <= endfib; fibnum++) {
		if (cmd == RTM_DELETE) {
			struct radix_node_head *rnh;
			struct radix_node *rn;
			/*
			 * Look up an rtentry that is in the routing tree and
			 * contains the correct info.
1520 */ 1521 if ((rnh = V_rt_tables[fibnum][dst->sa_family]) == NULL) 1522 /* this table doesn't exist but others might */ 1523 continue; 1524 RADIX_NODE_HEAD_LOCK(rnh); 1525 #ifdef RADIX_MPATH 1526 if (rn_mpath_capable(rnh)) { 1527 1528 rn = rnh->rnh_matchaddr(dst, rnh); 1529 if (rn == NULL) 1530 error = ESRCH; 1531 else { 1532 rt = RNTORT(rn); 1533 /* 1534 * for interface route the 1535 * rt->rt_gateway is sockaddr_intf 1536 * for cloning ARP entries, so 1537 * rt_mpath_matchgate must use the 1538 * interface address 1539 */ 1540 rt = rt_mpath_matchgate(rt, 1541 ifa->ifa_addr); 1542 if (!rt) 1543 error = ESRCH; 1544 } 1545 } 1546 else 1547 #endif 1548 rn = rnh->rnh_lookup(dst, netmask, rnh); 1549 error = (rn == NULL || 1550 (rn->rn_flags & RNF_ROOT) || 1551 RNTORT(rn)->rt_ifa != ifa || 1552 !sa_equal((struct sockaddr *)rn->rn_key, dst)); 1553 RADIX_NODE_HEAD_UNLOCK(rnh); 1554 if (error) { 1555 /* this is only an error if bad on ALL tables */ 1556 continue; 1557 } 1558 } 1559 /* 1560 * Do the actual request 1561 */ 1562 bzero((caddr_t)&info, sizeof(info)); 1563 info.rti_ifa = ifa; 1564 info.rti_flags = flags | ifa->ifa_flags; 1565 info.rti_info[RTAX_DST] = dst; 1566 info.rti_info[RTAX_GATEWAY] = ifa->ifa_addr; 1567 info.rti_info[RTAX_NETMASK] = netmask; 1568 error = rtrequest1_fib(cmd, &info, &rt, fibnum); 1569 if (error == 0 && rt != NULL) { 1570 /* 1571 * notify any listening routing agents of the change 1572 */ 1573 RT_LOCK(rt); 1574 #ifdef RADIX_MPATH 1575 /* 1576 * in case address alias finds the first address 1577 * e.g. ifconfig bge0 192.103.54.246/24 1578 * e.g. ifconfig bge0 192.103.54.247/24 1579 * the address set in the route is 192.103.54.246 1580 * so we need to replace it with 192.103.54.247 1581 */ 1582 if (memcmp(rt->rt_ifa->ifa_addr, 1583 ifa->ifa_addr, ifa->ifa_addr->sa_len)) { 1584 IFAFREE(rt->rt_ifa); 1585 IFAREF(ifa); 1586 rt->rt_ifp = ifa->ifa_ifp; 1587 rt->rt_ifa = ifa; 1588 } 1589 #endif 1590 rt_newaddrmsg(cmd, ifa, error, rt); 1591 if (cmd == RTM_DELETE) { 1592 /* 1593 * If we are deleting, and we found an entry, 1594 * then it's been removed from the tree.. 1595 * now throw it away. 1596 */ 1597 RTFREE_LOCKED(rt); 1598 } else { 1599 if (cmd == RTM_ADD) { 1600 /* 1601 * We just wanted to add it.. 1602 * we don't actually need a reference. 1603 */ 1604 RT_REMREF(rt); 1605 } 1606 RT_UNLOCK(rt); 1607 } 1608 didwork = 1; 1609 } 1610 if (error) 1611 a_failure = error; 1612 } 1613 if (cmd == RTM_DELETE) { 1614 if (didwork) { 1615 error = 0; 1616 } else { 1617 /* we only give an error if it wasn't in any table */ 1618 error = ((flags & RTF_HOST) ? 1619 EHOSTUNREACH : ENETUNREACH); 1620 } 1621 } else { 1622 if (a_failure) { 1623 /* return an error if any of them failed */ 1624 error = a_failure; 1625 } 1626 } 1627 return (error); 1628 } 1629 1630 /* special one for inet internal use. may not use. */ 1631 int 1632 rtinit_fib(struct ifaddr *ifa, int cmd, int flags) 1633 { 1634 return (rtinit1(ifa, cmd, flags, -1)); 1635 } 1636 1637 /* 1638 * Set up a routing table entry, normally 1639 * for an interface. 1640 */ 1641 int 1642 rtinit(struct ifaddr *ifa, int cmd, int flags) 1643 { 1644 struct sockaddr *dst; 1645 int fib = 0; 1646 1647 if (flags & RTF_HOST) { 1648 dst = ifa->ifa_dstaddr; 1649 } else { 1650 dst = ifa->ifa_addr; 1651 } 1652 1653 if (dst->sa_family == AF_INET) 1654 fib = -1; 1655 return (rtinit1(ifa, cmd, flags, fib)); 1656 } 1657 1658 /* 1659 * rt_check() is invoked on each layer 2 output path, prior to 1660 * encapsulating outbound packets. 
1661 * 1662 * The function is mostly used to find a routing entry for the gateway, 1663 * which in some protocol families could also point to the link-level 1664 * address for the gateway itself (the side effect of revalidating the 1665 * route to the destination is rather pointless at this stage, we did it 1666 * already a moment before in the pr_output() routine to locate the ifp 1667 * and gateway to use). 1668 * 1669 * When we remove the layer-3 to layer-2 mapping tables from the 1670 * routing table, this function can be removed. 1671 * 1672 * === On input === 1673 * *dst is the address of the NEXT HOP (which coincides with the 1674 * final destination if directly reachable); 1675 * *lrt0 points to the cached route to the final destination; 1676 * *lrt is not meaningful; 1677 * fibnum is the index to the correct network fib for this packet 1678 * 1679 * === Operation === 1680 * If the route is marked down try to find a new route. If the route 1681 * to the gateway is gone, try to setup a new route. Otherwise, 1682 * if the route is marked for packets to be rejected, enforce that. 1683 * 1684 * === On return === 1685 * *dst is unchanged; 1686 * *lrt0 points to the (possibly new) route to the final destination 1687 * *lrt points to the route to the next hop 1688 * 1689 * Their values are meaningful ONLY if no error is returned. 1690 */ 1691 int 1692 rt_check(struct rtentry **lrt, struct rtentry **lrt0, struct sockaddr *dst) 1693 { 1694 return (rt_check_fib(lrt, lrt0, dst, 0)); 1695 } 1696 1697 int 1698 rt_check_fib(struct rtentry **lrt, struct rtentry **lrt0, struct sockaddr *dst, 1699 u_int fibnum) 1700 { 1701 struct rtentry *rt; 1702 struct rtentry *rt0; 1703 int error; 1704 1705 KASSERT(*lrt0 != NULL, ("rt_check")); 1706 rt = rt0 = *lrt0; 1707 1708 /* NB: the locking here is tortuous... */ 1709 RT_LOCK(rt); 1710 if ((rt->rt_flags & RTF_UP) == 0) { 1711 RT_UNLOCK(rt); 1712 rt = rtalloc1_fib(dst, 1, 0UL, fibnum); 1713 if (rt != NULL) { 1714 RT_REMREF(rt); 1715 /* XXX what about if change? */ 1716 } else 1717 return (EHOSTUNREACH); 1718 rt0 = rt; 1719 } 1720 /* XXX BSD/OS checks dst->sa_family != AF_NS */ 1721 if (rt->rt_flags & RTF_GATEWAY) { 1722 if (rt->rt_gwroute == NULL) 1723 goto lookup; 1724 rt = rt->rt_gwroute; 1725 RT_LOCK(rt); /* NB: gwroute */ 1726 if ((rt->rt_flags & RTF_UP) == 0) { 1727 RTFREE_LOCKED(rt); /* unlock gwroute */ 1728 rt = rt0; 1729 rt0->rt_gwroute = NULL; 1730 lookup: 1731 RT_UNLOCK(rt0); 1732 /* XXX MRT link level looked up in table 0 */ 1733 rt = rtalloc1_fib(rt->rt_gateway, 1, 0UL, 0); 1734 if (rt == rt0) { 1735 RT_REMREF(rt0); 1736 RT_UNLOCK(rt0); 1737 return (ENETUNREACH); 1738 } 1739 RT_LOCK(rt0); 1740 if (rt0->rt_gwroute != NULL) 1741 RTFREE(rt0->rt_gwroute); 1742 rt0->rt_gwroute = rt; 1743 if (rt == NULL) { 1744 RT_UNLOCK(rt0); 1745 return (EHOSTUNREACH); 1746 } 1747 } 1748 RT_UNLOCK(rt0); 1749 } 1750 /* XXX why are we inspecting rmx_expire? */ 1751 error = (rt->rt_flags & RTF_REJECT) && 1752 (rt->rt_rmx.rmx_expire == 0 || 1753 time_uptime < rt->rt_rmx.rmx_expire); 1754 if (error) { 1755 RT_UNLOCK(rt); 1756 return (rt == rt0 ? EHOSTDOWN : EHOSTUNREACH); 1757 } 1758 1759 *lrt = rt; 1760 *lrt0 = rt0; 1761 return (0); 1762 } 1763 1764 /* This must be before ip6_init2(), which is now SI_ORDER_MIDDLE */ 1765 SYSINIT(route, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, route_init, 0); 1766