1 /*- 2 * Copyright (c) 1980, 1986, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 4. Neither the name of the University nor the names of its contributors 14 * may be used to endorse or promote products derived from this software 15 * without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 * 29 * @(#)route.c 8.3.1.1 (Berkeley) 2/23/95 30 * $FreeBSD$ 31 */ 32 /************************************************************************ 33 * Note: In this file a 'fib' is a "forwarding information base" * 34 * which is the new name for an in-kernel routing (next hop) table. * 35 ***********************************************************************/ 36 37 #include "opt_inet.h" 38 #include "opt_route.h" 39 #include "opt_mrouting.h" 40 #include "opt_mpath.h" 41 42 #include <sys/param.h> 43 #include <sys/systm.h> 44 #include <sys/syslog.h> 45 #include <sys/malloc.h> 46 #include <sys/mbuf.h> 47 #include <sys/socket.h> 48 #include <sys/sysctl.h> 49 #include <sys/syslog.h> 50 #include <sys/sysproto.h> 51 #include <sys/proc.h> 52 #include <sys/domain.h> 53 #include <sys/kernel.h> 54 55 #include <net/if.h> 56 #include <net/if_dl.h> 57 #include <net/route.h> 58 #include <net/vnet.h> 59 #include <net/flowtable.h> 60 61 #ifdef RADIX_MPATH 62 #include <net/radix_mpath.h> 63 #endif 64 65 #include <netinet/in.h> 66 #include <netinet/ip_mroute.h> 67 68 #include <vm/uma.h> 69 70 u_int rt_numfibs = RT_NUMFIBS; 71 SYSCTL_UINT(_net, OID_AUTO, fibs, CTLFLAG_RD, &rt_numfibs, 0, ""); 72 /* 73 * Allow the boot code to use LESS than RT_MAXFIBS fibs. 74 * We can't do more because storage is statically allocated for now. 75 * (for compatibility reasons.. this will change). 76 */ 77 TUNABLE_INT("net.fibs", &rt_numfibs); 78 79 /* 80 * By default add routes to all fibs for new interfaces. 81 * Once this is set to 0, routes are only allocated on interface 82 * changes for the FIB of the caller when adding a new set of addresses 83 * to an interface. XXX this is a shotgun approach to a problem that needs 84 * a more fine-grained solution.. that will come.
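* For example, with rt_add_addr_allfibs = 1 (the default) and two fibs configured, bringing up an address on an interface installs the connected route in both fib 0 and fib 1; with it set to 0 only the fib of the process doing the configuration receives the route.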
85 */ 86 u_int rt_add_addr_allfibs = 1; 87 SYSCTL_UINT(_net, OID_AUTO, add_addr_allfibs, CTLFLAG_RW, 88 &rt_add_addr_allfibs, 0, ""); 89 TUNABLE_INT("net.add_addr_allfibs", &rt_add_addr_allfibs); 90 91 VNET_DEFINE(struct rtstat, rtstat); 92 #define V_rtstat VNET(rtstat) 93 94 VNET_DEFINE(struct radix_node_head *, rt_tables); 95 #define V_rt_tables VNET(rt_tables) 96 97 VNET_DEFINE(int, rttrash); /* routes not in table but not freed */ 98 #define V_rttrash VNET(rttrash) 99 100 101 /* compare two sockaddr structures */ 102 #define sa_equal(a1, a2) (bcmp((a1), (a2), (a1)->sa_len) == 0) 103 104 /* 105 * Convert a 'struct radix_node *' to a 'struct rtentry *'. 106 * The operation can be done safely (in this code) because a 107 * 'struct rtentry' starts with two 'struct radix_node''s, the first 108 * one representing leaf nodes in the routing tree, which is 109 * what the code in radix.c passes us as a 'struct radix_node'. 110 * 111 * But because there are a lot of assumptions in this conversion, 112 * do not cast explicitly, but always use the macro below. 113 */ 114 #define RNTORT(p) ((struct rtentry *)(p)) 115 116 static VNET_DEFINE(uma_zone_t, rtzone); /* Routing table UMA zone. */ 117 #define V_rtzone VNET(rtzone) 118 119 #if 0 120 /* default fib for tunnels to use */ 121 u_int tunnel_fib = 0; 122 SYSCTL_INT(_net, OID_AUTO, tunnelfib, CTLFLAG_RD, &tunnel_fib, 0, ""); 123 #endif 124 125 /* 126 * handler for net.my_fibnum 127 */ 128 static int 129 sysctl_my_fibnum(SYSCTL_HANDLER_ARGS) 130 { 131 int fibnum; 132 int error; 133 134 fibnum = curthread->td_proc->p_fibnum; 135 error = sysctl_handle_int(oidp, &fibnum, 0, req); 136 return (error); 137 } 138 139 SYSCTL_PROC(_net, OID_AUTO, my_fibnum, CTLTYPE_INT|CTLFLAG_RD, 140 NULL, 0, &sysctl_my_fibnum, "I", "default FIB of caller"); 141 142 static __inline struct radix_node_head ** 143 rt_tables_get_rnh_ptr(int table, int fam) 144 { 145 struct radix_node_head **rnh; 146 147 KASSERT(table >= 0 && table < rt_numfibs, ("%s: table out of bounds.", 148 __func__)); 149 KASSERT(fam >= 0 && fam < (AF_MAX+1), ("%s: fam out of bounds.", 150 __func__)); 151 152 /* rnh is [fib=0][af=0]. */ 153 rnh = (struct radix_node_head **)V_rt_tables; 154 /* Get the offset to the requested table and fam. */ 155 rnh += table * (AF_MAX+1) + fam; 156 157 return (rnh); 158 } 159 160 struct radix_node_head * 161 rt_tables_get_rnh(int table, int fam) 162 { 163 164 return (*rt_tables_get_rnh_ptr(table, fam)); 165 } 166 167 /* 168 * route initialization must occur before ip6_init2(), which happens at 169 * SI_ORDER_MIDDLE. 170 */ 171 static void 172 route_init(void) 173 { 174 struct domain *dom; 175 int max_keylen = 0; 176 177 /* whack the tunable ints into line.
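* (i.e. clamp rt_numfibs into the range [1, RT_MAXFIBS] before the tables are sized from it)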
*/ 178 if (rt_numfibs > RT_MAXFIBS) 179 rt_numfibs = RT_MAXFIBS; 180 if (rt_numfibs == 0) 181 rt_numfibs = 1; 182 183 for (dom = domains; dom; dom = dom->dom_next) 184 if (dom->dom_maxrtkey > max_keylen) 185 max_keylen = dom->dom_maxrtkey; 186 187 rn_init(max_keylen); /* init all zeroes, all ones, mask table */ 188 } 189 SYSINIT(route_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, route_init, 0); 190 191 static void 192 vnet_route_init(const void *unused __unused) 193 { 194 struct domain *dom; 195 struct radix_node_head **rnh; 196 int table; 197 int fam; 198 199 V_rt_tables = malloc(rt_numfibs * (AF_MAX+1) * 200 sizeof(struct radix_node_head *), M_RTABLE, M_WAITOK|M_ZERO); 201 202 V_rtzone = uma_zcreate("rtentry", sizeof(struct rtentry), NULL, NULL, 203 NULL, NULL, UMA_ALIGN_PTR, 0); 204 for (dom = domains; dom; dom = dom->dom_next) { 205 if (dom->dom_rtattach) { 206 for (table = 0; table < rt_numfibs; table++) { 207 if ( (fam = dom->dom_family) == AF_INET || 208 table == 0) { 209 /* for now only AF_INET has > 1 table */ 210 /* XXX MRT 211 * rtattach will be also called 212 * from vfs_export.c but the 213 * offset will be 0 214 * (only for AF_INET and AF_INET6 215 * which don't need it anyhow) 216 */ 217 rnh = rt_tables_get_rnh_ptr(table, fam); 218 if (rnh == NULL) 219 panic("%s: rnh NULL", __func__); 220 dom->dom_rtattach((void **)rnh, 221 dom->dom_rtoffset); 222 } else { 223 break; 224 } 225 } 226 } 227 } 228 } 229 VNET_SYSINIT(vnet_route_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH, 230 vnet_route_init, 0); 231 232 #ifdef VIMAGE 233 static void 234 vnet_route_uninit(const void *unused __unused) 235 { 236 int table; 237 int fam; 238 struct domain *dom; 239 struct radix_node_head **rnh; 240 241 for (dom = domains; dom; dom = dom->dom_next) { 242 if (dom->dom_rtdetach) { 243 for (table = 0; table < rt_numfibs; table++) { 244 if ( (fam = dom->dom_family) == AF_INET || 245 table == 0) { 246 /* For now only AF_INET has > 1 tbl. */ 247 rnh = rt_tables_get_rnh_ptr(table, fam); 248 if (rnh == NULL) 249 panic("%s: rnh NULL", __func__); 250 dom->dom_rtdetach((void **)rnh, 251 dom->dom_rtoffset); 252 } else { 253 break; 254 } 255 } 256 } 257 } 258 } 259 VNET_SYSUNINIT(vnet_route_uninit, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, 260 vnet_route_uninit, 0); 261 #endif 262 263 #ifndef _SYS_SYSPROTO_H_ 264 struct setfib_args { 265 int fibnum; 266 }; 267 #endif 268 int 269 setfib(struct thread *td, struct setfib_args *uap) 270 { 271 if (uap->fibnum < 0 || uap->fibnum >= rt_numfibs) 272 return EINVAL; 273 td->td_proc->p_fibnum = uap->fibnum; 274 return (0); 275 } 276 277 /* 278 * Packet routing routines. 
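* rtalloc(), rtalloc_fib(), rtalloc_ign() and rtalloc_ign_fib() below cache the result in ro->ro_rt: a cached entry that still has an interface and RTF_UP set is reused as is; otherwise a fresh lookup is done and the result is stored referenced but unlocked.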
279 */ 280 void 281 rtalloc(struct route *ro) 282 { 283 rtalloc_ign_fib(ro, 0UL, 0); 284 } 285 286 void 287 rtalloc_fib(struct route *ro, u_int fibnum) 288 { 289 rtalloc_ign_fib(ro, 0UL, fibnum); 290 } 291 292 void 293 rtalloc_ign(struct route *ro, u_long ignore) 294 { 295 struct rtentry *rt; 296 297 if ((rt = ro->ro_rt) != NULL) { 298 if (rt->rt_ifp != NULL && rt->rt_flags & RTF_UP) 299 return; 300 RTFREE(rt); 301 ro->ro_rt = NULL; 302 } 303 ro->ro_rt = rtalloc1_fib(&ro->ro_dst, 1, ignore, 0); 304 if (ro->ro_rt) 305 RT_UNLOCK(ro->ro_rt); 306 } 307 308 void 309 rtalloc_ign_fib(struct route *ro, u_long ignore, u_int fibnum) 310 { 311 struct rtentry *rt; 312 313 if ((rt = ro->ro_rt) != NULL) { 314 if (rt->rt_ifp != NULL && rt->rt_flags & RTF_UP) 315 return; 316 RTFREE(rt); 317 ro->ro_rt = NULL; 318 } 319 ro->ro_rt = rtalloc1_fib(&ro->ro_dst, 1, ignore, fibnum); 320 if (ro->ro_rt) 321 RT_UNLOCK(ro->ro_rt); 322 } 323 324 /* 325 * Look up the route that matches the address given 326 * Or, at least try.. Create a cloned route if needed. 327 * 328 * The returned route, if any, is locked. 329 */ 330 struct rtentry * 331 rtalloc1(struct sockaddr *dst, int report, u_long ignflags) 332 { 333 return (rtalloc1_fib(dst, report, ignflags, 0)); 334 } 335 336 struct rtentry * 337 rtalloc1_fib(struct sockaddr *dst, int report, u_long ignflags, 338 u_int fibnum) 339 { 340 struct radix_node_head *rnh; 341 struct radix_node *rn; 342 struct rtentry *newrt; 343 struct rt_addrinfo info; 344 int err = 0, msgtype = RTM_MISS; 345 int needlock; 346 347 KASSERT((fibnum < rt_numfibs), ("rtalloc1_fib: bad fibnum")); 348 if (dst->sa_family != AF_INET) /* Only INET supports > 1 fib now */ 349 fibnum = 0; 350 rnh = rt_tables_get_rnh(fibnum, dst->sa_family); 351 newrt = NULL; 352 if (rnh == NULL) 353 goto miss; 354 355 /* 356 * Look up the address in the table for that Address Family 357 */ 358 needlock = !(ignflags & RTF_RNH_LOCKED); 359 if (needlock) 360 RADIX_NODE_HEAD_RLOCK(rnh); 361 #ifdef INVARIANTS 362 else 363 RADIX_NODE_HEAD_LOCK_ASSERT(rnh); 364 #endif 365 rn = rnh->rnh_matchaddr(dst, rnh); 366 if (rn && ((rn->rn_flags & RNF_ROOT) == 0)) { 367 newrt = RNTORT(rn); 368 RT_LOCK(newrt); 369 RT_ADDREF(newrt); 370 if (needlock) 371 RADIX_NODE_HEAD_RUNLOCK(rnh); 372 goto done; 373 374 } else if (needlock) 375 RADIX_NODE_HEAD_RUNLOCK(rnh); 376 377 /* 378 * Either we hit the root or couldn't find any match, 379 * Which basically means 380 * "caint get there frm here" 381 */ 382 miss: 383 V_rtstat.rts_unreach++; 384 385 if (report) { 386 /* 387 * If required, report the failure to the supervising 388 * Authorities. 389 * For a delete, this is not an error. (report == 0) 390 */ 391 bzero(&info, sizeof(info)); 392 info.rti_info[RTAX_DST] = dst; 393 rt_missmsg(msgtype, &info, 0, err); 394 } 395 done: 396 if (newrt) 397 RT_LOCK_ASSERT(newrt); 398 return (newrt); 399 } 400 401 /* 402 * Remove a reference count from an rtentry. 403 * If the count gets low enough, take it out of the routing table 404 */ 405 void 406 rtfree(struct rtentry *rt) 407 { 408 struct radix_node_head *rnh; 409 410 KASSERT(rt != NULL,("%s: NULL rt", __func__)); 411 rnh = rt_tables_get_rnh(rt->rt_fibnum, rt_key(rt)->sa_family); 412 KASSERT(rnh != NULL,("%s: NULL rnh", __func__)); 413 414 RT_LOCK_ASSERT(rt); 415 416 /* 417 * The callers should use RTFREE_LOCKED() or RTFREE(), so 418 * we should come here exactly with the last reference. 
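* A typical caller sequence looks roughly like this (illustrative sketch
* only, not lifted from any real caller):
*
*	rt = rtalloc1_fib(dst, 1, 0UL, fibnum);	// NULL on a miss, else
*						// returned locked, +1 ref
*	if (rt != NULL) {
*		... use rt ...
*		RTFREE_LOCKED(rt);	// drop the ref; the entry itself is
*	}				// reclaimed once it also leaves the table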
419 */ 420 RT_REMREF(rt); 421 if (rt->rt_refcnt > 0) { 422 log(LOG_DEBUG, "%s: %p has %d refs\n", __func__, rt, rt->rt_refcnt); 423 goto done; 424 } 425 426 /* 427 * On last reference give the "close method" a chance 428 * to clean up private state. This also permits (for 429 * IPv4 and IPv6) a chance to decide if the routing table 430 * entry should be purged immediately or at a later time. 431 * When an immediate purge is to happen the close routine 432 * typically calls rtexpunge which clears the RTF_UP flag 433 * on the entry so that the code below reclaims the storage. 434 */ 435 if (rt->rt_refcnt == 0 && rnh->rnh_close) 436 rnh->rnh_close((struct radix_node *)rt, rnh); 437 438 /* 439 * If we are no longer "up" (and ref == 0) 440 * then we can free the resources associated 441 * with the route. 442 */ 443 if ((rt->rt_flags & RTF_UP) == 0) { 444 if (rt->rt_nodes->rn_flags & (RNF_ACTIVE | RNF_ROOT)) 445 panic("rtfree 2"); 446 /* 447 * the rtentry must have been removed from the routing table 448 * so it is represented in rttrash.. remove that now. 449 */ 450 V_rttrash--; 451 #ifdef DIAGNOSTIC 452 if (rt->rt_refcnt < 0) { 453 printf("rtfree: %p not freed (neg refs)\n", rt); 454 goto done; 455 } 456 #endif 457 /* 458 * release the references we hold on other items, 459 * e.g. other routes and ifaddrs. 460 */ 461 if (rt->rt_ifa) 462 ifa_free(rt->rt_ifa); 463 /* 464 * The key is separately alloc'd so free it (see rt_setgate()). 465 * This also frees the gateway, as they are always malloc'd 466 * together. 467 */ 468 Free(rt_key(rt)); 469 470 /* 471 * and the rtentry itself of course 472 */ 473 RT_LOCK_DESTROY(rt); 474 uma_zfree(V_rtzone, rt); 475 return; 476 } 477 done: 478 RT_UNLOCK(rt); 479 } 480 481 482 /* 483 * Force a routing table entry to the specified 484 * destination to go through the given gateway. 485 * Normally called as a result of a routing redirect 486 * message from the network layer. 487 */ 488 void 489 rtredirect(struct sockaddr *dst, 490 struct sockaddr *gateway, 491 struct sockaddr *netmask, 492 int flags, 493 struct sockaddr *src) 494 { 495 rtredirect_fib(dst, gateway, netmask, flags, src, 0); 496 } 497 498 void 499 rtredirect_fib(struct sockaddr *dst, 500 struct sockaddr *gateway, 501 struct sockaddr *netmask, 502 int flags, 503 struct sockaddr *src, 504 u_int fibnum) 505 { 506 struct rtentry *rt, *rt0 = NULL; 507 int error = 0; 508 short *stat = NULL; 509 struct rt_addrinfo info; 510 struct ifaddr *ifa; 511 struct radix_node_head *rnh; 512 513 ifa = NULL; 514 rnh = rt_tables_get_rnh(fibnum, dst->sa_family); 515 if (rnh == NULL) { 516 error = EAFNOSUPPORT; 517 goto out; 518 } 519 520 /* verify the gateway is directly reachable */ 521 if ((ifa = ifa_ifwithnet(gateway, 0)) == NULL) { 522 error = ENETUNREACH; 523 goto out; 524 } 525 rt = rtalloc1_fib(dst, 0, 0UL, fibnum); /* NB: rt is locked */ 526 /* 527 * If the redirect isn't from our current router for this dst, 528 * it's either old or wrong. If it redirects us to ourselves, 529 * we have a routing loop, perhaps as a result of an interface 530 * going down recently. 531 */ 532 if (!(flags & RTF_DONE) && rt && 533 (!sa_equal(src, rt->rt_gateway) || rt->rt_ifa != ifa)) 534 error = EINVAL; 535 else if (ifa_ifwithaddr_check(gateway)) 536 error = EHOSTUNREACH; 537 if (error) 538 goto done; 539 /* 540 * Create a new entry if we just got back a wildcard entry 541 * or the lookup failed. This is necessary for hosts 542 * which use routing redirects generated by smart gateways 543 * to dynamically build the routing tables.
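* (the new entry is installed below, at the 'create' label, with RTF_GATEWAY|RTF_DYNAMIC set, via rtrequest1_fib(RTM_ADD, ...))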
544 */ 545 if (rt == NULL || (rt_mask(rt) && rt_mask(rt)->sa_len < 2)) 546 goto create; 547 /* 548 * Don't listen to the redirect if it's 549 * for a route to an interface. 550 */ 551 if (rt->rt_flags & RTF_GATEWAY) { 552 if (((rt->rt_flags & RTF_HOST) == 0) && (flags & RTF_HOST)) { 553 /* 554 * Changing from route to net => route to host. 555 * Create new route, rather than smashing route to net. 556 */ 557 create: 558 rt0 = rt; 559 rt = NULL; 560 561 flags |= RTF_GATEWAY | RTF_DYNAMIC; 562 bzero((caddr_t)&info, sizeof(info)); 563 info.rti_info[RTAX_DST] = dst; 564 info.rti_info[RTAX_GATEWAY] = gateway; 565 info.rti_info[RTAX_NETMASK] = netmask; 566 info.rti_ifa = ifa; 567 info.rti_flags = flags; 568 if (rt0 != NULL) 569 RT_UNLOCK(rt0); /* drop lock to avoid LOR with RNH */ 570 error = rtrequest1_fib(RTM_ADD, &info, &rt, fibnum); 571 if (rt != NULL) { 572 RT_LOCK(rt); 573 if (rt0 != NULL) 574 EVENTHANDLER_INVOKE(route_redirect_event, rt0, rt, dst); 575 flags = rt->rt_flags; 576 } 577 if (rt0 != NULL) 578 RTFREE(rt0); 579 580 stat = &V_rtstat.rts_dynamic; 581 } else { 582 struct rtentry *gwrt; 583 584 /* 585 * Smash the current notion of the gateway to 586 * this destination. Should check about netmask!!! 587 */ 588 rt->rt_flags |= RTF_MODIFIED; 589 flags |= RTF_MODIFIED; 590 stat = &V_rtstat.rts_newgateway; 591 /* 592 * add the key and gateway (in one malloc'd chunk). 593 */ 594 RT_UNLOCK(rt); 595 RADIX_NODE_HEAD_LOCK(rnh); 596 RT_LOCK(rt); 597 rt_setgate(rt, rt_key(rt), gateway); 598 gwrt = rtalloc1(gateway, 1, RTF_RNH_LOCKED); 599 RADIX_NODE_HEAD_UNLOCK(rnh); 600 EVENTHANDLER_INVOKE(route_redirect_event, rt, gwrt, dst); 601 RTFREE_LOCKED(gwrt); 602 } 603 } else 604 error = EHOSTUNREACH; 605 done: 606 if (rt) 607 RTFREE_LOCKED(rt); 608 out: 609 if (error) 610 V_rtstat.rts_badredirect++; 611 else if (stat != NULL) 612 (*stat)++; 613 bzero((caddr_t)&info, sizeof(info)); 614 info.rti_info[RTAX_DST] = dst; 615 info.rti_info[RTAX_GATEWAY] = gateway; 616 info.rti_info[RTAX_NETMASK] = netmask; 617 info.rti_info[RTAX_AUTHOR] = src; 618 rt_missmsg(RTM_REDIRECT, &info, flags, error); 619 if (ifa != NULL) 620 ifa_free(ifa); 621 } 622 623 int 624 rtioctl(u_long req, caddr_t data) 625 { 626 return (rtioctl_fib(req, data, 0)); 627 } 628 629 /* 630 * Routing table ioctl interface. 631 */ 632 int 633 rtioctl_fib(u_long req, caddr_t data, u_int fibnum) 634 { 635 636 /* 637 * If more ioctl commands are added here, make sure the proper 638 * super-user checks are being performed because it is possible for 639 * prison-root to make it this far if raw sockets have been enabled 640 * in jails. 641 */ 642 #ifdef INET 643 /* Multicast goop, grrr... */ 644 return mrt_ioctl ? mrt_ioctl(req, data, fibnum) : EOPNOTSUPP; 645 #else /* INET */ 646 return ENXIO; 647 #endif /* INET */ 648 } 649 650 /* 651 * For both ifa_ifwithroute() routines, 'ifa' is returned referenced. 652 */ 653 struct ifaddr * 654 ifa_ifwithroute(int flags, struct sockaddr *dst, struct sockaddr *gateway) 655 { 656 return (ifa_ifwithroute_fib(flags, dst, gateway, 0)); 657 } 658 659 struct ifaddr * 660 ifa_ifwithroute_fib(int flags, struct sockaddr *dst, struct sockaddr *gateway, 661 u_int fibnum) 662 { 663 register struct ifaddr *ifa; 664 int not_found = 0; 665 666 if ((flags & RTF_GATEWAY) == 0) { 667 /* 668 * If we are adding a route to an interface, 669 * and the interface is a pt to pt link 670 * we should search for the destination 671 * as our clue to the interface. Otherwise 672 * we can use the local address. 
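* (e.g. for a host route over a point-to-point link the peer's address, i.e. the interface's destination address, is what identifies the right interface)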
673 */ 674 ifa = NULL; 675 if (flags & RTF_HOST) 676 ifa = ifa_ifwithdstaddr(dst); 677 if (ifa == NULL) 678 ifa = ifa_ifwithaddr(gateway); 679 } else { 680 /* 681 * If we are adding a route to a remote net 682 * or host, the gateway may still be on the 683 * other end of a pt to pt link. 684 */ 685 ifa = ifa_ifwithdstaddr(gateway); 686 } 687 if (ifa == NULL) 688 ifa = ifa_ifwithnet(gateway, 0); 689 if (ifa == NULL) { 690 struct rtentry *rt = rtalloc1_fib(gateway, 0, RTF_RNH_LOCKED, fibnum); 691 if (rt == NULL) 692 return (NULL); 693 /* 694 * dismiss a gateway that is reachable only 695 * through the default router 696 */ 697 switch (gateway->sa_family) { 698 case AF_INET: 699 if (satosin(rt_key(rt))->sin_addr.s_addr == INADDR_ANY) 700 not_found = 1; 701 break; 702 case AF_INET6: 703 if (IN6_IS_ADDR_UNSPECIFIED(&satosin6(rt_key(rt))->sin6_addr)) 704 not_found = 1; 705 break; 706 default: 707 break; 708 } 709 if (!not_found && rt->rt_ifa != NULL) { 710 ifa = rt->rt_ifa; 711 ifa_ref(ifa); 712 } 713 RT_REMREF(rt); 714 RT_UNLOCK(rt); 715 if (not_found || ifa == NULL) 716 return (NULL); 717 } 718 if (ifa->ifa_addr->sa_family != dst->sa_family) { 719 struct ifaddr *oifa = ifa; 720 ifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp); 721 if (ifa == NULL) 722 ifa = oifa; 723 else 724 ifa_free(oifa); 725 } 726 return (ifa); 727 } 728 729 /* 730 * Do appropriate manipulations of a routing tree given 731 * all the bits of info needed 732 */ 733 int 734 rtrequest(int req, 735 struct sockaddr *dst, 736 struct sockaddr *gateway, 737 struct sockaddr *netmask, 738 int flags, 739 struct rtentry **ret_nrt) 740 { 741 return (rtrequest_fib(req, dst, gateway, netmask, flags, ret_nrt, 0)); 742 } 743 744 int 745 rtrequest_fib(int req, 746 struct sockaddr *dst, 747 struct sockaddr *gateway, 748 struct sockaddr *netmask, 749 int flags, 750 struct rtentry **ret_nrt, 751 u_int fibnum) 752 { 753 struct rt_addrinfo info; 754 755 if (dst->sa_len == 0) 756 return(EINVAL); 757 758 bzero((caddr_t)&info, sizeof(info)); 759 info.rti_flags = flags; 760 info.rti_info[RTAX_DST] = dst; 761 info.rti_info[RTAX_GATEWAY] = gateway; 762 info.rti_info[RTAX_NETMASK] = netmask; 763 return rtrequest1_fib(req, &info, ret_nrt, fibnum); 764 } 765 766 /* 767 * These (questionable) definitions of apparent local variables apply 768 * to the next two functions. XXXXXX!!! 769 */ 770 #define dst info->rti_info[RTAX_DST] 771 #define gateway info->rti_info[RTAX_GATEWAY] 772 #define netmask info->rti_info[RTAX_NETMASK] 773 #define ifaaddr info->rti_info[RTAX_IFA] 774 #define ifpaddr info->rti_info[RTAX_IFP] 775 #define flags info->rti_flags 776 777 int 778 rt_getifa(struct rt_addrinfo *info) 779 { 780 return (rt_getifa_fib(info, 0)); 781 } 782 783 /* 784 * Look up rt_addrinfo for a specific fib. Note that if rti_ifa is defined, 785 * it will be referenced so the caller must free it. 786 */ 787 int 788 rt_getifa_fib(struct rt_addrinfo *info, u_int fibnum) 789 { 790 struct ifaddr *ifa; 791 int error = 0; 792 793 /* 794 * ifp may be specified by sockaddr_dl 795 * when protocol address is ambiguous. 796 */ 797 if (info->rti_ifp == NULL && ifpaddr != NULL && 798 ifpaddr->sa_family == AF_LINK && 799 (ifa = ifa_ifwithnet(ifpaddr, 0)) != NULL) { 800 info->rti_ifp = ifa->ifa_ifp; 801 ifa_free(ifa); 802 } 803 if (info->rti_ifa == NULL && ifaaddr != NULL) 804 info->rti_ifa = ifa_ifwithaddr(ifaaddr); 805 if (info->rti_ifa == NULL) { 806 struct sockaddr *sa; 807 808 sa = ifaaddr != NULL ? ifaaddr : 809 (gateway != NULL ? 
gateway : dst); 810 if (sa != NULL && info->rti_ifp != NULL) 811 info->rti_ifa = ifaof_ifpforaddr(sa, info->rti_ifp); 812 else if (dst != NULL && gateway != NULL) 813 info->rti_ifa = ifa_ifwithroute_fib(flags, dst, gateway, 814 fibnum); 815 else if (sa != NULL) 816 info->rti_ifa = ifa_ifwithroute_fib(flags, sa, sa, 817 fibnum); 818 } 819 if ((ifa = info->rti_ifa) != NULL) { 820 if (info->rti_ifp == NULL) 821 info->rti_ifp = ifa->ifa_ifp; 822 } else 823 error = ENETUNREACH; 824 return (error); 825 } 826 827 /* 828 * Expunges references to a route that's about to be reclaimed. 829 * The route must be locked. 830 */ 831 int 832 rtexpunge(struct rtentry *rt) 833 { 834 #if !defined(RADIX_MPATH) 835 struct radix_node *rn; 836 #else 837 struct rt_addrinfo info; 838 int fib; 839 struct rtentry *rt0; 840 #endif 841 struct radix_node_head *rnh; 842 struct ifaddr *ifa; 843 int error = 0; 844 845 /* 846 * Find the correct routing tree to use for this Address Family 847 */ 848 rnh = rt_tables_get_rnh(rt->rt_fibnum, rt_key(rt)->sa_family); 849 RT_LOCK_ASSERT(rt); 850 if (rnh == NULL) 851 return (EAFNOSUPPORT); 852 RADIX_NODE_HEAD_LOCK_ASSERT(rnh); 853 854 #ifdef RADIX_MPATH 855 fib = rt->rt_fibnum; 856 bzero(&info, sizeof(info)); 857 info.rti_ifp = rt->rt_ifp; 858 info.rti_flags = RTF_RNH_LOCKED; 859 info.rti_info[RTAX_DST] = rt_key(rt); 860 info.rti_info[RTAX_GATEWAY] = rt->rt_ifa->ifa_addr; 861 862 RT_UNLOCK(rt); 863 error = rtrequest1_fib(RTM_DELETE, &info, &rt0, fib); 864 865 if (error == 0 && rt0 != NULL) { 866 rt = rt0; 867 RT_LOCK(rt); 868 } else if (error != 0) { 869 RT_LOCK(rt); 870 return (error); 871 } 872 #else 873 /* 874 * Remove the item from the tree; it should be there, 875 * but when callers invoke us blindly it may not (sigh). 876 */ 877 rn = rnh->rnh_deladdr(rt_key(rt), rt_mask(rt), rnh); 878 if (rn == NULL) { 879 error = ESRCH; 880 goto bad; 881 } 882 KASSERT((rn->rn_flags & (RNF_ACTIVE | RNF_ROOT)) == 0, 883 ("unexpected flags 0x%x", rn->rn_flags)); 884 KASSERT(rt == RNTORT(rn), 885 ("lookup mismatch, rt %p rn %p", rt, rn)); 886 #endif /* RADIX_MPATH */ 887 888 rt->rt_flags &= ~RTF_UP; 889 890 /* 891 * Give the protocol a chance to keep things in sync. 892 */ 893 if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest) { 894 struct rt_addrinfo info; 895 896 bzero((caddr_t)&info, sizeof(info)); 897 info.rti_flags = rt->rt_flags; 898 info.rti_info[RTAX_DST] = rt_key(rt); 899 info.rti_info[RTAX_GATEWAY] = rt->rt_gateway; 900 info.rti_info[RTAX_NETMASK] = rt_mask(rt); 901 ifa->ifa_rtrequest(RTM_DELETE, rt, &info); 902 } 903 904 /* 905 * one more rtentry floating around that is not 906 * linked to the routing table. 907 */ 908 V_rttrash++; 909 #if !defined(RADIX_MPATH) 910 bad: 911 #endif 912 return (error); 913 } 914 915 #ifdef RADIX_MPATH 916 static int 917 rn_mpath_update(int req, struct rt_addrinfo *info, 918 struct radix_node_head *rnh, struct rtentry **ret_nrt) 919 { 920 /* 921 * if we got multipath routes, we require users to specify 922 * a matching RTAX_GATEWAY. 
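* (e.g. 'route delete <dst> <gateway>' can single out one of several equal-destination entries, while a bare 'route delete <dst>' cannot)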
923 */ 924 struct rtentry *rt, *rto = NULL; 925 register struct radix_node *rn; 926 int error = 0; 927 928 rn = rnh->rnh_matchaddr(dst, rnh); 929 if (rn == NULL) 930 return (ESRCH); 931 rto = rt = RNTORT(rn); 932 rt = rt_mpath_matchgate(rt, gateway); 933 if (rt == NULL) 934 return (ESRCH); 935 /* 936 * this is the first entry in the chain 937 */ 938 if (rto == rt) { 939 rn = rn_mpath_next((struct radix_node *)rt); 940 /* 941 * there is another entry, now it's active 942 */ 943 if (rn) { 944 rto = RNTORT(rn); 945 RT_LOCK(rto); 946 rto->rt_flags |= RTF_UP; 947 RT_UNLOCK(rto); 948 } else if (rt->rt_flags & RTF_GATEWAY) { 949 /* 950 * For gateway routes, we need to 951 * make sure that we are deleting 952 * the correct gateway. 953 * rt_mpath_matchgate() does not 954 * check the case when there is only 955 * one route in the chain. 956 */ 957 if (gateway && 958 (rt->rt_gateway->sa_len != gateway->sa_len || 959 memcmp(rt->rt_gateway, gateway, gateway->sa_len))) 960 error = ESRCH; 961 else { 962 /* 963 * remove from tree before returning it 964 * to the caller 965 */ 966 rn = rnh->rnh_deladdr(dst, netmask, rnh); 967 KASSERT(rt == RNTORT(rn), ("radix node disappeared")); 968 goto gwdelete; 969 } 970 971 } 972 /* 973 * use the normal delete code to remove 974 * the first entry 975 */ 976 if (req != RTM_DELETE) 977 goto nondelete; 978 979 error = ENOENT; 980 goto done; 981 } 982 983 /* 984 * if the entry is 2nd and on up 985 */ 986 if ((req == RTM_DELETE) && !rt_mpath_deldup(rto, rt)) 987 panic ("rtrequest1: rt_mpath_deldup"); 988 gwdelete: 989 RT_LOCK(rt); 990 RT_ADDREF(rt); 991 if (req == RTM_DELETE) { 992 rt->rt_flags &= ~RTF_UP; 993 /* 994 * One more rtentry floating around that is not 995 * linked to the routing table. rttrash will be decremented 996 * when RTFREE(rt) is eventually called. 997 */ 998 V_rttrash++; 999 } 1000 1001 nondelete: 1002 if (req != RTM_DELETE) 1003 panic("unrecognized request %d", req); 1004 1005 1006 /* 1007 * If the caller wants it, then it can have it, 1008 * but it's up to it to free the rtentry as we won't be 1009 * doing it. 1010 */ 1011 if (ret_nrt) { 1012 *ret_nrt = rt; 1013 RT_UNLOCK(rt); 1014 } else 1015 RTFREE_LOCKED(rt); 1016 done: 1017 return (error); 1018 } 1019 #endif 1020 1021 int 1022 rtrequest1_fib(int req, struct rt_addrinfo *info, struct rtentry **ret_nrt, 1023 u_int fibnum) 1024 { 1025 int error = 0, needlock = 0; 1026 register struct rtentry *rt; 1027 #ifdef FLOWTABLE 1028 register struct rtentry *rt0; 1029 #endif 1030 register struct radix_node *rn; 1031 register struct radix_node_head *rnh; 1032 struct ifaddr *ifa; 1033 struct sockaddr *ndst; 1034 #define senderr(x) { error = x ; goto bad; } 1035 1036 KASSERT((fibnum < rt_numfibs), ("rtrequest1_fib: bad fibnum")); 1037 if (dst->sa_family != AF_INET) /* Only INET supports > 1 fib now */ 1038 fibnum = 0; 1039 /* 1040 * Find the correct routing tree to use for this Address Family 1041 */ 1042 rnh = rt_tables_get_rnh(fibnum, dst->sa_family); 1043 if (rnh == NULL) 1044 return (EAFNOSUPPORT); 1045 needlock = ((flags & RTF_RNH_LOCKED) == 0); 1046 flags &= ~RTF_RNH_LOCKED; 1047 if (needlock) 1048 RADIX_NODE_HEAD_LOCK(rnh); 1049 else 1050 RADIX_NODE_HEAD_LOCK_ASSERT(rnh); 1051 /* 1052 * If we are adding a host route then we don't want to put 1053 * a netmask in the tree, nor do we want to clone it.
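* (a host entry is stored with no mask at all, which the radix code treats as an exact, full-length match)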
1054 */ 1055 if (flags & RTF_HOST) 1056 netmask = NULL; 1057 1058 switch (req) { 1059 case RTM_DELETE: 1060 #ifdef RADIX_MPATH 1061 if (rn_mpath_capable(rnh)) { 1062 error = rn_mpath_update(req, info, rnh, ret_nrt); 1063 /* 1064 * "bad" holds true for the success case 1065 * as well 1066 */ 1067 if (error != ENOENT) 1068 goto bad; 1069 error = 0; 1070 } 1071 #endif 1072 /* 1073 * Remove the item from the tree and return it. 1074 * Complain if it is not there and do no more processing. 1075 */ 1076 rn = rnh->rnh_deladdr(dst, netmask, rnh); 1077 if (rn == NULL) 1078 senderr(ESRCH); 1079 if (rn->rn_flags & (RNF_ACTIVE | RNF_ROOT)) 1080 panic ("rtrequest delete"); 1081 rt = RNTORT(rn); 1082 RT_LOCK(rt); 1083 RT_ADDREF(rt); 1084 rt->rt_flags &= ~RTF_UP; 1085 1086 /* 1087 * give the protocol a chance to keep things in sync. 1088 */ 1089 if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest) 1090 ifa->ifa_rtrequest(RTM_DELETE, rt, info); 1091 1092 /* 1093 * One more rtentry floating around that is not 1094 * linked to the routing table. rttrash will be decremented 1095 * when RTFREE(rt) is eventually called. 1096 */ 1097 V_rttrash++; 1098 1099 /* 1100 * If the caller wants it, then it can have it, 1101 * but it's up to it to free the rtentry as we won't be 1102 * doing it. 1103 */ 1104 if (ret_nrt) { 1105 *ret_nrt = rt; 1106 RT_UNLOCK(rt); 1107 } else 1108 RTFREE_LOCKED(rt); 1109 break; 1110 case RTM_RESOLVE: 1111 /* 1112 * resolve was only used for route cloning 1113 * here for compat 1114 */ 1115 break; 1116 case RTM_ADD: 1117 if ((flags & RTF_GATEWAY) && !gateway) 1118 senderr(EINVAL); 1119 if (dst && gateway && (dst->sa_family != gateway->sa_family) && 1120 (gateway->sa_family != AF_UNSPEC) && (gateway->sa_family != AF_LINK)) 1121 senderr(EINVAL); 1122 1123 if (info->rti_ifa == NULL) { 1124 error = rt_getifa_fib(info, fibnum); 1125 if (error) 1126 senderr(error); 1127 } else 1128 ifa_ref(info->rti_ifa); 1129 ifa = info->rti_ifa; 1130 rt = uma_zalloc(V_rtzone, M_NOWAIT | M_ZERO); 1131 if (rt == NULL) { 1132 if (ifa != NULL) 1133 ifa_free(ifa); 1134 senderr(ENOBUFS); 1135 } 1136 RT_LOCK_INIT(rt); 1137 rt->rt_flags = RTF_UP | flags; 1138 rt->rt_fibnum = fibnum; 1139 /* 1140 * Add the gateway. Possibly re-malloc-ing the storage for it 1141 * 1142 */ 1143 RT_LOCK(rt); 1144 if ((error = rt_setgate(rt, dst, gateway)) != 0) { 1145 RT_LOCK_DESTROY(rt); 1146 if (ifa != NULL) 1147 ifa_free(ifa); 1148 uma_zfree(V_rtzone, rt); 1149 senderr(error); 1150 } 1151 1152 /* 1153 * point to the (possibly newly malloc'd) dest address. 1154 */ 1155 ndst = (struct sockaddr *)rt_key(rt); 1156 1157 /* 1158 * make sure it contains the value we want (masked if needed). 1159 */ 1160 if (netmask) { 1161 rt_maskedcopy(dst, ndst, netmask); 1162 } else 1163 bcopy(dst, ndst, dst->sa_len); 1164 1165 /* 1166 * We use the ifa reference returned by rt_getifa_fib(). 1167 * This moved from below so that rnh->rnh_addaddr() can 1168 * examine the ifa and ifa->ifa_ifp if it so desires. 1169 */ 1170 rt->rt_ifa = ifa; 1171 rt->rt_ifp = ifa->ifa_ifp; 1172 rt->rt_rmx.rmx_weight = 1; 1173 1174 #ifdef RADIX_MPATH 1175 /* do not permit exactly the same dst/mask/gw pair */ 1176 if (rn_mpath_capable(rnh) && 1177 rt_mpath_conflict(rnh, rt, netmask)) { 1178 if (rt->rt_ifa) { 1179 ifa_free(rt->rt_ifa); 1180 } 1181 Free(rt_key(rt)); 1182 RT_LOCK_DESTROY(rt); 1183 uma_zfree(V_rtzone, rt); 1184 senderr(EEXIST); 1185 } 1186 #endif 1187 1188 #ifdef FLOWTABLE 1189 rt0 = NULL; 1190 /* XXX 1191 * "flow-table" only supports IPv4 at the moment.
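* The block below checks whether the new route is more specific than an existing non-host route for the same destination; if so a reference to the old route is kept so that its flowtable entries can be flushed once the new route has been added to the tree.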
1192 */ 1193 #ifdef INET 1194 if (dst->sa_family == AF_INET) { 1195 rn = rnh->rnh_matchaddr(dst, rnh); 1196 if (rn && ((rn->rn_flags & RNF_ROOT) == 0)) { 1197 struct sockaddr *mask; 1198 u_char *m, *n; 1199 int len; 1200 1201 /* 1202 * compare mask to see if the new route is 1203 * more specific than the existing one 1204 */ 1205 rt0 = RNTORT(rn); 1206 RT_LOCK(rt0); 1207 RT_ADDREF(rt0); 1208 RT_UNLOCK(rt0); 1209 /* 1210 * A host route is already present, so 1211 * leave the flow-table entries as is. 1212 */ 1213 if (rt0->rt_flags & RTF_HOST) { 1214 RTFREE(rt0); 1215 rt0 = NULL; 1216 } else if (!(flags & RTF_HOST) && netmask) { 1217 mask = rt_mask(rt0); 1218 len = mask->sa_len; 1219 m = (u_char *)mask; 1220 n = (u_char *)netmask; 1221 while (len-- > 0) { 1222 if (*n != *m) 1223 break; 1224 n++; 1225 m++; 1226 } 1227 if (len == 0 || (*n < *m)) { 1228 RTFREE(rt0); 1229 rt0 = NULL; 1230 } 1231 } 1232 } 1233 } 1234 #endif 1235 #endif 1236 1237 /* XXX mtu manipulation will be done in rnh_addaddr -- itojun */ 1238 rn = rnh->rnh_addaddr(ndst, netmask, rnh, rt->rt_nodes); 1239 /* 1240 * If it still failed to go into the tree, 1241 * then un-make it (this should be a function) 1242 */ 1243 if (rn == NULL) { 1244 if (rt->rt_ifa) 1245 ifa_free(rt->rt_ifa); 1246 Free(rt_key(rt)); 1247 RT_LOCK_DESTROY(rt); 1248 uma_zfree(V_rtzone, rt); 1249 #ifdef FLOWTABLE 1250 if (rt0 != NULL) 1251 RTFREE(rt0); 1252 #endif 1253 senderr(EEXIST); 1254 } 1255 #ifdef FLOWTABLE 1256 else if (rt0 != NULL) { 1257 #ifdef INET 1258 flowtable_route_flush(V_ip_ft, rt0); 1259 #endif 1260 RTFREE(rt0); 1261 } 1262 #endif 1263 1264 /* 1265 * If this protocol has something to add to this then 1266 * allow it to do that as well. 1267 */ 1268 if (ifa->ifa_rtrequest) 1269 ifa->ifa_rtrequest(req, rt, info); 1270 1271 /* 1272 * actually return a resultant rtentry and 1273 * give the caller a single reference. 1274 */ 1275 if (ret_nrt) { 1276 *ret_nrt = rt; 1277 RT_ADDREF(rt); 1278 } 1279 RT_UNLOCK(rt); 1280 break; 1281 default: 1282 error = EOPNOTSUPP; 1283 } 1284 bad: 1285 if (needlock) 1286 RADIX_NODE_HEAD_UNLOCK(rnh); 1287 return (error); 1288 #undef senderr 1289 } 1290 1291 #undef dst 1292 #undef gateway 1293 #undef netmask 1294 #undef ifaaddr 1295 #undef ifpaddr 1296 #undef flags 1297 1298 int 1299 rt_setgate(struct rtentry *rt, struct sockaddr *dst, struct sockaddr *gate) 1300 { 1301 /* XXX dst may be overwritten, can we move this to below */ 1302 int dlen = SA_SIZE(dst), glen = SA_SIZE(gate); 1303 #ifdef INVARIANTS 1304 struct radix_node_head *rnh; 1305 1306 rnh = rt_tables_get_rnh(rt->rt_fibnum, dst->sa_family); 1307 #endif 1308 1309 RT_LOCK_ASSERT(rt); 1310 RADIX_NODE_HEAD_LOCK_ASSERT(rnh); 1311 1312 /* 1313 * Prepare to store the gateway in rt->rt_gateway. 1314 * Both dst and gateway are stored one after the other in the same 1315 * malloc'd chunk. If we have room, we can reuse the old buffer, 1316 * rt_gateway already points to the right place. 1317 * Otherwise, malloc a new block and update the 'dst' address. 1318 */ 1319 if (rt->rt_gateway == NULL || glen > SA_SIZE(rt->rt_gateway)) { 1320 caddr_t new; 1321 1322 R_Malloc(new, caddr_t, dlen + glen); 1323 if (new == NULL) 1324 return ENOBUFS; 1325 /* 1326 * XXX note, we copy from *dst and not *rt_key(rt) because 1327 * rt_setgate() can be called to initialize a newly 1328 * allocated route entry, in which case rt_key(rt) == NULL 1329 * (and also rt->rt_gateway == NULL). 1330 * Free()/free() handle a NULL argument just fine. 
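* The resulting chunk is laid out as [ key: dlen bytes ][ gateway: glen bytes ], with rt_key(rt) pointing at the start and rt->rt_gateway at new + dlen.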
1331 */ 1332 bcopy(dst, new, dlen); 1333 Free(rt_key(rt)); /* free old block, if any */ 1334 rt_key(rt) = (struct sockaddr *)new; 1335 rt->rt_gateway = (struct sockaddr *)(new + dlen); 1336 } 1337 1338 /* 1339 * Copy the new gateway value into the memory chunk. 1340 */ 1341 bcopy(gate, rt->rt_gateway, glen); 1342 1343 return (0); 1344 } 1345 1346 void 1347 rt_maskedcopy(struct sockaddr *src, struct sockaddr *dst, struct sockaddr *netmask) 1348 { 1349 register u_char *cp1 = (u_char *)src; 1350 register u_char *cp2 = (u_char *)dst; 1351 register u_char *cp3 = (u_char *)netmask; 1352 u_char *cplim = cp2 + *cp3; 1353 u_char *cplim2 = cp2 + *cp1; 1354 1355 *cp2++ = *cp1++; *cp2++ = *cp1++; /* copies sa_len & sa_family */ 1356 cp3 += 2; 1357 if (cplim > cplim2) 1358 cplim = cplim2; 1359 while (cp2 < cplim) 1360 *cp2++ = *cp1++ & *cp3++; 1361 if (cp2 < cplim2) 1362 bzero((caddr_t)cp2, (unsigned)(cplim2 - cp2)); 1363 } 1364 1365 /* 1366 * Set up a routing table entry, normally 1367 * for an interface. 1368 */ 1369 #define _SOCKADDR_TMPSIZE 128 /* Not too big.. kernel stack size is limited */ 1370 static inline int 1371 rtinit1(struct ifaddr *ifa, int cmd, int flags, int fibnum) 1372 { 1373 struct sockaddr *dst; 1374 struct sockaddr *netmask; 1375 struct rtentry *rt = NULL; 1376 struct rt_addrinfo info; 1377 int error = 0; 1378 int startfib, endfib; 1379 char tempbuf[_SOCKADDR_TMPSIZE]; 1380 int didwork = 0; 1381 int a_failure = 0; 1382 static struct sockaddr_dl null_sdl = {sizeof(null_sdl), AF_LINK}; 1383 1384 if (flags & RTF_HOST) { 1385 dst = ifa->ifa_dstaddr; 1386 netmask = NULL; 1387 } else { 1388 dst = ifa->ifa_addr; 1389 netmask = ifa->ifa_netmask; 1390 } 1391 if ( dst->sa_family != AF_INET) 1392 fibnum = 0; 1393 if (fibnum == -1) { 1394 if (rt_add_addr_allfibs == 0 && cmd == (int)RTM_ADD) { 1395 startfib = endfib = curthread->td_proc->p_fibnum; 1396 } else { 1397 startfib = 0; 1398 endfib = rt_numfibs - 1; 1399 } 1400 } else { 1401 KASSERT((fibnum < rt_numfibs), ("rtinit1: bad fibnum")); 1402 startfib = fibnum; 1403 endfib = fibnum; 1404 } 1405 if (dst->sa_len == 0) 1406 return(EINVAL); 1407 1408 /* 1409 * If it's a delete, check that if it exists, 1410 * it's on the correct interface or we might scrub 1411 * a route to another ifa which would 1412 * be confusing at best and possibly worse. 1413 */ 1414 if (cmd == RTM_DELETE) { 1415 /* 1416 * It's a delete, so it should already exist.. 1417 * If it's a net, mask off the host bits 1418 * (Assuming we have a mask) 1419 * XXX this is kinda inet specific.. 1420 */ 1421 if (netmask != NULL) { 1422 rt_maskedcopy(dst, (struct sockaddr *)tempbuf, netmask); 1423 dst = (struct sockaddr *)tempbuf; 1424 } 1425 } 1426 /* 1427 * Now go through all the requested tables (fibs) and do the 1428 * requested action. Realistically, this will either be fib 0 1429 * for protocols that don't do multiple tables or all the 1430 * tables for those that do. XXX For this version only AF_INET. 1431 * When that changes code should be refactored to protocol 1432 * independent parts and protocol dependent parts. 1433 */ 1434 for ( fibnum = startfib; fibnum <= endfib; fibnum++) { 1435 if (cmd == RTM_DELETE) { 1436 struct radix_node_head *rnh; 1437 struct radix_node *rn; 1438 /* 1439 * Look up an rtentry that is in the routing tree and 1440 * contains the correct info. 
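* (i.e. verify that the entry found in this fib really belongs to this ifa and this destination before an RTM_DELETE is issued for it)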
1441 */ 1442 rnh = rt_tables_get_rnh(fibnum, dst->sa_family); 1443 if (rnh == NULL) 1444 /* this table doesn't exist but others might */ 1445 continue; 1446 RADIX_NODE_HEAD_LOCK(rnh); 1447 #ifdef RADIX_MPATH 1448 if (rn_mpath_capable(rnh)) { 1449 1450 rn = rnh->rnh_matchaddr(dst, rnh); 1451 if (rn == NULL) 1452 error = ESRCH; 1453 else { 1454 rt = RNTORT(rn); 1455 /* 1456 * for interface route the 1457 * rt->rt_gateway is sockaddr_intf 1458 * for cloning ARP entries, so 1459 * rt_mpath_matchgate must use the 1460 * interface address 1461 */ 1462 rt = rt_mpath_matchgate(rt, 1463 ifa->ifa_addr); 1464 if (!rt) 1465 error = ESRCH; 1466 } 1467 } 1468 else 1469 #endif 1470 rn = rnh->rnh_lookup(dst, netmask, rnh); 1471 error = (rn == NULL || 1472 (rn->rn_flags & RNF_ROOT) || 1473 RNTORT(rn)->rt_ifa != ifa || 1474 !sa_equal((struct sockaddr *)rn->rn_key, dst)); 1475 RADIX_NODE_HEAD_UNLOCK(rnh); 1476 if (error) { 1477 /* this is only an error if bad on ALL tables */ 1478 continue; 1479 } 1480 } 1481 /* 1482 * Do the actual request 1483 */ 1484 bzero((caddr_t)&info, sizeof(info)); 1485 info.rti_ifa = ifa; 1486 info.rti_flags = flags | ifa->ifa_flags; 1487 info.rti_info[RTAX_DST] = dst; 1488 /* 1489 * doing this for compatibility reasons 1490 */ 1491 if (cmd == RTM_ADD) 1492 info.rti_info[RTAX_GATEWAY] = 1493 (struct sockaddr *)&null_sdl; 1494 else 1495 info.rti_info[RTAX_GATEWAY] = ifa->ifa_addr; 1496 info.rti_info[RTAX_NETMASK] = netmask; 1497 error = rtrequest1_fib(cmd, &info, &rt, fibnum); 1498 if (error == 0 && rt != NULL) { 1499 /* 1500 * notify any listening routing agents of the change 1501 */ 1502 RT_LOCK(rt); 1503 #ifdef RADIX_MPATH 1504 /* 1505 * in case address alias finds the first address 1506 * e.g. ifconfig bge0 192.103.54.246/24 1507 * e.g. ifconfig bge0 192.103.54.247/24 1508 * the address set in the route is 192.103.54.246 1509 * so we need to replace it with 192.103.54.247 1510 */ 1511 if (memcmp(rt->rt_ifa->ifa_addr, 1512 ifa->ifa_addr, ifa->ifa_addr->sa_len)) { 1513 ifa_free(rt->rt_ifa); 1514 ifa_ref(ifa); 1515 rt->rt_ifp = ifa->ifa_ifp; 1516 rt->rt_ifa = ifa; 1517 } 1518 #endif 1519 /* 1520 * doing this for compatibility reasons 1521 */ 1522 if (cmd == RTM_ADD) { 1523 ((struct sockaddr_dl *)rt->rt_gateway)->sdl_type = 1524 rt->rt_ifp->if_type; 1525 ((struct sockaddr_dl *)rt->rt_gateway)->sdl_index = 1526 rt->rt_ifp->if_index; 1527 } 1528 RT_ADDREF(rt); 1529 RT_UNLOCK(rt); 1530 rt_newaddrmsg(cmd, ifa, error, rt); 1531 RT_LOCK(rt); 1532 RT_REMREF(rt); 1533 if (cmd == RTM_DELETE) { 1534 /* 1535 * If we are deleting, and we found an entry, 1536 * then it's been removed from the tree.. 1537 * now throw it away. 1538 */ 1539 RTFREE_LOCKED(rt); 1540 } else { 1541 if (cmd == RTM_ADD) { 1542 /* 1543 * We just wanted to add it.. 1544 * we don't actually need a reference. 1545 */ 1546 RT_REMREF(rt); 1547 } 1548 RT_UNLOCK(rt); 1549 } 1550 didwork = 1; 1551 } 1552 if (error) 1553 a_failure = error; 1554 } 1555 if (cmd == RTM_DELETE) { 1556 if (didwork) { 1557 error = 0; 1558 } else { 1559 /* we only give an error if it wasn't in any table */ 1560 error = ((flags & RTF_HOST) ? 1561 EHOSTUNREACH : ENETUNREACH); 1562 } 1563 } else { 1564 if (a_failure) { 1565 /* return an error if any of them failed */ 1566 error = a_failure; 1567 } 1568 } 1569 return (error); 1570 } 1571 1572 /* special one for inet internal use. may not use. 
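* (rtinit_fib() passes fibnum -1, meaning 'all fibs', subject to the rt_add_addr_allfibs handling in rtinit1())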
*/ 1573 int 1574 rtinit_fib(struct ifaddr *ifa, int cmd, int flags) 1575 { 1576 return (rtinit1(ifa, cmd, flags, -1)); 1577 } 1578 1579 /* 1580 * Set up a routing table entry, normally 1581 * for an interface. 1582 */ 1583 int 1584 rtinit(struct ifaddr *ifa, int cmd, int flags) 1585 { 1586 struct sockaddr *dst; 1587 int fib = 0; 1588 1589 if (flags & RTF_HOST) { 1590 dst = ifa->ifa_dstaddr; 1591 } else { 1592 dst = ifa->ifa_addr; 1593 } 1594 1595 if (dst->sa_family == AF_INET) 1596 fib = -1; 1597 return (rtinit1(ifa, cmd, flags, fib)); 1598 } 1599
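/*
 * Illustrative sketch only (not part of the original code): how a kernel
 * consumer might add and later remove a static host route in fib 0 with the
 * request interface above.  'dst' and 'gw' are hypothetical, filled-in
 * sockaddr_in structures supplied by the caller.
 *
 *	struct rtentry *rt = NULL;
 *	int error;
 *
 *	error = rtrequest_fib(RTM_ADD, (struct sockaddr *)&dst,
 *	    (struct sockaddr *)&gw, NULL, RTF_HOST | RTF_GATEWAY | RTF_STATIC,
 *	    &rt, 0);
 *	if (error == 0 && rt != NULL)
 *		RTFREE(rt);	// drop the reference handed back via ret_nrt
 *	...
 *	error = rtrequest_fib(RTM_DELETE, (struct sockaddr *)&dst,
 *	    (struct sockaddr *)&gw, NULL, RTF_HOST, NULL, 0);
 */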