/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2020 Alexander V. Chernikov
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_route.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rmlock.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/vnet.h>
#include <net/route.h>
#include <net/route/route_ctl.h>
#include <net/route/route_var.h>
#include <net/route/nhop_utils.h>
#include <net/route/nhop.h>
#include <net/route/nhop_var.h>
#include <netinet/in.h>
#include <netinet6/scope6_var.h>

#include <vm/uma.h>

/*
 * This file contains control plane routing tables functions.
 *
 * All functions assume they are called within the net epoch.
 */
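
/*
 * Usage sketch (illustrative only): since all functions here assume the
 * caller runs within the net epoch, a typical external caller wraps the
 * call as follows; the rt_addrinfo setup is omitted for brevity.
 *
 *	struct epoch_tracker et;
 *	struct rib_cmd_info rc;
 *	int error;
 *
 *	NET_EPOCH_ENTER(et);
 *	error = rib_action(RT_DEFAULT_FIB, RTM_ADD, &info, &rc);
 *	NET_EPOCH_EXIT(et);
 */
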
struct rib_subscription {
	CK_STAILQ_ENTRY(rib_subscription)	next;
	rib_subscription_cb_t			*func;
	void					*arg;
	struct rib_head				*rnh;
	enum rib_subscription_type		type;
	struct epoch_context			epoch_ctx;
};

static int add_route(struct rib_head *rnh, struct rt_addrinfo *info,
    struct rib_cmd_info *rc);
static int add_route_nhop(struct rib_head *rnh, struct rtentry *rt,
    struct rt_addrinfo *info, struct route_nhop_data *rnd,
    struct rib_cmd_info *rc);
static int del_route(struct rib_head *rnh, struct rt_addrinfo *info,
    struct rib_cmd_info *rc);
static int change_route(struct rib_head *rnh, struct rt_addrinfo *info,
    struct route_nhop_data *nhd_orig, struct rib_cmd_info *rc);

static int rt_unlinkrte(struct rib_head *rnh, struct rt_addrinfo *info,
    struct rib_cmd_info *rc);

static void rib_notify(struct rib_head *rnh, enum rib_subscription_type type,
    struct rib_cmd_info *rc);

static void destroy_subscription_epoch(epoch_context_t ctx);
#ifdef ROUTE_MPATH
static bool rib_can_multipath(struct rib_head *rh);
#endif

/* Per-vnet multipath routing configuration */
SYSCTL_DECL(_net_route);
#define	V_rib_route_multipath	VNET(rib_route_multipath)
#ifdef ROUTE_MPATH
#define	_MP_FLAGS	CTLFLAG_RW
#else
#define	_MP_FLAGS	CTLFLAG_RD
#endif
VNET_DEFINE(u_int, rib_route_multipath) = 1;
SYSCTL_UINT(_net_route, OID_AUTO, multipath, _MP_FLAGS | CTLFLAG_VNET,
    &VNET_NAME(rib_route_multipath), 0, "Enable route multipath");
#undef _MP_FLAGS

#if defined(INET) && defined(INET6)
FEATURE(ipv4_rfc5549_support, "Route IPv4 packets via IPv6 nexthops");
#define	V_rib_route_ipv6_nexthop	VNET(rib_route_ipv6_nexthop)
VNET_DEFINE(u_int, rib_route_ipv6_nexthop) = 1;
SYSCTL_UINT(_net_route, OID_AUTO, ipv6_nexthop, CTLFLAG_RW | CTLFLAG_VNET,
    &VNET_NAME(rib_route_ipv6_nexthop), 0, "Enable IPv4 route via IPv6 Next Hop address");
#endif

/* Routing table UMA zone */
VNET_DEFINE_STATIC(uma_zone_t, rtzone);
#define	V_rtzone	VNET(rtzone)

/* Debug bits */
SYSCTL_NODE(_net_route, OID_AUTO, debug, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");

void
vnet_rtzone_init(void)
{

	V_rtzone = uma_zcreate("rtentry", sizeof(struct rtentry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
}

#ifdef VIMAGE
void
vnet_rtzone_destroy(void)
{

	uma_zdestroy(V_rtzone);
}
#endif

static void
destroy_rtentry(struct rtentry *rt)
{
#ifdef VIMAGE
	struct nhop_object *nh = rt->rt_nhop;

	/*
	 * At this moment rnh and nh_control may already be freed, and
	 * the nhop interface may have been migrated to a different vnet.
	 * Use the vnet stored in the nexthop to delete the entry.
	 */
#ifdef ROUTE_MPATH
	if (NH_IS_NHGRP(nh)) {
		struct weightened_nhop *wn;
		uint32_t num_nhops;

		wn = nhgrp_get_nhops((struct nhgrp_object *)nh, &num_nhops);
		nh = wn[0].nh;
	}
#endif
	CURVNET_SET(nhop_get_vnet(nh));
#endif

	/* Unreference nexthop */
	nhop_free_any(rt->rt_nhop);

	uma_zfree(V_rtzone, rt);

	CURVNET_RESTORE();
}

/*
 * Epoch callback indicating rtentry is safe to destroy
 */
static void
destroy_rtentry_epoch(epoch_context_t ctx)
{
	struct rtentry *rt;

	rt = __containerof(ctx, struct rtentry, rt_epoch_ctx);

	destroy_rtentry(rt);
}

/*
 * Schedule rtentry deletion
 */
static void
rtfree(struct rtentry *rt)
{

	KASSERT(rt != NULL, ("%s: NULL rt", __func__));

	epoch_call(net_epoch_preempt, destroy_rtentry_epoch,
	    &rt->rt_epoch_ctx);
}

static struct rib_head *
get_rnh(uint32_t fibnum, const struct rt_addrinfo *info)
{
	struct rib_head *rnh;
	struct sockaddr *dst;

	KASSERT((fibnum < rt_numfibs), ("rib_add_route: bad fibnum"));

	dst = info->rti_info[RTAX_DST];
	rnh = rt_tables_get_rnh(fibnum, dst->sa_family);

	return (rnh);
}

#if defined(INET) && defined(INET6)
static bool
rib_can_ipv6_nexthop_address(struct rib_head *rh)
{
	int result;

	CURVNET_SET(rh->rib_vnet);
	result = !!V_rib_route_ipv6_nexthop;
	CURVNET_RESTORE();

	return (result);
}
#endif

#ifdef ROUTE_MPATH
static bool
rib_can_multipath(struct rib_head *rh)
{
	int result;

	CURVNET_SET(rh->rib_vnet);
	result = !!V_rib_route_multipath;
	CURVNET_RESTORE();

	return (result);
}

/*
 * Check if nhop is multipath-eligible.
 * Avoid nhops without gateways and redirects.
 *
 * Returns true for a multipath-eligible nexthop,
 * false otherwise.
 */
bool
nhop_can_multipath(const struct nhop_object *nh)
{

	if ((nh->nh_flags & NHF_MULTIPATH) != 0)
		return (true);
	if ((nh->nh_flags & NHF_GATEWAY) == 0)
		return (false);
	if ((nh->nh_flags & NHF_REDIRECT) != 0)
		return (false);

	return (true);
}
#endif

static uint32_t
get_info_weight(const struct rt_addrinfo *info, uint32_t default_weight)
{
	uint32_t weight;

	if (info->rti_mflags & RTV_WEIGHT)
		weight = info->rti_rmx->rmx_weight;
	else
		weight = default_weight;
	/* Keep upper 1 byte for adm distance purposes */
	if (weight > RT_MAX_WEIGHT)
		weight = RT_MAX_WEIGHT;
	else if (weight == 0)
		weight = default_weight;

	return (weight);
}

bool
rt_is_host(const struct rtentry *rt)
{

	return (rt->rte_flags & RTF_HOST);
}

sa_family_t
rt_get_family(const struct rtentry *rt)
{
	const struct sockaddr *dst;

	dst = (const struct sockaddr *)rt_key_const(rt);

	return (dst->sa_family);
}

/*
 * Returns pointer to nexthop or nexthop group
 * associated with @rt
 */
struct nhop_object *
rt_get_raw_nhop(const struct rtentry *rt)
{

	return (rt->rt_nhop);
}

#ifdef INET
/*
 * Stores IPv4 address and prefix length of @rt inside
 * @paddr and @plen.
 * @pscopeid is currently always set to 0.
 */
void
rt_get_inet_prefix_plen(const struct rtentry *rt, struct in_addr *paddr,
    int *plen, uint32_t *pscopeid)
{
	const struct sockaddr_in *dst;

	dst = (const struct sockaddr_in *)rt_key_const(rt);
	KASSERT((dst->sin_family == AF_INET),
	    ("rt family is %d, not inet", dst->sin_family));
	*paddr = dst->sin_addr;
	dst = (const struct sockaddr_in *)rt_mask_const(rt);
	if (dst == NULL)
		*plen = 32;
	else
		*plen = bitcount32(dst->sin_addr.s_addr);
	*pscopeid = 0;
}

/*
 * Stores IPv4 address and prefix mask of @rt inside
 * @paddr and @pmask. Sets the mask to INADDR_BROADCAST for host routes.
 * @pscopeid is currently always set to 0.
 */
void
rt_get_inet_prefix_pmask(const struct rtentry *rt, struct in_addr *paddr,
    struct in_addr *pmask, uint32_t *pscopeid)
{
	const struct sockaddr_in *dst;

	dst = (const struct sockaddr_in *)rt_key_const(rt);
	KASSERT((dst->sin_family == AF_INET),
	    ("rt family is %d, not inet", dst->sin_family));
	*paddr = dst->sin_addr;
	dst = (const struct sockaddr_in *)rt_mask_const(rt);
	if (dst == NULL)
		pmask->s_addr = INADDR_BROADCAST;
	else
		*pmask = dst->sin_addr;
	*pscopeid = 0;
}
#endif

#ifdef INET6
static int
inet6_get_plen(const struct in6_addr *addr)
{

	return (bitcount32(addr->s6_addr32[0]) + bitcount32(addr->s6_addr32[1]) +
	    bitcount32(addr->s6_addr32[2]) + bitcount32(addr->s6_addr32[3]));
}

/*
 * Stores IPv6 address and prefix length of @rt inside
 * @paddr and @plen. Addresses are returned in de-embedded form.
 * Scopeid is set to 0 for non-LL addresses.
 */
void
rt_get_inet6_prefix_plen(const struct rtentry *rt, struct in6_addr *paddr,
    int *plen, uint32_t *pscopeid)
{
	const struct sockaddr_in6 *dst;

	dst = (const struct sockaddr_in6 *)rt_key_const(rt);
	KASSERT((dst->sin6_family == AF_INET6),
	    ("rt family is %d, not inet6", dst->sin6_family));
	if (IN6_IS_SCOPE_LINKLOCAL(&dst->sin6_addr))
		in6_splitscope(&dst->sin6_addr, paddr, pscopeid);
	else
		*paddr = dst->sin6_addr;
	dst = (const struct sockaddr_in6 *)rt_mask_const(rt);
	if (dst == NULL)
		*plen = 128;
	else
		*plen = inet6_get_plen(&dst->sin6_addr);
}

/*
 * Stores IPv6 address and prefix mask of @rt inside
 * @paddr and @pmask. Addresses are returned in de-embedded form.
 * Scopeid is set to 0 for non-LL addresses.
 */
void
rt_get_inet6_prefix_pmask(const struct rtentry *rt, struct in6_addr *paddr,
    struct in6_addr *pmask, uint32_t *pscopeid)
{
	const struct sockaddr_in6 *dst;

	dst = (const struct sockaddr_in6 *)rt_key_const(rt);
	KASSERT((dst->sin6_family == AF_INET6),
	    ("rt family is %d, not inet6", dst->sin6_family));
	if (IN6_IS_SCOPE_LINKLOCAL(&dst->sin6_addr))
		in6_splitscope(&dst->sin6_addr, paddr, pscopeid);
	else
		*paddr = dst->sin6_addr;
	dst = (const struct sockaddr_in6 *)rt_mask_const(rt);
	if (dst == NULL)
		memset(pmask, 0xFF, sizeof(struct in6_addr));
	else
		*pmask = dst->sin6_addr;
}
#endif
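
/*
 * Accessor usage sketch (illustrative only): extracting the prefix of an
 * AF_INET rtentry obtained under the net epoch; @rt is assumed valid.
 *
 *	struct in_addr addr;
 *	uint32_t scopeid;
 *	int plen;
 *
 *	rt_get_inet_prefix_plen(rt, &addr, &plen, &scopeid);
 *
 * After the call @addr/@plen describe the prefix (e.g. 192.0.2.0/24)
 * and @scopeid is always 0 for IPv4.
 */
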
static void
rt_set_expire_info(struct rtentry *rt, const struct rt_addrinfo *info)
{

	/* Kernel -> userland timebase conversion. */
	if (info->rti_mflags & RTV_EXPIRE)
		rt->rt_expire = info->rti_rmx->rmx_expire ?
		    info->rti_rmx->rmx_expire - time_second + time_uptime : 0;
}

/*
 * Check if specified @gw matches gw data in the nexthop @nh.
 *
 * Returns true if matches, false otherwise.
 */
bool
match_nhop_gw(const struct nhop_object *nh, const struct sockaddr *gw)
{

	if (nh->gw_sa.sa_family != gw->sa_family)
		return (false);

	switch (gw->sa_family) {
	case AF_INET:
		return (nh->gw4_sa.sin_addr.s_addr ==
		    ((const struct sockaddr_in *)gw)->sin_addr.s_addr);
	case AF_INET6:
		{
			const struct sockaddr_in6 *gw6;
			gw6 = (const struct sockaddr_in6 *)gw;

			/*
			 * Currently (2020-09) IPv6 gws in kernel have their
			 * scope embedded. Once this becomes false, this code
			 * has to be revisited.
			 */
			if (IN6_ARE_ADDR_EQUAL(&nh->gw6_sa.sin6_addr,
			    &gw6->sin6_addr))
				return (true);
			return (false);
		}
	case AF_LINK:
		{
			const struct sockaddr_dl *sdl;
			sdl = (const struct sockaddr_dl *)gw;
			return (nh->gwl_sa.sdl_index == sdl->sdl_index);
		}
	default:
		return (memcmp(&nh->gw_sa, gw, nh->gw_sa.sa_len) == 0);
	}

	/* NOTREACHED */
	return (false);
}

/*
 * Checks if data in @info matches nexthop @nh.
 *
 * Returns 0 on success,
 * ESRCH if not matched,
 * ENOENT if the filter function returned false.
 */
int
check_info_match_nhop(const struct rt_addrinfo *info, const struct rtentry *rt,
    const struct nhop_object *nh)
{
	const struct sockaddr *gw = info->rti_info[RTAX_GATEWAY];

	if (info->rti_filter != NULL) {
		if (info->rti_filter(rt, nh, info->rti_filterdata) == 0)
			return (ENOENT);
		else
			return (0);
	}
	if ((gw != NULL) && !match_nhop_gw(nh, gw))
		return (ESRCH);

	return (0);
}

/*
 * Checks if nexthop @nh can be rewritten by data in @info because
 * of higher "priority". Currently the only case for such a scenario
 * is the kernel installing interface routes, marked by the RTF_PINNED
 * flag.
 *
 * Returns:
 * 1 if @info data has higher priority
 * 0 if priority is the same
 * -1 if priority is lower
 */
int
can_override_nhop(const struct rt_addrinfo *info, const struct nhop_object *nh)
{

	if (info->rti_flags & RTF_PINNED) {
		return (NH_IS_PINNED(nh)) ? 0 : 1;
	} else {
		return (NH_IS_PINNED(nh)) ? -1 : 0;
	}
}

/*
 * Runs exact prefix match based on @dst and @netmask.
 * Returns matched @rtentry if found or NULL.
 * If rtentry was found, saves nexthop / weight value into @rnd.
 */
static struct rtentry *
lookup_prefix_bysa(struct rib_head *rnh, const struct sockaddr *dst,
    const struct sockaddr *netmask, struct route_nhop_data *rnd)
{
	struct rtentry *rt;

	RIB_LOCK_ASSERT(rnh);

	rt = (struct rtentry *)rnh->rnh_lookup(__DECONST(void *, dst),
	    __DECONST(void *, netmask), &rnh->head);
	if (rt != NULL) {
		rnd->rnd_nhop = rt->rt_nhop;
		rnd->rnd_weight = rt->rt_weight;
	} else {
		rnd->rnd_nhop = NULL;
		rnd->rnd_weight = 0;
	}

	return (rt);
}

/*
 * Runs exact prefix match based on dst/netmask from @info.
 * Assumes RIB lock is held.
 * Returns matched @rtentry if found or NULL.
 * If rtentry was found, saves nexthop / weight value into @rnd.
 */
struct rtentry *
lookup_prefix(struct rib_head *rnh, const struct rt_addrinfo *info,
    struct route_nhop_data *rnd)
{
	struct rtentry *rt;

	rt = lookup_prefix_bysa(rnh, info->rti_info[RTAX_DST],
	    info->rti_info[RTAX_NETMASK], rnd);

	return (rt);
}

/*
 * Adds route defined by @info into the kernel table specified by @fibnum and
 * sa_family in @info->rti_info[RTAX_DST].
 *
 * Returns 0 on success and fills in operation metadata into @rc.
 */
int
rib_add_route(uint32_t fibnum, struct rt_addrinfo *info,
    struct rib_cmd_info *rc)
{
	struct rib_head *rnh;
	int error;

	NET_EPOCH_ASSERT();

	rnh = get_rnh(fibnum, info);
	if (rnh == NULL)
		return (EAFNOSUPPORT);

	/*
	 * Check consistency between RTF_HOST flag and netmask
	 * existence.
	 */
	if (info->rti_flags & RTF_HOST)
		info->rti_info[RTAX_NETMASK] = NULL;
	else if (info->rti_info[RTAX_NETMASK] == NULL)
		return (EINVAL);

	bzero(rc, sizeof(struct rib_cmd_info));
	rc->rc_cmd = RTM_ADD;

	error = add_route(rnh, info, rc);
	if (error == 0)
		rib_notify(rnh, RIB_NOTIFY_DELAYED, rc);

	return (error);
}
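
/*
 * Caller sketch for rib_add_route() (illustrative only; assumes dst, mask
 * and gw are pre-filled struct sockaddr_in and the caller is inside the
 * net epoch):
 *
 *	struct rt_addrinfo info;
 *	struct rib_cmd_info rc;
 *	int error;
 *
 *	bzero(&info, sizeof(info));
 *	info.rti_flags = RTF_GATEWAY | RTF_STATIC;
 *	info.rti_info[RTAX_DST] = (struct sockaddr *)&dst;
 *	info.rti_info[RTAX_NETMASK] = (struct sockaddr *)&mask;
 *	info.rti_info[RTAX_GATEWAY] = (struct sockaddr *)&gw;
 *	error = rib_add_route(RT_DEFAULT_FIB, &info, &rc);
 */
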
/*
 * Checks if @dst and @gateway form a valid combination.
 *
 * Returns true if valid, false otherwise.
 */
static bool
check_gateway(struct rib_head *rnh, struct sockaddr *dst,
    struct sockaddr *gateway)
{
	if (dst->sa_family == gateway->sa_family)
		return (true);
	else if (gateway->sa_family == AF_UNSPEC)
		return (true);
	else if (gateway->sa_family == AF_LINK)
		return (true);
#if defined(INET) && defined(INET6)
	else if (dst->sa_family == AF_INET && gateway->sa_family == AF_INET6 &&
	    rib_can_ipv6_nexthop_address(rnh))
		return (true);
#endif
	else
		return (false);
}

/*
 * Creates rtentry and nexthop based on @info data.
 * Returns 0 and fills in rtentry into @prt on success,
 * returns errno otherwise.
 */
static int
create_rtentry(struct rib_head *rnh, struct rt_addrinfo *info,
    struct rtentry **prt)
{
	struct sockaddr *dst, *ndst, *gateway, *netmask;
	struct rtentry *rt;
	struct nhop_object *nh;
	int error, flags;

	dst = info->rti_info[RTAX_DST];
	gateway = info->rti_info[RTAX_GATEWAY];
	netmask = info->rti_info[RTAX_NETMASK];
	flags = info->rti_flags;

	if ((flags & RTF_GATEWAY) && !gateway)
		return (EINVAL);
	if (dst && gateway && !check_gateway(rnh, dst, gateway))
		return (EINVAL);

	if (dst->sa_len > sizeof(((struct rtentry *)NULL)->rt_dstb))
		return (EINVAL);

	if (info->rti_ifa == NULL) {
		error = rt_getifa_fib(info, rnh->rib_fibnum);
		if (error)
			return (error);
	}

	error = nhop_create_from_info(rnh, info, &nh);
	if (error != 0)
		return (error);

	rt = uma_zalloc(V_rtzone, M_NOWAIT | M_ZERO);
	if (rt == NULL) {
		nhop_free(nh);
		return (ENOBUFS);
	}
	rt->rte_flags = (RTF_UP | flags) & RTE_RT_FLAG_MASK;
	rt->rt_nhop = nh;

	/* Fill in dst */
	memcpy(&rt->rt_dst, dst, dst->sa_len);
	rt_key(rt) = &rt->rt_dst;

	/*
	 * Point to the (possibly newly malloc'd) dest address.
	 */
	ndst = (struct sockaddr *)rt_key(rt);

	/*
	 * Make sure it contains the value we want (masked if needed).
	 */
	if (netmask) {
		rt_maskedcopy(dst, ndst, netmask);
	} else
		bcopy(dst, ndst, dst->sa_len);

	/*
	 * We use the ifa reference returned by rt_getifa_fib().
	 * This moved from below so that rnh->rnh_addaddr() can
	 * examine the ifa and ifa->ifa_ifp if it so desires.
	 */
	rt->rt_weight = get_info_weight(info, RT_DEFAULT_WEIGHT);
	rt_set_expire_info(rt, info);

	*prt = rt;
	return (0);
}

static int
add_route(struct rib_head *rnh, struct rt_addrinfo *info,
    struct rib_cmd_info *rc)
{
	struct nhop_object *nh_orig;
	struct route_nhop_data rnd_orig, rnd_add;
	struct nhop_object *nh;
	struct rtentry *rt, *rt_orig;
	int error;

	error = create_rtentry(rnh, info, &rt);
	if (error != 0)
		return (error);

	rnd_add.rnd_nhop = rt->rt_nhop;
	rnd_add.rnd_weight = rt->rt_weight;
	nh = rt->rt_nhop;

	RIB_WLOCK(rnh);
	error = add_route_nhop(rnh, rt, info, &rnd_add, rc);
	if (error == 0) {
		RIB_WUNLOCK(rnh);
		return (0);
	}

	/* Addition failed. Lookup prefix in the rib to determine the cause. */
	rt_orig = lookup_prefix(rnh, info, &rnd_orig);
	if (rt_orig == NULL) {
		/* No prefix -> rnh_addaddr() failed to allocate memory */
		RIB_WUNLOCK(rnh);
		nhop_free(nh);
		uma_zfree(V_rtzone, rt);
		return (ENOMEM);
	}

	/* We have an existing route in the RIB. */
	nh_orig = rnd_orig.rnd_nhop;
	/* Check if the new route has higher preference */
	if (can_override_nhop(info, nh_orig) > 0) {
		/* Update nexthop to the new route */
		change_route_nhop(rnh, rt_orig, info, &rnd_add, rc);
		RIB_WUNLOCK(rnh);
		uma_zfree(V_rtzone, rt);
		nhop_free(nh_orig);
		return (0);
	}

	RIB_WUNLOCK(rnh);

#ifdef ROUTE_MPATH
	if (rib_can_multipath(rnh) && nhop_can_multipath(rnd_add.rnd_nhop) &&
	    nhop_can_multipath(rnd_orig.rnd_nhop))
		error = add_route_mpath(rnh, info, rt, &rnd_add, &rnd_orig, rc);
	else
#endif
	/* Unable to add - another route with the same preference exists */
		error = EEXIST;

	/*
	 * ROUTE_MPATH disabled: failed to add route, free both nhop and rt.
	 * ROUTE_MPATH enabled: original nhop reference is unused in any case,
	 *  free rt only if not _adding_ a new route to the rib (e.g. the case
	 *  when the initial lookup returned an existing route, but then it got
	 *  deleted prior to multipath group insertion, leading to a simple
	 *  non-multipath add as a result).
	 */
	nhop_free(nh);
	if ((error != 0) || rc->rc_cmd != RTM_ADD)
		uma_zfree(V_rtzone, rt);

	return (error);
}
/*
 * Removes route defined by @info from the kernel table specified by @fibnum
 * and sa_family in @info->rti_info[RTAX_DST].
 *
 * Returns 0 on success and fills in operation metadata into @rc.
 */
int
rib_del_route(uint32_t fibnum, struct rt_addrinfo *info, struct rib_cmd_info *rc)
{
	struct rib_head *rnh;
	struct sockaddr *dst_orig, *netmask;
	struct sockaddr_storage mdst;
	int error;

	NET_EPOCH_ASSERT();

	rnh = get_rnh(fibnum, info);
	if (rnh == NULL)
		return (EAFNOSUPPORT);

	bzero(rc, sizeof(struct rib_cmd_info));
	rc->rc_cmd = RTM_DELETE;

	dst_orig = info->rti_info[RTAX_DST];
	netmask = info->rti_info[RTAX_NETMASK];

	if (netmask != NULL) {
		/* Ensure @dst is always properly masked */
		if (dst_orig->sa_len > sizeof(mdst))
			return (EINVAL);
		rt_maskedcopy(dst_orig, (struct sockaddr *)&mdst, netmask);
		info->rti_info[RTAX_DST] = (struct sockaddr *)&mdst;
	}
	error = del_route(rnh, info, rc);
	info->rti_info[RTAX_DST] = dst_orig;

	return (error);
}

/*
 * Conditionally unlinks rtentry matching data inside @info from @rnh.
 * Returns 0 on success with operation result stored in @rc.
 * On error, returns:
 * ESRCH - if the prefix was not found,
 * EADDRINUSE - if trying to delete a higher priority route,
 * ENOENT - if the supplied filter function returned 0 (not matched).
 */
static int
rt_unlinkrte(struct rib_head *rnh, struct rt_addrinfo *info, struct rib_cmd_info *rc)
{
	struct rtentry *rt;
	struct nhop_object *nh;
	struct radix_node *rn;
	struct route_nhop_data rnd;
	int error;

	rt = lookup_prefix(rnh, info, &rnd);
	if (rt == NULL)
		return (ESRCH);

	nh = rt->rt_nhop;
#ifdef ROUTE_MPATH
	if (NH_IS_NHGRP(nh)) {
		error = del_route_mpath(rnh, info, rt,
		    (struct nhgrp_object *)nh, rc);
		return (error);
	}
#endif
	error = check_info_match_nhop(info, rt, nh);
	if (error != 0)
		return (error);

	if (can_override_nhop(info, nh) < 0)
		return (EADDRINUSE);

	/*
	 * Remove the item from the tree and return it.
	 * Complain if it is not there and do no more processing.
	 */
	rn = rnh->rnh_deladdr(info->rti_info[RTAX_DST],
	    info->rti_info[RTAX_NETMASK], &rnh->head);
	if (rn == NULL)
		return (ESRCH);

	if (rn->rn_flags & (RNF_ACTIVE | RNF_ROOT))
		panic("rtrequest delete");

	rt = RNTORT(rn);
	rt->rte_flags &= ~RTF_UP;

	/* Finalize notification */
	rib_bump_gen(rnh);
	rnh->rnh_prefixes--;

	rc->rc_cmd = RTM_DELETE;
	rc->rc_rt = rt;
	rc->rc_nh_old = rt->rt_nhop;
	rc->rc_nh_weight = rt->rt_weight;
	rib_notify(rnh, RIB_NOTIFY_IMMEDIATE, rc);

	return (0);
}

static int
del_route(struct rib_head *rnh, struct rt_addrinfo *info,
    struct rib_cmd_info *rc)
{
	int error;

	RIB_WLOCK(rnh);
	error = rt_unlinkrte(rnh, info, rc);
	RIB_WUNLOCK(rnh);
	if (error != 0)
		return (error);

	rib_notify(rnh, RIB_NOTIFY_DELAYED, rc);

	/*
	 * If the caller wants it, then it can have it,
	 * the entry will be deleted after the end of the current epoch.
	 */
	if (rc->rc_cmd == RTM_DELETE)
		rtfree(rc->rc_rt);
#ifdef ROUTE_MPATH
	else {
		/*
		 * Deleting 1 path may result in RTM_CHANGE to
		 * a different mpath group/nhop.
		 * Free the old mpath group.
		 */
		nhop_free_any(rc->rc_nh_old);
	}
#endif

	return (0);
}
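
/*
 * Deletion sketch (illustrative only), mirroring the addition example
 * above: RTAX_DST plus RTAX_NETMASK identify the prefix; RTAX_GATEWAY
 * (or info.rti_filter) may optionally be set to select a specific path.
 *
 *	bzero(&info, sizeof(info));
 *	info.rti_info[RTAX_DST] = (struct sockaddr *)&dst;
 *	info.rti_info[RTAX_NETMASK] = (struct sockaddr *)&mask;
 *	error = rib_del_route(RT_DEFAULT_FIB, &info, &rc);
 */
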
int
rib_change_route(uint32_t fibnum, struct rt_addrinfo *info,
    struct rib_cmd_info *rc)
{
	RIB_RLOCK_TRACKER;
	struct route_nhop_data rnd_orig;
	struct rib_head *rnh;
	struct rtentry *rt;
	int error;

	NET_EPOCH_ASSERT();

	rnh = get_rnh(fibnum, info);
	if (rnh == NULL)
		return (EAFNOSUPPORT);

	bzero(rc, sizeof(struct rib_cmd_info));
	rc->rc_cmd = RTM_CHANGE;

	/* Check if the updated gateway exists */
	if ((info->rti_flags & RTF_GATEWAY) &&
	    (info->rti_info[RTAX_GATEWAY] == NULL)) {

		/*
		 * route(8) adds RTF_GATEWAY flag if -interface is not set.
		 * Remove RTF_GATEWAY to enforce consistency and maintain
		 * compatibility.
		 */
		info->rti_flags &= ~RTF_GATEWAY;
	}

	/*
	 * Route change is done in multiple steps, with dropping and
	 * reacquiring the lock. When multiple processes change the same
	 * route concurrently, the route may be changed between the steps.
	 * Address it by retrying the operation multiple times before
	 * failing.
	 */

	RIB_RLOCK(rnh);
	rt = (struct rtentry *)rnh->rnh_lookup(info->rti_info[RTAX_DST],
	    info->rti_info[RTAX_NETMASK], &rnh->head);

	if (rt == NULL) {
		RIB_RUNLOCK(rnh);
		return (ESRCH);
	}

	rnd_orig.rnd_nhop = rt->rt_nhop;
	rnd_orig.rnd_weight = rt->rt_weight;

	RIB_RUNLOCK(rnh);

	for (int i = 0; i < RIB_MAX_RETRIES; i++) {
		error = change_route(rnh, info, &rnd_orig, rc);
		if (error != EAGAIN)
			break;
	}

	return (error);
}
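
/*
 * Change sketch (illustrative only): the rt_addrinfo layout matches the
 * addition example; supplying a new RTAX_GATEWAY while keeping RTAX_DST /
 * RTAX_NETMASK rewrites the nexthop of the existing prefix. The retry
 * loop above already handles concurrent modifications, so the caller
 * needs no retry logic of its own.
 *
 *	info.rti_flags = RTF_GATEWAY;
 *	info.rti_info[RTAX_GATEWAY] = (struct sockaddr *)&new_gw;
 *	error = rib_change_route(RT_DEFAULT_FIB, &info, &rc);
 */
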
static int
change_nhop(struct rib_head *rnh, struct rt_addrinfo *info,
    struct nhop_object *nh_orig, struct nhop_object **nh_new)
{
	int error;

	/*
	 * New gateway could require new ifaddr, ifp;
	 * flags may also be different; ifp may be specified
	 * by ll sockaddr when protocol address is ambiguous
	 */
	if (((nh_orig->nh_flags & NHF_GATEWAY) &&
	    info->rti_info[RTAX_GATEWAY] != NULL) ||
	    info->rti_info[RTAX_IFP] != NULL ||
	    (info->rti_info[RTAX_IFA] != NULL &&
	     !sa_equal(info->rti_info[RTAX_IFA], nh_orig->nh_ifa->ifa_addr))) {
		error = rt_getifa_fib(info, rnh->rib_fibnum);

		if (error != 0) {
			info->rti_ifa = NULL;
			return (error);
		}
	}

	error = nhop_create_from_nhop(rnh, nh_orig, info, nh_new);
	info->rti_ifa = NULL;

	return (error);
}

#ifdef ROUTE_MPATH
static int
change_mpath_route(struct rib_head *rnh, struct rt_addrinfo *info,
    struct route_nhop_data *rnd_orig, struct rib_cmd_info *rc)
{
	int error = 0;
	struct nhop_object *nh_orig, *nh_new;
	struct route_nhop_data rnd_new;
	struct weightened_nhop *wn = NULL, *wn_new;
	uint32_t num_nhops;

	nh_orig = rnd_orig->rnd_nhop;
	wn = nhgrp_get_nhops((struct nhgrp_object *)nh_orig, &num_nhops);
	nh_orig = NULL;
	for (int i = 0; i < num_nhops; i++) {
		if (check_info_match_nhop(info, NULL, wn[i].nh) == 0) {
			nh_orig = wn[i].nh;
			break;
		}
	}

	if (nh_orig == NULL)
		return (ESRCH);

	error = change_nhop(rnh, info, nh_orig, &nh_new);
	if (error != 0)
		return (error);

	wn_new = mallocarray(num_nhops, sizeof(struct weightened_nhop),
	    M_TEMP, M_NOWAIT | M_ZERO);
	if (wn_new == NULL) {
		nhop_free(nh_new);
		return (EAGAIN);
	}

	/* Update the copy, not the group-owned array returned above. */
	memcpy(wn_new, wn, num_nhops * sizeof(struct weightened_nhop));
	for (int i = 0; i < num_nhops; i++) {
		if (wn_new[i].nh == nh_orig) {
			wn_new[i].nh = nh_new;
			wn_new[i].weight = get_info_weight(info,
			    rnd_orig->rnd_weight);
			break;
		}
	}

	error = nhgrp_get_group(rnh, wn_new, num_nhops, &rnd_new);
	nhop_free(nh_new);
	free(wn_new, M_TEMP);

	if (error != 0)
		return (error);

	error = change_route_conditional(rnh, NULL, info, rnd_orig, &rnd_new, rc);

	return (error);
}
#endif

static int
change_route(struct rib_head *rnh, struct rt_addrinfo *info,
    struct route_nhop_data *rnd_orig, struct rib_cmd_info *rc)
{
	int error = 0;
	struct nhop_object *nh_orig;
	struct route_nhop_data rnd_new;

	nh_orig = rnd_orig->rnd_nhop;
	if (nh_orig == NULL)
		return (ESRCH);

#ifdef ROUTE_MPATH
	if (NH_IS_NHGRP(nh_orig))
		return (change_mpath_route(rnh, info, rnd_orig, rc));
#endif

	rnd_new.rnd_weight = get_info_weight(info, rnd_orig->rnd_weight);
	error = change_nhop(rnh, info, nh_orig, &rnd_new.rnd_nhop);
	if (error != 0)
		return (error);
	error = change_route_conditional(rnh, NULL, info, rnd_orig, &rnd_new, rc);

	return (error);
}

/*
 * Inserts @rt with nhop data from @rnd into @rnh.
 * Returns 0 on success and stores operation results in @rc.
 */
static int
add_route_nhop(struct rib_head *rnh, struct rtentry *rt,
    struct rt_addrinfo *info, struct route_nhop_data *rnd,
    struct rib_cmd_info *rc)
{
	struct sockaddr *ndst, *netmask;
	struct radix_node *rn;
	int error = 0;

	RIB_WLOCK_ASSERT(rnh);

	ndst = (struct sockaddr *)rt_key(rt);
	netmask = info->rti_info[RTAX_NETMASK];

	rt->rt_nhop = rnd->rnd_nhop;
	rt->rt_weight = rnd->rnd_weight;
	rn = rnh->rnh_addaddr(ndst, netmask, &rnh->head, rt->rt_nodes);

	if (rn != NULL) {
		if (rt->rt_expire > 0)
			tmproutes_update(rnh, rt);

		/* Finalize notification */
		rib_bump_gen(rnh);
		rnh->rnh_prefixes++;

		rc->rc_cmd = RTM_ADD;
		rc->rc_rt = rt;
		rc->rc_nh_old = NULL;
		rc->rc_nh_new = rnd->rnd_nhop;
		rc->rc_nh_weight = rnd->rnd_weight;

		rib_notify(rnh, RIB_NOTIFY_IMMEDIATE, rc);
	} else {
		/* Existing route or memory allocation failure */
		error = EEXIST;
	}

	return (error);
}

/*
 * Switches @rt nhop/weight to the ones specified in @rnd.
 * Conditionally sets rt_expire if set in @info.
 * Returns 0 on success.
 */
int
change_route_nhop(struct rib_head *rnh, struct rtentry *rt,
    struct rt_addrinfo *info, struct route_nhop_data *rnd,
    struct rib_cmd_info *rc)
{
	struct nhop_object *nh_orig;

	RIB_WLOCK_ASSERT(rnh);

	nh_orig = rt->rt_nhop;

	if (rnd->rnd_nhop != NULL) {
		/* Changing expiration & nexthop & weight to a new one */
		rt_set_expire_info(rt, info);
		rt->rt_nhop = rnd->rnd_nhop;
		rt->rt_weight = rnd->rnd_weight;
		if (rt->rt_expire > 0)
			tmproutes_update(rnh, rt);
	} else {
		/* Route deletion requested. */
		struct sockaddr *ndst, *netmask;
		struct radix_node *rn;

		ndst = (struct sockaddr *)rt_key(rt);
		netmask = info->rti_info[RTAX_NETMASK];
		rn = rnh->rnh_deladdr(ndst, netmask, &rnh->head);
		if (rn == NULL)
			return (ESRCH);
		rt = RNTORT(rn);
		rt->rte_flags &= ~RTF_UP;
	}

	/* Finalize notification */
	rib_bump_gen(rnh);
	if (rnd->rnd_nhop == NULL)
		rnh->rnh_prefixes--;

	rc->rc_cmd = (rnd->rnd_nhop != NULL) ? RTM_CHANGE : RTM_DELETE;
	rc->rc_rt = rt;
	rc->rc_nh_old = nh_orig;
	rc->rc_nh_new = rnd->rnd_nhop;
	rc->rc_nh_weight = rnd->rnd_weight;

	rib_notify(rnh, RIB_NOTIFY_IMMEDIATE, rc);

	return (0);
}

/*
 * Conditionally updates route nhop/weight IFF the data in @rnd_orig is
 * consistent with the current route data.
 * Nexthop in @rnd_new is consumed.
 */
int
change_route_conditional(struct rib_head *rnh, struct rtentry *rt,
    struct rt_addrinfo *info, struct route_nhop_data *rnd_orig,
    struct route_nhop_data *rnd_new, struct rib_cmd_info *rc)
{
	struct rtentry *rt_new;
	int error = 0;

	RIB_WLOCK(rnh);

	rt_new = (struct rtentry *)rnh->rnh_lookup(info->rti_info[RTAX_DST],
	    info->rti_info[RTAX_NETMASK], &rnh->head);

	if (rt_new == NULL) {
		if (rnd_orig->rnd_nhop == NULL)
			error = add_route_nhop(rnh, rt, info, rnd_new, rc);
		else {
			/*
			 * Prefix does not exist, which was not our assumption.
			 * Update @rnd_orig with the new data and return.
			 */
			rnd_orig->rnd_nhop = NULL;
			rnd_orig->rnd_weight = 0;
			error = EAGAIN;
		}
	} else {
		/* Prefix exists, try to update */
		if (rnd_orig->rnd_nhop == rt_new->rt_nhop) {
			/*
			 * Nhop/mpath group hasn't changed. Flip
			 * to the new precalculated one and return.
			 */
			error = change_route_nhop(rnh, rt_new, info, rnd_new, rc);
		} else {
			/* Update and retry */
			rnd_orig->rnd_nhop = rt_new->rt_nhop;
			rnd_orig->rnd_weight = rt_new->rt_weight;
			error = EAGAIN;
		}
	}

	RIB_WUNLOCK(rnh);

	if (error == 0) {
		rib_notify(rnh, RIB_NOTIFY_DELAYED, rc);

		if (rnd_orig->rnd_nhop != NULL)
			nhop_free_any(rnd_orig->rnd_nhop);

	} else {
		if (rnd_new->rnd_nhop != NULL)
			nhop_free_any(rnd_new->rnd_nhop);
	}

	return (error);
}

/*
 * Performs modification of the routing table specified by @action.
 * Table is specified by @fibnum and sa_family in @info->rti_info[RTAX_DST].
 * Needs to be run in network epoch.
 *
 * Returns 0 on success and fills in @rc with the action result.
 */
int
rib_action(uint32_t fibnum, int action, struct rt_addrinfo *info,
    struct rib_cmd_info *rc)
{
	int error;

	switch (action) {
	case RTM_ADD:
		error = rib_add_route(fibnum, info, rc);
		break;
	case RTM_DELETE:
		error = rib_del_route(fibnum, info, rc);
		break;
	case RTM_CHANGE:
		error = rib_change_route(fibnum, info, rc);
		break;
	default:
		error = ENOTSUP;
	}

	return (error);
}

struct rt_delinfo
{
	struct rt_addrinfo info;
	struct rib_head *rnh;
	struct rtentry *head;
	struct rib_cmd_info rc;
};

/*
 * Conditionally unlinks @rn from the radix tree based
 * on info data passed in @arg.
 */
static int
rt_checkdelroute(struct radix_node *rn, void *arg)
{
	struct rt_delinfo *di;
	struct rt_addrinfo *info;
	struct rtentry *rt;

	di = (struct rt_delinfo *)arg;
	rt = (struct rtentry *)rn;
	info = &di->info;

	info->rti_info[RTAX_DST] = rt_key(rt);
	info->rti_info[RTAX_NETMASK] = rt_mask(rt);

	if (rt_unlinkrte(di->rnh, info, &di->rc) != 0)
		return (0);

	/*
	 * Add deleted rtentries to the list to GC them
	 *  after dropping the lock.
	 *
	 * XXX: Delayed notifications not implemented
	 *  for nexthop updates.
	 */
	if (di->rc.rc_cmd == RTM_DELETE) {
		/* Add to the list and return */
		rt->rt_chain = di->head;
		di->head = rt;
#ifdef ROUTE_MPATH
	} else {
		/*
		 * RTM_CHANGE to a different nexthop or nexthop group.
		 * Free the old multipath group.
		 */
		nhop_free_any(di->rc.rc_nh_old);
#endif
	}

	return (0);
}

/*
 * Iterates over a routing table specified by @fibnum and @family and
 *  deletes elements marked by @filter_f.
 * @fibnum: rtable id
 * @family: AF_ address family
 * @filter_f: function returning non-zero value for items to delete
 * @arg: data to pass to the @filter_f function
 * @report: true if rtsock notification is needed.
 */
void
rib_walk_del(u_int fibnum, int family, rib_filter_f_t *filter_f, void *arg, bool report)
{
	struct rib_head *rnh;
	struct rt_delinfo di;
	struct rtentry *rt;
	struct nhop_object *nh;
	struct epoch_tracker et;

	rnh = rt_tables_get_rnh(fibnum, family);
	if (rnh == NULL)
		return;

	bzero(&di, sizeof(di));
	di.info.rti_filter = filter_f;
	di.info.rti_filterdata = arg;
	di.rnh = rnh;
	di.rc.rc_cmd = RTM_DELETE;

	NET_EPOCH_ENTER(et);

	RIB_WLOCK(rnh);
	rnh->rnh_walktree(&rnh->head, rt_checkdelroute, &di);
	RIB_WUNLOCK(rnh);

	/* We might have something to reclaim. */
	bzero(&di.rc, sizeof(di.rc));
	di.rc.rc_cmd = RTM_DELETE;
	while (di.head != NULL) {
		rt = di.head;
		di.head = rt->rt_chain;
		rt->rt_chain = NULL;
		nh = rt->rt_nhop;

		di.rc.rc_rt = rt;
		di.rc.rc_nh_old = nh;
		rib_notify(rnh, RIB_NOTIFY_DELAYED, &di.rc);

		/* TODO std rt -> rt_addrinfo export */
		di.info.rti_info[RTAX_DST] = rt_key(rt);
		di.info.rti_info[RTAX_NETMASK] = rt_mask(rt);

		if (report) {
#ifdef ROUTE_MPATH
			struct nhgrp_object *nhg;
			struct weightened_nhop *wn;
			uint32_t num_nhops;

			if (NH_IS_NHGRP(nh)) {
				nhg = (struct nhgrp_object *)nh;
				wn = nhgrp_get_nhops(nhg, &num_nhops);
				for (int i = 0; i < num_nhops; i++)
					rt_routemsg(RTM_DELETE, rt, wn[i].nh, fibnum);
			} else
#endif
			rt_routemsg(RTM_DELETE, rt, nh, fibnum);
		}
		rtfree(rt);
	}

	NET_EPOCH_EXIT(et);
}
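
/*
 * Filter sketch (illustrative only; the callback name is hypothetical):
 * rib_walk_del() calls @filter_f for every prefix and unlinks those for
 * which it returns non-zero. Deleting all routes pointing to a given
 * interface could look like:
 *
 *	static int
 *	match_ifp_filter(const struct rtentry *rt, const struct nhop_object *nh,
 *	    void *arg)
 *	{
 *		return (nh->nh_ifp == (struct ifnet *)arg);
 *	}
 *
 *	rib_walk_del(fibnum, AF_INET, match_ifp_filter, ifp, true);
 */
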
static int
rt_delete_unconditional(struct radix_node *rn, void *arg)
{
	struct rtentry *rt = RNTORT(rn);
	struct rib_head *rnh = (struct rib_head *)arg;

	rn = rnh->rnh_deladdr(rt_key(rt), rt_mask(rt), &rnh->head);
	if (RNTORT(rn) == rt)
		rtfree(rt);

	return (0);
}

/*
 * Removes all routes from the routing table without executing notifications.
 * rtentries will be removed after the end of the current epoch.
 */
static void
rib_flush_routes(struct rib_head *rnh)
{
	RIB_WLOCK(rnh);
	rnh->rnh_walktree(&rnh->head, rt_delete_unconditional, rnh);
	RIB_WUNLOCK(rnh);
}

void
rib_flush_routes_family(int family)
{
	struct rib_head *rnh;

	for (uint32_t fibnum = 0; fibnum < rt_numfibs; fibnum++) {
		if ((rnh = rt_tables_get_rnh(fibnum, family)) != NULL)
			rib_flush_routes(rnh);
	}
}

const char *
rib_print_family(int family)
{
	switch (family) {
	case AF_INET:
		return ("inet");
	case AF_INET6:
		return ("inet6");
	case AF_LINK:
		return ("link");
	}
	return ("unknown");
}

static void
rib_notify(struct rib_head *rnh, enum rib_subscription_type type,
    struct rib_cmd_info *rc)
{
	struct rib_subscription *rs;

	CK_STAILQ_FOREACH(rs, &rnh->rnh_subscribers, next) {
		if (rs->type == type)
			rs->func(rnh, rc, rs->arg);
	}
}

static struct rib_subscription *
allocate_subscription(rib_subscription_cb_t *f, void *arg,
    enum rib_subscription_type type, bool waitok)
{
	struct rib_subscription *rs;
	int flags = M_ZERO | (waitok ? M_WAITOK : M_NOWAIT);

	rs = malloc(sizeof(struct rib_subscription), M_RTABLE, flags);
	if (rs == NULL)
		return (NULL);

	rs->func = f;
	rs->arg = arg;
	rs->type = type;

	return (rs);
}

/*
 * Subscribe for the changes in the routing table specified by @fibnum and
 *  @family.
 *
 * Returns pointer to the subscription structure on success.
 */
struct rib_subscription *
rib_subscribe(uint32_t fibnum, int family, rib_subscription_cb_t *f, void *arg,
    enum rib_subscription_type type, bool waitok)
{
	struct rib_head *rnh;
	struct epoch_tracker et;

	NET_EPOCH_ENTER(et);
	KASSERT((fibnum < rt_numfibs), ("%s: bad fibnum", __func__));
	rnh = rt_tables_get_rnh(fibnum, family);
	NET_EPOCH_EXIT(et);

	return (rib_subscribe_internal(rnh, f, arg, type, waitok));
}

struct rib_subscription *
rib_subscribe_internal(struct rib_head *rnh, rib_subscription_cb_t *f, void *arg,
    enum rib_subscription_type type, bool waitok)
{
	struct rib_subscription *rs;
	struct epoch_tracker et;

	if ((rs = allocate_subscription(f, arg, type, waitok)) == NULL)
		return (NULL);
	rs->rnh = rnh;

	NET_EPOCH_ENTER(et);
	RIB_WLOCK(rnh);
	CK_STAILQ_INSERT_HEAD(&rnh->rnh_subscribers, rs, next);
	RIB_WUNLOCK(rnh);
	NET_EPOCH_EXIT(et);

	return (rs);
}

struct rib_subscription *
rib_subscribe_locked(struct rib_head *rnh, rib_subscription_cb_t *f, void *arg,
    enum rib_subscription_type type)
{
	struct rib_subscription *rs;

	NET_EPOCH_ASSERT();
	RIB_WLOCK_ASSERT(rnh);

	if ((rs = allocate_subscription(f, arg, type, false)) == NULL)
		return (NULL);
	rs->rnh = rnh;

	CK_STAILQ_INSERT_HEAD(&rnh->rnh_subscribers, rs, next);

	return (rs);
}
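
/*
 * Subscription sketch (illustrative only; the callback name is
 * hypothetical): the callback runs within the net epoch with the table,
 * the operation descriptor and the user argument.
 *
 *	static void
 *	my_rib_cb(struct rib_head *rnh, struct rib_cmd_info *rc, void *arg)
 *	{
 *		if (rc->rc_cmd == RTM_ADD)
 *			printf("prefix added in fib %u\n", rnh->rib_fibnum);
 *	}
 *
 *	rs = rib_subscribe(fibnum, AF_INET, my_rib_cb, NULL,
 *	    RIB_NOTIFY_DELAYED, true);
 */
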
/*
 * Removes rtable subscription @rs from the routing table.
 * Needs to be run in network epoch.
 */
void
rib_unsubscribe(struct rib_subscription *rs)
{
	struct rib_head *rnh = rs->rnh;

	NET_EPOCH_ASSERT();

	RIB_WLOCK(rnh);
	CK_STAILQ_REMOVE(&rnh->rnh_subscribers, rs, rib_subscription, next);
	RIB_WUNLOCK(rnh);

	epoch_call(net_epoch_preempt, destroy_subscription_epoch,
	    &rs->epoch_ctx);
}

void
rib_unsubscribe_locked(struct rib_subscription *rs)
{
	struct rib_head *rnh = rs->rnh;

	NET_EPOCH_ASSERT();
	RIB_WLOCK_ASSERT(rnh);

	CK_STAILQ_REMOVE(&rnh->rnh_subscribers, rs, rib_subscription, next);

	epoch_call(net_epoch_preempt, destroy_subscription_epoch,
	    &rs->epoch_ctx);
}

/*
 * Epoch callback indicating subscription is safe to destroy
 */
static void
destroy_subscription_epoch(epoch_context_t ctx)
{
	struct rib_subscription *rs;

	rs = __containerof(ctx, struct rib_subscription, epoch_ctx);

	free(rs, M_RTABLE);
}

void
rib_init_subscriptions(struct rib_head *rnh)
{

	CK_STAILQ_INIT(&rnh->rnh_subscribers);
}

void
rib_destroy_subscriptions(struct rib_head *rnh)
{
	struct rib_subscription *rs;
	struct epoch_tracker et;

	NET_EPOCH_ENTER(et);
	RIB_WLOCK(rnh);
	while ((rs = CK_STAILQ_FIRST(&rnh->rnh_subscribers)) != NULL) {
		CK_STAILQ_REMOVE_HEAD(&rnh->rnh_subscribers, next);
		epoch_call(net_epoch_preempt, destroy_subscription_epoch,
		    &rs->epoch_ctx);
	}
	RIB_WUNLOCK(rnh);
	NET_EPOCH_EXIT(et);
}