/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2020 Alexander V. Chernikov
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_route.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rmlock.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/vnet.h>
#include <net/route.h>
#include <net/route/route_ctl.h>
#include <net/route/route_var.h>
#include <net/route/nhop_utils.h>
#include <net/route/nhop.h>
#include <net/route/nhop_var.h>
#include <netinet/in.h>
#include <netinet6/scope6_var.h>

#include <vm/uma.h>

/*
 * This file contains control plane routing table functions.
 *
 * All functions assume they are called within the net epoch.
 */

struct rib_subscription {
	CK_STAILQ_ENTRY(rib_subscription)	next;
	rib_subscription_cb_t			*func;
	void					*arg;
	struct rib_head				*rnh;
	enum rib_subscription_type		type;
	struct epoch_context			epoch_ctx;
};

static int add_route(struct rib_head *rnh, struct rt_addrinfo *info,
    struct rib_cmd_info *rc);
static int add_route_nhop(struct rib_head *rnh, struct rtentry *rt,
    struct rt_addrinfo *info, struct route_nhop_data *rnd,
    struct rib_cmd_info *rc);
static int del_route(struct rib_head *rnh, struct rt_addrinfo *info,
    struct rib_cmd_info *rc);
static int change_route(struct rib_head *rnh, struct rt_addrinfo *info,
    struct route_nhop_data *nhd_orig, struct rib_cmd_info *rc);

static int rt_unlinkrte(struct rib_head *rnh, struct rt_addrinfo *info,
    struct rib_cmd_info *rc);

static void rib_notify(struct rib_head *rnh, enum rib_subscription_type type,
    struct rib_cmd_info *rc);

static void destroy_subscription_epoch(epoch_context_t ctx);
#ifdef ROUTE_MPATH
static bool rib_can_multipath(struct rib_head *rh);
#endif

/* Per-vnet multipath routing configuration */
SYSCTL_DECL(_net_route);
#define	V_rib_route_multipath	VNET(rib_route_multipath)
#ifdef ROUTE_MPATH
#define	_MP_FLAGS	CTLFLAG_RW
#else
#define	_MP_FLAGS	CTLFLAG_RD
#endif
VNET_DEFINE(u_int, rib_route_multipath) = 1;
SYSCTL_UINT(_net_route, OID_AUTO, multipath, _MP_FLAGS | CTLFLAG_VNET,
    &VNET_NAME(rib_route_multipath), 0, "Enable route multipath");
#undef _MP_FLAGS

#if defined(INET) && defined(INET6)
FEATURE(ipv4_rfc5549_support, "Route IPv4 packets via IPv6 nexthops");
#define	V_rib_route_ipv6_nexthop	VNET(rib_route_ipv6_nexthop)
VNET_DEFINE(u_int, rib_route_ipv6_nexthop) = 1;
SYSCTL_UINT(_net_route, OID_AUTO, ipv6_nexthop, CTLFLAG_RW | CTLFLAG_VNET,
    &VNET_NAME(rib_route_ipv6_nexthop), 0, "Enable IPv4 route via IPv6 Next Hop address");
#endif

/* Routing table UMA zone */
VNET_DEFINE_STATIC(uma_zone_t, rtzone);
#define	V_rtzone	VNET(rtzone)

/* Debug bits */
SYSCTL_NODE(_net_route, OID_AUTO, debug, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
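
/*
 * Usage note (a sketch, not part of the original code): the multipath and
 * ipv6_nexthop knobs above are per-vnet; net.route.multipath is writable
 * only when the kernel is built with ROUTE_MPATH. From userland:
 *
 *	sysctl net.route.multipath		(read multipath state)
 *	sysctl net.route.multipath=0		(disable, ROUTE_MPATH kernels)
 *	sysctl net.route.ipv6_nexthop=0		(disable RFC 5549 nexthops)
 */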

void
vnet_rtzone_init(void)
{

	V_rtzone = uma_zcreate("rtentry", sizeof(struct rtentry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
}

#ifdef VIMAGE
void
vnet_rtzone_destroy(void)
{

	uma_zdestroy(V_rtzone);
}
#endif

static void
destroy_rtentry(struct rtentry *rt)
{
#ifdef VIMAGE
	struct nhop_object *nh = rt->rt_nhop;

	/*
	 * At this point rnh and nh_control may already be freed.
	 * The nhop interface may have been migrated to a different vnet.
	 * Use the vnet stored in the nexthop to delete the entry.
	 */
#ifdef ROUTE_MPATH
	if (NH_IS_NHGRP(nh)) {
		struct weightened_nhop *wn;
		uint32_t num_nhops;
		wn = nhgrp_get_nhops((struct nhgrp_object *)nh, &num_nhops);
		nh = wn[0].nh;
	}
#endif
	CURVNET_SET(nhop_get_vnet(nh));
#endif

	/* Unreference nexthop */
	nhop_free_any(rt->rt_nhop);

	uma_zfree(V_rtzone, rt);

	CURVNET_RESTORE();
}

/*
 * Epoch callback indicating rtentry is safe to destroy
 */
static void
destroy_rtentry_epoch(epoch_context_t ctx)
{
	struct rtentry *rt;

	rt = __containerof(ctx, struct rtentry, rt_epoch_ctx);

	destroy_rtentry(rt);
}

/*
 * Schedule rtentry deletion
 */
static void
rtfree(struct rtentry *rt)
{

	KASSERT(rt != NULL, ("%s: NULL rt", __func__));

	epoch_call(net_epoch_preempt, destroy_rtentry_epoch,
	    &rt->rt_epoch_ctx);
}

static struct rib_head *
get_rnh(uint32_t fibnum, const struct rt_addrinfo *info)
{
	struct rib_head *rnh;
	struct sockaddr *dst;

	KASSERT((fibnum < rt_numfibs), ("rib_add_route: bad fibnum"));

	dst = info->rti_info[RTAX_DST];
	rnh = rt_tables_get_rnh(fibnum, dst->sa_family);

	return (rnh);
}

#if defined(INET) && defined(INET6)
static bool
rib_can_ipv6_nexthop_address(struct rib_head *rh)
{
	int result;

	CURVNET_SET(rh->rib_vnet);
	result = !!V_rib_route_ipv6_nexthop;
	CURVNET_RESTORE();

	return (result);
}
#endif

#ifdef ROUTE_MPATH
static bool
rib_can_multipath(struct rib_head *rh)
{
	int result;

	CURVNET_SET(rh->rib_vnet);
	result = !!V_rib_route_multipath;
	CURVNET_RESTORE();

	return (result);
}

/*
 * Check if nhop is multipath-eligible.
 * Avoid nhops without gateways and redirects.
 *
 * Returns true for a multipath-eligible nexthop,
 * false otherwise.
 */
bool
nhop_can_multipath(const struct nhop_object *nh)
{

	if ((nh->nh_flags & NHF_MULTIPATH) != 0)
		return (true);
	if ((nh->nh_flags & NHF_GATEWAY) == 0)
		return (false);
	if ((nh->nh_flags & NHF_REDIRECT) != 0)
		return (false);

	return (true);
}
#endif

static int
get_info_weight(const struct rt_addrinfo *info, uint32_t default_weight)
{
	uint32_t weight;

	if (info->rti_mflags & RTV_WEIGHT)
		weight = info->rti_rmx->rmx_weight;
	else
		weight = default_weight;
	/* Keep the upper byte for admin distance purposes */
	if (weight > RT_MAX_WEIGHT)
		weight = RT_MAX_WEIGHT;
	else if (weight == 0)
		weight = default_weight;

	return (weight);
}

bool
rt_is_host(const struct rtentry *rt)
{

	return (rt->rte_flags & RTF_HOST);
}

sa_family_t
rt_get_family(const struct rtentry *rt)
{
	const struct sockaddr *dst;

	dst = (const struct sockaddr *)rt_key_const(rt);

	return (dst->sa_family);
}

/*
 * Returns pointer to nexthop or nexthop group
 * associated with @rt
 */
struct nhop_object *
rt_get_raw_nhop(const struct rtentry *rt)
{

	return (rt->rt_nhop);
}
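
/*
 * Example (a sketch for illustration): with ROUTE_MPATH the object returned
 * by rt_get_raw_nhop() may be a nexthop group, so callers needing individual
 * nexthops expand it the same way destroy_rtentry() above does:
 *
 *	struct nhop_object *nh = rt_get_raw_nhop(rt);
 *	if (NH_IS_NHGRP(nh)) {
 *		struct weightened_nhop *wn;
 *		uint32_t num_nhops;
 *
 *		wn = nhgrp_get_nhops((struct nhgrp_object *)nh, &num_nhops);
 *		nh = wn[0].nh;	(first path in the group)
 *	}
 */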

#ifdef INET
/*
 * Stores IPv4 address and prefix length of @rt inside
 *  @paddr and @plen.
 * @pscopeid is currently always set to 0.
 */
void
rt_get_inet_prefix_plen(const struct rtentry *rt, struct in_addr *paddr,
    int *plen, uint32_t *pscopeid)
{
	const struct sockaddr_in *dst;

	dst = (const struct sockaddr_in *)rt_key_const(rt);
	KASSERT((dst->sin_family == AF_INET),
	    ("rt family is %d, not inet", dst->sin_family));
	*paddr = dst->sin_addr;
	dst = (const struct sockaddr_in *)rt_mask_const(rt);
	if (dst == NULL)
		*plen = 32;
	else
		*plen = bitcount32(dst->sin_addr.s_addr);
	*pscopeid = 0;
}

/*
 * Stores IPv4 address and prefix mask of @rt inside
 *  @paddr and @pmask. Sets mask to INADDR_BROADCAST for host routes.
 * @pscopeid is currently always set to 0.
 */
void
rt_get_inet_prefix_pmask(const struct rtentry *rt, struct in_addr *paddr,
    struct in_addr *pmask, uint32_t *pscopeid)
{
	const struct sockaddr_in *dst;

	dst = (const struct sockaddr_in *)rt_key_const(rt);
	KASSERT((dst->sin_family == AF_INET),
	    ("rt family is %d, not inet", dst->sin_family));
	*paddr = dst->sin_addr;
	dst = (const struct sockaddr_in *)rt_mask_const(rt);
	if (dst == NULL)
		pmask->s_addr = INADDR_BROADCAST;
	else
		*pmask = dst->sin_addr;
	*pscopeid = 0;
}
#endif

#ifdef INET6
static int
inet6_get_plen(const struct in6_addr *addr)
{

	return (bitcount32(addr->s6_addr32[0]) + bitcount32(addr->s6_addr32[1]) +
	    bitcount32(addr->s6_addr32[2]) + bitcount32(addr->s6_addr32[3]));
}

/*
 * Stores IPv6 address and prefix length of @rt inside
 *  @paddr and @plen. Addresses are returned in de-embedded form.
 * Scopeid is set to 0 for non-LL addresses.
 */
void
rt_get_inet6_prefix_plen(const struct rtentry *rt, struct in6_addr *paddr,
    int *plen, uint32_t *pscopeid)
{
	const struct sockaddr_in6 *dst;

	dst = (const struct sockaddr_in6 *)rt_key_const(rt);
	KASSERT((dst->sin6_family == AF_INET6),
	    ("rt family is %d, not inet6", dst->sin6_family));
	if (IN6_IS_SCOPE_LINKLOCAL(&dst->sin6_addr))
		in6_splitscope(&dst->sin6_addr, paddr, pscopeid);
	else
		*paddr = dst->sin6_addr;
	dst = (const struct sockaddr_in6 *)rt_mask_const(rt);
	if (dst == NULL)
		*plen = 128;
	else
		*plen = inet6_get_plen(&dst->sin6_addr);
}

/*
 * Stores IPv6 address and prefix mask of @rt inside
 *  @paddr and @pmask. Addresses are returned in de-embedded form.
 * Scopeid is set to 0 for non-LL addresses.
 */
void
rt_get_inet6_prefix_pmask(const struct rtentry *rt, struct in6_addr *paddr,
    struct in6_addr *pmask, uint32_t *pscopeid)
{
	const struct sockaddr_in6 *dst;

	dst = (const struct sockaddr_in6 *)rt_key_const(rt);
	KASSERT((dst->sin6_family == AF_INET6),
	    ("rt family is %d, not inet6", dst->sin6_family));
	if (IN6_IS_SCOPE_LINKLOCAL(&dst->sin6_addr))
		in6_splitscope(&dst->sin6_addr, paddr, pscopeid);
	else
		*paddr = dst->sin6_addr;
	dst = (const struct sockaddr_in6 *)rt_mask_const(rt);
	if (dst == NULL)
		memset(pmask, 0xFF, sizeof(struct in6_addr));
	else
		*pmask = dst->sin6_addr;
}
#endif
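
/*
 * Example (a sketch): the prefix lengths above are computed by popcounting
 * the mask, which assumes contiguous netmasks; e.g. for a 255.255.255.0
 * IPv4 mask:
 *
 *	struct in_addr mask = { .s_addr = htonl(0xffffff00) };
 *	int plen = bitcount32(mask.s_addr);	(plen == 24)
 */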

static void
rt_set_expire_info(struct rtentry *rt, const struct rt_addrinfo *info)
{

	/* Kernel -> userland timebase conversion. */
	if (info->rti_mflags & RTV_EXPIRE)
		rt->rt_expire = info->rti_rmx->rmx_expire ?
		    info->rti_rmx->rmx_expire - time_second + time_uptime : 0;
}

/*
 * Check if specified @gw matches gw data in the nexthop @nh.
 *
 * Returns true if matches, false otherwise.
 */
bool
match_nhop_gw(const struct nhop_object *nh, const struct sockaddr *gw)
{

	if (nh->gw_sa.sa_family != gw->sa_family)
		return (false);

	switch (gw->sa_family) {
	case AF_INET:
		return (nh->gw4_sa.sin_addr.s_addr ==
		    ((const struct sockaddr_in *)gw)->sin_addr.s_addr);
	case AF_INET6:
		{
			const struct sockaddr_in6 *gw6;
			gw6 = (const struct sockaddr_in6 *)gw;

			/*
			 * Currently (2020-09) IPv6 gws in kernel have their
			 * scope embedded. Once this becomes false, this code
			 * has to be revisited.
			 */
			if (IN6_ARE_ADDR_EQUAL(&nh->gw6_sa.sin6_addr,
			    &gw6->sin6_addr))
				return (true);
			return (false);
		}
	case AF_LINK:
		{
			const struct sockaddr_dl *sdl;
			sdl = (const struct sockaddr_dl *)gw;
			return (nh->gwl_sa.sdl_index == sdl->sdl_index);
		}
	default:
		return (memcmp(&nh->gw_sa, gw, nh->gw_sa.sa_len) == 0);
	}

	/* NOTREACHED */
	return (false);
}

/*
 * Checks if data in @info matches nexthop @nh.
 *
 * Returns 0 on success,
 * ESRCH if not matched,
 * ENOENT if filter function returned false
 */
int
check_info_match_nhop(const struct rt_addrinfo *info, const struct rtentry *rt,
    const struct nhop_object *nh)
{
	const struct sockaddr *gw = info->rti_info[RTAX_GATEWAY];

	if (info->rti_filter != NULL) {
		if (info->rti_filter(rt, nh, info->rti_filterdata) == 0)
			return (ENOENT);
		else
			return (0);
	}
	if ((gw != NULL) && !match_nhop_gw(nh, gw))
		return (ESRCH);

	return (0);
}

/*
 * Checks if nexthop @nh can be rewritten by data in @info because
 *  of higher "priority". Currently the only case for such scenario
 *  is kernel installing interface routes, marked by RTF_PINNED flag.
 *
 * Returns:
 * 1 if @info data has higher priority
 * 0 if priority is the same
 * -1 if priority is lower
 */
int
can_override_nhop(const struct rt_addrinfo *info, const struct nhop_object *nh)
{

	if (info->rti_flags & RTF_PINNED) {
		return (NH_IS_PINNED(nh)) ? 0 : 1;
	} else {
		return (NH_IS_PINNED(nh)) ? -1 : 0;
	}
}

/*
 * Runs exact prefix match based on @dst and @netmask.
 * Returns matched @rtentry if found or NULL.
 * If rtentry was found, saves nexthop / weight value into @rnd.
 */
static struct rtentry *
lookup_prefix_bysa(struct rib_head *rnh, const struct sockaddr *dst,
    const struct sockaddr *netmask, struct route_nhop_data *rnd)
{
	struct rtentry *rt;

	RIB_LOCK_ASSERT(rnh);

	rt = (struct rtentry *)rnh->rnh_lookup(__DECONST(void *, dst),
	    __DECONST(void *, netmask), &rnh->head);
	if (rt != NULL) {
		rnd->rnd_nhop = rt->rt_nhop;
		rnd->rnd_weight = rt->rt_weight;
	} else {
		rnd->rnd_nhop = NULL;
		rnd->rnd_weight = 0;
	}

	return (rt);
}
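
/*
 * Example (a sketch of a hypothetical caller): lookup_prefix_bysa() requires
 * the RIB lock, and the returned nexthop data is only stable while the lock
 * (or the net epoch) is held:
 *
 *	RIB_RLOCK_TRACKER;
 *	struct route_nhop_data rnd;
 *	struct rtentry *rt;
 *
 *	RIB_RLOCK(rnh);
 *	rt = lookup_prefix_bysa(rnh, dst, netmask, &rnd);
 *	RIB_RUNLOCK(rnh);
 *	if (rt != NULL)
 *		(use rnd.rnd_nhop / rnd.rnd_weight)
 */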

/*
 * Runs exact prefix match based on dst/netmask from @info.
 * Assumes RIB lock is held.
 * Returns matched @rtentry if found or NULL.
 * If rtentry was found, saves nexthop / weight value into @rnd.
 */
struct rtentry *
lookup_prefix(struct rib_head *rnh, const struct rt_addrinfo *info,
    struct route_nhop_data *rnd)
{
	struct rtentry *rt;

	rt = lookup_prefix_bysa(rnh, info->rti_info[RTAX_DST],
	    info->rti_info[RTAX_NETMASK], rnd);

	return (rt);
}

/*
 * Adds route defined by @info into the kernel table specified by @fibnum and
 * sa_family in @info->rti_info[RTAX_DST].
 *
 * Returns 0 on success and fills in operation metadata into @rc.
 */
int
rib_add_route(uint32_t fibnum, struct rt_addrinfo *info,
    struct rib_cmd_info *rc)
{
	struct rib_head *rnh;
	int error;

	NET_EPOCH_ASSERT();

	rnh = get_rnh(fibnum, info);
	if (rnh == NULL)
		return (EAFNOSUPPORT);

	/*
	 * Check consistency between RTF_HOST flag and netmask
	 * existence.
	 */
	if (info->rti_flags & RTF_HOST)
		info->rti_info[RTAX_NETMASK] = NULL;
	else if (info->rti_info[RTAX_NETMASK] == NULL)
		return (EINVAL);

	bzero(rc, sizeof(struct rib_cmd_info));
	rc->rc_cmd = RTM_ADD;

	error = add_route(rnh, info, rc);
	if (error == 0)
		rib_notify(rnh, RIB_NOTIFY_DELAYED, rc);

	return (error);
}
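
/*
 * Example (a sketch with hypothetical, pre-filled sockaddrs dst/mask/gw):
 * adding a gateway route from within the net epoch:
 *
 *	struct rt_addrinfo info;
 *	struct rib_cmd_info rc;
 *	int error;
 *
 *	bzero(&info, sizeof(info));
 *	info.rti_flags = RTF_GATEWAY;
 *	info.rti_info[RTAX_DST] = (struct sockaddr *)&dst;
 *	info.rti_info[RTAX_NETMASK] = (struct sockaddr *)&mask;
 *	info.rti_info[RTAX_GATEWAY] = (struct sockaddr *)&gw;
 *	error = rib_add_route(RT_DEFAULT_FIB, &info, &rc);
 */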

/*
 * Checks if @dst and @gateway are a valid combination.
 *
 * Returns true if valid, false otherwise.
 */
static bool
check_gateway(struct rib_head *rnh, struct sockaddr *dst,
    struct sockaddr *gateway)
{
	if (dst->sa_family == gateway->sa_family)
		return (true);
	else if (gateway->sa_family == AF_UNSPEC)
		return (true);
	else if (gateway->sa_family == AF_LINK)
		return (true);
#if defined(INET) && defined(INET6)
	else if (dst->sa_family == AF_INET && gateway->sa_family == AF_INET6 &&
	    rib_can_ipv6_nexthop_address(rnh))
		return (true);
#endif
	else
		return (false);
}

/*
 * Creates rtentry and nexthop based on @info data.
 * Returns 0 and fills in rtentry into @prt on success,
 * returns errno otherwise.
 */
static int
create_rtentry(struct rib_head *rnh, struct rt_addrinfo *info,
    struct rtentry **prt)
{
	struct sockaddr *dst, *ndst, *gateway, *netmask;
	struct rtentry *rt;
	struct nhop_object *nh;
	struct ifaddr *ifa;
	int error, flags;

	dst = info->rti_info[RTAX_DST];
	gateway = info->rti_info[RTAX_GATEWAY];
	netmask = info->rti_info[RTAX_NETMASK];
	flags = info->rti_flags;

	if ((flags & RTF_GATEWAY) && !gateway)
		return (EINVAL);
	if (dst && gateway && !check_gateway(rnh, dst, gateway))
		return (EINVAL);

	if (dst->sa_len > sizeof(((struct rtentry *)NULL)->rt_dstb))
		return (EINVAL);

	if (info->rti_ifa == NULL) {
		error = rt_getifa_fib(info, rnh->rib_fibnum);
		if (error)
			return (error);
	}

	error = nhop_create_from_info(rnh, info, &nh);
	if (error != 0)
		return (error);

	rt = uma_zalloc(V_rtzone, M_NOWAIT | M_ZERO);
	if (rt == NULL) {
		nhop_free(nh);
		return (ENOBUFS);
	}
	rt->rte_flags = (RTF_UP | flags) & RTE_RT_FLAG_MASK;
	rt->rt_nhop = nh;

	/* Fill in dst */
	memcpy(&rt->rt_dst, dst, dst->sa_len);
	rt_key(rt) = &rt->rt_dst;

	/*
	 * point to the (possibly newly malloc'd) dest address.
	 */
	ndst = (struct sockaddr *)rt_key(rt);

	/*
	 * make sure it contains the value we want (masked if needed).
	 */
	if (netmask) {
		rt_maskedcopy(dst, ndst, netmask);
	} else
		bcopy(dst, ndst, dst->sa_len);

	/*
	 * We use the ifa reference returned by rt_getifa_fib().
	 * This moved from below so that rnh->rnh_addaddr() can
	 * examine the ifa and ifa->ifa_ifp if it so desires.
	 */
	ifa = info->rti_ifa;
	rt->rt_weight = get_info_weight(info, RT_DEFAULT_WEIGHT);
	rt_set_expire_info(rt, info);

	*prt = rt;
	return (0);
}

static int
add_route(struct rib_head *rnh, struct rt_addrinfo *info,
    struct rib_cmd_info *rc)
{
	struct nhop_object *nh_orig;
	struct route_nhop_data rnd_orig, rnd_add;
	struct nhop_object *nh;
	struct rtentry *rt, *rt_orig;
	int error;

	error = create_rtentry(rnh, info, &rt);
	if (error != 0)
		return (error);

	rnd_add.rnd_nhop = rt->rt_nhop;
	rnd_add.rnd_weight = rt->rt_weight;
	nh = rt->rt_nhop;

	RIB_WLOCK(rnh);
	error = add_route_nhop(rnh, rt, info, &rnd_add, rc);
	if (error == 0) {
		RIB_WUNLOCK(rnh);
		return (0);
	}

	/* addition failed. Lookup prefix in the rib to determine the cause */
	rt_orig = lookup_prefix(rnh, info, &rnd_orig);
	if (rt_orig == NULL) {
		/* No prefix -> rnh_addaddr() failed to allocate memory */
		RIB_WUNLOCK(rnh);
		nhop_free(nh);
		uma_zfree(V_rtzone, rt);
		return (ENOMEM);
	}

	/* We have existing route in the RIB. */
	nh_orig = rnd_orig.rnd_nhop;
	/* Check if new route has higher preference */
	if (can_override_nhop(info, nh_orig) > 0) {
		/* Update nexthop to the new route */
		change_route_nhop(rnh, rt_orig, info, &rnd_add, rc);
		RIB_WUNLOCK(rnh);
		uma_zfree(V_rtzone, rt);
		nhop_free(nh_orig);
		return (0);
	}

	RIB_WUNLOCK(rnh);

#ifdef ROUTE_MPATH
	if (rib_can_multipath(rnh) && nhop_can_multipath(rnd_add.rnd_nhop) &&
	    nhop_can_multipath(rnd_orig.rnd_nhop))
		error = add_route_mpath(rnh, info, rt, &rnd_add, &rnd_orig, rc);
	else
#endif
	/* Unable to add - another route with the same preference exists */
	error = EEXIST;

	/*
	 * ROUTE_MPATH disabled: failed to add route, free both nhop and rt.
	 * ROUTE_MPATH enabled: original nhop reference is unused in any case,
	 *  free rt only if not _adding_ a new route to the rib (e.g. the case
	 *  when the initial lookup returned an existing route, but then it got
	 *  deleted prior to multipath group insertion, leading to a simple
	 *  non-multipath add as a result).
	 */
	nhop_free(nh);
	if ((error != 0) || rc->rc_cmd != RTM_ADD)
		uma_zfree(V_rtzone, rt);

	return (error);
}

/*
 * Removes route defined by @info from the kernel table specified by @fibnum and
 * sa_family in @info->rti_info[RTAX_DST].
 *
 * Returns 0 on success and fills in operation metadata into @rc.
 */
int
rib_del_route(uint32_t fibnum, struct rt_addrinfo *info, struct rib_cmd_info *rc)
{
	struct rib_head *rnh;
	struct sockaddr *dst_orig, *netmask;
	struct sockaddr_storage mdst;
	int error;

	NET_EPOCH_ASSERT();

	rnh = get_rnh(fibnum, info);
	if (rnh == NULL)
		return (EAFNOSUPPORT);

	bzero(rc, sizeof(struct rib_cmd_info));
	rc->rc_cmd = RTM_DELETE;

	dst_orig = info->rti_info[RTAX_DST];
	netmask = info->rti_info[RTAX_NETMASK];

	if (netmask != NULL) {
		/* Ensure @dst is always properly masked */
		if (dst_orig->sa_len > sizeof(mdst))
			return (EINVAL);
		rt_maskedcopy(dst_orig, (struct sockaddr *)&mdst, netmask);
		info->rti_info[RTAX_DST] = (struct sockaddr *)&mdst;
	}
	error = del_route(rnh, info, rc);
	info->rti_info[RTAX_DST] = dst_orig;

	return (error);
}

/*
 * Conditionally unlinks rtentry matching data inside @info from @rnh.
 * Returns 0 on success with operation result stored in @rc.
 * On error, returns:
 * ESRCH - if prefix was not found,
 * EADDRINUSE - if trying to delete a higher priority route,
 * ENOENT - if supplied filter function returned 0 (not matched).
 */
static int
rt_unlinkrte(struct rib_head *rnh, struct rt_addrinfo *info, struct rib_cmd_info *rc)
{
	struct rtentry *rt;
	struct nhop_object *nh;
	struct radix_node *rn;
	struct route_nhop_data rnd;
	int error;

	rt = lookup_prefix(rnh, info, &rnd);
	if (rt == NULL)
		return (ESRCH);

	nh = rt->rt_nhop;
#ifdef ROUTE_MPATH
	if (NH_IS_NHGRP(nh)) {
		error = del_route_mpath(rnh, info, rt,
		    (struct nhgrp_object *)nh, rc);
		return (error);
	}
#endif
	error = check_info_match_nhop(info, rt, nh);
	if (error != 0)
		return (error);

	if (can_override_nhop(info, nh) < 0)
		return (EADDRINUSE);

	/*
	 * Remove the item from the tree and return it.
	 * Complain if it is not there and do no more processing.
	 */
	rn = rnh->rnh_deladdr(info->rti_info[RTAX_DST],
	    info->rti_info[RTAX_NETMASK], &rnh->head);
	if (rn == NULL)
		return (ESRCH);

	if (rn->rn_flags & (RNF_ACTIVE | RNF_ROOT))
		panic("rtrequest delete");

	rt = RNTORT(rn);
	rt->rte_flags &= ~RTF_UP;

	/* Finalize notification */
	rib_bump_gen(rnh);
	rnh->rnh_prefixes--;

	rc->rc_cmd = RTM_DELETE;
	rc->rc_rt = rt;
	rc->rc_nh_old = rt->rt_nhop;
	rc->rc_nh_weight = rt->rt_weight;
	rib_notify(rnh, RIB_NOTIFY_IMMEDIATE, rc);

	return (0);
}
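
/*
 * Example (a sketch): supplying RTAX_GATEWAY narrows the deletion via
 * check_info_match_nhop(); for a hypothetical multipath prefix this removes
 * only the path through gw1 and keeps the remaining ones:
 *
 *	bzero(&info, sizeof(info));
 *	info.rti_info[RTAX_DST] = (struct sockaddr *)&dst;
 *	info.rti_info[RTAX_NETMASK] = (struct sockaddr *)&mask;
 *	info.rti_info[RTAX_GATEWAY] = (struct sockaddr *)&gw1;
 *	error = rib_del_route(RT_DEFAULT_FIB, &info, &rc);
 */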

static int
del_route(struct rib_head *rnh, struct rt_addrinfo *info,
    struct rib_cmd_info *rc)
{
	int error;

	RIB_WLOCK(rnh);
	error = rt_unlinkrte(rnh, info, rc);
	RIB_WUNLOCK(rnh);
	if (error != 0)
		return (error);

	rib_notify(rnh, RIB_NOTIFY_DELAYED, rc);

	/*
	 * If the caller wants it, then it can have it;
	 * the entry will be deleted after the end of the current epoch.
	 */
	if (rc->rc_cmd == RTM_DELETE)
		rtfree(rc->rc_rt);
#ifdef ROUTE_MPATH
	else {
		/*
		 * Deleting 1 path may result in RTM_CHANGE to
		 * a different mpath group/nhop.
		 * Free old mpath group.
		 */
		nhop_free_any(rc->rc_nh_old);
	}
#endif

	return (0);
}

int
rib_change_route(uint32_t fibnum, struct rt_addrinfo *info,
    struct rib_cmd_info *rc)
{
	RIB_RLOCK_TRACKER;
	struct route_nhop_data rnd_orig;
	struct rib_head *rnh;
	struct rtentry *rt;
	int error;

	NET_EPOCH_ASSERT();

	rnh = get_rnh(fibnum, info);
	if (rnh == NULL)
		return (EAFNOSUPPORT);

	bzero(rc, sizeof(struct rib_cmd_info));
	rc->rc_cmd = RTM_CHANGE;

	/* Check if updated gateway exists */
	if ((info->rti_flags & RTF_GATEWAY) &&
	    (info->rti_info[RTAX_GATEWAY] == NULL)) {

		/*
		 * route(8) adds RTF_GATEWAY flag if -interface is not set.
		 * Remove RTF_GATEWAY to enforce consistency and maintain
		 * compatibility.
		 */
		info->rti_flags &= ~RTF_GATEWAY;
	}

	/*
	 * Route change is done in multiple steps, with dropping and
	 * reacquiring the lock. In situations where multiple processes
	 * change the same route, the route can be modified between the
	 * steps. Address this by retrying the operation multiple times
	 * before failing.
	 */

	RIB_RLOCK(rnh);
	rt = (struct rtentry *)rnh->rnh_lookup(info->rti_info[RTAX_DST],
	    info->rti_info[RTAX_NETMASK], &rnh->head);

	if (rt == NULL) {
		RIB_RUNLOCK(rnh);
		return (ESRCH);
	}

	rnd_orig.rnd_nhop = rt->rt_nhop;
	rnd_orig.rnd_weight = rt->rt_weight;

	RIB_RUNLOCK(rnh);

	for (int i = 0; i < RIB_MAX_RETRIES; i++) {
		error = change_route(rnh, info, &rnd_orig, rc);
		if (error != EAGAIN)
			break;
	}

	return (error);
}
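
/*
 * Example (a sketch): rib_change_route() keys on the same dst/netmask pair
 * as additions and retries internally (up to RIB_MAX_RETRIES) when the
 * route is modified concurrently; switching a hypothetical prefix to a new
 * gateway:
 *
 *	bzero(&info, sizeof(info));
 *	info.rti_flags = RTF_GATEWAY;
 *	info.rti_info[RTAX_DST] = (struct sockaddr *)&dst;
 *	info.rti_info[RTAX_NETMASK] = (struct sockaddr *)&mask;
 *	info.rti_info[RTAX_GATEWAY] = (struct sockaddr *)&new_gw;
 *	error = rib_change_route(RT_DEFAULT_FIB, &info, &rc);
 */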

static int
change_nhop(struct rib_head *rnh, struct rt_addrinfo *info,
    struct nhop_object *nh_orig, struct nhop_object **nh_new)
{
	int error;

	/*
	 * New gateway could require new ifaddr, ifp;
	 * flags may also be different; ifp may be specified
	 * by ll sockaddr when protocol address is ambiguous
	 */
	if (((nh_orig->nh_flags & NHF_GATEWAY) &&
	    info->rti_info[RTAX_GATEWAY] != NULL) ||
	    info->rti_info[RTAX_IFP] != NULL ||
	    (info->rti_info[RTAX_IFA] != NULL &&
	     !sa_equal(info->rti_info[RTAX_IFA], nh_orig->nh_ifa->ifa_addr))) {
		error = rt_getifa_fib(info, rnh->rib_fibnum);

		if (error != 0) {
			info->rti_ifa = NULL;
			return (error);
		}
	}

	error = nhop_create_from_nhop(rnh, nh_orig, info, nh_new);
	info->rti_ifa = NULL;

	return (error);
}

#ifdef ROUTE_MPATH
static int
change_mpath_route(struct rib_head *rnh, struct rt_addrinfo *info,
    struct route_nhop_data *rnd_orig, struct rib_cmd_info *rc)
{
	int error = 0;
	struct nhop_object *nh, *nh_orig, *nh_new;
	struct route_nhop_data rnd_new;

	nh = NULL;
	nh_orig = rnd_orig->rnd_nhop;

	struct weightened_nhop *wn = NULL, *wn_new;
	uint32_t num_nhops;

	wn = nhgrp_get_nhops((struct nhgrp_object *)nh_orig, &num_nhops);
	nh_orig = NULL;
	for (int i = 0; i < num_nhops; i++) {
		/* check_info_match_nhop() returns 0 on match */
		if (check_info_match_nhop(info, NULL, wn[i].nh) == 0) {
			nh_orig = wn[i].nh;
			break;
		}
	}

	if (nh_orig == NULL)
		return (ESRCH);

	error = change_nhop(rnh, info, nh_orig, &nh_new);
	if (error != 0)
		return (error);

	wn_new = mallocarray(num_nhops, sizeof(struct weightened_nhop),
	    M_TEMP, M_NOWAIT | M_ZERO);
	if (wn_new == NULL) {
		nhop_free(nh_new);
		return (EAGAIN);
	}

	/* Modify the copy, not the array owned by the existing group */
	memcpy(wn_new, wn, num_nhops * sizeof(struct weightened_nhop));
	for (int i = 0; i < num_nhops; i++) {
		if (wn_new[i].nh == nh_orig) {
			wn_new[i].nh = nh_new;
			wn_new[i].weight = get_info_weight(info,
			    rnd_orig->rnd_weight);
			break;
		}
	}

	error = nhgrp_get_group(rnh, wn_new, num_nhops, &rnd_new);
	nhop_free(nh_new);
	free(wn_new, M_TEMP);

	if (error != 0)
		return (error);

	error = change_route_conditional(rnh, NULL, info, rnd_orig, &rnd_new, rc);

	return (error);
}
#endif

static int
change_route(struct rib_head *rnh, struct rt_addrinfo *info,
    struct route_nhop_data *rnd_orig, struct rib_cmd_info *rc)
{
	int error = 0;
	struct nhop_object *nh, *nh_orig;
	struct route_nhop_data rnd_new;

	nh = NULL;
	nh_orig = rnd_orig->rnd_nhop;
	if (nh_orig == NULL)
		return (ESRCH);

#ifdef ROUTE_MPATH
	if (NH_IS_NHGRP(nh_orig))
		return (change_mpath_route(rnh, info, rnd_orig, rc));
#endif

	rnd_new.rnd_weight = get_info_weight(info, rnd_orig->rnd_weight);
	error = change_nhop(rnh, info, nh_orig, &rnd_new.rnd_nhop);
	if (error != 0)
		return (error);
	error = change_route_conditional(rnh, NULL, info, rnd_orig, &rnd_new, rc);

	return (error);
}

/*
 * Insert @rt with nhop data from @rnd to @rnh.
 * Returns 0 on success and stores operation results in @rc.
 */
static int
add_route_nhop(struct rib_head *rnh, struct rtentry *rt,
    struct rt_addrinfo *info, struct route_nhop_data *rnd,
    struct rib_cmd_info *rc)
{
	struct sockaddr *ndst, *netmask;
	struct radix_node *rn;
	int error = 0;

	RIB_WLOCK_ASSERT(rnh);

	ndst = (struct sockaddr *)rt_key(rt);
	netmask = info->rti_info[RTAX_NETMASK];

	rt->rt_nhop = rnd->rnd_nhop;
	rt->rt_weight = rnd->rnd_weight;
	rn = rnh->rnh_addaddr(ndst, netmask, &rnh->head, rt->rt_nodes);

	if (rn != NULL) {
		if (rt->rt_expire > 0)
			tmproutes_update(rnh, rt);

		/* Finalize notification */
		rib_bump_gen(rnh);
		rnh->rnh_prefixes++;

		rc->rc_cmd = RTM_ADD;
		rc->rc_rt = rt;
		rc->rc_nh_old = NULL;
		rc->rc_nh_new = rnd->rnd_nhop;
		rc->rc_nh_weight = rnd->rnd_weight;

		rib_notify(rnh, RIB_NOTIFY_IMMEDIATE, rc);
	} else {
		/* Existing route or memory allocation failure */
		error = EEXIST;
	}

	return (error);
}

/*
 * Switch @rt nhop/weight to the ones specified in @rnd.
 * Conditionally set rt_expire if set in @info.
 * Returns 0 on success.
 */
int
change_route_nhop(struct rib_head *rnh, struct rtentry *rt,
    struct rt_addrinfo *info, struct route_nhop_data *rnd,
    struct rib_cmd_info *rc)
{
	struct nhop_object *nh_orig;

	RIB_WLOCK_ASSERT(rnh);

	nh_orig = rt->rt_nhop;

	if (rnd->rnd_nhop != NULL) {
		/* Changing expiration & nexthop & weight to a new one */
		rt_set_expire_info(rt, info);
		rt->rt_nhop = rnd->rnd_nhop;
		rt->rt_weight = rnd->rnd_weight;
		if (rt->rt_expire > 0)
			tmproutes_update(rnh, rt);
	} else {
		/* Route deletion requested. */
		struct sockaddr *ndst, *netmask;
		struct radix_node *rn;

		ndst = (struct sockaddr *)rt_key(rt);
		netmask = info->rti_info[RTAX_NETMASK];
		rn = rnh->rnh_deladdr(ndst, netmask, &rnh->head);
		if (rn == NULL)
			return (ESRCH);
		rt = RNTORT(rn);
		rt->rte_flags &= ~RTF_UP;
	}

	/* Finalize notification */
	rib_bump_gen(rnh);
	if (rnd->rnd_nhop == NULL)
		rnh->rnh_prefixes--;

	rc->rc_cmd = (rnd->rnd_nhop != NULL) ? RTM_CHANGE : RTM_DELETE;
	rc->rc_rt = rt;
	rc->rc_nh_old = nh_orig;
	rc->rc_nh_new = rnd->rnd_nhop;
	rc->rc_nh_weight = rnd->rnd_weight;

	rib_notify(rnh, RIB_NOTIFY_IMMEDIATE, rc);

	return (0);
}

/*
 * Conditionally update route nhop/weight IFF data in @rnd_orig is
 *  consistent with the current route data.
 * Nexthop in @rnd_new is consumed.
 */
int
change_route_conditional(struct rib_head *rnh, struct rtentry *rt,
    struct rt_addrinfo *info, struct route_nhop_data *rnd_orig,
    struct route_nhop_data *rnd_new, struct rib_cmd_info *rc)
{
	struct rtentry *rt_new;
	int error = 0;

	RIB_WLOCK(rnh);

	rt_new = (struct rtentry *)rnh->rnh_lookup(info->rti_info[RTAX_DST],
	    info->rti_info[RTAX_NETMASK], &rnh->head);

	if (rt_new == NULL) {
		if (rnd_orig->rnd_nhop == NULL)
			error = add_route_nhop(rnh, rt, info, rnd_new, rc);
		else {
			/*
			 * Prefix does not exist, which was not our assumption.
			 * Update @rnd_orig with the new data and return
			 */
			rnd_orig->rnd_nhop = NULL;
			rnd_orig->rnd_weight = 0;
			error = EAGAIN;
		}
	} else {
		/* Prefix exists, try to update */
		if (rnd_orig->rnd_nhop == rt_new->rt_nhop) {
			/*
			 * Nhop/mpath group hasn't changed. Flip
			 * to the new precalculated one and return
			 */
			error = change_route_nhop(rnh, rt_new, info, rnd_new, rc);
		} else {
			/* Update and retry */
			rnd_orig->rnd_nhop = rt_new->rt_nhop;
			rnd_orig->rnd_weight = rt_new->rt_weight;
			error = EAGAIN;
		}
	}

	RIB_WUNLOCK(rnh);

	if (error == 0) {
		rib_notify(rnh, RIB_NOTIFY_DELAYED, rc);

		if (rnd_orig->rnd_nhop != NULL)
			nhop_free_any(rnd_orig->rnd_nhop);

	} else {
		if (rnd_new->rnd_nhop != NULL)
			nhop_free_any(rnd_new->rnd_nhop);
	}

	return (error);
}

/*
 * Performs modification of the routing table specified by @action.
 * Table is specified by @fibnum and sa_family in @info->rti_info[RTAX_DST].
 * Needs to be run in network epoch.
 *
 * Returns 0 on success and fills in @rc with action result.
 */
int
rib_action(uint32_t fibnum, int action, struct rt_addrinfo *info,
    struct rib_cmd_info *rc)
{
	int error;

	switch (action) {
	case RTM_ADD:
		error = rib_add_route(fibnum, info, rc);
		break;
	case RTM_DELETE:
		error = rib_del_route(fibnum, info, rc);
		break;
	case RTM_CHANGE:
		error = rib_change_route(fibnum, info, rc);
		break;
	default:
		error = ENOTSUP;
	}

	return (error);
}

struct rt_delinfo
{
	struct rt_addrinfo info;
	struct rib_head *rnh;
	struct rtentry *head;
	struct rib_cmd_info rc;
};

/*
 * Conditionally unlinks @rn from the radix tree based
 *  on info data passed in @arg.
 */
static int
rt_checkdelroute(struct radix_node *rn, void *arg)
{
	struct rt_delinfo *di;
	struct rt_addrinfo *info;
	struct rtentry *rt;

	di = (struct rt_delinfo *)arg;
	rt = (struct rtentry *)rn;
	info = &di->info;

	info->rti_info[RTAX_DST] = rt_key(rt);
	info->rti_info[RTAX_NETMASK] = rt_mask(rt);

	if (rt_unlinkrte(di->rnh, info, &di->rc) != 0)
		return (0);

	/*
	 * Add deleted rtentries to the list to GC them
	 *  after dropping the lock.
	 *
	 * XXX: Delayed notifications not implemented
	 *  for nexthop updates.
	 */
	if (di->rc.rc_cmd == RTM_DELETE) {
		/* Add to the list and return */
		rt->rt_chain = di->head;
		di->head = rt;
#ifdef ROUTE_MPATH
	} else {
		/*
		 * RTM_CHANGE to a different nexthop or nexthop group.
		 * Free old multipath group.
		 */
		nhop_free_any(di->rc.rc_nh_old);
#endif
	}

	return (0);
}

/*
 * Iterates over a routing table specified by @fibnum and @family and
 *  deletes elements marked by @filter_f.
 * @fibnum: rtable id
 * @family: AF_ address family
 * @filter_f: function returning non-zero value for items to delete
 * @arg: data to pass to the @filter_f function
 * @report: true if rtsock notification is needed.
 */
void
rib_walk_del(u_int fibnum, int family, rib_filter_f_t *filter_f, void *arg, bool report)
{
	struct rib_head *rnh;
	struct rt_delinfo di;
	struct rtentry *rt;
	struct nhop_object *nh;
	struct epoch_tracker et;

	rnh = rt_tables_get_rnh(fibnum, family);
	if (rnh == NULL)
		return;

	bzero(&di, sizeof(di));
	di.info.rti_filter = filter_f;
	di.info.rti_filterdata = arg;
	di.rnh = rnh;
	di.rc.rc_cmd = RTM_DELETE;

	NET_EPOCH_ENTER(et);

	RIB_WLOCK(rnh);
	rnh->rnh_walktree(&rnh->head, rt_checkdelroute, &di);
	RIB_WUNLOCK(rnh);

	/* We might have something to reclaim. */
	bzero(&di.rc, sizeof(di.rc));
	di.rc.rc_cmd = RTM_DELETE;
	while (di.head != NULL) {
		rt = di.head;
		di.head = rt->rt_chain;
		rt->rt_chain = NULL;
		nh = rt->rt_nhop;

		di.rc.rc_rt = rt;
		di.rc.rc_nh_old = nh;
		rib_notify(rnh, RIB_NOTIFY_DELAYED, &di.rc);

		/* TODO std rt -> rt_addrinfo export */
		di.info.rti_info[RTAX_DST] = rt_key(rt);
		di.info.rti_info[RTAX_NETMASK] = rt_mask(rt);

		if (report) {
#ifdef ROUTE_MPATH
			struct nhgrp_object *nhg;
			struct weightened_nhop *wn;
			uint32_t num_nhops;
			if (NH_IS_NHGRP(nh)) {
				nhg = (struct nhgrp_object *)nh;
				wn = nhgrp_get_nhops(nhg, &num_nhops);
				for (int i = 0; i < num_nhops; i++)
					rt_routemsg(RTM_DELETE, rt, wn[i].nh, fibnum);
			} else
#endif
			rt_routemsg(RTM_DELETE, rt, nh, fibnum);
		}
		rtfree(rt);
	}

	NET_EPOCH_EXIT(et);
}

static int
rt_delete_unconditional(struct radix_node *rn, void *arg)
{
	struct rtentry *rt = RNTORT(rn);
	struct rib_head *rnh = (struct rib_head *)arg;

	rn = rnh->rnh_deladdr(rt_key(rt), rt_mask(rt), &rnh->head);
	if (RNTORT(rn) == rt)
		rtfree(rt);

	return (0);
}
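
/*
 * Example (a sketch with a hypothetical filter): deleting every route whose
 * nexthop points to a given ifp from the inet table of fib 0, with rtsock
 * reporting enabled:
 *
 *	static int
 *	match_ifp_filter(const struct rtentry *rt,
 *	    const struct nhop_object *nh, void *arg)
 *	{
 *		return (nh->nh_ifp == (struct ifnet *)arg);
 *	}
 *
 *	rib_walk_del(0, AF_INET, match_ifp_filter, ifp, true);
 */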

/*
 * Removes all routes from the routing table without executing notifications.
 * rtentries will be removed after the end of the current epoch.
 */
static void
rib_flush_routes(struct rib_head *rnh)
{
	RIB_WLOCK(rnh);
	rnh->rnh_walktree(&rnh->head, rt_delete_unconditional, rnh);
	RIB_WUNLOCK(rnh);
}

void
rib_flush_routes_family(int family)
{
	struct rib_head *rnh;

	for (uint32_t fibnum = 0; fibnum < rt_numfibs; fibnum++) {
		if ((rnh = rt_tables_get_rnh(fibnum, family)) != NULL)
			rib_flush_routes(rnh);
	}
}

const char *
rib_print_family(int family)
{
	switch (family) {
	case AF_INET:
		return ("inet");
	case AF_INET6:
		return ("inet6");
	case AF_LINK:
		return ("link");
	}
	return ("unknown");
}

static void
rib_notify(struct rib_head *rnh, enum rib_subscription_type type,
    struct rib_cmd_info *rc)
{
	struct rib_subscription *rs;

	CK_STAILQ_FOREACH(rs, &rnh->rnh_subscribers, next) {
		if (rs->type == type)
			rs->func(rnh, rc, rs->arg);
	}
}

static struct rib_subscription *
allocate_subscription(rib_subscription_cb_t *f, void *arg,
    enum rib_subscription_type type, bool waitok)
{
	struct rib_subscription *rs;
	int flags = M_ZERO | (waitok ? M_WAITOK : M_NOWAIT);

	rs = malloc(sizeof(struct rib_subscription), M_RTABLE, flags);
	if (rs == NULL)
		return (NULL);

	rs->func = f;
	rs->arg = arg;
	rs->type = type;

	return (rs);
}

/*
 * Subscribe for the changes in the routing table specified by @fibnum and
 *  @family.
 *
 * Returns pointer to the subscription structure on success.
 */
struct rib_subscription *
rib_subscribe(uint32_t fibnum, int family, rib_subscription_cb_t *f, void *arg,
    enum rib_subscription_type type, bool waitok)
{
	struct rib_head *rnh;
	struct epoch_tracker et;

	NET_EPOCH_ENTER(et);
	KASSERT((fibnum < rt_numfibs), ("%s: bad fibnum", __func__));
	rnh = rt_tables_get_rnh(fibnum, family);
	NET_EPOCH_EXIT(et);

	return (rib_subscribe_internal(rnh, f, arg, type, waitok));
}

struct rib_subscription *
rib_subscribe_internal(struct rib_head *rnh, rib_subscription_cb_t *f, void *arg,
    enum rib_subscription_type type, bool waitok)
{
	struct rib_subscription *rs;
	struct epoch_tracker et;

	if ((rs = allocate_subscription(f, arg, type, waitok)) == NULL)
		return (NULL);
	rs->rnh = rnh;

	NET_EPOCH_ENTER(et);
	RIB_WLOCK(rnh);
	CK_STAILQ_INSERT_HEAD(&rnh->rnh_subscribers, rs, next);
	RIB_WUNLOCK(rnh);
	NET_EPOCH_EXIT(et);

	return (rs);
}

struct rib_subscription *
rib_subscribe_locked(struct rib_head *rnh, rib_subscription_cb_t *f, void *arg,
    enum rib_subscription_type type)
{
	struct rib_subscription *rs;

	NET_EPOCH_ASSERT();
	RIB_WLOCK_ASSERT(rnh);

	if ((rs = allocate_subscription(f, arg, type, false)) == NULL)
		return (NULL);
	rs->rnh = rnh;

	CK_STAILQ_INSERT_HEAD(&rnh->rnh_subscribers, rs, next);

	return (rs);
}
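
/*
 * Example (a sketch with a hypothetical callback): subscribing to immediate
 * notifications for the inet table of the default fib; the subscription must
 * later be removed with rib_unsubscribe() from within the net epoch:
 *
 *	static void
 *	example_rib_cb(struct rib_head *rnh, struct rib_cmd_info *rc,
 *	    void *arg)
 *	{
 *		printf("rib cmd %d\n", rc->rc_cmd);
 *	}
 *
 *	rs = rib_subscribe(RT_DEFAULT_FIB, AF_INET, example_rib_cb, NULL,
 *	    RIB_NOTIFY_IMMEDIATE, true);
 */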

/*
 * Remove rtable subscription @rs from the routing table.
 * Needs to be run in network epoch.
 */
void
rib_unsubscribe(struct rib_subscription *rs)
{
	struct rib_head *rnh = rs->rnh;

	NET_EPOCH_ASSERT();

	RIB_WLOCK(rnh);
	CK_STAILQ_REMOVE(&rnh->rnh_subscribers, rs, rib_subscription, next);
	RIB_WUNLOCK(rnh);

	epoch_call(net_epoch_preempt, destroy_subscription_epoch,
	    &rs->epoch_ctx);
}

void
rib_unsubscribe_locked(struct rib_subscription *rs)
{
	struct rib_head *rnh = rs->rnh;

	NET_EPOCH_ASSERT();
	RIB_WLOCK_ASSERT(rnh);

	CK_STAILQ_REMOVE(&rnh->rnh_subscribers, rs, rib_subscription, next);

	epoch_call(net_epoch_preempt, destroy_subscription_epoch,
	    &rs->epoch_ctx);
}

/*
 * Epoch callback indicating subscription is safe to destroy
 */
static void
destroy_subscription_epoch(epoch_context_t ctx)
{
	struct rib_subscription *rs;

	rs = __containerof(ctx, struct rib_subscription, epoch_ctx);

	free(rs, M_RTABLE);
}

void
rib_init_subscriptions(struct rib_head *rnh)
{

	CK_STAILQ_INIT(&rnh->rnh_subscribers);
}

void
rib_destroy_subscriptions(struct rib_head *rnh)
{
	struct rib_subscription *rs;
	struct epoch_tracker et;

	NET_EPOCH_ENTER(et);
	RIB_WLOCK(rnh);
	while ((rs = CK_STAILQ_FIRST(&rnh->rnh_subscribers)) != NULL) {
		CK_STAILQ_REMOVE_HEAD(&rnh->rnh_subscribers, next);
		epoch_call(net_epoch_preempt, destroy_subscription_epoch,
		    &rs->epoch_ctx);
	}
	RIB_WUNLOCK(rnh);
	NET_EPOCH_EXIT(et);
}