/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2020 Alexander V. Chernikov
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_mpath.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rmlock.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/vnet.h>
#include <net/route.h>
#include <net/route/route_ctl.h>
#include <net/route/route_var.h>
#include <net/route/nhop_utils.h>
#include <net/route/nhop.h>
#include <net/route/nhop_var.h>
#include <net/route/shared.h>
#include <netinet/in.h>

#ifdef RADIX_MPATH
#include <net/radix_mpath.h>
#endif

#include <vm/uma.h>

/*
 * This file contains control plane routing table functions.
 *
 * All functions assume they are called within the net epoch.
 */

struct rib_subscription {
	CK_STAILQ_ENTRY(rib_subscription)	next;
	rib_subscription_cb_t			*func;
	void					*arg;
	enum rib_subscription_type		type;
	struct epoch_context			epoch_ctx;
};

static void rib_notify(struct rib_head *rnh, enum rib_subscription_type type,
    struct rib_cmd_info *rc);

static void destroy_subscription_epoch(epoch_context_t ctx);

static struct rib_head *
get_rnh(uint32_t fibnum, const struct rt_addrinfo *info)
{
	struct rib_head *rnh;
	struct sockaddr *dst;

	KASSERT((fibnum < rt_numfibs), ("rib_add_route: bad fibnum"));

	dst = info->rti_info[RTAX_DST];
	rnh = rt_tables_get_rnh(fibnum, dst->sa_family);

	return (rnh);
}

/*
 * Adds the route defined by @info into the kernel table specified by
 * @fibnum and the sa_family in @info->rti_info[RTAX_DST].
 *
 * Returns 0 on success and fills in operation metadata into @rc.
 */
int
rib_add_route(uint32_t fibnum, struct rt_addrinfo *info,
    struct rib_cmd_info *rc)
{
	struct rib_head *rnh;

	NET_EPOCH_ASSERT();

	rnh = get_rnh(fibnum, info);
	if (rnh == NULL)
		return (EAFNOSUPPORT);

	/*
	 * Check consistency between RTF_HOST flag and netmask
	 * existence.
	 */
	if (info->rti_flags & RTF_HOST)
		info->rti_info[RTAX_NETMASK] = NULL;
	else if (info->rti_info[RTAX_NETMASK] == NULL)
		return (EINVAL);

	bzero(rc, sizeof(struct rib_cmd_info));
	rc->rc_cmd = RTM_ADD;

	return (add_route(rnh, info, rc));
}
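
/*
 * Usage sketch: how a caller might add an IPv4 prefix route through
 * rib_add_route().  The destination, netmask and gateway sockaddrs are
 * caller-owned; a host route would set RTF_HOST and pass no RTAX_NETMASK
 * instead.  If rti_ifa is left unset, add_route() resolves it via
 * rt_getifa_fib().  All local variable names below are hypothetical.
 *
 *	struct sockaddr_in dst, mask, gw;
 *	struct rt_addrinfo info;
 *	struct rib_cmd_info rc;
 *	struct epoch_tracker et;
 *	int error;
 *
 *	(fill in dst/mask/gw: sin_len, sin_family = AF_INET, sin_addr)
 *
 *	bzero(&info, sizeof(info));
 *	info.rti_flags = RTF_GATEWAY;
 *	info.rti_info[RTAX_DST] = (struct sockaddr *)&dst;
 *	info.rti_info[RTAX_NETMASK] = (struct sockaddr *)&mask;
 *	info.rti_info[RTAX_GATEWAY] = (struct sockaddr *)&gw;
 *
 *	NET_EPOCH_ENTER(et);
 *	error = rib_add_route(RT_DEFAULT_FIB, &info, &rc);
 *	NET_EPOCH_EXIT(et);
 */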

int
add_route(struct rib_head *rnh, struct rt_addrinfo *info,
    struct rib_cmd_info *rc)
{
	struct sockaddr *dst, *ndst, *gateway, *netmask;
	struct rtentry *rt, *rt_old;
	struct nhop_object *nh;
	struct radix_node *rn;
	struct ifaddr *ifa;
	int error, flags;
	struct epoch_tracker et;

	dst = info->rti_info[RTAX_DST];
	gateway = info->rti_info[RTAX_GATEWAY];
	netmask = info->rti_info[RTAX_NETMASK];
	flags = info->rti_flags;

	if ((flags & RTF_GATEWAY) && !gateway)
		return (EINVAL);
	if (dst && gateway && (dst->sa_family != gateway->sa_family) &&
	    (gateway->sa_family != AF_UNSPEC) && (gateway->sa_family != AF_LINK))
		return (EINVAL);

	if (dst->sa_len > sizeof(((struct rtentry *)NULL)->rt_dstb))
		return (EINVAL);

	if (info->rti_ifa == NULL) {
		error = rt_getifa_fib(info, rnh->rib_fibnum);
		if (error)
			return (error);
	} else {
		ifa_ref(info->rti_ifa);
	}

	NET_EPOCH_ENTER(et);
	error = nhop_create_from_info(rnh, info, &nh);
	NET_EPOCH_EXIT(et);
	if (error != 0) {
		ifa_free(info->rti_ifa);
		return (error);
	}

	rt = uma_zalloc(V_rtzone, M_NOWAIT);
	if (rt == NULL) {
		ifa_free(info->rti_ifa);
		nhop_free(nh);
		return (ENOBUFS);
	}
	rt->rt_flags = RTF_UP | flags;
	rt->rt_nhop = nh;

	/* Fill in dst */
	memcpy(&rt->rt_dst, dst, dst->sa_len);
	rt_key(rt) = &rt->rt_dst;

	/*
	 * point to the (possibly newly malloc'd) dest address.
	 */
	ndst = (struct sockaddr *)rt_key(rt);

	/*
	 * make sure it contains the value we want (masked if needed).
	 */
	if (netmask) {
		rt_maskedcopy(dst, ndst, netmask);
	} else
		bcopy(dst, ndst, dst->sa_len);

	/*
	 * We use the ifa reference returned by rt_getifa_fib().
	 * This moved from below so that rnh->rnh_addaddr() can
	 * examine the ifa and ifa->ifa_ifp if it so desires.
	 */
	ifa = info->rti_ifa;
	rt->rt_weight = 1;

	rt_setmetrics(info, rt);
	rt_old = NULL;

	RIB_WLOCK(rnh);
	RT_LOCK(rt);
#ifdef RADIX_MPATH
	/* do not permit exactly the same dst/mask/gw pair */
	if (rt_mpath_capable(rnh) &&
	    rt_mpath_conflict(rnh, rt, netmask)) {
		RIB_WUNLOCK(rnh);

		nhop_free(nh);
		uma_zfree(V_rtzone, rt);
		return (EEXIST);
	}
#endif

	rn = rnh->rnh_addaddr(ndst, netmask, &rnh->head, rt->rt_nodes);

	if (rn != NULL) {
		/* Most common usecase */
		if (rt->rt_expire > 0)
			tmproutes_update(rnh, rt);

		/* Finalize notification */
		rnh->rnh_gen++;

		rc->rc_rt = RNTORT(rn);
		rc->rc_nh_new = nh;

		rib_notify(rnh, RIB_NOTIFY_IMMEDIATE, rc);
	} else if ((info->rti_flags & RTF_PINNED) != 0) {

		/*
		 * Force removal and re-try addition
		 * TODO: better multipath&pinned support
		 */
		struct sockaddr *info_dst = info->rti_info[RTAX_DST];
		info->rti_info[RTAX_DST] = ndst;
		/* Do not delete existing PINNED(interface) routes */
		info->rti_flags &= ~RTF_PINNED;
		rt_old = rt_unlinkrte(rnh, info, &error);
		info->rti_flags |= RTF_PINNED;
		info->rti_info[RTAX_DST] = info_dst;
		if (rt_old != NULL) {
			rn = rnh->rnh_addaddr(ndst, netmask, &rnh->head,
			    rt->rt_nodes);

			/* Finalize notification */
			rnh->rnh_gen++;

			if (rn != NULL) {
				rc->rc_cmd = RTM_CHANGE;
				rc->rc_rt = RNTORT(rn);
				rc->rc_nh_old = rt_old->rt_nhop;
				rc->rc_nh_new = nh;
			} else {
				rc->rc_cmd = RTM_DELETE;
				rc->rc_rt = RNTORT(rn);
				rc->rc_nh_old = rt_old->rt_nhop;
				rc->rc_nh_new = nh;
			}
			rib_notify(rnh, RIB_NOTIFY_IMMEDIATE, rc);
		}
	}
	RIB_WUNLOCK(rnh);

	if ((rn != NULL) || (rt_old != NULL))
		rib_notify(rnh, RIB_NOTIFY_DELAYED, rc);

	if (rt_old != NULL)
		rtfree(rt_old);

	/*
	 * If it still failed to go into the tree,
	 * then un-make it (this should be a function)
	 */
	if (rn == NULL) {
		nhop_free(nh);
		uma_zfree(V_rtzone, rt);
		return (EEXIST);
	}

	RT_UNLOCK(rt);

	return (0);
}

/*
 * Removes the route defined by @info from the kernel table specified by
 * @fibnum and the sa_family in @info->rti_info[RTAX_DST].
 *
 * Returns 0 on success and fills in operation metadata into @rc.
 */
int
rib_del_route(uint32_t fibnum, struct rt_addrinfo *info, struct rib_cmd_info *rc)
{
	struct rib_head *rnh;

	NET_EPOCH_ASSERT();

	rnh = get_rnh(fibnum, info);
	if (rnh == NULL)
		return (EAFNOSUPPORT);

	bzero(rc, sizeof(struct rib_cmd_info));
	rc->rc_cmd = RTM_DELETE;

	return (del_route(rnh, info, rc));
}

/*
 * Conditionally unlinks the rtentry matching the data inside @info from @rnh.
 * Returns the unlinked, locked and referenced @rtentry on success,
 * or returns NULL and sets @perror to:
 * ESRCH      - if the prefix was not found,
 * EADDRINUSE - if trying to delete a PINNED route without the appropriate flag,
 * ENOENT     - if the supplied filter function returned 0 (not matched).
 */
struct rtentry *
rt_unlinkrte(struct rib_head *rnh, struct rt_addrinfo *info, int *perror)
{
	struct sockaddr *dst, *netmask;
	struct rtentry *rt;
	struct radix_node *rn;

	dst = info->rti_info[RTAX_DST];
	netmask = info->rti_info[RTAX_NETMASK];

	rt = (struct rtentry *)rnh->rnh_lookup(dst, netmask, &rnh->head);
	if (rt == NULL) {
		*perror = ESRCH;
		return (NULL);
	}

	if ((info->rti_flags & RTF_PINNED) == 0) {
		/* Check if target route can be deleted */
		if (rt->rt_flags & RTF_PINNED) {
			*perror = EADDRINUSE;
			return (NULL);
		}
	}

	if (info->rti_filter != NULL) {
		if (info->rti_filter(rt, rt->rt_nhop, info->rti_filterdata)==0){
			/* Not matched */
			*perror = ENOENT;
			return (NULL);
		}

		/*
		 * The filter function requested rte deletion.
		 * Ease the caller's work by filling in the remaining info
		 * from that particular entry.
		 */
		info->rti_info[RTAX_GATEWAY] = &rt->rt_nhop->gw_sa;
	}

	/*
	 * Remove the item from the tree and return it.
	 * Complain if it is not there and do no more processing.
	 */
	*perror = ESRCH;
#ifdef RADIX_MPATH
	if (rt_mpath_capable(rnh))
		rn = rt_mpath_unlink(rnh, info, rt, perror);
	else
#endif
	rn = rnh->rnh_deladdr(dst, netmask, &rnh->head);
	if (rn == NULL)
		return (NULL);

	if (rn->rn_flags & (RNF_ACTIVE | RNF_ROOT))
		panic ("rtrequest delete");

	rt = RNTORT(rn);
	RT_LOCK(rt);
	rt->rt_flags &= ~RTF_UP;

	*perror = 0;

	return (rt);
}
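
/*
 * Usage sketch: a filter callback of the shape consumed above through
 * info->rti_filter / info->rti_filterdata, and below by rib_walk_del().
 * It returns non-zero for entries the caller wants acted upon; here it
 * matches routes whose nexthop points at a given interface.  The exact
 * rt_filter_f_t typedef lives in route_var.h; the callback and argument
 * names here are hypothetical.
 *
 *	static int
 *	match_nhop_ifp(const struct rtentry *rt, const struct nhop_object *nh,
 *	    void *arg)
 *	{
 *		const struct ifnet *ifp = arg;
 *
 *		return (nh->nh_ifp == ifp);
 *	}
 */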

int
del_route(struct rib_head *rnh, struct rt_addrinfo *info,
    struct rib_cmd_info *rc)
{
	struct sockaddr *dst, *netmask;
	struct sockaddr_storage mdst;
	struct rtentry *rt;
	int error;

	dst = info->rti_info[RTAX_DST];
	netmask = info->rti_info[RTAX_NETMASK];

	if (netmask) {
		if (dst->sa_len > sizeof(mdst))
			return (EINVAL);
		rt_maskedcopy(dst, (struct sockaddr *)&mdst, netmask);
		dst = (struct sockaddr *)&mdst;
	}

	RIB_WLOCK(rnh);
	rt = rt_unlinkrte(rnh, info, &error);
	if (rt != NULL) {
		/* Finalize notification */
		rnh->rnh_gen++;
		rc->rc_rt = rt;
		rc->rc_nh_old = rt->rt_nhop;
		rib_notify(rnh, RIB_NOTIFY_IMMEDIATE, rc);
	}
	RIB_WUNLOCK(rnh);
	if (error != 0)
		return (error);

	rib_notify(rnh, RIB_NOTIFY_DELAYED, rc);

	/*
	 * If the caller wants it, then it can have it:
	 * the entry will be deleted after the end of the current epoch.
	 */
	rtfree(rt);

	return (0);
}

int
rib_change_route(uint32_t fibnum, struct rt_addrinfo *info,
    struct rib_cmd_info *rc)
{
	struct rib_head *rnh;

	NET_EPOCH_ASSERT();

	rnh = get_rnh(fibnum, info);
	if (rnh == NULL)
		return (EAFNOSUPPORT);

	bzero(rc, sizeof(struct rib_cmd_info));
	rc->rc_cmd = RTM_CHANGE;

	return (change_route(rnh, info, rc));
}

static int
change_route_one(struct rib_head *rnh, struct rt_addrinfo *info,
    struct rib_cmd_info *rc)
{
	RIB_RLOCK_TRACKER;
	struct rtentry *rt = NULL;
	int error = 0;
	int free_ifa = 0;
	struct nhop_object *nh, *nh_orig;

	RIB_RLOCK(rnh);
	rt = (struct rtentry *)rnh->rnh_lookup(info->rti_info[RTAX_DST],
	    info->rti_info[RTAX_NETMASK], &rnh->head);

	if (rt == NULL) {
		RIB_RUNLOCK(rnh);
		return (ESRCH);
	}

#ifdef RADIX_MPATH
	/*
	 * If we got multipath routes,
	 * we require users to specify a matching RTAX_GATEWAY.
	 */
	if (rt_mpath_capable(rnh)) {
		rt = rt_mpath_matchgate(rt, info->rti_info[RTAX_GATEWAY]);
		if (rt == NULL) {
			RIB_RUNLOCK(rnh);
			return (ESRCH);
		}
	}
#endif
	nh_orig = rt->rt_nhop;

	RIB_RUNLOCK(rnh);

	rt = NULL;
	nh = NULL;

	/*
	 * New gateway could require new ifaddr, ifp;
	 * flags may also be different; ifp may be specified
	 * by ll sockaddr when protocol address is ambiguous
	 */
	if (((nh_orig->nh_flags & NHF_GATEWAY) &&
	    info->rti_info[RTAX_GATEWAY] != NULL) ||
	    info->rti_info[RTAX_IFP] != NULL ||
	    (info->rti_info[RTAX_IFA] != NULL &&
	    !sa_equal(info->rti_info[RTAX_IFA], nh_orig->nh_ifa->ifa_addr))) {
		error = rt_getifa_fib(info, rnh->rib_fibnum);
		if (info->rti_ifa != NULL)
			free_ifa = 1;

		if (error != 0) {
			if (free_ifa) {
				ifa_free(info->rti_ifa);
				info->rti_ifa = NULL;
			}

			return (error);
		}
	}

	error = nhop_create_from_nhop(rnh, nh_orig, info, &nh);
	if (free_ifa) {
		ifa_free(info->rti_ifa);
		info->rti_ifa = NULL;
	}
	if (error != 0)
		return (error);

	RIB_WLOCK(rnh);

	/* Lookup rtentry once again and check if nexthop is still the same */
	rt = (struct rtentry *)rnh->rnh_lookup(info->rti_info[RTAX_DST],
	    info->rti_info[RTAX_NETMASK], &rnh->head);

	if (rt == NULL) {
		RIB_WUNLOCK(rnh);
		nhop_free(nh);
		return (ESRCH);
	}

	if (rt->rt_nhop != nh_orig) {
		RIB_WUNLOCK(rnh);
		nhop_free(nh);
		return (EAGAIN);
	}

	/* Proceed with the update */
	RT_LOCK(rt);

	/* Provide notification to the protocols. */
	rt->rt_nhop = nh;
	rt_setmetrics(info, rt);

	/* Finalize notification */
	rc->rc_rt = rt;
	rc->rc_nh_old = nh_orig;
	rc->rc_nh_new = rt->rt_nhop;

	RT_UNLOCK(rt);

	/* Update generation id to reflect rtable change */
	rnh->rnh_gen++;
	rib_notify(rnh, RIB_NOTIFY_IMMEDIATE, rc);

	RIB_WUNLOCK(rnh);

	rib_notify(rnh, RIB_NOTIFY_DELAYED, rc);

	nhop_free(nh_orig);

	return (0);
}

int
change_route(struct rib_head *rnh, struct rt_addrinfo *info,
    struct rib_cmd_info *rc)
{
	int error;

	/* Check if the updated gateway exists */
	if ((info->rti_flags & RTF_GATEWAY) &&
	    (info->rti_info[RTAX_GATEWAY] == NULL))
		return (EINVAL);

	/*
	 * A route change is done in multiple steps, with the lock dropped
	 * and reacquired in between.  When multiple processes change the
	 * same route concurrently, the route may be modified between the
	 * steps.  Address this by retrying the operation multiple times
	 * before failing.
	 */
	for (int i = 0; i < RIB_MAX_RETRIES; i++) {
		error = change_route_one(rnh, info, rc);
		if (error != EAGAIN)
			break;
	}

	return (error);
}

/*
 * Performs the routing table modification specified by @action.
 * The table is specified by @fibnum and the sa_family in
 * @info->rti_info[RTAX_DST].
 * Needs to be run in the network epoch.
 *
 * Returns 0 on success and fills in @rc with the action result.
 */
int
rib_action(uint32_t fibnum, int action, struct rt_addrinfo *info,
    struct rib_cmd_info *rc)
{
	int error;

	switch (action) {
	case RTM_ADD:
		error = rib_add_route(fibnum, info, rc);
		break;
	case RTM_DELETE:
		error = rib_del_route(fibnum, info, rc);
		break;
	case RTM_CHANGE:
		error = rib_change_route(fibnum, info, rc);
		break;
	default:
		error = ENOTSUP;
	}

	return (error);
}

struct rt_delinfo
{
	struct rt_addrinfo info;
	struct rib_head *rnh;
	struct rtentry *head;
	struct rib_cmd_info rc;
};

/*
 * Conditionally unlinks @rn from the radix tree based
 * on the info data passed in @arg.
 */
static int
rt_checkdelroute(struct radix_node *rn, void *arg)
{
	struct rt_delinfo *di;
	struct rt_addrinfo *info;
	struct rtentry *rt;
	int error;

	di = (struct rt_delinfo *)arg;
	rt = (struct rtentry *)rn;
	info = &di->info;
	error = 0;

	info->rti_info[RTAX_DST] = rt_key(rt);
	info->rti_info[RTAX_NETMASK] = rt_mask(rt);
	info->rti_info[RTAX_GATEWAY] = &rt->rt_nhop->gw_sa;

	rt = rt_unlinkrte(di->rnh, info, &error);
	if (rt == NULL) {
		/* Either not allowed or not matched. Skip entry */
		return (0);
	}

	/* Entry was unlinked. Notify subscribers */
	di->rnh->rnh_gen++;
	di->rc.rc_rt = rt;
	di->rc.rc_nh_old = rt->rt_nhop;
	rib_notify(di->rnh, RIB_NOTIFY_IMMEDIATE, &di->rc);

	/* Add to the list and return */
	rt->rt_chain = di->head;
	di->head = rt;

	return (0);
}

/*
 * Iterates over a routing table specified by @fibnum and @family and
 * deletes elements marked by @filter_f.
 * @fibnum: rtable id
 * @family: AF_ address family
 * @filter_f: function returning non-zero value for items to delete
 * @arg: data to pass to the @filter_f function
 * @report: true if rtsock notification is needed.
 */
void
rib_walk_del(u_int fibnum, int family, rt_filter_f_t *filter_f, void *arg, bool report)
{
	struct rib_head *rnh;
	struct rt_delinfo di;
	struct rtentry *rt;
	struct epoch_tracker et;

	rnh = rt_tables_get_rnh(fibnum, family);
	if (rnh == NULL)
		return;

	bzero(&di, sizeof(di));
	di.info.rti_filter = filter_f;
	di.info.rti_filterdata = arg;
	di.rnh = rnh;
	di.rc.rc_cmd = RTM_DELETE;

	NET_EPOCH_ENTER(et);

	RIB_WLOCK(rnh);
	rnh->rnh_walktree(&rnh->head, rt_checkdelroute, &di);
	RIB_WUNLOCK(rnh);

	/* We might have something to reclaim. */
	while (di.head != NULL) {
		rt = di.head;
		di.head = rt->rt_chain;
		rt->rt_chain = NULL;

		di.rc.rc_rt = rt;
		di.rc.rc_nh_old = rt->rt_nhop;
		rib_notify(rnh, RIB_NOTIFY_DELAYED, &di.rc);

		/* TODO std rt -> rt_addrinfo export */
		di.info.rti_info[RTAX_DST] = rt_key(rt);
		di.info.rti_info[RTAX_NETMASK] = rt_mask(rt);

		if (report)
			rt_routemsg(RTM_DELETE, rt, rt->rt_nhop->nh_ifp, 0,
			    fibnum);
		rtfree(rt);
	}

	NET_EPOCH_EXIT(et);
}
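
/*
 * Usage sketch: flushing every route whose nexthop points at a detaching
 * interface, reusing the hypothetical match_nhop_ifp() filter sketched
 * next to rt_unlinkrte() above.  Passing @report as true makes the walk
 * emit RTM_DELETE rtsock messages for each removed entry.
 *
 *	rib_walk_del(fibnum, AF_INET, match_nhop_ifp, ifp, true);
 *	rib_walk_del(fibnum, AF_INET6, match_nhop_ifp, ifp, true);
 */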

static void
rib_notify(struct rib_head *rnh, enum rib_subscription_type type,
    struct rib_cmd_info *rc)
{
	struct rib_subscription *rs;

	CK_STAILQ_FOREACH(rs, &rnh->rnh_subscribers, next) {
		if (rs->type == type)
			rs->func(rnh, rc, rs->arg);
	}
}

/*
 * Subscribes to changes in the routing table specified by @fibnum and
 * @family.
 * Needs to be run in the network epoch.
 *
 * Returns a pointer to the subscription structure on success.
 */
struct rib_subscription *
rib_subscribe(uint32_t fibnum, int family, rib_subscription_cb_t *f, void *arg,
    enum rib_subscription_type type, int waitok)
{
	struct rib_head *rnh;
	struct rib_subscription *rs;
	int flags = M_ZERO | (waitok ? M_WAITOK : 0);

	NET_EPOCH_ASSERT();
	KASSERT((fibnum < rt_numfibs), ("%s: bad fibnum", __func__));
	rnh = rt_tables_get_rnh(fibnum, family);

	rs = malloc(sizeof(struct rib_subscription), M_RTABLE, flags);
	if (rs == NULL)
		return (NULL);

	rs->func = f;
	rs->arg = arg;
	rs->type = type;

	RIB_WLOCK(rnh);
	CK_STAILQ_INSERT_TAIL(&rnh->rnh_subscribers, rs, next);
	RIB_WUNLOCK(rnh);

	return (rs);
}

/*
 * Removes rtable subscription @rs from the table specified by @fibnum
 * and @family.
 * Needs to be run in the network epoch.
 *
 * Returns 0 on success.
 */
int
rib_unsibscribe(uint32_t fibnum, int family, struct rib_subscription *rs)
{
	struct rib_head *rnh;

	NET_EPOCH_ASSERT();
	KASSERT((fibnum < rt_numfibs), ("%s: bad fibnum", __func__));
	rnh = rt_tables_get_rnh(fibnum, family);

	if (rnh == NULL)
		return (ENOENT);

	RIB_WLOCK(rnh);
	CK_STAILQ_REMOVE(&rnh->rnh_subscribers, rs, rib_subscription, next);
	RIB_WUNLOCK(rnh);

	epoch_call(net_epoch_preempt, destroy_subscription_epoch,
	    &rs->epoch_ctx);

	return (0);
}

/*
 * Epoch callback indicating subscription is safe to destroy
 */
static void
destroy_subscription_epoch(epoch_context_t ctx)
{
	struct rib_subscription *rs;

	rs = __containerof(ctx, struct rib_subscription, epoch_ctx);

	free(rs, M_RTABLE);
}

void
rib_init_subscriptions(struct rib_head *rnh)
{

	CK_STAILQ_INIT(&rnh->rnh_subscribers);
}

void
rib_destroy_subscriptions(struct rib_head *rnh)
{
	struct rib_subscription *rs;
	struct epoch_tracker et;

	NET_EPOCH_ENTER(et);
	RIB_WLOCK(rnh);
	while ((rs = CK_STAILQ_FIRST(&rnh->rnh_subscribers)) != NULL) {
		CK_STAILQ_REMOVE_HEAD(&rnh->rnh_subscribers, next);
		epoch_call(net_epoch_preempt, destroy_subscription_epoch,
		    &rs->epoch_ctx);
	}
	RIB_WUNLOCK(rnh);
	NET_EPOCH_EXIT(et);
}
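
/*
 * Usage sketch: subscribing to route changes in a FIB with rib_subscribe()
 * above and tearing the subscription down with rib_unsibscribe().  The
 * callback matches the rib_subscription_cb_t shape invoked by rib_notify();
 * it receives the rib_cmd_info describing the change (rc_cmd plus the
 * rc_nh_old/rc_nh_new nexthop transition).  Since waitok is 0 here, the
 * returned pointer must be checked for NULL.  All names below are
 * hypothetical.
 *
 *	static void
 *	my_route_change_cb(struct rib_head *rnh, struct rib_cmd_info *rc,
 *	    void *arg)
 *	{
 *		(inspect rc->rc_cmd, rc->rc_nh_old and rc->rc_nh_new here)
 *	}
 *
 *	struct rib_subscription *rs;
 *	struct epoch_tracker et;
 *
 *	NET_EPOCH_ENTER(et);
 *	rs = rib_subscribe(RT_DEFAULT_FIB, AF_INET, my_route_change_cb, NULL,
 *	    RIB_NOTIFY_DELAYED, 0);
 *	NET_EPOCH_EXIT(et);
 *
 *	(later, on teardown)
 *
 *	NET_EPOCH_ENTER(et);
 *	rib_unsibscribe(RT_DEFAULT_FIB, AF_INET, rs);
 *	NET_EPOCH_EXIT(et);
 */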