/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2020 Alexander V. Chernikov
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_inet.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rmlock.h>
#include <sys/malloc.h>
#include <sys/priv.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <net/vnet.h>

#include <net/if.h>
#include <netinet/in.h>

#include <net/route.h>
#include <net/route/nhop.h>
#include <net/route/route_ctl.h>
#include <net/route/route_var.h>
#include <net/route/fib_algo.h>

/*
 * Binary search lookup algo.
 *
 * Compiles the route table into a sorted array.
 * Used with a small number of routes (< 16).
 * As the array is immutable, it is rebuilt on each rtable change.
 *
 * Example:
 *
 *  0.0.0.0/0 -> nh1
 *  10.0.0.0/24 -> nh2
 *  10.0.0.1/32 -> nh3
 *
 * gets compiled to:
 *
 *  0.0.0.0 -> nh1
 *  10.0.0.0 -> nh2
 *  10.0.0.1 -> nh3
 *  10.0.0.2 -> nh2
 *  10.0.1.0 -> nh1
 *
 */

struct bsearch4_record {
	uint32_t		addr4;
	uint32_t		mask4;
	struct nhop_object	*nh;
};

struct bsearch4_data {
	struct fib_data		*fd;
	uint32_t		alloc_items;
	uint32_t		num_items;
	void			*mem;
	struct bsearch4_record	*rr;
	struct bsearch4_record	br[0];
};
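/*
 * Editorial worked example, not from the original source: the datapath
 * semantics of the compiled array.  A lookup returns the nexthop of the
 * record with the largest addr4 that is <= the (host-order) key.  With
 * the example table from the comment above:
 *
 *	lookup(10.0.0.1)  -> record 10.0.0.1 -> nh3 (exact match)
 *	lookup(10.0.0.77) -> record 10.0.0.2 -> nh2 (inside 10.0.0.0/24)
 *	lookup(192.0.2.1) -> record 10.0.1.0 -> nh1 (default route)
 */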
/*
 * Main IPv4 address lookup function.
 *
 * Finds the array record with the largest addr4 that is <= the provided key.
 * Assumes 0.0.0.0/0 always exists (possibly with a NULL nhop).
 */
static struct nhop_object *
bsearch4_lookup(void *algo_data, const struct flm_lookup_key key, uint32_t scopeid)
{
	const struct bsearch4_data *bd = (const struct bsearch4_data *)algo_data;
	const struct bsearch4_record *br;
	uint32_t addr4 = ntohl(key.addr4.s_addr);

	int start = 0;
	int end = bd->num_items;

	int i = (start + end) / 2;
	while (start + 1 < end) {
		i = (start + end) / 2;
		br = &bd->br[i];
		if (addr4 < br->addr4) {
			/* key < average, reduce right boundary */
			end = i;
			continue;
		} else if (addr4 > br->addr4) {
			/* key > average, increase left boundary */
			start = i;
			continue;
		} else {
			/* direct match */
			return (br->nh);
		}
	}
	/* start + 1 == end */
	return (bd->br[start].nh);
}

/*
 * Preference function.
 * Assume ideal for < 10 prefixes (a typical single-interface setup has 5),
 * then gradually degrade.  Assume 30 prefixes result in at least 60 records,
 * requiring 8 lookup steps, which is even worse than the radix.
 */
static uint8_t
bsearch4_get_pref(const struct rib_rtable_info *rinfo)
{

	if (rinfo->num_prefixes < 10)
		return (253);
	else if (rinfo->num_prefixes < 30)
		return (255 - rinfo->num_prefixes * 8);
	else
		return (1);
}

static enum flm_op_result
bsearch4_init(uint32_t fibnum, struct fib_data *fd, void *_old_data, void **_data)
{
	struct bsearch4_data *bd;
	struct rib_rtable_info rinfo;
	uint32_t count;
	size_t sz;
	void *mem;

	fib_get_rtable_info(fib_get_rh(fd), &rinfo);
	count = rinfo.num_prefixes * 11 / 10 + 64;

	sz = sizeof(struct bsearch4_data) + sizeof(struct bsearch4_record) * count;
	/* add cache line sz to ease alignment */
	sz += CACHE_LINE_SIZE;
	mem = malloc(sz, M_RTABLE, M_NOWAIT | M_ZERO);
	if (mem == NULL)
		return (FLM_REBUILD);
	/* Align datapath-usable structure to cache line boundary */
	bd = (struct bsearch4_data *)roundup2((uintptr_t)mem, CACHE_LINE_SIZE);
	bd->mem = mem;
	bd->alloc_items = count;
	bd->fd = fd;

	*_data = bd;

	/*
	 * Allocate temporary array to store all rtable data.
	 * This step is required to provide the required prefix iteration order.
	 */
	bd->rr = mallocarray(count, sizeof(struct bsearch4_record), M_TEMP,
	    M_NOWAIT | M_ZERO);
	if (bd->rr == NULL)
		return (FLM_REBUILD);

	return (FLM_SUCCESS);
}

static void
bsearch4_destroy(void *_data)
{
	struct bsearch4_data *bd = (struct bsearch4_data *)_data;

	if (bd->rr != NULL)
		free(bd->rr, M_TEMP);
	free(bd->mem, M_RTABLE);
}

/*
 * Callback storing converted rtable prefixes in the temporary array.
 * Addresses are converted to host order.
 */
static enum flm_op_result
bsearch4_add_route_cb(struct rtentry *rt, void *_data)
{
	struct bsearch4_data *bd = (struct bsearch4_data *)_data;
	struct bsearch4_record *rr;
	struct in_addr addr4, mask4;
	uint32_t scopeid;

	if (bd->num_items >= bd->alloc_items)
		return (FLM_REBUILD);

	rr = &bd->rr[bd->num_items++];
	rt_get_inet_prefix_pmask(rt, &addr4, &mask4, &scopeid);
	rr->addr4 = ntohl(addr4.s_addr);
	rr->mask4 = ntohl(mask4.s_addr);
	rr->nh = rt_get_raw_nhop(rt);

	return (FLM_SUCCESS);
}
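/*
 * Editorial sketch, not part of the kernel build: the ordering produced
 * by rr_cmp() below, modeled as a standalone userland C program.
 * Host-order addresses compare as plain integers; ties are broken by
 * mask, so less specific prefixes sort first.
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	struct rec { uint32_t addr4; uint32_t mask4; };
 *
 *	static int
 *	cmp(const void *a, const void *b)
 *	{
 *		const struct rec *r1 = a, *r2 = b;
 *
 *		if (r1->addr4 != r2->addr4)
 *			return (r1->addr4 < r2->addr4 ? -1 : 1);
 *		if (r1->mask4 != r2->mask4)
 *			return (r1->mask4 < r2->mask4 ? -1 : 1);
 *		return (0);
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		struct rec recs[] = {
 *			{ 0x0A000001, 0xFFFFFFFF },	// 10.0.0.1/32
 *			{ 0x0A000000, 0xFFFFFF00 },	// 10.0.0.0/24
 *			{ 0x00000000, 0x00000000 },	// 0.0.0.0/0
 *		};
 *		qsort(recs, 3, sizeof(recs[0]), cmp);
 *		// Prints 0.0.0.0/0, 10.0.0.0/24, 10.0.0.1/32
 *		for (int i = 0; i < 3; i++)
 *			printf("%08x/%08x\n", recs[i].addr4, recs[i].mask4);
 *		return (0);
 *	}
 */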
/*
 * Prefix comparison function.
 * 10.0.0.0/24 < 10.0.0.0/25 <- less specific wins
 * 10.0.0.0/25 < 10.0.0.1/32 <- bigger base wins
 */
static int
rr_cmp(const void *_rec1, const void *_rec2)
{
	const struct bsearch4_record *rec1, *rec2;
	rec1 = _rec1;
	rec2 = _rec2;

	if (rec1->addr4 < rec2->addr4)
		return (-1);
	else if (rec1->addr4 > rec2->addr4)
		return (1);

	/*
	 * A less specific prefix has a numerically smaller mask.
	 * We want less specific prefixes to come first, hence compare with <.
	 */
	if (rec1->mask4 < rec2->mask4)
		return (-1);
	else if (rec1->mask4 > rec2->mask4)
		return (1);
	return (0);
}

struct bsearch4_array {
	uint32_t		alloc_items;
	uint32_t		num_items;
	struct bsearch4_record	*arr;
};

static bool
add_array_entry(struct bsearch4_array *ba, struct bsearch4_record *br_new)
{

	if (ba->num_items < ba->alloc_items) {
		ba->arr[ba->num_items++] = *br_new;
		return (true);
	}
	return (false);
}

static struct bsearch4_record *
get_last_entry(struct bsearch4_array *ba)
{

	return (&ba->arr[ba->num_items - 1]);
}

/*
 * Pops the top stack entry, adding the not-yet-covered remainder of its
 * address space to the lookup array if needed.
 *
 * Example:
 *  stack entry: 10.0.1.0/24,nh=3; last array entry: 10.0.1.0/25,nh=4
 *   -> appends 10.0.1.128 (mask /24), nh=3 to the lookup array
 */
static bool
pop_stack_entry(struct bsearch4_array *dst_array, struct bsearch4_array *stack)
{
	uint32_t last_stack_addr, last_array_addr;

	struct bsearch4_record *br_prev = get_last_entry(dst_array);
	struct bsearch4_record *pstack = get_last_entry(stack);

	/* Regardless of the result, pop the stack entry */
	stack->num_items--;

	/* Last address covered by the last entry in the lookup array */
	last_array_addr = (br_prev->addr4 | ~br_prev->mask4);
	/* Last address covered by the stack record entry */
	last_stack_addr = (pstack->addr4 | ~pstack->mask4);

	if (last_stack_addr > last_array_addr) {
		/*
		 * The stack record covers more address space than
		 * the last entry in the lookup array.
		 * Add the remaining part of the stack record to
		 * the lookup array.
		 */
		struct bsearch4_record br_new = {
			.addr4 = last_array_addr + 1,
			.mask4 = pstack->mask4,
			.nh = pstack->nh,
		};
		return (add_array_entry(dst_array, &br_new));
	}

	return (true);
}
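/*
 * Editorial trace, not from the original source: how the builder below
 * turns the sorted prefix list from the file header example into the
 * lookup array.  The stack holds the prefixes covering the current
 * address, least specific first.
 *
 *	process 0.0.0.0/0,nh1    array: [0.0.0.0->nh1]    stack: [/0]
 *	process 10.0.0.0/24,nh2  array: +[10.0.0.0->nh2]  stack: [/0 /24]
 *	process 10.0.0.1/32,nh3  array: +[10.0.0.1->nh3]  stack: [/0 /24 /32]
 *	drain stack: pop /32 (already fully covered),
 *	             pop /24 -> +[10.0.0.2->nh2],
 *	             pop /0  -> +[10.0.1.0->nh1]
 */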
/*
 * Updates the resulting array @dst_array with a rib entry @rib_entry.
 */
static bool
bsearch4_process_record(struct bsearch4_array *dst_array,
    struct bsearch4_array *stack, struct bsearch4_record *rib_entry)
{

	/*
	 * Maintain the invariant: the current rib_entry is always contained
	 * in the top stack entry.
	 * Note we always have 0.0.0.0/0.
	 */
	while (stack->num_items > 0) {
		struct bsearch4_record *pst = get_last_entry(stack);

		/*
		 * Check if we need to pop the stack.
		 * Rely on the ordering - less specific prefixes come first.
		 * Pop any entry that doesn't contain the current prefix.
		 */
		if (pst->addr4 == (rib_entry->addr4 & pst->mask4))
			break;

		if (!pop_stack_entry(dst_array, stack))
			return (false);
	}

	if (dst_array->num_items > 0) {

		/*
		 * Check if there is a gap between the previous entry and the
		 * current one.  The code above guarantees that both previous
		 * and current entries are contained in the top stack entry.
		 *
		 * Example: last: 10.0.0.1(/32,nh=3) cur: 10.0.0.3(/32,nh=4),
		 *  stack: 10.0.0.0/24,nh=2.
		 * Cover the gap between previous and current by adding the
		 * stack nexthop.
		 */
		struct bsearch4_record *br_tmp = get_last_entry(dst_array);
		uint32_t last_declared_addr = br_tmp->addr4 | ~br_tmp->mask4;
		if (last_declared_addr < rib_entry->addr4 - 1) {
			/* Cover the hole */
			struct bsearch4_record *pst = get_last_entry(stack);
			struct bsearch4_record new_entry = {
				.addr4 = last_declared_addr + 1,
				.mask4 = pst->mask4,
				.nh = pst->nh,
			};
			if (!add_array_entry(dst_array, &new_entry))
				return (false);
		}
	}

	if (!add_array_entry(dst_array, rib_entry))
		return (false);
	add_array_entry(stack, rib_entry);

	return (true);
}

static enum flm_op_result
bsearch4_build_array(struct bsearch4_array *dst_array, struct bsearch4_array *src_array)
{

	/*
	 * During the iteration, we keep track of all prefixes in the rtable
	 * matching the current address, by maintaining a stack.  As there can
	 * be only 32 prefixes for a single address, pre-allocate a stack of
	 * size 32.
	 */
	struct bsearch4_array stack = {
		.alloc_items = 32,
		.arr = mallocarray(32, sizeof(struct bsearch4_record), M_TEMP,
		    M_NOWAIT | M_ZERO),
	};
	if (stack.arr == NULL)
		return (FLM_REBUILD);

	for (int i = 0; i < src_array->num_items; i++) {
		struct bsearch4_record *rib_entry = &src_array->arr[i];

		if (!bsearch4_process_record(dst_array, &stack, rib_entry)) {
			free(stack.arr, M_TEMP);
			return (FLM_REBUILD);
		}
	}

	/*
	 * We know that the last record is contained in the top stack entry.
	 */
	while (stack.num_items > 0) {
		if (!pop_stack_entry(dst_array, &stack)) {
			/* Avoid leaking the stack on the failure path */
			free(stack.arr, M_TEMP);
			return (FLM_REBUILD);
		}
	}
	free(stack.arr, M_TEMP);

	return (FLM_SUCCESS);
}

static enum flm_op_result
bsearch4_build(struct bsearch4_data *bd)
{
	enum flm_op_result ret;

	struct bsearch4_array prefixes_array = {
		.alloc_items = bd->alloc_items,
		.num_items = bd->num_items,
		.arr = bd->rr,
	};

	/* Add the default route if it does not exist */
	bool default_found = false;
	for (int i = 0; i < prefixes_array.num_items; i++) {
		if (prefixes_array.arr[i].mask4 == 0) {
			default_found = true;
			break;
		}
	}
	if (!default_found) {
		/* Add default route with NULL nhop */
		struct bsearch4_record default_entry = {};
		if (!add_array_entry(&prefixes_array, &default_entry))
			return (FLM_REBUILD);
	}

	/* Sort prefixes */
	qsort(prefixes_array.arr, prefixes_array.num_items,
	    sizeof(struct bsearch4_record), rr_cmp);

	struct bsearch4_array dst_array = {
		.alloc_items = bd->alloc_items,
		.arr = bd->br,
	};

	ret = bsearch4_build_array(&dst_array, &prefixes_array);
	bd->num_items = dst_array.num_items;

	free(bd->rr, M_TEMP);
	bd->rr = NULL;
	return (ret);
}

static enum flm_op_result
bsearch4_end_dump(void *_data, struct fib_dp *dp)
{
	struct bsearch4_data *bd = (struct bsearch4_data *)_data;
	enum flm_op_result ret;

	ret = bsearch4_build(bd);
	if (ret == FLM_SUCCESS) {
		dp->f = bsearch4_lookup;
		dp->arg = bd;
	}

	return (ret);
}

static enum flm_op_result
bsearch4_change_cb(struct rib_head *rnh, struct rib_cmd_info *rc,
    void *_data)
{

	return (FLM_REBUILD);
}
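/*
 * Editorial summary of the callback flow for the module below, based on
 * the callbacks used in this file (the exact sequencing lives in the
 * fib_algo framework, net/route/fib_algo.c): flm_init_cb allocates the
 * instance, the framework then feeds every rtable prefix to
 * flm_dump_rib_item_cb, flm_dump_end_cb compiles the array and publishes
 * the datapath function/argument pair, and any subsequent rtable change
 * makes flm_change_rib_item_cb request a full rebuild via FLM_REBUILD.
 */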
struct fib_lookup_module flm_bsearch4 = {
	.flm_name = "bsearch4",
	.flm_family = AF_INET,
	.flm_init_cb = bsearch4_init,
	.flm_destroy_cb = bsearch4_destroy,
	.flm_dump_rib_item_cb = bsearch4_add_route_cb,
	.flm_dump_end_cb = bsearch4_end_dump,
	.flm_change_rib_item_cb = bsearch4_change_cb,
	.flm_get_pref = bsearch4_get_pref,
};

/*
 * Lockless radix lookup algo.
 *
 * Compiles an immutable radix from the current routing table.
 * Used with a small number of routes (< 1000).
 * As the data structure is immutable, it gets rebuilt on each rtable change.
 *
 * Lookups are slightly faster as shorter lookup keys are used
 * (4 bytes instead of 8 in the stock radix).
 */

#define KEY_LEN_INET	(offsetof(struct sockaddr_in, sin_addr) + sizeof(in_addr_t))
#define OFF_LEN_INET	(8 * offsetof(struct sockaddr_in, sin_addr))
struct radix4_addr_entry {
	struct radix_node	rn[2];
	struct sockaddr_in	addr;
	struct nhop_object	*nhop;
};
#define LRADIX4_ITEM_SZ	roundup2(sizeof(struct radix4_addr_entry), 64)

struct lradix4_data {
	struct radix_node_head	*rnh;
	struct fib_data		*fd;
	void			*mem;
	char			*rt_base;
	uint32_t		alloc_items;
	uint32_t		num_items;
};

static struct nhop_object *
lradix4_lookup(void *algo_data, const struct flm_lookup_key key, uint32_t scopeid)
{
	struct radix_node_head *rnh = (struct radix_node_head *)algo_data;
	struct radix4_addr_entry *ent;
	struct sockaddr_in addr4 = {
		.sin_len = KEY_LEN_INET,
		.sin_addr = key.addr4,
	};
	ent = (struct radix4_addr_entry *)(rnh->rnh_matchaddr(&addr4, &rnh->rh));
	if (ent != NULL)
		return (ent->nhop);
	return (NULL);
}

/*
 * Preference function.
 * Assume close-to-ideal for < 10 routes (though worse than bsearch), then
 * gradually degrade until 1000 routes are reached.
 */
static uint8_t
lradix4_get_pref(const struct rib_rtable_info *rinfo)
{

	if (rinfo->num_prefixes < 10)
		return (250);
	else if (rinfo->num_prefixes < 1000)
		return (254 - rinfo->num_prefixes / 4);
	else
		return (1);
}

static enum flm_op_result
lradix4_init(uint32_t fibnum, struct fib_data *fd, void *_old_data, void **_data)
{
	struct lradix4_data *lr;
	struct rib_rtable_info rinfo;
	uint32_t count;
	size_t sz;

	lr = malloc(sizeof(struct lradix4_data), M_RTABLE, M_NOWAIT | M_ZERO);
	if (lr == NULL || !rn_inithead((void **)&lr->rnh, OFF_LEN_INET))
		return (FLM_REBUILD);
	fib_get_rtable_info(fib_get_rh(fd), &rinfo);

	count = rinfo.num_prefixes * 11 / 10;
	sz = count * LRADIX4_ITEM_SZ + CACHE_LINE_SIZE;
	lr->mem = malloc(sz, M_RTABLE, M_NOWAIT | M_ZERO);
	if (lr->mem == NULL)
		return (FLM_REBUILD);
	/* Align all rtentries to a cacheline boundary */
	lr->rt_base = (char *)roundup2((uintptr_t)lr->mem, CACHE_LINE_SIZE);
	lr->alloc_items = count;
	lr->fd = fd;

	*_data = lr;

	return (FLM_SUCCESS);
}

static void
lradix4_destroy(void *_data)
{
	struct lradix4_data *lr = (struct lradix4_data *)_data;

	if (lr->rnh != NULL)
		rn_detachhead((void **)&lr->rnh);
	if (lr->mem != NULL)
		free(lr->mem, M_RTABLE);
	free(lr, M_RTABLE);
}
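/*
 * Editorial note, not from the original source: the add-route callback
 * below carves entries out of the preallocated arena instead of calling
 * malloc() per route.  Entry i lives at rt_base + i * LRADIX4_ITEM_SZ,
 * where LRADIX4_ITEM_SZ rounds sizeof(struct radix4_addr_entry) up to a
 * multiple of 64 bytes so that each entry starts on its own cache line.
 */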
static enum flm_op_result
lradix4_add_route_cb(struct rtentry *rt, void *_data)
{
	struct lradix4_data *lr = (struct lradix4_data *)_data;
	struct radix4_addr_entry *ae;
	struct sockaddr_in mask;
	struct sockaddr *rt_mask;
	struct radix_node *rn;
	struct in_addr addr4, mask4;
	uint32_t scopeid;

	if (lr->num_items >= lr->alloc_items)
		return (FLM_REBUILD);

	ae = (struct radix4_addr_entry *)(lr->rt_base +
	    lr->num_items * LRADIX4_ITEM_SZ);
	lr->num_items++;

	ae->nhop = rt_get_raw_nhop(rt);

	rt_get_inet_prefix_pmask(rt, &addr4, &mask4, &scopeid);
	ae->addr.sin_len = KEY_LEN_INET;
	ae->addr.sin_addr = addr4;

	if (mask4.s_addr != INADDR_BROADCAST) {
		bzero(&mask, sizeof(mask));
		mask.sin_len = KEY_LEN_INET;
		mask.sin_addr = mask4;
		rt_mask = (struct sockaddr *)&mask;
	} else
		rt_mask = NULL;

	rn = lr->rnh->rnh_addaddr((struct sockaddr *)&ae->addr, rt_mask,
	    &lr->rnh->rh, ae->rn);
	if (rn == NULL)
		return (FLM_REBUILD);

	return (FLM_SUCCESS);
}

static enum flm_op_result
lradix4_end_dump(void *_data, struct fib_dp *dp)
{
	struct lradix4_data *lr = (struct lradix4_data *)_data;

	dp->f = lradix4_lookup;
	dp->arg = lr->rnh;

	return (FLM_SUCCESS);
}

static enum flm_op_result
lradix4_change_cb(struct rib_head *rnh, struct rib_cmd_info *rc,
    void *_data)
{

	return (FLM_REBUILD);
}

struct fib_lookup_module flm_radix4_lockless = {
	.flm_name = "radix4_lockless",
	.flm_family = AF_INET,
	.flm_init_cb = lradix4_init,
	.flm_destroy_cb = lradix4_destroy,
	.flm_dump_rib_item_cb = lradix4_add_route_cb,
	.flm_dump_end_cb = lradix4_end_dump,
	.flm_change_rib_item_cb = lradix4_change_cb,
	.flm_get_pref = lradix4_get_pref,
};

/*
 * Fallback lookup algorithm.
 * This is a simple wrapper around the system radix.
 */

struct radix4_data {
	struct fib_data *fd;
	struct rib_head *rh;
};

static struct nhop_object *
radix4_lookup(void *algo_data, const struct flm_lookup_key key, uint32_t scopeid)
{
	RIB_RLOCK_TRACKER;
	struct rib_head *rh = (struct rib_head *)algo_data;
	struct radix_node *rn;
	struct nhop_object *nh;

	/* Prepare lookup key */
	struct sockaddr_in sin4 = {
		.sin_family = AF_INET,
		.sin_len = sizeof(struct sockaddr_in),
		.sin_addr = key.addr4,
	};

	nh = NULL;
	RIB_RLOCK(rh);
	rn = rh->rnh_matchaddr((void *)&sin4, &rh->head);
	if (rn != NULL && ((rn->rn_flags & RNF_ROOT) == 0))
		nh = (RNTORT(rn))->rt_nhop;
	RIB_RUNLOCK(rh);

	return (nh);
}

static uint8_t
radix4_get_pref(const struct rib_rtable_info *rinfo)
{

	return (50);
}

static enum flm_op_result
radix4_init(uint32_t fibnum, struct fib_data *fd, void *_old_data, void **_data)
{
	struct radix4_data *r4;

	r4 = malloc(sizeof(struct radix4_data), M_RTABLE, M_NOWAIT | M_ZERO);
	if (r4 == NULL)
		return (FLM_REBUILD);
	r4->fd = fd;
	r4->rh = fib_get_rh(fd);

	*_data = r4;

	return (FLM_SUCCESS);
}

static void
radix4_destroy(void *_data)
{

	free(_data, M_RTABLE);
}

static enum flm_op_result
radix4_add_route_cb(struct rtentry *rt, void *_data)
{

	return (FLM_SUCCESS);
}

static enum flm_op_result
radix4_end_dump(void *_data, struct fib_dp *dp)
{
	struct radix4_data *r4 = (struct radix4_data *)_data;

	dp->f = radix4_lookup;
	dp->arg = r4->rh;

	return (FLM_SUCCESS);
}

static enum flm_op_result
radix4_change_cb(struct rib_head *rnh, struct rib_cmd_info *rc,
    void *_data)
{

	return (FLM_SUCCESS);
}
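/*
 * Editorial worked example, not from the original source: how the three
 * preference functions in this file interact.  With 5 prefixes, bsearch4
 * returns 253, radix4_lockless 250 and radix4 a flat 50, so bsearch4 is
 * preferred; at 100 prefixes bsearch4 drops to 1 while radix4_lockless
 * yields 254 - 100/4 = 229 and takes over; past 1000 prefixes both
 * return 1 and the plain radix4 wrapper below wins with its 50.
 */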
struct fib_lookup_module flm_radix4 = {
	.flm_name = "radix4",
	.flm_family = AF_INET,
	.flm_init_cb = radix4_init,
	.flm_destroy_cb = radix4_destroy,
	.flm_dump_rib_item_cb = radix4_add_route_cb,
	.flm_dump_end_cb = radix4_end_dump,
	.flm_change_rib_item_cb = radix4_change_cb,
	.flm_get_pref = radix4_get_pref,
};

static void
fib4_algo_init(void)
{

	fib_module_register(&flm_bsearch4);
	fib_module_register(&flm_radix4_lockless);
	fib_module_register(&flm_radix4);
}
SYSINIT(fib4_algo_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, fib4_algo_init, NULL);
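/*
 * Editorial usage note (assumed interface; verify the sysctl names on the
 * target system): with the FIB_ALGO framework, the active IPv4 algorithm
 * is normally chosen automatically via the flm_get_pref callbacks above,
 * and is expected to be inspectable or pinnable along the lines of:
 *
 *	sysctl net.route.algo.inet.algo
 *	sysctl net.route.algo.inet.algo=bsearch4
 */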