/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2020 Alexander V. Chernikov
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#include "opt_inet.h"
#include "opt_route.h"

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/rmlock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/refcount.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/kernel.h>
#include <sys/epoch.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/route/route_ctl.h>
#include <net/route/route_var.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_fib.h>

#include <net/route/nhop_utils.h>
#include <net/route/nhop.h>
#include <net/route/nhop_var.h>
#include <net/route/nhgrp_var.h>

/*
 * This file contains the supporting functions for creating multipath groups
 * and compiling their dataplane parts.
 */

/* MPF_MULTIPATH must be the same as NHF_MULTIPATH for nhop selection to work */
_Static_assert(MPF_MULTIPATH == NHF_MULTIPATH,
    "MPF_MULTIPATH must be the same as NHF_MULTIPATH");
/* Offset and size of flags field has to be the same for nhop/nhop groups */
CHK_STRUCT_FIELD_GENERIC(struct nhop_object, nh_flags, struct nhgrp_object, nhg_flags);
/* Cap multipath to 64, as larger values would break rib_cmd_info bmasks */
CTASSERT(RIB_MAX_MPATH_WIDTH <= 64);

static int wn_cmp(const void *a, const void *b);
static void sort_weightened_nhops(struct weightened_nhop *wn, int num_nhops);

static struct nhgrp_priv *get_nhgrp(struct nh_control *ctl,
    struct weightened_nhop *wn, int num_nhops, int *perror);
static void destroy_nhgrp(struct nhgrp_priv *nhg_priv);
static void destroy_nhgrp_epoch(epoch_context_t ctx);
static void free_nhgrp_nhops(struct nhgrp_priv *nhg_priv);

static int
wn_cmp(const void *a, const void *b)
{
        const struct weightened_nhop *wa = a;
        const struct weightened_nhop *wb = b;

        if (wa->weight > wb->weight)
                return (1);
        else if (wa->weight < wb->weight)
                return (-1);

        /* Compare nexthops by pointer */
        if (wa->nh > wb->nh)
                return (1);
        else if (wa->nh < wb->nh)
                return (-1);
        else
                return (0);
}

/*
 * Perform in-place sorting for array of nexthops in @wn.
 *
 * To avoid duplication of nexthop groups, the nexthops/weights in
 * @wn need to be ordered deterministically.
 * As this sorting is needed only for the control plane functionality,
 * there are no specific external requirements.
 *
 * Sort by weight first, to ease calculation of the slot sizes.
 */
static void
sort_weightened_nhops(struct weightened_nhop *wn, int num_nhops)
{

        qsort(wn, num_nhops, sizeof(struct weightened_nhop), wn_cmp);
}

/*
 * Calculate minimum number of slots required to fit the existing
 * set of weights in the common use case where weights are "easily"
 * comparable.
 * Assumes @wn is sorted by weight ascending and each weight is > 0.
 * Returns number of slots or 0 if precise calculation failed.
 *
 * Some examples:
 * note: (i, X) pair means (nhop=i, weight=X):
 * (1, 1) (2, 2) -> 3 slots [1, 2, 2]
 * (1, 100), (2, 200) -> 3 slots [1, 2, 2]
 * (1, 100), (2, 200), (3, 400) -> 7 slots [1, 2, 2, 3, 3, 3, 3]
 */
static uint32_t
calc_min_mpath_slots_fast(const struct weightened_nhop *wn, size_t num_items)
{
        uint32_t i, last, xmin;
        uint64_t total = 0;

        last = 0;
        xmin = wn[0].weight;
        for (i = 0; i < num_items; i++) {
                total += wn[i].weight;
                if ((wn[i].weight - last < xmin) && (wn[i].weight != last))
                        xmin = wn[i].weight - last;
                last = wn[i].weight;
        }
        /* xmin is the minimum unit of desired capacity */
        if ((total % xmin) != 0)
                return (0);
        for (i = 0; i < num_items; i++) {
                if ((wn[i].weight % xmin) != 0)
                        return (0);
        }

        return ((uint32_t)(total / xmin));
}

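/*
 * Additional worked example of the fast path failing (illustrative only):
 * for weights (1, 3), (2, 5) sorted ascending, xmin ends up as 2 (5 - 3)
 * and total is 8. Although 8 % 2 == 0, 3 % 2 != 0, so the function returns
 * 0 and calc_min_mpath_slots() below falls back to RIB_MAX_MPATH_WIDTH.
 */
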
/*
 * Calculate minimum number of slots required to fit the existing
 * set of weights while maintaining weight coefficients.
 *
 * Assume @wn is sorted by weight ascending and each weight is > 0.
 *
 * Tries to find simple precise solution first and falls back to
 * RIB_MAX_MPATH_WIDTH in case of any failure.
 */
static uint32_t
calc_min_mpath_slots(const struct weightened_nhop *wn, size_t num_items)
{
        uint32_t v;

        v = calc_min_mpath_slots_fast(wn, num_items);
        if ((v == 0) || (v > RIB_MAX_MPATH_WIDTH))
                v = RIB_MAX_MPATH_WIDTH;

        return (v);
}

/*
 * Nexthop group data consists of
 * 1) dataplane part, with nhgrp_object as a header followed by an
 *    arbitrary number of nexthop pointers.
 * 2) control plane part, with nhgrp_priv as a header, followed by
 *    an arbitrary number of 'struct weightened_nhop' objects.
 *
 * Given nexthop groups are (mostly) immutable, allocate all data
 * in one go.
 */
__noinline static size_t
get_nhgrp_alloc_size(uint32_t nhg_size, uint32_t num_nhops)
{
        size_t sz;

        sz = sizeof(struct nhgrp_object);
        sz += nhg_size * sizeof(struct nhop_object *);
        sz += sizeof(struct nhgrp_priv);
        sz += num_nhops * sizeof(struct weightened_nhop);
        return (sz);
}

/*
 * Compile the actual list of nexthops to be used by the datapath for
 * the nexthop group @dst_priv.
 *
 * For example, compiling control plane list of 2 nexthops
 * [(200, A), (100, B)] would result in the datapath array
 * [A, A, B]
 */
static void
compile_nhgrp(struct nhgrp_priv *dst_priv, const struct weightened_nhop *x,
    uint32_t num_slots)
{
        struct nhgrp_object *dst;
        int i, slot_idx, remaining_slots;
        uint64_t remaining_sum, nh_weight, nh_slots;

        slot_idx = 0;
        dst = dst_priv->nhg;
        /* Calculate sum of all weights */
        remaining_sum = 0;
        for (i = 0; i < dst_priv->nhg_nh_count; i++)
                remaining_sum += x[i].weight;
        remaining_slots = num_slots;
        DPRINTF("O: %u/%u", (uint32_t)remaining_sum, remaining_slots);
        for (i = 0; i < dst_priv->nhg_nh_count; i++) {
                /* Calculate number of slots for the current nexthop */
                if (remaining_sum > 0) {
                        nh_weight = (uint64_t)x[i].weight;
                        nh_slots = (nh_weight * remaining_slots / remaining_sum);
                } else
                        nh_slots = 0;

                remaining_sum -= x[i].weight;
                remaining_slots -= nh_slots;

                DPRINTF(" OO[%d]: %u/%u curr=%d slot_idx=%d", i,
                    (uint32_t)remaining_sum, remaining_slots,
                    (int)nh_slots, slot_idx);

                KASSERT((slot_idx + nh_slots <= num_slots),
                    ("index overflow during nhg compilation"));
                while (nh_slots-- > 0)
                        dst->nhops[slot_idx++] = x[i].nh;
        }
}

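/*
 * Layout sketch of the single allocation described above (reader aid; the
 * authoritative definitions and the NHGRP_PRIV() macro live in nhgrp_var.h):
 *
 *   +---------------------+ <- returned by malloc(), struct nhgrp_object *nhg
 *   | struct nhgrp_object | dataplane header (nhg_size, nhg_flags, ...)
 *   | nhops[0..nhg_size)  | expanded array of nexthop pointers
 *   +---------------------+ <- NHGRP_PRIV(nhg), struct nhgrp_priv *nhg_priv
 *   | struct nhgrp_priv   | control plane state (refcounts, linkage)
 *   | nhg_nh_weights[]    | nhg_nh_count 'struct weightened_nhop' entries
 *   +---------------------+
 */
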
/*
 * Allocates new nexthop group for the list of weightened nexthops.
 * Assume sorted list.
 * Does NOT reference any nexthops in the group.
 * Returns group with refcount=1 or NULL.
 */
static struct nhgrp_priv *
alloc_nhgrp(struct weightened_nhop *wn, int num_nhops)
{
        uint32_t nhgrp_size;
        struct nhgrp_object *nhg;
        struct nhgrp_priv *nhg_priv;

        nhgrp_size = calc_min_mpath_slots(wn, num_nhops);
        if (nhgrp_size == 0) {
                /* Zero weights, abort */
                return (NULL);
        }

        size_t sz = get_nhgrp_alloc_size(nhgrp_size, num_nhops);
        nhg = malloc(sz, M_NHOP, M_NOWAIT | M_ZERO);
        if (nhg == NULL) {
                return (NULL);
        }

        /* Has to be the first to make NHGRP_PRIV() work */
        nhg->nhg_size = nhgrp_size;
        DPRINTF("new mpath group: num_nhops: %u", (uint32_t)nhgrp_size);
        nhg->nhg_flags = MPF_MULTIPATH;

        nhg_priv = NHGRP_PRIV(nhg);
        nhg_priv->nhg_nh_count = num_nhops;
        refcount_init(&nhg_priv->nhg_refcount, 1);

        /* Please see nhgrp_free() comments on the initial value */
        refcount_init(&nhg_priv->nhg_linked, 2);

        nhg_priv->nhg = nhg;
        memcpy(&nhg_priv->nhg_nh_weights[0], wn,
            num_nhops * sizeof(struct weightened_nhop));

        compile_nhgrp(nhg_priv, wn, nhg->nhg_size);

        return (nhg_priv);
}

void
nhgrp_ref_object(struct nhgrp_object *nhg)
{
        struct nhgrp_priv *nhg_priv;
        u_int old;

        nhg_priv = NHGRP_PRIV(nhg);
        old = refcount_acquire(&nhg_priv->nhg_refcount);
        KASSERT(old > 0, ("%s: nhgrp object %p has 0 refs", __func__, nhg));
}

void
nhgrp_free(struct nhgrp_object *nhg)
{
        struct nhgrp_priv *nhg_priv;
        struct nh_control *ctl;
        struct epoch_tracker et;

        nhg_priv = NHGRP_PRIV(nhg);

        if (!refcount_release(&nhg_priv->nhg_refcount))
                return;

        /*
         * Group objects don't have an explicit lock attached to them.
         * As groups are reclaimed based on reference count, it is possible
         * that some groups will persist after the vnet destruction callback
         * has been called. Given that, handle the scenario with nhgrp_free()
         * being called either after or simultaneously with
         * nhgrp_ctl_unlink_all() by using another reference counter:
         * nhg_linked.
         *
         * There are only 2 places where nhg_linked can be decreased:
         * rib destroy (nhgrp_ctl_unlink_all) and this function.
         * nhg_linked can never be increased.
         *
         * Hence, use initial value of 2 to make use of
         * refcount_release_if_not_last().
         *
         * There can be two scenarios when calling this function:
         *
         * 1) nhg_linked value is 2. This means that either
         *  nhgrp_ctl_unlink_all() has not been called OR it is running,
         *  but we are guaranteed that nh_control won't be freed in
         *  this epoch. Hence, the nexthop group can be safely unlinked.
         *
         * 2) nhg_linked value is 1. In that case, nhgrp_ctl_unlink_all()
         *  has been called and nhgrp unlink can be skipped.
         */

        NET_EPOCH_ENTER(et);
        if (refcount_release_if_not_last(&nhg_priv->nhg_linked)) {
                ctl = nhg_priv->nh_control;
                if (unlink_nhgrp(ctl, nhg_priv) == NULL) {
                        /* Do not try to reclaim */
                        DPRINTF("Failed to unlink nexthop group %p", nhg_priv);
                        NET_EPOCH_EXIT(et);
                        return;
                }
        }
        NET_EPOCH_EXIT(et);

        epoch_call(net_epoch_preempt, destroy_nhgrp_epoch,
            &nhg_priv->nhg_epoch_ctx);
}

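/*
 * Illustration of the two nhg_linked cases above as seen from nhgrp_free()
 * (the matching decrement on rib teardown happens in nhgrp_ctl_unlink_all(),
 * outside this file):
 *
 *   nhg_linked == 2: refcount_release_if_not_last() succeeds (2 -> 1), so
 *                    the group is unlinked from its nh_control here before
 *                    the epoch-deferred destruction is scheduled.
 *   nhg_linked == 1: the rib is being (or has been) destroyed; the release
 *                    fails, the unlink is skipped and only the deferred
 *                    destruction is scheduled.
 */
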
/*
 * Destroys all local resources belonging to @nhg_priv.
 */
__noinline static void
destroy_nhgrp_int(struct nhgrp_priv *nhg_priv)
{

        free(nhg_priv->nhg, M_NHOP);
}

__noinline static void
destroy_nhgrp(struct nhgrp_priv *nhg_priv)
{

        KASSERT((nhg_priv->nhg_refcount == 0), ("nhg_refcount != 0"));

        DPRINTF("DEL MPATH %p", nhg_priv);

        KASSERT((nhg_priv->nhg_idx == 0), ("gr_idx != 0"));

        free_nhgrp_nhops(nhg_priv);

        destroy_nhgrp_int(nhg_priv);
}

/*
 * Epoch callback indicating group is safe to destroy
 */
static void
destroy_nhgrp_epoch(epoch_context_t ctx)
{
        struct nhgrp_priv *nhg_priv;

        nhg_priv = __containerof(ctx, struct nhgrp_priv, nhg_epoch_ctx);

        destroy_nhgrp(nhg_priv);
}

static bool
ref_nhgrp_nhops(struct nhgrp_priv *nhg_priv)
{

        for (int i = 0; i < nhg_priv->nhg_nh_count; i++) {
                if (nhop_try_ref_object(nhg_priv->nhg_nh_weights[i].nh) != 0)
                        continue;

                /*
                 * Failed to ref the nexthop, b/c it's deleted.
                 * Need to roll back the references taken so far.
                 */
                for (int j = 0; j < i; j++)
                        nhop_free(nhg_priv->nhg_nh_weights[j].nh);
                return (false);
        }

        return (true);
}

static void
free_nhgrp_nhops(struct nhgrp_priv *nhg_priv)
{

        for (int i = 0; i < nhg_priv->nhg_nh_count; i++)
                nhop_free(nhg_priv->nhg_nh_weights[i].nh);
}

/*
 * Creates or looks up an existing nexthop group based on @wn and @num_nhops.
 *
 * Returns referenced nhop group or NULL, passing error code in @perror.
 */
static struct nhgrp_priv *
get_nhgrp(struct nh_control *ctl, struct weightened_nhop *wn, int num_nhops,
    int *perror)
{
        struct nhgrp_priv *key, *nhg_priv;

        if (num_nhops > RIB_MAX_MPATH_WIDTH) {
                *perror = E2BIG;
                return (NULL);
        }

        if (ctl->gr_head.hash_size == 0) {
                /* First multipath request. Bootstrap mpath data structures. */
                if (nhgrp_ctl_alloc_default(ctl, M_NOWAIT) == 0) {
                        *perror = ENOMEM;
                        return (NULL);
                }
        }

        /* Sort nexthops & check there are no duplicates */
        sort_weightened_nhops(wn, num_nhops);
        uint32_t last_id = 0;
        for (int i = 0; i < num_nhops; i++) {
                if (wn[i].nh->nh_priv->nh_idx == last_id) {
                        *perror = EEXIST;
                        return (NULL);
                }
                last_id = wn[i].nh->nh_priv->nh_idx;
        }

        if ((key = alloc_nhgrp(wn, num_nhops)) == NULL) {
                *perror = ENOMEM;
                return (NULL);
        }

        nhg_priv = find_nhgrp(ctl, key);
        if (nhg_priv != NULL) {
                /*
                 * Free the originally-created group. As it hasn't been
                 * linked and the dependent nexthops haven't been referenced,
                 * just free the group.
                 */
                destroy_nhgrp_int(key);
                *perror = 0;
                return (nhg_priv);
        } else {
                /* No existing group, try to link the new one */
                if (!ref_nhgrp_nhops(key)) {
                        /*
                         * Some of the nexthops have been scheduled for
                         * deletion. As the group hasn't been linked and no
                         * nexthops have been referenced, call the final
                         * destructor immediately.
                         */
                        destroy_nhgrp_int(key);
                        *perror = EAGAIN;
                        return (NULL);
                }
                if (link_nhgrp(ctl, key) == 0) {
                        /* Unable to allocate index? */
                        *perror = EAGAIN;
                        free_nhgrp_nhops(key);
                        destroy_nhgrp_int(key);
                        return (NULL);
                }
                *perror = 0;
                return (key);
        }

        /* NOTREACHED */
}

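/*
 * Summary of the @perror values set by get_nhgrp() above:
 *   0      - success, a referenced group is returned
 *   E2BIG  - more than RIB_MAX_MPATH_WIDTH nexthops requested
 *   ENOMEM - hash bootstrap or group allocation failed
 *   EEXIST - duplicate nexthop in @wn
 *   EAGAIN - a nexthop is being deleted or no group index was available;
 *            the request may be retried
 */
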
/*
 * Appends one or more nexthops denoted by @wn to the nexthop group @gr_orig.
 *
 * Returns referenced nexthop group or NULL. In the latter case, @perror is
 * filled with an error code.
 * Note that the function does NOT care if the new nexthops already exist
 * in @gr_orig. As a result, they will be added, resulting in the
 * same nexthop being present multiple times in the new group.
 */
static struct nhgrp_priv *
append_nhops(struct nh_control *ctl, const struct nhgrp_object *gr_orig,
    struct weightened_nhop *wn, int num_nhops, int *perror)
{
        char storage[64];
        struct weightened_nhop *pnhops;
        struct nhgrp_priv *nhg_priv;
        const struct nhgrp_priv *src_priv;
        size_t sz;
        int curr_nhops;

        src_priv = NHGRP_PRIV_CONST(gr_orig);
        curr_nhops = src_priv->nhg_nh_count;

        *perror = 0;

        sz = (src_priv->nhg_nh_count + num_nhops) * (sizeof(struct weightened_nhop));
        /* optimize for <= 4 paths, each path=16 bytes */
        if (sz <= sizeof(storage))
                pnhops = (struct weightened_nhop *)&storage[0];
        else {
                pnhops = malloc(sz, M_TEMP, M_NOWAIT);
                if (pnhops == NULL) {
                        *perror = ENOMEM;
                        return (NULL);
                }
        }

        /* Copy nhops from original group first */
        memcpy(pnhops, src_priv->nhg_nh_weights,
            curr_nhops * sizeof(struct weightened_nhop));
        memcpy(&pnhops[curr_nhops], wn, num_nhops * sizeof(struct weightened_nhop));
        curr_nhops += num_nhops;

        nhg_priv = get_nhgrp(ctl, pnhops, curr_nhops, perror);

        if (pnhops != (struct weightened_nhop *)&storage[0])
                free(pnhops, M_TEMP);

        if (nhg_priv == NULL)
                return (NULL);

        return (nhg_priv);
}

/*
 * Creates/finds nexthop group based on @wn and @num_nhops.
 * Returns 0 on success with referenced group in @rnd, or
 * errno.
 *
 * If the error is EAGAIN, then the operation can be retried.
 */
int
nhgrp_get_group(struct rib_head *rh, struct weightened_nhop *wn, int num_nhops,
    struct route_nhop_data *rnd)
{
        struct nh_control *ctl = rh->nh_control;
        struct nhgrp_priv *nhg_priv;
        int error;

        nhg_priv = get_nhgrp(ctl, wn, num_nhops, &error);
        if (nhg_priv != NULL)
                rnd->rnd_nhgrp = nhg_priv->nhg;
        rnd->rnd_weight = 0;

        return (error);
}

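/*
 * Example caller of nhgrp_get_group() (hypothetical; real callers live in
 * the routing control code). It builds a weightened_nhop array from two
 * referenced nexthops and retries on EAGAIN as documented above:
 *
 *      struct weightened_nhop wn[2] = {
 *              { .nh = nh_a, .weight = 100 },
 *              { .nh = nh_b, .weight = 200 },
 *      };
 *      struct route_nhop_data rnd;
 *      int error;
 *
 *      do {
 *              error = nhgrp_get_group(rh, wn, 2, &rnd);
 *      } while (error == EAGAIN);
 *      if (error == 0)
 *              use rnd.rnd_nhgrp; drop the reference with nhgrp_free()
 */
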
/*
 * Creates new nexthop group based on @src group without the nexthops
 * chosen by @flt_func.
 * Returns 0 on success, storing the referenced nhop group/object in @rnd.
 */
int
nhgrp_get_filtered_group(struct rib_head *rh, const struct nhgrp_object *src,
    nhgrp_filter_cb_t flt_func, void *flt_data, struct route_nhop_data *rnd)
{
        char storage[64];
        struct nh_control *ctl = rh->nh_control;
        struct weightened_nhop *pnhops;
        const struct nhgrp_priv *mp_priv, *src_priv;
        size_t sz;
        int error, i, num_nhops;

        src_priv = NHGRP_PRIV_CONST(src);

        sz = src_priv->nhg_nh_count * (sizeof(struct weightened_nhop));
        /* optimize for <= 4 paths, each path=16 bytes */
        if (sz <= sizeof(storage))
                pnhops = (struct weightened_nhop *)&storage[0];
        else {
                if ((pnhops = malloc(sz, M_TEMP, M_NOWAIT)) == NULL)
                        return (ENOMEM);
        }

        /* Filter nexthops */
        error = 0;
        num_nhops = 0;
        for (i = 0; i < src_priv->nhg_nh_count; i++) {
                if (flt_func(src_priv->nhg_nh_weights[i].nh, flt_data))
                        continue;
                memcpy(&pnhops[num_nhops++], &src_priv->nhg_nh_weights[i],
                    sizeof(struct weightened_nhop));
        }

        if (num_nhops == 0) {
                rnd->rnd_nhgrp = NULL;
                rnd->rnd_weight = 0;
        } else if (num_nhops == 1) {
                rnd->rnd_nhop = pnhops[0].nh;
                rnd->rnd_weight = pnhops[0].weight;
                if (nhop_try_ref_object(rnd->rnd_nhop) == 0)
                        error = EAGAIN;
        } else {
                mp_priv = get_nhgrp(ctl, pnhops, num_nhops, &error);
                if (mp_priv != NULL)
                        rnd->rnd_nhgrp = mp_priv->nhg;
                rnd->rnd_weight = 0;
        }

        if (pnhops != (struct weightened_nhop *)&storage[0])
                free(pnhops, M_TEMP);

        return (error);
}

/*
 * Creates new multipath group based on existing group/nhop in @rnd_orig and
 * the to-be-added nhop in @rnd_add.
 * Returns 0 on success and stores result in @rnd_new.
 */
int
nhgrp_get_addition_group(struct rib_head *rh, struct route_nhop_data *rnd_orig,
    struct route_nhop_data *rnd_add, struct route_nhop_data *rnd_new)
{
        struct nh_control *ctl = rh->nh_control;
        struct nhgrp_priv *nhg_priv;
        struct weightened_nhop wn[2] = {};
        int error;

        if (rnd_orig->rnd_nhop == NULL) {
                /* No paths to add to, just reference current nhop */
                *rnd_new = *rnd_add;
                if (nhop_try_ref_object(rnd_new->rnd_nhop) == 0)
                        return (EAGAIN);
                return (0);
        }

        wn[0].nh = rnd_add->rnd_nhop;
        wn[0].weight = rnd_add->rnd_weight;

        if (!NH_IS_NHGRP(rnd_orig->rnd_nhop)) {
                /* Simple merge of 2 non-multipath nexthops */
                wn[1].nh = rnd_orig->rnd_nhop;
                wn[1].weight = rnd_orig->rnd_weight;
                nhg_priv = get_nhgrp(ctl, wn, 2, &error);
        } else {
                /* Get new nhop group with @rnd_add->rnd_nhop as an additional nhop */
                nhg_priv = append_nhops(ctl, rnd_orig->rnd_nhgrp, &wn[0], 1,
                    &error);
        }

        if (nhg_priv == NULL)
                return (error);
        rnd_new->rnd_nhgrp = nhg_priv->nhg;
        rnd_new->rnd_weight = 0;

        return (0);
}

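/*
 * Example filter callback for nhgrp_get_filtered_group() (hypothetical; the
 * nhgrp_filter_cb_t signature is assumed from its use above: return non-zero
 * to exclude a nexthop). This one would drop the single nexthop passed via
 * @flt_data:
 *
 *      static bool
 *      drop_nhop_cb(const struct nhop_object *nh, void *data)
 *      {
 *
 *              return (nh == (const struct nhop_object *)data);
 *      }
 *
 *      error = nhgrp_get_filtered_group(rh, src, drop_nhop_cb, nh_del, &rnd);
 */
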
/*
 * Returns pointer to array of nexthops with weights for
 * given @nhg. Stores number of items in the array into @pnum_nhops.
 */
struct weightened_nhop *
nhgrp_get_nhops(struct nhgrp_object *nhg, uint32_t *pnum_nhops)
{
        struct nhgrp_priv *nhg_priv;

        KASSERT(((nhg->nhg_flags & MPF_MULTIPATH) != 0), ("nhop is not mpath"));

        nhg_priv = NHGRP_PRIV(nhg);
        *pnum_nhops = nhg_priv->nhg_nh_count;

        return (nhg_priv->nhg_nh_weights);
}

__noinline static int
dump_nhgrp_entry(struct rib_head *rh, const struct nhgrp_priv *nhg_priv,
    char *buffer, size_t buffer_size, struct sysctl_req *w)
{
        struct rt_msghdr *rtm;
        struct nhgrp_external *nhge;
        struct nhgrp_container *nhgc;
        const struct nhgrp_object *nhg;
        struct nhgrp_nhop_external *ext;
        int error;
        size_t sz;

        nhg = nhg_priv->nhg;

        sz = sizeof(struct rt_msghdr) + sizeof(struct nhgrp_external);
        /* controlplane nexthops */
        sz += sizeof(struct nhgrp_container);
        sz += sizeof(struct nhgrp_nhop_external) * nhg_priv->nhg_nh_count;
        /* dataplane nexthops */
        sz += sizeof(struct nhgrp_container);
        sz += sizeof(struct nhgrp_nhop_external) * nhg->nhg_size;

        KASSERT(sz <= buffer_size, ("increase nhgrp buffer size"));

        bzero(buffer, sz);

        rtm = (struct rt_msghdr *)buffer;
        rtm->rtm_msglen = sz;
        rtm->rtm_version = RTM_VERSION;
        rtm->rtm_type = RTM_GET;

        nhge = (struct nhgrp_external *)(rtm + 1);

        nhge->nhg_idx = nhg_priv->nhg_idx;
        nhge->nhg_refcount = nhg_priv->nhg_refcount;

        /* fill in control plane nexthops first */
        nhgc = (struct nhgrp_container *)(nhge + 1);
        nhgc->nhgc_type = NHG_C_TYPE_CNHOPS;
        nhgc->nhgc_subtype = 0;
        nhgc->nhgc_len = sizeof(struct nhgrp_container);
        nhgc->nhgc_len += sizeof(struct nhgrp_nhop_external) * nhg_priv->nhg_nh_count;
        nhgc->nhgc_count = nhg_priv->nhg_nh_count;

        ext = (struct nhgrp_nhop_external *)(nhgc + 1);
        for (int i = 0; i < nhg_priv->nhg_nh_count; i++) {
                ext[i].nh_idx = nhg_priv->nhg_nh_weights[i].nh->nh_priv->nh_idx;
                ext[i].nh_weight = nhg_priv->nhg_nh_weights[i].weight;
        }

        /* fill in dataplane nexthops */
        nhgc = (struct nhgrp_container *)(&ext[nhg_priv->nhg_nh_count]);
        nhgc->nhgc_type = NHG_C_TYPE_DNHOPS;
        nhgc->nhgc_subtype = 0;
        nhgc->nhgc_len = sizeof(struct nhgrp_container);
        nhgc->nhgc_len += sizeof(struct nhgrp_nhop_external) * nhg->nhg_size;
        nhgc->nhgc_count = nhg->nhg_size;

        ext = (struct nhgrp_nhop_external *)(nhgc + 1);
        for (int i = 0; i < nhg->nhg_size; i++) {
                ext[i].nh_idx = nhg->nhops[i]->nh_priv->nh_idx;
                ext[i].nh_weight = 0;
        }

        error = SYSCTL_OUT(w, buffer, sz);

        return (error);
}

uint32_t
nhgrp_get_idx(const struct nhgrp_object *nhg)
{
        const struct nhgrp_priv *nhg_priv;

        nhg_priv = NHGRP_PRIV_CONST(nhg);
        return (nhg_priv->nhg_idx);
}

uint32_t
nhgrp_get_count(struct rib_head *rh)
{
        struct nh_control *ctl;
        uint32_t count;

        ctl = rh->nh_control;

        NHOPS_RLOCK(ctl);
        count = ctl->gr_head.items_count;
        NHOPS_RUNLOCK(ctl);

        return (count);
}

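/*
 * Shape of each message emitted by dump_nhgrp_entry() above (reader aid;
 * the field layout is defined by the externally visible structures):
 *
 *   struct rt_msghdr               RTM_GET header, rtm_msglen covers the entry
 *   struct nhgrp_external          group index and reference count
 *   struct nhgrp_container         NHG_C_TYPE_CNHOPS, nhg_nh_count entries
 *     struct nhgrp_nhop_external[]   control plane (nh_idx, nh_weight)
 *   struct nhgrp_container         NHG_C_TYPE_DNHOPS, nhg_size entries
 *     struct nhgrp_nhop_external[]   dataplane slots (nh_idx, weight 0)
 */
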
int
nhgrp_dump_sysctl(struct rib_head *rh, struct sysctl_req *w)
{
        struct nh_control *ctl = rh->nh_control;
        struct epoch_tracker et;
        struct nhgrp_priv *nhg_priv;
        char *buffer;
        size_t sz;
        int error = 0;

        if (ctl->gr_head.items_count == 0)
                return (0);

        /* Calculate the maximum nhop group size in bytes */
        sz = sizeof(struct rt_msghdr) + sizeof(struct nhgrp_external);
        sz += 2 * sizeof(struct nhgrp_container);
        sz += 2 * sizeof(struct nhgrp_nhop_external) * RIB_MAX_MPATH_WIDTH;
        buffer = malloc(sz, M_TEMP, M_NOWAIT);
        if (buffer == NULL)
                return (ENOMEM);

        NET_EPOCH_ENTER(et);
        NHOPS_RLOCK(ctl);
        CHT_SLIST_FOREACH(&ctl->gr_head, mpath, nhg_priv) {
                error = dump_nhgrp_entry(rh, nhg_priv, buffer, sz, w);
                if (error != 0)
                        break;
        } CHT_SLIST_FOREACH_END;
        NHOPS_RUNLOCK(ctl);
        NET_EPOCH_EXIT(et);

        free(buffer, M_TEMP);

        return (error);
}