// SPDX-License-Identifier: GPL-2.0
/* Generic nexthop implementation
 *
 * Copyright (c) 2017-19 Cumulus Networks
 * Copyright (c) 2017-19 David Ahern <dsa@cumulusnetworks.com>
 */

#include <linux/nexthop.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/lwtunnel.h>
#include <net/ndisc.h>
#include <net/nexthop.h>
#include <net/route.h>
#include <net/sock.h>

#define NH_RES_DEFAULT_IDLE_TIMER	(120 * HZ)
#define NH_RES_DEFAULT_UNBALANCED_TIMER	0	/* No forced rebalancing. */

static void remove_nexthop(struct net *net, struct nexthop *nh,
			   struct nl_info *nlinfo);

#define NH_DEV_HASHBITS  8
#define NH_DEV_HASHSIZE (1U << NH_DEV_HASHBITS)

#define NHA_OP_FLAGS_DUMP_ALL (NHA_OP_FLAG_DUMP_STATS | \
			       NHA_OP_FLAG_DUMP_HW_STATS)

static const struct nla_policy rtm_nh_policy_new[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_GROUP]		= { .type = NLA_BINARY },
	[NHA_GROUP_TYPE]	= { .type = NLA_U16 },
	[NHA_BLACKHOLE]		= { .type = NLA_FLAG },
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_GATEWAY]		= { .type = NLA_BINARY },
	[NHA_ENCAP_TYPE]	= { .type = NLA_U16 },
	[NHA_ENCAP]		= { .type = NLA_NESTED },
	[NHA_FDB]		= { .type = NLA_FLAG },
	[NHA_RES_GROUP]		= { .type = NLA_NESTED },
	[NHA_HW_STATS_ENABLE]	= NLA_POLICY_MAX(NLA_U32, true),
};

static const struct nla_policy rtm_nh_policy_get[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_OP_FLAGS]		= NLA_POLICY_MASK(NLA_U32,
						  NHA_OP_FLAGS_DUMP_ALL),
};

static const struct nla_policy rtm_nh_policy_del[] = {
	[NHA_ID]		= { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_dump[] = {
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_GROUPS]		= { .type = NLA_FLAG },
	[NHA_MASTER]		= { .type = NLA_U32 },
	[NHA_FDB]		= { .type = NLA_FLAG },
	[NHA_OP_FLAGS]		= NLA_POLICY_MASK(NLA_U32,
						  NHA_OP_FLAGS_DUMP_ALL),
};

static const struct nla_policy rtm_nh_res_policy_new[] = {
	[NHA_RES_GROUP_BUCKETS]			= { .type = NLA_U16 },
	[NHA_RES_GROUP_IDLE_TIMER]		= { .type = NLA_U32 },
	[NHA_RES_GROUP_UNBALANCED_TIMER]	= { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_dump_bucket[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_MASTER]		= { .type = NLA_U32 },
	[NHA_RES_BUCKET]	= { .type = NLA_NESTED },
};

static const struct nla_policy rtm_nh_res_bucket_policy_dump[] = {
	[NHA_RES_BUCKET_NH_ID]	= { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_get_bucket[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_RES_BUCKET]	= { .type = NLA_NESTED },
};

static const struct nla_policy rtm_nh_res_bucket_policy_get[] = {
	[NHA_RES_BUCKET_INDEX]	= { .type = NLA_U16 },
};
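
/* Editor's note, illustrative only (not part of the original file):
 * NLA_POLICY_MASK() in the policies above makes attribute validation reject
 * any NHA_OP_FLAGS bit outside NHA_OP_FLAGS_DUMP_ALL. For example, a GET
 * request carrying NHA_OP_FLAG_DUMP_STATS alone validates, while a request
 * with an unknown flag bit fails parsing before the handler ever runs.
 */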

static bool nexthop_notifiers_is_empty(struct net *net)
{
	return !net->nexthop.notifier_chain.head;
}

static void
__nh_notifier_single_info_init(struct nh_notifier_single_info *nh_info,
			       const struct nh_info *nhi)
{
	nh_info->dev = nhi->fib_nhc.nhc_dev;
	nh_info->gw_family = nhi->fib_nhc.nhc_gw_family;
	if (nh_info->gw_family == AF_INET)
		nh_info->ipv4 = nhi->fib_nhc.nhc_gw.ipv4;
	else if (nh_info->gw_family == AF_INET6)
		nh_info->ipv6 = nhi->fib_nhc.nhc_gw.ipv6;

	nh_info->id = nhi->nh_parent->id;
	nh_info->is_reject = nhi->reject_nh;
	nh_info->is_fdb = nhi->fdb_nh;
	nh_info->has_encap = !!nhi->fib_nhc.nhc_lwtstate;
}

static int nh_notifier_single_info_init(struct nh_notifier_info *info,
					const struct nexthop *nh)
{
	struct nh_info *nhi = rtnl_dereference(nh->nh_info);

	info->type = NH_NOTIFIER_INFO_TYPE_SINGLE;
	info->nh = kzalloc(sizeof(*info->nh), GFP_KERNEL);
	if (!info->nh)
		return -ENOMEM;

	__nh_notifier_single_info_init(info->nh, nhi);

	return 0;
}

static void nh_notifier_single_info_fini(struct nh_notifier_info *info)
{
	kfree(info->nh);
}

static int nh_notifier_mpath_info_init(struct nh_notifier_info *info,
				       struct nh_group *nhg)
{
	u16 num_nh = nhg->num_nh;
	int i;

	info->type = NH_NOTIFIER_INFO_TYPE_GRP;
	info->nh_grp = kzalloc(struct_size(info->nh_grp, nh_entries, num_nh),
			       GFP_KERNEL);
	if (!info->nh_grp)
		return -ENOMEM;

	info->nh_grp->num_nh = num_nh;
	info->nh_grp->is_fdb = nhg->fdb_nh;
	info->nh_grp->hw_stats = nhg->hw_stats;

	for (i = 0; i < num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		struct nh_info *nhi;

		nhi = rtnl_dereference(nhge->nh->nh_info);
		info->nh_grp->nh_entries[i].weight = nhge->weight;
		__nh_notifier_single_info_init(&info->nh_grp->nh_entries[i].nh,
					       nhi);
	}

	return 0;
}

static int nh_notifier_res_table_info_init(struct nh_notifier_info *info,
					   struct nh_group *nhg)
{
	struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
	u16 num_nh_buckets = res_table->num_nh_buckets;
	unsigned long size;
	u16 i;

	info->type = NH_NOTIFIER_INFO_TYPE_RES_TABLE;
	size = struct_size(info->nh_res_table, nhs, num_nh_buckets);
	info->nh_res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO |
				       __GFP_NOWARN);
	if (!info->nh_res_table)
		return -ENOMEM;

	info->nh_res_table->num_nh_buckets = num_nh_buckets;
	info->nh_res_table->hw_stats = nhg->hw_stats;

	for (i = 0; i < num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		struct nh_grp_entry *nhge;
		struct nh_info *nhi;

		nhge = rtnl_dereference(bucket->nh_entry);
		nhi = rtnl_dereference(nhge->nh->nh_info);
		__nh_notifier_single_info_init(&info->nh_res_table->nhs[i],
					       nhi);
	}

	return 0;
}

static int nh_notifier_grp_info_init(struct nh_notifier_info *info,
				     const struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

	if (nhg->hash_threshold)
		return nh_notifier_mpath_info_init(info, nhg);
	else if (nhg->resilient)
		return nh_notifier_res_table_info_init(info, nhg);
	return -EINVAL;
}

static void nh_notifier_grp_info_fini(struct nh_notifier_info *info,
				      const struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

	if (nhg->hash_threshold)
		kfree(info->nh_grp);
	else if (nhg->resilient)
		vfree(info->nh_res_table);
}

static int nh_notifier_info_init(struct nh_notifier_info *info,
				 const struct nexthop *nh)
{
	info->id = nh->id;

	if (nh->is_group)
		return nh_notifier_grp_info_init(info, nh);
	else
		return nh_notifier_single_info_init(info, nh);
}

static void nh_notifier_info_fini(struct nh_notifier_info *info,
				  const struct nexthop *nh)
{
	if (nh->is_group)
		nh_notifier_grp_info_fini(info, nh);
	else
		nh_notifier_single_info_fini(info);
}
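
/* Editor's note, illustrative only (not part of the original file):
 * struct_size() sizes the flexible-array allocations above with overflow
 * checking. For a four-entry group:
 *
 *	struct_size(info->nh_grp, nh_entries, 4)
 *	    == sizeof(*info->nh_grp) +
 *	       4 * sizeof(info->nh_grp->nh_entries[0])
 *
 * and it saturates to SIZE_MAX on arithmetic overflow, so the allocation
 * simply fails instead of returning a too-small buffer.
 */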

static int call_nexthop_notifiers(struct net *net,
				  enum nexthop_event_type event_type,
				  struct nexthop *nh,
				  struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
	};
	int err;

	ASSERT_RTNL();

	if (nexthop_notifiers_is_empty(net))
		return 0;

	err = nh_notifier_info_init(&info, nh);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
		return err;
	}

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   event_type, &info);
	nh_notifier_info_fini(&info, nh);

	return notifier_to_errno(err);
}

static int
nh_notifier_res_bucket_idle_timer_get(const struct nh_notifier_info *info,
				      bool force, unsigned int *p_idle_timer_ms)
{
	struct nh_res_table *res_table;
	struct nh_group *nhg;
	struct nexthop *nh;
	int err = 0;

	/* When 'force' is false, nexthop bucket replacement is performed
	 * because the bucket was deemed to be idle. In this case, capable
	 * listeners can choose to perform an atomic replacement: The bucket is
	 * only replaced if it is inactive. However, if the idle timer interval
	 * is smaller than the interval in which a listener is querying
	 * buckets' activity from the device, then atomic replacement should
	 * not be tried. Pass the idle timer value to listeners, so that they
	 * could determine which type of replacement to perform.
	 */
	if (force) {
		*p_idle_timer_ms = 0;
		return 0;
	}

	rcu_read_lock();

	nh = nexthop_find_by_id(info->net, info->id);
	if (!nh) {
		err = -EINVAL;
		goto out;
	}

	nhg = rcu_dereference(nh->nh_grp);
	res_table = rcu_dereference(nhg->res_table);
	*p_idle_timer_ms = jiffies_to_msecs(res_table->idle_timer);

out:
	rcu_read_unlock();

	return err;
}

static int nh_notifier_res_bucket_info_init(struct nh_notifier_info *info,
					    u16 bucket_index, bool force,
					    struct nh_info *oldi,
					    struct nh_info *newi)
{
	unsigned int idle_timer_ms;
	int err;

	err = nh_notifier_res_bucket_idle_timer_get(info, force,
						    &idle_timer_ms);
	if (err)
		return err;

	info->type = NH_NOTIFIER_INFO_TYPE_RES_BUCKET;
	info->nh_res_bucket = kzalloc(sizeof(*info->nh_res_bucket),
				      GFP_KERNEL);
	if (!info->nh_res_bucket)
		return -ENOMEM;

	info->nh_res_bucket->bucket_index = bucket_index;
	info->nh_res_bucket->idle_timer_ms = idle_timer_ms;
	info->nh_res_bucket->force = force;
	__nh_notifier_single_info_init(&info->nh_res_bucket->old_nh, oldi);
	__nh_notifier_single_info_init(&info->nh_res_bucket->new_nh, newi);
	return 0;
}

static void nh_notifier_res_bucket_info_fini(struct nh_notifier_info *info)
{
	kfree(info->nh_res_bucket);
}

static int __call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
					       u16 bucket_index, bool force,
					       struct nh_info *oldi,
					       struct nh_info *newi,
					       struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
		.id = nhg_id,
	};
	int err;

	if (nexthop_notifiers_is_empty(net))
		return 0;

	err = nh_notifier_res_bucket_info_init(&info, bucket_index, force,
					       oldi, newi);
	if (err)
		return err;

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_BUCKET_REPLACE, &info);
	nh_notifier_res_bucket_info_fini(&info);

	return notifier_to_errno(err);
}
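
/* Editor's note, illustrative sketch only (not part of the original file):
 * a listener on this chain can veto an event by returning an errno wrapped
 * with notifier_from_errno(); notifier_to_errno() above unwraps it for the
 * caller. my_nh_event() and my_hw_can_offload() are hypothetical names:
 *
 *	static int my_nh_event(struct notifier_block *nb, unsigned long ev,
 *			       void *ptr)
 *	{
 *		struct nh_notifier_info *info = ptr;
 *
 *		if (ev == NEXTHOP_EVENT_REPLACE && !my_hw_can_offload(info)) {
 *			NL_SET_ERR_MSG(info->extack, "my_hw: unsupported");
 *			return notifier_from_errno(-EOPNOTSUPP);
 *		}
 *		return NOTIFY_DONE;
 *	}
 */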

/* There are three users of RES_TABLE, and NHs etc. referenced from there:
 *
 * 1) a collection of callbacks for NH maintenance. This operates under
 *    RTNL,
 * 2) the delayed work that gradually balances the resilient table,
 * 3) and nexthop_select_path(), operating under RCU.
 *
 * Both the delayed work and the RTNL block are writers, and need to
 * maintain mutual exclusion. Since there are only two and well-known
 * writers for each table, the RTNL code can make sure it has exclusive
 * access thus:
 *
 * - Have the DW operate without locking;
 * - synchronously cancel the DW;
 * - do the writing;
 * - if the write was not actually a delete, call upkeep, which schedules
 *   DW again if necessary.
 *
 * The functions that are always called from the RTNL context use
 * rtnl_dereference(). The functions that can also be called from the DW do
 * a raw dereference and rely on the above mutual exclusion scheme.
 */
#define nh_res_dereference(p) (rcu_dereference_raw(p))
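
/* Editor's note, illustrative sketch only (not part of the original file) of
 * the RTNL-side write pattern described above, using helpers defined later
 * in this file:
 *
 *	nh_res_table_cancel_upkeep(res_table);	// synchronously cancel the DW
 *	// ...modify the table; access is now exclusive...
 *	nh_res_table_upkeep(res_table, true, false); // reschedule DW if needed
 */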

static int call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
					     u16 bucket_index, bool force,
					     struct nexthop *old_nh,
					     struct nexthop *new_nh,
					     struct netlink_ext_ack *extack)
{
	struct nh_info *oldi = nh_res_dereference(old_nh->nh_info);
	struct nh_info *newi = nh_res_dereference(new_nh->nh_info);

	return __call_nexthop_res_bucket_notifiers(net, nhg_id, bucket_index,
						   force, oldi, newi, extack);
}

static int call_nexthop_res_table_notifiers(struct net *net, struct nexthop *nh,
					    struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
		.id = nh->id,
	};
	struct nh_group *nhg;
	int err;

	ASSERT_RTNL();

	if (nexthop_notifiers_is_empty(net))
		return 0;

	/* At this point, the nexthop buckets are still not populated. Only
	 * emit a notification with the logical nexthops, so that a listener
	 * could potentially veto it in case of unsupported configuration.
	 */
	nhg = rtnl_dereference(nh->nh_grp);
	err = nh_notifier_mpath_info_init(&info, nhg);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
		return err;
	}

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE,
					   &info);
	kfree(info.nh_grp);

	return notifier_to_errno(err);
}

static int call_nexthop_notifier(struct notifier_block *nb, struct net *net,
				 enum nexthop_event_type event_type,
				 struct nexthop *nh,
				 struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
	};
	int err;

	err = nh_notifier_info_init(&info, nh);
	if (err)
		return err;

	err = nb->notifier_call(nb, event_type, &info);
	nh_notifier_info_fini(&info, nh);

	return notifier_to_errno(err);
}

static unsigned int nh_dev_hashfn(unsigned int val)
{
	unsigned int mask = NH_DEV_HASHSIZE - 1;

	return (val ^
		(val >> NH_DEV_HASHBITS) ^
		(val >> (NH_DEV_HASHBITS * 2))) & mask;
}

static void nexthop_devhash_add(struct net *net, struct nh_info *nhi)
{
	struct net_device *dev = nhi->fib_nhc.nhc_dev;
	struct hlist_head *head;
	unsigned int hash;

	WARN_ON(!dev);

	hash = nh_dev_hashfn(dev->ifindex);
	head = &net->nexthop.devhash[hash];
	hlist_add_head(&nhi->dev_hash, head);
}
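
/* Editor's note, worked example (not part of the original file):
 * nh_dev_hashfn() folds an ifindex into NH_DEV_HASHBITS (8) bits by XOR-ing
 * three byte-shifted copies of the value. For val = 0x12345:
 *
 *	0x12345 ^ (0x12345 >> 8) ^ (0x12345 >> 16)
 *	    == 0x12345 ^ 0x123 ^ 0x1 == 0x12267
 *	0x12267 & 0xff == 0x67
 *
 * so the nh_info lands in devhash bucket 0x67.
 */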

static void nexthop_free_group(struct nexthop *nh)
{
	struct nh_group *nhg;
	int i;

	nhg = rcu_dereference_raw(nh->nh_grp);
	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		WARN_ON(!list_empty(&nhge->nh_list));
		free_percpu(nhge->stats);
		nexthop_put(nhge->nh);
	}

	WARN_ON(nhg->spare == nhg);

	if (nhg->resilient)
		vfree(rcu_dereference_raw(nhg->res_table));

	kfree(nhg->spare);
	kfree(nhg);
}

static void nexthop_free_single(struct nexthop *nh)
{
	struct nh_info *nhi;

	nhi = rcu_dereference_raw(nh->nh_info);
	switch (nhi->family) {
	case AF_INET:
		fib_nh_release(nh->net, &nhi->fib_nh);
		break;
	case AF_INET6:
		ipv6_stub->fib6_nh_release(&nhi->fib6_nh);
		break;
	}
	kfree(nhi);
}

void nexthop_free_rcu(struct rcu_head *head)
{
	struct nexthop *nh = container_of(head, struct nexthop, rcu);

	if (nh->is_group)
		nexthop_free_group(nh);
	else
		nexthop_free_single(nh);

	kfree(nh);
}
EXPORT_SYMBOL_GPL(nexthop_free_rcu);

static struct nexthop *nexthop_alloc(void)
{
	struct nexthop *nh;

	nh = kzalloc(sizeof(struct nexthop), GFP_KERNEL);
	if (nh) {
		INIT_LIST_HEAD(&nh->fi_list);
		INIT_LIST_HEAD(&nh->f6i_list);
		INIT_LIST_HEAD(&nh->grp_list);
		INIT_LIST_HEAD(&nh->fdb_list);
	}
	return nh;
}

static struct nh_group *nexthop_grp_alloc(u16 num_nh)
{
	struct nh_group *nhg;

	nhg = kzalloc(struct_size(nhg, nh_entries, num_nh), GFP_KERNEL);
	if (nhg)
		nhg->num_nh = num_nh;

	return nhg;
}

static void nh_res_table_upkeep_dw(struct work_struct *work);

static struct nh_res_table *
nexthop_res_table_alloc(struct net *net, u32 nhg_id, struct nh_config *cfg)
{
	const u16 num_nh_buckets = cfg->nh_grp_res_num_buckets;
	struct nh_res_table *res_table;
	unsigned long size;

	size = struct_size(res_table, nh_buckets, num_nh_buckets);
	res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
	if (!res_table)
		return NULL;

	res_table->net = net;
	res_table->nhg_id = nhg_id;
	INIT_DELAYED_WORK(&res_table->upkeep_dw, &nh_res_table_upkeep_dw);
	INIT_LIST_HEAD(&res_table->uw_nh_entries);
	res_table->idle_timer = cfg->nh_grp_res_idle_timer;
	res_table->unbalanced_timer = cfg->nh_grp_res_unbalanced_timer;
	res_table->num_nh_buckets = num_nh_buckets;
	return res_table;
}

static void nh_base_seq_inc(struct net *net)
{
	while (++net->nexthop.seq == 0)
		;
}

/* no reference taken; rcu lock or rtnl must be held */
struct nexthop *nexthop_find_by_id(struct net *net, u32 id)
{
	struct rb_node **pp, *parent = NULL, *next;

	pp = &net->nexthop.rb_root.rb_node;
	while (1) {
		struct nexthop *nh;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;

		nh = rb_entry(parent, struct nexthop, rb_node);
		if (id < nh->id)
			pp = &next->rb_left;
		else if (id > nh->id)
			pp = &next->rb_right;
		else
			return nh;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(nexthop_find_by_id);
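
/* Editor's note, example usage (not part of the original file): since no
 * reference is taken, a caller that does not hold RTNL must pin the result
 * with RCU for as long as it is used, as
 * nh_notifier_res_bucket_idle_timer_get() does above:
 *
 *	rcu_read_lock();
 *	nh = nexthop_find_by_id(net, id);
 *	if (nh)
 *		// ...use nh...
 *	rcu_read_unlock();
 */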

/* used for auto id allocation; called with rtnl held */
static u32 nh_find_unused_id(struct net *net)
{
	u32 id_start = net->nexthop.last_id_allocated;

	while (1) {
		net->nexthop.last_id_allocated++;
		if (net->nexthop.last_id_allocated == id_start)
			break;

		if (!nexthop_find_by_id(net, net->nexthop.last_id_allocated))
			return net->nexthop.last_id_allocated;
	}
	return 0;
}

static void nh_res_time_set_deadline(unsigned long next_time,
				     unsigned long *deadline)
{
	if (time_before(next_time, *deadline))
		*deadline = next_time;
}

static clock_t nh_res_table_unbalanced_time(struct nh_res_table *res_table)
{
	if (list_empty(&res_table->uw_nh_entries))
		return 0;
	return jiffies_delta_to_clock_t(jiffies - res_table->unbalanced_since);
}

static int nla_put_nh_group_res(struct sk_buff *skb, struct nh_group *nhg)
{
	struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
	struct nlattr *nest;

	nest = nla_nest_start(skb, NHA_RES_GROUP);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u16(skb, NHA_RES_GROUP_BUCKETS,
			res_table->num_nh_buckets) ||
	    nla_put_u32(skb, NHA_RES_GROUP_IDLE_TIMER,
			jiffies_to_clock_t(res_table->idle_timer)) ||
	    nla_put_u32(skb, NHA_RES_GROUP_UNBALANCED_TIMER,
			jiffies_to_clock_t(res_table->unbalanced_timer)) ||
	    nla_put_u64_64bit(skb, NHA_RES_GROUP_UNBALANCED_TIME,
			      nh_res_table_unbalanced_time(res_table),
			      NHA_RES_GROUP_PAD))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static void nh_grp_entry_stats_inc(struct nh_grp_entry *nhge)
{
	struct nh_grp_entry_stats *cpu_stats;

	cpu_stats = get_cpu_ptr(nhge->stats);
	u64_stats_update_begin(&cpu_stats->syncp);
	u64_stats_inc(&cpu_stats->packets);
	u64_stats_update_end(&cpu_stats->syncp);
	put_cpu_ptr(cpu_stats);
}

static void nh_grp_entry_stats_read(struct nh_grp_entry *nhge,
				    u64 *ret_packets)
{
	int i;

	*ret_packets = 0;

	for_each_possible_cpu(i) {
		struct nh_grp_entry_stats *cpu_stats;
		unsigned int start;
		u64 packets;

		cpu_stats = per_cpu_ptr(nhge->stats, i);
		do {
			start = u64_stats_fetch_begin(&cpu_stats->syncp);
			packets = u64_stats_read(&cpu_stats->packets);
		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));

		*ret_packets += packets;
	}
}
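
/* Editor's note, illustrative only (not part of the original file): the
 * begin/retry loop above is the standard u64_stats seqcount pattern. It
 * guards against torn 64-bit counter reads on 32-bit machines; on 64-bit
 * builds u64_stats_fetch_begin() and u64_stats_fetch_retry() compile down
 * to no-ops and the loop body runs exactly once per CPU.
 */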

static int nh_notifier_grp_hw_stats_init(struct nh_notifier_info *info,
					 const struct nexthop *nh)
{
	struct nh_group *nhg;
	int i;

	ASSERT_RTNL();
	nhg = rtnl_dereference(nh->nh_grp);

	info->id = nh->id;
	info->type = NH_NOTIFIER_INFO_TYPE_GRP_HW_STATS;
	info->nh_grp_hw_stats = kzalloc(struct_size(info->nh_grp_hw_stats,
						    stats, nhg->num_nh),
					GFP_KERNEL);
	if (!info->nh_grp_hw_stats)
		return -ENOMEM;

	info->nh_grp_hw_stats->num_nh = nhg->num_nh;
	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		info->nh_grp_hw_stats->stats[i].id = nhge->nh->id;
	}

	return 0;
}

static void nh_notifier_grp_hw_stats_fini(struct nh_notifier_info *info)
{
	kfree(info->nh_grp_hw_stats);
}

void nh_grp_hw_stats_report_delta(struct nh_notifier_grp_hw_stats_info *info,
				  unsigned int nh_idx,
				  u64 delta_packets)
{
	info->hw_stats_used = true;
	info->stats[nh_idx].packets += delta_packets;
}
EXPORT_SYMBOL(nh_grp_hw_stats_report_delta);

static void nh_grp_hw_stats_apply_update(struct nexthop *nh,
					 struct nh_notifier_info *info)
{
	struct nh_group *nhg;
	int i;

	ASSERT_RTNL();
	nhg = rtnl_dereference(nh->nh_grp);

	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		nhge->packets_hw += info->nh_grp_hw_stats->stats[i].packets;
	}
}

static int nh_grp_hw_stats_update(struct nexthop *nh, bool *hw_stats_used)
{
	struct nh_notifier_info info = {
		.net = nh->net,
	};
	struct net *net = nh->net;
	int err;

	if (nexthop_notifiers_is_empty(net))
		return 0;

	err = nh_notifier_grp_hw_stats_init(&info, nh);
	if (err)
		return err;

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_HW_STATS_REPORT_DELTA,
					   &info);

	/* Cache whatever we got, even if there was an error, otherwise the
	 * successful stats retrievals would get lost.
	 */
	nh_grp_hw_stats_apply_update(nh, &info);
	*hw_stats_used = info.nh_grp_hw_stats->hw_stats_used;

	nh_notifier_grp_hw_stats_fini(&info);
	return notifier_to_errno(err);
}

static int nla_put_nh_group_stats_entry(struct sk_buff *skb,
					struct nh_grp_entry *nhge,
					u32 op_flags)
{
	struct nlattr *nest;
	u64 packets;

	nh_grp_entry_stats_read(nhge, &packets);

	nest = nla_nest_start(skb, NHA_GROUP_STATS_ENTRY);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u32(skb, NHA_GROUP_STATS_ENTRY_ID, nhge->nh->id) ||
	    nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS,
			 packets + nhge->packets_hw))
		goto nla_put_failure;

	if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS &&
	    nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS_HW,
			 nhge->packets_hw))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int nla_put_nh_group_stats(struct sk_buff *skb, struct nexthop *nh,
				  u32 op_flags)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	struct nlattr *nest;
	bool hw_stats_used;
	int err;
	int i;

	if (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats))
		goto err_out;

	if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS &&
	    nhg->hw_stats) {
		err = nh_grp_hw_stats_update(nh, &hw_stats_used);
		if (err)
			goto out;

		if (nla_put_u32(skb, NHA_HW_STATS_USED, hw_stats_used))
			goto err_out;
	}

	nest = nla_nest_start(skb, NHA_GROUP_STATS);
	if (!nest)
		goto err_out;

	for (i = 0; i < nhg->num_nh; i++)
		if (nla_put_nh_group_stats_entry(skb, &nhg->nh_entries[i],
						 op_flags))
			goto cancel_out;

	nla_nest_end(skb, nest);
	return 0;

cancel_out:
	nla_nest_cancel(skb, nest);
err_out:
	err = -EMSGSIZE;
out:
	return err;
}

static int nla_put_nh_group(struct sk_buff *skb, struct nexthop *nh,
			    u32 op_flags)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	struct nexthop_grp *p;
	size_t len = nhg->num_nh * sizeof(*p);
	struct nlattr *nla;
	u16 group_type = 0;
	int i;

	if (nhg->hash_threshold)
		group_type = NEXTHOP_GRP_TYPE_MPATH;
	else if (nhg->resilient)
		group_type = NEXTHOP_GRP_TYPE_RES;

	if (nla_put_u16(skb, NHA_GROUP_TYPE, group_type))
		goto nla_put_failure;

	nla = nla_reserve(skb, NHA_GROUP, len);
	if (!nla)
		goto nla_put_failure;

	p = nla_data(nla);
	for (i = 0; i < nhg->num_nh; ++i) {
		p->id = nhg->nh_entries[i].nh->id;
		p->weight = nhg->nh_entries[i].weight - 1;
		p += 1;
	}

	if (nhg->resilient && nla_put_nh_group_res(skb, nhg))
		goto nla_put_failure;

	if (op_flags & NHA_OP_FLAG_DUMP_STATS &&
	    (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats) ||
	     nla_put_nh_group_stats(skb, nh, op_flags)))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
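
/* Editor's note, illustrative only (not part of the original file): NHA_GROUP
 * carries each entry's weight off by one, so a wire value of 0 means weight 1.
 * The dump above therefore writes "weight - 1", mirroring the +1 applied when
 * the group is created (outside this excerpt), and the validation in
 * nh_check_attr_group() caps the wire value at 254, i.e. kernel weights
 * 1..255.
 */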

static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
			int event, u32 portid, u32 seq, unsigned int nlflags,
			u32 op_flags)
{
	struct fib6_nh *fib6_nh;
	struct fib_nh *fib_nh;
	struct nlmsghdr *nlh;
	struct nh_info *nhi;
	struct nhmsg *nhm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	nhm = nlmsg_data(nlh);
	nhm->nh_family = AF_UNSPEC;
	nhm->nh_flags = nh->nh_flags;
	nhm->nh_protocol = nh->protocol;
	nhm->nh_scope = 0;
	nhm->resvd = 0;

	if (nla_put_u32(skb, NHA_ID, nh->id))
		goto nla_put_failure;

	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

		if (nhg->fdb_nh && nla_put_flag(skb, NHA_FDB))
			goto nla_put_failure;
		if (nla_put_nh_group(skb, nh, op_flags))
			goto nla_put_failure;
		goto out;
	}

	nhi = rtnl_dereference(nh->nh_info);
	nhm->nh_family = nhi->family;
	if (nhi->reject_nh) {
		if (nla_put_flag(skb, NHA_BLACKHOLE))
			goto nla_put_failure;
		goto out;
	} else if (nhi->fdb_nh) {
		if (nla_put_flag(skb, NHA_FDB))
			goto nla_put_failure;
	} else {
		const struct net_device *dev;

		dev = nhi->fib_nhc.nhc_dev;
		if (dev && nla_put_u32(skb, NHA_OIF, dev->ifindex))
			goto nla_put_failure;
	}

	nhm->nh_scope = nhi->fib_nhc.nhc_scope;
	switch (nhi->family) {
	case AF_INET:
		fib_nh = &nhi->fib_nh;
		if (fib_nh->fib_nh_gw_family &&
		    nla_put_be32(skb, NHA_GATEWAY, fib_nh->fib_nh_gw4))
			goto nla_put_failure;
		break;

	case AF_INET6:
		fib6_nh = &nhi->fib6_nh;
		if (fib6_nh->fib_nh_gw_family &&
		    nla_put_in6_addr(skb, NHA_GATEWAY, &fib6_nh->fib_nh_gw6))
			goto nla_put_failure;
		break;
	}

	if (nhi->fib_nhc.nhc_lwtstate &&
	    lwtunnel_fill_encap(skb, nhi->fib_nhc.nhc_lwtstate,
				NHA_ENCAP, NHA_ENCAP_TYPE) < 0)
		goto nla_put_failure;

out:
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static size_t nh_nlmsg_size_grp_res(struct nh_group *nhg)
{
	return nla_total_size(0) +	/* NHA_RES_GROUP */
		nla_total_size(2) +	/* NHA_RES_GROUP_BUCKETS */
		nla_total_size(4) +	/* NHA_RES_GROUP_IDLE_TIMER */
		nla_total_size(4) +	/* NHA_RES_GROUP_UNBALANCED_TIMER */
		nla_total_size_64bit(8);/* NHA_RES_GROUP_UNBALANCED_TIME */
}

static size_t nh_nlmsg_size_grp(struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	size_t sz = sizeof(struct nexthop_grp) * nhg->num_nh;
	size_t tot = nla_total_size(sz) +
		nla_total_size(2); /* NHA_GROUP_TYPE */

	if (nhg->resilient)
		tot += nh_nlmsg_size_grp_res(nhg);

	return tot;
}

static size_t nh_nlmsg_size_single(struct nexthop *nh)
{
	struct nh_info *nhi = rtnl_dereference(nh->nh_info);
	size_t sz;

	/* covers NHA_BLACKHOLE since NHA_OIF and BLACKHOLE
	 * are mutually exclusive
	 */
	sz = nla_total_size(4);  /* NHA_OIF */

	switch (nhi->family) {
	case AF_INET:
		if (nhi->fib_nh.fib_nh_gw_family)
			sz += nla_total_size(4);  /* NHA_GATEWAY */
		break;

	case AF_INET6:
		/* NHA_GATEWAY */
		if (nhi->fib6_nh.fib_nh_gw_family)
			sz += nla_total_size(sizeof(const struct in6_addr));
		break;
	}

	if (nhi->fib_nhc.nhc_lwtstate) {
		sz += lwtunnel_get_encap_size(nhi->fib_nhc.nhc_lwtstate);
		sz += nla_total_size(2);  /* NHA_ENCAP_TYPE */
	}

	return sz;
}

static size_t nh_nlmsg_size(struct nexthop *nh)
{
	size_t sz = NLMSG_ALIGN(sizeof(struct nhmsg));

	sz += nla_total_size(4); /* NHA_ID */

	if (nh->is_group)
		sz += nh_nlmsg_size_grp(nh);
	else
		sz += nh_nlmsg_size_single(nh);

	return sz;
}
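
/* Editor's note, worked example (not part of the original file):
 * nla_total_size(payload) is the attribute size including its 4-byte header,
 * rounded up to a 4-byte boundary, so nla_total_size(4) == 8 and
 * nla_total_size(2) == 8. For a single IPv4 nexthop with a gateway and no
 * encap, nh_nlmsg_size() thus comes to
 * NLMSG_ALIGN(sizeof(struct nhmsg)) + 8 (NHA_ID) + 8 (NHA_OIF) +
 * 8 (NHA_GATEWAY).
 */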

static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info)
{
	unsigned int nlflags = info->nlh ? info->nlh->nlmsg_flags : 0;
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(nh_nlmsg_size(nh), gfp_any());
	if (!skb)
		goto errout;

	err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in nh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_NEXTHOP,
		    info->nlh, gfp_any());
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(info->nl_net, RTNLGRP_NEXTHOP, err);
}

static unsigned long nh_res_bucket_used_time(const struct nh_res_bucket *bucket)
{
	return (unsigned long)atomic_long_read(&bucket->used_time);
}

static unsigned long
nh_res_bucket_idle_point(const struct nh_res_table *res_table,
			 const struct nh_res_bucket *bucket,
			 unsigned long now)
{
	unsigned long time = nh_res_bucket_used_time(bucket);

	/* Bucket was not used since it was migrated. The idle time is now. */
	if (time == bucket->migrated_time)
		return now;

	return time + res_table->idle_timer;
}

static unsigned long
nh_res_table_unb_point(const struct nh_res_table *res_table)
{
	return res_table->unbalanced_since + res_table->unbalanced_timer;
}

static void nh_res_bucket_set_idle(const struct nh_res_table *res_table,
				   struct nh_res_bucket *bucket)
{
	unsigned long now = jiffies;

	atomic_long_set(&bucket->used_time, (long)now);
	bucket->migrated_time = now;
}

static void nh_res_bucket_set_busy(struct nh_res_bucket *bucket)
{
	atomic_long_set(&bucket->used_time, (long)jiffies);
}

static clock_t nh_res_bucket_idle_time(const struct nh_res_bucket *bucket)
{
	unsigned long used_time = nh_res_bucket_used_time(bucket);

	return jiffies_delta_to_clock_t(jiffies - used_time);
}
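
/* Editor's note, illustrative only (not part of the original file): the idle
 * time is reported in USER_HZ units (typically 100 per second), like other
 * rtnetlink timers. E.g. with HZ=1000, a bucket last used 500 jiffies ago has
 * been idle 0.5 s, which jiffies_delta_to_clock_t() reports as 50.
 */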

static int nh_fill_res_bucket(struct sk_buff *skb, struct nexthop *nh,
			      struct nh_res_bucket *bucket, u16 bucket_index,
			      int event, u32 portid, u32 seq,
			      unsigned int nlflags,
			      struct netlink_ext_ack *extack)
{
	struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry);
	struct nlmsghdr *nlh;
	struct nlattr *nest;
	struct nhmsg *nhm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	nhm = nlmsg_data(nlh);
	nhm->nh_family = AF_UNSPEC;
	nhm->nh_flags = bucket->nh_flags;
	nhm->nh_protocol = nh->protocol;
	nhm->nh_scope = 0;
	nhm->resvd = 0;

	if (nla_put_u32(skb, NHA_ID, nh->id))
		goto nla_put_failure;

	nest = nla_nest_start(skb, NHA_RES_BUCKET);
	if (!nest)
		goto nla_put_failure;

	if (nla_put_u16(skb, NHA_RES_BUCKET_INDEX, bucket_index) ||
	    nla_put_u32(skb, NHA_RES_BUCKET_NH_ID, nhge->nh->id) ||
	    nla_put_u64_64bit(skb, NHA_RES_BUCKET_IDLE_TIME,
			      nh_res_bucket_idle_time(bucket),
			      NHA_RES_BUCKET_PAD))
		goto nla_put_failure_nest;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure_nest:
	nla_nest_cancel(skb, nest);
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static void nexthop_bucket_notify(struct nh_res_table *res_table,
				  u16 bucket_index)
{
	struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index];
	struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry);
	struct nexthop *nh = nhge->nh_parent;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		goto errout;

	err = nh_fill_res_bucket(skb, nh, bucket, bucket_index,
				 RTM_NEWNEXTHOPBUCKET, 0, 0, NLM_F_REPLACE,
				 NULL);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, nh->net, 0, RTNLGRP_NEXTHOP, NULL, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(nh->net, RTNLGRP_NEXTHOP, err);
}

static bool valid_group_nh(struct nexthop *nh, unsigned int npaths,
			   bool *is_fdb, struct netlink_ext_ack *extack)
{
	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

		/* Nesting groups within groups is not supported. */
		if (nhg->hash_threshold) {
			NL_SET_ERR_MSG(extack,
				       "Hash-threshold group can not be a nexthop within a group");
			return false;
		}
		if (nhg->resilient) {
			NL_SET_ERR_MSG(extack,
				       "Resilient group can not be a nexthop within a group");
			return false;
		}
		*is_fdb = nhg->fdb_nh;
	} else {
		struct nh_info *nhi = rtnl_dereference(nh->nh_info);

		if (nhi->reject_nh && npaths > 1) {
			NL_SET_ERR_MSG(extack,
				       "Blackhole nexthop can not be used in a group with more than 1 path");
			return false;
		}
		*is_fdb = nhi->fdb_nh;
	}

	return true;
}

static int nh_check_attr_fdb_group(struct nexthop *nh, u8 *nh_family,
				   struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;

	nhi = rtnl_dereference(nh->nh_info);

	if (!nhi->fdb_nh) {
		NL_SET_ERR_MSG(extack, "FDB nexthop group can only have fdb nexthops");
		return -EINVAL;
	}

	if (*nh_family == AF_UNSPEC) {
		*nh_family = nhi->family;
	} else if (*nh_family != nhi->family) {
		NL_SET_ERR_MSG(extack, "FDB nexthop group cannot have mixed family nexthops");
		return -EINVAL;
	}

	return 0;
}
id"); 1306 return -EINVAL; 1307 } 1308 if (!valid_group_nh(nh, len, &is_fdb_nh, extack)) 1309 return -EINVAL; 1310 1311 if (nhg_fdb && nh_check_attr_fdb_group(nh, &nh_family, extack)) 1312 return -EINVAL; 1313 1314 if (!nhg_fdb && is_fdb_nh) { 1315 NL_SET_ERR_MSG(extack, "Non FDB nexthop group cannot have fdb nexthops"); 1316 return -EINVAL; 1317 } 1318 } 1319 for (i = NHA_GROUP_TYPE + 1; i < tb_size; ++i) { 1320 if (!tb[i]) 1321 continue; 1322 switch (i) { 1323 case NHA_HW_STATS_ENABLE: 1324 case NHA_FDB: 1325 continue; 1326 case NHA_RES_GROUP: 1327 if (nh_grp_type == NEXTHOP_GRP_TYPE_RES) 1328 continue; 1329 break; 1330 } 1331 NL_SET_ERR_MSG(extack, 1332 "No other attributes can be set in nexthop groups"); 1333 return -EINVAL; 1334 } 1335 1336 return 0; 1337 } 1338 1339 static bool ipv6_good_nh(const struct fib6_nh *nh) 1340 { 1341 int state = NUD_REACHABLE; 1342 struct neighbour *n; 1343 1344 rcu_read_lock(); 1345 1346 n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev, &nh->fib_nh_gw6); 1347 if (n) 1348 state = READ_ONCE(n->nud_state); 1349 1350 rcu_read_unlock(); 1351 1352 return !!(state & NUD_VALID); 1353 } 1354 1355 static bool ipv4_good_nh(const struct fib_nh *nh) 1356 { 1357 int state = NUD_REACHABLE; 1358 struct neighbour *n; 1359 1360 rcu_read_lock(); 1361 1362 n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev, 1363 (__force u32)nh->fib_nh_gw4); 1364 if (n) 1365 state = READ_ONCE(n->nud_state); 1366 1367 rcu_read_unlock(); 1368 1369 return !!(state & NUD_VALID); 1370 } 1371 1372 static bool nexthop_is_good_nh(const struct nexthop *nh) 1373 { 1374 struct nh_info *nhi = rcu_dereference(nh->nh_info); 1375 1376 switch (nhi->family) { 1377 case AF_INET: 1378 return ipv4_good_nh(&nhi->fib_nh); 1379 case AF_INET6: 1380 return ipv6_good_nh(&nhi->fib6_nh); 1381 } 1382 1383 return false; 1384 } 1385 1386 static struct nexthop *nexthop_select_path_fdb(struct nh_group *nhg, int hash) 1387 { 1388 int i; 1389 1390 for (i = 0; i < nhg->num_nh; i++) { 1391 struct nh_grp_entry *nhge = &nhg->nh_entries[i]; 1392 1393 if (hash > atomic_read(&nhge->hthr.upper_bound)) 1394 continue; 1395 1396 nh_grp_entry_stats_inc(nhge); 1397 return nhge->nh; 1398 } 1399 1400 WARN_ON_ONCE(1); 1401 return NULL; 1402 } 1403 1404 static struct nexthop *nexthop_select_path_hthr(struct nh_group *nhg, int hash) 1405 { 1406 struct nh_grp_entry *nhge0 = NULL; 1407 int i; 1408 1409 if (nhg->fdb_nh) 1410 return nexthop_select_path_fdb(nhg, hash); 1411 1412 for (i = 0; i < nhg->num_nh; ++i) { 1413 struct nh_grp_entry *nhge = &nhg->nh_entries[i]; 1414 1415 /* nexthops always check if it is good and does 1416 * not rely on a sysctl for this behavior 1417 */ 1418 if (!nexthop_is_good_nh(nhge->nh)) 1419 continue; 1420 1421 if (!nhge0) 1422 nhge0 = nhge; 1423 1424 if (hash > atomic_read(&nhge->hthr.upper_bound)) 1425 continue; 1426 1427 nh_grp_entry_stats_inc(nhge); 1428 return nhge->nh; 1429 } 1430 1431 if (!nhge0) 1432 nhge0 = &nhg->nh_entries[0]; 1433 nh_grp_entry_stats_inc(nhge0); 1434 return nhge0->nh; 1435 } 1436 1437 static struct nexthop *nexthop_select_path_res(struct nh_group *nhg, int hash) 1438 { 1439 struct nh_res_table *res_table = rcu_dereference(nhg->res_table); 1440 u16 bucket_index = hash % res_table->num_nh_buckets; 1441 struct nh_res_bucket *bucket; 1442 struct nh_grp_entry *nhge; 1443 1444 /* nexthop_select_path() is expected to return a non-NULL value, so 1445 * skip protocol validation and just hand out whatever there is. 

static bool ipv6_good_nh(const struct fib6_nh *nh)
{
	int state = NUD_REACHABLE;
	struct neighbour *n;

	rcu_read_lock();

	n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev, &nh->fib_nh_gw6);
	if (n)
		state = READ_ONCE(n->nud_state);

	rcu_read_unlock();

	return !!(state & NUD_VALID);
}

static bool ipv4_good_nh(const struct fib_nh *nh)
{
	int state = NUD_REACHABLE;
	struct neighbour *n;

	rcu_read_lock();

	n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev,
				      (__force u32)nh->fib_nh_gw4);
	if (n)
		state = READ_ONCE(n->nud_state);

	rcu_read_unlock();

	return !!(state & NUD_VALID);
}

static bool nexthop_is_good_nh(const struct nexthop *nh)
{
	struct nh_info *nhi = rcu_dereference(nh->nh_info);

	switch (nhi->family) {
	case AF_INET:
		return ipv4_good_nh(&nhi->fib_nh);
	case AF_INET6:
		return ipv6_good_nh(&nhi->fib6_nh);
	}

	return false;
}
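
/* Editor's note, illustrative only (not part of the original file): a nexthop
 * counts as "good" when its neighbour entry is in any NUD_VALID state
 * (permanent, noarp, reachable, probe, stale or delay), or when no neighbour
 * entry exists yet, since state then keeps its NUD_REACHABLE default above.
 */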

static struct nexthop *nexthop_select_path_fdb(struct nh_group *nhg, int hash)
{
	int i;

	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		if (hash > atomic_read(&nhge->hthr.upper_bound))
			continue;

		nh_grp_entry_stats_inc(nhge);
		return nhge->nh;
	}

	WARN_ON_ONCE(1);
	return NULL;
}

static struct nexthop *nexthop_select_path_hthr(struct nh_group *nhg, int hash)
{
	struct nh_grp_entry *nhge0 = NULL;
	int i;

	if (nhg->fdb_nh)
		return nexthop_select_path_fdb(nhg, hash);

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		/* Nexthops always check whether the neighbour is good and
		 * do not rely on a sysctl for this behavior.
		 */
		if (!nexthop_is_good_nh(nhge->nh))
			continue;

		if (!nhge0)
			nhge0 = nhge;

		if (hash > atomic_read(&nhge->hthr.upper_bound))
			continue;

		nh_grp_entry_stats_inc(nhge);
		return nhge->nh;
	}

	if (!nhge0)
		nhge0 = &nhg->nh_entries[0];
	nh_grp_entry_stats_inc(nhge0);
	return nhge0->nh;
}

static struct nexthop *nexthop_select_path_res(struct nh_group *nhg, int hash)
{
	struct nh_res_table *res_table = rcu_dereference(nhg->res_table);
	u16 bucket_index = hash % res_table->num_nh_buckets;
	struct nh_res_bucket *bucket;
	struct nh_grp_entry *nhge;

	/* nexthop_select_path() is expected to return a non-NULL value, so
	 * skip protocol validation and just hand out whatever there is.
	 */
	bucket = &res_table->nh_buckets[bucket_index];
	nh_res_bucket_set_busy(bucket);
	nhge = rcu_dereference(bucket->nh_entry);
	nh_grp_entry_stats_inc(nhge);
	return nhge->nh;
}

struct nexthop *nexthop_select_path(struct nexthop *nh, int hash)
{
	struct nh_group *nhg;

	if (!nh->is_group)
		return nh;

	nhg = rcu_dereference(nh->nh_grp);
	if (nhg->hash_threshold)
		return nexthop_select_path_hthr(nhg, hash);
	else if (nhg->resilient)
		return nexthop_select_path_res(nhg, hash);

	/* Unreachable. */
	return NULL;
}
EXPORT_SYMBOL_GPL(nexthop_select_path);
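
/* Editor's note, worked example (not part of the original file): for a
 * hash-threshold group with two entries of weight 3 and 1,
 * nh_hthr_group_rebalance() (later in this file) sets the 31-bit upper
 * bounds to
 *
 *	DIV_ROUND_CLOSEST_ULL((u64)3 << 31, 4) - 1 == 0x5fffffff
 *	DIV_ROUND_CLOSEST_ULL((u64)4 << 31, 4) - 1 == 0x7fffffff
 *
 * so nexthop_select_path_hthr() sends hashes 0..0x5fffffff (75%) to the
 * first entry and the remaining 25% to the second.
 */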

int nexthop_for_each_fib6_nh(struct nexthop *nh,
			     int (*cb)(struct fib6_nh *nh, void *arg),
			     void *arg)
{
	struct nh_info *nhi;
	int err;

	if (nh->is_group) {
		struct nh_group *nhg;
		int i;

		nhg = rcu_dereference_rtnl(nh->nh_grp);
		for (i = 0; i < nhg->num_nh; i++) {
			struct nh_grp_entry *nhge = &nhg->nh_entries[i];

			nhi = rcu_dereference_rtnl(nhge->nh->nh_info);
			err = cb(&nhi->fib6_nh, arg);
			if (err)
				return err;
		}
	} else {
		nhi = rcu_dereference_rtnl(nh->nh_info);
		err = cb(&nhi->fib6_nh, arg);
		if (err)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nexthop_for_each_fib6_nh);

static int check_src_addr(const struct in6_addr *saddr,
			  struct netlink_ext_ack *extack)
{
	if (!ipv6_addr_any(saddr)) {
		NL_SET_ERR_MSG(extack, "IPv6 routes using source address can not use nexthop objects");
		return -EINVAL;
	}
	return 0;
}

int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
		       struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;
	bool is_fdb_nh;

	/* fib6_src is unique to a fib6_info and limits the ability to cache
	 * routes in fib6_nh within a nexthop that is potentially shared
	 * across multiple fib entries. If the config wants to use source
	 * routing it can not use nexthop objects. mlxsw also does not allow
	 * fib6_src on routes.
	 */
	if (cfg && check_src_addr(&cfg->fc_src, extack) < 0)
		return -EINVAL;

	if (nh->is_group) {
		struct nh_group *nhg;

		nhg = rtnl_dereference(nh->nh_grp);
		if (nhg->has_v4)
			goto no_v4_nh;
		is_fdb_nh = nhg->fdb_nh;
	} else {
		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->family == AF_INET)
			goto no_v4_nh;
		is_fdb_nh = nhi->fdb_nh;
	}

	if (is_fdb_nh) {
		NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
		return -EINVAL;
	}

	return 0;
no_v4_nh:
	NL_SET_ERR_MSG(extack, "IPv6 routes can not use an IPv4 nexthop");
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(fib6_check_nexthop);
1716 */ 1717 1718 idle_point = nh_res_bucket_idle_point(res_table, bucket, now); 1719 if (time_after_eq(now, idle_point)) { 1720 /* The bucket is idle. We _can_ migrate: unset force. */ 1721 *force = false; 1722 return true; 1723 } 1724 1725 /* Unbalanced timer of 0 means "never force". */ 1726 if (res_table->unbalanced_timer) { 1727 unsigned long unb_point; 1728 1729 unb_point = nh_res_table_unb_point(res_table); 1730 if (time_after(now, unb_point)) { 1731 /* The bucket is not idle, but the unbalanced timer 1732 * expired. We _can_ migrate, but set force anyway, 1733 * so that drivers know to ignore activity reports 1734 * from the HW. 1735 */ 1736 *force = true; 1737 return true; 1738 } 1739 1740 nh_res_time_set_deadline(unb_point, deadline); 1741 } 1742 1743 nh_res_time_set_deadline(idle_point, deadline); 1744 return false; 1745 } 1746 1747 static bool nh_res_bucket_migrate(struct nh_res_table *res_table, 1748 u16 bucket_index, bool notify, 1749 bool notify_nl, bool force) 1750 { 1751 struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index]; 1752 struct nh_grp_entry *new_nhge; 1753 struct netlink_ext_ack extack; 1754 int err; 1755 1756 new_nhge = list_first_entry_or_null(&res_table->uw_nh_entries, 1757 struct nh_grp_entry, 1758 res.uw_nh_entry); 1759 if (WARN_ON_ONCE(!new_nhge)) 1760 /* If this function is called, "bucket" is either not 1761 * occupied, or it belongs to a next hop that is 1762 * overweight. In either case, there ought to be a 1763 * corresponding underweight next hop. 1764 */ 1765 return false; 1766 1767 if (notify) { 1768 struct nh_grp_entry *old_nhge; 1769 1770 old_nhge = nh_res_dereference(bucket->nh_entry); 1771 err = call_nexthop_res_bucket_notifiers(res_table->net, 1772 res_table->nhg_id, 1773 bucket_index, force, 1774 old_nhge->nh, 1775 new_nhge->nh, &extack); 1776 if (err) { 1777 pr_err_ratelimited("%s\n", extack._msg); 1778 if (!force) 1779 return false; 1780 /* It is not possible to veto a forced replacement, so 1781 * just clear the hardware flags from the nexthop 1782 * bucket to indicate to user space that this bucket is 1783 * not correctly populated in hardware. 1784 */ 1785 bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP); 1786 } 1787 } 1788 1789 nh_res_bucket_set_nh(bucket, new_nhge); 1790 nh_res_bucket_set_idle(res_table, bucket); 1791 1792 if (notify_nl) 1793 nexthop_bucket_notify(res_table, bucket_index); 1794 1795 if (nh_res_nhge_is_balanced(new_nhge)) 1796 list_del(&new_nhge->res.uw_nh_entry); 1797 return true; 1798 } 1799 1800 #define NH_RES_UPKEEP_DW_MINIMUM_INTERVAL (HZ / 2) 1801 1802 static void nh_res_table_upkeep(struct nh_res_table *res_table, 1803 bool notify, bool notify_nl) 1804 { 1805 unsigned long now = jiffies; 1806 unsigned long deadline; 1807 u16 i; 1808 1809 /* Deadline is the next time that upkeep should be run. It is the 1810 * earliest time at which one of the buckets might be migrated. 1811 * Start at the most pessimistic estimate: either unbalanced_timer 1812 * from now, or if there is none, idle_timer from now. For each 1813 * encountered time point, call nh_res_time_set_deadline() to 1814 * refine the estimate. 
1815 */ 1816 if (res_table->unbalanced_timer) 1817 deadline = now + res_table->unbalanced_timer; 1818 else 1819 deadline = now + res_table->idle_timer; 1820 1821 for (i = 0; i < res_table->num_nh_buckets; i++) { 1822 struct nh_res_bucket *bucket = &res_table->nh_buckets[i]; 1823 bool force; 1824 1825 if (nh_res_bucket_should_migrate(res_table, bucket, 1826 &deadline, &force)) { 1827 if (!nh_res_bucket_migrate(res_table, i, notify, 1828 notify_nl, force)) { 1829 unsigned long idle_point; 1830 1831 /* A driver can override the migration 1832 * decision if the HW reports that the 1833 * bucket is actually not idle. Therefore 1834 * remark the bucket as busy again and 1835 * update the deadline. 1836 */ 1837 nh_res_bucket_set_busy(bucket); 1838 idle_point = nh_res_bucket_idle_point(res_table, 1839 bucket, 1840 now); 1841 nh_res_time_set_deadline(idle_point, &deadline); 1842 } 1843 } 1844 } 1845 1846 /* If the group is still unbalanced, schedule the next upkeep to 1847 * either the deadline computed above, or the minimum deadline, 1848 * whichever comes later. 1849 */ 1850 if (!nh_res_table_is_balanced(res_table)) { 1851 unsigned long now = jiffies; 1852 unsigned long min_deadline; 1853 1854 min_deadline = now + NH_RES_UPKEEP_DW_MINIMUM_INTERVAL; 1855 if (time_before(deadline, min_deadline)) 1856 deadline = min_deadline; 1857 1858 queue_delayed_work(system_power_efficient_wq, 1859 &res_table->upkeep_dw, deadline - now); 1860 } 1861 } 1862 1863 static void nh_res_table_upkeep_dw(struct work_struct *work) 1864 { 1865 struct delayed_work *dw = to_delayed_work(work); 1866 struct nh_res_table *res_table; 1867 1868 res_table = container_of(dw, struct nh_res_table, upkeep_dw); 1869 nh_res_table_upkeep(res_table, true, true); 1870 } 1871 1872 static void nh_res_table_cancel_upkeep(struct nh_res_table *res_table) 1873 { 1874 cancel_delayed_work_sync(&res_table->upkeep_dw); 1875 } 1876 1877 static void nh_res_group_rebalance(struct nh_group *nhg, 1878 struct nh_res_table *res_table) 1879 { 1880 int prev_upper_bound = 0; 1881 int total = 0; 1882 int w = 0; 1883 int i; 1884 1885 INIT_LIST_HEAD(&res_table->uw_nh_entries); 1886 1887 for (i = 0; i < nhg->num_nh; ++i) 1888 total += nhg->nh_entries[i].weight; 1889 1890 for (i = 0; i < nhg->num_nh; ++i) { 1891 struct nh_grp_entry *nhge = &nhg->nh_entries[i]; 1892 int upper_bound; 1893 1894 w += nhge->weight; 1895 upper_bound = DIV_ROUND_CLOSEST(res_table->num_nh_buckets * w, 1896 total); 1897 nhge->res.wants_buckets = upper_bound - prev_upper_bound; 1898 prev_upper_bound = upper_bound; 1899 1900 if (nh_res_nhge_is_uw(nhge)) { 1901 if (list_empty(&res_table->uw_nh_entries)) 1902 res_table->unbalanced_since = jiffies; 1903 list_add(&nhge->res.uw_nh_entry, 1904 &res_table->uw_nh_entries); 1905 } 1906 } 1907 } 1908 1909 /* Migrate buckets in res_table so that they reference NHGE's from NHG with 1910 * the right NH ID. Set those buckets that do not have a corresponding NHGE 1911 * entry in NHG as not occupied. 
1912 */ 1913 static void nh_res_table_migrate_buckets(struct nh_res_table *res_table, 1914 struct nh_group *nhg) 1915 { 1916 u16 i; 1917 1918 for (i = 0; i < res_table->num_nh_buckets; i++) { 1919 struct nh_res_bucket *bucket = &res_table->nh_buckets[i]; 1920 u32 id = rtnl_dereference(bucket->nh_entry)->nh->id; 1921 bool found = false; 1922 int j; 1923 1924 for (j = 0; j < nhg->num_nh; j++) { 1925 struct nh_grp_entry *nhge = &nhg->nh_entries[j]; 1926 1927 if (nhge->nh->id == id) { 1928 nh_res_bucket_set_nh(bucket, nhge); 1929 found = true; 1930 break; 1931 } 1932 } 1933 1934 if (!found) 1935 nh_res_bucket_unset_nh(bucket); 1936 } 1937 } 1938 1939 static void replace_nexthop_grp_res(struct nh_group *oldg, 1940 struct nh_group *newg) 1941 { 1942 /* For NH group replacement, the new NHG might only have a stub 1943 * hash table with 0 buckets, because the number of buckets was not 1944 * specified. For NH removal, oldg and newg both reference the same 1945 * res_table. So in any case, in the following, we want to work 1946 * with oldg->res_table. 1947 */ 1948 struct nh_res_table *old_res_table = rtnl_dereference(oldg->res_table); 1949 unsigned long prev_unbalanced_since = old_res_table->unbalanced_since; 1950 bool prev_has_uw = !list_empty(&old_res_table->uw_nh_entries); 1951 1952 nh_res_table_cancel_upkeep(old_res_table); 1953 nh_res_table_migrate_buckets(old_res_table, newg); 1954 nh_res_group_rebalance(newg, old_res_table); 1955 if (prev_has_uw && !list_empty(&old_res_table->uw_nh_entries)) 1956 old_res_table->unbalanced_since = prev_unbalanced_since; 1957 nh_res_table_upkeep(old_res_table, true, false); 1958 } 1959 1960 static void nh_hthr_group_rebalance(struct nh_group *nhg) 1961 { 1962 int total = 0; 1963 int w = 0; 1964 int i; 1965 1966 for (i = 0; i < nhg->num_nh; ++i) 1967 total += nhg->nh_entries[i].weight; 1968 1969 for (i = 0; i < nhg->num_nh; ++i) { 1970 struct nh_grp_entry *nhge = &nhg->nh_entries[i]; 1971 int upper_bound; 1972 1973 w += nhge->weight; 1974 upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31, total) - 1; 1975 atomic_set(&nhge->hthr.upper_bound, upper_bound); 1976 } 1977 } 1978 1979 static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge, 1980 struct nl_info *nlinfo) 1981 { 1982 struct nh_grp_entry *nhges, *new_nhges; 1983 struct nexthop *nhp = nhge->nh_parent; 1984 struct netlink_ext_ack extack; 1985 struct nexthop *nh = nhge->nh; 1986 struct nh_group *nhg, *newg; 1987 int i, j, err; 1988 1989 WARN_ON(!nh); 1990 1991 nhg = rtnl_dereference(nhp->nh_grp); 1992 newg = nhg->spare; 1993 1994 /* last entry, keep it visible and remove the parent */ 1995 if (nhg->num_nh == 1) { 1996 remove_nexthop(net, nhp, nlinfo); 1997 return; 1998 } 1999 2000 newg->has_v4 = false; 2001 newg->is_multipath = nhg->is_multipath; 2002 newg->hash_threshold = nhg->hash_threshold; 2003 newg->resilient = nhg->resilient; 2004 newg->fdb_nh = nhg->fdb_nh; 2005 newg->num_nh = nhg->num_nh; 2006 2007 /* copy old entries to new except the one getting removed */ 2008 nhges = nhg->nh_entries; 2009 new_nhges = newg->nh_entries; 2010 for (i = 0, j = 0; i < nhg->num_nh; ++i) { 2011 struct nh_info *nhi; 2012 2013 /* current nexthop getting removed */ 2014 if (nhg->nh_entries[i].nh == nh) { 2015 newg->num_nh--; 2016 continue; 2017 } 2018 2019 nhi = rtnl_dereference(nhges[i].nh->nh_info); 2020 if (nhi->family == AF_INET) 2021 newg->has_v4 = true; 2022 2023 list_del(&nhges[i].nh_list); 2024 new_nhges[j].stats = nhges[i].stats; 2025 new_nhges[j].nh_parent = nhges[i].nh_parent; 2026 
static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
				struct nl_info *nlinfo)
{
	struct nh_grp_entry *nhges, *new_nhges;
	struct nexthop *nhp = nhge->nh_parent;
	struct netlink_ext_ack extack;
	struct nexthop *nh = nhge->nh;
	struct nh_group *nhg, *newg;
	int i, j, err;

	WARN_ON(!nh);

	nhg = rtnl_dereference(nhp->nh_grp);
	newg = nhg->spare;

	/* last entry, keep it visible and remove the parent */
	if (nhg->num_nh == 1) {
		remove_nexthop(net, nhp, nlinfo);
		return;
	}

	newg->has_v4 = false;
	newg->is_multipath = nhg->is_multipath;
	newg->hash_threshold = nhg->hash_threshold;
	newg->resilient = nhg->resilient;
	newg->fdb_nh = nhg->fdb_nh;
	newg->num_nh = nhg->num_nh;

	/* copy old entries to new except the one getting removed */
	nhges = nhg->nh_entries;
	new_nhges = newg->nh_entries;
	for (i = 0, j = 0; i < nhg->num_nh; ++i) {
		struct nh_info *nhi;

		/* current nexthop getting removed */
		if (nhg->nh_entries[i].nh == nh) {
			newg->num_nh--;
			continue;
		}

		nhi = rtnl_dereference(nhges[i].nh->nh_info);
		if (nhi->family == AF_INET)
			newg->has_v4 = true;

		list_del(&nhges[i].nh_list);
		new_nhges[j].stats = nhges[i].stats;
		new_nhges[j].nh_parent = nhges[i].nh_parent;
		new_nhges[j].nh = nhges[i].nh;
		new_nhges[j].weight = nhges[i].weight;
		list_add(&new_nhges[j].nh_list, &new_nhges[j].nh->grp_list);
		j++;
	}

	if (newg->hash_threshold)
		nh_hthr_group_rebalance(newg);
	else if (newg->resilient)
		replace_nexthop_grp_res(nhg, newg);

	rcu_assign_pointer(nhp->nh_grp, newg);

	list_del(&nhge->nh_list);
	free_percpu(nhge->stats);
	nexthop_put(nhge->nh);

	/* Removal of a NH from a resilient group is notified through
	 * bucket notifications.
	 */
	if (newg->hash_threshold) {
		err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, nhp,
					     &extack);
		if (err)
			pr_err("%s\n", extack._msg);
	}

	if (nlinfo)
		nexthop_notify(RTM_NEWNEXTHOP, nhp, nlinfo);
}

static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
				       struct nl_info *nlinfo)
{
	struct nh_grp_entry *nhge, *tmp;

	list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list)
		remove_nh_grp_entry(net, nhge, nlinfo);

	/* make sure all see the newly published array before releasing rtnl */
	synchronize_net();
}

static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
{
	struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp);
	struct nh_res_table *res_table;
	int i, num_nh = nhg->num_nh;

	for (i = 0; i < num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		if (WARN_ON(!nhge->nh))
			continue;

		list_del_init(&nhge->nh_list);
	}

	if (nhg->resilient) {
		res_table = rtnl_dereference(nhg->res_table);
		nh_res_table_cancel_upkeep(res_table);
	}
}

/* not called for nexthop replace */
static void __remove_nexthop_fib(struct net *net, struct nexthop *nh)
{
	struct fib6_info *f6i, *tmp;
	bool do_flush = false;
	struct fib_info *fi;

	list_for_each_entry(fi, &nh->fi_list, nh_list) {
		fi->fib_flags |= RTNH_F_DEAD;
		do_flush = true;
	}
	if (do_flush)
		fib_flush(net);

	/* ip6_del_rt removes the entry from this list hence the _safe */
	list_for_each_entry_safe(f6i, tmp, &nh->f6i_list, nh_list) {
		/* __ip6_del_rt does a release, so do a hold here */
		fib6_info_hold(f6i);
		ipv6_stub->ip6_del_rt(net, f6i,
				      !READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode));
	}
}

static void __remove_nexthop(struct net *net, struct nexthop *nh,
			     struct nl_info *nlinfo)
{
	__remove_nexthop_fib(net, nh);

	if (nh->is_group) {
		remove_nexthop_group(nh, nlinfo);
	} else {
		struct nh_info *nhi;

		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->fib_nhc.nhc_dev)
			hlist_del(&nhi->dev_hash);

		remove_nexthop_from_groups(net, nh, nlinfo);
	}
}

static void remove_nexthop(struct net *net, struct nexthop *nh,
			   struct nl_info *nlinfo)
{
	call_nexthop_notifiers(net, NEXTHOP_EVENT_DEL, nh, NULL);

	/* remove from the tree */
	rb_erase(&nh->rb_node, &net->nexthop.rb_root);

	if (nlinfo)
		nexthop_notify(RTM_DELNEXTHOP, nh, nlinfo);

	__remove_nexthop(net, nh, nlinfo);
	nh_base_seq_inc(net);

	nexthop_put(nh);
}
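/* Illustrative userspace trigger for the removal path above (example id
 * only): "ip nexthop del id 7" sends RTM_DELNEXTHOP, which reaches
 * remove_nexthop() via rtm_del_nexthop() further below. Groups referencing
 * the deleted nexthop shrink in place via remove_nh_grp_entry().
 */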
/* if any FIB entries reference this nexthop, any dst entries
 * need to be regenerated
 */
static void nh_rt_cache_flush(struct net *net, struct nexthop *nh,
			      struct nexthop *replaced_nh)
{
	struct fib6_info *f6i;
	struct nh_group *nhg;
	int i;

	if (!list_empty(&nh->fi_list))
		rt_cache_flush(net);

	list_for_each_entry(f6i, &nh->f6i_list, nh_list)
		ipv6_stub->fib6_update_sernum(net, f6i);

	/* if an IPv6 group was replaced, we have to release all old
	 * dsts to make sure all refcounts are released
	 */
	if (!replaced_nh->is_group)
		return;

	nhg = rtnl_dereference(replaced_nh->nh_grp);
	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		struct nh_info *nhi = rtnl_dereference(nhge->nh->nh_info);

		if (nhi->family == AF_INET6)
			ipv6_stub->fib6_nh_release_dsts(&nhi->fib6_nh);
	}
}

static int replace_nexthop_grp(struct net *net, struct nexthop *old,
			       struct nexthop *new, const struct nh_config *cfg,
			       struct netlink_ext_ack *extack)
{
	struct nh_res_table *tmp_table = NULL;
	struct nh_res_table *new_res_table;
	struct nh_res_table *old_res_table;
	struct nh_group *oldg, *newg;
	int i, err;

	if (!new->is_group) {
		NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with a nexthop.");
		return -EINVAL;
	}

	oldg = rtnl_dereference(old->nh_grp);
	newg = rtnl_dereference(new->nh_grp);

	if (newg->hash_threshold != oldg->hash_threshold) {
		NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with one of a different type.");
		return -EINVAL;
	}

	if (newg->hash_threshold) {
		err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new,
					     extack);
		if (err)
			return err;
	} else if (newg->resilient) {
		new_res_table = rtnl_dereference(newg->res_table);
		old_res_table = rtnl_dereference(oldg->res_table);

		/* Accept if num_nh_buckets was not given, but if it was
		 * given, demand that the value be correct.
		 */
		if (cfg->nh_grp_res_has_num_buckets &&
		    cfg->nh_grp_res_num_buckets !=
		    old_res_table->num_nh_buckets) {
			NL_SET_ERR_MSG(extack, "Can not change number of buckets of a resilient nexthop group.");
			return -EINVAL;
		}

		/* Emit a pre-replace notification so that listeners could veto
		 * a potentially unsupported configuration. Otherwise,
		 * individual bucket replacement notifications would need to be
		 * vetoed, which is something that should only happen if the
		 * bucket is currently active.
		 */
		err = call_nexthop_res_table_notifiers(net, new, extack);
		if (err)
			return err;

		if (cfg->nh_grp_res_has_idle_timer)
			old_res_table->idle_timer = cfg->nh_grp_res_idle_timer;
		if (cfg->nh_grp_res_has_unbalanced_timer)
			old_res_table->unbalanced_timer =
				cfg->nh_grp_res_unbalanced_timer;

		replace_nexthop_grp_res(oldg, newg);

		tmp_table = new_res_table;
		rcu_assign_pointer(newg->res_table, old_res_table);
		rcu_assign_pointer(newg->spare->res_table, old_res_table);
	}

	/* update parents - used by nexthop code for cleanup */
	for (i = 0; i < newg->num_nh; i++)
		newg->nh_entries[i].nh_parent = old;

	rcu_assign_pointer(old->nh_grp, newg);

	/* Make sure concurrent readers are not using 'oldg' anymore.
	 */
	synchronize_net();

	if (newg->resilient) {
		rcu_assign_pointer(oldg->res_table, tmp_table);
		rcu_assign_pointer(oldg->spare->res_table, tmp_table);
	}

	for (i = 0; i < oldg->num_nh; i++)
		oldg->nh_entries[i].nh_parent = new;

	rcu_assign_pointer(new->nh_grp, oldg);

	return 0;
}

static void nh_group_v4_update(struct nh_group *nhg)
{
	struct nh_grp_entry *nhges;
	bool has_v4 = false;
	int i;

	nhges = nhg->nh_entries;
	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_info *nhi;

		nhi = rtnl_dereference(nhges[i].nh->nh_info);
		if (nhi->family == AF_INET)
			has_v4 = true;
	}
	nhg->has_v4 = has_v4;
}

static int replace_nexthop_single_notify_res(struct net *net,
					     struct nh_res_table *res_table,
					     struct nexthop *old,
					     struct nh_info *oldi,
					     struct nh_info *newi,
					     struct netlink_ext_ack *extack)
{
	u32 nhg_id = res_table->nhg_id;
	int err;
	u16 i;

	for (i = 0; i < res_table->num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		struct nh_grp_entry *nhge;

		nhge = rtnl_dereference(bucket->nh_entry);
		if (nhge->nh == old) {
			err = __call_nexthop_res_bucket_notifiers(net, nhg_id,
								  i, true,
								  oldi, newi,
								  extack);
			if (err)
				goto err_notify;
		}
	}

	return 0;

err_notify:
	while (i-- > 0) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		struct nh_grp_entry *nhge;

		nhge = rtnl_dereference(bucket->nh_entry);
		if (nhge->nh == old)
			__call_nexthop_res_bucket_notifiers(net, nhg_id, i,
							    true, newi, oldi,
							    extack);
	}
	return err;
}

static int replace_nexthop_single_notify(struct net *net,
					 struct nexthop *group_nh,
					 struct nexthop *old,
					 struct nh_info *oldi,
					 struct nh_info *newi,
					 struct netlink_ext_ack *extack)
{
	struct nh_group *nhg = rtnl_dereference(group_nh->nh_grp);
	struct nh_res_table *res_table;

	if (nhg->hash_threshold) {
		return call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE,
					      group_nh, extack);
	} else if (nhg->resilient) {
		res_table = rtnl_dereference(nhg->res_table);
		return replace_nexthop_single_notify_res(net, res_table,
							 old, oldi, newi,
							 extack);
	}

	return -EINVAL;
}
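/* Illustrative userspace trigger for the single-nexthop replace handled
 * below (example values only): "ip nexthop replace id 5 via 192.0.2.9 dev
 * eth1" creates a transient 'new' nexthop whose nh_info is swapped into
 * the existing 'old' entry, so the ID and group memberships are preserved.
 */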
static int replace_nexthop_single(struct net *net, struct nexthop *old,
				  struct nexthop *new,
				  struct netlink_ext_ack *extack)
{
	u8 old_protocol, old_nh_flags;
	struct nh_info *oldi, *newi;
	struct nh_grp_entry *nhge;
	int err;

	if (new->is_group) {
		NL_SET_ERR_MSG(extack, "Can not replace a nexthop with a nexthop group.");
		return -EINVAL;
	}

	err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new, extack);
	if (err)
		return err;

	/* Hardware flags were set on 'old' as 'new' is not in the red-black
	 * tree. Therefore, inherit the flags from 'old' to 'new'.
	 */
	new->nh_flags |= old->nh_flags & (RTNH_F_OFFLOAD | RTNH_F_TRAP);

	oldi = rtnl_dereference(old->nh_info);
	newi = rtnl_dereference(new->nh_info);

	newi->nh_parent = old;
	oldi->nh_parent = new;

	old_protocol = old->protocol;
	old_nh_flags = old->nh_flags;

	old->protocol = new->protocol;
	old->nh_flags = new->nh_flags;

	rcu_assign_pointer(old->nh_info, newi);
	rcu_assign_pointer(new->nh_info, oldi);

	/* Send a replace notification for all the groups using the nexthop. */
	list_for_each_entry(nhge, &old->grp_list, nh_list) {
		struct nexthop *nhp = nhge->nh_parent;

		err = replace_nexthop_single_notify(net, nhp, old, oldi, newi,
						    extack);
		if (err)
			goto err_notify;
	}

	/* When replacing an IPv4 nexthop with an IPv6 nexthop, potentially
	 * update IPv4 indication in all the groups using the nexthop.
	 */
	if (oldi->family == AF_INET && newi->family == AF_INET6) {
		list_for_each_entry(nhge, &old->grp_list, nh_list) {
			struct nexthop *nhp = nhge->nh_parent;
			struct nh_group *nhg;

			nhg = rtnl_dereference(nhp->nh_grp);
			nh_group_v4_update(nhg);
		}
	}

	return 0;

err_notify:
	rcu_assign_pointer(new->nh_info, newi);
	rcu_assign_pointer(old->nh_info, oldi);
	old->nh_flags = old_nh_flags;
	old->protocol = old_protocol;
	oldi->nh_parent = old;
	newi->nh_parent = new;
	list_for_each_entry_continue_reverse(nhge, &old->grp_list, nh_list) {
		struct nexthop *nhp = nhge->nh_parent;

		replace_nexthop_single_notify(net, nhp, old, newi, oldi, NULL);
	}
	call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, old, extack);
	return err;
}
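/* Design note on the swap above (descriptive): replacing a single nexthop
 * exchanges the nh_info pointers of 'old' and 'new' instead of relinking
 * every FIB entry and group that references the nexthop, which keeps the
 * replace proportional to the number of groups notified rather than the
 * number of routes.
 */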
static void __nexthop_replace_notify(struct net *net, struct nexthop *nh,
				     struct nl_info *info)
{
	struct fib6_info *f6i;

	if (!list_empty(&nh->fi_list)) {
		struct fib_info *fi;

		/* expectation is a few fib_info per nexthop and then
		 * a lot of routes per fib_info. So mark the fib_info
		 * and then walk the fib tables once
		 */
		list_for_each_entry(fi, &nh->fi_list, nh_list)
			fi->nh_updated = true;

		fib_info_notify_update(net, info);

		list_for_each_entry(fi, &nh->fi_list, nh_list)
			fi->nh_updated = false;
	}

	list_for_each_entry(f6i, &nh->f6i_list, nh_list)
		ipv6_stub->fib6_rt_update(net, f6i, info);
}

/* send RTM_NEWROUTE with REPLACE flag set for all FIB entries
 * linked to this nexthop and for all groups that the nexthop
 * is a member of
 */
static void nexthop_replace_notify(struct net *net, struct nexthop *nh,
				   struct nl_info *info)
{
	struct nh_grp_entry *nhge;

	__nexthop_replace_notify(net, nh, info);

	list_for_each_entry(nhge, &nh->grp_list, nh_list)
		__nexthop_replace_notify(net, nhge->nh_parent, info);
}

static int replace_nexthop(struct net *net, struct nexthop *old,
			   struct nexthop *new, const struct nh_config *cfg,
			   struct netlink_ext_ack *extack)
{
	bool new_is_reject = false;
	struct nh_grp_entry *nhge;
	int err;

	/* check that existing FIB entries are ok with the
	 * new nexthop definition
	 */
	err = fib_check_nh_list(old, new, extack);
	if (err)
		return err;

	err = fib6_check_nh_list(old, new, extack);
	if (err)
		return err;

	if (!new->is_group) {
		struct nh_info *nhi = rtnl_dereference(new->nh_info);

		new_is_reject = nhi->reject_nh;
	}

	list_for_each_entry(nhge, &old->grp_list, nh_list) {
		/* if new nexthop is a blackhole, any groups using this
		 * nexthop cannot have more than 1 path
		 */
		if (new_is_reject &&
		    nexthop_num_path(nhge->nh_parent) > 1) {
			NL_SET_ERR_MSG(extack, "Blackhole nexthop can not be a member of a group with more than one path");
			return -EINVAL;
		}

		err = fib_check_nh_list(nhge->nh_parent, new, extack);
		if (err)
			return err;

		err = fib6_check_nh_list(nhge->nh_parent, new, extack);
		if (err)
			return err;
	}

	if (old->is_group)
		err = replace_nexthop_grp(net, old, new, cfg, extack);
	else
		err = replace_nexthop_single(net, old, new, extack);

	if (!err) {
		nh_rt_cache_flush(net, old, new);

		__remove_nexthop(net, new, NULL);
		nexthop_put(new);
	}

	return err;
}
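/* Netlink flag semantics for the insertion below (illustrative; this is
 * how iproute2 typically encodes these commands): "ip nexthop add" sends
 * NLM_F_CREATE | NLM_F_EXCL, so an existing ID fails with -EEXIST, while
 * "ip nexthop replace" sends NLM_F_CREATE | NLM_F_REPLACE and either
 * replaces the entry in place or creates it if the ID is unused.
 */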
/* called with rtnl_lock held */
static int insert_nexthop(struct net *net, struct nexthop *new_nh,
			  struct nh_config *cfg, struct netlink_ext_ack *extack)
{
	struct rb_node **pp, *parent = NULL, *next;
	struct rb_root *root = &net->nexthop.rb_root;
	bool replace = !!(cfg->nlflags & NLM_F_REPLACE);
	bool create = !!(cfg->nlflags & NLM_F_CREATE);
	u32 new_id = new_nh->id;
	int replace_notify = 0;
	int rc = -EEXIST;

	pp = &root->rb_node;
	while (1) {
		struct nexthop *nh;

		next = *pp;
		if (!next)
			break;

		parent = next;

		nh = rb_entry(parent, struct nexthop, rb_node);
		if (new_id < nh->id) {
			pp = &next->rb_left;
		} else if (new_id > nh->id) {
			pp = &next->rb_right;
		} else if (replace) {
			rc = replace_nexthop(net, nh, new_nh, cfg, extack);
			if (!rc) {
				new_nh = nh; /* send notification with old nh */
				replace_notify = 1;
			}
			goto out;
		} else {
			/* id already exists and not a replace */
			goto out;
		}
	}

	if (replace && !create) {
		NL_SET_ERR_MSG(extack, "Replace specified without create and no entry exists");
		rc = -ENOENT;
		goto out;
	}

	if (new_nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(new_nh->nh_grp);
		struct nh_res_table *res_table;

		if (nhg->resilient) {
			res_table = rtnl_dereference(nhg->res_table);

			/* Not passing the number of buckets is OK when
			 * replacing, but not when creating a new group.
			 */
			if (!cfg->nh_grp_res_has_num_buckets) {
				NL_SET_ERR_MSG(extack, "Number of buckets not specified for nexthop group insertion");
				rc = -EINVAL;
				goto out;
			}

			nh_res_group_rebalance(nhg, res_table);

			/* Do not send bucket notifications, we do full
			 * notification below.
			 */
			nh_res_table_upkeep(res_table, false, false);
		}
	}

	rb_link_node_rcu(&new_nh->rb_node, parent, pp);
	rb_insert_color(&new_nh->rb_node, root);

	/* The initial insertion is a full notification for hash-threshold as
	 * well as resilient groups.
	 */
	rc = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new_nh, extack);
	if (rc)
		rb_erase(&new_nh->rb_node, &net->nexthop.rb_root);

out:
	if (!rc) {
		nh_base_seq_inc(net);
		nexthop_notify(RTM_NEWNEXTHOP, new_nh, &cfg->nlinfo);
		if (replace_notify &&
		    READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode))
			nexthop_replace_notify(net, new_nh, &cfg->nlinfo);
	}

	return rc;
}

/* rtnl */
/* remove all nexthops tied to a device being deleted */
static void nexthop_flush_dev(struct net_device *dev, unsigned long event)
{
	unsigned int hash = nh_dev_hashfn(dev->ifindex);
	struct net *net = dev_net(dev);
	struct hlist_head *head = &net->nexthop.devhash[hash];
	struct hlist_node *n;
	struct nh_info *nhi;

	hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
		if (nhi->fib_nhc.nhc_dev != dev)
			continue;

		if (nhi->reject_nh &&
		    (event == NETDEV_DOWN || event == NETDEV_CHANGE))
			continue;

		remove_nexthop(net, nhi->nh_parent, NULL);
	}
}

/* rtnl; called when net namespace is deleted */
static void flush_all_nexthops(struct net *net)
{
	struct rb_root *root = &net->nexthop.rb_root;
	struct rb_node *node;
	struct nexthop *nh;

	while ((node = rb_first(root))) {
		nh = rb_entry(node, struct nexthop, rb_node);
		remove_nexthop(net, nh, NULL);
		cond_resched();
	}
}
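/* Illustrative userspace trigger for the group creation below (example ids
 * and weights only): "ip nexthop add id 100 group 1/2,3" requests a
 * hash-threshold group over nexthops 1 and 2 with weights 1 and 3. Note
 * that struct nexthop_grp carries the weight minus one on the wire, which
 * is why the code below adds 1 back when storing each entry's weight.
 */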
static struct nexthop *nexthop_create_group(struct net *net,
					    struct nh_config *cfg)
{
	struct nlattr *grps_attr = cfg->nh_grp;
	struct nexthop_grp *entry = nla_data(grps_attr);
	u16 num_nh = nla_len(grps_attr) / sizeof(*entry);
	struct nh_group *nhg;
	struct nexthop *nh;
	int err;
	int i;

	if (WARN_ON(!num_nh))
		return ERR_PTR(-EINVAL);

	nh = nexthop_alloc();
	if (!nh)
		return ERR_PTR(-ENOMEM);

	nh->is_group = 1;

	nhg = nexthop_grp_alloc(num_nh);
	if (!nhg) {
		kfree(nh);
		return ERR_PTR(-ENOMEM);
	}

	/* spare group used for removals */
	nhg->spare = nexthop_grp_alloc(num_nh);
	if (!nhg->spare) {
		kfree(nhg);
		kfree(nh);
		return ERR_PTR(-ENOMEM);
	}
	nhg->spare->spare = nhg;

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nexthop *nhe;
		struct nh_info *nhi;

		nhe = nexthop_find_by_id(net, entry[i].id);
		if (!nexthop_get(nhe)) {
			err = -ENOENT;
			goto out_no_nh;
		}

		nhi = rtnl_dereference(nhe->nh_info);
		if (nhi->family == AF_INET)
			nhg->has_v4 = true;

		nhg->nh_entries[i].stats =
			netdev_alloc_pcpu_stats(struct nh_grp_entry_stats);
		if (!nhg->nh_entries[i].stats) {
			err = -ENOMEM;
			nexthop_put(nhe);
			goto out_no_nh;
		}
		nhg->nh_entries[i].nh = nhe;
		nhg->nh_entries[i].weight = entry[i].weight + 1;
		list_add(&nhg->nh_entries[i].nh_list, &nhe->grp_list);
		nhg->nh_entries[i].nh_parent = nh;
	}

	if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_MPATH) {
		nhg->hash_threshold = 1;
		nhg->is_multipath = true;
	} else if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES) {
		struct nh_res_table *res_table;

		res_table = nexthop_res_table_alloc(net, cfg->nh_id, cfg);
		if (!res_table) {
			err = -ENOMEM;
			goto out_no_nh;
		}

		rcu_assign_pointer(nhg->spare->res_table, res_table);
		rcu_assign_pointer(nhg->res_table, res_table);
		nhg->resilient = true;
		nhg->is_multipath = true;
	}

	WARN_ON_ONCE(nhg->hash_threshold + nhg->resilient != 1);

	if (nhg->hash_threshold)
		nh_hthr_group_rebalance(nhg);

	if (cfg->nh_fdb)
		nhg->fdb_nh = 1;

	if (cfg->nh_hw_stats)
		nhg->hw_stats = true;

	rcu_assign_pointer(nh->nh_grp, nhg);

	return nh;

out_no_nh:
	for (i--; i >= 0; --i) {
		list_del(&nhg->nh_entries[i].nh_list);
		free_percpu(nhg->nh_entries[i].stats);
		nexthop_put(nhg->nh_entries[i].nh);
	}

	kfree(nhg->spare);
	kfree(nhg);
	kfree(nh);

	return ERR_PTR(err);
}
static int nh_create_ipv4(struct net *net, struct nexthop *nh,
			  struct nh_info *nhi, struct nh_config *cfg,
			  struct netlink_ext_ack *extack)
{
	struct fib_nh *fib_nh = &nhi->fib_nh;
	struct fib_config fib_cfg = {
		.fc_oif = cfg->nh_ifindex,
		.fc_gw4 = cfg->gw.ipv4,
		.fc_gw_family = cfg->gw.ipv4 ? AF_INET : 0,
		.fc_flags = cfg->nh_flags,
		.fc_nlinfo = cfg->nlinfo,
		.fc_encap = cfg->nh_encap,
		.fc_encap_type = cfg->nh_encap_type,
	};
	u32 tb_id = (cfg->dev ? l3mdev_fib_table(cfg->dev) : RT_TABLE_MAIN);
	int err;

	err = fib_nh_init(net, fib_nh, &fib_cfg, 1, extack);
	if (err) {
		fib_nh_release(net, fib_nh);
		goto out;
	}

	if (nhi->fdb_nh)
		goto out;

	/* sets nh_dev if successful */
	err = fib_check_nh(net, fib_nh, tb_id, 0, extack);
	if (!err) {
		nh->nh_flags = fib_nh->fib_nh_flags;
		fib_info_update_nhc_saddr(net, &fib_nh->nh_common,
					  !fib_nh->fib_nh_scope ?
					  0 : fib_nh->fib_nh_scope - 1);
	} else {
		fib_nh_release(net, fib_nh);
	}
out:
	return err;
}

static int nh_create_ipv6(struct net *net, struct nexthop *nh,
			  struct nh_info *nhi, struct nh_config *cfg,
			  struct netlink_ext_ack *extack)
{
	struct fib6_nh *fib6_nh = &nhi->fib6_nh;
	struct fib6_config fib6_cfg = {
		.fc_table = l3mdev_fib_table(cfg->dev),
		.fc_ifindex = cfg->nh_ifindex,
		.fc_gateway = cfg->gw.ipv6,
		.fc_flags = cfg->nh_flags,
		.fc_nlinfo = cfg->nlinfo,
		.fc_encap = cfg->nh_encap,
		.fc_encap_type = cfg->nh_encap_type,
		.fc_is_fdb = cfg->nh_fdb,
	};
	int err;

	if (!ipv6_addr_any(&cfg->gw.ipv6))
		fib6_cfg.fc_flags |= RTF_GATEWAY;

	/* sets nh_dev if successful */
	err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL,
				      extack);
	if (err) {
		/* IPv6 is not enabled, don't call fib6_nh_release */
		if (err == -EAFNOSUPPORT)
			goto out;
		ipv6_stub->fib6_nh_release(fib6_nh);
	} else {
		nh->nh_flags = fib6_nh->fib_nh_flags;
	}
out:
	return err;
}

static struct nexthop *nexthop_create(struct net *net, struct nh_config *cfg,
				      struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;
	struct nexthop *nh;
	int err = 0;

	nh = nexthop_alloc();
	if (!nh)
		return ERR_PTR(-ENOMEM);

	nhi = kzalloc(sizeof(*nhi), GFP_KERNEL);
	if (!nhi) {
		kfree(nh);
		return ERR_PTR(-ENOMEM);
	}

	nh->nh_flags = cfg->nh_flags;
	nh->net = net;

	nhi->nh_parent = nh;
	nhi->family = cfg->nh_family;
	nhi->fib_nhc.nhc_scope = RT_SCOPE_LINK;

	if (cfg->nh_fdb)
		nhi->fdb_nh = 1;

	if (cfg->nh_blackhole) {
		nhi->reject_nh = 1;
		cfg->nh_ifindex = net->loopback_dev->ifindex;
	}

	switch (cfg->nh_family) {
	case AF_INET:
		err = nh_create_ipv4(net, nh, nhi, cfg, extack);
		break;
	case AF_INET6:
		err = nh_create_ipv6(net, nh, nhi, cfg, extack);
		break;
	}

	if (err) {
		kfree(nhi);
		kfree(nh);
		return ERR_PTR(err);
	}

	/* add the entry to the device based hash */
	if (!nhi->fdb_nh)
		nexthop_devhash_add(net, nhi);

	rcu_assign_pointer(nh->nh_info, nhi);

	return nh;
}

/* called with rtnl lock held */
static struct nexthop *nexthop_add(struct net *net, struct nh_config *cfg,
				   struct netlink_ext_ack *extack)
{
	struct nexthop *nh;
	int err;

	if (cfg->nlflags & NLM_F_REPLACE && !cfg->nh_id) {
		NL_SET_ERR_MSG(extack, "Replace requires nexthop id");
		return ERR_PTR(-EINVAL);
	}

	if (!cfg->nh_id) {
		cfg->nh_id = nh_find_unused_id(net);
		if (!cfg->nh_id) {
			NL_SET_ERR_MSG(extack, "No unused id");
			return ERR_PTR(-EINVAL);
		}
	}

	if (cfg->nh_grp)
		nh = nexthop_create_group(net, cfg);
	else
		nh = nexthop_create(net, cfg, extack);

	if (IS_ERR(nh))
		return nh;

	refcount_set(&nh->refcnt, 1);
	nh->id = cfg->nh_id;
	nh->protocol = cfg->nh_protocol;
	nh->net = net;

	err = insert_nexthop(net, nh, cfg, extack);
	if (err) {
		__remove_nexthop(net, nh, NULL);
		nexthop_put(nh);
		nh = ERR_PTR(err);
	}

	return nh;
}
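/* The timer attributes parsed below are carried as u32 clock_t values
 * (USER_HZ ticks, i.e. hundredths of a second) and converted to jiffies
 * via clock_t_to_jiffies(). For example, an NHA_RES_GROUP_IDLE_TIMER of
 * 1000 requests a 10 second idle timer.
 */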
static int rtm_nh_get_timer(struct nlattr *attr, unsigned long fallback,
			    unsigned long *timer_p, bool *has_p,
			    struct netlink_ext_ack *extack)
{
	unsigned long timer;
	u32 value;

	if (!attr) {
		*timer_p = fallback;
		*has_p = false;
		return 0;
	}

	value = nla_get_u32(attr);
	timer = clock_t_to_jiffies(value);
	if (timer == ~0UL) {
		NL_SET_ERR_MSG(extack, "Timer value too large");
		return -EINVAL;
	}

	*timer_p = timer;
	*has_p = true;
	return 0;
}

static int rtm_to_nh_config_grp_res(struct nlattr *res, struct nh_config *cfg,
				    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_policy_new)] = {};
	int err;

	if (res) {
		err = nla_parse_nested(tb,
				       ARRAY_SIZE(rtm_nh_res_policy_new) - 1,
				       res, rtm_nh_res_policy_new, extack);
		if (err < 0)
			return err;
	}

	if (tb[NHA_RES_GROUP_BUCKETS]) {
		cfg->nh_grp_res_num_buckets =
			nla_get_u16(tb[NHA_RES_GROUP_BUCKETS]);
		cfg->nh_grp_res_has_num_buckets = true;
		if (!cfg->nh_grp_res_num_buckets) {
			NL_SET_ERR_MSG(extack, "Number of buckets needs to be non-0");
			return -EINVAL;
		}
	}

	err = rtm_nh_get_timer(tb[NHA_RES_GROUP_IDLE_TIMER],
			       NH_RES_DEFAULT_IDLE_TIMER,
			       &cfg->nh_grp_res_idle_timer,
			       &cfg->nh_grp_res_has_idle_timer,
			       extack);
	if (err)
		return err;

	return rtm_nh_get_timer(tb[NHA_RES_GROUP_UNBALANCED_TIMER],
				NH_RES_DEFAULT_UNBALANCED_TIMER,
				&cfg->nh_grp_res_unbalanced_timer,
				&cfg->nh_grp_res_has_unbalanced_timer,
				extack);
}
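/* Illustrative userspace request covered by the parsing above and below
 * (example values only): "ip nexthop add id 100 group 1/2 type resilient
 * buckets 32 idle_timer 10" populates NHA_GROUP, NHA_GROUP_TYPE and the
 * NHA_RES_GROUP nest that rtm_to_nh_config_grp_res() consumes.
 */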
static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
			    struct nlmsghdr *nlh, struct nh_config *cfg,
			    struct netlink_ext_ack *extack)
{
	struct nhmsg *nhm = nlmsg_data(nlh);
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_new)];
	int err;

	err = nlmsg_parse(nlh, sizeof(*nhm), tb,
			  ARRAY_SIZE(rtm_nh_policy_new) - 1,
			  rtm_nh_policy_new, extack);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (nhm->resvd || nhm->nh_scope) {
		NL_SET_ERR_MSG(extack, "Invalid values in ancillary header");
		goto out;
	}
	if (nhm->nh_flags & ~NEXTHOP_VALID_USER_FLAGS) {
		NL_SET_ERR_MSG(extack, "Invalid nexthop flags in ancillary header");
		goto out;
	}

	switch (nhm->nh_family) {
	case AF_INET:
	case AF_INET6:
		break;
	case AF_UNSPEC:
		if (tb[NHA_GROUP])
			break;
		fallthrough;
	default:
		NL_SET_ERR_MSG(extack, "Invalid address family");
		goto out;
	}

	memset(cfg, 0, sizeof(*cfg));
	cfg->nlflags = nlh->nlmsg_flags;
	cfg->nlinfo.portid = NETLINK_CB(skb).portid;
	cfg->nlinfo.nlh = nlh;
	cfg->nlinfo.nl_net = net;

	cfg->nh_family = nhm->nh_family;
	cfg->nh_protocol = nhm->nh_protocol;
	cfg->nh_flags = nhm->nh_flags;

	if (tb[NHA_ID])
		cfg->nh_id = nla_get_u32(tb[NHA_ID]);

	if (tb[NHA_FDB]) {
		if (tb[NHA_OIF] || tb[NHA_BLACKHOLE] ||
		    tb[NHA_ENCAP] || tb[NHA_ENCAP_TYPE]) {
			NL_SET_ERR_MSG(extack, "Fdb attribute can not be used with encap, oif or blackhole");
			goto out;
		}
		if (nhm->nh_flags) {
			NL_SET_ERR_MSG(extack, "Unsupported nexthop flags in ancillary header");
			goto out;
		}
		cfg->nh_fdb = nla_get_flag(tb[NHA_FDB]);
	}

	if (tb[NHA_GROUP]) {
		if (nhm->nh_family != AF_UNSPEC) {
			NL_SET_ERR_MSG(extack, "Invalid family for group");
			goto out;
		}
		cfg->nh_grp = tb[NHA_GROUP];

		cfg->nh_grp_type = NEXTHOP_GRP_TYPE_MPATH;
		if (tb[NHA_GROUP_TYPE])
			cfg->nh_grp_type = nla_get_u16(tb[NHA_GROUP_TYPE]);

		if (cfg->nh_grp_type > NEXTHOP_GRP_TYPE_MAX) {
			NL_SET_ERR_MSG(extack, "Invalid group type");
			goto out;
		}
		err = nh_check_attr_group(net, tb, ARRAY_SIZE(tb),
					  cfg->nh_grp_type, extack);
		if (err)
			goto out;

		if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES)
			err = rtm_to_nh_config_grp_res(tb[NHA_RES_GROUP],
						       cfg, extack);

		if (tb[NHA_HW_STATS_ENABLE])
			cfg->nh_hw_stats = nla_get_u32(tb[NHA_HW_STATS_ENABLE]);

		/* no other attributes should be set */
		goto out;
	}

	if (tb[NHA_BLACKHOLE]) {
		if (tb[NHA_GATEWAY] || tb[NHA_OIF] ||
		    tb[NHA_ENCAP] || tb[NHA_ENCAP_TYPE] || tb[NHA_FDB]) {
			NL_SET_ERR_MSG(extack, "Blackhole attribute can not be used with gateway, oif, encap or fdb");
			goto out;
		}

		cfg->nh_blackhole = 1;
		err = 0;
		goto out;
	}

	if (!cfg->nh_fdb && !tb[NHA_OIF]) {
		NL_SET_ERR_MSG(extack, "Device attribute required for non-blackhole and non-fdb nexthops");
		goto out;
	}

	if (!cfg->nh_fdb && tb[NHA_OIF]) {
		cfg->nh_ifindex = nla_get_u32(tb[NHA_OIF]);
		if (cfg->nh_ifindex)
			cfg->dev = __dev_get_by_index(net, cfg->nh_ifindex);

		if (!cfg->dev) {
			NL_SET_ERR_MSG(extack, "Invalid device index");
			goto out;
		} else if (!(cfg->dev->flags & IFF_UP)) {
			NL_SET_ERR_MSG(extack, "Nexthop device is not up");
			err = -ENETDOWN;
			goto out;
		} else if (!netif_carrier_ok(cfg->dev)) {
			NL_SET_ERR_MSG(extack, "Carrier for nexthop device is down");
			err = -ENETDOWN;
			goto out;
		}
	}

	err = -EINVAL;
	if (tb[NHA_GATEWAY]) {
		struct nlattr *gwa = tb[NHA_GATEWAY];

		switch (cfg->nh_family) {
		case AF_INET:
			if (nla_len(gwa) != sizeof(u32)) {
				NL_SET_ERR_MSG(extack, "Invalid gateway");
				goto out;
			}
			cfg->gw.ipv4 = nla_get_be32(gwa);
			break;
		case AF_INET6:
			if (nla_len(gwa) != sizeof(struct in6_addr)) {
				NL_SET_ERR_MSG(extack, "Invalid gateway");
				goto out;
			}
			cfg->gw.ipv6 = nla_get_in6_addr(gwa);
			break;
		default:
			NL_SET_ERR_MSG(extack,
				       "Unknown address family for gateway");
			goto out;
		}
	} else {
		/* device only nexthop (no gateway) */
		if (cfg->nh_flags & RTNH_F_ONLINK) {
			NL_SET_ERR_MSG(extack,
				       "ONLINK flag can not be set for nexthop without a gateway");
			goto out;
		}
	}

	if (tb[NHA_ENCAP]) {
		cfg->nh_encap = tb[NHA_ENCAP];

		if (!tb[NHA_ENCAP_TYPE]) {
			NL_SET_ERR_MSG(extack, "LWT encapsulation type is missing");
			goto out;
		}

		cfg->nh_encap_type = nla_get_u16(tb[NHA_ENCAP_TYPE]);
		err = lwtunnel_valid_encap_type(cfg->nh_encap_type, extack);
		if (err < 0)
			goto out;

	} else if (tb[NHA_ENCAP_TYPE]) {
		NL_SET_ERR_MSG(extack, "LWT encapsulation attribute is missing");
		goto out;
	}

	if (tb[NHA_HW_STATS_ENABLE]) {
		NL_SET_ERR_MSG(extack, "Cannot enable nexthop hardware statistics for non-group nexthops");
		goto out;
	}

	err = 0;
out:
	return err;
}
/* rtnl */
static int rtm_new_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nh_config cfg;
	struct nexthop *nh;
	int err;

	err = rtm_to_nh_config(net, skb, nlh, &cfg, extack);
	if (!err) {
		nh = nexthop_add(net, &cfg, extack);
		if (IS_ERR(nh))
			err = PTR_ERR(nh);
	}

	return err;
}

static int nh_valid_get_del_req(const struct nlmsghdr *nlh,
				struct nlattr **tb, u32 *id, u32 *op_flags,
				struct netlink_ext_ack *extack)
{
	struct nhmsg *nhm = nlmsg_data(nlh);

	if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
		NL_SET_ERR_MSG(extack, "Invalid values in header");
		return -EINVAL;
	}

	if (!tb[NHA_ID]) {
		NL_SET_ERR_MSG(extack, "Nexthop id is missing");
		return -EINVAL;
	}

	*id = nla_get_u32(tb[NHA_ID]);
	if (!(*id)) {
		NL_SET_ERR_MSG(extack, "Invalid nexthop id");
		return -EINVAL;
	}

	if (op_flags) {
		if (tb[NHA_OP_FLAGS])
			*op_flags = nla_get_u32(tb[NHA_OP_FLAGS]);
		else
			*op_flags = 0;
	}

	return 0;
}

/* rtnl */
static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_del)];
	struct net *net = sock_net(skb->sk);
	struct nl_info nlinfo = {
		.nlh = nlh,
		.nl_net = net,
		.portid = NETLINK_CB(skb).portid,
	};
	struct nexthop *nh;
	int err;
	u32 id;

	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
			  ARRAY_SIZE(rtm_nh_policy_del) - 1, rtm_nh_policy_del,
			  extack);
	if (err < 0)
		return err;

	err = nh_valid_get_del_req(nlh, tb, &id, NULL, extack);
	if (err)
		return err;

	nh = nexthop_find_by_id(net, id);
	if (!nh)
		return -ENOENT;

	remove_nexthop(net, nh, &nlinfo);

	return 0;
}

/* rtnl */
static int rtm_get_nexthop(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get)];
	struct net *net = sock_net(in_skb->sk);
	struct sk_buff *skb = NULL;
	struct nexthop *nh;
	u32 op_flags;
	int err;
	u32 id;

	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
			  ARRAY_SIZE(rtm_nh_policy_get) - 1, rtm_nh_policy_get,
			  extack);
	if (err < 0)
		return err;

	err = nh_valid_get_del_req(nlh, tb, &id, &op_flags, extack);
	if (err)
		return err;

	err = -ENOBUFS;
	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		goto out;

	err = -ENOENT;
	nh = nexthop_find_by_id(net, id);
	if (!nh)
		goto errout_free;

	err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP, NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0, op_flags);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		goto errout_free;
	}

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
out:
	return err;
errout_free:
	kfree_skb(skb);
	goto out;
}

struct nh_dump_filter {
	u32 nh_id;
	int dev_idx;
	int master_idx;
	bool group_filter;
	bool fdb_filter;
	u32 res_bucket_nh_id;
	u32 op_flags;
};
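/* Illustrative dump filters handled below (iproute2 examples, device names
 * hypothetical): "ip nexthop show dev eth0" sets NHA_OIF, "ip nexthop show
 * groups" sets NHA_GROUPS, "ip nexthop show master vrf-red" sets
 * NHA_MASTER, and "ip nexthop show fdb" sets NHA_FDB.
 */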
static bool nh_dump_filtered(struct nexthop *nh,
			     struct nh_dump_filter *filter, u8 family)
{
	const struct net_device *dev;
	const struct nh_info *nhi;

	if (filter->group_filter && !nh->is_group)
		return true;

	if (!filter->dev_idx && !filter->master_idx && !family)
		return false;

	if (nh->is_group)
		return true;

	nhi = rtnl_dereference(nh->nh_info);
	if (family && nhi->family != family)
		return true;

	dev = nhi->fib_nhc.nhc_dev;
	if (filter->dev_idx && (!dev || dev->ifindex != filter->dev_idx))
		return true;

	if (filter->master_idx) {
		struct net_device *master;

		if (!dev)
			return true;

		master = netdev_master_upper_dev_get((struct net_device *)dev);
		if (!master || master->ifindex != filter->master_idx)
			return true;
	}

	return false;
}

static int __nh_valid_dump_req(const struct nlmsghdr *nlh, struct nlattr **tb,
			       struct nh_dump_filter *filter,
			       struct netlink_ext_ack *extack)
{
	struct nhmsg *nhm;
	u32 idx;

	if (tb[NHA_OIF]) {
		idx = nla_get_u32(tb[NHA_OIF]);
		if (idx > INT_MAX) {
			NL_SET_ERR_MSG(extack, "Invalid device index");
			return -EINVAL;
		}
		filter->dev_idx = idx;
	}
	if (tb[NHA_MASTER]) {
		idx = nla_get_u32(tb[NHA_MASTER]);
		if (idx > INT_MAX) {
			NL_SET_ERR_MSG(extack, "Invalid master device index");
			return -EINVAL;
		}
		filter->master_idx = idx;
	}
	filter->group_filter = nla_get_flag(tb[NHA_GROUPS]);
	filter->fdb_filter = nla_get_flag(tb[NHA_FDB]);

	nhm = nlmsg_data(nlh);
	if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for nexthop dump request");
		return -EINVAL;
	}

	return 0;
}

static int nh_valid_dump_req(const struct nlmsghdr *nlh,
			     struct nh_dump_filter *filter,
			     struct netlink_callback *cb)
{
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_dump)];
	int err;

	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
			  ARRAY_SIZE(rtm_nh_policy_dump) - 1,
			  rtm_nh_policy_dump, cb->extack);
	if (err < 0)
		return err;

	if (tb[NHA_OP_FLAGS])
		filter->op_flags = nla_get_u32(tb[NHA_OP_FLAGS]);
	else
		filter->op_flags = 0;

	return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
}

struct rtm_dump_nh_ctx {
	u32 idx;
};

static struct rtm_dump_nh_ctx *
rtm_dump_nh_ctx(struct netlink_callback *cb)
{
	struct rtm_dump_nh_ctx *ctx = (void *)cb->ctx;

	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
	return ctx;
}

static int rtm_dump_walk_nexthops(struct sk_buff *skb,
				  struct netlink_callback *cb,
				  struct rb_root *root,
				  struct rtm_dump_nh_ctx *ctx,
				  int (*nh_cb)(struct sk_buff *skb,
					       struct netlink_callback *cb,
					       struct nexthop *nh, void *data),
				  void *data)
{
	struct rb_node *node;
	int s_idx;
	int err;

	s_idx = ctx->idx;
	for (node = rb_first(root); node; node = rb_next(node)) {
		struct nexthop *nh;

		nh = rb_entry(node, struct nexthop, rb_node);
		if (nh->id < s_idx)
			continue;

		ctx->idx = nh->id;
		err = nh_cb(skb, cb, nh, data);
		if (err)
			return err;
	}

	return 0;
}

static int rtm_dump_nexthop_cb(struct sk_buff *skb, struct netlink_callback *cb,
			       struct nexthop *nh, void *data)
{
	struct nhmsg *nhm = nlmsg_data(cb->nlh);
	struct nh_dump_filter *filter = data;

	if (nh_dump_filtered(nh, filter, nhm->nh_family))
		return 0;

	return nh_fill_node(skb, nh, RTM_NEWNEXTHOP,
			    NETLINK_CB(cb->skb).portid,
			    cb->nlh->nlmsg_seq, NLM_F_MULTI, filter->op_flags);
}
/* rtnl */
static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rtm_dump_nh_ctx *ctx = rtm_dump_nh_ctx(cb);
	struct net *net = sock_net(skb->sk);
	struct rb_root *root = &net->nexthop.rb_root;
	struct nh_dump_filter filter = {};
	int err;

	err = nh_valid_dump_req(cb->nlh, &filter, cb);
	if (err < 0)
		return err;

	err = rtm_dump_walk_nexthops(skb, cb, root, ctx,
				     &rtm_dump_nexthop_cb, &filter);

	cb->seq = net->nexthop.seq;
	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
	return err;
}

static struct nexthop *
nexthop_find_group_resilient(struct net *net, u32 id,
			     struct netlink_ext_ack *extack)
{
	struct nh_group *nhg;
	struct nexthop *nh;

	nh = nexthop_find_by_id(net, id);
	if (!nh)
		return ERR_PTR(-ENOENT);

	if (!nh->is_group) {
		NL_SET_ERR_MSG(extack, "Not a nexthop group");
		return ERR_PTR(-EINVAL);
	}

	nhg = rtnl_dereference(nh->nh_grp);
	if (!nhg->resilient) {
		NL_SET_ERR_MSG(extack, "Nexthop group not of type resilient");
		return ERR_PTR(-EINVAL);
	}

	return nh;
}

static int nh_valid_dump_nhid(struct nlattr *attr, u32 *nh_id_p,
			      struct netlink_ext_ack *extack)
{
	u32 idx;

	if (attr) {
		idx = nla_get_u32(attr);
		if (!idx) {
			NL_SET_ERR_MSG(extack, "Invalid nexthop id");
			return -EINVAL;
		}
		*nh_id_p = idx;
	} else {
		*nh_id_p = 0;
	}

	return 0;
}

static int nh_valid_dump_bucket_req(const struct nlmsghdr *nlh,
				    struct nh_dump_filter *filter,
				    struct netlink_callback *cb)
{
	struct nlattr *res_tb[ARRAY_SIZE(rtm_nh_res_bucket_policy_dump)];
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_dump_bucket)];
	int err;

	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
			  ARRAY_SIZE(rtm_nh_policy_dump_bucket) - 1,
			  rtm_nh_policy_dump_bucket, NULL);
	if (err < 0)
		return err;

	err = nh_valid_dump_nhid(tb[NHA_ID], &filter->nh_id, cb->extack);
	if (err)
		return err;

	if (tb[NHA_RES_BUCKET]) {
		size_t max = ARRAY_SIZE(rtm_nh_res_bucket_policy_dump) - 1;

		err = nla_parse_nested(res_tb, max,
				       tb[NHA_RES_BUCKET],
				       rtm_nh_res_bucket_policy_dump,
				       cb->extack);
		if (err < 0)
			return err;

		err = nh_valid_dump_nhid(res_tb[NHA_RES_BUCKET_NH_ID],
					 &filter->res_bucket_nh_id,
					 cb->extack);
		if (err)
			return err;
	}

	return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
}

struct rtm_dump_res_bucket_ctx {
	struct rtm_dump_nh_ctx nh;
	u16 bucket_index;
};

static struct rtm_dump_res_bucket_ctx *
rtm_dump_res_bucket_ctx(struct netlink_callback *cb)
{
	struct rtm_dump_res_bucket_ctx *ctx = (void *)cb->ctx;

	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
	return ctx;
}

struct rtm_dump_nexthop_bucket_data {
	struct rtm_dump_res_bucket_ctx *ctx;
	struct nh_dump_filter filter;
};
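/* Note on dump resumption (descriptive, not an API contract): the bucket
 * dump context pairs the nexthop walk position with bucket_index, so when
 * a dump fills the skb mid-group, the next callback invocation resumes at
 * the same group and bucket rather than restarting from bucket 0.
 */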
static int rtm_dump_nexthop_bucket_nh(struct sk_buff *skb,
				      struct netlink_callback *cb,
				      struct nexthop *nh,
				      struct rtm_dump_nexthop_bucket_data *dd)
{
	u32 portid = NETLINK_CB(cb->skb).portid;
	struct nhmsg *nhm = nlmsg_data(cb->nlh);
	struct nh_res_table *res_table;
	struct nh_group *nhg;
	u16 bucket_index;
	int err;

	nhg = rtnl_dereference(nh->nh_grp);
	res_table = rtnl_dereference(nhg->res_table);
	for (bucket_index = dd->ctx->bucket_index;
	     bucket_index < res_table->num_nh_buckets;
	     bucket_index++) {
		struct nh_res_bucket *bucket;
		struct nh_grp_entry *nhge;

		bucket = &res_table->nh_buckets[bucket_index];
		nhge = rtnl_dereference(bucket->nh_entry);
		if (nh_dump_filtered(nhge->nh, &dd->filter, nhm->nh_family))
			continue;

		if (dd->filter.res_bucket_nh_id &&
		    dd->filter.res_bucket_nh_id != nhge->nh->id)
			continue;

		dd->ctx->bucket_index = bucket_index;
		err = nh_fill_res_bucket(skb, nh, bucket, bucket_index,
					 RTM_NEWNEXTHOPBUCKET, portid,
					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
					 cb->extack);
		if (err)
			return err;
	}

	dd->ctx->bucket_index = 0;

	return 0;
}

static int rtm_dump_nexthop_bucket_cb(struct sk_buff *skb,
				      struct netlink_callback *cb,
				      struct nexthop *nh, void *data)
{
	struct rtm_dump_nexthop_bucket_data *dd = data;
	struct nh_group *nhg;

	if (!nh->is_group)
		return 0;

	nhg = rtnl_dereference(nh->nh_grp);
	if (!nhg->resilient)
		return 0;

	return rtm_dump_nexthop_bucket_nh(skb, cb, nh, dd);
}

/* rtnl */
static int rtm_dump_nexthop_bucket(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	struct rtm_dump_res_bucket_ctx *ctx = rtm_dump_res_bucket_ctx(cb);
	struct rtm_dump_nexthop_bucket_data dd = { .ctx = ctx };
	struct net *net = sock_net(skb->sk);
	struct nexthop *nh;
	int err;

	err = nh_valid_dump_bucket_req(cb->nlh, &dd.filter, cb);
	if (err)
		return err;

	if (dd.filter.nh_id) {
		nh = nexthop_find_group_resilient(net, dd.filter.nh_id,
						  cb->extack);
		if (IS_ERR(nh))
			return PTR_ERR(nh);
		err = rtm_dump_nexthop_bucket_nh(skb, cb, nh, &dd);
	} else {
		struct rb_root *root = &net->nexthop.rb_root;

		err = rtm_dump_walk_nexthops(skb, cb, root, &ctx->nh,
					     &rtm_dump_nexthop_bucket_cb, &dd);
	}

	cb->seq = net->nexthop.seq;
	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
	return err;
}
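/* Illustrative userspace requests for the bucket handlers around here
 * (example values only): "ip nexthop bucket show id 100" dumps all buckets
 * of group 100, while "ip nexthop bucket get id 100 index 0" fetches a
 * single bucket via RTM_GETNEXTHOPBUCKET with NHA_RES_BUCKET_INDEX.
 */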
static int nh_valid_get_bucket_req_res_bucket(struct nlattr *res,
					      u16 *bucket_index,
					      struct netlink_ext_ack *extack)
{
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_bucket_policy_get)];
	int err;

	err = nla_parse_nested(tb, ARRAY_SIZE(rtm_nh_res_bucket_policy_get) - 1,
			       res, rtm_nh_res_bucket_policy_get, extack);
	if (err < 0)
		return err;

	if (!tb[NHA_RES_BUCKET_INDEX]) {
		NL_SET_ERR_MSG(extack, "Bucket index is missing");
		return -EINVAL;
	}

	*bucket_index = nla_get_u16(tb[NHA_RES_BUCKET_INDEX]);
	return 0;
}

static int nh_valid_get_bucket_req(const struct nlmsghdr *nlh,
				   u32 *id, u16 *bucket_index,
				   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get_bucket)];
	int err;

	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
			  ARRAY_SIZE(rtm_nh_policy_get_bucket) - 1,
			  rtm_nh_policy_get_bucket, extack);
	if (err < 0)
		return err;

	err = nh_valid_get_del_req(nlh, tb, id, NULL, extack);
	if (err)
		return err;

	if (!tb[NHA_RES_BUCKET]) {
		NL_SET_ERR_MSG(extack, "Bucket information is missing");
		return -EINVAL;
	}

	err = nh_valid_get_bucket_req_res_bucket(tb[NHA_RES_BUCKET],
						 bucket_index, extack);
	if (err)
		return err;

	return 0;
}

/* rtnl */
static int rtm_get_nexthop_bucket(struct sk_buff *in_skb, struct nlmsghdr *nlh,
				  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct nh_res_table *res_table;
	struct sk_buff *skb = NULL;
	struct nh_group *nhg;
	struct nexthop *nh;
	u16 bucket_index;
	int err;
	u32 id;

	err = nh_valid_get_bucket_req(nlh, &id, &bucket_index, extack);
	if (err)
		return err;

	nh = nexthop_find_group_resilient(net, id, extack);
	if (IS_ERR(nh))
		return PTR_ERR(nh);

	nhg = rtnl_dereference(nh->nh_grp);
	res_table = rtnl_dereference(nhg->res_table);
	if (bucket_index >= res_table->num_nh_buckets) {
		NL_SET_ERR_MSG(extack, "Bucket index out of bounds");
		return -ENOENT;
	}

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	err = nh_fill_res_bucket(skb, nh, &res_table->nh_buckets[bucket_index],
				 bucket_index, RTM_NEWNEXTHOPBUCKET,
				 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
				 0, extack);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		goto errout_free;
	}

	return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);

errout_free:
	kfree_skb(skb);
	return err;
}

static void nexthop_sync_mtu(struct net_device *dev, u32 orig_mtu)
{
	unsigned int hash = nh_dev_hashfn(dev->ifindex);
	struct net *net = dev_net(dev);
	struct hlist_head *head = &net->nexthop.devhash[hash];
	struct hlist_node *n;
	struct nh_info *nhi;

	hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
		if (nhi->fib_nhc.nhc_dev == dev) {
			if (nhi->family == AF_INET)
				fib_nhc_update_mtu(&nhi->fib_nhc, dev->mtu,
						   orig_mtu);
		}
	}
}

/* rtnl */
static int nh_netdev_event(struct notifier_block *this,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_info_ext *info_ext;

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
		nexthop_flush_dev(dev, event);
		break;
	case NETDEV_CHANGE:
		if (!(dev_get_flags(dev) & (IFF_RUNNING | IFF_LOWER_UP)))
			nexthop_flush_dev(dev, event);
		break;
	case NETDEV_CHANGEMTU:
		info_ext = ptr;
		nexthop_sync_mtu(dev, info_ext->ext.mtu);
		rt_cache_flush(dev_net(dev));
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block nh_netdev_notifier = {
	.notifier_call = nh_netdev_event,
};

static int nexthops_dump(struct net *net, struct notifier_block *nb,
			 enum nexthop_event_type event_type,
			 struct netlink_ext_ack *extack)
{
	struct rb_root *root = &net->nexthop.rb_root;
	struct rb_node *node;
	int err = 0;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct nexthop *nh;

		nh = rb_entry(node, struct nexthop, rb_node);
		err = call_nexthop_notifier(nb, net, event_type, nh, extack);
		if (err)
			break;
	}

	return err;
}
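/* Note for notifier users (descriptive): registration replays the current
 * nexthop table as NEXTHOP_EVENT_REPLACE notifications under RTNL, and
 * unregistration replays it as NEXTHOP_EVENT_DEL, so a listener always
 * starts from, and is torn down to, a consistent view.
 */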
int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
			      struct netlink_ext_ack *extack)
{
	int err;

	rtnl_lock();
	err = nexthops_dump(net, nb, NEXTHOP_EVENT_REPLACE, extack);
	if (err)
		goto unlock;
	err = blocking_notifier_chain_register(&net->nexthop.notifier_chain,
					       nb);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_nexthop_notifier);

int __unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
{
	int err;

	err = blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
						 nb);
	if (!err)
		nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL);
	return err;
}
EXPORT_SYMBOL(__unregister_nexthop_notifier);

int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = __unregister_nexthop_notifier(net, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_nexthop_notifier);

void nexthop_set_hw_flags(struct net *net, u32 id, bool offload, bool trap)
{
	struct nexthop *nexthop;

	rcu_read_lock();

	nexthop = nexthop_find_by_id(net, id);
	if (!nexthop)
		goto out;

	nexthop->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
	if (offload)
		nexthop->nh_flags |= RTNH_F_OFFLOAD;
	if (trap)
		nexthop->nh_flags |= RTNH_F_TRAP;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(nexthop_set_hw_flags);

void nexthop_bucket_set_hw_flags(struct net *net, u32 id, u16 bucket_index,
				 bool offload, bool trap)
{
	struct nh_res_table *res_table;
	struct nh_res_bucket *bucket;
	struct nexthop *nexthop;
	struct nh_group *nhg;

	rcu_read_lock();

	nexthop = nexthop_find_by_id(net, id);
	if (!nexthop || !nexthop->is_group)
		goto out;

	nhg = rcu_dereference(nexthop->nh_grp);
	if (!nhg->resilient)
		goto out;

	if (bucket_index >= nhg->res_table->num_nh_buckets)
		goto out;

	res_table = rcu_dereference(nhg->res_table);
	bucket = &res_table->nh_buckets[bucket_index];
	bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
	if (offload)
		bucket->nh_flags |= RTNH_F_OFFLOAD;
	if (trap)
		bucket->nh_flags |= RTNH_F_TRAP;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(nexthop_bucket_set_hw_flags);
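/* Typical driver usage of the exported helpers above and below (a sketch,
 * not tied to any particular driver): after programming a nexthop into
 * hardware, a switch driver calls nexthop_set_hw_flags(net, id, true,
 * false) so that dumps report "offload", and periodically feeds hardware
 * activity bitmaps to nexthop_res_grp_activity_update() to keep busy
 * buckets from being migrated.
 */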
void nexthop_res_grp_activity_update(struct net *net, u32 id, u16 num_buckets,
				     unsigned long *activity)
{
	struct nh_res_table *res_table;
	struct nexthop *nexthop;
	struct nh_group *nhg;
	u16 i;

	rcu_read_lock();

	nexthop = nexthop_find_by_id(net, id);
	if (!nexthop || !nexthop->is_group)
		goto out;

	nhg = rcu_dereference(nexthop->nh_grp);
	if (!nhg->resilient)
		goto out;

	/* Instead of silently ignoring some buckets, demand that the sizes
	 * be the same.
	 */
	res_table = rcu_dereference(nhg->res_table);
	if (num_buckets != res_table->num_nh_buckets)
		goto out;

	for (i = 0; i < num_buckets; i++) {
		if (test_bit(i, activity))
			nh_res_bucket_set_busy(&res_table->nh_buckets[i]);
	}

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(nexthop_res_grp_activity_update);

static void __net_exit nexthop_net_exit_batch_rtnl(struct list_head *net_list,
						   struct list_head *dev_to_kill)
{
	struct net *net;

	ASSERT_RTNL();
	list_for_each_entry(net, net_list, exit_list)
		flush_all_nexthops(net);
}

static void __net_exit nexthop_net_exit(struct net *net)
{
	kfree(net->nexthop.devhash);
	net->nexthop.devhash = NULL;
}

static int __net_init nexthop_net_init(struct net *net)
{
	size_t sz = sizeof(struct hlist_head) * NH_DEV_HASHSIZE;

	net->nexthop.rb_root = RB_ROOT;
	net->nexthop.devhash = kzalloc(sz, GFP_KERNEL);
	if (!net->nexthop.devhash)
		return -ENOMEM;
	BLOCKING_INIT_NOTIFIER_HEAD(&net->nexthop.notifier_chain);

	return 0;
}

static struct pernet_operations nexthop_net_ops = {
	.init = nexthop_net_init,
	.exit = nexthop_net_exit,
	.exit_batch_rtnl = nexthop_net_exit_batch_rtnl,
};

static int __init nexthop_init(void)
{
	register_pernet_subsys(&nexthop_net_ops);

	register_netdevice_notifier(&nh_netdev_notifier);

	rtnl_register(PF_UNSPEC, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELNEXTHOP, rtm_del_nexthop, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNEXTHOP, rtm_get_nexthop,
		      rtm_dump_nexthop, 0);

	rtnl_register(PF_INET, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
	rtnl_register(PF_INET, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);

	rtnl_register(PF_INET6, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
	rtnl_register(PF_INET6, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);

	rtnl_register(PF_UNSPEC, RTM_GETNEXTHOPBUCKET, rtm_get_nexthop_bucket,
		      rtm_dump_nexthop_bucket, 0);

	return 0;
}
subsys_initcall(nexthop_init);