1 // SPDX-License-Identifier: GPL-2.0 2 /* Generic nexthop implementation 3 * 4 * Copyright (c) 2017-19 Cumulus Networks 5 * Copyright (c) 2017-19 David Ahern <dsa@cumulusnetworks.com> 6 */ 7 8 #include <linux/nexthop.h> 9 #include <linux/rtnetlink.h> 10 #include <linux/slab.h> 11 #include <linux/vmalloc.h> 12 #include <net/arp.h> 13 #include <net/ipv6_stubs.h> 14 #include <net/lwtunnel.h> 15 #include <net/ndisc.h> 16 #include <net/nexthop.h> 17 #include <net/route.h> 18 #include <net/sock.h> 19 20 #define NH_RES_DEFAULT_IDLE_TIMER (120 * HZ) 21 #define NH_RES_DEFAULT_UNBALANCED_TIMER 0 /* No forced rebalancing. */ 22 23 static void remove_nexthop(struct net *net, struct nexthop *nh, 24 struct nl_info *nlinfo); 25 26 #define NH_DEV_HASHBITS 8 27 #define NH_DEV_HASHSIZE (1U << NH_DEV_HASHBITS) 28 29 #define NHA_OP_FLAGS_DUMP_ALL (NHA_OP_FLAG_DUMP_STATS | \ 30 NHA_OP_FLAG_DUMP_HW_STATS) 31 32 static const struct nla_policy rtm_nh_policy_new[] = { 33 [NHA_ID] = { .type = NLA_U32 }, 34 [NHA_GROUP] = { .type = NLA_BINARY }, 35 [NHA_GROUP_TYPE] = { .type = NLA_U16 }, 36 [NHA_BLACKHOLE] = { .type = NLA_FLAG }, 37 [NHA_OIF] = { .type = NLA_U32 }, 38 [NHA_GATEWAY] = { .type = NLA_BINARY }, 39 [NHA_ENCAP_TYPE] = { .type = NLA_U16 }, 40 [NHA_ENCAP] = { .type = NLA_NESTED }, 41 [NHA_FDB] = { .type = NLA_FLAG }, 42 [NHA_RES_GROUP] = { .type = NLA_NESTED }, 43 [NHA_HW_STATS_ENABLE] = NLA_POLICY_MAX(NLA_U32, true), 44 }; 45 46 static const struct nla_policy rtm_nh_policy_get[] = { 47 [NHA_ID] = { .type = NLA_U32 }, 48 [NHA_OP_FLAGS] = NLA_POLICY_MASK(NLA_U32, 49 NHA_OP_FLAGS_DUMP_ALL), 50 }; 51 52 static const struct nla_policy rtm_nh_policy_del[] = { 53 [NHA_ID] = { .type = NLA_U32 }, 54 }; 55 56 static const struct nla_policy rtm_nh_policy_dump[] = { 57 [NHA_OIF] = { .type = NLA_U32 }, 58 [NHA_GROUPS] = { .type = NLA_FLAG }, 59 [NHA_MASTER] = { .type = NLA_U32 }, 60 [NHA_FDB] = { .type = NLA_FLAG }, 61 [NHA_OP_FLAGS] = NLA_POLICY_MASK(NLA_U32, 62 NHA_OP_FLAGS_DUMP_ALL), 63 }; 64 65 static const struct nla_policy rtm_nh_res_policy_new[] = { 66 [NHA_RES_GROUP_BUCKETS] = { .type = NLA_U16 }, 67 [NHA_RES_GROUP_IDLE_TIMER] = { .type = NLA_U32 }, 68 [NHA_RES_GROUP_UNBALANCED_TIMER] = { .type = NLA_U32 }, 69 }; 70 71 static const struct nla_policy rtm_nh_policy_dump_bucket[] = { 72 [NHA_ID] = { .type = NLA_U32 }, 73 [NHA_OIF] = { .type = NLA_U32 }, 74 [NHA_MASTER] = { .type = NLA_U32 }, 75 [NHA_RES_BUCKET] = { .type = NLA_NESTED }, 76 }; 77 78 static const struct nla_policy rtm_nh_res_bucket_policy_dump[] = { 79 [NHA_RES_BUCKET_NH_ID] = { .type = NLA_U32 }, 80 }; 81 82 static const struct nla_policy rtm_nh_policy_get_bucket[] = { 83 [NHA_ID] = { .type = NLA_U32 }, 84 [NHA_RES_BUCKET] = { .type = NLA_NESTED }, 85 }; 86 87 static const struct nla_policy rtm_nh_res_bucket_policy_get[] = { 88 [NHA_RES_BUCKET_INDEX] = { .type = NLA_U16 }, 89 }; 90 91 static bool nexthop_notifiers_is_empty(struct net *net) 92 { 93 return !net->nexthop.notifier_chain.head; 94 } 95 96 static void 97 __nh_notifier_single_info_init(struct nh_notifier_single_info *nh_info, 98 const struct nh_info *nhi) 99 { 100 nh_info->dev = nhi->fib_nhc.nhc_dev; 101 nh_info->gw_family = nhi->fib_nhc.nhc_gw_family; 102 if (nh_info->gw_family == AF_INET) 103 nh_info->ipv4 = nhi->fib_nhc.nhc_gw.ipv4; 104 else if (nh_info->gw_family == AF_INET6) 105 nh_info->ipv6 = nhi->fib_nhc.nhc_gw.ipv6; 106 107 nh_info->id = nhi->nh_parent->id; 108 nh_info->is_reject = nhi->reject_nh; 109 nh_info->is_fdb = nhi->fdb_nh; 110 nh_info->has_encap = !!nhi->fib_nhc.nhc_lwtstate; 111 } 112 113 static int nh_notifier_single_info_init(struct nh_notifier_info *info, 114 const struct nexthop *nh) 115 { 116 struct nh_info *nhi = rtnl_dereference(nh->nh_info); 117 118 info->type = NH_NOTIFIER_INFO_TYPE_SINGLE; 119 info->nh = kzalloc(sizeof(*info->nh), GFP_KERNEL); 120 if (!info->nh) 121 return -ENOMEM; 122 123 __nh_notifier_single_info_init(info->nh, nhi); 124 125 return 0; 126 } 127 128 static void nh_notifier_single_info_fini(struct nh_notifier_info *info) 129 { 130 kfree(info->nh); 131 } 132 133 static int nh_notifier_mpath_info_init(struct nh_notifier_info *info, 134 struct nh_group *nhg) 135 { 136 u16 num_nh = nhg->num_nh; 137 int i; 138 139 info->type = NH_NOTIFIER_INFO_TYPE_GRP; 140 info->nh_grp = kzalloc(struct_size(info->nh_grp, nh_entries, num_nh), 141 GFP_KERNEL); 142 if (!info->nh_grp) 143 return -ENOMEM; 144 145 info->nh_grp->num_nh = num_nh; 146 info->nh_grp->is_fdb = nhg->fdb_nh; 147 info->nh_grp->hw_stats = nhg->hw_stats; 148 149 for (i = 0; i < num_nh; i++) { 150 struct nh_grp_entry *nhge = &nhg->nh_entries[i]; 151 struct nh_info *nhi; 152 153 nhi = rtnl_dereference(nhge->nh->nh_info); 154 info->nh_grp->nh_entries[i].weight = nhge->weight; 155 __nh_notifier_single_info_init(&info->nh_grp->nh_entries[i].nh, 156 nhi); 157 } 158 159 return 0; 160 } 161 162 static int nh_notifier_res_table_info_init(struct nh_notifier_info *info, 163 struct nh_group *nhg) 164 { 165 struct nh_res_table *res_table = rtnl_dereference(nhg->res_table); 166 u16 num_nh_buckets = res_table->num_nh_buckets; 167 unsigned long size; 168 u16 i; 169 170 info->type = NH_NOTIFIER_INFO_TYPE_RES_TABLE; 171 size = struct_size(info->nh_res_table, nhs, num_nh_buckets); 172 info->nh_res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO | 173 __GFP_NOWARN); 174 if (!info->nh_res_table) 175 return -ENOMEM; 176 177 info->nh_res_table->num_nh_buckets = num_nh_buckets; 178 info->nh_res_table->hw_stats = nhg->hw_stats; 179 180 for (i = 0; i < num_nh_buckets; i++) { 181 struct nh_res_bucket *bucket = &res_table->nh_buckets[i]; 182 struct nh_grp_entry *nhge; 183 struct nh_info *nhi; 184 185 nhge = rtnl_dereference(bucket->nh_entry); 186 nhi = rtnl_dereference(nhge->nh->nh_info); 187 __nh_notifier_single_info_init(&info->nh_res_table->nhs[i], 188 nhi); 189 } 190 191 return 0; 192 } 193 194 static int nh_notifier_grp_info_init(struct nh_notifier_info *info, 195 const struct nexthop *nh) 196 { 197 struct nh_group *nhg = rtnl_dereference(nh->nh_grp); 198 199 if (nhg->hash_threshold) 200 return nh_notifier_mpath_info_init(info, nhg); 201 else if (nhg->resilient) 202 return nh_notifier_res_table_info_init(info, nhg); 203 return -EINVAL; 204 } 205 206 static void nh_notifier_grp_info_fini(struct nh_notifier_info *info, 207 const struct nexthop *nh) 208 { 209 struct nh_group *nhg = rtnl_dereference(nh->nh_grp); 210 211 if (nhg->hash_threshold) 212 kfree(info->nh_grp); 213 else if (nhg->resilient) 214 vfree(info->nh_res_table); 215 } 216 217 static int nh_notifier_info_init(struct nh_notifier_info *info, 218 const struct nexthop *nh) 219 { 220 info->id = nh->id; 221 222 if (nh->is_group) 223 return nh_notifier_grp_info_init(info, nh); 224 else 225 return nh_notifier_single_info_init(info, nh); 226 } 227 228 static void nh_notifier_info_fini(struct nh_notifier_info *info, 229 const struct nexthop *nh) 230 { 231 if (nh->is_group) 232 nh_notifier_grp_info_fini(info, nh); 233 else 234 nh_notifier_single_info_fini(info); 235 } 236 237 static int call_nexthop_notifiers(struct net *net, 238 enum nexthop_event_type event_type, 239 struct nexthop *nh, 240 struct netlink_ext_ack *extack) 241 { 242 struct nh_notifier_info info = { 243 .net = net, 244 .extack = extack, 245 }; 246 int err; 247 248 ASSERT_RTNL(); 249 250 if (nexthop_notifiers_is_empty(net)) 251 return 0; 252 253 err = nh_notifier_info_init(&info, nh); 254 if (err) { 255 NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info"); 256 return err; 257 } 258 259 err = blocking_notifier_call_chain(&net->nexthop.notifier_chain, 260 event_type, &info); 261 nh_notifier_info_fini(&info, nh); 262 263 return notifier_to_errno(err); 264 } 265 266 static int 267 nh_notifier_res_bucket_idle_timer_get(const struct nh_notifier_info *info, 268 bool force, unsigned int *p_idle_timer_ms) 269 { 270 struct nh_res_table *res_table; 271 struct nh_group *nhg; 272 struct nexthop *nh; 273 int err = 0; 274 275 /* When 'force' is false, nexthop bucket replacement is performed 276 * because the bucket was deemed to be idle. In this case, capable 277 * listeners can choose to perform an atomic replacement: The bucket is 278 * only replaced if it is inactive. However, if the idle timer interval 279 * is smaller than the interval in which a listener is querying 280 * buckets' activity from the device, then atomic replacement should 281 * not be tried. Pass the idle timer value to listeners, so that they 282 * could determine which type of replacement to perform. 283 */ 284 if (force) { 285 *p_idle_timer_ms = 0; 286 return 0; 287 } 288 289 rcu_read_lock(); 290 291 nh = nexthop_find_by_id(info->net, info->id); 292 if (!nh) { 293 err = -EINVAL; 294 goto out; 295 } 296 297 nhg = rcu_dereference(nh->nh_grp); 298 res_table = rcu_dereference(nhg->res_table); 299 *p_idle_timer_ms = jiffies_to_msecs(res_table->idle_timer); 300 301 out: 302 rcu_read_unlock(); 303 304 return err; 305 } 306 307 static int nh_notifier_res_bucket_info_init(struct nh_notifier_info *info, 308 u16 bucket_index, bool force, 309 struct nh_info *oldi, 310 struct nh_info *newi) 311 { 312 unsigned int idle_timer_ms; 313 int err; 314 315 err = nh_notifier_res_bucket_idle_timer_get(info, force, 316 &idle_timer_ms); 317 if (err) 318 return err; 319 320 info->type = NH_NOTIFIER_INFO_TYPE_RES_BUCKET; 321 info->nh_res_bucket = kzalloc(sizeof(*info->nh_res_bucket), 322 GFP_KERNEL); 323 if (!info->nh_res_bucket) 324 return -ENOMEM; 325 326 info->nh_res_bucket->bucket_index = bucket_index; 327 info->nh_res_bucket->idle_timer_ms = idle_timer_ms; 328 info->nh_res_bucket->force = force; 329 __nh_notifier_single_info_init(&info->nh_res_bucket->old_nh, oldi); 330 __nh_notifier_single_info_init(&info->nh_res_bucket->new_nh, newi); 331 return 0; 332 } 333 334 static void nh_notifier_res_bucket_info_fini(struct nh_notifier_info *info) 335 { 336 kfree(info->nh_res_bucket); 337 } 338 339 static int __call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id, 340 u16 bucket_index, bool force, 341 struct nh_info *oldi, 342 struct nh_info *newi, 343 struct netlink_ext_ack *extack) 344 { 345 struct nh_notifier_info info = { 346 .net = net, 347 .extack = extack, 348 .id = nhg_id, 349 }; 350 int err; 351 352 if (nexthop_notifiers_is_empty(net)) 353 return 0; 354 355 err = nh_notifier_res_bucket_info_init(&info, bucket_index, force, 356 oldi, newi); 357 if (err) 358 return err; 359 360 err = blocking_notifier_call_chain(&net->nexthop.notifier_chain, 361 NEXTHOP_EVENT_BUCKET_REPLACE, &info); 362 nh_notifier_res_bucket_info_fini(&info); 363 364 return notifier_to_errno(err); 365 } 366 367 /* There are three users of RES_TABLE, and NHs etc. referenced from there: 368 * 369 * 1) a collection of callbacks for NH maintenance. This operates under 370 * RTNL, 371 * 2) the delayed work that gradually balances the resilient table, 372 * 3) and nexthop_select_path(), operating under RCU. 373 * 374 * Both the delayed work and the RTNL block are writers, and need to 375 * maintain mutual exclusion. Since there are only two and well-known 376 * writers for each table, the RTNL code can make sure it has exclusive 377 * access thus: 378 * 379 * - Have the DW operate without locking; 380 * - synchronously cancel the DW; 381 * - do the writing; 382 * - if the write was not actually a delete, call upkeep, which schedules 383 * DW again if necessary. 384 * 385 * The functions that are always called from the RTNL context use 386 * rtnl_dereference(). The functions that can also be called from the DW do 387 * a raw dereference and rely on the above mutual exclusion scheme. 388 */ 389 #define nh_res_dereference(p) (rcu_dereference_raw(p)) 390 391 static int call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id, 392 u16 bucket_index, bool force, 393 struct nexthop *old_nh, 394 struct nexthop *new_nh, 395 struct netlink_ext_ack *extack) 396 { 397 struct nh_info *oldi = nh_res_dereference(old_nh->nh_info); 398 struct nh_info *newi = nh_res_dereference(new_nh->nh_info); 399 400 return __call_nexthop_res_bucket_notifiers(net, nhg_id, bucket_index, 401 force, oldi, newi, extack); 402 } 403 404 static int call_nexthop_res_table_notifiers(struct net *net, struct nexthop *nh, 405 struct netlink_ext_ack *extack) 406 { 407 struct nh_notifier_info info = { 408 .net = net, 409 .extack = extack, 410 .id = nh->id, 411 }; 412 struct nh_group *nhg; 413 int err; 414 415 ASSERT_RTNL(); 416 417 if (nexthop_notifiers_is_empty(net)) 418 return 0; 419 420 /* At this point, the nexthop buckets are still not populated. Only 421 * emit a notification with the logical nexthops, so that a listener 422 * could potentially veto it in case of unsupported configuration. 423 */ 424 nhg = rtnl_dereference(nh->nh_grp); 425 err = nh_notifier_mpath_info_init(&info, nhg); 426 if (err) { 427 NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info"); 428 return err; 429 } 430 431 err = blocking_notifier_call_chain(&net->nexthop.notifier_chain, 432 NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE, 433 &info); 434 kfree(info.nh_grp); 435 436 return notifier_to_errno(err); 437 } 438 439 static int call_nexthop_notifier(struct notifier_block *nb, struct net *net, 440 enum nexthop_event_type event_type, 441 struct nexthop *nh, 442 struct netlink_ext_ack *extack) 443 { 444 struct nh_notifier_info info = { 445 .net = net, 446 .extack = extack, 447 }; 448 int err; 449 450 err = nh_notifier_info_init(&info, nh); 451 if (err) 452 return err; 453 454 err = nb->notifier_call(nb, event_type, &info); 455 nh_notifier_info_fini(&info, nh); 456 457 return notifier_to_errno(err); 458 } 459 460 static unsigned int nh_dev_hashfn(unsigned int val) 461 { 462 unsigned int mask = NH_DEV_HASHSIZE - 1; 463 464 return (val ^ 465 (val >> NH_DEV_HASHBITS) ^ 466 (val >> (NH_DEV_HASHBITS * 2))) & mask; 467 } 468 469 static void nexthop_devhash_add(struct net *net, struct nh_info *nhi) 470 { 471 struct net_device *dev = nhi->fib_nhc.nhc_dev; 472 struct hlist_head *head; 473 unsigned int hash; 474 475 WARN_ON(!dev); 476 477 hash = nh_dev_hashfn(dev->ifindex); 478 head = &net->nexthop.devhash[hash]; 479 hlist_add_head(&nhi->dev_hash, head); 480 } 481 482 static void nexthop_free_group(struct nexthop *nh) 483 { 484 struct nh_group *nhg; 485 int i; 486 487 nhg = rcu_dereference_raw(nh->nh_grp); 488 for (i = 0; i < nhg->num_nh; ++i) { 489 struct nh_grp_entry *nhge = &nhg->nh_entries[i]; 490 491 WARN_ON(!list_empty(&nhge->nh_list)); 492 free_percpu(nhge->stats); 493 nexthop_put(nhge->nh); 494 } 495 496 WARN_ON(nhg->spare == nhg); 497 498 if (nhg->resilient) 499 vfree(rcu_dereference_raw(nhg->res_table)); 500 501 kfree(nhg->spare); 502 kfree(nhg); 503 } 504 505 static void nexthop_free_single(struct nexthop *nh) 506 { 507 struct nh_info *nhi; 508 509 nhi = rcu_dereference_raw(nh->nh_info); 510 switch (nhi->family) { 511 case AF_INET: 512 fib_nh_release(nh->net, &nhi->fib_nh); 513 break; 514 case AF_INET6: 515 ipv6_stub->fib6_nh_release(&nhi->fib6_nh); 516 break; 517 } 518 kfree(nhi); 519 } 520 521 void nexthop_free_rcu(struct rcu_head *head) 522 { 523 struct nexthop *nh = container_of(head, struct nexthop, rcu); 524 525 if (nh->is_group) 526 nexthop_free_group(nh); 527 else 528 nexthop_free_single(nh); 529 530 kfree(nh); 531 } 532 EXPORT_SYMBOL_GPL(nexthop_free_rcu); 533 534 static struct nexthop *nexthop_alloc(void) 535 { 536 struct nexthop *nh; 537 538 nh = kzalloc(sizeof(struct nexthop), GFP_KERNEL); 539 if (nh) { 540 INIT_LIST_HEAD(&nh->fi_list); 541 INIT_LIST_HEAD(&nh->f6i_list); 542 INIT_LIST_HEAD(&nh->grp_list); 543 INIT_LIST_HEAD(&nh->fdb_list); 544 } 545 return nh; 546 } 547 548 static struct nh_group *nexthop_grp_alloc(u16 num_nh) 549 { 550 struct nh_group *nhg; 551 552 nhg = kzalloc(struct_size(nhg, nh_entries, num_nh), GFP_KERNEL); 553 if (nhg) 554 nhg->num_nh = num_nh; 555 556 return nhg; 557 } 558 559 static void nh_res_table_upkeep_dw(struct work_struct *work); 560 561 static struct nh_res_table * 562 nexthop_res_table_alloc(struct net *net, u32 nhg_id, struct nh_config *cfg) 563 { 564 const u16 num_nh_buckets = cfg->nh_grp_res_num_buckets; 565 struct nh_res_table *res_table; 566 unsigned long size; 567 568 size = struct_size(res_table, nh_buckets, num_nh_buckets); 569 res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN); 570 if (!res_table) 571 return NULL; 572 573 res_table->net = net; 574 res_table->nhg_id = nhg_id; 575 INIT_DELAYED_WORK(&res_table->upkeep_dw, &nh_res_table_upkeep_dw); 576 INIT_LIST_HEAD(&res_table->uw_nh_entries); 577 res_table->idle_timer = cfg->nh_grp_res_idle_timer; 578 res_table->unbalanced_timer = cfg->nh_grp_res_unbalanced_timer; 579 res_table->num_nh_buckets = num_nh_buckets; 580 return res_table; 581 } 582 583 static void nh_base_seq_inc(struct net *net) 584 { 585 while (++net->nexthop.seq == 0) 586 ; 587 } 588 589 /* no reference taken; rcu lock or rtnl must be held */ 590 struct nexthop *nexthop_find_by_id(struct net *net, u32 id) 591 { 592 struct rb_node **pp, *parent = NULL, *next; 593 594 pp = &net->nexthop.rb_root.rb_node; 595 while (1) { 596 struct nexthop *nh; 597 598 next = rcu_dereference_raw(*pp); 599 if (!next) 600 break; 601 parent = next; 602 603 nh = rb_entry(parent, struct nexthop, rb_node); 604 if (id < nh->id) 605 pp = &next->rb_left; 606 else if (id > nh->id) 607 pp = &next->rb_right; 608 else 609 return nh; 610 } 611 return NULL; 612 } 613 EXPORT_SYMBOL_GPL(nexthop_find_by_id); 614 615 /* used for auto id allocation; called with rtnl held */ 616 static u32 nh_find_unused_id(struct net *net) 617 { 618 u32 id_start = net->nexthop.last_id_allocated; 619 620 while (1) { 621 net->nexthop.last_id_allocated++; 622 if (net->nexthop.last_id_allocated == id_start) 623 break; 624 625 if (!nexthop_find_by_id(net, net->nexthop.last_id_allocated)) 626 return net->nexthop.last_id_allocated; 627 } 628 return 0; 629 } 630 631 static void nh_res_time_set_deadline(unsigned long next_time, 632 unsigned long *deadline) 633 { 634 if (time_before(next_time, *deadline)) 635 *deadline = next_time; 636 } 637 638 static clock_t nh_res_table_unbalanced_time(struct nh_res_table *res_table) 639 { 640 if (list_empty(&res_table->uw_nh_entries)) 641 return 0; 642 return jiffies_delta_to_clock_t(jiffies - res_table->unbalanced_since); 643 } 644 645 static int nla_put_nh_group_res(struct sk_buff *skb, struct nh_group *nhg) 646 { 647 struct nh_res_table *res_table = rtnl_dereference(nhg->res_table); 648 struct nlattr *nest; 649 650 nest = nla_nest_start(skb, NHA_RES_GROUP); 651 if (!nest) 652 return -EMSGSIZE; 653 654 if (nla_put_u16(skb, NHA_RES_GROUP_BUCKETS, 655 res_table->num_nh_buckets) || 656 nla_put_u32(skb, NHA_RES_GROUP_IDLE_TIMER, 657 jiffies_to_clock_t(res_table->idle_timer)) || 658 nla_put_u32(skb, NHA_RES_GROUP_UNBALANCED_TIMER, 659 jiffies_to_clock_t(res_table->unbalanced_timer)) || 660 nla_put_u64_64bit(skb, NHA_RES_GROUP_UNBALANCED_TIME, 661 nh_res_table_unbalanced_time(res_table), 662 NHA_RES_GROUP_PAD)) 663 goto nla_put_failure; 664 665 nla_nest_end(skb, nest); 666 return 0; 667 668 nla_put_failure: 669 nla_nest_cancel(skb, nest); 670 return -EMSGSIZE; 671 } 672 673 static void nh_grp_entry_stats_inc(struct nh_grp_entry *nhge) 674 { 675 struct nh_grp_entry_stats *cpu_stats; 676 677 cpu_stats = this_cpu_ptr(nhge->stats); 678 u64_stats_update_begin(&cpu_stats->syncp); 679 u64_stats_inc(&cpu_stats->packets); 680 u64_stats_update_end(&cpu_stats->syncp); 681 } 682 683 static void nh_grp_entry_stats_read(struct nh_grp_entry *nhge, 684 u64 *ret_packets) 685 { 686 int i; 687 688 *ret_packets = 0; 689 690 for_each_possible_cpu(i) { 691 struct nh_grp_entry_stats *cpu_stats; 692 unsigned int start; 693 u64 packets; 694 695 cpu_stats = per_cpu_ptr(nhge->stats, i); 696 do { 697 start = u64_stats_fetch_begin(&cpu_stats->syncp); 698 packets = u64_stats_read(&cpu_stats->packets); 699 } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); 700 701 *ret_packets += packets; 702 } 703 } 704 705 static int nh_notifier_grp_hw_stats_init(struct nh_notifier_info *info, 706 const struct nexthop *nh) 707 { 708 struct nh_group *nhg; 709 int i; 710 711 ASSERT_RTNL(); 712 nhg = rtnl_dereference(nh->nh_grp); 713 714 info->id = nh->id; 715 info->type = NH_NOTIFIER_INFO_TYPE_GRP_HW_STATS; 716 info->nh_grp_hw_stats = kzalloc(struct_size(info->nh_grp_hw_stats, 717 stats, nhg->num_nh), 718 GFP_KERNEL); 719 if (!info->nh_grp_hw_stats) 720 return -ENOMEM; 721 722 info->nh_grp_hw_stats->num_nh = nhg->num_nh; 723 for (i = 0; i < nhg->num_nh; i++) { 724 struct nh_grp_entry *nhge = &nhg->nh_entries[i]; 725 726 info->nh_grp_hw_stats->stats[i].id = nhge->nh->id; 727 } 728 729 return 0; 730 } 731 732 static void nh_notifier_grp_hw_stats_fini(struct nh_notifier_info *info) 733 { 734 kfree(info->nh_grp_hw_stats); 735 } 736 737 void nh_grp_hw_stats_report_delta(struct nh_notifier_grp_hw_stats_info *info, 738 unsigned int nh_idx, 739 u64 delta_packets) 740 { 741 info->hw_stats_used = true; 742 info->stats[nh_idx].packets += delta_packets; 743 } 744 EXPORT_SYMBOL(nh_grp_hw_stats_report_delta); 745 746 static void nh_grp_hw_stats_apply_update(struct nexthop *nh, 747 struct nh_notifier_info *info) 748 { 749 struct nh_group *nhg; 750 int i; 751 752 ASSERT_RTNL(); 753 nhg = rtnl_dereference(nh->nh_grp); 754 755 for (i = 0; i < nhg->num_nh; i++) { 756 struct nh_grp_entry *nhge = &nhg->nh_entries[i]; 757 758 nhge->packets_hw += info->nh_grp_hw_stats->stats[i].packets; 759 } 760 } 761 762 static int nh_grp_hw_stats_update(struct nexthop *nh, bool *hw_stats_used) 763 { 764 struct nh_notifier_info info = { 765 .net = nh->net, 766 }; 767 struct net *net = nh->net; 768 int err; 769 770 if (nexthop_notifiers_is_empty(net)) 771 return 0; 772 773 err = nh_notifier_grp_hw_stats_init(&info, nh); 774 if (err) 775 return err; 776 777 err = blocking_notifier_call_chain(&net->nexthop.notifier_chain, 778 NEXTHOP_EVENT_HW_STATS_REPORT_DELTA, 779 &info); 780 781 /* Cache whatever we got, even if there was an error, otherwise the 782 * successful stats retrievals would get lost. 783 */ 784 nh_grp_hw_stats_apply_update(nh, &info); 785 *hw_stats_used = info.nh_grp_hw_stats->hw_stats_used; 786 787 nh_notifier_grp_hw_stats_fini(&info); 788 return notifier_to_errno(err); 789 } 790 791 static int nla_put_nh_group_stats_entry(struct sk_buff *skb, 792 struct nh_grp_entry *nhge, 793 u32 op_flags) 794 { 795 struct nlattr *nest; 796 u64 packets; 797 798 nh_grp_entry_stats_read(nhge, &packets); 799 800 nest = nla_nest_start(skb, NHA_GROUP_STATS_ENTRY); 801 if (!nest) 802 return -EMSGSIZE; 803 804 if (nla_put_u32(skb, NHA_GROUP_STATS_ENTRY_ID, nhge->nh->id) || 805 nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS, 806 packets + nhge->packets_hw)) 807 goto nla_put_failure; 808 809 if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS && 810 nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS_HW, 811 nhge->packets_hw)) 812 goto nla_put_failure; 813 814 nla_nest_end(skb, nest); 815 return 0; 816 817 nla_put_failure: 818 nla_nest_cancel(skb, nest); 819 return -EMSGSIZE; 820 } 821 822 static int nla_put_nh_group_stats(struct sk_buff *skb, struct nexthop *nh, 823 u32 op_flags) 824 { 825 struct nh_group *nhg = rtnl_dereference(nh->nh_grp); 826 struct nlattr *nest; 827 bool hw_stats_used; 828 int err; 829 int i; 830 831 if (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats)) 832 goto err_out; 833 834 if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS && 835 nhg->hw_stats) { 836 err = nh_grp_hw_stats_update(nh, &hw_stats_used); 837 if (err) 838 goto out; 839 840 if (nla_put_u32(skb, NHA_HW_STATS_USED, hw_stats_used)) 841 goto err_out; 842 } 843 844 nest = nla_nest_start(skb, NHA_GROUP_STATS); 845 if (!nest) 846 goto err_out; 847 848 for (i = 0; i < nhg->num_nh; i++) 849 if (nla_put_nh_group_stats_entry(skb, &nhg->nh_entries[i], 850 op_flags)) 851 goto cancel_out; 852 853 nla_nest_end(skb, nest); 854 return 0; 855 856 cancel_out: 857 nla_nest_cancel(skb, nest); 858 err_out: 859 err = -EMSGSIZE; 860 out: 861 return err; 862 } 863 864 static int nla_put_nh_group(struct sk_buff *skb, struct nexthop *nh, 865 u32 op_flags) 866 { 867 struct nh_group *nhg = rtnl_dereference(nh->nh_grp); 868 struct nexthop_grp *p; 869 size_t len = nhg->num_nh * sizeof(*p); 870 struct nlattr *nla; 871 u16 group_type = 0; 872 int i; 873 874 if (nhg->hash_threshold) 875 group_type = NEXTHOP_GRP_TYPE_MPATH; 876 else if (nhg->resilient) 877 group_type = NEXTHOP_GRP_TYPE_RES; 878 879 if (nla_put_u16(skb, NHA_GROUP_TYPE, group_type)) 880 goto nla_put_failure; 881 882 nla = nla_reserve(skb, NHA_GROUP, len); 883 if (!nla) 884 goto nla_put_failure; 885 886 p = nla_data(nla); 887 for (i = 0; i < nhg->num_nh; ++i) { 888 p->id = nhg->nh_entries[i].nh->id; 889 p->weight = nhg->nh_entries[i].weight - 1; 890 p += 1; 891 } 892 893 if (nhg->resilient && nla_put_nh_group_res(skb, nhg)) 894 goto nla_put_failure; 895 896 if (op_flags & NHA_OP_FLAG_DUMP_STATS && 897 (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats) || 898 nla_put_nh_group_stats(skb, nh, op_flags))) 899 goto nla_put_failure; 900 901 return 0; 902 903 nla_put_failure: 904 return -EMSGSIZE; 905 } 906 907 static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh, 908 int event, u32 portid, u32 seq, unsigned int nlflags, 909 u32 op_flags) 910 { 911 struct fib6_nh *fib6_nh; 912 struct fib_nh *fib_nh; 913 struct nlmsghdr *nlh; 914 struct nh_info *nhi; 915 struct nhmsg *nhm; 916 917 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags); 918 if (!nlh) 919 return -EMSGSIZE; 920 921 nhm = nlmsg_data(nlh); 922 nhm->nh_family = AF_UNSPEC; 923 nhm->nh_flags = nh->nh_flags; 924 nhm->nh_protocol = nh->protocol; 925 nhm->nh_scope = 0; 926 nhm->resvd = 0; 927 928 if (nla_put_u32(skb, NHA_ID, nh->id)) 929 goto nla_put_failure; 930 931 if (nh->is_group) { 932 struct nh_group *nhg = rtnl_dereference(nh->nh_grp); 933 934 if (nhg->fdb_nh && nla_put_flag(skb, NHA_FDB)) 935 goto nla_put_failure; 936 if (nla_put_nh_group(skb, nh, op_flags)) 937 goto nla_put_failure; 938 goto out; 939 } 940 941 nhi = rtnl_dereference(nh->nh_info); 942 nhm->nh_family = nhi->family; 943 if (nhi->reject_nh) { 944 if (nla_put_flag(skb, NHA_BLACKHOLE)) 945 goto nla_put_failure; 946 goto out; 947 } else if (nhi->fdb_nh) { 948 if (nla_put_flag(skb, NHA_FDB)) 949 goto nla_put_failure; 950 } else { 951 const struct net_device *dev; 952 953 dev = nhi->fib_nhc.nhc_dev; 954 if (dev && nla_put_u32(skb, NHA_OIF, dev->ifindex)) 955 goto nla_put_failure; 956 } 957 958 nhm->nh_scope = nhi->fib_nhc.nhc_scope; 959 switch (nhi->family) { 960 case AF_INET: 961 fib_nh = &nhi->fib_nh; 962 if (fib_nh->fib_nh_gw_family && 963 nla_put_be32(skb, NHA_GATEWAY, fib_nh->fib_nh_gw4)) 964 goto nla_put_failure; 965 break; 966 967 case AF_INET6: 968 fib6_nh = &nhi->fib6_nh; 969 if (fib6_nh->fib_nh_gw_family && 970 nla_put_in6_addr(skb, NHA_GATEWAY, &fib6_nh->fib_nh_gw6)) 971 goto nla_put_failure; 972 break; 973 } 974 975 if (nhi->fib_nhc.nhc_lwtstate && 976 lwtunnel_fill_encap(skb, nhi->fib_nhc.nhc_lwtstate, 977 NHA_ENCAP, NHA_ENCAP_TYPE) < 0) 978 goto nla_put_failure; 979 980 out: 981 nlmsg_end(skb, nlh); 982 return 0; 983 984 nla_put_failure: 985 nlmsg_cancel(skb, nlh); 986 return -EMSGSIZE; 987 } 988 989 static size_t nh_nlmsg_size_grp_res(struct nh_group *nhg) 990 { 991 return nla_total_size(0) + /* NHA_RES_GROUP */ 992 nla_total_size(2) + /* NHA_RES_GROUP_BUCKETS */ 993 nla_total_size(4) + /* NHA_RES_GROUP_IDLE_TIMER */ 994 nla_total_size(4) + /* NHA_RES_GROUP_UNBALANCED_TIMER */ 995 nla_total_size_64bit(8);/* NHA_RES_GROUP_UNBALANCED_TIME */ 996 } 997 998 static size_t nh_nlmsg_size_grp(struct nexthop *nh) 999 { 1000 struct nh_group *nhg = rtnl_dereference(nh->nh_grp); 1001 size_t sz = sizeof(struct nexthop_grp) * nhg->num_nh; 1002 size_t tot = nla_total_size(sz) + 1003 nla_total_size(2); /* NHA_GROUP_TYPE */ 1004 1005 if (nhg->resilient) 1006 tot += nh_nlmsg_size_grp_res(nhg); 1007 1008 return tot; 1009 } 1010 1011 static size_t nh_nlmsg_size_single(struct nexthop *nh) 1012 { 1013 struct nh_info *nhi = rtnl_dereference(nh->nh_info); 1014 size_t sz; 1015 1016 /* covers NHA_BLACKHOLE since NHA_OIF and BLACKHOLE 1017 * are mutually exclusive 1018 */ 1019 sz = nla_total_size(4); /* NHA_OIF */ 1020 1021 switch (nhi->family) { 1022 case AF_INET: 1023 if (nhi->fib_nh.fib_nh_gw_family) 1024 sz += nla_total_size(4); /* NHA_GATEWAY */ 1025 break; 1026 1027 case AF_INET6: 1028 /* NHA_GATEWAY */ 1029 if (nhi->fib6_nh.fib_nh_gw_family) 1030 sz += nla_total_size(sizeof(const struct in6_addr)); 1031 break; 1032 } 1033 1034 if (nhi->fib_nhc.nhc_lwtstate) { 1035 sz += lwtunnel_get_encap_size(nhi->fib_nhc.nhc_lwtstate); 1036 sz += nla_total_size(2); /* NHA_ENCAP_TYPE */ 1037 } 1038 1039 return sz; 1040 } 1041 1042 static size_t nh_nlmsg_size(struct nexthop *nh) 1043 { 1044 size_t sz = NLMSG_ALIGN(sizeof(struct nhmsg)); 1045 1046 sz += nla_total_size(4); /* NHA_ID */ 1047 1048 if (nh->is_group) 1049 sz += nh_nlmsg_size_grp(nh); 1050 else 1051 sz += nh_nlmsg_size_single(nh); 1052 1053 return sz; 1054 } 1055 1056 static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info) 1057 { 1058 unsigned int nlflags = info->nlh ? info->nlh->nlmsg_flags : 0; 1059 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0; 1060 struct sk_buff *skb; 1061 int err = -ENOBUFS; 1062 1063 skb = nlmsg_new(nh_nlmsg_size(nh), gfp_any()); 1064 if (!skb) 1065 goto errout; 1066 1067 err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags, 0); 1068 if (err < 0) { 1069 /* -EMSGSIZE implies BUG in nh_nlmsg_size() */ 1070 WARN_ON(err == -EMSGSIZE); 1071 kfree_skb(skb); 1072 goto errout; 1073 } 1074 1075 rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_NEXTHOP, 1076 info->nlh, gfp_any()); 1077 return; 1078 errout: 1079 if (err < 0) 1080 rtnl_set_sk_err(info->nl_net, RTNLGRP_NEXTHOP, err); 1081 } 1082 1083 static unsigned long nh_res_bucket_used_time(const struct nh_res_bucket *bucket) 1084 { 1085 return (unsigned long)atomic_long_read(&bucket->used_time); 1086 } 1087 1088 static unsigned long 1089 nh_res_bucket_idle_point(const struct nh_res_table *res_table, 1090 const struct nh_res_bucket *bucket, 1091 unsigned long now) 1092 { 1093 unsigned long time = nh_res_bucket_used_time(bucket); 1094 1095 /* Bucket was not used since it was migrated. The idle time is now. */ 1096 if (time == bucket->migrated_time) 1097 return now; 1098 1099 return time + res_table->idle_timer; 1100 } 1101 1102 static unsigned long 1103 nh_res_table_unb_point(const struct nh_res_table *res_table) 1104 { 1105 return res_table->unbalanced_since + res_table->unbalanced_timer; 1106 } 1107 1108 static void nh_res_bucket_set_idle(const struct nh_res_table *res_table, 1109 struct nh_res_bucket *bucket) 1110 { 1111 unsigned long now = jiffies; 1112 1113 atomic_long_set(&bucket->used_time, (long)now); 1114 bucket->migrated_time = now; 1115 } 1116 1117 static void nh_res_bucket_set_busy(struct nh_res_bucket *bucket) 1118 { 1119 atomic_long_set(&bucket->used_time, (long)jiffies); 1120 } 1121 1122 static clock_t nh_res_bucket_idle_time(const struct nh_res_bucket *bucket) 1123 { 1124 unsigned long used_time = nh_res_bucket_used_time(bucket); 1125 1126 return jiffies_delta_to_clock_t(jiffies - used_time); 1127 } 1128 1129 static int nh_fill_res_bucket(struct sk_buff *skb, struct nexthop *nh, 1130 struct nh_res_bucket *bucket, u16 bucket_index, 1131 int event, u32 portid, u32 seq, 1132 unsigned int nlflags, 1133 struct netlink_ext_ack *extack) 1134 { 1135 struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry); 1136 struct nlmsghdr *nlh; 1137 struct nlattr *nest; 1138 struct nhmsg *nhm; 1139 1140 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags); 1141 if (!nlh) 1142 return -EMSGSIZE; 1143 1144 nhm = nlmsg_data(nlh); 1145 nhm->nh_family = AF_UNSPEC; 1146 nhm->nh_flags = bucket->nh_flags; 1147 nhm->nh_protocol = nh->protocol; 1148 nhm->nh_scope = 0; 1149 nhm->resvd = 0; 1150 1151 if (nla_put_u32(skb, NHA_ID, nh->id)) 1152 goto nla_put_failure; 1153 1154 nest = nla_nest_start(skb, NHA_RES_BUCKET); 1155 if (!nest) 1156 goto nla_put_failure; 1157 1158 if (nla_put_u16(skb, NHA_RES_BUCKET_INDEX, bucket_index) || 1159 nla_put_u32(skb, NHA_RES_BUCKET_NH_ID, nhge->nh->id) || 1160 nla_put_u64_64bit(skb, NHA_RES_BUCKET_IDLE_TIME, 1161 nh_res_bucket_idle_time(bucket), 1162 NHA_RES_BUCKET_PAD)) 1163 goto nla_put_failure_nest; 1164 1165 nla_nest_end(skb, nest); 1166 nlmsg_end(skb, nlh); 1167 return 0; 1168 1169 nla_put_failure_nest: 1170 nla_nest_cancel(skb, nest); 1171 nla_put_failure: 1172 nlmsg_cancel(skb, nlh); 1173 return -EMSGSIZE; 1174 } 1175 1176 static void nexthop_bucket_notify(struct nh_res_table *res_table, 1177 u16 bucket_index) 1178 { 1179 struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index]; 1180 struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry); 1181 struct nexthop *nh = nhge->nh_parent; 1182 struct sk_buff *skb; 1183 int err = -ENOBUFS; 1184 1185 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 1186 if (!skb) 1187 goto errout; 1188 1189 err = nh_fill_res_bucket(skb, nh, bucket, bucket_index, 1190 RTM_NEWNEXTHOPBUCKET, 0, 0, NLM_F_REPLACE, 1191 NULL); 1192 if (err < 0) { 1193 kfree_skb(skb); 1194 goto errout; 1195 } 1196 1197 rtnl_notify(skb, nh->net, 0, RTNLGRP_NEXTHOP, NULL, GFP_KERNEL); 1198 return; 1199 errout: 1200 if (err < 0) 1201 rtnl_set_sk_err(nh->net, RTNLGRP_NEXTHOP, err); 1202 } 1203 1204 static bool valid_group_nh(struct nexthop *nh, unsigned int npaths, 1205 bool *is_fdb, struct netlink_ext_ack *extack) 1206 { 1207 if (nh->is_group) { 1208 struct nh_group *nhg = rtnl_dereference(nh->nh_grp); 1209 1210 /* Nesting groups within groups is not supported. */ 1211 if (nhg->hash_threshold) { 1212 NL_SET_ERR_MSG(extack, 1213 "Hash-threshold group can not be a nexthop within a group"); 1214 return false; 1215 } 1216 if (nhg->resilient) { 1217 NL_SET_ERR_MSG(extack, 1218 "Resilient group can not be a nexthop within a group"); 1219 return false; 1220 } 1221 *is_fdb = nhg->fdb_nh; 1222 } else { 1223 struct nh_info *nhi = rtnl_dereference(nh->nh_info); 1224 1225 if (nhi->reject_nh && npaths > 1) { 1226 NL_SET_ERR_MSG(extack, 1227 "Blackhole nexthop can not be used in a group with more than 1 path"); 1228 return false; 1229 } 1230 *is_fdb = nhi->fdb_nh; 1231 } 1232 1233 return true; 1234 } 1235 1236 static int nh_check_attr_fdb_group(struct nexthop *nh, u8 *nh_family, 1237 struct netlink_ext_ack *extack) 1238 { 1239 struct nh_info *nhi; 1240 1241 nhi = rtnl_dereference(nh->nh_info); 1242 1243 if (!nhi->fdb_nh) { 1244 NL_SET_ERR_MSG(extack, "FDB nexthop group can only have fdb nexthops"); 1245 return -EINVAL; 1246 } 1247 1248 if (*nh_family == AF_UNSPEC) { 1249 *nh_family = nhi->family; 1250 } else if (*nh_family != nhi->family) { 1251 NL_SET_ERR_MSG(extack, "FDB nexthop group cannot have mixed family nexthops"); 1252 return -EINVAL; 1253 } 1254 1255 return 0; 1256 } 1257 1258 static int nh_check_attr_group(struct net *net, 1259 struct nlattr *tb[], size_t tb_size, 1260 u16 nh_grp_type, struct netlink_ext_ack *extack) 1261 { 1262 unsigned int len = nla_len(tb[NHA_GROUP]); 1263 u8 nh_family = AF_UNSPEC; 1264 struct nexthop_grp *nhg; 1265 unsigned int i, j; 1266 u8 nhg_fdb = 0; 1267 1268 if (!len || len & (sizeof(struct nexthop_grp) - 1)) { 1269 NL_SET_ERR_MSG(extack, 1270 "Invalid length for nexthop group attribute"); 1271 return -EINVAL; 1272 } 1273 1274 /* convert len to number of nexthop ids */ 1275 len /= sizeof(*nhg); 1276 1277 nhg = nla_data(tb[NHA_GROUP]); 1278 for (i = 0; i < len; ++i) { 1279 if (nhg[i].resvd1 || nhg[i].resvd2) { 1280 NL_SET_ERR_MSG(extack, "Reserved fields in nexthop_grp must be 0"); 1281 return -EINVAL; 1282 } 1283 if (nhg[i].weight > 254) { 1284 NL_SET_ERR_MSG(extack, "Invalid value for weight"); 1285 return -EINVAL; 1286 } 1287 for (j = i + 1; j < len; ++j) { 1288 if (nhg[i].id == nhg[j].id) { 1289 NL_SET_ERR_MSG(extack, "Nexthop id can not be used twice in a group"); 1290 return -EINVAL; 1291 } 1292 } 1293 } 1294 1295 if (tb[NHA_FDB]) 1296 nhg_fdb = 1; 1297 nhg = nla_data(tb[NHA_GROUP]); 1298 for (i = 0; i < len; ++i) { 1299 struct nexthop *nh; 1300 bool is_fdb_nh; 1301 1302 nh = nexthop_find_by_id(net, nhg[i].id); 1303 if (!nh) { 1304 NL_SET_ERR_MSG(extack, "Invalid nexthop id"); 1305 return -EINVAL; 1306 } 1307 if (!valid_group_nh(nh, len, &is_fdb_nh, extack)) 1308 return -EINVAL; 1309 1310 if (nhg_fdb && nh_check_attr_fdb_group(nh, &nh_family, extack)) 1311 return -EINVAL; 1312 1313 if (!nhg_fdb && is_fdb_nh) { 1314 NL_SET_ERR_MSG(extack, "Non FDB nexthop group cannot have fdb nexthops"); 1315 return -EINVAL; 1316 } 1317 } 1318 for (i = NHA_GROUP_TYPE + 1; i < tb_size; ++i) { 1319 if (!tb[i]) 1320 continue; 1321 switch (i) { 1322 case NHA_HW_STATS_ENABLE: 1323 case NHA_FDB: 1324 continue; 1325 case NHA_RES_GROUP: 1326 if (nh_grp_type == NEXTHOP_GRP_TYPE_RES) 1327 continue; 1328 break; 1329 } 1330 NL_SET_ERR_MSG(extack, 1331 "No other attributes can be set in nexthop groups"); 1332 return -EINVAL; 1333 } 1334 1335 return 0; 1336 } 1337 1338 static bool ipv6_good_nh(const struct fib6_nh *nh) 1339 { 1340 int state = NUD_REACHABLE; 1341 struct neighbour *n; 1342 1343 rcu_read_lock(); 1344 1345 n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev, &nh->fib_nh_gw6); 1346 if (n) 1347 state = READ_ONCE(n->nud_state); 1348 1349 rcu_read_unlock(); 1350 1351 return !!(state & NUD_VALID); 1352 } 1353 1354 static bool ipv4_good_nh(const struct fib_nh *nh) 1355 { 1356 int state = NUD_REACHABLE; 1357 struct neighbour *n; 1358 1359 rcu_read_lock(); 1360 1361 n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev, 1362 (__force u32)nh->fib_nh_gw4); 1363 if (n) 1364 state = READ_ONCE(n->nud_state); 1365 1366 rcu_read_unlock(); 1367 1368 return !!(state & NUD_VALID); 1369 } 1370 1371 static bool nexthop_is_good_nh(const struct nexthop *nh) 1372 { 1373 struct nh_info *nhi = rcu_dereference(nh->nh_info); 1374 1375 switch (nhi->family) { 1376 case AF_INET: 1377 return ipv4_good_nh(&nhi->fib_nh); 1378 case AF_INET6: 1379 return ipv6_good_nh(&nhi->fib6_nh); 1380 } 1381 1382 return false; 1383 } 1384 1385 static struct nexthop *nexthop_select_path_fdb(struct nh_group *nhg, int hash) 1386 { 1387 int i; 1388 1389 for (i = 0; i < nhg->num_nh; i++) { 1390 struct nh_grp_entry *nhge = &nhg->nh_entries[i]; 1391 1392 if (hash > atomic_read(&nhge->hthr.upper_bound)) 1393 continue; 1394 1395 nh_grp_entry_stats_inc(nhge); 1396 return nhge->nh; 1397 } 1398 1399 WARN_ON_ONCE(1); 1400 return NULL; 1401 } 1402 1403 static struct nexthop *nexthop_select_path_hthr(struct nh_group *nhg, int hash) 1404 { 1405 struct nh_grp_entry *nhge0 = NULL; 1406 int i; 1407 1408 if (nhg->fdb_nh) 1409 return nexthop_select_path_fdb(nhg, hash); 1410 1411 for (i = 0; i < nhg->num_nh; ++i) { 1412 struct nh_grp_entry *nhge = &nhg->nh_entries[i]; 1413 1414 /* nexthops always check if it is good and does 1415 * not rely on a sysctl for this behavior 1416 */ 1417 if (!nexthop_is_good_nh(nhge->nh)) 1418 continue; 1419 1420 if (!nhge0) 1421 nhge0 = nhge; 1422 1423 if (hash > atomic_read(&nhge->hthr.upper_bound)) 1424 continue; 1425 1426 nh_grp_entry_stats_inc(nhge); 1427 return nhge->nh; 1428 } 1429 1430 if (!nhge0) 1431 nhge0 = &nhg->nh_entries[0]; 1432 nh_grp_entry_stats_inc(nhge0); 1433 return nhge0->nh; 1434 } 1435 1436 static struct nexthop *nexthop_select_path_res(struct nh_group *nhg, int hash) 1437 { 1438 struct nh_res_table *res_table = rcu_dereference(nhg->res_table); 1439 u16 bucket_index = hash % res_table->num_nh_buckets; 1440 struct nh_res_bucket *bucket; 1441 struct nh_grp_entry *nhge; 1442 1443 /* nexthop_select_path() is expected to return a non-NULL value, so 1444 * skip protocol validation and just hand out whatever there is. 1445 */ 1446 bucket = &res_table->nh_buckets[bucket_index]; 1447 nh_res_bucket_set_busy(bucket); 1448 nhge = rcu_dereference(bucket->nh_entry); 1449 nh_grp_entry_stats_inc(nhge); 1450 return nhge->nh; 1451 } 1452 1453 struct nexthop *nexthop_select_path(struct nexthop *nh, int hash) 1454 { 1455 struct nh_group *nhg; 1456 1457 if (!nh->is_group) 1458 return nh; 1459 1460 nhg = rcu_dereference(nh->nh_grp); 1461 if (nhg->hash_threshold) 1462 return nexthop_select_path_hthr(nhg, hash); 1463 else if (nhg->resilient) 1464 return nexthop_select_path_res(nhg, hash); 1465 1466 /* Unreachable. */ 1467 return NULL; 1468 } 1469 EXPORT_SYMBOL_GPL(nexthop_select_path); 1470 1471 int nexthop_for_each_fib6_nh(struct nexthop *nh, 1472 int (*cb)(struct fib6_nh *nh, void *arg), 1473 void *arg) 1474 { 1475 struct nh_info *nhi; 1476 int err; 1477 1478 if (nh->is_group) { 1479 struct nh_group *nhg; 1480 int i; 1481 1482 nhg = rcu_dereference_rtnl(nh->nh_grp); 1483 for (i = 0; i < nhg->num_nh; i++) { 1484 struct nh_grp_entry *nhge = &nhg->nh_entries[i]; 1485 1486 nhi = rcu_dereference_rtnl(nhge->nh->nh_info); 1487 err = cb(&nhi->fib6_nh, arg); 1488 if (err) 1489 return err; 1490 } 1491 } else { 1492 nhi = rcu_dereference_rtnl(nh->nh_info); 1493 err = cb(&nhi->fib6_nh, arg); 1494 if (err) 1495 return err; 1496 } 1497 1498 return 0; 1499 } 1500 EXPORT_SYMBOL_GPL(nexthop_for_each_fib6_nh); 1501 1502 static int check_src_addr(const struct in6_addr *saddr, 1503 struct netlink_ext_ack *extack) 1504 { 1505 if (!ipv6_addr_any(saddr)) { 1506 NL_SET_ERR_MSG(extack, "IPv6 routes using source address can not use nexthop objects"); 1507 return -EINVAL; 1508 } 1509 return 0; 1510 } 1511 1512 int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg, 1513 struct netlink_ext_ack *extack) 1514 { 1515 struct nh_info *nhi; 1516 bool is_fdb_nh; 1517 1518 /* fib6_src is unique to a fib6_info and limits the ability to cache 1519 * routes in fib6_nh within a nexthop that is potentially shared 1520 * across multiple fib entries. If the config wants to use source 1521 * routing it can not use nexthop objects. mlxsw also does not allow 1522 * fib6_src on routes. 1523 */ 1524 if (cfg && check_src_addr(&cfg->fc_src, extack) < 0) 1525 return -EINVAL; 1526 1527 if (nh->is_group) { 1528 struct nh_group *nhg; 1529 1530 nhg = rtnl_dereference(nh->nh_grp); 1531 if (nhg->has_v4) 1532 goto no_v4_nh; 1533 is_fdb_nh = nhg->fdb_nh; 1534 } else { 1535 nhi = rtnl_dereference(nh->nh_info); 1536 if (nhi->family == AF_INET) 1537 goto no_v4_nh; 1538 is_fdb_nh = nhi->fdb_nh; 1539 } 1540 1541 if (is_fdb_nh) { 1542 NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop"); 1543 return -EINVAL; 1544 } 1545 1546 return 0; 1547 no_v4_nh: 1548 NL_SET_ERR_MSG(extack, "IPv6 routes can not use an IPv4 nexthop"); 1549 return -EINVAL; 1550 } 1551 EXPORT_SYMBOL_GPL(fib6_check_nexthop); 1552 1553 /* if existing nexthop has ipv6 routes linked to it, need 1554 * to verify this new spec works with ipv6 1555 */ 1556 static int fib6_check_nh_list(struct nexthop *old, struct nexthop *new, 1557 struct netlink_ext_ack *extack) 1558 { 1559 struct fib6_info *f6i; 1560 1561 if (list_empty(&old->f6i_list)) 1562 return 0; 1563 1564 list_for_each_entry(f6i, &old->f6i_list, nh_list) { 1565 if (check_src_addr(&f6i->fib6_src.addr, extack) < 0) 1566 return -EINVAL; 1567 } 1568 1569 return fib6_check_nexthop(new, NULL, extack); 1570 } 1571 1572 static int nexthop_check_scope(struct nh_info *nhi, u8 scope, 1573 struct netlink_ext_ack *extack) 1574 { 1575 if (scope == RT_SCOPE_HOST && nhi->fib_nhc.nhc_gw_family) { 1576 NL_SET_ERR_MSG(extack, 1577 "Route with host scope can not have a gateway"); 1578 return -EINVAL; 1579 } 1580 1581 if (nhi->fib_nhc.nhc_flags & RTNH_F_ONLINK && scope >= RT_SCOPE_LINK) { 1582 NL_SET_ERR_MSG(extack, "Scope mismatch with nexthop"); 1583 return -EINVAL; 1584 } 1585 1586 return 0; 1587 } 1588 1589 /* Invoked by fib add code to verify nexthop by id is ok with 1590 * config for prefix; parts of fib_check_nh not done when nexthop 1591 * object is used. 1592 */ 1593 int fib_check_nexthop(struct nexthop *nh, u8 scope, 1594 struct netlink_ext_ack *extack) 1595 { 1596 struct nh_info *nhi; 1597 int err = 0; 1598 1599 if (nh->is_group) { 1600 struct nh_group *nhg; 1601 1602 nhg = rtnl_dereference(nh->nh_grp); 1603 if (nhg->fdb_nh) { 1604 NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop"); 1605 err = -EINVAL; 1606 goto out; 1607 } 1608 1609 if (scope == RT_SCOPE_HOST) { 1610 NL_SET_ERR_MSG(extack, "Route with host scope can not have multiple nexthops"); 1611 err = -EINVAL; 1612 goto out; 1613 } 1614 1615 /* all nexthops in a group have the same scope */ 1616 nhi = rtnl_dereference(nhg->nh_entries[0].nh->nh_info); 1617 err = nexthop_check_scope(nhi, scope, extack); 1618 } else { 1619 nhi = rtnl_dereference(nh->nh_info); 1620 if (nhi->fdb_nh) { 1621 NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop"); 1622 err = -EINVAL; 1623 goto out; 1624 } 1625 err = nexthop_check_scope(nhi, scope, extack); 1626 } 1627 1628 out: 1629 return err; 1630 } 1631 1632 static int fib_check_nh_list(struct nexthop *old, struct nexthop *new, 1633 struct netlink_ext_ack *extack) 1634 { 1635 struct fib_info *fi; 1636 1637 list_for_each_entry(fi, &old->fi_list, nh_list) { 1638 int err; 1639 1640 err = fib_check_nexthop(new, fi->fib_scope, extack); 1641 if (err) 1642 return err; 1643 } 1644 return 0; 1645 } 1646 1647 static bool nh_res_nhge_is_balanced(const struct nh_grp_entry *nhge) 1648 { 1649 return nhge->res.count_buckets == nhge->res.wants_buckets; 1650 } 1651 1652 static bool nh_res_nhge_is_ow(const struct nh_grp_entry *nhge) 1653 { 1654 return nhge->res.count_buckets > nhge->res.wants_buckets; 1655 } 1656 1657 static bool nh_res_nhge_is_uw(const struct nh_grp_entry *nhge) 1658 { 1659 return nhge->res.count_buckets < nhge->res.wants_buckets; 1660 } 1661 1662 static bool nh_res_table_is_balanced(const struct nh_res_table *res_table) 1663 { 1664 return list_empty(&res_table->uw_nh_entries); 1665 } 1666 1667 static void nh_res_bucket_unset_nh(struct nh_res_bucket *bucket) 1668 { 1669 struct nh_grp_entry *nhge; 1670 1671 if (bucket->occupied) { 1672 nhge = nh_res_dereference(bucket->nh_entry); 1673 nhge->res.count_buckets--; 1674 bucket->occupied = false; 1675 } 1676 } 1677 1678 static void nh_res_bucket_set_nh(struct nh_res_bucket *bucket, 1679 struct nh_grp_entry *nhge) 1680 { 1681 nh_res_bucket_unset_nh(bucket); 1682 1683 bucket->occupied = true; 1684 rcu_assign_pointer(bucket->nh_entry, nhge); 1685 nhge->res.count_buckets++; 1686 } 1687 1688 static bool nh_res_bucket_should_migrate(struct nh_res_table *res_table, 1689 struct nh_res_bucket *bucket, 1690 unsigned long *deadline, bool *force) 1691 { 1692 unsigned long now = jiffies; 1693 struct nh_grp_entry *nhge; 1694 unsigned long idle_point; 1695 1696 if (!bucket->occupied) { 1697 /* The bucket is not occupied, its NHGE pointer is either 1698 * NULL or obsolete. We _have to_ migrate: set force. 1699 */ 1700 *force = true; 1701 return true; 1702 } 1703 1704 nhge = nh_res_dereference(bucket->nh_entry); 1705 1706 /* If the bucket is populated by an underweight or balanced 1707 * nexthop, do not migrate. 1708 */ 1709 if (!nh_res_nhge_is_ow(nhge)) 1710 return false; 1711 1712 /* At this point we know that the bucket is populated with an 1713 * overweight nexthop. It needs to be migrated to a new nexthop if 1714 * the idle timer of unbalanced timer expired. 1715 */ 1716 1717 idle_point = nh_res_bucket_idle_point(res_table, bucket, now); 1718 if (time_after_eq(now, idle_point)) { 1719 /* The bucket is idle. We _can_ migrate: unset force. */ 1720 *force = false; 1721 return true; 1722 } 1723 1724 /* Unbalanced timer of 0 means "never force". */ 1725 if (res_table->unbalanced_timer) { 1726 unsigned long unb_point; 1727 1728 unb_point = nh_res_table_unb_point(res_table); 1729 if (time_after(now, unb_point)) { 1730 /* The bucket is not idle, but the unbalanced timer 1731 * expired. We _can_ migrate, but set force anyway, 1732 * so that drivers know to ignore activity reports 1733 * from the HW. 1734 */ 1735 *force = true; 1736 return true; 1737 } 1738 1739 nh_res_time_set_deadline(unb_point, deadline); 1740 } 1741 1742 nh_res_time_set_deadline(idle_point, deadline); 1743 return false; 1744 } 1745 1746 static bool nh_res_bucket_migrate(struct nh_res_table *res_table, 1747 u16 bucket_index, bool notify, 1748 bool notify_nl, bool force) 1749 { 1750 struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index]; 1751 struct nh_grp_entry *new_nhge; 1752 struct netlink_ext_ack extack; 1753 int err; 1754 1755 new_nhge = list_first_entry_or_null(&res_table->uw_nh_entries, 1756 struct nh_grp_entry, 1757 res.uw_nh_entry); 1758 if (WARN_ON_ONCE(!new_nhge)) 1759 /* If this function is called, "bucket" is either not 1760 * occupied, or it belongs to a next hop that is 1761 * overweight. In either case, there ought to be a 1762 * corresponding underweight next hop. 1763 */ 1764 return false; 1765 1766 if (notify) { 1767 struct nh_grp_entry *old_nhge; 1768 1769 old_nhge = nh_res_dereference(bucket->nh_entry); 1770 err = call_nexthop_res_bucket_notifiers(res_table->net, 1771 res_table->nhg_id, 1772 bucket_index, force, 1773 old_nhge->nh, 1774 new_nhge->nh, &extack); 1775 if (err) { 1776 pr_err_ratelimited("%s\n", extack._msg); 1777 if (!force) 1778 return false; 1779 /* It is not possible to veto a forced replacement, so 1780 * just clear the hardware flags from the nexthop 1781 * bucket to indicate to user space that this bucket is 1782 * not correctly populated in hardware. 1783 */ 1784 bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP); 1785 } 1786 } 1787 1788 nh_res_bucket_set_nh(bucket, new_nhge); 1789 nh_res_bucket_set_idle(res_table, bucket); 1790 1791 if (notify_nl) 1792 nexthop_bucket_notify(res_table, bucket_index); 1793 1794 if (nh_res_nhge_is_balanced(new_nhge)) 1795 list_del(&new_nhge->res.uw_nh_entry); 1796 return true; 1797 } 1798 1799 #define NH_RES_UPKEEP_DW_MINIMUM_INTERVAL (HZ / 2) 1800 1801 static void nh_res_table_upkeep(struct nh_res_table *res_table, 1802 bool notify, bool notify_nl) 1803 { 1804 unsigned long now = jiffies; 1805 unsigned long deadline; 1806 u16 i; 1807 1808 /* Deadline is the next time that upkeep should be run. It is the 1809 * earliest time at which one of the buckets might be migrated. 1810 * Start at the most pessimistic estimate: either unbalanced_timer 1811 * from now, or if there is none, idle_timer from now. For each 1812 * encountered time point, call nh_res_time_set_deadline() to 1813 * refine the estimate. 1814 */ 1815 if (res_table->unbalanced_timer) 1816 deadline = now + res_table->unbalanced_timer; 1817 else 1818 deadline = now + res_table->idle_timer; 1819 1820 for (i = 0; i < res_table->num_nh_buckets; i++) { 1821 struct nh_res_bucket *bucket = &res_table->nh_buckets[i]; 1822 bool force; 1823 1824 if (nh_res_bucket_should_migrate(res_table, bucket, 1825 &deadline, &force)) { 1826 if (!nh_res_bucket_migrate(res_table, i, notify, 1827 notify_nl, force)) { 1828 unsigned long idle_point; 1829 1830 /* A driver can override the migration 1831 * decision if the HW reports that the 1832 * bucket is actually not idle. Therefore 1833 * remark the bucket as busy again and 1834 * update the deadline. 1835 */ 1836 nh_res_bucket_set_busy(bucket); 1837 idle_point = nh_res_bucket_idle_point(res_table, 1838 bucket, 1839 now); 1840 nh_res_time_set_deadline(idle_point, &deadline); 1841 } 1842 } 1843 } 1844 1845 /* If the group is still unbalanced, schedule the next upkeep to 1846 * either the deadline computed above, or the minimum deadline, 1847 * whichever comes later. 1848 */ 1849 if (!nh_res_table_is_balanced(res_table)) { 1850 unsigned long now = jiffies; 1851 unsigned long min_deadline; 1852 1853 min_deadline = now + NH_RES_UPKEEP_DW_MINIMUM_INTERVAL; 1854 if (time_before(deadline, min_deadline)) 1855 deadline = min_deadline; 1856 1857 queue_delayed_work(system_power_efficient_wq, 1858 &res_table->upkeep_dw, deadline - now); 1859 } 1860 } 1861 1862 static void nh_res_table_upkeep_dw(struct work_struct *work) 1863 { 1864 struct delayed_work *dw = to_delayed_work(work); 1865 struct nh_res_table *res_table; 1866 1867 res_table = container_of(dw, struct nh_res_table, upkeep_dw); 1868 nh_res_table_upkeep(res_table, true, true); 1869 } 1870 1871 static void nh_res_table_cancel_upkeep(struct nh_res_table *res_table) 1872 { 1873 cancel_delayed_work_sync(&res_table->upkeep_dw); 1874 } 1875 1876 static void nh_res_group_rebalance(struct nh_group *nhg, 1877 struct nh_res_table *res_table) 1878 { 1879 int prev_upper_bound = 0; 1880 int total = 0; 1881 int w = 0; 1882 int i; 1883 1884 INIT_LIST_HEAD(&res_table->uw_nh_entries); 1885 1886 for (i = 0; i < nhg->num_nh; ++i) 1887 total += nhg->nh_entries[i].weight; 1888 1889 for (i = 0; i < nhg->num_nh; ++i) { 1890 struct nh_grp_entry *nhge = &nhg->nh_entries[i]; 1891 int upper_bound; 1892 1893 w += nhge->weight; 1894 upper_bound = DIV_ROUND_CLOSEST(res_table->num_nh_buckets * w, 1895 total); 1896 nhge->res.wants_buckets = upper_bound - prev_upper_bound; 1897 prev_upper_bound = upper_bound; 1898 1899 if (nh_res_nhge_is_uw(nhge)) { 1900 if (list_empty(&res_table->uw_nh_entries)) 1901 res_table->unbalanced_since = jiffies; 1902 list_add(&nhge->res.uw_nh_entry, 1903 &res_table->uw_nh_entries); 1904 } 1905 } 1906 } 1907 1908 /* Migrate buckets in res_table so that they reference NHGE's from NHG with 1909 * the right NH ID. Set those buckets that do not have a corresponding NHGE 1910 * entry in NHG as not occupied. 1911 */ 1912 static void nh_res_table_migrate_buckets(struct nh_res_table *res_table, 1913 struct nh_group *nhg) 1914 { 1915 u16 i; 1916 1917 for (i = 0; i < res_table->num_nh_buckets; i++) { 1918 struct nh_res_bucket *bucket = &res_table->nh_buckets[i]; 1919 u32 id = rtnl_dereference(bucket->nh_entry)->nh->id; 1920 bool found = false; 1921 int j; 1922 1923 for (j = 0; j < nhg->num_nh; j++) { 1924 struct nh_grp_entry *nhge = &nhg->nh_entries[j]; 1925 1926 if (nhge->nh->id == id) { 1927 nh_res_bucket_set_nh(bucket, nhge); 1928 found = true; 1929 break; 1930 } 1931 } 1932 1933 if (!found) 1934 nh_res_bucket_unset_nh(bucket); 1935 } 1936 } 1937 1938 static void replace_nexthop_grp_res(struct nh_group *oldg, 1939 struct nh_group *newg) 1940 { 1941 /* For NH group replacement, the new NHG might only have a stub 1942 * hash table with 0 buckets, because the number of buckets was not 1943 * specified. For NH removal, oldg and newg both reference the same 1944 * res_table. So in any case, in the following, we want to work 1945 * with oldg->res_table. 1946 */ 1947 struct nh_res_table *old_res_table = rtnl_dereference(oldg->res_table); 1948 unsigned long prev_unbalanced_since = old_res_table->unbalanced_since; 1949 bool prev_has_uw = !list_empty(&old_res_table->uw_nh_entries); 1950 1951 nh_res_table_cancel_upkeep(old_res_table); 1952 nh_res_table_migrate_buckets(old_res_table, newg); 1953 nh_res_group_rebalance(newg, old_res_table); 1954 if (prev_has_uw && !list_empty(&old_res_table->uw_nh_entries)) 1955 old_res_table->unbalanced_since = prev_unbalanced_since; 1956 nh_res_table_upkeep(old_res_table, true, false); 1957 } 1958 1959 static void nh_hthr_group_rebalance(struct nh_group *nhg) 1960 { 1961 int total = 0; 1962 int w = 0; 1963 int i; 1964 1965 for (i = 0; i < nhg->num_nh; ++i) 1966 total += nhg->nh_entries[i].weight; 1967 1968 for (i = 0; i < nhg->num_nh; ++i) { 1969 struct nh_grp_entry *nhge = &nhg->nh_entries[i]; 1970 int upper_bound; 1971 1972 w += nhge->weight; 1973 upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31, total) - 1; 1974 atomic_set(&nhge->hthr.upper_bound, upper_bound); 1975 } 1976 } 1977 1978 static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge, 1979 struct nl_info *nlinfo) 1980 { 1981 struct nh_grp_entry *nhges, *new_nhges; 1982 struct nexthop *nhp = nhge->nh_parent; 1983 struct netlink_ext_ack extack; 1984 struct nexthop *nh = nhge->nh; 1985 struct nh_group *nhg, *newg; 1986 int i, j, err; 1987 1988 WARN_ON(!nh); 1989 1990 nhg = rtnl_dereference(nhp->nh_grp); 1991 newg = nhg->spare; 1992 1993 /* last entry, keep it visible and remove the parent */ 1994 if (nhg->num_nh == 1) { 1995 remove_nexthop(net, nhp, nlinfo); 1996 return; 1997 } 1998 1999 newg->has_v4 = false; 2000 newg->is_multipath = nhg->is_multipath; 2001 newg->hash_threshold = nhg->hash_threshold; 2002 newg->resilient = nhg->resilient; 2003 newg->fdb_nh = nhg->fdb_nh; 2004 newg->num_nh = nhg->num_nh; 2005 2006 /* copy old entries to new except the one getting removed */ 2007 nhges = nhg->nh_entries; 2008 new_nhges = newg->nh_entries; 2009 for (i = 0, j = 0; i < nhg->num_nh; ++i) { 2010 struct nh_info *nhi; 2011 2012 /* current nexthop getting removed */ 2013 if (nhg->nh_entries[i].nh == nh) { 2014 newg->num_nh--; 2015 continue; 2016 } 2017 2018 nhi = rtnl_dereference(nhges[i].nh->nh_info); 2019 if (nhi->family == AF_INET) 2020 newg->has_v4 = true; 2021 2022 list_del(&nhges[i].nh_list); 2023 new_nhges[j].stats = nhges[i].stats; 2024 new_nhges[j].nh_parent = nhges[i].nh_parent; 2025 new_nhges[j].nh = nhges[i].nh; 2026 new_nhges[j].weight = nhges[i].weight; 2027 list_add(&new_nhges[j].nh_list, &new_nhges[j].nh->grp_list); 2028 j++; 2029 } 2030 2031 if (newg->hash_threshold) 2032 nh_hthr_group_rebalance(newg); 2033 else if (newg->resilient) 2034 replace_nexthop_grp_res(nhg, newg); 2035 2036 rcu_assign_pointer(nhp->nh_grp, newg); 2037 2038 list_del(&nhge->nh_list); 2039 free_percpu(nhge->stats); 2040 nexthop_put(nhge->nh); 2041 2042 /* Removal of a NH from a resilient group is notified through 2043 * bucket notifications. 2044 */ 2045 if (newg->hash_threshold) { 2046 err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, nhp, 2047 &extack); 2048 if (err) 2049 pr_err("%s\n", extack._msg); 2050 } 2051 2052 if (nlinfo) 2053 nexthop_notify(RTM_NEWNEXTHOP, nhp, nlinfo); 2054 } 2055 2056 static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh, 2057 struct nl_info *nlinfo) 2058 { 2059 struct nh_grp_entry *nhge, *tmp; 2060 2061 list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list) 2062 remove_nh_grp_entry(net, nhge, nlinfo); 2063 2064 /* make sure all see the newly published array before releasing rtnl */ 2065 synchronize_net(); 2066 } 2067 2068 static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo) 2069 { 2070 struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp); 2071 struct nh_res_table *res_table; 2072 int i, num_nh = nhg->num_nh; 2073 2074 for (i = 0; i < num_nh; ++i) { 2075 struct nh_grp_entry *nhge = &nhg->nh_entries[i]; 2076 2077 if (WARN_ON(!nhge->nh)) 2078 continue; 2079 2080 list_del_init(&nhge->nh_list); 2081 } 2082 2083 if (nhg->resilient) { 2084 res_table = rtnl_dereference(nhg->res_table); 2085 nh_res_table_cancel_upkeep(res_table); 2086 } 2087 } 2088 2089 /* not called for nexthop replace */ 2090 static void __remove_nexthop_fib(struct net *net, struct nexthop *nh) 2091 { 2092 struct fib6_info *f6i, *tmp; 2093 bool do_flush = false; 2094 struct fib_info *fi; 2095 2096 list_for_each_entry(fi, &nh->fi_list, nh_list) { 2097 fi->fib_flags |= RTNH_F_DEAD; 2098 do_flush = true; 2099 } 2100 if (do_flush) 2101 fib_flush(net); 2102 2103 /* ip6_del_rt removes the entry from this list hence the _safe */ 2104 list_for_each_entry_safe(f6i, tmp, &nh->f6i_list, nh_list) { 2105 /* __ip6_del_rt does a release, so do a hold here */ 2106 fib6_info_hold(f6i); 2107 ipv6_stub->ip6_del_rt(net, f6i, 2108 !READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode)); 2109 } 2110 } 2111 2112 static void __remove_nexthop(struct net *net, struct nexthop *nh, 2113 struct nl_info *nlinfo) 2114 { 2115 __remove_nexthop_fib(net, nh); 2116 2117 if (nh->is_group) { 2118 remove_nexthop_group(nh, nlinfo); 2119 } else { 2120 struct nh_info *nhi; 2121 2122 nhi = rtnl_dereference(nh->nh_info); 2123 if (nhi->fib_nhc.nhc_dev) 2124 hlist_del(&nhi->dev_hash); 2125 2126 remove_nexthop_from_groups(net, nh, nlinfo); 2127 } 2128 } 2129 2130 static void remove_nexthop(struct net *net, struct nexthop *nh, 2131 struct nl_info *nlinfo) 2132 { 2133 call_nexthop_notifiers(net, NEXTHOP_EVENT_DEL, nh, NULL); 2134 2135 /* remove from the tree */ 2136 rb_erase(&nh->rb_node, &net->nexthop.rb_root); 2137 2138 if (nlinfo) 2139 nexthop_notify(RTM_DELNEXTHOP, nh, nlinfo); 2140 2141 __remove_nexthop(net, nh, nlinfo); 2142 nh_base_seq_inc(net); 2143 2144 nexthop_put(nh); 2145 } 2146 2147 /* if any FIB entries reference this nexthop, any dst entries 2148 * need to be regenerated 2149 */ 2150 static void nh_rt_cache_flush(struct net *net, struct nexthop *nh, 2151 struct nexthop *replaced_nh) 2152 { 2153 struct fib6_info *f6i; 2154 struct nh_group *nhg; 2155 int i; 2156 2157 if (!list_empty(&nh->fi_list)) 2158 rt_cache_flush(net); 2159 2160 list_for_each_entry(f6i, &nh->f6i_list, nh_list) 2161 ipv6_stub->fib6_update_sernum(net, f6i); 2162 2163 /* if an IPv6 group was replaced, we have to release all old 2164 * dsts to make sure all refcounts are released 2165 */ 2166 if (!replaced_nh->is_group) 2167 return; 2168 2169 nhg = rtnl_dereference(replaced_nh->nh_grp); 2170 for (i = 0; i < nhg->num_nh; i++) { 2171 struct nh_grp_entry *nhge = &nhg->nh_entries[i]; 2172 struct nh_info *nhi = rtnl_dereference(nhge->nh->nh_info); 2173 2174 if (nhi->family == AF_INET6) 2175 ipv6_stub->fib6_nh_release_dsts(&nhi->fib6_nh); 2176 } 2177 } 2178 2179 static int replace_nexthop_grp(struct net *net, struct nexthop *old, 2180 struct nexthop *new, const struct nh_config *cfg, 2181 struct netlink_ext_ack *extack) 2182 { 2183 struct nh_res_table *tmp_table = NULL; 2184 struct nh_res_table *new_res_table; 2185 struct nh_res_table *old_res_table; 2186 struct nh_group *oldg, *newg; 2187 int i, err; 2188 2189 if (!new->is_group) { 2190 NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with a nexthop."); 2191 return -EINVAL; 2192 } 2193 2194 oldg = rtnl_dereference(old->nh_grp); 2195 newg = rtnl_dereference(new->nh_grp); 2196 2197 if (newg->hash_threshold != oldg->hash_threshold) { 2198 NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with one of a different type."); 2199 return -EINVAL; 2200 } 2201 2202 if (newg->hash_threshold) { 2203 err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new, 2204 extack); 2205 if (err) 2206 return err; 2207 } else if (newg->resilient) { 2208 new_res_table = rtnl_dereference(newg->res_table); 2209 old_res_table = rtnl_dereference(oldg->res_table); 2210 2211 /* Accept if num_nh_buckets was not given, but if it was 2212 * given, demand that the value be correct. 2213 */ 2214 if (cfg->nh_grp_res_has_num_buckets && 2215 cfg->nh_grp_res_num_buckets != 2216 old_res_table->num_nh_buckets) { 2217 NL_SET_ERR_MSG(extack, "Can not change number of buckets of a resilient nexthop group."); 2218 return -EINVAL; 2219 } 2220 2221 /* Emit a pre-replace notification so that listeners could veto 2222 * a potentially unsupported configuration. Otherwise, 2223 * individual bucket replacement notifications would need to be 2224 * vetoed, which is something that should only happen if the 2225 * bucket is currently active. 2226 */ 2227 err = call_nexthop_res_table_notifiers(net, new, extack); 2228 if (err) 2229 return err; 2230 2231 if (cfg->nh_grp_res_has_idle_timer) 2232 old_res_table->idle_timer = cfg->nh_grp_res_idle_timer; 2233 if (cfg->nh_grp_res_has_unbalanced_timer) 2234 old_res_table->unbalanced_timer = 2235 cfg->nh_grp_res_unbalanced_timer; 2236 2237 replace_nexthop_grp_res(oldg, newg); 2238 2239 tmp_table = new_res_table; 2240 rcu_assign_pointer(newg->res_table, old_res_table); 2241 rcu_assign_pointer(newg->spare->res_table, old_res_table); 2242 } 2243 2244 /* update parents - used by nexthop code for cleanup */ 2245 for (i = 0; i < newg->num_nh; i++) 2246 newg->nh_entries[i].nh_parent = old; 2247 2248 rcu_assign_pointer(old->nh_grp, newg); 2249 2250 /* Make sure concurrent readers are not using 'oldg' anymore. */ 2251 synchronize_net(); 2252 2253 if (newg->resilient) { 2254 rcu_assign_pointer(oldg->res_table, tmp_table); 2255 rcu_assign_pointer(oldg->spare->res_table, tmp_table); 2256 } 2257 2258 for (i = 0; i < oldg->num_nh; i++) 2259 oldg->nh_entries[i].nh_parent = new; 2260 2261 rcu_assign_pointer(new->nh_grp, oldg); 2262 2263 return 0; 2264 } 2265 2266 static void nh_group_v4_update(struct nh_group *nhg) 2267 { 2268 struct nh_grp_entry *nhges; 2269 bool has_v4 = false; 2270 int i; 2271 2272 nhges = nhg->nh_entries; 2273 for (i = 0; i < nhg->num_nh; i++) { 2274 struct nh_info *nhi; 2275 2276 nhi = rtnl_dereference(nhges[i].nh->nh_info); 2277 if (nhi->family == AF_INET) 2278 has_v4 = true; 2279 } 2280 nhg->has_v4 = has_v4; 2281 } 2282 2283 static int replace_nexthop_single_notify_res(struct net *net, 2284 struct nh_res_table *res_table, 2285 struct nexthop *old, 2286 struct nh_info *oldi, 2287 struct nh_info *newi, 2288 struct netlink_ext_ack *extack) 2289 { 2290 u32 nhg_id = res_table->nhg_id; 2291 int err; 2292 u16 i; 2293 2294 for (i = 0; i < res_table->num_nh_buckets; i++) { 2295 struct nh_res_bucket *bucket = &res_table->nh_buckets[i]; 2296 struct nh_grp_entry *nhge; 2297 2298 nhge = rtnl_dereference(bucket->nh_entry); 2299 if (nhge->nh == old) { 2300 err = __call_nexthop_res_bucket_notifiers(net, nhg_id, 2301 i, true, 2302 oldi, newi, 2303 extack); 2304 if (err) 2305 goto err_notify; 2306 } 2307 } 2308 2309 return 0; 2310 2311 err_notify: 2312 while (i-- > 0) { 2313 struct nh_res_bucket *bucket = &res_table->nh_buckets[i]; 2314 struct nh_grp_entry *nhge; 2315 2316 nhge = rtnl_dereference(bucket->nh_entry); 2317 if (nhge->nh == old) 2318 __call_nexthop_res_bucket_notifiers(net, nhg_id, i, 2319 true, newi, oldi, 2320 extack); 2321 } 2322 return err; 2323 } 2324 2325 static int replace_nexthop_single_notify(struct net *net, 2326 struct nexthop *group_nh, 2327 struct nexthop *old, 2328 struct nh_info *oldi, 2329 struct nh_info *newi, 2330 struct netlink_ext_ack *extack) 2331 { 2332 struct nh_group *nhg = rtnl_dereference(group_nh->nh_grp); 2333 struct nh_res_table *res_table; 2334 2335 if (nhg->hash_threshold) { 2336 return call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, 2337 group_nh, extack); 2338 } else if (nhg->resilient) { 2339 res_table = rtnl_dereference(nhg->res_table); 2340 return replace_nexthop_single_notify_res(net, res_table, 2341 old, oldi, newi, 2342 extack); 2343 } 2344 2345 return -EINVAL; 2346 } 2347 2348 static int replace_nexthop_single(struct net *net, struct nexthop *old, 2349 struct nexthop *new, 2350 struct netlink_ext_ack *extack) 2351 { 2352 u8 old_protocol, old_nh_flags; 2353 struct nh_info *oldi, *newi; 2354 struct nh_grp_entry *nhge; 2355 int err; 2356 2357 if (new->is_group) { 2358 NL_SET_ERR_MSG(extack, "Can not replace a nexthop with a nexthop group."); 2359 return -EINVAL; 2360 } 2361 2362 err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new, extack); 2363 if (err) 2364 return err; 2365 2366 /* Hardware flags were set on 'old' as 'new' is not in the red-black 2367 * tree. Therefore, inherit the flags from 'old' to 'new'. 2368 */ 2369 new->nh_flags |= old->nh_flags & (RTNH_F_OFFLOAD | RTNH_F_TRAP); 2370 2371 oldi = rtnl_dereference(old->nh_info); 2372 newi = rtnl_dereference(new->nh_info); 2373 2374 newi->nh_parent = old; 2375 oldi->nh_parent = new; 2376 2377 old_protocol = old->protocol; 2378 old_nh_flags = old->nh_flags; 2379 2380 old->protocol = new->protocol; 2381 old->nh_flags = new->nh_flags; 2382 2383 rcu_assign_pointer(old->nh_info, newi); 2384 rcu_assign_pointer(new->nh_info, oldi); 2385 2386 /* Send a replace notification for all the groups using the nexthop. */ 2387 list_for_each_entry(nhge, &old->grp_list, nh_list) { 2388 struct nexthop *nhp = nhge->nh_parent; 2389 2390 err = replace_nexthop_single_notify(net, nhp, old, oldi, newi, 2391 extack); 2392 if (err) 2393 goto err_notify; 2394 } 2395 2396 /* When replacing an IPv4 nexthop with an IPv6 nexthop, potentially 2397 * update IPv4 indication in all the groups using the nexthop. 2398 */ 2399 if (oldi->family == AF_INET && newi->family == AF_INET6) { 2400 list_for_each_entry(nhge, &old->grp_list, nh_list) { 2401 struct nexthop *nhp = nhge->nh_parent; 2402 struct nh_group *nhg; 2403 2404 nhg = rtnl_dereference(nhp->nh_grp); 2405 nh_group_v4_update(nhg); 2406 } 2407 } 2408 2409 return 0; 2410 2411 err_notify: 2412 rcu_assign_pointer(new->nh_info, newi); 2413 rcu_assign_pointer(old->nh_info, oldi); 2414 old->nh_flags = old_nh_flags; 2415 old->protocol = old_protocol; 2416 oldi->nh_parent = old; 2417 newi->nh_parent = new; 2418 list_for_each_entry_continue_reverse(nhge, &old->grp_list, nh_list) { 2419 struct nexthop *nhp = nhge->nh_parent; 2420 2421 replace_nexthop_single_notify(net, nhp, old, newi, oldi, NULL); 2422 } 2423 call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, old, extack); 2424 return err; 2425 } 2426 2427 static void __nexthop_replace_notify(struct net *net, struct nexthop *nh, 2428 struct nl_info *info) 2429 { 2430 struct fib6_info *f6i; 2431 2432 if (!list_empty(&nh->fi_list)) { 2433 struct fib_info *fi; 2434 2435 /* expectation is a few fib_info per nexthop and then 2436 * a lot of routes per fib_info. So mark the fib_info 2437 * and then walk the fib tables once 2438 */ 2439 list_for_each_entry(fi, &nh->fi_list, nh_list) 2440 fi->nh_updated = true; 2441 2442 fib_info_notify_update(net, info); 2443 2444 list_for_each_entry(fi, &nh->fi_list, nh_list) 2445 fi->nh_updated = false; 2446 } 2447 2448 list_for_each_entry(f6i, &nh->f6i_list, nh_list) 2449 ipv6_stub->fib6_rt_update(net, f6i, info); 2450 } 2451 2452 /* send RTM_NEWROUTE with REPLACE flag set for all FIB entries 2453 * linked to this nexthop and for all groups that the nexthop 2454 * is a member of 2455 */ 2456 static void nexthop_replace_notify(struct net *net, struct nexthop *nh, 2457 struct nl_info *info) 2458 { 2459 struct nh_grp_entry *nhge; 2460 2461 __nexthop_replace_notify(net, nh, info); 2462 2463 list_for_each_entry(nhge, &nh->grp_list, nh_list) 2464 __nexthop_replace_notify(net, nhge->nh_parent, info); 2465 } 2466 2467 static int replace_nexthop(struct net *net, struct nexthop *old, 2468 struct nexthop *new, const struct nh_config *cfg, 2469 struct netlink_ext_ack *extack) 2470 { 2471 bool new_is_reject = false; 2472 struct nh_grp_entry *nhge; 2473 int err; 2474 2475 /* check that existing FIB entries are ok with the 2476 * new nexthop definition 2477 */ 2478 err = fib_check_nh_list(old, new, extack); 2479 if (err) 2480 return err; 2481 2482 err = fib6_check_nh_list(old, new, extack); 2483 if (err) 2484 return err; 2485 2486 if (!new->is_group) { 2487 struct nh_info *nhi = rtnl_dereference(new->nh_info); 2488 2489 new_is_reject = nhi->reject_nh; 2490 } 2491 2492 list_for_each_entry(nhge, &old->grp_list, nh_list) { 2493 /* if new nexthop is a blackhole, any groups using this 2494 * nexthop cannot have more than 1 path 2495 */ 2496 if (new_is_reject && 2497 nexthop_num_path(nhge->nh_parent) > 1) { 2498 NL_SET_ERR_MSG(extack, "Blackhole nexthop can not be a member of a group with more than one path"); 2499 return -EINVAL; 2500 } 2501 2502 err = fib_check_nh_list(nhge->nh_parent, new, extack); 2503 if (err) 2504 return err; 2505 2506 err = fib6_check_nh_list(nhge->nh_parent, new, extack); 2507 if (err) 2508 return err; 2509 } 2510 2511 if (old->is_group) 2512 err = replace_nexthop_grp(net, old, new, cfg, extack); 2513 else 2514 err = replace_nexthop_single(net, old, new, extack); 2515 2516 if (!err) { 2517 nh_rt_cache_flush(net, old, new); 2518 2519 __remove_nexthop(net, new, NULL); 2520 nexthop_put(new); 2521 } 2522 2523 return err; 2524 } 2525 2526 /* called with rtnl_lock held */ 2527 static int insert_nexthop(struct net *net, struct nexthop *new_nh, 2528 struct nh_config *cfg, struct netlink_ext_ack *extack) 2529 { 2530 struct rb_node **pp, *parent = NULL, *next; 2531 struct rb_root *root = &net->nexthop.rb_root; 2532 bool replace = !!(cfg->nlflags & NLM_F_REPLACE); 2533 bool create = !!(cfg->nlflags & NLM_F_CREATE); 2534 u32 new_id = new_nh->id; 2535 int replace_notify = 0; 2536 int rc = -EEXIST; 2537 2538 pp = &root->rb_node; 2539 while (1) { 2540 struct nexthop *nh; 2541 2542 next = *pp; 2543 if (!next) 2544 break; 2545 2546 parent = next; 2547 2548 nh = rb_entry(parent, struct nexthop, rb_node); 2549 if (new_id < nh->id) { 2550 pp = &next->rb_left; 2551 } else if (new_id > nh->id) { 2552 pp = &next->rb_right; 2553 } else if (replace) { 2554 rc = replace_nexthop(net, nh, new_nh, cfg, extack); 2555 if (!rc) { 2556 new_nh = nh; /* send notification with old nh */ 2557 replace_notify = 1; 2558 } 2559 goto out; 2560 } else { 2561 /* id already exists and not a replace */ 2562 goto out; 2563 } 2564 } 2565 2566 if (replace && !create) { 2567 NL_SET_ERR_MSG(extack, "Replace specified without create and no entry exists"); 2568 rc = -ENOENT; 2569 goto out; 2570 } 2571 2572 if (new_nh->is_group) { 2573 struct nh_group *nhg = rtnl_dereference(new_nh->nh_grp); 2574 struct nh_res_table *res_table; 2575 2576 if (nhg->resilient) { 2577 res_table = rtnl_dereference(nhg->res_table); 2578 2579 /* Not passing the number of buckets is OK when 2580 * replacing, but not when creating a new group. 2581 */ 2582 if (!cfg->nh_grp_res_has_num_buckets) { 2583 NL_SET_ERR_MSG(extack, "Number of buckets not specified for nexthop group insertion"); 2584 rc = -EINVAL; 2585 goto out; 2586 } 2587 2588 nh_res_group_rebalance(nhg, res_table); 2589 2590 /* Do not send bucket notifications, we do full 2591 * notification below. 2592 */ 2593 nh_res_table_upkeep(res_table, false, false); 2594 } 2595 } 2596 2597 rb_link_node_rcu(&new_nh->rb_node, parent, pp); 2598 rb_insert_color(&new_nh->rb_node, root); 2599 2600 /* The initial insertion is a full notification for hash-threshold as 2601 * well as resilient groups. 2602 */ 2603 rc = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new_nh, extack); 2604 if (rc) 2605 rb_erase(&new_nh->rb_node, &net->nexthop.rb_root); 2606 2607 out: 2608 if (!rc) { 2609 nh_base_seq_inc(net); 2610 nexthop_notify(RTM_NEWNEXTHOP, new_nh, &cfg->nlinfo); 2611 if (replace_notify && 2612 READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode)) 2613 nexthop_replace_notify(net, new_nh, &cfg->nlinfo); 2614 } 2615 2616 return rc; 2617 } 2618 2619 /* rtnl */ 2620 /* remove all nexthops tied to a device being deleted */ 2621 static void nexthop_flush_dev(struct net_device *dev, unsigned long event) 2622 { 2623 unsigned int hash = nh_dev_hashfn(dev->ifindex); 2624 struct net *net = dev_net(dev); 2625 struct hlist_head *head = &net->nexthop.devhash[hash]; 2626 struct hlist_node *n; 2627 struct nh_info *nhi; 2628 2629 hlist_for_each_entry_safe(nhi, n, head, dev_hash) { 2630 if (nhi->fib_nhc.nhc_dev != dev) 2631 continue; 2632 2633 if (nhi->reject_nh && 2634 (event == NETDEV_DOWN || event == NETDEV_CHANGE)) 2635 continue; 2636 2637 remove_nexthop(net, nhi->nh_parent, NULL); 2638 } 2639 } 2640 2641 /* rtnl; called when net namespace is deleted */ 2642 static void flush_all_nexthops(struct net *net) 2643 { 2644 struct rb_root *root = &net->nexthop.rb_root; 2645 struct rb_node *node; 2646 struct nexthop *nh; 2647 2648 while ((node = rb_first(root))) { 2649 nh = rb_entry(node, struct nexthop, rb_node); 2650 remove_nexthop(net, nh, NULL); 2651 cond_resched(); 2652 } 2653 } 2654 2655 static struct nexthop *nexthop_create_group(struct net *net, 2656 struct nh_config *cfg) 2657 { 2658 struct nlattr *grps_attr = cfg->nh_grp; 2659 struct nexthop_grp *entry = nla_data(grps_attr); 2660 u16 num_nh = nla_len(grps_attr) / sizeof(*entry); 2661 struct nh_group *nhg; 2662 struct nexthop *nh; 2663 int err; 2664 int i; 2665 2666 if (WARN_ON(!num_nh)) 2667 return ERR_PTR(-EINVAL); 2668 2669 nh = nexthop_alloc(); 2670 if (!nh) 2671 return ERR_PTR(-ENOMEM); 2672 2673 nh->is_group = 1; 2674 2675 nhg = nexthop_grp_alloc(num_nh); 2676 if (!nhg) { 2677 kfree(nh); 2678 return ERR_PTR(-ENOMEM); 2679 } 2680 2681 /* spare group used for removals */ 2682 nhg->spare = nexthop_grp_alloc(num_nh); 2683 if (!nhg->spare) { 2684 kfree(nhg); 2685 kfree(nh); 2686 return ERR_PTR(-ENOMEM); 2687 } 2688 nhg->spare->spare = nhg; 2689 2690 for (i = 0; i < nhg->num_nh; ++i) { 2691 struct nexthop *nhe; 2692 struct nh_info *nhi; 2693 2694 nhe = nexthop_find_by_id(net, entry[i].id); 2695 if (!nexthop_get(nhe)) { 2696 err = -ENOENT; 2697 goto out_no_nh; 2698 } 2699 2700 nhi = rtnl_dereference(nhe->nh_info); 2701 if (nhi->family == AF_INET) 2702 nhg->has_v4 = true; 2703 2704 nhg->nh_entries[i].stats = 2705 netdev_alloc_pcpu_stats(struct nh_grp_entry_stats); 2706 if (!nhg->nh_entries[i].stats) { 2707 err = -ENOMEM; 2708 nexthop_put(nhe); 2709 goto out_no_nh; 2710 } 2711 nhg->nh_entries[i].nh = nhe; 2712 nhg->nh_entries[i].weight = entry[i].weight + 1; 2713 list_add(&nhg->nh_entries[i].nh_list, &nhe->grp_list); 2714 nhg->nh_entries[i].nh_parent = nh; 2715 } 2716 2717 if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_MPATH) { 2718 nhg->hash_threshold = 1; 2719 nhg->is_multipath = true; 2720 } else if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES) { 2721 struct nh_res_table *res_table; 2722 2723 res_table = nexthop_res_table_alloc(net, cfg->nh_id, cfg); 2724 if (!res_table) { 2725 err = -ENOMEM; 2726 goto out_no_nh; 2727 } 2728 2729 rcu_assign_pointer(nhg->spare->res_table, res_table); 2730 rcu_assign_pointer(nhg->res_table, res_table); 2731 nhg->resilient = true; 2732 nhg->is_multipath = true; 2733 } 2734 2735 WARN_ON_ONCE(nhg->hash_threshold + nhg->resilient != 1); 2736 2737 if (nhg->hash_threshold) 2738 nh_hthr_group_rebalance(nhg); 2739 2740 if (cfg->nh_fdb) 2741 nhg->fdb_nh = 1; 2742 2743 if (cfg->nh_hw_stats) 2744 nhg->hw_stats = true; 2745 2746 rcu_assign_pointer(nh->nh_grp, nhg); 2747 2748 return nh; 2749 2750 out_no_nh: 2751 for (i--; i >= 0; --i) { 2752 list_del(&nhg->nh_entries[i].nh_list); 2753 free_percpu(nhg->nh_entries[i].stats); 2754 nexthop_put(nhg->nh_entries[i].nh); 2755 } 2756 2757 kfree(nhg->spare); 2758 kfree(nhg); 2759 kfree(nh); 2760 2761 return ERR_PTR(err); 2762 } 2763 2764 static int nh_create_ipv4(struct net *net, struct nexthop *nh, 2765 struct nh_info *nhi, struct nh_config *cfg, 2766 struct netlink_ext_ack *extack) 2767 { 2768 struct fib_nh *fib_nh = &nhi->fib_nh; 2769 struct fib_config fib_cfg = { 2770 .fc_oif = cfg->nh_ifindex, 2771 .fc_gw4 = cfg->gw.ipv4, 2772 .fc_gw_family = cfg->gw.ipv4 ? AF_INET : 0, 2773 .fc_flags = cfg->nh_flags, 2774 .fc_nlinfo = cfg->nlinfo, 2775 .fc_encap = cfg->nh_encap, 2776 .fc_encap_type = cfg->nh_encap_type, 2777 }; 2778 u32 tb_id = (cfg->dev ? l3mdev_fib_table(cfg->dev) : RT_TABLE_MAIN); 2779 int err; 2780 2781 err = fib_nh_init(net, fib_nh, &fib_cfg, 1, extack); 2782 if (err) { 2783 fib_nh_release(net, fib_nh); 2784 goto out; 2785 } 2786 2787 if (nhi->fdb_nh) 2788 goto out; 2789 2790 /* sets nh_dev if successful */ 2791 err = fib_check_nh(net, fib_nh, tb_id, 0, extack); 2792 if (!err) { 2793 nh->nh_flags = fib_nh->fib_nh_flags; 2794 fib_info_update_nhc_saddr(net, &fib_nh->nh_common, 2795 !fib_nh->fib_nh_scope ? 0 : fib_nh->fib_nh_scope - 1); 2796 } else { 2797 fib_nh_release(net, fib_nh); 2798 } 2799 out: 2800 return err; 2801 } 2802 2803 static int nh_create_ipv6(struct net *net, struct nexthop *nh, 2804 struct nh_info *nhi, struct nh_config *cfg, 2805 struct netlink_ext_ack *extack) 2806 { 2807 struct fib6_nh *fib6_nh = &nhi->fib6_nh; 2808 struct fib6_config fib6_cfg = { 2809 .fc_table = l3mdev_fib_table(cfg->dev), 2810 .fc_ifindex = cfg->nh_ifindex, 2811 .fc_gateway = cfg->gw.ipv6, 2812 .fc_flags = cfg->nh_flags, 2813 .fc_nlinfo = cfg->nlinfo, 2814 .fc_encap = cfg->nh_encap, 2815 .fc_encap_type = cfg->nh_encap_type, 2816 .fc_is_fdb = cfg->nh_fdb, 2817 }; 2818 int err; 2819 2820 if (!ipv6_addr_any(&cfg->gw.ipv6)) 2821 fib6_cfg.fc_flags |= RTF_GATEWAY; 2822 2823 /* sets nh_dev if successful */ 2824 err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL, 2825 extack); 2826 if (err) { 2827 /* IPv6 is not enabled, don't call fib6_nh_release */ 2828 if (err == -EAFNOSUPPORT) 2829 goto out; 2830 ipv6_stub->fib6_nh_release(fib6_nh); 2831 } else { 2832 nh->nh_flags = fib6_nh->fib_nh_flags; 2833 } 2834 out: 2835 return err; 2836 } 2837 2838 static struct nexthop *nexthop_create(struct net *net, struct nh_config *cfg, 2839 struct netlink_ext_ack *extack) 2840 { 2841 struct nh_info *nhi; 2842 struct nexthop *nh; 2843 int err = 0; 2844 2845 nh = nexthop_alloc(); 2846 if (!nh) 2847 return ERR_PTR(-ENOMEM); 2848 2849 nhi = kzalloc(sizeof(*nhi), GFP_KERNEL); 2850 if (!nhi) { 2851 kfree(nh); 2852 return ERR_PTR(-ENOMEM); 2853 } 2854 2855 nh->nh_flags = cfg->nh_flags; 2856 nh->net = net; 2857 2858 nhi->nh_parent = nh; 2859 nhi->family = cfg->nh_family; 2860 nhi->fib_nhc.nhc_scope = RT_SCOPE_LINK; 2861 2862 if (cfg->nh_fdb) 2863 nhi->fdb_nh = 1; 2864 2865 if (cfg->nh_blackhole) { 2866 nhi->reject_nh = 1; 2867 cfg->nh_ifindex = net->loopback_dev->ifindex; 2868 } 2869 2870 switch (cfg->nh_family) { 2871 case AF_INET: 2872 err = nh_create_ipv4(net, nh, nhi, cfg, extack); 2873 break; 2874 case AF_INET6: 2875 err = nh_create_ipv6(net, nh, nhi, cfg, extack); 2876 break; 2877 } 2878 2879 if (err) { 2880 kfree(nhi); 2881 kfree(nh); 2882 return ERR_PTR(err); 2883 } 2884 2885 /* add the entry to the device based hash */ 2886 if (!nhi->fdb_nh) 2887 nexthop_devhash_add(net, nhi); 2888 2889 rcu_assign_pointer(nh->nh_info, nhi); 2890 2891 return nh; 2892 } 2893 2894 /* called with rtnl lock held */ 2895 static struct nexthop *nexthop_add(struct net *net, struct nh_config *cfg, 2896 struct netlink_ext_ack *extack) 2897 { 2898 struct nexthop *nh; 2899 int err; 2900 2901 if (cfg->nlflags & NLM_F_REPLACE && !cfg->nh_id) { 2902 NL_SET_ERR_MSG(extack, "Replace requires nexthop id"); 2903 return ERR_PTR(-EINVAL); 2904 } 2905 2906 if (!cfg->nh_id) { 2907 cfg->nh_id = nh_find_unused_id(net); 2908 if (!cfg->nh_id) { 2909 NL_SET_ERR_MSG(extack, "No unused id"); 2910 return ERR_PTR(-EINVAL); 2911 } 2912 } 2913 2914 if (cfg->nh_grp) 2915 nh = nexthop_create_group(net, cfg); 2916 else 2917 nh = nexthop_create(net, cfg, extack); 2918 2919 if (IS_ERR(nh)) 2920 return nh; 2921 2922 refcount_set(&nh->refcnt, 1); 2923 nh->id = cfg->nh_id; 2924 nh->protocol = cfg->nh_protocol; 2925 nh->net = net; 2926 2927 err = insert_nexthop(net, nh, cfg, extack); 2928 if (err) { 2929 __remove_nexthop(net, nh, NULL); 2930 nexthop_put(nh); 2931 nh = ERR_PTR(err); 2932 } 2933 2934 return nh; 2935 } 2936 2937 static int rtm_nh_get_timer(struct nlattr *attr, unsigned long fallback, 2938 unsigned long *timer_p, bool *has_p, 2939 struct netlink_ext_ack *extack) 2940 { 2941 unsigned long timer; 2942 u32 value; 2943 2944 if (!attr) { 2945 *timer_p = fallback; 2946 *has_p = false; 2947 return 0; 2948 } 2949 2950 value = nla_get_u32(attr); 2951 timer = clock_t_to_jiffies(value); 2952 if (timer == ~0UL) { 2953 NL_SET_ERR_MSG(extack, "Timer value too large"); 2954 return -EINVAL; 2955 } 2956 2957 *timer_p = timer; 2958 *has_p = true; 2959 return 0; 2960 } 2961 2962 static int rtm_to_nh_config_grp_res(struct nlattr *res, struct nh_config *cfg, 2963 struct netlink_ext_ack *extack) 2964 { 2965 struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_policy_new)] = {}; 2966 int err; 2967 2968 if (res) { 2969 err = nla_parse_nested(tb, 2970 ARRAY_SIZE(rtm_nh_res_policy_new) - 1, 2971 res, rtm_nh_res_policy_new, extack); 2972 if (err < 0) 2973 return err; 2974 } 2975 2976 if (tb[NHA_RES_GROUP_BUCKETS]) { 2977 cfg->nh_grp_res_num_buckets = 2978 nla_get_u16(tb[NHA_RES_GROUP_BUCKETS]); 2979 cfg->nh_grp_res_has_num_buckets = true; 2980 if (!cfg->nh_grp_res_num_buckets) { 2981 NL_SET_ERR_MSG(extack, "Number of buckets needs to be non-0"); 2982 return -EINVAL; 2983 } 2984 } 2985 2986 err = rtm_nh_get_timer(tb[NHA_RES_GROUP_IDLE_TIMER], 2987 NH_RES_DEFAULT_IDLE_TIMER, 2988 &cfg->nh_grp_res_idle_timer, 2989 &cfg->nh_grp_res_has_idle_timer, 2990 extack); 2991 if (err) 2992 return err; 2993 2994 return rtm_nh_get_timer(tb[NHA_RES_GROUP_UNBALANCED_TIMER], 2995 NH_RES_DEFAULT_UNBALANCED_TIMER, 2996 &cfg->nh_grp_res_unbalanced_timer, 2997 &cfg->nh_grp_res_has_unbalanced_timer, 2998 extack); 2999 } 3000 3001 static int rtm_to_nh_config(struct net *net, struct sk_buff *skb, 3002 struct nlmsghdr *nlh, struct nh_config *cfg, 3003 struct netlink_ext_ack *extack) 3004 { 3005 struct nhmsg *nhm = nlmsg_data(nlh); 3006 struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_new)]; 3007 int err; 3008 3009 err = nlmsg_parse(nlh, sizeof(*nhm), tb, 3010 ARRAY_SIZE(rtm_nh_policy_new) - 1, 3011 rtm_nh_policy_new, extack); 3012 if (err < 0) 3013 return err; 3014 3015 err = -EINVAL; 3016 if (nhm->resvd || nhm->nh_scope) { 3017 NL_SET_ERR_MSG(extack, "Invalid values in ancillary header"); 3018 goto out; 3019 } 3020 if (nhm->nh_flags & ~NEXTHOP_VALID_USER_FLAGS) { 3021 NL_SET_ERR_MSG(extack, "Invalid nexthop flags in ancillary header"); 3022 goto out; 3023 } 3024 3025 switch (nhm->nh_family) { 3026 case AF_INET: 3027 case AF_INET6: 3028 break; 3029 case AF_UNSPEC: 3030 if (tb[NHA_GROUP]) 3031 break; 3032 fallthrough; 3033 default: 3034 NL_SET_ERR_MSG(extack, "Invalid address family"); 3035 goto out; 3036 } 3037 3038 memset(cfg, 0, sizeof(*cfg)); 3039 cfg->nlflags = nlh->nlmsg_flags; 3040 cfg->nlinfo.portid = NETLINK_CB(skb).portid; 3041 cfg->nlinfo.nlh = nlh; 3042 cfg->nlinfo.nl_net = net; 3043 3044 cfg->nh_family = nhm->nh_family; 3045 cfg->nh_protocol = nhm->nh_protocol; 3046 cfg->nh_flags = nhm->nh_flags; 3047 3048 if (tb[NHA_ID]) 3049 cfg->nh_id = nla_get_u32(tb[NHA_ID]); 3050 3051 if (tb[NHA_FDB]) { 3052 if (tb[NHA_OIF] || tb[NHA_BLACKHOLE] || 3053 tb[NHA_ENCAP] || tb[NHA_ENCAP_TYPE]) { 3054 NL_SET_ERR_MSG(extack, "Fdb attribute can not be used with encap, oif or blackhole"); 3055 goto out; 3056 } 3057 if (nhm->nh_flags) { 3058 NL_SET_ERR_MSG(extack, "Unsupported nexthop flags in ancillary header"); 3059 goto out; 3060 } 3061 cfg->nh_fdb = nla_get_flag(tb[NHA_FDB]); 3062 } 3063 3064 if (tb[NHA_GROUP]) { 3065 if (nhm->nh_family != AF_UNSPEC) { 3066 NL_SET_ERR_MSG(extack, "Invalid family for group"); 3067 goto out; 3068 } 3069 cfg->nh_grp = tb[NHA_GROUP]; 3070 3071 cfg->nh_grp_type = NEXTHOP_GRP_TYPE_MPATH; 3072 if (tb[NHA_GROUP_TYPE]) 3073 cfg->nh_grp_type = nla_get_u16(tb[NHA_GROUP_TYPE]); 3074 3075 if (cfg->nh_grp_type > NEXTHOP_GRP_TYPE_MAX) { 3076 NL_SET_ERR_MSG(extack, "Invalid group type"); 3077 goto out; 3078 } 3079 err = nh_check_attr_group(net, tb, ARRAY_SIZE(tb), 3080 cfg->nh_grp_type, extack); 3081 if (err) 3082 goto out; 3083 3084 if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES) 3085 err = rtm_to_nh_config_grp_res(tb[NHA_RES_GROUP], 3086 cfg, extack); 3087 3088 if (tb[NHA_HW_STATS_ENABLE]) 3089 cfg->nh_hw_stats = nla_get_u32(tb[NHA_HW_STATS_ENABLE]); 3090 3091 /* no other attributes should be set */ 3092 goto out; 3093 } 3094 3095 if (tb[NHA_BLACKHOLE]) { 3096 if (tb[NHA_GATEWAY] || tb[NHA_OIF] || 3097 tb[NHA_ENCAP] || tb[NHA_ENCAP_TYPE] || tb[NHA_FDB]) { 3098 NL_SET_ERR_MSG(extack, "Blackhole attribute can not be used with gateway, oif, encap or fdb"); 3099 goto out; 3100 } 3101 3102 cfg->nh_blackhole = 1; 3103 err = 0; 3104 goto out; 3105 } 3106 3107 if (!cfg->nh_fdb && !tb[NHA_OIF]) { 3108 NL_SET_ERR_MSG(extack, "Device attribute required for non-blackhole and non-fdb nexthops"); 3109 goto out; 3110 } 3111 3112 if (!cfg->nh_fdb && tb[NHA_OIF]) { 3113 cfg->nh_ifindex = nla_get_u32(tb[NHA_OIF]); 3114 if (cfg->nh_ifindex) 3115 cfg->dev = __dev_get_by_index(net, cfg->nh_ifindex); 3116 3117 if (!cfg->dev) { 3118 NL_SET_ERR_MSG(extack, "Invalid device index"); 3119 goto out; 3120 } else if (!(cfg->dev->flags & IFF_UP)) { 3121 NL_SET_ERR_MSG(extack, "Nexthop device is not up"); 3122 err = -ENETDOWN; 3123 goto out; 3124 } else if (!netif_carrier_ok(cfg->dev)) { 3125 NL_SET_ERR_MSG(extack, "Carrier for nexthop device is down"); 3126 err = -ENETDOWN; 3127 goto out; 3128 } 3129 } 3130 3131 err = -EINVAL; 3132 if (tb[NHA_GATEWAY]) { 3133 struct nlattr *gwa = tb[NHA_GATEWAY]; 3134 3135 switch (cfg->nh_family) { 3136 case AF_INET: 3137 if (nla_len(gwa) != sizeof(u32)) { 3138 NL_SET_ERR_MSG(extack, "Invalid gateway"); 3139 goto out; 3140 } 3141 cfg->gw.ipv4 = nla_get_be32(gwa); 3142 break; 3143 case AF_INET6: 3144 if (nla_len(gwa) != sizeof(struct in6_addr)) { 3145 NL_SET_ERR_MSG(extack, "Invalid gateway"); 3146 goto out; 3147 } 3148 cfg->gw.ipv6 = nla_get_in6_addr(gwa); 3149 break; 3150 default: 3151 NL_SET_ERR_MSG(extack, 3152 "Unknown address family for gateway"); 3153 goto out; 3154 } 3155 } else { 3156 /* device only nexthop (no gateway) */ 3157 if (cfg->nh_flags & RTNH_F_ONLINK) { 3158 NL_SET_ERR_MSG(extack, 3159 "ONLINK flag can not be set for nexthop without a gateway"); 3160 goto out; 3161 } 3162 } 3163 3164 if (tb[NHA_ENCAP]) { 3165 cfg->nh_encap = tb[NHA_ENCAP]; 3166 3167 if (!tb[NHA_ENCAP_TYPE]) { 3168 NL_SET_ERR_MSG(extack, "LWT encapsulation type is missing"); 3169 goto out; 3170 } 3171 3172 cfg->nh_encap_type = nla_get_u16(tb[NHA_ENCAP_TYPE]); 3173 err = lwtunnel_valid_encap_type(cfg->nh_encap_type, extack); 3174 if (err < 0) 3175 goto out; 3176 3177 } else if (tb[NHA_ENCAP_TYPE]) { 3178 NL_SET_ERR_MSG(extack, "LWT encapsulation attribute is missing"); 3179 goto out; 3180 } 3181 3182 if (tb[NHA_HW_STATS_ENABLE]) { 3183 NL_SET_ERR_MSG(extack, "Cannot enable nexthop hardware statistics for non-group nexthops"); 3184 goto out; 3185 } 3186 3187 err = 0; 3188 out: 3189 return err; 3190 } 3191 3192 /* rtnl */ 3193 static int rtm_new_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh, 3194 struct netlink_ext_ack *extack) 3195 { 3196 struct net *net = sock_net(skb->sk); 3197 struct nh_config cfg; 3198 struct nexthop *nh; 3199 int err; 3200 3201 err = rtm_to_nh_config(net, skb, nlh, &cfg, extack); 3202 if (!err) { 3203 nh = nexthop_add(net, &cfg, extack); 3204 if (IS_ERR(nh)) 3205 err = PTR_ERR(nh); 3206 } 3207 3208 return err; 3209 } 3210 3211 static int nh_valid_get_del_req(const struct nlmsghdr *nlh, 3212 struct nlattr **tb, u32 *id, u32 *op_flags, 3213 struct netlink_ext_ack *extack) 3214 { 3215 struct nhmsg *nhm = nlmsg_data(nlh); 3216 3217 if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) { 3218 NL_SET_ERR_MSG(extack, "Invalid values in header"); 3219 return -EINVAL; 3220 } 3221 3222 if (!tb[NHA_ID]) { 3223 NL_SET_ERR_MSG(extack, "Nexthop id is missing"); 3224 return -EINVAL; 3225 } 3226 3227 *id = nla_get_u32(tb[NHA_ID]); 3228 if (!(*id)) { 3229 NL_SET_ERR_MSG(extack, "Invalid nexthop id"); 3230 return -EINVAL; 3231 } 3232 3233 if (tb[NHA_OP_FLAGS]) 3234 *op_flags = nla_get_u32(tb[NHA_OP_FLAGS]); 3235 else 3236 *op_flags = 0; 3237 3238 return 0; 3239 } 3240 3241 /* rtnl */ 3242 static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh, 3243 struct netlink_ext_ack *extack) 3244 { 3245 struct net *net = sock_net(skb->sk); 3246 struct nlattr *tb[NHA_MAX + 1]; 3247 struct nl_info nlinfo = { 3248 .nlh = nlh, 3249 .nl_net = net, 3250 .portid = NETLINK_CB(skb).portid, 3251 }; 3252 struct nexthop *nh; 3253 u32 op_flags; 3254 int err; 3255 u32 id; 3256 3257 err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb, NHA_MAX, 3258 rtm_nh_policy_del, extack); 3259 if (err < 0) 3260 return err; 3261 3262 err = nh_valid_get_del_req(nlh, tb, &id, &op_flags, extack); 3263 if (err) 3264 return err; 3265 3266 nh = nexthop_find_by_id(net, id); 3267 if (!nh) 3268 return -ENOENT; 3269 3270 remove_nexthop(net, nh, &nlinfo); 3271 3272 return 0; 3273 } 3274 3275 /* rtnl */ 3276 static int rtm_get_nexthop(struct sk_buff *in_skb, struct nlmsghdr *nlh, 3277 struct netlink_ext_ack *extack) 3278 { 3279 struct net *net = sock_net(in_skb->sk); 3280 struct nlattr *tb[NHA_MAX + 1]; 3281 struct sk_buff *skb = NULL; 3282 struct nexthop *nh; 3283 u32 op_flags; 3284 int err; 3285 u32 id; 3286 3287 err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb, NHA_MAX, 3288 rtm_nh_policy_get, extack); 3289 if (err < 0) 3290 return err; 3291 3292 err = nh_valid_get_del_req(nlh, tb, &id, &op_flags, extack); 3293 if (err) 3294 return err; 3295 3296 err = -ENOBUFS; 3297 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 3298 if (!skb) 3299 goto out; 3300 3301 err = -ENOENT; 3302 nh = nexthop_find_by_id(net, id); 3303 if (!nh) 3304 goto errout_free; 3305 3306 err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP, NETLINK_CB(in_skb).portid, 3307 nlh->nlmsg_seq, 0, op_flags); 3308 if (err < 0) { 3309 WARN_ON(err == -EMSGSIZE); 3310 goto errout_free; 3311 } 3312 3313 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); 3314 out: 3315 return err; 3316 errout_free: 3317 kfree_skb(skb); 3318 goto out; 3319 } 3320 3321 struct nh_dump_filter { 3322 u32 nh_id; 3323 int dev_idx; 3324 int master_idx; 3325 bool group_filter; 3326 bool fdb_filter; 3327 u32 res_bucket_nh_id; 3328 u32 op_flags; 3329 }; 3330 3331 static bool nh_dump_filtered(struct nexthop *nh, 3332 struct nh_dump_filter *filter, u8 family) 3333 { 3334 const struct net_device *dev; 3335 const struct nh_info *nhi; 3336 3337 if (filter->group_filter && !nh->is_group) 3338 return true; 3339 3340 if (!filter->dev_idx && !filter->master_idx && !family) 3341 return false; 3342 3343 if (nh->is_group) 3344 return true; 3345 3346 nhi = rtnl_dereference(nh->nh_info); 3347 if (family && nhi->family != family) 3348 return true; 3349 3350 dev = nhi->fib_nhc.nhc_dev; 3351 if (filter->dev_idx && (!dev || dev->ifindex != filter->dev_idx)) 3352 return true; 3353 3354 if (filter->master_idx) { 3355 struct net_device *master; 3356 3357 if (!dev) 3358 return true; 3359 3360 master = netdev_master_upper_dev_get((struct net_device *)dev); 3361 if (!master || master->ifindex != filter->master_idx) 3362 return true; 3363 } 3364 3365 return false; 3366 } 3367 3368 static int __nh_valid_dump_req(const struct nlmsghdr *nlh, struct nlattr **tb, 3369 struct nh_dump_filter *filter, 3370 struct netlink_ext_ack *extack) 3371 { 3372 struct nhmsg *nhm; 3373 u32 idx; 3374 3375 if (tb[NHA_OIF]) { 3376 idx = nla_get_u32(tb[NHA_OIF]); 3377 if (idx > INT_MAX) { 3378 NL_SET_ERR_MSG(extack, "Invalid device index"); 3379 return -EINVAL; 3380 } 3381 filter->dev_idx = idx; 3382 } 3383 if (tb[NHA_MASTER]) { 3384 idx = nla_get_u32(tb[NHA_MASTER]); 3385 if (idx > INT_MAX) { 3386 NL_SET_ERR_MSG(extack, "Invalid master device index"); 3387 return -EINVAL; 3388 } 3389 filter->master_idx = idx; 3390 } 3391 filter->group_filter = nla_get_flag(tb[NHA_GROUPS]); 3392 filter->fdb_filter = nla_get_flag(tb[NHA_FDB]); 3393 3394 nhm = nlmsg_data(nlh); 3395 if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) { 3396 NL_SET_ERR_MSG(extack, "Invalid values in header for nexthop dump request"); 3397 return -EINVAL; 3398 } 3399 3400 if (tb[NHA_OP_FLAGS]) 3401 filter->op_flags = nla_get_u32(tb[NHA_OP_FLAGS]); 3402 else 3403 filter->op_flags = 0; 3404 3405 return 0; 3406 } 3407 3408 static int nh_valid_dump_req(const struct nlmsghdr *nlh, 3409 struct nh_dump_filter *filter, 3410 struct netlink_callback *cb) 3411 { 3412 struct nlattr *tb[NHA_MAX + 1]; 3413 int err; 3414 3415 err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb, NHA_MAX, 3416 rtm_nh_policy_dump, cb->extack); 3417 if (err < 0) 3418 return err; 3419 3420 return __nh_valid_dump_req(nlh, tb, filter, cb->extack); 3421 } 3422 3423 struct rtm_dump_nh_ctx { 3424 u32 idx; 3425 }; 3426 3427 static struct rtm_dump_nh_ctx * 3428 rtm_dump_nh_ctx(struct netlink_callback *cb) 3429 { 3430 struct rtm_dump_nh_ctx *ctx = (void *)cb->ctx; 3431 3432 BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx)); 3433 return ctx; 3434 } 3435 3436 static int rtm_dump_walk_nexthops(struct sk_buff *skb, 3437 struct netlink_callback *cb, 3438 struct rb_root *root, 3439 struct rtm_dump_nh_ctx *ctx, 3440 int (*nh_cb)(struct sk_buff *skb, 3441 struct netlink_callback *cb, 3442 struct nexthop *nh, void *data), 3443 void *data) 3444 { 3445 struct rb_node *node; 3446 int s_idx; 3447 int err; 3448 3449 s_idx = ctx->idx; 3450 for (node = rb_first(root); node; node = rb_next(node)) { 3451 struct nexthop *nh; 3452 3453 nh = rb_entry(node, struct nexthop, rb_node); 3454 if (nh->id < s_idx) 3455 continue; 3456 3457 ctx->idx = nh->id; 3458 err = nh_cb(skb, cb, nh, data); 3459 if (err) 3460 return err; 3461 } 3462 3463 return 0; 3464 } 3465 3466 static int rtm_dump_nexthop_cb(struct sk_buff *skb, struct netlink_callback *cb, 3467 struct nexthop *nh, void *data) 3468 { 3469 struct nhmsg *nhm = nlmsg_data(cb->nlh); 3470 struct nh_dump_filter *filter = data; 3471 3472 if (nh_dump_filtered(nh, filter, nhm->nh_family)) 3473 return 0; 3474 3475 return nh_fill_node(skb, nh, RTM_NEWNEXTHOP, 3476 NETLINK_CB(cb->skb).portid, 3477 cb->nlh->nlmsg_seq, NLM_F_MULTI, filter->op_flags); 3478 } 3479 3480 /* rtnl */ 3481 static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb) 3482 { 3483 struct rtm_dump_nh_ctx *ctx = rtm_dump_nh_ctx(cb); 3484 struct net *net = sock_net(skb->sk); 3485 struct rb_root *root = &net->nexthop.rb_root; 3486 struct nh_dump_filter filter = {}; 3487 int err; 3488 3489 err = nh_valid_dump_req(cb->nlh, &filter, cb); 3490 if (err < 0) 3491 return err; 3492 3493 err = rtm_dump_walk_nexthops(skb, cb, root, ctx, 3494 &rtm_dump_nexthop_cb, &filter); 3495 3496 cb->seq = net->nexthop.seq; 3497 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 3498 return err; 3499 } 3500 3501 static struct nexthop * 3502 nexthop_find_group_resilient(struct net *net, u32 id, 3503 struct netlink_ext_ack *extack) 3504 { 3505 struct nh_group *nhg; 3506 struct nexthop *nh; 3507 3508 nh = nexthop_find_by_id(net, id); 3509 if (!nh) 3510 return ERR_PTR(-ENOENT); 3511 3512 if (!nh->is_group) { 3513 NL_SET_ERR_MSG(extack, "Not a nexthop group"); 3514 return ERR_PTR(-EINVAL); 3515 } 3516 3517 nhg = rtnl_dereference(nh->nh_grp); 3518 if (!nhg->resilient) { 3519 NL_SET_ERR_MSG(extack, "Nexthop group not of type resilient"); 3520 return ERR_PTR(-EINVAL); 3521 } 3522 3523 return nh; 3524 } 3525 3526 static int nh_valid_dump_nhid(struct nlattr *attr, u32 *nh_id_p, 3527 struct netlink_ext_ack *extack) 3528 { 3529 u32 idx; 3530 3531 if (attr) { 3532 idx = nla_get_u32(attr); 3533 if (!idx) { 3534 NL_SET_ERR_MSG(extack, "Invalid nexthop id"); 3535 return -EINVAL; 3536 } 3537 *nh_id_p = idx; 3538 } else { 3539 *nh_id_p = 0; 3540 } 3541 3542 return 0; 3543 } 3544 3545 static int nh_valid_dump_bucket_req(const struct nlmsghdr *nlh, 3546 struct nh_dump_filter *filter, 3547 struct netlink_callback *cb) 3548 { 3549 struct nlattr *res_tb[ARRAY_SIZE(rtm_nh_res_bucket_policy_dump)]; 3550 struct nlattr *tb[NHA_MAX + 1]; 3551 int err; 3552 3553 err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb, NHA_MAX, 3554 rtm_nh_policy_dump_bucket, NULL); 3555 if (err < 0) 3556 return err; 3557 3558 err = nh_valid_dump_nhid(tb[NHA_ID], &filter->nh_id, cb->extack); 3559 if (err) 3560 return err; 3561 3562 if (tb[NHA_RES_BUCKET]) { 3563 size_t max = ARRAY_SIZE(rtm_nh_res_bucket_policy_dump) - 1; 3564 3565 err = nla_parse_nested(res_tb, max, 3566 tb[NHA_RES_BUCKET], 3567 rtm_nh_res_bucket_policy_dump, 3568 cb->extack); 3569 if (err < 0) 3570 return err; 3571 3572 err = nh_valid_dump_nhid(res_tb[NHA_RES_BUCKET_NH_ID], 3573 &filter->res_bucket_nh_id, 3574 cb->extack); 3575 if (err) 3576 return err; 3577 } 3578 3579 return __nh_valid_dump_req(nlh, tb, filter, cb->extack); 3580 } 3581 3582 struct rtm_dump_res_bucket_ctx { 3583 struct rtm_dump_nh_ctx nh; 3584 u16 bucket_index; 3585 }; 3586 3587 static struct rtm_dump_res_bucket_ctx * 3588 rtm_dump_res_bucket_ctx(struct netlink_callback *cb) 3589 { 3590 struct rtm_dump_res_bucket_ctx *ctx = (void *)cb->ctx; 3591 3592 BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx)); 3593 return ctx; 3594 } 3595 3596 struct rtm_dump_nexthop_bucket_data { 3597 struct rtm_dump_res_bucket_ctx *ctx; 3598 struct nh_dump_filter filter; 3599 }; 3600 3601 static int rtm_dump_nexthop_bucket_nh(struct sk_buff *skb, 3602 struct netlink_callback *cb, 3603 struct nexthop *nh, 3604 struct rtm_dump_nexthop_bucket_data *dd) 3605 { 3606 u32 portid = NETLINK_CB(cb->skb).portid; 3607 struct nhmsg *nhm = nlmsg_data(cb->nlh); 3608 struct nh_res_table *res_table; 3609 struct nh_group *nhg; 3610 u16 bucket_index; 3611 int err; 3612 3613 nhg = rtnl_dereference(nh->nh_grp); 3614 res_table = rtnl_dereference(nhg->res_table); 3615 for (bucket_index = dd->ctx->bucket_index; 3616 bucket_index < res_table->num_nh_buckets; 3617 bucket_index++) { 3618 struct nh_res_bucket *bucket; 3619 struct nh_grp_entry *nhge; 3620 3621 bucket = &res_table->nh_buckets[bucket_index]; 3622 nhge = rtnl_dereference(bucket->nh_entry); 3623 if (nh_dump_filtered(nhge->nh, &dd->filter, nhm->nh_family)) 3624 continue; 3625 3626 if (dd->filter.res_bucket_nh_id && 3627 dd->filter.res_bucket_nh_id != nhge->nh->id) 3628 continue; 3629 3630 dd->ctx->bucket_index = bucket_index; 3631 err = nh_fill_res_bucket(skb, nh, bucket, bucket_index, 3632 RTM_NEWNEXTHOPBUCKET, portid, 3633 cb->nlh->nlmsg_seq, NLM_F_MULTI, 3634 cb->extack); 3635 if (err) 3636 return err; 3637 } 3638 3639 dd->ctx->bucket_index = 0; 3640 3641 return 0; 3642 } 3643 3644 static int rtm_dump_nexthop_bucket_cb(struct sk_buff *skb, 3645 struct netlink_callback *cb, 3646 struct nexthop *nh, void *data) 3647 { 3648 struct rtm_dump_nexthop_bucket_data *dd = data; 3649 struct nh_group *nhg; 3650 3651 if (!nh->is_group) 3652 return 0; 3653 3654 nhg = rtnl_dereference(nh->nh_grp); 3655 if (!nhg->resilient) 3656 return 0; 3657 3658 return rtm_dump_nexthop_bucket_nh(skb, cb, nh, dd); 3659 } 3660 3661 /* rtnl */ 3662 static int rtm_dump_nexthop_bucket(struct sk_buff *skb, 3663 struct netlink_callback *cb) 3664 { 3665 struct rtm_dump_res_bucket_ctx *ctx = rtm_dump_res_bucket_ctx(cb); 3666 struct rtm_dump_nexthop_bucket_data dd = { .ctx = ctx }; 3667 struct net *net = sock_net(skb->sk); 3668 struct nexthop *nh; 3669 int err; 3670 3671 err = nh_valid_dump_bucket_req(cb->nlh, &dd.filter, cb); 3672 if (err) 3673 return err; 3674 3675 if (dd.filter.nh_id) { 3676 nh = nexthop_find_group_resilient(net, dd.filter.nh_id, 3677 cb->extack); 3678 if (IS_ERR(nh)) 3679 return PTR_ERR(nh); 3680 err = rtm_dump_nexthop_bucket_nh(skb, cb, nh, &dd); 3681 } else { 3682 struct rb_root *root = &net->nexthop.rb_root; 3683 3684 err = rtm_dump_walk_nexthops(skb, cb, root, &ctx->nh, 3685 &rtm_dump_nexthop_bucket_cb, &dd); 3686 } 3687 3688 cb->seq = net->nexthop.seq; 3689 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 3690 return err; 3691 } 3692 3693 static int nh_valid_get_bucket_req_res_bucket(struct nlattr *res, 3694 u16 *bucket_index, 3695 struct netlink_ext_ack *extack) 3696 { 3697 struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_bucket_policy_get)]; 3698 int err; 3699 3700 err = nla_parse_nested(tb, ARRAY_SIZE(rtm_nh_res_bucket_policy_get) - 1, 3701 res, rtm_nh_res_bucket_policy_get, extack); 3702 if (err < 0) 3703 return err; 3704 3705 if (!tb[NHA_RES_BUCKET_INDEX]) { 3706 NL_SET_ERR_MSG(extack, "Bucket index is missing"); 3707 return -EINVAL; 3708 } 3709 3710 *bucket_index = nla_get_u16(tb[NHA_RES_BUCKET_INDEX]); 3711 return 0; 3712 } 3713 3714 static int nh_valid_get_bucket_req(const struct nlmsghdr *nlh, 3715 u32 *id, u16 *bucket_index, 3716 struct netlink_ext_ack *extack) 3717 { 3718 struct nlattr *tb[NHA_MAX + 1]; 3719 u32 op_flags; 3720 int err; 3721 3722 err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb, NHA_MAX, 3723 rtm_nh_policy_get_bucket, extack); 3724 if (err < 0) 3725 return err; 3726 3727 err = nh_valid_get_del_req(nlh, tb, id, &op_flags, extack); 3728 if (err) 3729 return err; 3730 3731 if (!tb[NHA_RES_BUCKET]) { 3732 NL_SET_ERR_MSG(extack, "Bucket information is missing"); 3733 return -EINVAL; 3734 } 3735 3736 err = nh_valid_get_bucket_req_res_bucket(tb[NHA_RES_BUCKET], 3737 bucket_index, extack); 3738 if (err) 3739 return err; 3740 3741 return 0; 3742 } 3743 3744 /* rtnl */ 3745 static int rtm_get_nexthop_bucket(struct sk_buff *in_skb, struct nlmsghdr *nlh, 3746 struct netlink_ext_ack *extack) 3747 { 3748 struct net *net = sock_net(in_skb->sk); 3749 struct nh_res_table *res_table; 3750 struct sk_buff *skb = NULL; 3751 struct nh_group *nhg; 3752 struct nexthop *nh; 3753 u16 bucket_index; 3754 int err; 3755 u32 id; 3756 3757 err = nh_valid_get_bucket_req(nlh, &id, &bucket_index, extack); 3758 if (err) 3759 return err; 3760 3761 nh = nexthop_find_group_resilient(net, id, extack); 3762 if (IS_ERR(nh)) 3763 return PTR_ERR(nh); 3764 3765 nhg = rtnl_dereference(nh->nh_grp); 3766 res_table = rtnl_dereference(nhg->res_table); 3767 if (bucket_index >= res_table->num_nh_buckets) { 3768 NL_SET_ERR_MSG(extack, "Bucket index out of bounds"); 3769 return -ENOENT; 3770 } 3771 3772 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 3773 if (!skb) 3774 return -ENOBUFS; 3775 3776 err = nh_fill_res_bucket(skb, nh, &res_table->nh_buckets[bucket_index], 3777 bucket_index, RTM_NEWNEXTHOPBUCKET, 3778 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, 3779 0, extack); 3780 if (err < 0) { 3781 WARN_ON(err == -EMSGSIZE); 3782 goto errout_free; 3783 } 3784 3785 return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); 3786 3787 errout_free: 3788 kfree_skb(skb); 3789 return err; 3790 } 3791 3792 static void nexthop_sync_mtu(struct net_device *dev, u32 orig_mtu) 3793 { 3794 unsigned int hash = nh_dev_hashfn(dev->ifindex); 3795 struct net *net = dev_net(dev); 3796 struct hlist_head *head = &net->nexthop.devhash[hash]; 3797 struct hlist_node *n; 3798 struct nh_info *nhi; 3799 3800 hlist_for_each_entry_safe(nhi, n, head, dev_hash) { 3801 if (nhi->fib_nhc.nhc_dev == dev) { 3802 if (nhi->family == AF_INET) 3803 fib_nhc_update_mtu(&nhi->fib_nhc, dev->mtu, 3804 orig_mtu); 3805 } 3806 } 3807 } 3808 3809 /* rtnl */ 3810 static int nh_netdev_event(struct notifier_block *this, 3811 unsigned long event, void *ptr) 3812 { 3813 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 3814 struct netdev_notifier_info_ext *info_ext; 3815 3816 switch (event) { 3817 case NETDEV_DOWN: 3818 case NETDEV_UNREGISTER: 3819 nexthop_flush_dev(dev, event); 3820 break; 3821 case NETDEV_CHANGE: 3822 if (!(dev_get_flags(dev) & (IFF_RUNNING | IFF_LOWER_UP))) 3823 nexthop_flush_dev(dev, event); 3824 break; 3825 case NETDEV_CHANGEMTU: 3826 info_ext = ptr; 3827 nexthop_sync_mtu(dev, info_ext->ext.mtu); 3828 rt_cache_flush(dev_net(dev)); 3829 break; 3830 } 3831 return NOTIFY_DONE; 3832 } 3833 3834 static struct notifier_block nh_netdev_notifier = { 3835 .notifier_call = nh_netdev_event, 3836 }; 3837 3838 static int nexthops_dump(struct net *net, struct notifier_block *nb, 3839 enum nexthop_event_type event_type, 3840 struct netlink_ext_ack *extack) 3841 { 3842 struct rb_root *root = &net->nexthop.rb_root; 3843 struct rb_node *node; 3844 int err = 0; 3845 3846 for (node = rb_first(root); node; node = rb_next(node)) { 3847 struct nexthop *nh; 3848 3849 nh = rb_entry(node, struct nexthop, rb_node); 3850 err = call_nexthop_notifier(nb, net, event_type, nh, extack); 3851 if (err) 3852 break; 3853 } 3854 3855 return err; 3856 } 3857 3858 int register_nexthop_notifier(struct net *net, struct notifier_block *nb, 3859 struct netlink_ext_ack *extack) 3860 { 3861 int err; 3862 3863 rtnl_lock(); 3864 err = nexthops_dump(net, nb, NEXTHOP_EVENT_REPLACE, extack); 3865 if (err) 3866 goto unlock; 3867 err = blocking_notifier_chain_register(&net->nexthop.notifier_chain, 3868 nb); 3869 unlock: 3870 rtnl_unlock(); 3871 return err; 3872 } 3873 EXPORT_SYMBOL(register_nexthop_notifier); 3874 3875 int __unregister_nexthop_notifier(struct net *net, struct notifier_block *nb) 3876 { 3877 int err; 3878 3879 err = blocking_notifier_chain_unregister(&net->nexthop.notifier_chain, 3880 nb); 3881 if (!err) 3882 nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL); 3883 return err; 3884 } 3885 EXPORT_SYMBOL(__unregister_nexthop_notifier); 3886 3887 int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb) 3888 { 3889 int err; 3890 3891 rtnl_lock(); 3892 err = __unregister_nexthop_notifier(net, nb); 3893 rtnl_unlock(); 3894 return err; 3895 } 3896 EXPORT_SYMBOL(unregister_nexthop_notifier); 3897 3898 void nexthop_set_hw_flags(struct net *net, u32 id, bool offload, bool trap) 3899 { 3900 struct nexthop *nexthop; 3901 3902 rcu_read_lock(); 3903 3904 nexthop = nexthop_find_by_id(net, id); 3905 if (!nexthop) 3906 goto out; 3907 3908 nexthop->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP); 3909 if (offload) 3910 nexthop->nh_flags |= RTNH_F_OFFLOAD; 3911 if (trap) 3912 nexthop->nh_flags |= RTNH_F_TRAP; 3913 3914 out: 3915 rcu_read_unlock(); 3916 } 3917 EXPORT_SYMBOL(nexthop_set_hw_flags); 3918 3919 void nexthop_bucket_set_hw_flags(struct net *net, u32 id, u16 bucket_index, 3920 bool offload, bool trap) 3921 { 3922 struct nh_res_table *res_table; 3923 struct nh_res_bucket *bucket; 3924 struct nexthop *nexthop; 3925 struct nh_group *nhg; 3926 3927 rcu_read_lock(); 3928 3929 nexthop = nexthop_find_by_id(net, id); 3930 if (!nexthop || !nexthop->is_group) 3931 goto out; 3932 3933 nhg = rcu_dereference(nexthop->nh_grp); 3934 if (!nhg->resilient) 3935 goto out; 3936 3937 if (bucket_index >= nhg->res_table->num_nh_buckets) 3938 goto out; 3939 3940 res_table = rcu_dereference(nhg->res_table); 3941 bucket = &res_table->nh_buckets[bucket_index]; 3942 bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP); 3943 if (offload) 3944 bucket->nh_flags |= RTNH_F_OFFLOAD; 3945 if (trap) 3946 bucket->nh_flags |= RTNH_F_TRAP; 3947 3948 out: 3949 rcu_read_unlock(); 3950 } 3951 EXPORT_SYMBOL(nexthop_bucket_set_hw_flags); 3952 3953 void nexthop_res_grp_activity_update(struct net *net, u32 id, u16 num_buckets, 3954 unsigned long *activity) 3955 { 3956 struct nh_res_table *res_table; 3957 struct nexthop *nexthop; 3958 struct nh_group *nhg; 3959 u16 i; 3960 3961 rcu_read_lock(); 3962 3963 nexthop = nexthop_find_by_id(net, id); 3964 if (!nexthop || !nexthop->is_group) 3965 goto out; 3966 3967 nhg = rcu_dereference(nexthop->nh_grp); 3968 if (!nhg->resilient) 3969 goto out; 3970 3971 /* Instead of silently ignoring some buckets, demand that the sizes 3972 * be the same. 3973 */ 3974 res_table = rcu_dereference(nhg->res_table); 3975 if (num_buckets != res_table->num_nh_buckets) 3976 goto out; 3977 3978 for (i = 0; i < num_buckets; i++) { 3979 if (test_bit(i, activity)) 3980 nh_res_bucket_set_busy(&res_table->nh_buckets[i]); 3981 } 3982 3983 out: 3984 rcu_read_unlock(); 3985 } 3986 EXPORT_SYMBOL(nexthop_res_grp_activity_update); 3987 3988 static void __net_exit nexthop_net_exit_batch_rtnl(struct list_head *net_list, 3989 struct list_head *dev_to_kill) 3990 { 3991 struct net *net; 3992 3993 ASSERT_RTNL(); 3994 list_for_each_entry(net, net_list, exit_list) 3995 flush_all_nexthops(net); 3996 } 3997 3998 static void __net_exit nexthop_net_exit(struct net *net) 3999 { 4000 kfree(net->nexthop.devhash); 4001 net->nexthop.devhash = NULL; 4002 } 4003 4004 static int __net_init nexthop_net_init(struct net *net) 4005 { 4006 size_t sz = sizeof(struct hlist_head) * NH_DEV_HASHSIZE; 4007 4008 net->nexthop.rb_root = RB_ROOT; 4009 net->nexthop.devhash = kzalloc(sz, GFP_KERNEL); 4010 if (!net->nexthop.devhash) 4011 return -ENOMEM; 4012 BLOCKING_INIT_NOTIFIER_HEAD(&net->nexthop.notifier_chain); 4013 4014 return 0; 4015 } 4016 4017 static struct pernet_operations nexthop_net_ops = { 4018 .init = nexthop_net_init, 4019 .exit = nexthop_net_exit, 4020 .exit_batch_rtnl = nexthop_net_exit_batch_rtnl, 4021 }; 4022 4023 static int __init nexthop_init(void) 4024 { 4025 register_pernet_subsys(&nexthop_net_ops); 4026 4027 register_netdevice_notifier(&nh_netdev_notifier); 4028 4029 rtnl_register(PF_UNSPEC, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0); 4030 rtnl_register(PF_UNSPEC, RTM_DELNEXTHOP, rtm_del_nexthop, NULL, 0); 4031 rtnl_register(PF_UNSPEC, RTM_GETNEXTHOP, rtm_get_nexthop, 4032 rtm_dump_nexthop, 0); 4033 4034 rtnl_register(PF_INET, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0); 4035 rtnl_register(PF_INET, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0); 4036 4037 rtnl_register(PF_INET6, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0); 4038 rtnl_register(PF_INET6, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0); 4039 4040 rtnl_register(PF_UNSPEC, RTM_GETNEXTHOPBUCKET, rtm_get_nexthop_bucket, 4041 rtm_dump_nexthop_bucket, 0); 4042 4043 return 0; 4044 } 4045 subsys_initcall(nexthop_init); 4046