// SPDX-License-Identifier: GPL-2.0
/* Generic nexthop implementation
 *
 * Copyright (c) 2017-19 Cumulus Networks
 * Copyright (c) 2017-19 David Ahern <dsa@cumulusnetworks.com>
 */

#include <linux/nexthop.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/lwtunnel.h>
#include <net/ndisc.h>
#include <net/nexthop.h>
#include <net/route.h>
#include <net/sock.h>

#define NH_RES_DEFAULT_IDLE_TIMER	(120 * HZ)
#define NH_RES_DEFAULT_UNBALANCED_TIMER	0	/* No forced rebalancing. */

static void remove_nexthop(struct net *net, struct nexthop *nh,
			   struct nl_info *nlinfo);

#define NH_DEV_HASHBITS  8
#define NH_DEV_HASHSIZE (1U << NH_DEV_HASHBITS)

#define NHA_OP_FLAGS_DUMP_ALL (NHA_OP_FLAG_DUMP_STATS |		\
			       NHA_OP_FLAG_DUMP_HW_STATS)

static const struct nla_policy rtm_nh_policy_new[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_GROUP]		= { .type = NLA_BINARY },
	[NHA_GROUP_TYPE]	= { .type = NLA_U16 },
	[NHA_BLACKHOLE]		= { .type = NLA_FLAG },
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_GATEWAY]		= { .type = NLA_BINARY },
	[NHA_ENCAP_TYPE]	= { .type = NLA_U16 },
	[NHA_ENCAP]		= { .type = NLA_NESTED },
	[NHA_FDB]		= { .type = NLA_FLAG },
	[NHA_RES_GROUP]		= { .type = NLA_NESTED },
	[NHA_HW_STATS_ENABLE]	= NLA_POLICY_MAX(NLA_U32, true),
};

static const struct nla_policy rtm_nh_policy_get[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_OP_FLAGS]		= NLA_POLICY_MASK(NLA_U32,
						  NHA_OP_FLAGS_DUMP_ALL),
};

static const struct nla_policy rtm_nh_policy_del[] = {
	[NHA_ID]		= { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_dump[] = {
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_GROUPS]		= { .type = NLA_FLAG },
	[NHA_MASTER]		= { .type = NLA_U32 },
	[NHA_FDB]		= { .type = NLA_FLAG },
	[NHA_OP_FLAGS]		= NLA_POLICY_MASK(NLA_U32,
						  NHA_OP_FLAGS_DUMP_ALL),
};

static const struct nla_policy rtm_nh_res_policy_new[] = {
	[NHA_RES_GROUP_BUCKETS]			= { .type = NLA_U16 },
	[NHA_RES_GROUP_IDLE_TIMER]		= { .type = NLA_U32 },
	[NHA_RES_GROUP_UNBALANCED_TIMER]	= { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_dump_bucket[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_MASTER]		= { .type = NLA_U32 },
	[NHA_RES_BUCKET]	= { .type = NLA_NESTED },
};

static const struct nla_policy rtm_nh_res_bucket_policy_dump[] = {
	[NHA_RES_BUCKET_NH_ID]	= { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_get_bucket[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_RES_BUCKET]	= { .type = NLA_NESTED },
};

static const struct nla_policy rtm_nh_res_bucket_policy_get[] = {
	[NHA_RES_BUCKET_INDEX]	= { .type = NLA_U16 },
};

static bool nexthop_notifiers_is_empty(struct net *net)
{
	return !net->nexthop.notifier_chain.head;
}

static void
__nh_notifier_single_info_init(struct nh_notifier_single_info *nh_info,
			       const struct nh_info *nhi)
{
	nh_info->dev = nhi->fib_nhc.nhc_dev;
	nh_info->gw_family = nhi->fib_nhc.nhc_gw_family;
	if (nh_info->gw_family == AF_INET)
		nh_info->ipv4 = nhi->fib_nhc.nhc_gw.ipv4;
	else if (nh_info->gw_family == AF_INET6)
		nh_info->ipv6 = nhi->fib_nhc.nhc_gw.ipv6;

	nh_info->id = nhi->nh_parent->id;
	nh_info->is_reject = nhi->reject_nh;
	nh_info->is_fdb = nhi->fdb_nh;
	nh_info->has_encap = !!nhi->fib_nhc.nhc_lwtstate;
}
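/* Only the gateway member that matches ->gw_family set above is
 * initialized; notifier listeners are expected to check gw_family before
 * reading ->ipv4 or ->ipv6.
 */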
static int nh_notifier_single_info_init(struct nh_notifier_info *info,
					const struct nexthop *nh)
{
	struct nh_info *nhi = rtnl_dereference(nh->nh_info);

	info->type = NH_NOTIFIER_INFO_TYPE_SINGLE;
	info->nh = kzalloc(sizeof(*info->nh), GFP_KERNEL);
	if (!info->nh)
		return -ENOMEM;

	__nh_notifier_single_info_init(info->nh, nhi);

	return 0;
}

static void nh_notifier_single_info_fini(struct nh_notifier_info *info)
{
	kfree(info->nh);
}

static int nh_notifier_mpath_info_init(struct nh_notifier_info *info,
					struct nh_group *nhg)
{
	u16 num_nh = nhg->num_nh;
	int i;

	info->type = NH_NOTIFIER_INFO_TYPE_GRP;
	info->nh_grp = kzalloc(struct_size(info->nh_grp, nh_entries, num_nh),
			       GFP_KERNEL);
	if (!info->nh_grp)
		return -ENOMEM;

	info->nh_grp->num_nh = num_nh;
	info->nh_grp->is_fdb = nhg->fdb_nh;
	info->nh_grp->hw_stats = nhg->hw_stats;

	for (i = 0; i < num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		struct nh_info *nhi;

		nhi = rtnl_dereference(nhge->nh->nh_info);
		info->nh_grp->nh_entries[i].weight = nhge->weight;
		__nh_notifier_single_info_init(&info->nh_grp->nh_entries[i].nh,
					       nhi);
	}

	return 0;
}

static int nh_notifier_res_table_info_init(struct nh_notifier_info *info,
					   struct nh_group *nhg)
{
	struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
	u16 num_nh_buckets = res_table->num_nh_buckets;
	unsigned long size;
	u16 i;

	info->type = NH_NOTIFIER_INFO_TYPE_RES_TABLE;
	size = struct_size(info->nh_res_table, nhs, num_nh_buckets);
	info->nh_res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO |
				       __GFP_NOWARN);
	if (!info->nh_res_table)
		return -ENOMEM;

	info->nh_res_table->num_nh_buckets = num_nh_buckets;
	info->nh_res_table->hw_stats = nhg->hw_stats;

	for (i = 0; i < num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		struct nh_grp_entry *nhge;
		struct nh_info *nhi;

		nhge = rtnl_dereference(bucket->nh_entry);
		nhi = rtnl_dereference(nhge->nh->nh_info);
		__nh_notifier_single_info_init(&info->nh_res_table->nhs[i],
					       nhi);
	}

	return 0;
}

static int nh_notifier_grp_info_init(struct nh_notifier_info *info,
				     const struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

	if (nhg->hash_threshold)
		return nh_notifier_mpath_info_init(info, nhg);
	else if (nhg->resilient)
		return nh_notifier_res_table_info_init(info, nhg);
	return -EINVAL;
}

static void nh_notifier_grp_info_fini(struct nh_notifier_info *info,
				      const struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

	if (nhg->hash_threshold)
		kfree(info->nh_grp);
	else if (nhg->resilient)
		vfree(info->nh_res_table);
}

static int nh_notifier_info_init(struct nh_notifier_info *info,
				 const struct nexthop *nh)
{
	info->id = nh->id;

	if (nh->is_group)
		return nh_notifier_grp_info_init(info, nh);
	else
		return nh_notifier_single_info_init(info, nh);
}

static void nh_notifier_info_fini(struct nh_notifier_info *info,
				  const struct nexthop *nh)
{
	if (nh->is_group)
		nh_notifier_grp_info_fini(info, nh);
	else
		nh_notifier_single_info_fini(info);
}
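/* Summary of the notifier info layouts set up above: a single nexthop uses
 * NH_NOTIFIER_INFO_TYPE_SINGLE (kzalloc'd), a hash-threshold group uses
 * NH_NOTIFIER_INFO_TYPE_GRP (kzalloc'd, one entry per path), and a
 * resilient group uses NH_NOTIFIER_INFO_TYPE_RES_TABLE (vmalloc'd, one
 * entry per bucket) - which is why the fini helpers must pair kfree() and
 * vfree() with the matching type.
 */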
static int call_nexthop_notifiers(struct net *net,
				  enum nexthop_event_type event_type,
				  struct nexthop *nh,
				  struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
	};
	int err;

	ASSERT_RTNL();

	if (nexthop_notifiers_is_empty(net))
		return 0;

	err = nh_notifier_info_init(&info, nh);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
		return err;
	}

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   event_type, &info);
	nh_notifier_info_fini(&info, nh);

	return notifier_to_errno(err);
}

static int
nh_notifier_res_bucket_idle_timer_get(const struct nh_notifier_info *info,
				      bool force, unsigned int *p_idle_timer_ms)
{
	struct nh_res_table *res_table;
	struct nh_group *nhg;
	struct nexthop *nh;
	int err = 0;

	/* When 'force' is false, nexthop bucket replacement is performed
	 * because the bucket was deemed to be idle. In this case, capable
	 * listeners can choose to perform an atomic replacement: The bucket is
	 * only replaced if it is inactive. However, if the idle timer interval
	 * is smaller than the interval in which a listener is querying
	 * buckets' activity from the device, then atomic replacement should
	 * not be tried. Pass the idle timer value to listeners, so that they
	 * could determine which type of replacement to perform.
	 */
	if (force) {
		*p_idle_timer_ms = 0;
		return 0;
	}

	rcu_read_lock();

	nh = nexthop_find_by_id(info->net, info->id);
	if (!nh) {
		err = -EINVAL;
		goto out;
	}

	nhg = rcu_dereference(nh->nh_grp);
	res_table = rcu_dereference(nhg->res_table);
	*p_idle_timer_ms = jiffies_to_msecs(res_table->idle_timer);

out:
	rcu_read_unlock();

	return err;
}

static int nh_notifier_res_bucket_info_init(struct nh_notifier_info *info,
					    u16 bucket_index, bool force,
					    struct nh_info *oldi,
					    struct nh_info *newi)
{
	unsigned int idle_timer_ms;
	int err;

	err = nh_notifier_res_bucket_idle_timer_get(info, force,
						    &idle_timer_ms);
	if (err)
		return err;

	info->type = NH_NOTIFIER_INFO_TYPE_RES_BUCKET;
	info->nh_res_bucket = kzalloc(sizeof(*info->nh_res_bucket),
				      GFP_KERNEL);
	if (!info->nh_res_bucket)
		return -ENOMEM;

	info->nh_res_bucket->bucket_index = bucket_index;
	info->nh_res_bucket->idle_timer_ms = idle_timer_ms;
	info->nh_res_bucket->force = force;
	__nh_notifier_single_info_init(&info->nh_res_bucket->old_nh, oldi);
	__nh_notifier_single_info_init(&info->nh_res_bucket->new_nh, newi);
	return 0;
}

static void nh_notifier_res_bucket_info_fini(struct nh_notifier_info *info)
{
	kfree(info->nh_res_bucket);
}

static int __call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
					       u16 bucket_index, bool force,
					       struct nh_info *oldi,
					       struct nh_info *newi,
					       struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
		.id = nhg_id,
	};
	int err;

	if (nexthop_notifiers_is_empty(net))
		return 0;

	err = nh_notifier_res_bucket_info_init(&info, bucket_index, force,
					       oldi, newi);
	if (err)
		return err;

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_BUCKET_REPLACE, &info);
	nh_notifier_res_bucket_info_fini(&info);

	return notifier_to_errno(err);
}
/* There are three users of RES_TABLE, and NHs etc. referenced from there:
 *
 * 1) a collection of callbacks for NH maintenance. This operates under
 *    RTNL,
 * 2) the delayed work that gradually balances the resilient table,
 * 3) and nexthop_select_path(), operating under RCU.
 *
 * Both the delayed work and the RTNL block are writers, and need to
 * maintain mutual exclusion. Since there are only two and well-known
 * writers for each table, the RTNL code can make sure it has exclusive
 * access thus:
 *
 * - Have the DW operate without locking;
 * - synchronously cancel the DW;
 * - do the writing;
 * - if the write was not actually a delete, call upkeep, which schedules
 *   DW again if necessary.
 *
 * The functions that are always called from the RTNL context use
 * rtnl_dereference(). The functions that can also be called from the DW do
 * a raw dereference and rely on the above mutual exclusion scheme.
 */
#define nh_res_dereference(p) (rcu_dereference_raw(p))

static int call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
					     u16 bucket_index, bool force,
					     struct nexthop *old_nh,
					     struct nexthop *new_nh,
					     struct netlink_ext_ack *extack)
{
	struct nh_info *oldi = nh_res_dereference(old_nh->nh_info);
	struct nh_info *newi = nh_res_dereference(new_nh->nh_info);

	return __call_nexthop_res_bucket_notifiers(net, nhg_id, bucket_index,
						   force, oldi, newi, extack);
}

static int call_nexthop_res_table_notifiers(struct net *net, struct nexthop *nh,
					    struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
		.id = nh->id,
	};
	struct nh_group *nhg;
	int err;

	ASSERT_RTNL();

	if (nexthop_notifiers_is_empty(net))
		return 0;

	/* At this point, the nexthop buckets are still not populated. Only
	 * emit a notification with the logical nexthops, so that a listener
	 * could potentially veto it in case of unsupported configuration.
	 */
	nhg = rtnl_dereference(nh->nh_grp);
	err = nh_notifier_mpath_info_init(&info, nhg);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
		return err;
	}

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE,
					   &info);
	kfree(info.nh_grp);

	return notifier_to_errno(err);
}
static int call_nexthop_notifier(struct notifier_block *nb, struct net *net,
				 enum nexthop_event_type event_type,
				 struct nexthop *nh,
				 struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
	};
	int err;

	err = nh_notifier_info_init(&info, nh);
	if (err)
		return err;

	err = nb->notifier_call(nb, event_type, &info);
	nh_notifier_info_fini(&info, nh);

	return notifier_to_errno(err);
}

static unsigned int nh_dev_hashfn(unsigned int val)
{
	unsigned int mask = NH_DEV_HASHSIZE - 1;

	return (val ^
		(val >> NH_DEV_HASHBITS) ^
		(val >> (NH_DEV_HASHBITS * 2))) & mask;
}

static void nexthop_devhash_add(struct net *net, struct nh_info *nhi)
{
	struct net_device *dev = nhi->fib_nhc.nhc_dev;
	struct hlist_head *head;
	unsigned int hash;

	WARN_ON(!dev);

	hash = nh_dev_hashfn(dev->ifindex);
	head = &net->nexthop.devhash[hash];
	hlist_add_head(&nhi->dev_hash, head);
}

static void nexthop_free_group(struct nexthop *nh)
{
	struct nh_group *nhg;
	int i;

	nhg = rcu_dereference_raw(nh->nh_grp);
	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		WARN_ON(!list_empty(&nhge->nh_list));
		free_percpu(nhge->stats);
		nexthop_put(nhge->nh);
	}

	WARN_ON(nhg->spare == nhg);

	if (nhg->resilient)
		vfree(rcu_dereference_raw(nhg->res_table));

	kfree(nhg->spare);
	kfree(nhg);
}

static void nexthop_free_single(struct nexthop *nh)
{
	struct nh_info *nhi;

	nhi = rcu_dereference_raw(nh->nh_info);
	switch (nhi->family) {
	case AF_INET:
		fib_nh_release(nh->net, &nhi->fib_nh);
		break;
	case AF_INET6:
		ipv6_stub->fib6_nh_release(&nhi->fib6_nh);
		break;
	}
	kfree(nhi);
}

void nexthop_free_rcu(struct rcu_head *head)
{
	struct nexthop *nh = container_of(head, struct nexthop, rcu);

	if (nh->is_group)
		nexthop_free_group(nh);
	else
		nexthop_free_single(nh);

	kfree(nh);
}
EXPORT_SYMBOL_GPL(nexthop_free_rcu);

static struct nexthop *nexthop_alloc(void)
{
	struct nexthop *nh;

	nh = kzalloc(sizeof(struct nexthop), GFP_KERNEL);
	if (nh) {
		INIT_LIST_HEAD(&nh->fi_list);
		INIT_LIST_HEAD(&nh->f6i_list);
		INIT_LIST_HEAD(&nh->grp_list);
		INIT_LIST_HEAD(&nh->fdb_list);
	}
	return nh;
}

static struct nh_group *nexthop_grp_alloc(u16 num_nh)
{
	struct nh_group *nhg;

	nhg = kzalloc(struct_size(nhg, nh_entries, num_nh), GFP_KERNEL);
	if (nhg)
		nhg->num_nh = num_nh;

	return nhg;
}

static void nh_res_table_upkeep_dw(struct work_struct *work);

static struct nh_res_table *
nexthop_res_table_alloc(struct net *net, u32 nhg_id, struct nh_config *cfg)
{
	const u16 num_nh_buckets = cfg->nh_grp_res_num_buckets;
	struct nh_res_table *res_table;
	unsigned long size;

	size = struct_size(res_table, nh_buckets, num_nh_buckets);
	res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
	if (!res_table)
		return NULL;

	res_table->net = net;
	res_table->nhg_id = nhg_id;
	INIT_DELAYED_WORK(&res_table->upkeep_dw, &nh_res_table_upkeep_dw);
	INIT_LIST_HEAD(&res_table->uw_nh_entries);
	res_table->idle_timer = cfg->nh_grp_res_idle_timer;
	res_table->unbalanced_timer = cfg->nh_grp_res_unbalanced_timer;
	res_table->num_nh_buckets = num_nh_buckets;
	return res_table;
}
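/* Note on the allocation above: num_nh_buckets is a u16, so a resilient
 * table can have up to 65535 buckets, and the flexible bucket array can
 * grow well past what kmalloc() comfortably serves; hence __vmalloc()
 * with __GFP_NOWARN, falling back to a NULL return on failure.
 */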
static void nh_base_seq_inc(struct net *net)
{
	while (++net->nexthop.seq == 0)
		;
}

/* no reference taken; rcu lock or rtnl must be held */
struct nexthop *nexthop_find_by_id(struct net *net, u32 id)
{
	struct rb_node **pp, *parent = NULL, *next;

	pp = &net->nexthop.rb_root.rb_node;
	while (1) {
		struct nexthop *nh;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;

		nh = rb_entry(parent, struct nexthop, rb_node);
		if (id < nh->id)
			pp = &next->rb_left;
		else if (id > nh->id)
			pp = &next->rb_right;
		else
			return nh;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(nexthop_find_by_id);

/* used for auto id allocation; called with rtnl held */
static u32 nh_find_unused_id(struct net *net)
{
	u32 id_start = net->nexthop.last_id_allocated;

	while (1) {
		net->nexthop.last_id_allocated++;
		if (net->nexthop.last_id_allocated == id_start)
			break;

		if (!nexthop_find_by_id(net, net->nexthop.last_id_allocated))
			return net->nexthop.last_id_allocated;
	}
	return 0;
}

static void nh_res_time_set_deadline(unsigned long next_time,
				     unsigned long *deadline)
{
	if (time_before(next_time, *deadline))
		*deadline = next_time;
}

static clock_t nh_res_table_unbalanced_time(struct nh_res_table *res_table)
{
	if (list_empty(&res_table->uw_nh_entries))
		return 0;
	return jiffies_delta_to_clock_t(jiffies - res_table->unbalanced_since);
}

static int nla_put_nh_group_res(struct sk_buff *skb, struct nh_group *nhg)
{
	struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
	struct nlattr *nest;

	nest = nla_nest_start(skb, NHA_RES_GROUP);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u16(skb, NHA_RES_GROUP_BUCKETS,
			res_table->num_nh_buckets) ||
	    nla_put_u32(skb, NHA_RES_GROUP_IDLE_TIMER,
			jiffies_to_clock_t(res_table->idle_timer)) ||
	    nla_put_u32(skb, NHA_RES_GROUP_UNBALANCED_TIMER,
			jiffies_to_clock_t(res_table->unbalanced_timer)) ||
	    nla_put_u64_64bit(skb, NHA_RES_GROUP_UNBALANCED_TIME,
			      nh_res_table_unbalanced_time(res_table),
			      NHA_RES_GROUP_PAD))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static void nh_grp_entry_stats_inc(struct nh_grp_entry *nhge)
{
	struct nh_grp_entry_stats *cpu_stats;

	cpu_stats = get_cpu_ptr(nhge->stats);
	u64_stats_update_begin(&cpu_stats->syncp);
	u64_stats_inc(&cpu_stats->packets);
	u64_stats_update_end(&cpu_stats->syncp);
	put_cpu_ptr(cpu_stats);
}

static void nh_grp_entry_stats_read(struct nh_grp_entry *nhge,
				    u64 *ret_packets)
{
	int i;

	*ret_packets = 0;

	for_each_possible_cpu(i) {
		struct nh_grp_entry_stats *cpu_stats;
		unsigned int start;
		u64 packets;

		cpu_stats = per_cpu_ptr(nhge->stats, i);
		do {
			start = u64_stats_fetch_begin(&cpu_stats->syncp);
			packets = u64_stats_read(&cpu_stats->packets);
		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));

		*ret_packets += packets;
	}
}
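/* The fetch_begin/fetch_retry loop above is the usual u64_stats pattern:
 * on 64-bit kernels it reduces to a plain read, while on 32-bit kernels
 * it retries until the per-CPU writer (nh_grp_entry_stats_inc()) is not
 * mid-update, so the 64-bit packet counter is never read torn.
 */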
static int nh_notifier_grp_hw_stats_init(struct nh_notifier_info *info,
					 const struct nexthop *nh)
{
	struct nh_group *nhg;
	int i;

	ASSERT_RTNL();
	nhg = rtnl_dereference(nh->nh_grp);

	info->id = nh->id;
	info->type = NH_NOTIFIER_INFO_TYPE_GRP_HW_STATS;
	info->nh_grp_hw_stats = kzalloc(struct_size(info->nh_grp_hw_stats,
						    stats, nhg->num_nh),
					GFP_KERNEL);
	if (!info->nh_grp_hw_stats)
		return -ENOMEM;

	info->nh_grp_hw_stats->num_nh = nhg->num_nh;
	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		info->nh_grp_hw_stats->stats[i].id = nhge->nh->id;
	}

	return 0;
}

static void nh_notifier_grp_hw_stats_fini(struct nh_notifier_info *info)
{
	kfree(info->nh_grp_hw_stats);
}

void nh_grp_hw_stats_report_delta(struct nh_notifier_grp_hw_stats_info *info,
				  unsigned int nh_idx,
				  u64 delta_packets)
{
	info->hw_stats_used = true;
	info->stats[nh_idx].packets += delta_packets;
}
EXPORT_SYMBOL(nh_grp_hw_stats_report_delta);

static void nh_grp_hw_stats_apply_update(struct nexthop *nh,
					 struct nh_notifier_info *info)
{
	struct nh_group *nhg;
	int i;

	ASSERT_RTNL();
	nhg = rtnl_dereference(nh->nh_grp);

	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		nhge->packets_hw += info->nh_grp_hw_stats->stats[i].packets;
	}
}

static int nh_grp_hw_stats_update(struct nexthop *nh, bool *hw_stats_used)
{
	struct nh_notifier_info info = {
		.net = nh->net,
	};
	struct net *net = nh->net;
	int err;

	if (nexthop_notifiers_is_empty(net)) {
		*hw_stats_used = false;
		return 0;
	}

	err = nh_notifier_grp_hw_stats_init(&info, nh);
	if (err)
		return err;

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_HW_STATS_REPORT_DELTA,
					   &info);

	/* Cache whatever we got, even if there was an error, otherwise the
	 * successful stats retrievals would get lost.
	 */
	nh_grp_hw_stats_apply_update(nh, &info);
	*hw_stats_used = info.nh_grp_hw_stats->hw_stats_used;

	nh_notifier_grp_hw_stats_fini(&info);
	return notifier_to_errno(err);
}
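/* Sketch of the flow implemented above: a driver handling
 * NEXTHOP_EVENT_HW_STATS_REPORT_DELTA calls nh_grp_hw_stats_report_delta()
 * per entry, and nh_grp_hw_stats_apply_update() then folds the reported
 * deltas into nhge->packets_hw. E.g. if hardware forwarded 100 more
 * packets via an entry since the last query, the driver reports
 * delta_packets = 100 and that entry's packets_hw grows by 100.
 */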
static int nla_put_nh_group_stats_entry(struct sk_buff *skb,
					struct nh_grp_entry *nhge,
					u32 op_flags)
{
	struct nlattr *nest;
	u64 packets;

	nh_grp_entry_stats_read(nhge, &packets);

	nest = nla_nest_start(skb, NHA_GROUP_STATS_ENTRY);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u32(skb, NHA_GROUP_STATS_ENTRY_ID, nhge->nh->id) ||
	    nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS,
			 packets + nhge->packets_hw))
		goto nla_put_failure;

	if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS &&
	    nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS_HW,
			 nhge->packets_hw))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int nla_put_nh_group_stats(struct sk_buff *skb, struct nexthop *nh,
				  u32 op_flags)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	struct nlattr *nest;
	bool hw_stats_used;
	int err;
	int i;

	if (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats))
		goto err_out;

	if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS &&
	    nhg->hw_stats) {
		err = nh_grp_hw_stats_update(nh, &hw_stats_used);
		if (err)
			goto out;

		if (nla_put_u32(skb, NHA_HW_STATS_USED, hw_stats_used))
			goto err_out;
	}

	nest = nla_nest_start(skb, NHA_GROUP_STATS);
	if (!nest)
		goto err_out;

	for (i = 0; i < nhg->num_nh; i++)
		if (nla_put_nh_group_stats_entry(skb, &nhg->nh_entries[i],
						 op_flags))
			goto cancel_out;

	nla_nest_end(skb, nest);
	return 0;

cancel_out:
	nla_nest_cancel(skb, nest);
err_out:
	err = -EMSGSIZE;
out:
	return err;
}

static int nla_put_nh_group(struct sk_buff *skb, struct nexthop *nh,
			    u32 op_flags, u32 *resp_op_flags)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	struct nexthop_grp *p;
	size_t len = nhg->num_nh * sizeof(*p);
	struct nlattr *nla;
	u16 group_type = 0;
	u16 weight;
	int i;

	*resp_op_flags |= NHA_OP_FLAG_RESP_GRP_RESVD_0;

	if (nhg->hash_threshold)
		group_type = NEXTHOP_GRP_TYPE_MPATH;
	else if (nhg->resilient)
		group_type = NEXTHOP_GRP_TYPE_RES;

	if (nla_put_u16(skb, NHA_GROUP_TYPE, group_type))
		goto nla_put_failure;

	nla = nla_reserve(skb, NHA_GROUP, len);
	if (!nla)
		goto nla_put_failure;

	p = nla_data(nla);
	for (i = 0; i < nhg->num_nh; ++i) {
		weight = nhg->nh_entries[i].weight - 1;

		*p++ = (struct nexthop_grp) {
			.id = nhg->nh_entries[i].nh->id,
			.weight = weight,
			.weight_high = weight >> 8,
		};
	}

	if (nhg->resilient && nla_put_nh_group_res(skb, nhg))
		goto nla_put_failure;

	if (op_flags & NHA_OP_FLAG_DUMP_STATS &&
	    (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats) ||
	     nla_put_nh_group_stats(skb, nh, op_flags)))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
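/* Example of the layout produced above (illustrative values): a
 * hash-threshold group with nexthops 7 (weight 2) and 8 (weight 1) is
 * dumped as NHA_GROUP_TYPE = NEXTHOP_GRP_TYPE_MPATH and NHA_GROUP =
 * [{ .id = 7, .weight = 1 }, { .id = 8, .weight = 0 }], since weights
 * are encoded minus one, with bits 8..15 carried in ->weight_high.
 */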
static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
			int event, u32 portid, u32 seq, unsigned int nlflags,
			u32 op_flags)
{
	struct fib6_nh *fib6_nh;
	struct fib_nh *fib_nh;
	struct nlmsghdr *nlh;
	struct nh_info *nhi;
	struct nhmsg *nhm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	nhm = nlmsg_data(nlh);
	nhm->nh_family = AF_UNSPEC;
	nhm->nh_flags = nh->nh_flags;
	nhm->nh_protocol = nh->protocol;
	nhm->nh_scope = 0;
	nhm->resvd = 0;

	if (nla_put_u32(skb, NHA_ID, nh->id))
		goto nla_put_failure;

	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
		u32 resp_op_flags = 0;

		if (nhg->fdb_nh && nla_put_flag(skb, NHA_FDB))
			goto nla_put_failure;
		if (nla_put_nh_group(skb, nh, op_flags, &resp_op_flags) ||
		    nla_put_u32(skb, NHA_OP_FLAGS, resp_op_flags))
			goto nla_put_failure;
		goto out;
	}

	nhi = rtnl_dereference(nh->nh_info);
	nhm->nh_family = nhi->family;
	if (nhi->reject_nh) {
		if (nla_put_flag(skb, NHA_BLACKHOLE))
			goto nla_put_failure;
		goto out;
	} else if (nhi->fdb_nh) {
		if (nla_put_flag(skb, NHA_FDB))
			goto nla_put_failure;
	} else {
		const struct net_device *dev;

		dev = nhi->fib_nhc.nhc_dev;
		if (dev && nla_put_u32(skb, NHA_OIF, dev->ifindex))
			goto nla_put_failure;
	}

	nhm->nh_scope = nhi->fib_nhc.nhc_scope;
	switch (nhi->family) {
	case AF_INET:
		fib_nh = &nhi->fib_nh;
		if (fib_nh->fib_nh_gw_family &&
		    nla_put_be32(skb, NHA_GATEWAY, fib_nh->fib_nh_gw4))
			goto nla_put_failure;
		break;

	case AF_INET6:
		fib6_nh = &nhi->fib6_nh;
		if (fib6_nh->fib_nh_gw_family &&
		    nla_put_in6_addr(skb, NHA_GATEWAY, &fib6_nh->fib_nh_gw6))
			goto nla_put_failure;
		break;
	}

	if (nhi->fib_nhc.nhc_lwtstate &&
	    lwtunnel_fill_encap(skb, nhi->fib_nhc.nhc_lwtstate,
				NHA_ENCAP, NHA_ENCAP_TYPE) < 0)
		goto nla_put_failure;

out:
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static size_t nh_nlmsg_size_grp_res(struct nh_group *nhg)
{
	return nla_total_size(0) +	/* NHA_RES_GROUP */
		nla_total_size(2) +	/* NHA_RES_GROUP_BUCKETS */
		nla_total_size(4) +	/* NHA_RES_GROUP_IDLE_TIMER */
		nla_total_size(4) +	/* NHA_RES_GROUP_UNBALANCED_TIMER */
		nla_total_size_64bit(8);/* NHA_RES_GROUP_UNBALANCED_TIME */
}

static size_t nh_nlmsg_size_grp(struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	size_t sz = sizeof(struct nexthop_grp) * nhg->num_nh;
	size_t tot = nla_total_size(sz) +
		nla_total_size(2); /* NHA_GROUP_TYPE */

	if (nhg->resilient)
		tot += nh_nlmsg_size_grp_res(nhg);

	return tot;
}

static size_t nh_nlmsg_size_single(struct nexthop *nh)
{
	struct nh_info *nhi = rtnl_dereference(nh->nh_info);
	size_t sz;

	/* covers NHA_BLACKHOLE since NHA_OIF and BLACKHOLE
	 * are mutually exclusive
	 */
	sz = nla_total_size(4);  /* NHA_OIF */

	switch (nhi->family) {
	case AF_INET:
		if (nhi->fib_nh.fib_nh_gw_family)
			sz += nla_total_size(4);  /* NHA_GATEWAY */
		break;

	case AF_INET6:
		/* NHA_GATEWAY */
		if (nhi->fib6_nh.fib_nh_gw_family)
			sz += nla_total_size(sizeof(const struct in6_addr));
		break;
	}

	if (nhi->fib_nhc.nhc_lwtstate) {
		sz += lwtunnel_get_encap_size(nhi->fib_nhc.nhc_lwtstate);
		sz += nla_total_size(2);  /* NHA_ENCAP_TYPE */
	}

	return sz;
}
static size_t nh_nlmsg_size(struct nexthop *nh)
{
	size_t sz = NLMSG_ALIGN(sizeof(struct nhmsg));

	sz += nla_total_size(4); /* NHA_ID */

	if (nh->is_group)
		sz += nh_nlmsg_size_grp(nh) +
		      nla_total_size(4) +	/* NHA_OP_FLAGS */
		      0;
	else
		sz += nh_nlmsg_size_single(nh);

	return sz;
}

static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info)
{
	unsigned int nlflags = info->nlh ? info->nlh->nlmsg_flags : 0;
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(nh_nlmsg_size(nh), gfp_any());
	if (!skb)
		goto errout;

	err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in nh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_NEXTHOP,
		    info->nlh, gfp_any());
	return;
errout:
	rtnl_set_sk_err(info->nl_net, RTNLGRP_NEXTHOP, err);
}

static unsigned long nh_res_bucket_used_time(const struct nh_res_bucket *bucket)
{
	return (unsigned long)atomic_long_read(&bucket->used_time);
}

static unsigned long
nh_res_bucket_idle_point(const struct nh_res_table *res_table,
			 const struct nh_res_bucket *bucket,
			 unsigned long now)
{
	unsigned long time = nh_res_bucket_used_time(bucket);

	/* Bucket was not used since it was migrated. The idle time is now. */
	if (time == bucket->migrated_time)
		return now;

	return time + res_table->idle_timer;
}

static unsigned long
nh_res_table_unb_point(const struct nh_res_table *res_table)
{
	return res_table->unbalanced_since + res_table->unbalanced_timer;
}

static void nh_res_bucket_set_idle(const struct nh_res_table *res_table,
				   struct nh_res_bucket *bucket)
{
	unsigned long now = jiffies;

	atomic_long_set(&bucket->used_time, (long)now);
	bucket->migrated_time = now;
}

static void nh_res_bucket_set_busy(struct nh_res_bucket *bucket)
{
	atomic_long_set(&bucket->used_time, (long)jiffies);
}

static clock_t nh_res_bucket_idle_time(const struct nh_res_bucket *bucket)
{
	unsigned long used_time = nh_res_bucket_used_time(bucket);

	return jiffies_delta_to_clock_t(jiffies - used_time);
}

static int nh_fill_res_bucket(struct sk_buff *skb, struct nexthop *nh,
			      struct nh_res_bucket *bucket, u16 bucket_index,
			      int event, u32 portid, u32 seq,
			      unsigned int nlflags,
			      struct netlink_ext_ack *extack)
{
	struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry);
	struct nlmsghdr *nlh;
	struct nlattr *nest;
	struct nhmsg *nhm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	nhm = nlmsg_data(nlh);
	nhm->nh_family = AF_UNSPEC;
	nhm->nh_flags = bucket->nh_flags;
	nhm->nh_protocol = nh->protocol;
	nhm->nh_scope = 0;
	nhm->resvd = 0;

	if (nla_put_u32(skb, NHA_ID, nh->id))
		goto nla_put_failure;

	nest = nla_nest_start(skb, NHA_RES_BUCKET);
	if (!nest)
		goto nla_put_failure;

	if (nla_put_u16(skb, NHA_RES_BUCKET_INDEX, bucket_index) ||
	    nla_put_u32(skb, NHA_RES_BUCKET_NH_ID, nhge->nh->id) ||
	    nla_put_u64_64bit(skb, NHA_RES_BUCKET_IDLE_TIME,
			      nh_res_bucket_idle_time(bucket),
			      NHA_RES_BUCKET_PAD))
		goto nla_put_failure_nest;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure_nest:
	nla_nest_cancel(skb, nest);
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
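/* An RTM_NEWNEXTHOPBUCKET message built above thus carries the NHA_ID of
 * the owning group plus a nested NHA_RES_BUCKET with the bucket index,
 * the ID of the nexthop currently occupying it, and its idle time in
 * clock_t units.
 */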
static void nexthop_bucket_notify(struct nh_res_table *res_table,
				  u16 bucket_index)
{
	struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index];
	struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry);
	struct nexthop *nh = nhge->nh_parent;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		goto errout;

	err = nh_fill_res_bucket(skb, nh, bucket, bucket_index,
				 RTM_NEWNEXTHOPBUCKET, 0, 0, NLM_F_REPLACE,
				 NULL);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, nh->net, 0, RTNLGRP_NEXTHOP, NULL, GFP_KERNEL);
	return;
errout:
	rtnl_set_sk_err(nh->net, RTNLGRP_NEXTHOP, err);
}

static bool valid_group_nh(struct nexthop *nh, unsigned int npaths,
			   bool *is_fdb, struct netlink_ext_ack *extack)
{
	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

		/* Nesting groups within groups is not supported. */
		if (nhg->hash_threshold) {
			NL_SET_ERR_MSG(extack,
				       "Hash-threshold group can not be a nexthop within a group");
			return false;
		}
		if (nhg->resilient) {
			NL_SET_ERR_MSG(extack,
				       "Resilient group can not be a nexthop within a group");
			return false;
		}
		*is_fdb = nhg->fdb_nh;
	} else {
		struct nh_info *nhi = rtnl_dereference(nh->nh_info);

		if (nhi->reject_nh && npaths > 1) {
			NL_SET_ERR_MSG(extack,
				       "Blackhole nexthop can not be used in a group with more than 1 path");
			return false;
		}
		*is_fdb = nhi->fdb_nh;
	}

	return true;
}

static int nh_check_attr_fdb_group(struct nexthop *nh, u8 *nh_family,
				   struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;

	nhi = rtnl_dereference(nh->nh_info);

	if (!nhi->fdb_nh) {
		NL_SET_ERR_MSG(extack, "FDB nexthop group can only have fdb nexthops");
		return -EINVAL;
	}

	if (*nh_family == AF_UNSPEC) {
		*nh_family = nhi->family;
	} else if (*nh_family != nhi->family) {
		NL_SET_ERR_MSG(extack, "FDB nexthop group cannot have mixed family nexthops");
		return -EINVAL;
	}

	return 0;
}
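/* Sketch of the weight encoding validated below (relying on the
 * nexthop_grp_weight() helper): a user weight W in [1, 0x10000] is
 * carried as W - 1, low byte in ->weight and high byte in ->weight_high,
 * so e.g. W = 257 arrives as weight = 0x00, weight_high = 0x01. The
 * all-ones encoding would mean W = 0x10000, which wraps
 * nexthop_grp_weight() to 0 and is rejected.
 */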
static int nh_check_attr_group(struct net *net,
			       struct nlattr *tb[], size_t tb_size,
			       u16 nh_grp_type, struct netlink_ext_ack *extack)
{
	unsigned int len = nla_len(tb[NHA_GROUP]);
	struct nexthop_grp *nhg;
	unsigned int i, j;

	if (!len || len & (sizeof(struct nexthop_grp) - 1)) {
		NL_SET_ERR_MSG(extack,
			       "Invalid length for nexthop group attribute");
		return -EINVAL;
	}

	/* convert len to number of nexthop ids */
	len /= sizeof(*nhg);

	nhg = nla_data(tb[NHA_GROUP]);
	for (i = 0; i < len; ++i) {
		if (nhg[i].resvd2) {
			NL_SET_ERR_MSG(extack, "Reserved field in nexthop_grp must be 0");
			return -EINVAL;
		}
		if (nexthop_grp_weight(&nhg[i]) == 0) {
			/* 0xffff got passed in, representing weight of 0x10000,
			 * which is too heavy.
			 */
			NL_SET_ERR_MSG(extack, "Invalid value for weight");
			return -EINVAL;
		}
		for (j = i + 1; j < len; ++j) {
			if (nhg[i].id == nhg[j].id) {
				NL_SET_ERR_MSG(extack, "Nexthop id can not be used twice in a group");
				return -EINVAL;
			}
		}
	}

	nhg = nla_data(tb[NHA_GROUP]);
	for (i = NHA_GROUP_TYPE + 1; i < tb_size; ++i) {
		if (!tb[i])
			continue;
		switch (i) {
		case NHA_HW_STATS_ENABLE:
		case NHA_FDB:
			continue;
		case NHA_RES_GROUP:
			if (nh_grp_type == NEXTHOP_GRP_TYPE_RES)
				continue;
			break;
		}
		NL_SET_ERR_MSG(extack,
			       "No other attributes can be set in nexthop groups");
		return -EINVAL;
	}

	return 0;
}

static int nh_check_attr_group_rtnl(struct net *net, struct nlattr *tb[],
				    struct netlink_ext_ack *extack)
{
	u8 nh_family = AF_UNSPEC;
	struct nexthop_grp *nhg;
	unsigned int len;
	unsigned int i;
	u8 nhg_fdb;

	len = nla_len(tb[NHA_GROUP]) / sizeof(*nhg);
	nhg = nla_data(tb[NHA_GROUP]);
	nhg_fdb = !!tb[NHA_FDB];

	for (i = 0; i < len; i++) {
		struct nexthop *nh;
		bool is_fdb_nh;

		nh = nexthop_find_by_id(net, nhg[i].id);
		if (!nh) {
			NL_SET_ERR_MSG(extack, "Invalid nexthop id");
			return -EINVAL;
		}
		if (!valid_group_nh(nh, len, &is_fdb_nh, extack))
			return -EINVAL;

		if (nhg_fdb && nh_check_attr_fdb_group(nh, &nh_family, extack))
			return -EINVAL;

		if (!nhg_fdb && is_fdb_nh) {
			NL_SET_ERR_MSG(extack, "Non FDB nexthop group cannot have fdb nexthops");
			return -EINVAL;
		}
	}

	return 0;
}

static bool ipv6_good_nh(const struct fib6_nh *nh)
{
	int state = NUD_REACHABLE;
	struct neighbour *n;

	rcu_read_lock();

	n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev, &nh->fib_nh_gw6);
	if (n)
		state = READ_ONCE(n->nud_state);

	rcu_read_unlock();

	return !!(state & NUD_VALID);
}

static bool ipv4_good_nh(const struct fib_nh *nh)
{
	int state = NUD_REACHABLE;
	struct neighbour *n;

	rcu_read_lock();

	n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev,
				      (__force u32)nh->fib_nh_gw4);
	if (n)
		state = READ_ONCE(n->nud_state);

	rcu_read_unlock();

	return !!(state & NUD_VALID);
}

static bool nexthop_is_good_nh(const struct nexthop *nh)
{
	struct nh_info *nhi = rcu_dereference(nh->nh_info);

	switch (nhi->family) {
	case AF_INET:
		return ipv4_good_nh(&nhi->fib_nh);
	case AF_INET6:
		return ipv6_good_nh(&nhi->fib6_nh);
	}

	return false;
}

static struct nexthop *nexthop_select_path_fdb(struct nh_group *nhg, int hash)
{
	int i;

	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		if (hash > atomic_read(&nhge->hthr.upper_bound))
			continue;

		nh_grp_entry_stats_inc(nhge);
		return nhge->nh;
	}

	WARN_ON_ONCE(1);
	return NULL;
}
static struct nexthop *nexthop_select_path_hthr(struct nh_group *nhg, int hash)
{
	struct nh_grp_entry *nhge0 = NULL;
	int i;

	if (nhg->fdb_nh)
		return nexthop_select_path_fdb(nhg, hash);

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		/* A nexthop is always checked for being good here; this
		 * does not rely on a sysctl for the behavior.
		 */
		if (!nexthop_is_good_nh(nhge->nh))
			continue;

		if (!nhge0)
			nhge0 = nhge;

		if (hash > atomic_read(&nhge->hthr.upper_bound))
			continue;

		nh_grp_entry_stats_inc(nhge);
		return nhge->nh;
	}

	if (!nhge0)
		nhge0 = &nhg->nh_entries[0];
	nh_grp_entry_stats_inc(nhge0);
	return nhge0->nh;
}

static struct nexthop *nexthop_select_path_res(struct nh_group *nhg, int hash)
{
	struct nh_res_table *res_table = rcu_dereference(nhg->res_table);
	u16 bucket_index = hash % res_table->num_nh_buckets;
	struct nh_res_bucket *bucket;
	struct nh_grp_entry *nhge;

	/* nexthop_select_path() is expected to return a non-NULL value, so
	 * skip protocol validation and just hand out whatever there is.
	 */
	bucket = &res_table->nh_buckets[bucket_index];
	nh_res_bucket_set_busy(bucket);
	nhge = rcu_dereference(bucket->nh_entry);
	nh_grp_entry_stats_inc(nhge);
	return nhge->nh;
}

struct nexthop *nexthop_select_path(struct nexthop *nh, int hash)
{
	struct nh_group *nhg;

	if (!nh->is_group)
		return nh;

	nhg = rcu_dereference(nh->nh_grp);
	if (nhg->hash_threshold)
		return nexthop_select_path_hthr(nhg, hash);
	else if (nhg->resilient)
		return nexthop_select_path_res(nhg, hash);

	/* Unreachable. */
	return NULL;
}
EXPORT_SYMBOL_GPL(nexthop_select_path);

int nexthop_for_each_fib6_nh(struct nexthop *nh,
			     int (*cb)(struct fib6_nh *nh, void *arg),
			     void *arg)
{
	struct nh_info *nhi;
	int err;

	if (nh->is_group) {
		struct nh_group *nhg;
		int i;

		nhg = rcu_dereference_rtnl(nh->nh_grp);
		for (i = 0; i < nhg->num_nh; i++) {
			struct nh_grp_entry *nhge = &nhg->nh_entries[i];

			nhi = rcu_dereference_rtnl(nhge->nh->nh_info);
			err = cb(&nhi->fib6_nh, arg);
			if (err)
				return err;
		}
	} else {
		nhi = rcu_dereference_rtnl(nh->nh_info);
		err = cb(&nhi->fib6_nh, arg);
		if (err)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nexthop_for_each_fib6_nh);

static int check_src_addr(const struct in6_addr *saddr,
			  struct netlink_ext_ack *extack)
{
	if (!ipv6_addr_any(saddr)) {
		NL_SET_ERR_MSG(extack, "IPv6 routes using source address can not use nexthop objects");
		return -EINVAL;
	}
	return 0;
}
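/* For example (illustrative iproute2 command), "ip -6 route add
 * 2001:db8::/64 from 2001:db8:f00::/48 nhid 1" carries a source prefix
 * and is therefore rejected by the check above.
 */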
int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
		       struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;
	bool is_fdb_nh;

	/* fib6_src is unique to a fib6_info and limits the ability to cache
	 * routes in fib6_nh within a nexthop that is potentially shared
	 * across multiple fib entries. If the config wants to use source
	 * routing it can not use nexthop objects. mlxsw also does not allow
	 * fib6_src on routes.
	 */
	if (cfg && check_src_addr(&cfg->fc_src, extack) < 0)
		return -EINVAL;

	if (nh->is_group) {
		struct nh_group *nhg;

		nhg = rtnl_dereference(nh->nh_grp);
		if (nhg->has_v4)
			goto no_v4_nh;
		is_fdb_nh = nhg->fdb_nh;
	} else {
		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->family == AF_INET)
			goto no_v4_nh;
		is_fdb_nh = nhi->fdb_nh;
	}

	if (is_fdb_nh) {
		NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
		return -EINVAL;
	}

	return 0;
no_v4_nh:
	NL_SET_ERR_MSG(extack, "IPv6 routes can not use an IPv4 nexthop");
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(fib6_check_nexthop);

/* if existing nexthop has ipv6 routes linked to it, need
 * to verify this new spec works with ipv6
 */
static int fib6_check_nh_list(struct nexthop *old, struct nexthop *new,
			      struct netlink_ext_ack *extack)
{
	struct fib6_info *f6i;

	if (list_empty(&old->f6i_list))
		return 0;

	list_for_each_entry(f6i, &old->f6i_list, nh_list) {
		if (check_src_addr(&f6i->fib6_src.addr, extack) < 0)
			return -EINVAL;
	}

	return fib6_check_nexthop(new, NULL, extack);
}

static int nexthop_check_scope(struct nh_info *nhi, u8 scope,
			       struct netlink_ext_ack *extack)
{
	if (scope == RT_SCOPE_HOST && nhi->fib_nhc.nhc_gw_family) {
		NL_SET_ERR_MSG(extack,
			       "Route with host scope can not have a gateway");
		return -EINVAL;
	}

	if (nhi->fib_nhc.nhc_flags & RTNH_F_ONLINK && scope >= RT_SCOPE_LINK) {
		NL_SET_ERR_MSG(extack, "Scope mismatch with nexthop");
		return -EINVAL;
	}

	return 0;
}
/* Invoked by fib add code to verify nexthop by id is ok with
 * config for prefix; parts of fib_check_nh not done when nexthop
 * object is used.
 */
int fib_check_nexthop(struct nexthop *nh, u8 scope,
		      struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;
	int err = 0;

	if (nh->is_group) {
		struct nh_group *nhg;

		nhg = rtnl_dereference(nh->nh_grp);
		if (nhg->fdb_nh) {
			NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
			err = -EINVAL;
			goto out;
		}

		if (scope == RT_SCOPE_HOST) {
			NL_SET_ERR_MSG(extack, "Route with host scope can not have multiple nexthops");
			err = -EINVAL;
			goto out;
		}

		/* all nexthops in a group have the same scope */
		nhi = rtnl_dereference(nhg->nh_entries[0].nh->nh_info);
		err = nexthop_check_scope(nhi, scope, extack);
	} else {
		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->fdb_nh) {
			NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
			err = -EINVAL;
			goto out;
		}
		err = nexthop_check_scope(nhi, scope, extack);
	}

out:
	return err;
}

static int fib_check_nh_list(struct nexthop *old, struct nexthop *new,
			     struct netlink_ext_ack *extack)
{
	struct fib_info *fi;

	list_for_each_entry(fi, &old->fi_list, nh_list) {
		int err;

		err = fib_check_nexthop(new, fi->fib_scope, extack);
		if (err)
			return err;
	}
	return 0;
}

static bool nh_res_nhge_is_balanced(const struct nh_grp_entry *nhge)
{
	return nhge->res.count_buckets == nhge->res.wants_buckets;
}

static bool nh_res_nhge_is_ow(const struct nh_grp_entry *nhge)
{
	return nhge->res.count_buckets > nhge->res.wants_buckets;
}

static bool nh_res_nhge_is_uw(const struct nh_grp_entry *nhge)
{
	return nhge->res.count_buckets < nhge->res.wants_buckets;
}

static bool nh_res_table_is_balanced(const struct nh_res_table *res_table)
{
	return list_empty(&res_table->uw_nh_entries);
}

static void nh_res_bucket_unset_nh(struct nh_res_bucket *bucket)
{
	struct nh_grp_entry *nhge;

	if (bucket->occupied) {
		nhge = nh_res_dereference(bucket->nh_entry);
		nhge->res.count_buckets--;
		bucket->occupied = false;
	}
}

static void nh_res_bucket_set_nh(struct nh_res_bucket *bucket,
				 struct nh_grp_entry *nhge)
{
	nh_res_bucket_unset_nh(bucket);

	bucket->occupied = true;
	rcu_assign_pointer(bucket->nh_entry, nhge);
	nhge->res.count_buckets++;
}
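/* Invariant maintained by the two helpers above: res.count_buckets tracks
 * how many occupied buckets currently point at an entry, and the entry is
 * balanced exactly when that count equals res.wants_buckets as computed
 * by nh_res_group_rebalance().
 */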
static bool nh_res_bucket_should_migrate(struct nh_res_table *res_table,
					 struct nh_res_bucket *bucket,
					 unsigned long *deadline, bool *force)
{
	unsigned long now = jiffies;
	struct nh_grp_entry *nhge;
	unsigned long idle_point;

	if (!bucket->occupied) {
		/* The bucket is not occupied, its NHGE pointer is either
		 * NULL or obsolete. We _have to_ migrate: set force.
		 */
		*force = true;
		return true;
	}

	nhge = nh_res_dereference(bucket->nh_entry);

	/* If the bucket is populated by an underweight or balanced
	 * nexthop, do not migrate.
	 */
	if (!nh_res_nhge_is_ow(nhge))
		return false;

	/* At this point we know that the bucket is populated with an
	 * overweight nexthop. It needs to be migrated to a new nexthop if
	 * the idle timer or the unbalanced timer expired.
	 */

	idle_point = nh_res_bucket_idle_point(res_table, bucket, now);
	if (time_after_eq(now, idle_point)) {
		/* The bucket is idle. We _can_ migrate: unset force. */
		*force = false;
		return true;
	}

	/* Unbalanced timer of 0 means "never force". */
	if (res_table->unbalanced_timer) {
		unsigned long unb_point;

		unb_point = nh_res_table_unb_point(res_table);
		if (time_after(now, unb_point)) {
			/* The bucket is not idle, but the unbalanced timer
			 * expired. We _can_ migrate, but set force anyway,
			 * so that drivers know to ignore activity reports
			 * from the HW.
			 */
			*force = true;
			return true;
		}

		nh_res_time_set_deadline(unb_point, deadline);
	}

	nh_res_time_set_deadline(idle_point, deadline);
	return false;
}

static bool nh_res_bucket_migrate(struct nh_res_table *res_table,
				  u16 bucket_index, bool notify,
				  bool notify_nl, bool force)
{
	struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index];
	struct nh_grp_entry *new_nhge;
	struct netlink_ext_ack extack;
	int err;

	new_nhge = list_first_entry_or_null(&res_table->uw_nh_entries,
					    struct nh_grp_entry,
					    res.uw_nh_entry);
	if (WARN_ON_ONCE(!new_nhge))
		/* If this function is called, "bucket" is either not
		 * occupied, or it belongs to a next hop that is
		 * overweight. In either case, there ought to be a
		 * corresponding underweight next hop.
		 */
		return false;

	if (notify) {
		struct nh_grp_entry *old_nhge;

		old_nhge = nh_res_dereference(bucket->nh_entry);
		err = call_nexthop_res_bucket_notifiers(res_table->net,
							res_table->nhg_id,
							bucket_index, force,
							old_nhge->nh,
							new_nhge->nh, &extack);
		if (err) {
			pr_err_ratelimited("%s\n", extack._msg);
			if (!force)
				return false;
			/* It is not possible to veto a forced replacement, so
			 * just clear the hardware flags from the nexthop
			 * bucket to indicate to user space that this bucket is
			 * not correctly populated in hardware.
			 */
			bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
		}
	}

	nh_res_bucket_set_nh(bucket, new_nhge);
	nh_res_bucket_set_idle(res_table, bucket);

	if (notify_nl)
		nexthop_bucket_notify(res_table, bucket_index);

	if (nh_res_nhge_is_balanced(new_nhge))
		list_del(&new_nhge->res.uw_nh_entry);
	return true;
}
#define NH_RES_UPKEEP_DW_MINIMUM_INTERVAL (HZ / 2)

static void nh_res_table_upkeep(struct nh_res_table *res_table,
				bool notify, bool notify_nl)
{
	unsigned long now = jiffies;
	unsigned long deadline;
	u16 i;

	/* Deadline is the next time that upkeep should be run. It is the
	 * earliest time at which one of the buckets might be migrated.
	 * Start at the most pessimistic estimate: either unbalanced_timer
	 * from now, or if there is none, idle_timer from now. For each
	 * encountered time point, call nh_res_time_set_deadline() to
	 * refine the estimate.
	 */
	if (res_table->unbalanced_timer)
		deadline = now + res_table->unbalanced_timer;
	else
		deadline = now + res_table->idle_timer;

	for (i = 0; i < res_table->num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		bool force;

		if (nh_res_bucket_should_migrate(res_table, bucket,
						 &deadline, &force)) {
			if (!nh_res_bucket_migrate(res_table, i, notify,
						   notify_nl, force)) {
				unsigned long idle_point;

				/* A driver can override the migration
				 * decision if the HW reports that the
				 * bucket is actually not idle. Therefore
				 * remark the bucket as busy again and
				 * update the deadline.
				 */
				nh_res_bucket_set_busy(bucket);
				idle_point = nh_res_bucket_idle_point(res_table,
								      bucket,
								      now);
				nh_res_time_set_deadline(idle_point, &deadline);
			}
		}
	}

	/* If the group is still unbalanced, schedule the next upkeep to
	 * either the deadline computed above, or the minimum deadline,
	 * whichever comes later.
	 */
	if (!nh_res_table_is_balanced(res_table)) {
		unsigned long now = jiffies;
		unsigned long min_deadline;

		min_deadline = now + NH_RES_UPKEEP_DW_MINIMUM_INTERVAL;
		if (time_before(deadline, min_deadline))
			deadline = min_deadline;

		queue_delayed_work(system_power_efficient_wq,
				   &res_table->upkeep_dw, deadline - now);
	}
}

static void nh_res_table_upkeep_dw(struct work_struct *work)
{
	struct delayed_work *dw = to_delayed_work(work);
	struct nh_res_table *res_table;

	res_table = container_of(dw, struct nh_res_table, upkeep_dw);
	nh_res_table_upkeep(res_table, true, true);
}

static void nh_res_table_cancel_upkeep(struct nh_res_table *res_table)
{
	cancel_delayed_work_sync(&res_table->upkeep_dw);
}

static void nh_res_group_rebalance(struct nh_group *nhg,
				   struct nh_res_table *res_table)
{
	u16 prev_upper_bound = 0;
	u32 total = 0;
	u32 w = 0;
	int i;

	INIT_LIST_HEAD(&res_table->uw_nh_entries);

	for (i = 0; i < nhg->num_nh; ++i)
		total += nhg->nh_entries[i].weight;

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		u16 upper_bound;
		u64 btw;

		w += nhge->weight;
		btw = ((u64)res_table->num_nh_buckets) * w;
		upper_bound = DIV_ROUND_CLOSEST_ULL(btw, total);
		nhge->res.wants_buckets = upper_bound - prev_upper_bound;
		prev_upper_bound = upper_bound;

		if (nh_res_nhge_is_uw(nhge)) {
			if (list_empty(&res_table->uw_nh_entries))
				res_table->unbalanced_since = jiffies;
			list_add(&nhge->res.uw_nh_entry,
				 &res_table->uw_nh_entries);
		}
	}
}
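/* Worked example for nh_res_group_rebalance() above (illustrative
 * numbers): with 8 buckets and two entries of weights 1 and 3, total = 4.
 * Entry 0: w = 1, btw = 8, upper_bound = 2, so it wants 2 buckets;
 * entry 1: w = 4, btw = 32, upper_bound = 8, so it wants 8 - 2 = 6.
 * Entries holding fewer buckets than they want go on uw_nh_entries.
 */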
/* Migrate buckets in res_table so that they reference NHGE's from NHG with
 * the right NH ID. Set those buckets that do not have a corresponding NHGE
 * entry in NHG as not occupied.
 */
static void nh_res_table_migrate_buckets(struct nh_res_table *res_table,
					 struct nh_group *nhg)
{
	u16 i;

	for (i = 0; i < res_table->num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		u32 id = rtnl_dereference(bucket->nh_entry)->nh->id;
		bool found = false;
		int j;

		for (j = 0; j < nhg->num_nh; j++) {
			struct nh_grp_entry *nhge = &nhg->nh_entries[j];

			if (nhge->nh->id == id) {
				nh_res_bucket_set_nh(bucket, nhge);
				found = true;
				break;
			}
		}

		if (!found)
			nh_res_bucket_unset_nh(bucket);
	}
}

static void replace_nexthop_grp_res(struct nh_group *oldg,
				    struct nh_group *newg)
{
	/* For NH group replacement, the new NHG might only have a stub
	 * hash table with 0 buckets, because the number of buckets was not
	 * specified. For NH removal, oldg and newg both reference the same
	 * res_table. So in any case, in the following, we want to work
	 * with oldg->res_table.
	 */
	struct nh_res_table *old_res_table = rtnl_dereference(oldg->res_table);
	unsigned long prev_unbalanced_since = old_res_table->unbalanced_since;
	bool prev_has_uw = !list_empty(&old_res_table->uw_nh_entries);

	nh_res_table_cancel_upkeep(old_res_table);
	nh_res_table_migrate_buckets(old_res_table, newg);
	nh_res_group_rebalance(newg, old_res_table);
	if (prev_has_uw && !list_empty(&old_res_table->uw_nh_entries))
		old_res_table->unbalanced_since = prev_unbalanced_since;
	nh_res_table_upkeep(old_res_table, true, false);
}

static void nh_hthr_group_rebalance(struct nh_group *nhg)
{
	u32 total = 0;
	u32 w = 0;
	int i;

	for (i = 0; i < nhg->num_nh; ++i)
		total += nhg->nh_entries[i].weight;

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		u32 upper_bound;

		w += nhge->weight;
		upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31, total) - 1;
		atomic_set(&nhge->hthr.upper_bound, upper_bound);
	}
}
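/* Worked example for nh_hthr_group_rebalance() above (illustrative
 * numbers): with weights 1 and 3, total = 4. Entry 0 gets upper_bound =
 * (1 << 31) / 4 - 1 = 0x1fffffff and entry 1 gets (4 << 31) / 4 - 1 =
 * 0x7fffffff, so a 31-bit flow hash selects entry 0 with probability 1/4
 * and entry 1 otherwise (see nexthop_select_path_hthr()).
 */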
new_nhges[j].nh = nhges[i].nh; 2055 new_nhges[j].weight = nhges[i].weight; 2056 list_add(&new_nhges[j].nh_list, &new_nhges[j].nh->grp_list); 2057 j++; 2058 } 2059 2060 if (newg->hash_threshold) 2061 nh_hthr_group_rebalance(newg); 2062 else if (newg->resilient) 2063 replace_nexthop_grp_res(nhg, newg); 2064 2065 rcu_assign_pointer(nhp->nh_grp, newg); 2066 2067 list_del(&nhge->nh_list); 2068 free_percpu(nhge->stats); 2069 nexthop_put(nhge->nh); 2070 2071 /* Removal of a NH from a resilient group is notified through 2072 * bucket notifications. 2073 */ 2074 if (newg->hash_threshold) { 2075 err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, nhp, 2076 &extack); 2077 if (err) 2078 pr_err("%s\n", extack._msg); 2079 } 2080 2081 if (nlinfo) 2082 nexthop_notify(RTM_NEWNEXTHOP, nhp, nlinfo); 2083 } 2084 2085 static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh, 2086 struct nl_info *nlinfo) 2087 { 2088 struct nh_grp_entry *nhge, *tmp; 2089 2090 list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list) 2091 remove_nh_grp_entry(net, nhge, nlinfo); 2092 2093 /* make sure all see the newly published array before releasing rtnl */ 2094 synchronize_net(); 2095 } 2096 2097 static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo) 2098 { 2099 struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp); 2100 struct nh_res_table *res_table; 2101 int i, num_nh = nhg->num_nh; 2102 2103 for (i = 0; i < num_nh; ++i) { 2104 struct nh_grp_entry *nhge = &nhg->nh_entries[i]; 2105 2106 if (WARN_ON(!nhge->nh)) 2107 continue; 2108 2109 list_del_init(&nhge->nh_list); 2110 } 2111 2112 if (nhg->resilient) { 2113 res_table = rtnl_dereference(nhg->res_table); 2114 nh_res_table_cancel_upkeep(res_table); 2115 } 2116 } 2117 2118 /* not called for nexthop replace */ 2119 static void __remove_nexthop_fib(struct net *net, struct nexthop *nh) 2120 { 2121 struct fib6_info *f6i, *tmp; 2122 bool do_flush = false; 2123 struct fib_info *fi; 2124 2125 list_for_each_entry(fi, &nh->fi_list, nh_list) { 2126 fi->fib_flags |= RTNH_F_DEAD; 2127 do_flush = true; 2128 } 2129 if (do_flush) 2130 fib_flush(net); 2131 2132 /* ip6_del_rt removes the entry from this list hence the _safe */ 2133 list_for_each_entry_safe(f6i, tmp, &nh->f6i_list, nh_list) { 2134 /* __ip6_del_rt does a release, so do a hold here */ 2135 fib6_info_hold(f6i); 2136 ipv6_stub->ip6_del_rt(net, f6i, 2137 !READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode)); 2138 } 2139 } 2140 2141 static void __remove_nexthop(struct net *net, struct nexthop *nh, 2142 struct nl_info *nlinfo) 2143 { 2144 __remove_nexthop_fib(net, nh); 2145 2146 if (nh->is_group) { 2147 remove_nexthop_group(nh, nlinfo); 2148 } else { 2149 struct nh_info *nhi; 2150 2151 nhi = rtnl_dereference(nh->nh_info); 2152 if (nhi->fib_nhc.nhc_dev) 2153 hlist_del(&nhi->dev_hash); 2154 2155 remove_nexthop_from_groups(net, nh, nlinfo); 2156 } 2157 } 2158 2159 static void remove_nexthop(struct net *net, struct nexthop *nh, 2160 struct nl_info *nlinfo) 2161 { 2162 call_nexthop_notifiers(net, NEXTHOP_EVENT_DEL, nh, NULL); 2163 2164 /* remove from the tree */ 2165 rb_erase(&nh->rb_node, &net->nexthop.rb_root); 2166 2167 if (nlinfo) 2168 nexthop_notify(RTM_DELNEXTHOP, nh, nlinfo); 2169 2170 __remove_nexthop(net, nh, nlinfo); 2171 nh_base_seq_inc(net); 2172 2173 nexthop_put(nh); 2174 } 2175 2176 /* if any FIB entries reference this nexthop, any dst entries 2177 * need to be regenerated 2178 */ 2179 static void nh_rt_cache_flush(struct net *net, struct nexthop *nh, 2180 struct nexthop 
*replaced_nh) 2181 { 2182 struct fib6_info *f6i; 2183 struct nh_group *nhg; 2184 int i; 2185 2186 if (!list_empty(&nh->fi_list)) 2187 rt_cache_flush(net); 2188 2189 list_for_each_entry(f6i, &nh->f6i_list, nh_list) 2190 ipv6_stub->fib6_update_sernum(net, f6i); 2191 2192 /* if an IPv6 group was replaced, we have to release all old 2193 * dsts to make sure all refcounts are released 2194 */ 2195 if (!replaced_nh->is_group) 2196 return; 2197 2198 nhg = rtnl_dereference(replaced_nh->nh_grp); 2199 for (i = 0; i < nhg->num_nh; i++) { 2200 struct nh_grp_entry *nhge = &nhg->nh_entries[i]; 2201 struct nh_info *nhi = rtnl_dereference(nhge->nh->nh_info); 2202 2203 if (nhi->family == AF_INET6) 2204 ipv6_stub->fib6_nh_release_dsts(&nhi->fib6_nh); 2205 } 2206 } 2207 2208 static int replace_nexthop_grp(struct net *net, struct nexthop *old, 2209 struct nexthop *new, const struct nh_config *cfg, 2210 struct netlink_ext_ack *extack) 2211 { 2212 struct nh_res_table *tmp_table = NULL; 2213 struct nh_res_table *new_res_table; 2214 struct nh_res_table *old_res_table; 2215 struct nh_group *oldg, *newg; 2216 int i, err; 2217 2218 if (!new->is_group) { 2219 NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with a nexthop."); 2220 return -EINVAL; 2221 } 2222 2223 oldg = rtnl_dereference(old->nh_grp); 2224 newg = rtnl_dereference(new->nh_grp); 2225 2226 if (newg->hash_threshold != oldg->hash_threshold) { 2227 NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with one of a different type."); 2228 return -EINVAL; 2229 } 2230 2231 if (newg->hash_threshold) { 2232 err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new, 2233 extack); 2234 if (err) 2235 return err; 2236 } else if (newg->resilient) { 2237 new_res_table = rtnl_dereference(newg->res_table); 2238 old_res_table = rtnl_dereference(oldg->res_table); 2239 2240 /* Accept if num_nh_buckets was not given, but if it was 2241 * given, demand that the value be correct. 2242 */ 2243 if (cfg->nh_grp_res_has_num_buckets && 2244 cfg->nh_grp_res_num_buckets != 2245 old_res_table->num_nh_buckets) { 2246 NL_SET_ERR_MSG(extack, "Can not change number of buckets of a resilient nexthop group."); 2247 return -EINVAL; 2248 } 2249 2250 /* Emit a pre-replace notification so that listeners could veto 2251 * a potentially unsupported configuration. Otherwise, 2252 * individual bucket replacement notifications would need to be 2253 * vetoed, which is something that should only happen if the 2254 * bucket is currently active. 2255 */ 2256 err = call_nexthop_res_table_notifiers(net, new, extack); 2257 if (err) 2258 return err; 2259 2260 if (cfg->nh_grp_res_has_idle_timer) 2261 old_res_table->idle_timer = cfg->nh_grp_res_idle_timer; 2262 if (cfg->nh_grp_res_has_unbalanced_timer) 2263 old_res_table->unbalanced_timer = 2264 cfg->nh_grp_res_unbalanced_timer; 2265 2266 replace_nexthop_grp_res(oldg, newg); 2267 2268 tmp_table = new_res_table; 2269 rcu_assign_pointer(newg->res_table, old_res_table); 2270 rcu_assign_pointer(newg->spare->res_table, old_res_table); 2271 } 2272 2273 /* update parents - used by nexthop code for cleanup */ 2274 for (i = 0; i < newg->num_nh; i++) 2275 newg->nh_entries[i].nh_parent = old; 2276 2277 rcu_assign_pointer(old->nh_grp, newg); 2278 2279 /* Make sure concurrent readers are not using 'oldg' anymore. 
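	 * This is the usual RCU replace sequence: the new group was
	 * published with rcu_assign_pointer() above, a grace period is
	 * awaited below, and only then may 'oldg' be retargeted and
	 * handed over to 'new' for disposal.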
*/ 2280 synchronize_net(); 2281 2282 if (newg->resilient) { 2283 rcu_assign_pointer(oldg->res_table, tmp_table); 2284 rcu_assign_pointer(oldg->spare->res_table, tmp_table); 2285 } 2286 2287 for (i = 0; i < oldg->num_nh; i++) 2288 oldg->nh_entries[i].nh_parent = new; 2289 2290 rcu_assign_pointer(new->nh_grp, oldg); 2291 2292 return 0; 2293 } 2294 2295 static void nh_group_v4_update(struct nh_group *nhg) 2296 { 2297 struct nh_grp_entry *nhges; 2298 bool has_v4 = false; 2299 int i; 2300 2301 nhges = nhg->nh_entries; 2302 for (i = 0; i < nhg->num_nh; i++) { 2303 struct nh_info *nhi; 2304 2305 nhi = rtnl_dereference(nhges[i].nh->nh_info); 2306 if (nhi->family == AF_INET) 2307 has_v4 = true; 2308 } 2309 nhg->has_v4 = has_v4; 2310 } 2311 2312 static int replace_nexthop_single_notify_res(struct net *net, 2313 struct nh_res_table *res_table, 2314 struct nexthop *old, 2315 struct nh_info *oldi, 2316 struct nh_info *newi, 2317 struct netlink_ext_ack *extack) 2318 { 2319 u32 nhg_id = res_table->nhg_id; 2320 int err; 2321 u16 i; 2322 2323 for (i = 0; i < res_table->num_nh_buckets; i++) { 2324 struct nh_res_bucket *bucket = &res_table->nh_buckets[i]; 2325 struct nh_grp_entry *nhge; 2326 2327 nhge = rtnl_dereference(bucket->nh_entry); 2328 if (nhge->nh == old) { 2329 err = __call_nexthop_res_bucket_notifiers(net, nhg_id, 2330 i, true, 2331 oldi, newi, 2332 extack); 2333 if (err) 2334 goto err_notify; 2335 } 2336 } 2337 2338 return 0; 2339 2340 err_notify: 2341 while (i-- > 0) { 2342 struct nh_res_bucket *bucket = &res_table->nh_buckets[i]; 2343 struct nh_grp_entry *nhge; 2344 2345 nhge = rtnl_dereference(bucket->nh_entry); 2346 if (nhge->nh == old) 2347 __call_nexthop_res_bucket_notifiers(net, nhg_id, i, 2348 true, newi, oldi, 2349 extack); 2350 } 2351 return err; 2352 } 2353 2354 static int replace_nexthop_single_notify(struct net *net, 2355 struct nexthop *group_nh, 2356 struct nexthop *old, 2357 struct nh_info *oldi, 2358 struct nh_info *newi, 2359 struct netlink_ext_ack *extack) 2360 { 2361 struct nh_group *nhg = rtnl_dereference(group_nh->nh_grp); 2362 struct nh_res_table *res_table; 2363 2364 if (nhg->hash_threshold) { 2365 return call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, 2366 group_nh, extack); 2367 } else if (nhg->resilient) { 2368 res_table = rtnl_dereference(nhg->res_table); 2369 return replace_nexthop_single_notify_res(net, res_table, 2370 old, oldi, newi, 2371 extack); 2372 } 2373 2374 return -EINVAL; 2375 } 2376 2377 static int replace_nexthop_single(struct net *net, struct nexthop *old, 2378 struct nexthop *new, 2379 struct netlink_ext_ack *extack) 2380 { 2381 u8 old_protocol, old_nh_flags; 2382 struct nh_info *oldi, *newi; 2383 struct nh_grp_entry *nhge; 2384 int err; 2385 2386 if (new->is_group) { 2387 NL_SET_ERR_MSG(extack, "Can not replace a nexthop with a nexthop group."); 2388 return -EINVAL; 2389 } 2390 2391 err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new, extack); 2392 if (err) 2393 return err; 2394 2395 /* Hardware flags were set on 'old' as 'new' is not in the red-black 2396 * tree. Therefore, inherit the flags from 'old' to 'new'. 
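	 * The replace itself is performed by swapping the nh_info
	 * payloads (plus protocol and flags) between 'old' and 'new'
	 * below, so 'old' keeps its ID, tree linkage and references
	 * while taking on the new configuration.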
2397 */ 2398 new->nh_flags |= old->nh_flags & (RTNH_F_OFFLOAD | RTNH_F_TRAP); 2399 2400 oldi = rtnl_dereference(old->nh_info); 2401 newi = rtnl_dereference(new->nh_info); 2402 2403 newi->nh_parent = old; 2404 oldi->nh_parent = new; 2405 2406 old_protocol = old->protocol; 2407 old_nh_flags = old->nh_flags; 2408 2409 old->protocol = new->protocol; 2410 old->nh_flags = new->nh_flags; 2411 2412 rcu_assign_pointer(old->nh_info, newi); 2413 rcu_assign_pointer(new->nh_info, oldi); 2414 2415 /* Send a replace notification for all the groups using the nexthop. */ 2416 list_for_each_entry(nhge, &old->grp_list, nh_list) { 2417 struct nexthop *nhp = nhge->nh_parent; 2418 2419 err = replace_nexthop_single_notify(net, nhp, old, oldi, newi, 2420 extack); 2421 if (err) 2422 goto err_notify; 2423 } 2424 2425 /* When replacing an IPv4 nexthop with an IPv6 nexthop, potentially 2426 * update IPv4 indication in all the groups using the nexthop. 2427 */ 2428 if (oldi->family == AF_INET && newi->family == AF_INET6) { 2429 list_for_each_entry(nhge, &old->grp_list, nh_list) { 2430 struct nexthop *nhp = nhge->nh_parent; 2431 struct nh_group *nhg; 2432 2433 nhg = rtnl_dereference(nhp->nh_grp); 2434 nh_group_v4_update(nhg); 2435 } 2436 } 2437 2438 return 0; 2439 2440 err_notify: 2441 rcu_assign_pointer(new->nh_info, newi); 2442 rcu_assign_pointer(old->nh_info, oldi); 2443 old->nh_flags = old_nh_flags; 2444 old->protocol = old_protocol; 2445 oldi->nh_parent = old; 2446 newi->nh_parent = new; 2447 list_for_each_entry_continue_reverse(nhge, &old->grp_list, nh_list) { 2448 struct nexthop *nhp = nhge->nh_parent; 2449 2450 replace_nexthop_single_notify(net, nhp, old, newi, oldi, NULL); 2451 } 2452 call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, old, extack); 2453 return err; 2454 } 2455 2456 static void __nexthop_replace_notify(struct net *net, struct nexthop *nh, 2457 struct nl_info *info) 2458 { 2459 struct fib6_info *f6i; 2460 2461 if (!list_empty(&nh->fi_list)) { 2462 struct fib_info *fi; 2463 2464 /* expectation is a few fib_info per nexthop and then 2465 * a lot of routes per fib_info. 
So mark the fib_info 2466 * and then walk the fib tables once 2467 */ 2468 list_for_each_entry(fi, &nh->fi_list, nh_list) 2469 fi->nh_updated = true; 2470 2471 fib_info_notify_update(net, info); 2472 2473 list_for_each_entry(fi, &nh->fi_list, nh_list) 2474 fi->nh_updated = false; 2475 } 2476 2477 list_for_each_entry(f6i, &nh->f6i_list, nh_list) 2478 ipv6_stub->fib6_rt_update(net, f6i, info); 2479 } 2480 2481 /* send RTM_NEWROUTE with REPLACE flag set for all FIB entries 2482 * linked to this nexthop and for all groups that the nexthop 2483 * is a member of 2484 */ 2485 static void nexthop_replace_notify(struct net *net, struct nexthop *nh, 2486 struct nl_info *info) 2487 { 2488 struct nh_grp_entry *nhge; 2489 2490 __nexthop_replace_notify(net, nh, info); 2491 2492 list_for_each_entry(nhge, &nh->grp_list, nh_list) 2493 __nexthop_replace_notify(net, nhge->nh_parent, info); 2494 } 2495 2496 static int replace_nexthop(struct net *net, struct nexthop *old, 2497 struct nexthop *new, const struct nh_config *cfg, 2498 struct netlink_ext_ack *extack) 2499 { 2500 bool new_is_reject = false; 2501 struct nh_grp_entry *nhge; 2502 int err; 2503 2504 /* check that existing FIB entries are ok with the 2505 * new nexthop definition 2506 */ 2507 err = fib_check_nh_list(old, new, extack); 2508 if (err) 2509 return err; 2510 2511 err = fib6_check_nh_list(old, new, extack); 2512 if (err) 2513 return err; 2514 2515 if (!new->is_group) { 2516 struct nh_info *nhi = rtnl_dereference(new->nh_info); 2517 2518 new_is_reject = nhi->reject_nh; 2519 } 2520 2521 list_for_each_entry(nhge, &old->grp_list, nh_list) { 2522 /* if new nexthop is a blackhole, any groups using this 2523 * nexthop cannot have more than 1 path 2524 */ 2525 if (new_is_reject && 2526 nexthop_num_path(nhge->nh_parent) > 1) { 2527 NL_SET_ERR_MSG(extack, "Blackhole nexthop can not be a member of a group with more than one path"); 2528 return -EINVAL; 2529 } 2530 2531 err = fib_check_nh_list(nhge->nh_parent, new, extack); 2532 if (err) 2533 return err; 2534 2535 err = fib6_check_nh_list(nhge->nh_parent, new, extack); 2536 if (err) 2537 return err; 2538 } 2539 2540 if (old->is_group) 2541 err = replace_nexthop_grp(net, old, new, cfg, extack); 2542 else 2543 err = replace_nexthop_single(net, old, new, extack); 2544 2545 if (!err) { 2546 nh_rt_cache_flush(net, old, new); 2547 2548 __remove_nexthop(net, new, NULL); 2549 nexthop_put(new); 2550 } 2551 2552 return err; 2553 } 2554 2555 /* called with rtnl_lock held */ 2556 static int insert_nexthop(struct net *net, struct nexthop *new_nh, 2557 struct nh_config *cfg, struct netlink_ext_ack *extack) 2558 { 2559 struct rb_node **pp, *parent = NULL, *next; 2560 struct rb_root *root = &net->nexthop.rb_root; 2561 bool replace = !!(cfg->nlflags & NLM_F_REPLACE); 2562 bool create = !!(cfg->nlflags & NLM_F_CREATE); 2563 u32 new_id = new_nh->id; 2564 int replace_notify = 0; 2565 int rc = -EEXIST; 2566 2567 pp = &root->rb_node; 2568 while (1) { 2569 struct nexthop *nh; 2570 2571 next = *pp; 2572 if (!next) 2573 break; 2574 2575 parent = next; 2576 2577 nh = rb_entry(parent, struct nexthop, rb_node); 2578 if (new_id < nh->id) { 2579 pp = &next->rb_left; 2580 } else if (new_id > nh->id) { 2581 pp = &next->rb_right; 2582 } else if (replace) { 2583 rc = replace_nexthop(net, nh, new_nh, cfg, extack); 2584 if (!rc) { 2585 new_nh = nh; /* send notification with old nh */ 2586 replace_notify = 1; 2587 } 2588 goto out; 2589 } else { 2590 /* id already exists and not a replace */ 2591 goto out; 2592 } 2593 } 2594 2595 if (replace 
&& !create) { 2596 NL_SET_ERR_MSG(extack, "Replace specified without create and no entry exists"); 2597 rc = -ENOENT; 2598 goto out; 2599 } 2600 2601 if (new_nh->is_group) { 2602 struct nh_group *nhg = rtnl_dereference(new_nh->nh_grp); 2603 struct nh_res_table *res_table; 2604 2605 if (nhg->resilient) { 2606 res_table = rtnl_dereference(nhg->res_table); 2607 2608 /* Not passing the number of buckets is OK when 2609 * replacing, but not when creating a new group. 2610 */ 2611 if (!cfg->nh_grp_res_has_num_buckets) { 2612 NL_SET_ERR_MSG(extack, "Number of buckets not specified for nexthop group insertion"); 2613 rc = -EINVAL; 2614 goto out; 2615 } 2616 2617 nh_res_group_rebalance(nhg, res_table); 2618 2619 /* Do not send bucket notifications, we do full 2620 * notification below. 2621 */ 2622 nh_res_table_upkeep(res_table, false, false); 2623 } 2624 } 2625 2626 rb_link_node_rcu(&new_nh->rb_node, parent, pp); 2627 rb_insert_color(&new_nh->rb_node, root); 2628 2629 /* The initial insertion is a full notification for hash-threshold as 2630 * well as resilient groups. 2631 */ 2632 rc = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new_nh, extack); 2633 if (rc) 2634 rb_erase(&new_nh->rb_node, &net->nexthop.rb_root); 2635 2636 out: 2637 if (!rc) { 2638 nh_base_seq_inc(net); 2639 nexthop_notify(RTM_NEWNEXTHOP, new_nh, &cfg->nlinfo); 2640 if (replace_notify && 2641 READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode)) 2642 nexthop_replace_notify(net, new_nh, &cfg->nlinfo); 2643 } 2644 2645 return rc; 2646 } 2647 2648 /* rtnl */ 2649 /* remove all nexthops tied to a device being deleted */ 2650 static void nexthop_flush_dev(struct net_device *dev, unsigned long event) 2651 { 2652 unsigned int hash = nh_dev_hashfn(dev->ifindex); 2653 struct net *net = dev_net(dev); 2654 struct hlist_head *head = &net->nexthop.devhash[hash]; 2655 struct hlist_node *n; 2656 struct nh_info *nhi; 2657 2658 hlist_for_each_entry_safe(nhi, n, head, dev_hash) { 2659 if (nhi->fib_nhc.nhc_dev != dev) 2660 continue; 2661 2662 if (nhi->reject_nh && 2663 (event == NETDEV_DOWN || event == NETDEV_CHANGE)) 2664 continue; 2665 2666 remove_nexthop(net, nhi->nh_parent, NULL); 2667 } 2668 } 2669 2670 /* rtnl; called when net namespace is deleted */ 2671 static void flush_all_nexthops(struct net *net) 2672 { 2673 struct rb_root *root = &net->nexthop.rb_root; 2674 struct rb_node *node; 2675 struct nexthop *nh; 2676 2677 while ((node = rb_first(root))) { 2678 nh = rb_entry(node, struct nexthop, rb_node); 2679 remove_nexthop(net, nh, NULL); 2680 cond_resched(); 2681 } 2682 } 2683 2684 static struct nexthop *nexthop_create_group(struct net *net, 2685 struct nh_config *cfg) 2686 { 2687 struct nlattr *grps_attr = cfg->nh_grp; 2688 struct nexthop_grp *entry = nla_data(grps_attr); 2689 u16 num_nh = nla_len(grps_attr) / sizeof(*entry); 2690 struct nh_group *nhg; 2691 struct nexthop *nh; 2692 int err; 2693 int i; 2694 2695 nh = nexthop_alloc(); 2696 if (!nh) 2697 return ERR_PTR(-ENOMEM); 2698 2699 nh->is_group = 1; 2700 2701 nhg = nexthop_grp_alloc(num_nh); 2702 if (!nhg) { 2703 kfree(nh); 2704 return ERR_PTR(-ENOMEM); 2705 } 2706 2707 /* spare group used for removals */ 2708 nhg->spare = nexthop_grp_alloc(num_nh); 2709 if (!nhg->spare) { 2710 kfree(nhg); 2711 kfree(nh); 2712 return ERR_PTR(-ENOMEM); 2713 } 2714 nhg->spare->spare = nhg; 2715 2716 for (i = 0; i < nhg->num_nh; ++i) { 2717 struct nexthop *nhe; 2718 struct nh_info *nhi; 2719 2720 nhe = nexthop_find_by_id(net, entry[i].id); 2721 if (!nexthop_get(nhe)) { 2722 err = -ENOENT; 2723 goto 
out_no_nh; 2724 } 2725 2726 nhi = rtnl_dereference(nhe->nh_info); 2727 if (nhi->family == AF_INET) 2728 nhg->has_v4 = true; 2729 2730 nhg->nh_entries[i].stats = 2731 netdev_alloc_pcpu_stats(struct nh_grp_entry_stats); 2732 if (!nhg->nh_entries[i].stats) { 2733 err = -ENOMEM; 2734 nexthop_put(nhe); 2735 goto out_no_nh; 2736 } 2737 nhg->nh_entries[i].nh = nhe; 2738 nhg->nh_entries[i].weight = nexthop_grp_weight(&entry[i]); 2739 2740 list_add(&nhg->nh_entries[i].nh_list, &nhe->grp_list); 2741 nhg->nh_entries[i].nh_parent = nh; 2742 } 2743 2744 if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_MPATH) { 2745 nhg->hash_threshold = 1; 2746 nhg->is_multipath = true; 2747 } else if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES) { 2748 struct nh_res_table *res_table; 2749 2750 res_table = nexthop_res_table_alloc(net, cfg->nh_id, cfg); 2751 if (!res_table) { 2752 err = -ENOMEM; 2753 goto out_no_nh; 2754 } 2755 2756 rcu_assign_pointer(nhg->spare->res_table, res_table); 2757 rcu_assign_pointer(nhg->res_table, res_table); 2758 nhg->resilient = true; 2759 nhg->is_multipath = true; 2760 } 2761 2762 WARN_ON_ONCE(nhg->hash_threshold + nhg->resilient != 1); 2763 2764 if (nhg->hash_threshold) 2765 nh_hthr_group_rebalance(nhg); 2766 2767 if (cfg->nh_fdb) 2768 nhg->fdb_nh = 1; 2769 2770 if (cfg->nh_hw_stats) 2771 nhg->hw_stats = true; 2772 2773 rcu_assign_pointer(nh->nh_grp, nhg); 2774 2775 return nh; 2776 2777 out_no_nh: 2778 for (i--; i >= 0; --i) { 2779 list_del(&nhg->nh_entries[i].nh_list); 2780 free_percpu(nhg->nh_entries[i].stats); 2781 nexthop_put(nhg->nh_entries[i].nh); 2782 } 2783 2784 kfree(nhg->spare); 2785 kfree(nhg); 2786 kfree(nh); 2787 2788 return ERR_PTR(err); 2789 } 2790 2791 static int nh_create_ipv4(struct net *net, struct nexthop *nh, 2792 struct nh_info *nhi, struct nh_config *cfg, 2793 struct netlink_ext_ack *extack) 2794 { 2795 struct fib_nh *fib_nh = &nhi->fib_nh; 2796 struct fib_config fib_cfg = { 2797 .fc_oif = cfg->nh_ifindex, 2798 .fc_gw4 = cfg->gw.ipv4, 2799 .fc_gw_family = cfg->gw.ipv4 ? AF_INET : 0, 2800 .fc_flags = cfg->nh_flags, 2801 .fc_nlinfo = cfg->nlinfo, 2802 .fc_encap = cfg->nh_encap, 2803 .fc_encap_type = cfg->nh_encap_type, 2804 }; 2805 u32 tb_id = (cfg->dev ? l3mdev_fib_table(cfg->dev) : RT_TABLE_MAIN); 2806 int err; 2807 2808 err = fib_nh_init(net, fib_nh, &fib_cfg, 1, extack); 2809 if (err) { 2810 fib_nh_release(net, fib_nh); 2811 goto out; 2812 } 2813 2814 if (nhi->fdb_nh) 2815 goto out; 2816 2817 /* sets nh_dev if successful */ 2818 err = fib_check_nh(net, fib_nh, tb_id, 0, extack); 2819 if (!err) { 2820 nh->nh_flags = fib_nh->fib_nh_flags; 2821 fib_info_update_nhc_saddr(net, &fib_nh->nh_common, 2822 !fib_nh->fib_nh_scope ? 
0 : fib_nh->fib_nh_scope - 1); 2823 } else { 2824 fib_nh_release(net, fib_nh); 2825 } 2826 out: 2827 return err; 2828 } 2829 2830 static int nh_create_ipv6(struct net *net, struct nexthop *nh, 2831 struct nh_info *nhi, struct nh_config *cfg, 2832 struct netlink_ext_ack *extack) 2833 { 2834 struct fib6_nh *fib6_nh = &nhi->fib6_nh; 2835 struct fib6_config fib6_cfg = { 2836 .fc_table = l3mdev_fib_table(cfg->dev), 2837 .fc_ifindex = cfg->nh_ifindex, 2838 .fc_gateway = cfg->gw.ipv6, 2839 .fc_flags = cfg->nh_flags, 2840 .fc_nlinfo = cfg->nlinfo, 2841 .fc_encap = cfg->nh_encap, 2842 .fc_encap_type = cfg->nh_encap_type, 2843 .fc_is_fdb = cfg->nh_fdb, 2844 }; 2845 int err; 2846 2847 if (!ipv6_addr_any(&cfg->gw.ipv6)) 2848 fib6_cfg.fc_flags |= RTF_GATEWAY; 2849 2850 /* sets nh_dev if successful */ 2851 err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL, 2852 extack); 2853 if (err) { 2854 /* IPv6 is not enabled, don't call fib6_nh_release */ 2855 if (err == -EAFNOSUPPORT) 2856 goto out; 2857 ipv6_stub->fib6_nh_release(fib6_nh); 2858 } else { 2859 nh->nh_flags = fib6_nh->fib_nh_flags; 2860 } 2861 out: 2862 return err; 2863 } 2864 2865 static struct nexthop *nexthop_create(struct net *net, struct nh_config *cfg, 2866 struct netlink_ext_ack *extack) 2867 { 2868 struct nh_info *nhi; 2869 struct nexthop *nh; 2870 int err = 0; 2871 2872 nh = nexthop_alloc(); 2873 if (!nh) 2874 return ERR_PTR(-ENOMEM); 2875 2876 nhi = kzalloc(sizeof(*nhi), GFP_KERNEL); 2877 if (!nhi) { 2878 kfree(nh); 2879 return ERR_PTR(-ENOMEM); 2880 } 2881 2882 nh->nh_flags = cfg->nh_flags; 2883 nh->net = net; 2884 2885 nhi->nh_parent = nh; 2886 nhi->family = cfg->nh_family; 2887 nhi->fib_nhc.nhc_scope = RT_SCOPE_LINK; 2888 2889 if (cfg->nh_fdb) 2890 nhi->fdb_nh = 1; 2891 2892 if (cfg->nh_blackhole) { 2893 nhi->reject_nh = 1; 2894 cfg->nh_ifindex = net->loopback_dev->ifindex; 2895 } 2896 2897 switch (cfg->nh_family) { 2898 case AF_INET: 2899 err = nh_create_ipv4(net, nh, nhi, cfg, extack); 2900 break; 2901 case AF_INET6: 2902 err = nh_create_ipv6(net, nh, nhi, cfg, extack); 2903 break; 2904 } 2905 2906 if (err) { 2907 kfree(nhi); 2908 kfree(nh); 2909 return ERR_PTR(err); 2910 } 2911 2912 /* add the entry to the device based hash */ 2913 if (!nhi->fdb_nh) 2914 nexthop_devhash_add(net, nhi); 2915 2916 rcu_assign_pointer(nh->nh_info, nhi); 2917 2918 return nh; 2919 } 2920 2921 /* called with rtnl lock held */ 2922 static struct nexthop *nexthop_add(struct net *net, struct nh_config *cfg, 2923 struct netlink_ext_ack *extack) 2924 { 2925 struct nexthop *nh; 2926 int err; 2927 2928 if (!cfg->nh_id) { 2929 cfg->nh_id = nh_find_unused_id(net); 2930 if (!cfg->nh_id) { 2931 NL_SET_ERR_MSG(extack, "No unused id"); 2932 return ERR_PTR(-EINVAL); 2933 } 2934 } 2935 2936 if (cfg->nh_grp) 2937 nh = nexthop_create_group(net, cfg); 2938 else 2939 nh = nexthop_create(net, cfg, extack); 2940 2941 if (IS_ERR(nh)) 2942 return nh; 2943 2944 refcount_set(&nh->refcnt, 1); 2945 nh->id = cfg->nh_id; 2946 nh->protocol = cfg->nh_protocol; 2947 nh->net = net; 2948 2949 err = insert_nexthop(net, nh, cfg, extack); 2950 if (err) { 2951 __remove_nexthop(net, nh, NULL); 2952 nexthop_put(nh); 2953 nh = ERR_PTR(err); 2954 } 2955 2956 return nh; 2957 } 2958 2959 static int rtm_nh_get_timer(struct nlattr *attr, unsigned long fallback, 2960 unsigned long *timer_p, bool *has_p, 2961 struct netlink_ext_ack *extack) 2962 { 2963 unsigned long timer; 2964 u32 value; 2965 2966 if (!attr) { 2967 *timer_p = fallback; 2968 *has_p = false; 2969 return 0; 2970 } 2971 2972 
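	/* The attribute carries the timeout in clock_t units (USER_HZ,
	 * i.e. hundredths of a second on common configurations), so e.g.
	 * a value of 12000 corresponds to 120 seconds. On overflow,
	 * clock_t_to_jiffies() saturates to ~0UL, which is rejected
	 * below.
	 */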
value = nla_get_u32(attr); 2973 timer = clock_t_to_jiffies(value); 2974 if (timer == ~0UL) { 2975 NL_SET_ERR_MSG(extack, "Timer value too large"); 2976 return -EINVAL; 2977 } 2978 2979 *timer_p = timer; 2980 *has_p = true; 2981 return 0; 2982 } 2983 2984 static int rtm_to_nh_config_grp_res(struct nlattr *res, struct nh_config *cfg, 2985 struct netlink_ext_ack *extack) 2986 { 2987 struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_policy_new)] = {}; 2988 int err; 2989 2990 if (res) { 2991 err = nla_parse_nested(tb, 2992 ARRAY_SIZE(rtm_nh_res_policy_new) - 1, 2993 res, rtm_nh_res_policy_new, extack); 2994 if (err < 0) 2995 return err; 2996 } 2997 2998 if (tb[NHA_RES_GROUP_BUCKETS]) { 2999 cfg->nh_grp_res_num_buckets = 3000 nla_get_u16(tb[NHA_RES_GROUP_BUCKETS]); 3001 cfg->nh_grp_res_has_num_buckets = true; 3002 if (!cfg->nh_grp_res_num_buckets) { 3003 NL_SET_ERR_MSG(extack, "Number of buckets needs to be non-0"); 3004 return -EINVAL; 3005 } 3006 } 3007 3008 err = rtm_nh_get_timer(tb[NHA_RES_GROUP_IDLE_TIMER], 3009 NH_RES_DEFAULT_IDLE_TIMER, 3010 &cfg->nh_grp_res_idle_timer, 3011 &cfg->nh_grp_res_has_idle_timer, 3012 extack); 3013 if (err) 3014 return err; 3015 3016 return rtm_nh_get_timer(tb[NHA_RES_GROUP_UNBALANCED_TIMER], 3017 NH_RES_DEFAULT_UNBALANCED_TIMER, 3018 &cfg->nh_grp_res_unbalanced_timer, 3019 &cfg->nh_grp_res_has_unbalanced_timer, 3020 extack); 3021 } 3022 3023 static int rtm_to_nh_config(struct net *net, struct sk_buff *skb, 3024 struct nlmsghdr *nlh, struct nlattr **tb, 3025 struct nh_config *cfg, 3026 struct netlink_ext_ack *extack) 3027 { 3028 struct nhmsg *nhm = nlmsg_data(nlh); 3029 int err; 3030 3031 err = -EINVAL; 3032 if (nhm->resvd || nhm->nh_scope) { 3033 NL_SET_ERR_MSG(extack, "Invalid values in ancillary header"); 3034 goto out; 3035 } 3036 if (nhm->nh_flags & ~NEXTHOP_VALID_USER_FLAGS) { 3037 NL_SET_ERR_MSG(extack, "Invalid nexthop flags in ancillary header"); 3038 goto out; 3039 } 3040 3041 switch (nhm->nh_family) { 3042 case AF_INET: 3043 case AF_INET6: 3044 break; 3045 case AF_UNSPEC: 3046 if (tb[NHA_GROUP]) 3047 break; 3048 fallthrough; 3049 default: 3050 NL_SET_ERR_MSG(extack, "Invalid address family"); 3051 goto out; 3052 } 3053 3054 memset(cfg, 0, sizeof(*cfg)); 3055 cfg->nlflags = nlh->nlmsg_flags; 3056 cfg->nlinfo.portid = NETLINK_CB(skb).portid; 3057 cfg->nlinfo.nlh = nlh; 3058 cfg->nlinfo.nl_net = net; 3059 3060 cfg->nh_family = nhm->nh_family; 3061 cfg->nh_protocol = nhm->nh_protocol; 3062 cfg->nh_flags = nhm->nh_flags; 3063 3064 if (tb[NHA_ID]) 3065 cfg->nh_id = nla_get_u32(tb[NHA_ID]); 3066 3067 if (tb[NHA_FDB]) { 3068 if (tb[NHA_OIF] || tb[NHA_BLACKHOLE] || 3069 tb[NHA_ENCAP] || tb[NHA_ENCAP_TYPE]) { 3070 NL_SET_ERR_MSG(extack, "Fdb attribute can not be used with encap, oif or blackhole"); 3071 goto out; 3072 } 3073 if (nhm->nh_flags) { 3074 NL_SET_ERR_MSG(extack, "Unsupported nexthop flags in ancillary header"); 3075 goto out; 3076 } 3077 cfg->nh_fdb = nla_get_flag(tb[NHA_FDB]); 3078 } 3079 3080 if (tb[NHA_GROUP]) { 3081 if (nhm->nh_family != AF_UNSPEC) { 3082 NL_SET_ERR_MSG(extack, "Invalid family for group"); 3083 goto out; 3084 } 3085 cfg->nh_grp = tb[NHA_GROUP]; 3086 3087 cfg->nh_grp_type = NEXTHOP_GRP_TYPE_MPATH; 3088 if (tb[NHA_GROUP_TYPE]) 3089 cfg->nh_grp_type = nla_get_u16(tb[NHA_GROUP_TYPE]); 3090 3091 if (cfg->nh_grp_type > NEXTHOP_GRP_TYPE_MAX) { 3092 NL_SET_ERR_MSG(extack, "Invalid group type"); 3093 goto out; 3094 } 3095 3096 err = nh_check_attr_group(net, tb, ARRAY_SIZE(rtm_nh_policy_new), 3097 cfg->nh_grp_type, extack); 3098 if (err) 3099 goto 
out; 3100 3101 if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES) 3102 err = rtm_to_nh_config_grp_res(tb[NHA_RES_GROUP], 3103 cfg, extack); 3104 3105 if (tb[NHA_HW_STATS_ENABLE]) 3106 cfg->nh_hw_stats = nla_get_u32(tb[NHA_HW_STATS_ENABLE]); 3107 3108 /* no other attributes should be set */ 3109 goto out; 3110 } 3111 3112 if (tb[NHA_BLACKHOLE]) { 3113 if (tb[NHA_GATEWAY] || tb[NHA_OIF] || 3114 tb[NHA_ENCAP] || tb[NHA_ENCAP_TYPE] || tb[NHA_FDB]) { 3115 NL_SET_ERR_MSG(extack, "Blackhole attribute can not be used with gateway, oif, encap or fdb"); 3116 goto out; 3117 } 3118 3119 cfg->nh_blackhole = 1; 3120 err = 0; 3121 goto out; 3122 } 3123 3124 if (!cfg->nh_fdb && !tb[NHA_OIF]) { 3125 NL_SET_ERR_MSG(extack, "Device attribute required for non-blackhole and non-fdb nexthops"); 3126 goto out; 3127 } 3128 3129 err = -EINVAL; 3130 if (tb[NHA_GATEWAY]) { 3131 struct nlattr *gwa = tb[NHA_GATEWAY]; 3132 3133 switch (cfg->nh_family) { 3134 case AF_INET: 3135 if (nla_len(gwa) != sizeof(u32)) { 3136 NL_SET_ERR_MSG(extack, "Invalid gateway"); 3137 goto out; 3138 } 3139 cfg->gw.ipv4 = nla_get_be32(gwa); 3140 break; 3141 case AF_INET6: 3142 if (nla_len(gwa) != sizeof(struct in6_addr)) { 3143 NL_SET_ERR_MSG(extack, "Invalid gateway"); 3144 goto out; 3145 } 3146 cfg->gw.ipv6 = nla_get_in6_addr(gwa); 3147 break; 3148 default: 3149 NL_SET_ERR_MSG(extack, 3150 "Unknown address family for gateway"); 3151 goto out; 3152 } 3153 } else { 3154 /* device only nexthop (no gateway) */ 3155 if (cfg->nh_flags & RTNH_F_ONLINK) { 3156 NL_SET_ERR_MSG(extack, 3157 "ONLINK flag can not be set for nexthop without a gateway"); 3158 goto out; 3159 } 3160 } 3161 3162 if (tb[NHA_ENCAP]) { 3163 cfg->nh_encap = tb[NHA_ENCAP]; 3164 3165 if (!tb[NHA_ENCAP_TYPE]) { 3166 NL_SET_ERR_MSG(extack, "LWT encapsulation type is missing"); 3167 goto out; 3168 } 3169 3170 cfg->nh_encap_type = nla_get_u16(tb[NHA_ENCAP_TYPE]); 3171 err = lwtunnel_valid_encap_type(cfg->nh_encap_type, 3172 extack, false); 3173 if (err < 0) 3174 goto out; 3175 3176 } else if (tb[NHA_ENCAP_TYPE]) { 3177 NL_SET_ERR_MSG(extack, "LWT encapsulation attribute is missing"); 3178 goto out; 3179 } 3180 3181 if (tb[NHA_HW_STATS_ENABLE]) { 3182 NL_SET_ERR_MSG(extack, "Cannot enable nexthop hardware statistics for non-group nexthops"); 3183 goto out; 3184 } 3185 3186 err = 0; 3187 out: 3188 return err; 3189 } 3190 3191 static int rtm_to_nh_config_rtnl(struct net *net, struct nlattr **tb, 3192 struct nh_config *cfg, 3193 struct netlink_ext_ack *extack) 3194 { 3195 if (tb[NHA_GROUP]) 3196 return nh_check_attr_group_rtnl(net, tb, extack); 3197 3198 if (tb[NHA_OIF]) { 3199 cfg->nh_ifindex = nla_get_u32(tb[NHA_OIF]); 3200 if (cfg->nh_ifindex) 3201 cfg->dev = __dev_get_by_index(net, cfg->nh_ifindex); 3202 3203 if (!cfg->dev) { 3204 NL_SET_ERR_MSG(extack, "Invalid device index"); 3205 return -EINVAL; 3206 } 3207 3208 if (!(cfg->dev->flags & IFF_UP)) { 3209 NL_SET_ERR_MSG(extack, "Nexthop device is not up"); 3210 return -ENETDOWN; 3211 } 3212 3213 if (!netif_carrier_ok(cfg->dev)) { 3214 NL_SET_ERR_MSG(extack, "Carrier for nexthop device is down"); 3215 return -ENETDOWN; 3216 } 3217 } 3218 3219 return 0; 3220 } 3221 3222 /* rtnl */ 3223 static int rtm_new_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh, 3224 struct netlink_ext_ack *extack) 3225 { 3226 struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_new)]; 3227 struct net *net = sock_net(skb->sk); 3228 struct nh_config cfg; 3229 struct nexthop *nh; 3230 int err; 3231 3232 err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb, 3233 
ARRAY_SIZE(rtm_nh_policy_new) - 1, 3234 rtm_nh_policy_new, extack); 3235 if (err < 0) 3236 goto out; 3237 3238 err = rtm_to_nh_config(net, skb, nlh, tb, &cfg, extack); 3239 if (err) 3240 goto out; 3241 3242 if (cfg.nlflags & NLM_F_REPLACE && !cfg.nh_id) { 3243 NL_SET_ERR_MSG(extack, "Replace requires nexthop id"); 3244 err = -EINVAL; 3245 goto out; 3246 } 3247 3248 rtnl_net_lock(net); 3249 3250 err = rtm_to_nh_config_rtnl(net, tb, &cfg, extack); 3251 if (err) 3252 goto unlock; 3253 3254 nh = nexthop_add(net, &cfg, extack); 3255 if (IS_ERR(nh)) 3256 err = PTR_ERR(nh); 3257 3258 unlock: 3259 rtnl_net_unlock(net); 3260 out: 3261 return err; 3262 } 3263 3264 static int nh_valid_get_del_req(const struct nlmsghdr *nlh, 3265 struct nlattr **tb, u32 *id, u32 *op_flags, 3266 struct netlink_ext_ack *extack) 3267 { 3268 struct nhmsg *nhm = nlmsg_data(nlh); 3269 3270 if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) { 3271 NL_SET_ERR_MSG(extack, "Invalid values in header"); 3272 return -EINVAL; 3273 } 3274 3275 if (!tb[NHA_ID]) { 3276 NL_SET_ERR_MSG(extack, "Nexthop id is missing"); 3277 return -EINVAL; 3278 } 3279 3280 *id = nla_get_u32(tb[NHA_ID]); 3281 if (!(*id)) { 3282 NL_SET_ERR_MSG(extack, "Invalid nexthop id"); 3283 return -EINVAL; 3284 } 3285 3286 if (op_flags) 3287 *op_flags = nla_get_u32_default(tb[NHA_OP_FLAGS], 0); 3288 3289 return 0; 3290 } 3291 3292 /* rtnl */ 3293 static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh, 3294 struct netlink_ext_ack *extack) 3295 { 3296 struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_del)]; 3297 struct net *net = sock_net(skb->sk); 3298 struct nl_info nlinfo = { 3299 .nlh = nlh, 3300 .nl_net = net, 3301 .portid = NETLINK_CB(skb).portid, 3302 }; 3303 struct nexthop *nh; 3304 int err; 3305 u32 id; 3306 3307 err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb, 3308 ARRAY_SIZE(rtm_nh_policy_del) - 1, rtm_nh_policy_del, 3309 extack); 3310 if (err < 0) 3311 return err; 3312 3313 err = nh_valid_get_del_req(nlh, tb, &id, NULL, extack); 3314 if (err) 3315 return err; 3316 3317 rtnl_net_lock(net); 3318 3319 nh = nexthop_find_by_id(net, id); 3320 if (nh) 3321 remove_nexthop(net, nh, &nlinfo); 3322 else 3323 err = -ENOENT; 3324 3325 rtnl_net_unlock(net); 3326 3327 return err; 3328 } 3329 3330 /* rtnl */ 3331 static int rtm_get_nexthop(struct sk_buff *in_skb, struct nlmsghdr *nlh, 3332 struct netlink_ext_ack *extack) 3333 { 3334 struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get)]; 3335 struct net *net = sock_net(in_skb->sk); 3336 struct sk_buff *skb = NULL; 3337 struct nexthop *nh; 3338 u32 op_flags; 3339 int err; 3340 u32 id; 3341 3342 err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb, 3343 ARRAY_SIZE(rtm_nh_policy_get) - 1, rtm_nh_policy_get, 3344 extack); 3345 if (err < 0) 3346 return err; 3347 3348 err = nh_valid_get_del_req(nlh, tb, &id, &op_flags, extack); 3349 if (err) 3350 return err; 3351 3352 err = -ENOBUFS; 3353 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 3354 if (!skb) 3355 goto out; 3356 3357 err = -ENOENT; 3358 nh = nexthop_find_by_id(net, id); 3359 if (!nh) 3360 goto errout_free; 3361 3362 err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP, NETLINK_CB(in_skb).portid, 3363 nlh->nlmsg_seq, 0, op_flags); 3364 if (err < 0) { 3365 WARN_ON(err == -EMSGSIZE); 3366 goto errout_free; 3367 } 3368 3369 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); 3370 out: 3371 return err; 3372 errout_free: 3373 kfree_skb(skb); 3374 goto out; 3375 } 3376 3377 struct nh_dump_filter { 3378 u32 nh_id; 3379 int dev_idx; 3380 int master_idx; 3381 bool 
group_filter; 3382 bool fdb_filter; 3383 u32 res_bucket_nh_id; 3384 u32 op_flags; 3385 }; 3386 3387 static bool nh_dump_filtered(struct nexthop *nh, 3388 struct nh_dump_filter *filter, u8 family) 3389 { 3390 const struct net_device *dev; 3391 const struct nh_info *nhi; 3392 3393 if (filter->group_filter && !nh->is_group) 3394 return true; 3395 3396 if (!filter->dev_idx && !filter->master_idx && !family) 3397 return false; 3398 3399 if (nh->is_group) 3400 return true; 3401 3402 nhi = rtnl_dereference(nh->nh_info); 3403 if (family && nhi->family != family) 3404 return true; 3405 3406 dev = nhi->fib_nhc.nhc_dev; 3407 if (filter->dev_idx && (!dev || dev->ifindex != filter->dev_idx)) 3408 return true; 3409 3410 if (filter->master_idx) { 3411 struct net_device *master; 3412 3413 if (!dev) 3414 return true; 3415 3416 master = netdev_master_upper_dev_get((struct net_device *)dev); 3417 if (!master || master->ifindex != filter->master_idx) 3418 return true; 3419 } 3420 3421 return false; 3422 } 3423 3424 static int __nh_valid_dump_req(const struct nlmsghdr *nlh, struct nlattr **tb, 3425 struct nh_dump_filter *filter, 3426 struct netlink_ext_ack *extack) 3427 { 3428 struct nhmsg *nhm; 3429 u32 idx; 3430 3431 if (tb[NHA_OIF]) { 3432 idx = nla_get_u32(tb[NHA_OIF]); 3433 if (idx > INT_MAX) { 3434 NL_SET_ERR_MSG(extack, "Invalid device index"); 3435 return -EINVAL; 3436 } 3437 filter->dev_idx = idx; 3438 } 3439 if (tb[NHA_MASTER]) { 3440 idx = nla_get_u32(tb[NHA_MASTER]); 3441 if (idx > INT_MAX) { 3442 NL_SET_ERR_MSG(extack, "Invalid master device index"); 3443 return -EINVAL; 3444 } 3445 filter->master_idx = idx; 3446 } 3447 filter->group_filter = nla_get_flag(tb[NHA_GROUPS]); 3448 filter->fdb_filter = nla_get_flag(tb[NHA_FDB]); 3449 3450 nhm = nlmsg_data(nlh); 3451 if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) { 3452 NL_SET_ERR_MSG(extack, "Invalid values in header for nexthop dump request"); 3453 return -EINVAL; 3454 } 3455 3456 return 0; 3457 } 3458 3459 static int nh_valid_dump_req(const struct nlmsghdr *nlh, 3460 struct nh_dump_filter *filter, 3461 struct netlink_callback *cb) 3462 { 3463 struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_dump)]; 3464 int err; 3465 3466 err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb, 3467 ARRAY_SIZE(rtm_nh_policy_dump) - 1, 3468 rtm_nh_policy_dump, cb->extack); 3469 if (err < 0) 3470 return err; 3471 3472 filter->op_flags = nla_get_u32_default(tb[NHA_OP_FLAGS], 0); 3473 3474 return __nh_valid_dump_req(nlh, tb, filter, cb->extack); 3475 } 3476 3477 struct rtm_dump_nh_ctx { 3478 u32 idx; 3479 }; 3480 3481 static struct rtm_dump_nh_ctx * 3482 rtm_dump_nh_ctx(struct netlink_callback *cb) 3483 { 3484 struct rtm_dump_nh_ctx *ctx = (void *)cb->ctx; 3485 3486 BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx)); 3487 return ctx; 3488 } 3489 3490 static int rtm_dump_walk_nexthops(struct sk_buff *skb, 3491 struct netlink_callback *cb, 3492 struct rb_root *root, 3493 struct rtm_dump_nh_ctx *ctx, 3494 int (*nh_cb)(struct sk_buff *skb, 3495 struct netlink_callback *cb, 3496 struct nexthop *nh, void *data), 3497 void *data) 3498 { 3499 struct rb_node *node; 3500 int s_idx; 3501 int err; 3502 3503 s_idx = ctx->idx; 3504 for (node = rb_first(root); node; node = rb_next(node)) { 3505 struct nexthop *nh; 3506 3507 nh = rb_entry(node, struct nexthop, rb_node); 3508 if (nh->id < s_idx) 3509 continue; 3510 3511 ctx->idx = nh->id; 3512 err = nh_cb(skb, cb, nh, data); 3513 if (err) 3514 return err; 3515 } 3516 3517 return 0; 3518 } 3519 3520 static int 
rtm_dump_nexthop_cb(struct sk_buff *skb, struct netlink_callback *cb, 3521 struct nexthop *nh, void *data) 3522 { 3523 struct nhmsg *nhm = nlmsg_data(cb->nlh); 3524 struct nh_dump_filter *filter = data; 3525 3526 if (nh_dump_filtered(nh, filter, nhm->nh_family)) 3527 return 0; 3528 3529 return nh_fill_node(skb, nh, RTM_NEWNEXTHOP, 3530 NETLINK_CB(cb->skb).portid, 3531 cb->nlh->nlmsg_seq, NLM_F_MULTI, filter->op_flags); 3532 } 3533 3534 /* rtnl */ 3535 static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb) 3536 { 3537 struct rtm_dump_nh_ctx *ctx = rtm_dump_nh_ctx(cb); 3538 struct net *net = sock_net(skb->sk); 3539 struct rb_root *root = &net->nexthop.rb_root; 3540 struct nh_dump_filter filter = {}; 3541 int err; 3542 3543 err = nh_valid_dump_req(cb->nlh, &filter, cb); 3544 if (err < 0) 3545 return err; 3546 3547 err = rtm_dump_walk_nexthops(skb, cb, root, ctx, 3548 &rtm_dump_nexthop_cb, &filter); 3549 3550 cb->seq = net->nexthop.seq; 3551 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 3552 return err; 3553 } 3554 3555 static struct nexthop * 3556 nexthop_find_group_resilient(struct net *net, u32 id, 3557 struct netlink_ext_ack *extack) 3558 { 3559 struct nh_group *nhg; 3560 struct nexthop *nh; 3561 3562 nh = nexthop_find_by_id(net, id); 3563 if (!nh) 3564 return ERR_PTR(-ENOENT); 3565 3566 if (!nh->is_group) { 3567 NL_SET_ERR_MSG(extack, "Not a nexthop group"); 3568 return ERR_PTR(-EINVAL); 3569 } 3570 3571 nhg = rtnl_dereference(nh->nh_grp); 3572 if (!nhg->resilient) { 3573 NL_SET_ERR_MSG(extack, "Nexthop group not of type resilient"); 3574 return ERR_PTR(-EINVAL); 3575 } 3576 3577 return nh; 3578 } 3579 3580 static int nh_valid_dump_nhid(struct nlattr *attr, u32 *nh_id_p, 3581 struct netlink_ext_ack *extack) 3582 { 3583 u32 idx; 3584 3585 if (attr) { 3586 idx = nla_get_u32(attr); 3587 if (!idx) { 3588 NL_SET_ERR_MSG(extack, "Invalid nexthop id"); 3589 return -EINVAL; 3590 } 3591 *nh_id_p = idx; 3592 } else { 3593 *nh_id_p = 0; 3594 } 3595 3596 return 0; 3597 } 3598 3599 static int nh_valid_dump_bucket_req(const struct nlmsghdr *nlh, 3600 struct nh_dump_filter *filter, 3601 struct netlink_callback *cb) 3602 { 3603 struct nlattr *res_tb[ARRAY_SIZE(rtm_nh_res_bucket_policy_dump)]; 3604 struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_dump_bucket)]; 3605 int err; 3606 3607 err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb, 3608 ARRAY_SIZE(rtm_nh_policy_dump_bucket) - 1, 3609 rtm_nh_policy_dump_bucket, NULL); 3610 if (err < 0) 3611 return err; 3612 3613 err = nh_valid_dump_nhid(tb[NHA_ID], &filter->nh_id, cb->extack); 3614 if (err) 3615 return err; 3616 3617 if (tb[NHA_RES_BUCKET]) { 3618 size_t max = ARRAY_SIZE(rtm_nh_res_bucket_policy_dump) - 1; 3619 3620 err = nla_parse_nested(res_tb, max, 3621 tb[NHA_RES_BUCKET], 3622 rtm_nh_res_bucket_policy_dump, 3623 cb->extack); 3624 if (err < 0) 3625 return err; 3626 3627 err = nh_valid_dump_nhid(res_tb[NHA_RES_BUCKET_NH_ID], 3628 &filter->res_bucket_nh_id, 3629 cb->extack); 3630 if (err) 3631 return err; 3632 } 3633 3634 return __nh_valid_dump_req(nlh, tb, filter, cb->extack); 3635 } 3636 3637 struct rtm_dump_res_bucket_ctx { 3638 struct rtm_dump_nh_ctx nh; 3639 u16 bucket_index; 3640 }; 3641 3642 static struct rtm_dump_res_bucket_ctx * 3643 rtm_dump_res_bucket_ctx(struct netlink_callback *cb) 3644 { 3645 struct rtm_dump_res_bucket_ctx *ctx = (void *)cb->ctx; 3646 3647 BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx)); 3648 return ctx; 3649 } 3650 3651 struct rtm_dump_nexthop_bucket_data { 3652 struct rtm_dump_res_bucket_ctx *ctx; 3653 
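	/* attribute filter parsed by nh_valid_dump_bucket_req() */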
struct nh_dump_filter filter; 3654 }; 3655 3656 static int rtm_dump_nexthop_bucket_nh(struct sk_buff *skb, 3657 struct netlink_callback *cb, 3658 struct nexthop *nh, 3659 struct rtm_dump_nexthop_bucket_data *dd) 3660 { 3661 u32 portid = NETLINK_CB(cb->skb).portid; 3662 struct nhmsg *nhm = nlmsg_data(cb->nlh); 3663 struct nh_res_table *res_table; 3664 struct nh_group *nhg; 3665 u16 bucket_index; 3666 int err; 3667 3668 nhg = rtnl_dereference(nh->nh_grp); 3669 res_table = rtnl_dereference(nhg->res_table); 3670 for (bucket_index = dd->ctx->bucket_index; 3671 bucket_index < res_table->num_nh_buckets; 3672 bucket_index++) { 3673 struct nh_res_bucket *bucket; 3674 struct nh_grp_entry *nhge; 3675 3676 bucket = &res_table->nh_buckets[bucket_index]; 3677 nhge = rtnl_dereference(bucket->nh_entry); 3678 if (nh_dump_filtered(nhge->nh, &dd->filter, nhm->nh_family)) 3679 continue; 3680 3681 if (dd->filter.res_bucket_nh_id && 3682 dd->filter.res_bucket_nh_id != nhge->nh->id) 3683 continue; 3684 3685 dd->ctx->bucket_index = bucket_index; 3686 err = nh_fill_res_bucket(skb, nh, bucket, bucket_index, 3687 RTM_NEWNEXTHOPBUCKET, portid, 3688 cb->nlh->nlmsg_seq, NLM_F_MULTI, 3689 cb->extack); 3690 if (err) 3691 return err; 3692 } 3693 3694 dd->ctx->bucket_index = 0; 3695 3696 return 0; 3697 } 3698 3699 static int rtm_dump_nexthop_bucket_cb(struct sk_buff *skb, 3700 struct netlink_callback *cb, 3701 struct nexthop *nh, void *data) 3702 { 3703 struct rtm_dump_nexthop_bucket_data *dd = data; 3704 struct nh_group *nhg; 3705 3706 if (!nh->is_group) 3707 return 0; 3708 3709 nhg = rtnl_dereference(nh->nh_grp); 3710 if (!nhg->resilient) 3711 return 0; 3712 3713 return rtm_dump_nexthop_bucket_nh(skb, cb, nh, dd); 3714 } 3715 3716 /* rtnl */ 3717 static int rtm_dump_nexthop_bucket(struct sk_buff *skb, 3718 struct netlink_callback *cb) 3719 { 3720 struct rtm_dump_res_bucket_ctx *ctx = rtm_dump_res_bucket_ctx(cb); 3721 struct rtm_dump_nexthop_bucket_data dd = { .ctx = ctx }; 3722 struct net *net = sock_net(skb->sk); 3723 struct nexthop *nh; 3724 int err; 3725 3726 err = nh_valid_dump_bucket_req(cb->nlh, &dd.filter, cb); 3727 if (err) 3728 return err; 3729 3730 if (dd.filter.nh_id) { 3731 nh = nexthop_find_group_resilient(net, dd.filter.nh_id, 3732 cb->extack); 3733 if (IS_ERR(nh)) 3734 return PTR_ERR(nh); 3735 err = rtm_dump_nexthop_bucket_nh(skb, cb, nh, &dd); 3736 } else { 3737 struct rb_root *root = &net->nexthop.rb_root; 3738 3739 err = rtm_dump_walk_nexthops(skb, cb, root, &ctx->nh, 3740 &rtm_dump_nexthop_bucket_cb, &dd); 3741 } 3742 3743 cb->seq = net->nexthop.seq; 3744 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 3745 return err; 3746 } 3747 3748 static int nh_valid_get_bucket_req_res_bucket(struct nlattr *res, 3749 u16 *bucket_index, 3750 struct netlink_ext_ack *extack) 3751 { 3752 struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_bucket_policy_get)]; 3753 int err; 3754 3755 err = nla_parse_nested(tb, ARRAY_SIZE(rtm_nh_res_bucket_policy_get) - 1, 3756 res, rtm_nh_res_bucket_policy_get, extack); 3757 if (err < 0) 3758 return err; 3759 3760 if (!tb[NHA_RES_BUCKET_INDEX]) { 3761 NL_SET_ERR_MSG(extack, "Bucket index is missing"); 3762 return -EINVAL; 3763 } 3764 3765 *bucket_index = nla_get_u16(tb[NHA_RES_BUCKET_INDEX]); 3766 return 0; 3767 } 3768 3769 static int nh_valid_get_bucket_req(const struct nlmsghdr *nlh, 3770 u32 *id, u16 *bucket_index, 3771 struct netlink_ext_ack *extack) 3772 { 3773 struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get_bucket)]; 3774 int err; 3775 3776 err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb, 3777 
ARRAY_SIZE(rtm_nh_policy_get_bucket) - 1, 3778 rtm_nh_policy_get_bucket, extack); 3779 if (err < 0) 3780 return err; 3781 3782 err = nh_valid_get_del_req(nlh, tb, id, NULL, extack); 3783 if (err) 3784 return err; 3785 3786 if (!tb[NHA_RES_BUCKET]) { 3787 NL_SET_ERR_MSG(extack, "Bucket information is missing"); 3788 return -EINVAL; 3789 } 3790 3791 err = nh_valid_get_bucket_req_res_bucket(tb[NHA_RES_BUCKET], 3792 bucket_index, extack); 3793 if (err) 3794 return err; 3795 3796 return 0; 3797 } 3798 3799 /* rtnl */ 3800 static int rtm_get_nexthop_bucket(struct sk_buff *in_skb, struct nlmsghdr *nlh, 3801 struct netlink_ext_ack *extack) 3802 { 3803 struct net *net = sock_net(in_skb->sk); 3804 struct nh_res_table *res_table; 3805 struct sk_buff *skb = NULL; 3806 struct nh_group *nhg; 3807 struct nexthop *nh; 3808 u16 bucket_index; 3809 int err; 3810 u32 id; 3811 3812 err = nh_valid_get_bucket_req(nlh, &id, &bucket_index, extack); 3813 if (err) 3814 return err; 3815 3816 nh = nexthop_find_group_resilient(net, id, extack); 3817 if (IS_ERR(nh)) 3818 return PTR_ERR(nh); 3819 3820 nhg = rtnl_dereference(nh->nh_grp); 3821 res_table = rtnl_dereference(nhg->res_table); 3822 if (bucket_index >= res_table->num_nh_buckets) { 3823 NL_SET_ERR_MSG(extack, "Bucket index out of bounds"); 3824 return -ENOENT; 3825 } 3826 3827 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 3828 if (!skb) 3829 return -ENOBUFS; 3830 3831 err = nh_fill_res_bucket(skb, nh, &res_table->nh_buckets[bucket_index], 3832 bucket_index, RTM_NEWNEXTHOPBUCKET, 3833 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, 3834 0, extack); 3835 if (err < 0) { 3836 WARN_ON(err == -EMSGSIZE); 3837 goto errout_free; 3838 } 3839 3840 return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); 3841 3842 errout_free: 3843 kfree_skb(skb); 3844 return err; 3845 } 3846 3847 static void nexthop_sync_mtu(struct net_device *dev, u32 orig_mtu) 3848 { 3849 unsigned int hash = nh_dev_hashfn(dev->ifindex); 3850 struct net *net = dev_net(dev); 3851 struct hlist_head *head = &net->nexthop.devhash[hash]; 3852 struct hlist_node *n; 3853 struct nh_info *nhi; 3854 3855 hlist_for_each_entry_safe(nhi, n, head, dev_hash) { 3856 if (nhi->fib_nhc.nhc_dev == dev) { 3857 if (nhi->family == AF_INET) 3858 fib_nhc_update_mtu(&nhi->fib_nhc, dev->mtu, 3859 orig_mtu); 3860 } 3861 } 3862 } 3863 3864 /* rtnl */ 3865 static int nh_netdev_event(struct notifier_block *this, 3866 unsigned long event, void *ptr) 3867 { 3868 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 3869 struct netdev_notifier_info_ext *info_ext; 3870 3871 switch (event) { 3872 case NETDEV_DOWN: 3873 case NETDEV_UNREGISTER: 3874 nexthop_flush_dev(dev, event); 3875 break; 3876 case NETDEV_CHANGE: 3877 if (!(dev_get_flags(dev) & (IFF_RUNNING | IFF_LOWER_UP))) 3878 nexthop_flush_dev(dev, event); 3879 break; 3880 case NETDEV_CHANGEMTU: 3881 info_ext = ptr; 3882 nexthop_sync_mtu(dev, info_ext->ext.mtu); 3883 rt_cache_flush(dev_net(dev)); 3884 break; 3885 } 3886 return NOTIFY_DONE; 3887 } 3888 3889 static struct notifier_block nh_netdev_notifier = { 3890 .notifier_call = nh_netdev_event, 3891 }; 3892 3893 static int nexthops_dump(struct net *net, struct notifier_block *nb, 3894 enum nexthop_event_type event_type, 3895 struct netlink_ext_ack *extack) 3896 { 3897 struct rb_root *root = &net->nexthop.rb_root; 3898 struct rb_node *node; 3899 int err = 0; 3900 3901 for (node = rb_first(root); node; node = rb_next(node)) { 3902 struct nexthop *nh; 3903 3904 nh = rb_entry(node, struct nexthop, rb_node); 3905 err = 
call_nexthop_notifier(nb, net, event_type, nh, extack);
3906 if (err)
3907 break;
3908 }
3909
3910 return err;
3911 }
3912
3913 int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
3914 struct netlink_ext_ack *extack)
3915 {
3916 int err;
3917
3918 rtnl_lock();
3919 err = nexthops_dump(net, nb, NEXTHOP_EVENT_REPLACE, extack);
3920 if (err)
3921 goto unlock;
3922 err = blocking_notifier_chain_register(&net->nexthop.notifier_chain,
3923 nb);
3924 unlock:
3925 rtnl_unlock();
3926 return err;
3927 }
3928 EXPORT_SYMBOL(register_nexthop_notifier);
3929
3930 int __unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
3931 {
3932 int err;
3933
3934 err = blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
3935 nb);
3936 if (!err)
3937 nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL);
3938 return err;
3939 }
3940 EXPORT_SYMBOL(__unregister_nexthop_notifier);
3941
3942 int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
3943 {
3944 int err;
3945
3946 rtnl_lock();
3947 err = __unregister_nexthop_notifier(net, nb);
3948 rtnl_unlock();
3949 return err;
3950 }
3951 EXPORT_SYMBOL(unregister_nexthop_notifier);
3952
3953 void nexthop_set_hw_flags(struct net *net, u32 id, bool offload, bool trap)
3954 {
3955 struct nexthop *nexthop;
3956
3957 rcu_read_lock();
3958
3959 nexthop = nexthop_find_by_id(net, id);
3960 if (!nexthop)
3961 goto out;
3962
3963 nexthop->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
3964 if (offload)
3965 nexthop->nh_flags |= RTNH_F_OFFLOAD;
3966 if (trap)
3967 nexthop->nh_flags |= RTNH_F_TRAP;
3968
3969 out:
3970 rcu_read_unlock();
3971 }
3972 EXPORT_SYMBOL(nexthop_set_hw_flags);
3973
3974 void nexthop_bucket_set_hw_flags(struct net *net, u32 id, u16 bucket_index,
3975 bool offload, bool trap)
3976 {
3977 struct nh_res_table *res_table;
3978 struct nh_res_bucket *bucket;
3979 struct nexthop *nexthop;
3980 struct nh_group *nhg;
3981
3982 rcu_read_lock();
3983
3984 nexthop = nexthop_find_by_id(net, id);
3985 if (!nexthop || !nexthop->is_group)
3986 goto out;
3987
3988 nhg = rcu_dereference(nexthop->nh_grp);
3989 if (!nhg->resilient)
3990 goto out;
3991
3992 res_table = rcu_dereference(nhg->res_table);
3993 if (bucket_index >= res_table->num_nh_buckets)
3994 goto out;
3995
3996 bucket = &res_table->nh_buckets[bucket_index];
3997 bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
3998 if (offload)
3999 bucket->nh_flags |= RTNH_F_OFFLOAD;
4000 if (trap)
4001 bucket->nh_flags |= RTNH_F_TRAP;
4002
4003 out:
4004 rcu_read_unlock();
4005 }
4006 EXPORT_SYMBOL(nexthop_bucket_set_hw_flags);
4007
4008 void nexthop_res_grp_activity_update(struct net *net, u32 id, u16 num_buckets,
4009 unsigned long *activity)
4010 {
4011 struct nh_res_table *res_table;
4012 struct nexthop *nexthop;
4013 struct nh_group *nhg;
4014 u16 i;
4015
4016 rcu_read_lock();
4017
4018 nexthop = nexthop_find_by_id(net, id);
4019 if (!nexthop || !nexthop->is_group)
4020 goto out;
4021
4022 nhg = rcu_dereference(nexthop->nh_grp);
4023 if (!nhg->resilient)
4024 goto out;
4025
4026 /* Instead of silently ignoring some buckets, demand that the sizes
4027 * be the same.
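	 * A driver would typically call this periodically with one bit
	 * per bucket, bit i set if bucket i saw traffic since the last
	 * report; matching bits are folded into the buckets' idle
	 * tracking below.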
4028 */ 4029 res_table = rcu_dereference(nhg->res_table); 4030 if (num_buckets != res_table->num_nh_buckets) 4031 goto out; 4032 4033 for (i = 0; i < num_buckets; i++) { 4034 if (test_bit(i, activity)) 4035 nh_res_bucket_set_busy(&res_table->nh_buckets[i]); 4036 } 4037 4038 out: 4039 rcu_read_unlock(); 4040 } 4041 EXPORT_SYMBOL(nexthop_res_grp_activity_update); 4042 4043 static void __net_exit nexthop_net_exit_batch_rtnl(struct list_head *net_list, 4044 struct list_head *dev_to_kill) 4045 { 4046 struct net *net; 4047 4048 ASSERT_RTNL(); 4049 list_for_each_entry(net, net_list, exit_list) 4050 flush_all_nexthops(net); 4051 } 4052 4053 static void __net_exit nexthop_net_exit(struct net *net) 4054 { 4055 kfree(net->nexthop.devhash); 4056 net->nexthop.devhash = NULL; 4057 } 4058 4059 static int __net_init nexthop_net_init(struct net *net) 4060 { 4061 size_t sz = sizeof(struct hlist_head) * NH_DEV_HASHSIZE; 4062 4063 net->nexthop.rb_root = RB_ROOT; 4064 net->nexthop.devhash = kzalloc(sz, GFP_KERNEL); 4065 if (!net->nexthop.devhash) 4066 return -ENOMEM; 4067 BLOCKING_INIT_NOTIFIER_HEAD(&net->nexthop.notifier_chain); 4068 4069 return 0; 4070 } 4071 4072 static struct pernet_operations nexthop_net_ops = { 4073 .init = nexthop_net_init, 4074 .exit = nexthop_net_exit, 4075 .exit_batch_rtnl = nexthop_net_exit_batch_rtnl, 4076 }; 4077 4078 static const struct rtnl_msg_handler nexthop_rtnl_msg_handlers[] __initconst = { 4079 {.msgtype = RTM_NEWNEXTHOP, .doit = rtm_new_nexthop, 4080 .flags = RTNL_FLAG_DOIT_PERNET}, 4081 {.msgtype = RTM_DELNEXTHOP, .doit = rtm_del_nexthop, 4082 .flags = RTNL_FLAG_DOIT_PERNET}, 4083 {.msgtype = RTM_GETNEXTHOP, .doit = rtm_get_nexthop, 4084 .dumpit = rtm_dump_nexthop}, 4085 {.msgtype = RTM_GETNEXTHOPBUCKET, .doit = rtm_get_nexthop_bucket, 4086 .dumpit = rtm_dump_nexthop_bucket}, 4087 {.protocol = PF_INET, .msgtype = RTM_NEWNEXTHOP, 4088 .doit = rtm_new_nexthop, .flags = RTNL_FLAG_DOIT_PERNET}, 4089 {.protocol = PF_INET, .msgtype = RTM_GETNEXTHOP, 4090 .dumpit = rtm_dump_nexthop}, 4091 {.protocol = PF_INET6, .msgtype = RTM_NEWNEXTHOP, 4092 .doit = rtm_new_nexthop, .flags = RTNL_FLAG_DOIT_PERNET}, 4093 {.protocol = PF_INET6, .msgtype = RTM_GETNEXTHOP, 4094 .dumpit = rtm_dump_nexthop}, 4095 }; 4096 4097 static int __init nexthop_init(void) 4098 { 4099 register_pernet_subsys(&nexthop_net_ops); 4100 4101 register_netdevice_notifier(&nh_netdev_notifier); 4102 4103 rtnl_register_many(nexthop_rtnl_msg_handlers); 4104 4105 return 0; 4106 } 4107 subsys_initcall(nexthop_init); 4108
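
/* Illustrative iproute2 invocations (examples only; IDs and device
 * names are made up) that end up in the handlers registered above:
 *
 *   ip nexthop add id 1 via 192.0.2.1 dev eth0          # RTM_NEWNEXTHOP
 *   ip nexthop add id 2 via 192.0.2.2 dev eth0
 *   ip nexthop add id 100 group 1/2 type resilient \
 *           buckets 32 idle_timer 120                   # resilient group
 *   ip nexthop get id 100                               # RTM_GETNEXTHOP
 *   ip nexthop bucket list id 100                       # RTM_GETNEXTHOPBUCKET
 *   ip nexthop del id 100                               # RTM_DELNEXTHOP
 */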