// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);

typedef void switchdev_deferred_func_t(struct net_device *dev,
				       const void *data);

struct switchdev_deferred_item {
	struct list_head list;
	struct net_device *dev;
	switchdev_deferred_func_t *func;
	unsigned long data[];
};

static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
{
	struct switchdev_deferred_item *dfitem;

	spin_lock_bh(&deferred_lock);
	if (list_empty(&deferred)) {
		dfitem = NULL;
		goto unlock;
	}
	dfitem = list_first_entry(&deferred,
				  struct switchdev_deferred_item, list);
	list_del(&dfitem->list);
unlock:
	spin_unlock_bh(&deferred_lock);
	return dfitem;
}

/**
 * switchdev_deferred_process - Process ops in deferred queue
 *
 * Called to flush the ops currently queued in deferred ops queue.
 * rtnl_lock must be held.
 */
void switchdev_deferred_process(void)
{
	struct switchdev_deferred_item *dfitem;

	ASSERT_RTNL();

	while ((dfitem = switchdev_deferred_dequeue())) {
		dfitem->func(dfitem->dev, dfitem->data);
		dev_put(dfitem->dev);
		kfree(dfitem);
	}
}
EXPORT_SYMBOL_GPL(switchdev_deferred_process);

static void switchdev_deferred_process_work(struct work_struct *work)
{
	rtnl_lock();
	switchdev_deferred_process();
	rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);

static int switchdev_deferred_enqueue(struct net_device *dev,
				      const void *data, size_t data_len,
				      switchdev_deferred_func_t *func)
{
	struct switchdev_deferred_item *dfitem;

	dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC);
	if (!dfitem)
		return -ENOMEM;
	dfitem->dev = dev;
	dfitem->func = func;
	memcpy(dfitem->data, data, data_len);
	dev_hold(dev);
	spin_lock_bh(&deferred_lock);
	list_add_tail(&dfitem->list, &deferred);
	spin_unlock_bh(&deferred_lock);
	schedule_work(&deferred_process_work);
	return 0;
}

static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
				      struct net_device *dev,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack)
{
	int err;
	int rc;

	struct switchdev_notifier_port_attr_info attr_info = {
		.attr = attr,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev,
					       &attr_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!attr_info.handled);
		return err;
	}

	if (!attr_info.handled)
		return -EOPNOTSUPP;

	return 0;
}

static int switchdev_port_attr_set_now(struct net_device *dev,
				       const struct switchdev_attr *attr,
				       struct netlink_ext_ack *extack)
{
	return switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
					  extack);
}

static void switchdev_port_attr_set_deferred(struct net_device *dev,
					     const void *data)
{
	const struct switchdev_attr *attr = data;
	int err;

	err = switchdev_port_attr_set_now(dev, attr, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
			   err, attr->id);
	if (attr->complete)
		attr->complete(dev, err, attr->complete_priv);
}

static int switchdev_port_attr_set_defer(struct net_device *dev,
					 const struct switchdev_attr *attr)
{
	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
					  switchdev_port_attr_set_deferred);
}

/**
 * switchdev_port_attr_set - Set port attribute
 *
 * @dev: port device
 * @attr: attribute to set
 * @extack: netlink extended ack, for error message propagation
 *
 * rtnl_lock must be held and must not be in atomic section,
 * in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_attr_set(struct net_device *dev,
			    const struct switchdev_attr *attr,
			    struct netlink_ext_ack *extack)
{
	if (attr->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_attr_set_defer(dev, attr);
	ASSERT_RTNL();
	return switchdev_port_attr_set_now(dev, attr, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
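
/*
 * Example (illustrative sketch, not part of this file's API): a caller such
 * as the bridge sets the STP state of a port through switchdev_port_attr_set().
 * SWITCHDEV_ATTR_ID_PORT_STP_STATE, SWITCHDEV_F_DEFER and BR_STATE_FORWARDING
 * are real identifiers; the wrapping function below is hypothetical. With
 * SWITCHDEV_F_DEFER set, the call may be made from atomic context and the
 * operation is queued and executed later under rtnl_lock.
 *
 *	static int example_set_stp_forwarding(struct net_device *dev)
 *	{
 *		struct switchdev_attr attr = {
 *			.orig_dev = dev,
 *			.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
 *			.flags = SWITCHDEV_F_DEFER,
 *			.u.stp_state = BR_STATE_FORWARDING,
 *		};
 *
 *		return switchdev_port_attr_set(dev, &attr, NULL);
 *	}
 */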

static size_t switchdev_obj_size(const struct switchdev_obj *obj)
{
	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		return sizeof(struct switchdev_obj_port_vlan);
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	default:
		BUG();
	}
	return 0;
}

static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
				     struct net_device *dev,
				     const struct switchdev_obj *obj,
				     struct netlink_ext_ack *extack)
{
	int rc;
	int err;

	struct switchdev_notifier_port_obj_info obj_info = {
		.obj = obj,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!obj_info.handled);
		return err;
	}
	if (!obj_info.handled)
		return -EOPNOTSUPP;
	return 0;
}

static void switchdev_port_obj_add_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	ASSERT_RTNL();
	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					dev, obj, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_add_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_add_deferred);
}

/**
 * switchdev_port_obj_add - Add port object
 *
 * @dev: port device
 * @obj: object to add
 * @extack: netlink extended ack
 *
 * rtnl_lock must be held and must not be in atomic section,
 * in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_add(struct net_device *dev,
			   const struct switchdev_obj *obj,
			   struct netlink_ext_ack *extack)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_add_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					 dev, obj, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
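
/*
 * Example (illustrative sketch): adding a VLAN object on a port, much like the
 * bridge layer does. SWITCHDEV_OBJ_ID_PORT_VLAN, struct switchdev_obj_port_vlan
 * and BRIDGE_VLAN_INFO_UNTAGGED are real; example_add_untagged_vlan() is a
 * hypothetical wrapper.
 *
 *	static int example_add_untagged_vlan(struct net_device *dev, u16 vid,
 *					     struct netlink_ext_ack *extack)
 *	{
 *		struct switchdev_obj_port_vlan vlan = {
 *			.obj.orig_dev = dev,
 *			.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 *			.flags = BRIDGE_VLAN_INFO_UNTAGGED,
 *			.vid = vid,
 *		};
 *
 *		return switchdev_port_obj_add(dev, &vlan.obj, extack);
 *	}
 */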

static int switchdev_port_obj_del_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
					 dev, obj, NULL);
}

static void switchdev_port_obj_del_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	err = switchdev_port_obj_del_now(dev, obj);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_del_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_del_deferred);
}

/**
 * switchdev_port_obj_del - Delete port object
 *
 * @dev: port device
 * @obj: object to delete
 *
 * rtnl_lock must be held and must not be in atomic section,
 * in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_del(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_del_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);

static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);

/**
 * register_switchdev_notifier - Register notifier
 * @nb: notifier_block
 *
 * Register switch device notifier.
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);

/**
 * unregister_switchdev_notifier - Unregister notifier
 * @nb: notifier_block
 *
 * Unregister switch device notifier.
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);

/**
 * call_switchdev_notifiers - Call notifiers
 * @val: value passed unmodified to notifier function
 * @dev: port device
 * @info: notifier information data
 * @extack: netlink extended ack
 *
 * Call all network notifier blocks.
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info,
			     struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);

int register_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_register(chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);

int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_unregister(chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);

int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
				      struct switchdev_notifier_info *info,
				      struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
					    val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);
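
/*
 * Example (hypothetical driver skeleton): a switchdev driver typically
 * registers one atomic and one blocking notifier block and dispatches the
 * events from their handlers. Only the register/unregister helpers above are
 * real; the example_* names (including the two handler functions referenced
 * but not shown) are made up.
 *
 *	static struct notifier_block example_switchdev_nb = {
 *		.notifier_call = example_switchdev_event,
 *	};
 *
 *	static struct notifier_block example_switchdev_blocking_nb = {
 *		.notifier_call = example_switchdev_blocking_event,
 *	};
 *
 *	static int example_switchdev_register(void)
 *	{
 *		int err;
 *
 *		err = register_switchdev_notifier(&example_switchdev_nb);
 *		if (err)
 *			return err;
 *
 *		err = register_switchdev_blocking_notifier(&example_switchdev_blocking_nb);
 *		if (err)
 *			unregister_switchdev_notifier(&example_switchdev_nb);
 *
 *		return err;
 *	}
 */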

struct switchdev_nested_priv {
	bool (*check_cb)(const struct net_device *dev);
	bool (*foreign_dev_check_cb)(const struct net_device *dev,
				     const struct net_device *foreign_dev);
	const struct net_device *dev;
	struct net_device *lower_dev;
};

static int switchdev_lower_dev_walk(struct net_device *lower_dev,
				    struct netdev_nested_priv *priv)
{
	struct switchdev_nested_priv *switchdev_priv = priv->data;
	bool (*foreign_dev_check_cb)(const struct net_device *dev,
				     const struct net_device *foreign_dev);
	bool (*check_cb)(const struct net_device *dev);
	const struct net_device *dev;

	check_cb = switchdev_priv->check_cb;
	foreign_dev_check_cb = switchdev_priv->foreign_dev_check_cb;
	dev = switchdev_priv->dev;

	if (check_cb(lower_dev) && !foreign_dev_check_cb(lower_dev, dev)) {
		switchdev_priv->lower_dev = lower_dev;
		return 1;
	}

	return 0;
}

static struct net_device *
switchdev_lower_dev_find(struct net_device *dev,
			 bool (*check_cb)(const struct net_device *dev),
			 bool (*foreign_dev_check_cb)(const struct net_device *dev,
						      const struct net_device *foreign_dev))
{
	struct switchdev_nested_priv switchdev_priv = {
		.check_cb = check_cb,
		.foreign_dev_check_cb = foreign_dev_check_cb,
		.dev = dev,
		.lower_dev = NULL,
	};
	struct netdev_nested_priv priv = {
		.data = &switchdev_priv,
	};

	netdev_walk_all_lower_dev_rcu(dev, switchdev_lower_dev_walk, &priv);

	return switchdev_priv.lower_dev;
}

static int __switchdev_handle_fdb_add_to_device(struct net_device *dev,
		const struct net_device *orig_dev,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*add_cb)(struct net_device *dev,
			      const struct net_device *orig_dev, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info),
		int (*lag_add_cb)(struct net_device *dev,
				  const struct net_device *orig_dev, const void *ctx,
				  const struct switchdev_notifier_fdb_info *fdb_info))
{
	const struct switchdev_notifier_info *info = &fdb_info->info;
	struct net_device *br, *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev))
		return add_cb(dev, orig_dev, info->ctx, fdb_info);

	if (netif_is_lag_master(dev)) {
		if (!switchdev_lower_dev_find(dev, check_cb, foreign_dev_check_cb))
			goto maybe_bridged_with_us;

		/* This is a LAG interface that we offload */
		if (!lag_add_cb)
			return -EOPNOTSUPP;

		return lag_add_cb(dev, orig_dev, info->ctx, fdb_info);
	}

	/* Recurse through lower interfaces in case the FDB entry is pointing
	 * towards a bridge device.
	 */
	if (netif_is_bridge_master(dev)) {
		if (!switchdev_lower_dev_find(dev, check_cb, foreign_dev_check_cb))
			return 0;

		/* This is a bridge interface that we offload */
		netdev_for_each_lower_dev(dev, lower_dev, iter) {
			/* Do not propagate FDB entries across bridges */
			if (netif_is_bridge_master(lower_dev))
				continue;

			/* Bridge ports might be either us, or LAG interfaces
			 * that we offload.
			 */
			if (!check_cb(lower_dev) &&
			    !switchdev_lower_dev_find(lower_dev, check_cb,
						      foreign_dev_check_cb))
				continue;

			err = __switchdev_handle_fdb_add_to_device(lower_dev, orig_dev,
								   fdb_info, check_cb,
								   foreign_dev_check_cb,
								   add_cb, lag_add_cb);
			if (err && err != -EOPNOTSUPP)
				return err;
		}

		return 0;
	}

maybe_bridged_with_us:
	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	br = netdev_master_upper_dev_get_rcu(dev);
	if (!br || !netif_is_bridge_master(br))
		return 0;

	if (!switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb))
		return 0;

	return __switchdev_handle_fdb_add_to_device(br, orig_dev, fdb_info,
						    check_cb, foreign_dev_check_cb,
						    add_cb, lag_add_cb);
}

int switchdev_handle_fdb_add_to_device(struct net_device *dev,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*add_cb)(struct net_device *dev,
			      const struct net_device *orig_dev, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info),
		int (*lag_add_cb)(struct net_device *dev,
				  const struct net_device *orig_dev, const void *ctx,
				  const struct switchdev_notifier_fdb_info *fdb_info))
{
	int err;

	err = __switchdev_handle_fdb_add_to_device(dev, dev, fdb_info,
						   check_cb,
						   foreign_dev_check_cb,
						   add_cb, lag_add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_fdb_add_to_device);
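
/*
 * Example (hypothetical): a driver would call this helper when handling the
 * SWITCHDEV_FDB_ADD_TO_DEVICE atomic notifier event, passing callbacks that
 * recognize its own ports and offloaded LAG uppers. The example_* callbacks
 * are made up; only the helper itself is real.
 *
 *	err = switchdev_handle_fdb_add_to_device(dev, fdb_info,
 *						 example_port_dev_check,
 *						 example_foreign_dev_check,
 *						 example_port_fdb_add,
 *						 example_lag_fdb_add);
 */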

static int __switchdev_handle_fdb_del_to_device(struct net_device *dev,
		const struct net_device *orig_dev,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*del_cb)(struct net_device *dev,
			      const struct net_device *orig_dev, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info),
		int (*lag_del_cb)(struct net_device *dev,
				  const struct net_device *orig_dev, const void *ctx,
				  const struct switchdev_notifier_fdb_info *fdb_info))
{
	const struct switchdev_notifier_info *info = &fdb_info->info;
	struct net_device *br, *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev))
		return del_cb(dev, orig_dev, info->ctx, fdb_info);

	if (netif_is_lag_master(dev)) {
		if (!switchdev_lower_dev_find(dev, check_cb, foreign_dev_check_cb))
			goto maybe_bridged_with_us;

		/* This is a LAG interface that we offload */
		if (!lag_del_cb)
			return -EOPNOTSUPP;

		return lag_del_cb(dev, orig_dev, info->ctx, fdb_info);
	}

	/* Recurse through lower interfaces in case the FDB entry is pointing
	 * towards a bridge device.
	 */
	if (netif_is_bridge_master(dev)) {
		if (!switchdev_lower_dev_find(dev, check_cb, foreign_dev_check_cb))
			return 0;

		/* This is a bridge interface that we offload */
		netdev_for_each_lower_dev(dev, lower_dev, iter) {
			/* Do not propagate FDB entries across bridges */
			if (netif_is_bridge_master(lower_dev))
				continue;

			/* Bridge ports might be either us, or LAG interfaces
			 * that we offload.
			 */
			if (!check_cb(lower_dev) &&
			    !switchdev_lower_dev_find(lower_dev, check_cb,
						      foreign_dev_check_cb))
				continue;

			err = __switchdev_handle_fdb_del_to_device(lower_dev, orig_dev,
								   fdb_info, check_cb,
								   foreign_dev_check_cb,
								   del_cb, lag_del_cb);
			if (err && err != -EOPNOTSUPP)
				return err;
		}

		return 0;
	}

maybe_bridged_with_us:
	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	br = netdev_master_upper_dev_get_rcu(dev);
	if (!br || !netif_is_bridge_master(br))
		return 0;

	if (!switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb))
		return 0;

	return __switchdev_handle_fdb_del_to_device(br, orig_dev, fdb_info,
						    check_cb, foreign_dev_check_cb,
						    del_cb, lag_del_cb);
}

int switchdev_handle_fdb_del_to_device(struct net_device *dev,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*del_cb)(struct net_device *dev,
			      const struct net_device *orig_dev, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info),
		int (*lag_del_cb)(struct net_device *dev,
				  const struct net_device *orig_dev, const void *ctx,
				  const struct switchdev_notifier_fdb_info *fdb_info))
{
	int err;

	err = __switchdev_handle_fdb_del_to_device(dev, dev, fdb_info,
						   check_cb,
						   foreign_dev_check_cb,
						   del_cb, lag_del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_fdb_del_to_device);

static int __switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct netlink_ext_ack *extack;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		err = add_cb(dev, info->ctx, port_obj_info->obj, extack);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
						      check_cb, add_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
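
/*
 * Example (hypothetical): dispatching SWITCHDEV_PORT_OBJ_ADD from a driver's
 * blocking notifier handler to the helper above. switchdev_notifier_info_to_dev()
 * and notifier_from_errno() are real helpers; the example_* names are made up.
 *
 *	static int example_switchdev_blocking_event(struct notifier_block *nb,
 *						    unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
 *		int err;
 *
 *		switch (event) {
 *		case SWITCHDEV_PORT_OBJ_ADD:
 *			err = switchdev_handle_port_obj_add(dev, ptr,
 *							    example_port_dev_check,
 *							    example_port_obj_add);
 *			return notifier_from_errno(err);
 *		}
 *
 *		return NOTIFY_DONE;
 *	}
 */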

static int __switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev)) {
		err = del_cb(dev, info->ctx, port_obj_info->obj);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
						      check_cb, del_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	int err;

	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
					      del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);

static int __switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_attr_info->info;
	struct netlink_ext_ack *extack;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		err = set_cb(dev, info->ctx, port_attr_info->attr, extack);
		if (err != -EOPNOTSUPP)
			port_attr_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
						       check_cb, set_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
					       set_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);

int switchdev_bridge_port_offload(struct net_device *brport_dev,
				  struct net_device *dev, const void *ctx,
				  struct notifier_block *atomic_nb,
				  struct notifier_block *blocking_nb,
				  bool tx_fwd_offload,
				  struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.dev = dev,
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
			.tx_fwd_offload = tx_fwd_offload,
		},
	};
	int err;

	ASSERT_RTNL();

	err = call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_OFFLOADED,
						brport_dev, &brport_info.info,
						extack);
	return notifier_to_errno(err);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_offload);

void switchdev_bridge_port_unoffload(struct net_device *brport_dev,
				     const void *ctx,
				     struct notifier_block *atomic_nb,
				     struct notifier_block *blocking_nb)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
		},
	};

	ASSERT_RTNL();

	call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_UNOFFLOADED,
					  brport_dev, &brport_info.info,
					  NULL);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_unoffload);
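
/*
 * Example (hypothetical): a port driver announces bridge port offload when it
 * joins a bridge (e.g. from its NETDEV_CHANGEUPPER handling, under rtnl_lock)
 * and withdraws it when leaving. Only the two switchdev calls are real; the
 * example_* names, the struct and the notifier blocks are made up.
 *
 *	static int example_port_bridge_join(struct example_port *port,
 *					    struct netlink_ext_ack *extack)
 *	{
 *		return switchdev_bridge_port_offload(port->netdev, port->netdev,
 *						     port, &example_switchdev_nb,
 *						     &example_switchdev_blocking_nb,
 *						     false, extack);
 *	}
 *
 *	static void example_port_bridge_leave(struct example_port *port)
 *	{
 *		switchdev_bridge_port_unoffload(port->netdev, port,
 *						&example_switchdev_nb,
 *						&example_switchdev_blocking_nb);
 *	}
 */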