1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Handling of a single switch port 4 * 5 * Copyright (c) 2017 Savoir-faire Linux Inc. 6 * Vivien Didelot <vivien.didelot@savoirfairelinux.com> 7 */ 8 9 #include <linux/if_bridge.h> 10 #include <linux/notifier.h> 11 #include <linux/of_mdio.h> 12 #include <linux/of_net.h> 13 14 #include "dsa_priv.h" 15 16 /** 17 * dsa_port_notify - Notify the switching fabric of changes to a port 18 * @dp: port on which change occurred 19 * @e: event, must be of type DSA_NOTIFIER_* 20 * @v: event-specific value. 21 * 22 * Notify all switches in the DSA tree that this port's switch belongs to, 23 * including this switch itself, of an event. Allows the other switches to 24 * reconfigure themselves for cross-chip operations. Can also be used to 25 * reconfigure ports without net_devices (CPU ports, DSA links) whenever 26 * a user port's state changes. 27 */ 28 static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v) 29 { 30 return dsa_tree_notify(dp->ds->dst, e, v); 31 } 32 33 static void dsa_port_notify_bridge_fdb_flush(const struct dsa_port *dp) 34 { 35 struct net_device *brport_dev = dsa_port_to_bridge_port(dp); 36 struct switchdev_notifier_fdb_info info = { 37 /* flush all VLANs */ 38 .vid = 0, 39 }; 40 41 /* When the port becomes standalone it has already left the bridge. 42 * Don't notify the bridge in that case. 43 */ 44 if (!brport_dev) 45 return; 46 47 call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE, 48 brport_dev, &info.info, NULL); 49 } 50 51 static void dsa_port_fast_age(const struct dsa_port *dp) 52 { 53 struct dsa_switch *ds = dp->ds; 54 55 if (!ds->ops->port_fast_age) 56 return; 57 58 ds->ops->port_fast_age(ds, dp->index); 59 60 dsa_port_notify_bridge_fdb_flush(dp); 61 } 62 63 static bool dsa_port_can_configure_learning(struct dsa_port *dp) 64 { 65 struct switchdev_brport_flags flags = { 66 .mask = BR_LEARNING, 67 }; 68 struct dsa_switch *ds = dp->ds; 69 int err; 70 71 if (!ds->ops->port_bridge_flags || !ds->ops->port_pre_bridge_flags) 72 return false; 73 74 err = ds->ops->port_pre_bridge_flags(ds, dp->index, flags, NULL); 75 return !err; 76 } 77 78 int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age) 79 { 80 struct dsa_switch *ds = dp->ds; 81 int port = dp->index; 82 83 if (!ds->ops->port_stp_state_set) 84 return -EOPNOTSUPP; 85 86 ds->ops->port_stp_state_set(ds, port, state); 87 88 if (!dsa_port_can_configure_learning(dp) || 89 (do_fast_age && dp->learning)) { 90 /* Fast age FDB entries or flush appropriate forwarding database 91 * for the given port, if we are moving it from Learning or 92 * Forwarding state, to Disabled or Blocking or Listening state. 93 * Ports that were standalone before the STP state change don't 94 * need to fast age the FDB, since address learning is off in 95 * standalone mode. 96 */ 97 98 if ((dp->stp_state == BR_STATE_LEARNING || 99 dp->stp_state == BR_STATE_FORWARDING) && 100 (state == BR_STATE_DISABLED || 101 state == BR_STATE_BLOCKING || 102 state == BR_STATE_LISTENING)) 103 dsa_port_fast_age(dp); 104 } 105 106 dp->stp_state = state; 107 108 return 0; 109 } 110 111 static void dsa_port_set_state_now(struct dsa_port *dp, u8 state, 112 bool do_fast_age) 113 { 114 int err; 115 116 err = dsa_port_set_state(dp, state, do_fast_age); 117 if (err) 118 pr_err("DSA: failed to set STP state %u (%d)\n", state, err); 119 } 120 121 int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy) 122 { 123 struct dsa_switch *ds = dp->ds; 124 int port = dp->index; 125 int err; 126 127 if (ds->ops->port_enable) { 128 err = ds->ops->port_enable(ds, port, phy); 129 if (err) 130 return err; 131 } 132 133 if (!dp->bridge) 134 dsa_port_set_state_now(dp, BR_STATE_FORWARDING, false); 135 136 if (dp->pl) 137 phylink_start(dp->pl); 138 139 return 0; 140 } 141 142 int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy) 143 { 144 int err; 145 146 rtnl_lock(); 147 err = dsa_port_enable_rt(dp, phy); 148 rtnl_unlock(); 149 150 return err; 151 } 152 153 void dsa_port_disable_rt(struct dsa_port *dp) 154 { 155 struct dsa_switch *ds = dp->ds; 156 int port = dp->index; 157 158 if (dp->pl) 159 phylink_stop(dp->pl); 160 161 if (!dp->bridge) 162 dsa_port_set_state_now(dp, BR_STATE_DISABLED, false); 163 164 if (ds->ops->port_disable) 165 ds->ops->port_disable(ds, port); 166 } 167 168 void dsa_port_disable(struct dsa_port *dp) 169 { 170 rtnl_lock(); 171 dsa_port_disable_rt(dp); 172 rtnl_unlock(); 173 } 174 175 static int dsa_port_inherit_brport_flags(struct dsa_port *dp, 176 struct netlink_ext_ack *extack) 177 { 178 const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | 179 BR_BCAST_FLOOD | BR_PORT_LOCKED; 180 struct net_device *brport_dev = dsa_port_to_bridge_port(dp); 181 int flag, err; 182 183 for_each_set_bit(flag, &mask, 32) { 184 struct switchdev_brport_flags flags = {0}; 185 186 flags.mask = BIT(flag); 187 188 if (br_port_flag_is_set(brport_dev, BIT(flag))) 189 flags.val = BIT(flag); 190 191 err = dsa_port_bridge_flags(dp, flags, extack); 192 if (err && err != -EOPNOTSUPP) 193 return err; 194 } 195 196 return 0; 197 } 198 199 static void dsa_port_clear_brport_flags(struct dsa_port *dp) 200 { 201 const unsigned long val = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD; 202 const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | 203 BR_BCAST_FLOOD | BR_PORT_LOCKED; 204 int flag, err; 205 206 for_each_set_bit(flag, &mask, 32) { 207 struct switchdev_brport_flags flags = {0}; 208 209 flags.mask = BIT(flag); 210 flags.val = val & BIT(flag); 211 212 err = dsa_port_bridge_flags(dp, flags, NULL); 213 if (err && err != -EOPNOTSUPP) 214 dev_err(dp->ds->dev, 215 "failed to clear bridge port flag %lu: %pe\n", 216 flags.val, ERR_PTR(err)); 217 } 218 } 219 220 static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp, 221 struct netlink_ext_ack *extack) 222 { 223 struct net_device *brport_dev = dsa_port_to_bridge_port(dp); 224 struct net_device *br = dsa_port_bridge_dev_get(dp); 225 int err; 226 227 err = dsa_port_inherit_brport_flags(dp, extack); 228 if (err) 229 return err; 230 231 err = dsa_port_set_state(dp, br_port_get_stp_state(brport_dev), false); 232 if (err && err != -EOPNOTSUPP) 233 return err; 234 235 err = dsa_port_vlan_filtering(dp, br_vlan_enabled(br), extack); 236 if (err && err != -EOPNOTSUPP) 237 return err; 238 239 err = dsa_port_ageing_time(dp, br_get_ageing_time(br)); 240 if (err && err != -EOPNOTSUPP) 241 return err; 242 243 return 0; 244 } 245 246 static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp) 247 { 248 /* Configure the port for standalone mode (no address learning, 249 * flood everything). 250 * The bridge only emits SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS events 251 * when the user requests it through netlink or sysfs, but not 252 * automatically at port join or leave, so we need to handle resetting 253 * the brport flags ourselves. But we even prefer it that way, because 254 * otherwise, some setups might never get the notification they need, 255 * for example, when a port leaves a LAG that offloads the bridge, 256 * it becomes standalone, but as far as the bridge is concerned, no 257 * port ever left. 258 */ 259 dsa_port_clear_brport_flags(dp); 260 261 /* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer, 262 * so allow it to be in BR_STATE_FORWARDING to be kept functional 263 */ 264 dsa_port_set_state_now(dp, BR_STATE_FORWARDING, true); 265 266 /* VLAN filtering is handled by dsa_switch_bridge_leave */ 267 268 /* Ageing time may be global to the switch chip, so don't change it 269 * here because we have no good reason (or value) to change it to. 270 */ 271 } 272 273 static int dsa_port_bridge_create(struct dsa_port *dp, 274 struct net_device *br, 275 struct netlink_ext_ack *extack) 276 { 277 struct dsa_switch *ds = dp->ds; 278 struct dsa_bridge *bridge; 279 280 bridge = dsa_tree_bridge_find(ds->dst, br); 281 if (bridge) { 282 refcount_inc(&bridge->refcount); 283 dp->bridge = bridge; 284 return 0; 285 } 286 287 bridge = kzalloc(sizeof(*bridge), GFP_KERNEL); 288 if (!bridge) 289 return -ENOMEM; 290 291 refcount_set(&bridge->refcount, 1); 292 293 bridge->dev = br; 294 295 bridge->num = dsa_bridge_num_get(br, ds->max_num_bridges); 296 if (ds->max_num_bridges && !bridge->num) { 297 NL_SET_ERR_MSG_MOD(extack, 298 "Range of offloadable bridges exceeded"); 299 kfree(bridge); 300 return -EOPNOTSUPP; 301 } 302 303 dp->bridge = bridge; 304 305 return 0; 306 } 307 308 static void dsa_port_bridge_destroy(struct dsa_port *dp, 309 const struct net_device *br) 310 { 311 struct dsa_bridge *bridge = dp->bridge; 312 313 dp->bridge = NULL; 314 315 if (!refcount_dec_and_test(&bridge->refcount)) 316 return; 317 318 if (bridge->num) 319 dsa_bridge_num_put(br, bridge->num); 320 321 kfree(bridge); 322 } 323 324 int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br, 325 struct netlink_ext_ack *extack) 326 { 327 struct dsa_notifier_bridge_info info = { 328 .tree_index = dp->ds->dst->index, 329 .sw_index = dp->ds->index, 330 .port = dp->index, 331 }; 332 struct net_device *dev = dp->slave; 333 struct net_device *brport_dev; 334 int err; 335 336 /* Here the interface is already bridged. Reflect the current 337 * configuration so that drivers can program their chips accordingly. 338 */ 339 err = dsa_port_bridge_create(dp, br, extack); 340 if (err) 341 return err; 342 343 brport_dev = dsa_port_to_bridge_port(dp); 344 345 info.bridge = *dp->bridge; 346 err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info); 347 if (err) 348 goto out_rollback; 349 350 /* Drivers which support bridge TX forwarding should set this */ 351 dp->bridge->tx_fwd_offload = info.tx_fwd_offload; 352 353 err = switchdev_bridge_port_offload(brport_dev, dev, dp, 354 &dsa_slave_switchdev_notifier, 355 &dsa_slave_switchdev_blocking_notifier, 356 dp->bridge->tx_fwd_offload, extack); 357 if (err) 358 goto out_rollback_unbridge; 359 360 err = dsa_port_switchdev_sync_attrs(dp, extack); 361 if (err) 362 goto out_rollback_unoffload; 363 364 return 0; 365 366 out_rollback_unoffload: 367 switchdev_bridge_port_unoffload(brport_dev, dp, 368 &dsa_slave_switchdev_notifier, 369 &dsa_slave_switchdev_blocking_notifier); 370 out_rollback_unbridge: 371 dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info); 372 out_rollback: 373 dsa_port_bridge_destroy(dp, br); 374 return err; 375 } 376 377 void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br) 378 { 379 struct net_device *brport_dev = dsa_port_to_bridge_port(dp); 380 381 /* Don't try to unoffload something that is not offloaded */ 382 if (!brport_dev) 383 return; 384 385 switchdev_bridge_port_unoffload(brport_dev, dp, 386 &dsa_slave_switchdev_notifier, 387 &dsa_slave_switchdev_blocking_notifier); 388 389 dsa_flush_workqueue(); 390 } 391 392 void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br) 393 { 394 struct dsa_notifier_bridge_info info = { 395 .tree_index = dp->ds->dst->index, 396 .sw_index = dp->ds->index, 397 .port = dp->index, 398 }; 399 int err; 400 401 /* If the port could not be offloaded to begin with, then 402 * there is nothing to do. 403 */ 404 if (!dp->bridge) 405 return; 406 407 info.bridge = *dp->bridge; 408 409 /* Here the port is already unbridged. Reflect the current configuration 410 * so that drivers can program their chips accordingly. 411 */ 412 dsa_port_bridge_destroy(dp, br); 413 414 err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info); 415 if (err) 416 dev_err(dp->ds->dev, 417 "port %d failed to notify DSA_NOTIFIER_BRIDGE_LEAVE: %pe\n", 418 dp->index, ERR_PTR(err)); 419 420 dsa_port_switchdev_unsync_attrs(dp); 421 } 422 423 int dsa_port_lag_change(struct dsa_port *dp, 424 struct netdev_lag_lower_state_info *linfo) 425 { 426 struct dsa_notifier_lag_info info = { 427 .sw_index = dp->ds->index, 428 .port = dp->index, 429 }; 430 bool tx_enabled; 431 432 if (!dp->lag) 433 return 0; 434 435 /* On statically configured aggregates (e.g. loadbalance 436 * without LACP) ports will always be tx_enabled, even if the 437 * link is down. Thus we require both link_up and tx_enabled 438 * in order to include it in the tx set. 439 */ 440 tx_enabled = linfo->link_up && linfo->tx_enabled; 441 442 if (tx_enabled == dp->lag_tx_enabled) 443 return 0; 444 445 dp->lag_tx_enabled = tx_enabled; 446 447 return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info); 448 } 449 450 static int dsa_port_lag_create(struct dsa_port *dp, 451 struct net_device *lag_dev) 452 { 453 struct dsa_switch *ds = dp->ds; 454 struct dsa_lag *lag; 455 456 lag = dsa_tree_lag_find(ds->dst, lag_dev); 457 if (lag) { 458 refcount_inc(&lag->refcount); 459 dp->lag = lag; 460 return 0; 461 } 462 463 lag = kzalloc(sizeof(*lag), GFP_KERNEL); 464 if (!lag) 465 return -ENOMEM; 466 467 refcount_set(&lag->refcount, 1); 468 mutex_init(&lag->fdb_lock); 469 INIT_LIST_HEAD(&lag->fdbs); 470 lag->dev = lag_dev; 471 dsa_lag_map(ds->dst, lag); 472 dp->lag = lag; 473 474 return 0; 475 } 476 477 static void dsa_port_lag_destroy(struct dsa_port *dp) 478 { 479 struct dsa_lag *lag = dp->lag; 480 481 dp->lag = NULL; 482 dp->lag_tx_enabled = false; 483 484 if (!refcount_dec_and_test(&lag->refcount)) 485 return; 486 487 WARN_ON(!list_empty(&lag->fdbs)); 488 dsa_lag_unmap(dp->ds->dst, lag); 489 kfree(lag); 490 } 491 492 int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev, 493 struct netdev_lag_upper_info *uinfo, 494 struct netlink_ext_ack *extack) 495 { 496 struct dsa_notifier_lag_info info = { 497 .sw_index = dp->ds->index, 498 .port = dp->index, 499 .info = uinfo, 500 }; 501 struct net_device *bridge_dev; 502 int err; 503 504 err = dsa_port_lag_create(dp, lag_dev); 505 if (err) 506 goto err_lag_create; 507 508 info.lag = *dp->lag; 509 err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info); 510 if (err) 511 goto err_lag_join; 512 513 bridge_dev = netdev_master_upper_dev_get(lag_dev); 514 if (!bridge_dev || !netif_is_bridge_master(bridge_dev)) 515 return 0; 516 517 err = dsa_port_bridge_join(dp, bridge_dev, extack); 518 if (err) 519 goto err_bridge_join; 520 521 return 0; 522 523 err_bridge_join: 524 dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info); 525 err_lag_join: 526 dsa_port_lag_destroy(dp); 527 err_lag_create: 528 return err; 529 } 530 531 void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag_dev) 532 { 533 struct net_device *br = dsa_port_bridge_dev_get(dp); 534 535 if (br) 536 dsa_port_pre_bridge_leave(dp, br); 537 } 538 539 void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev) 540 { 541 struct net_device *br = dsa_port_bridge_dev_get(dp); 542 struct dsa_notifier_lag_info info = { 543 .sw_index = dp->ds->index, 544 .port = dp->index, 545 }; 546 int err; 547 548 if (!dp->lag) 549 return; 550 551 /* Port might have been part of a LAG that in turn was 552 * attached to a bridge. 553 */ 554 if (br) 555 dsa_port_bridge_leave(dp, br); 556 557 info.lag = *dp->lag; 558 559 dsa_port_lag_destroy(dp); 560 561 err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info); 562 if (err) 563 dev_err(dp->ds->dev, 564 "port %d failed to notify DSA_NOTIFIER_LAG_LEAVE: %pe\n", 565 dp->index, ERR_PTR(err)); 566 } 567 568 /* Must be called under rcu_read_lock() */ 569 static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp, 570 bool vlan_filtering, 571 struct netlink_ext_ack *extack) 572 { 573 struct dsa_switch *ds = dp->ds; 574 struct dsa_port *other_dp; 575 int err; 576 577 /* VLAN awareness was off, so the question is "can we turn it on". 578 * We may have had 8021q uppers, those need to go. Make sure we don't 579 * enter an inconsistent state: deny changing the VLAN awareness state 580 * as long as we have 8021q uppers. 581 */ 582 if (vlan_filtering && dsa_port_is_user(dp)) { 583 struct net_device *br = dsa_port_bridge_dev_get(dp); 584 struct net_device *upper_dev, *slave = dp->slave; 585 struct list_head *iter; 586 587 netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) { 588 struct bridge_vlan_info br_info; 589 u16 vid; 590 591 if (!is_vlan_dev(upper_dev)) 592 continue; 593 594 vid = vlan_dev_vlan_id(upper_dev); 595 596 /* br_vlan_get_info() returns -EINVAL or -ENOENT if the 597 * device, respectively the VID is not found, returning 598 * 0 means success, which is a failure for us here. 599 */ 600 err = br_vlan_get_info(br, vid, &br_info); 601 if (err == 0) { 602 NL_SET_ERR_MSG_MOD(extack, 603 "Must first remove VLAN uppers having VIDs also present in bridge"); 604 return false; 605 } 606 } 607 } 608 609 if (!ds->vlan_filtering_is_global) 610 return true; 611 612 /* For cases where enabling/disabling VLAN awareness is global to the 613 * switch, we need to handle the case where multiple bridges span 614 * different ports of the same switch device and one of them has a 615 * different setting than what is being requested. 616 */ 617 dsa_switch_for_each_port(other_dp, ds) { 618 struct net_device *other_br = dsa_port_bridge_dev_get(other_dp); 619 620 /* If it's the same bridge, it also has same 621 * vlan_filtering setting => no need to check 622 */ 623 if (!other_br || other_br == dsa_port_bridge_dev_get(dp)) 624 continue; 625 626 if (br_vlan_enabled(other_br) != vlan_filtering) { 627 NL_SET_ERR_MSG_MOD(extack, 628 "VLAN filtering is a global setting"); 629 return false; 630 } 631 } 632 return true; 633 } 634 635 int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering, 636 struct netlink_ext_ack *extack) 637 { 638 bool old_vlan_filtering = dsa_port_is_vlan_filtering(dp); 639 struct dsa_switch *ds = dp->ds; 640 bool apply; 641 int err; 642 643 if (!ds->ops->port_vlan_filtering) 644 return -EOPNOTSUPP; 645 646 /* We are called from dsa_slave_switchdev_blocking_event(), 647 * which is not under rcu_read_lock(), unlike 648 * dsa_slave_switchdev_event(). 649 */ 650 rcu_read_lock(); 651 apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering, extack); 652 rcu_read_unlock(); 653 if (!apply) 654 return -EINVAL; 655 656 if (dsa_port_is_vlan_filtering(dp) == vlan_filtering) 657 return 0; 658 659 err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering, 660 extack); 661 if (err) 662 return err; 663 664 if (ds->vlan_filtering_is_global) { 665 struct dsa_port *other_dp; 666 667 ds->vlan_filtering = vlan_filtering; 668 669 dsa_switch_for_each_user_port(other_dp, ds) { 670 struct net_device *slave = dp->slave; 671 672 /* We might be called in the unbind path, so not 673 * all slave devices might still be registered. 674 */ 675 if (!slave) 676 continue; 677 678 err = dsa_slave_manage_vlan_filtering(slave, 679 vlan_filtering); 680 if (err) 681 goto restore; 682 } 683 } else { 684 dp->vlan_filtering = vlan_filtering; 685 686 err = dsa_slave_manage_vlan_filtering(dp->slave, 687 vlan_filtering); 688 if (err) 689 goto restore; 690 } 691 692 return 0; 693 694 restore: 695 ds->ops->port_vlan_filtering(ds, dp->index, old_vlan_filtering, NULL); 696 697 if (ds->vlan_filtering_is_global) 698 ds->vlan_filtering = old_vlan_filtering; 699 else 700 dp->vlan_filtering = old_vlan_filtering; 701 702 return err; 703 } 704 705 /* This enforces legacy behavior for switch drivers which assume they can't 706 * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0 707 */ 708 bool dsa_port_skip_vlan_configuration(struct dsa_port *dp) 709 { 710 struct net_device *br = dsa_port_bridge_dev_get(dp); 711 struct dsa_switch *ds = dp->ds; 712 713 if (!br) 714 return false; 715 716 return !ds->configure_vlan_while_not_filtering && !br_vlan_enabled(br); 717 } 718 719 int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock) 720 { 721 unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock); 722 unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies); 723 struct dsa_notifier_ageing_time_info info; 724 int err; 725 726 info.ageing_time = ageing_time; 727 728 err = dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info); 729 if (err) 730 return err; 731 732 dp->ageing_time = ageing_time; 733 734 return 0; 735 } 736 737 int dsa_port_pre_bridge_flags(const struct dsa_port *dp, 738 struct switchdev_brport_flags flags, 739 struct netlink_ext_ack *extack) 740 { 741 struct dsa_switch *ds = dp->ds; 742 743 if (!ds->ops->port_pre_bridge_flags) 744 return -EINVAL; 745 746 return ds->ops->port_pre_bridge_flags(ds, dp->index, flags, extack); 747 } 748 749 int dsa_port_bridge_flags(struct dsa_port *dp, 750 struct switchdev_brport_flags flags, 751 struct netlink_ext_ack *extack) 752 { 753 struct dsa_switch *ds = dp->ds; 754 int err; 755 756 if (!ds->ops->port_bridge_flags) 757 return -EOPNOTSUPP; 758 759 err = ds->ops->port_bridge_flags(ds, dp->index, flags, extack); 760 if (err) 761 return err; 762 763 if (flags.mask & BR_LEARNING) { 764 bool learning = flags.val & BR_LEARNING; 765 766 if (learning == dp->learning) 767 return 0; 768 769 if ((dp->learning && !learning) && 770 (dp->stp_state == BR_STATE_LEARNING || 771 dp->stp_state == BR_STATE_FORWARDING)) 772 dsa_port_fast_age(dp); 773 774 dp->learning = learning; 775 } 776 777 return 0; 778 } 779 780 int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu, 781 bool targeted_match) 782 { 783 struct dsa_notifier_mtu_info info = { 784 .sw_index = dp->ds->index, 785 .targeted_match = targeted_match, 786 .port = dp->index, 787 .mtu = new_mtu, 788 }; 789 790 return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info); 791 } 792 793 int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr, 794 u16 vid) 795 { 796 struct dsa_notifier_fdb_info info = { 797 .sw_index = dp->ds->index, 798 .port = dp->index, 799 .addr = addr, 800 .vid = vid, 801 }; 802 803 return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info); 804 } 805 806 int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr, 807 u16 vid) 808 { 809 struct dsa_notifier_fdb_info info = { 810 .sw_index = dp->ds->index, 811 .port = dp->index, 812 .addr = addr, 813 .vid = vid, 814 815 }; 816 817 return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info); 818 } 819 820 int dsa_port_host_fdb_add(struct dsa_port *dp, const unsigned char *addr, 821 u16 vid) 822 { 823 struct dsa_notifier_fdb_info info = { 824 .sw_index = dp->ds->index, 825 .port = dp->index, 826 .addr = addr, 827 .vid = vid, 828 }; 829 struct dsa_port *cpu_dp = dp->cpu_dp; 830 int err; 831 832 /* Avoid a call to __dev_set_promiscuity() on the master, which 833 * requires rtnl_lock(), since we can't guarantee that is held here, 834 * and we can't take it either. 835 */ 836 if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) { 837 err = dev_uc_add(cpu_dp->master, addr); 838 if (err) 839 return err; 840 } 841 842 return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info); 843 } 844 845 int dsa_port_host_fdb_del(struct dsa_port *dp, const unsigned char *addr, 846 u16 vid) 847 { 848 struct dsa_notifier_fdb_info info = { 849 .sw_index = dp->ds->index, 850 .port = dp->index, 851 .addr = addr, 852 .vid = vid, 853 }; 854 struct dsa_port *cpu_dp = dp->cpu_dp; 855 int err; 856 857 if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) { 858 err = dev_uc_del(cpu_dp->master, addr); 859 if (err) 860 return err; 861 } 862 863 return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info); 864 } 865 866 int dsa_port_lag_fdb_add(struct dsa_port *dp, const unsigned char *addr, 867 u16 vid) 868 { 869 struct dsa_notifier_lag_fdb_info info = { 870 .lag = dp->lag, 871 .addr = addr, 872 .vid = vid, 873 }; 874 875 return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_ADD, &info); 876 } 877 878 int dsa_port_lag_fdb_del(struct dsa_port *dp, const unsigned char *addr, 879 u16 vid) 880 { 881 struct dsa_notifier_lag_fdb_info info = { 882 .lag = dp->lag, 883 .addr = addr, 884 .vid = vid, 885 }; 886 887 return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_DEL, &info); 888 } 889 890 int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data) 891 { 892 struct dsa_switch *ds = dp->ds; 893 int port = dp->index; 894 895 if (!ds->ops->port_fdb_dump) 896 return -EOPNOTSUPP; 897 898 return ds->ops->port_fdb_dump(ds, port, cb, data); 899 } 900 901 int dsa_port_mdb_add(const struct dsa_port *dp, 902 const struct switchdev_obj_port_mdb *mdb) 903 { 904 struct dsa_notifier_mdb_info info = { 905 .sw_index = dp->ds->index, 906 .port = dp->index, 907 .mdb = mdb, 908 }; 909 910 return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info); 911 } 912 913 int dsa_port_mdb_del(const struct dsa_port *dp, 914 const struct switchdev_obj_port_mdb *mdb) 915 { 916 struct dsa_notifier_mdb_info info = { 917 .sw_index = dp->ds->index, 918 .port = dp->index, 919 .mdb = mdb, 920 }; 921 922 return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info); 923 } 924 925 int dsa_port_host_mdb_add(const struct dsa_port *dp, 926 const struct switchdev_obj_port_mdb *mdb) 927 { 928 struct dsa_notifier_mdb_info info = { 929 .sw_index = dp->ds->index, 930 .port = dp->index, 931 .mdb = mdb, 932 }; 933 struct dsa_port *cpu_dp = dp->cpu_dp; 934 int err; 935 936 err = dev_mc_add(cpu_dp->master, mdb->addr); 937 if (err) 938 return err; 939 940 return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_ADD, &info); 941 } 942 943 int dsa_port_host_mdb_del(const struct dsa_port *dp, 944 const struct switchdev_obj_port_mdb *mdb) 945 { 946 struct dsa_notifier_mdb_info info = { 947 .sw_index = dp->ds->index, 948 .port = dp->index, 949 .mdb = mdb, 950 }; 951 struct dsa_port *cpu_dp = dp->cpu_dp; 952 int err; 953 954 err = dev_mc_del(cpu_dp->master, mdb->addr); 955 if (err) 956 return err; 957 958 return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_DEL, &info); 959 } 960 961 int dsa_port_vlan_add(struct dsa_port *dp, 962 const struct switchdev_obj_port_vlan *vlan, 963 struct netlink_ext_ack *extack) 964 { 965 struct dsa_notifier_vlan_info info = { 966 .sw_index = dp->ds->index, 967 .port = dp->index, 968 .vlan = vlan, 969 .extack = extack, 970 }; 971 972 return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info); 973 } 974 975 int dsa_port_vlan_del(struct dsa_port *dp, 976 const struct switchdev_obj_port_vlan *vlan) 977 { 978 struct dsa_notifier_vlan_info info = { 979 .sw_index = dp->ds->index, 980 .port = dp->index, 981 .vlan = vlan, 982 }; 983 984 return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info); 985 } 986 987 int dsa_port_host_vlan_add(struct dsa_port *dp, 988 const struct switchdev_obj_port_vlan *vlan, 989 struct netlink_ext_ack *extack) 990 { 991 struct dsa_notifier_vlan_info info = { 992 .sw_index = dp->ds->index, 993 .port = dp->index, 994 .vlan = vlan, 995 .extack = extack, 996 }; 997 struct dsa_port *cpu_dp = dp->cpu_dp; 998 int err; 999 1000 err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_ADD, &info); 1001 if (err && err != -EOPNOTSUPP) 1002 return err; 1003 1004 vlan_vid_add(cpu_dp->master, htons(ETH_P_8021Q), vlan->vid); 1005 1006 return err; 1007 } 1008 1009 int dsa_port_host_vlan_del(struct dsa_port *dp, 1010 const struct switchdev_obj_port_vlan *vlan) 1011 { 1012 struct dsa_notifier_vlan_info info = { 1013 .sw_index = dp->ds->index, 1014 .port = dp->index, 1015 .vlan = vlan, 1016 }; 1017 struct dsa_port *cpu_dp = dp->cpu_dp; 1018 int err; 1019 1020 err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_DEL, &info); 1021 if (err && err != -EOPNOTSUPP) 1022 return err; 1023 1024 vlan_vid_del(cpu_dp->master, htons(ETH_P_8021Q), vlan->vid); 1025 1026 return err; 1027 } 1028 1029 int dsa_port_mrp_add(const struct dsa_port *dp, 1030 const struct switchdev_obj_mrp *mrp) 1031 { 1032 struct dsa_switch *ds = dp->ds; 1033 1034 if (!ds->ops->port_mrp_add) 1035 return -EOPNOTSUPP; 1036 1037 return ds->ops->port_mrp_add(ds, dp->index, mrp); 1038 } 1039 1040 int dsa_port_mrp_del(const struct dsa_port *dp, 1041 const struct switchdev_obj_mrp *mrp) 1042 { 1043 struct dsa_switch *ds = dp->ds; 1044 1045 if (!ds->ops->port_mrp_del) 1046 return -EOPNOTSUPP; 1047 1048 return ds->ops->port_mrp_del(ds, dp->index, mrp); 1049 } 1050 1051 int dsa_port_mrp_add_ring_role(const struct dsa_port *dp, 1052 const struct switchdev_obj_ring_role_mrp *mrp) 1053 { 1054 struct dsa_switch *ds = dp->ds; 1055 1056 if (!ds->ops->port_mrp_add_ring_role) 1057 return -EOPNOTSUPP; 1058 1059 return ds->ops->port_mrp_add_ring_role(ds, dp->index, mrp); 1060 } 1061 1062 int dsa_port_mrp_del_ring_role(const struct dsa_port *dp, 1063 const struct switchdev_obj_ring_role_mrp *mrp) 1064 { 1065 struct dsa_switch *ds = dp->ds; 1066 1067 if (!ds->ops->port_mrp_del_ring_role) 1068 return -EOPNOTSUPP; 1069 1070 return ds->ops->port_mrp_del_ring_role(ds, dp->index, mrp); 1071 } 1072 1073 void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp, 1074 const struct dsa_device_ops *tag_ops) 1075 { 1076 cpu_dp->rcv = tag_ops->rcv; 1077 cpu_dp->tag_ops = tag_ops; 1078 } 1079 1080 static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp) 1081 { 1082 struct device_node *phy_dn; 1083 struct phy_device *phydev; 1084 1085 phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0); 1086 if (!phy_dn) 1087 return NULL; 1088 1089 phydev = of_phy_find_device(phy_dn); 1090 if (!phydev) { 1091 of_node_put(phy_dn); 1092 return ERR_PTR(-EPROBE_DEFER); 1093 } 1094 1095 of_node_put(phy_dn); 1096 return phydev; 1097 } 1098 1099 static void dsa_port_phylink_validate(struct phylink_config *config, 1100 unsigned long *supported, 1101 struct phylink_link_state *state) 1102 { 1103 struct dsa_port *dp = container_of(config, struct dsa_port, pl_config); 1104 struct dsa_switch *ds = dp->ds; 1105 1106 if (!ds->ops->phylink_validate) { 1107 if (config->mac_capabilities) 1108 phylink_generic_validate(config, supported, state); 1109 return; 1110 } 1111 1112 ds->ops->phylink_validate(ds, dp->index, supported, state); 1113 } 1114 1115 static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config, 1116 struct phylink_link_state *state) 1117 { 1118 struct dsa_port *dp = container_of(config, struct dsa_port, pl_config); 1119 struct dsa_switch *ds = dp->ds; 1120 int err; 1121 1122 /* Only called for inband modes */ 1123 if (!ds->ops->phylink_mac_link_state) { 1124 state->link = 0; 1125 return; 1126 } 1127 1128 err = ds->ops->phylink_mac_link_state(ds, dp->index, state); 1129 if (err < 0) { 1130 dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n", 1131 dp->index, err); 1132 state->link = 0; 1133 } 1134 } 1135 1136 static struct phylink_pcs * 1137 dsa_port_phylink_mac_select_pcs(struct phylink_config *config, 1138 phy_interface_t interface) 1139 { 1140 struct dsa_port *dp = container_of(config, struct dsa_port, pl_config); 1141 struct phylink_pcs *pcs = ERR_PTR(-EOPNOTSUPP); 1142 struct dsa_switch *ds = dp->ds; 1143 1144 if (ds->ops->phylink_mac_select_pcs) 1145 pcs = ds->ops->phylink_mac_select_pcs(ds, dp->index, interface); 1146 1147 return pcs; 1148 } 1149 1150 static void dsa_port_phylink_mac_config(struct phylink_config *config, 1151 unsigned int mode, 1152 const struct phylink_link_state *state) 1153 { 1154 struct dsa_port *dp = container_of(config, struct dsa_port, pl_config); 1155 struct dsa_switch *ds = dp->ds; 1156 1157 if (!ds->ops->phylink_mac_config) 1158 return; 1159 1160 ds->ops->phylink_mac_config(ds, dp->index, mode, state); 1161 } 1162 1163 static void dsa_port_phylink_mac_an_restart(struct phylink_config *config) 1164 { 1165 struct dsa_port *dp = container_of(config, struct dsa_port, pl_config); 1166 struct dsa_switch *ds = dp->ds; 1167 1168 if (!ds->ops->phylink_mac_an_restart) 1169 return; 1170 1171 ds->ops->phylink_mac_an_restart(ds, dp->index); 1172 } 1173 1174 static void dsa_port_phylink_mac_link_down(struct phylink_config *config, 1175 unsigned int mode, 1176 phy_interface_t interface) 1177 { 1178 struct dsa_port *dp = container_of(config, struct dsa_port, pl_config); 1179 struct phy_device *phydev = NULL; 1180 struct dsa_switch *ds = dp->ds; 1181 1182 if (dsa_port_is_user(dp)) 1183 phydev = dp->slave->phydev; 1184 1185 if (!ds->ops->phylink_mac_link_down) { 1186 if (ds->ops->adjust_link && phydev) 1187 ds->ops->adjust_link(ds, dp->index, phydev); 1188 return; 1189 } 1190 1191 ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface); 1192 } 1193 1194 static void dsa_port_phylink_mac_link_up(struct phylink_config *config, 1195 struct phy_device *phydev, 1196 unsigned int mode, 1197 phy_interface_t interface, 1198 int speed, int duplex, 1199 bool tx_pause, bool rx_pause) 1200 { 1201 struct dsa_port *dp = container_of(config, struct dsa_port, pl_config); 1202 struct dsa_switch *ds = dp->ds; 1203 1204 if (!ds->ops->phylink_mac_link_up) { 1205 if (ds->ops->adjust_link && phydev) 1206 ds->ops->adjust_link(ds, dp->index, phydev); 1207 return; 1208 } 1209 1210 ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev, 1211 speed, duplex, tx_pause, rx_pause); 1212 } 1213 1214 static const struct phylink_mac_ops dsa_port_phylink_mac_ops = { 1215 .validate = dsa_port_phylink_validate, 1216 .mac_select_pcs = dsa_port_phylink_mac_select_pcs, 1217 .mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state, 1218 .mac_config = dsa_port_phylink_mac_config, 1219 .mac_an_restart = dsa_port_phylink_mac_an_restart, 1220 .mac_link_down = dsa_port_phylink_mac_link_down, 1221 .mac_link_up = dsa_port_phylink_mac_link_up, 1222 }; 1223 1224 int dsa_port_phylink_create(struct dsa_port *dp) 1225 { 1226 struct dsa_switch *ds = dp->ds; 1227 phy_interface_t mode; 1228 int err; 1229 1230 err = of_get_phy_mode(dp->dn, &mode); 1231 if (err) 1232 mode = PHY_INTERFACE_MODE_NA; 1233 1234 /* Presence of phylink_mac_link_state or phylink_mac_an_restart is 1235 * an indicator of a legacy phylink driver. 1236 */ 1237 if (ds->ops->phylink_mac_link_state || 1238 ds->ops->phylink_mac_an_restart) 1239 dp->pl_config.legacy_pre_march2020 = true; 1240 1241 if (ds->ops->phylink_get_caps) 1242 ds->ops->phylink_get_caps(ds, dp->index, &dp->pl_config); 1243 1244 dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn), 1245 mode, &dsa_port_phylink_mac_ops); 1246 if (IS_ERR(dp->pl)) { 1247 pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl)); 1248 return PTR_ERR(dp->pl); 1249 } 1250 1251 return 0; 1252 } 1253 1254 static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable) 1255 { 1256 struct dsa_switch *ds = dp->ds; 1257 struct phy_device *phydev; 1258 int port = dp->index; 1259 int err = 0; 1260 1261 phydev = dsa_port_get_phy_device(dp); 1262 if (!phydev) 1263 return 0; 1264 1265 if (IS_ERR(phydev)) 1266 return PTR_ERR(phydev); 1267 1268 if (enable) { 1269 err = genphy_resume(phydev); 1270 if (err < 0) 1271 goto err_put_dev; 1272 1273 err = genphy_read_status(phydev); 1274 if (err < 0) 1275 goto err_put_dev; 1276 } else { 1277 err = genphy_suspend(phydev); 1278 if (err < 0) 1279 goto err_put_dev; 1280 } 1281 1282 if (ds->ops->adjust_link) 1283 ds->ops->adjust_link(ds, port, phydev); 1284 1285 dev_dbg(ds->dev, "enabled port's phy: %s", phydev_name(phydev)); 1286 1287 err_put_dev: 1288 put_device(&phydev->mdio.dev); 1289 return err; 1290 } 1291 1292 static int dsa_port_fixed_link_register_of(struct dsa_port *dp) 1293 { 1294 struct device_node *dn = dp->dn; 1295 struct dsa_switch *ds = dp->ds; 1296 struct phy_device *phydev; 1297 int port = dp->index; 1298 phy_interface_t mode; 1299 int err; 1300 1301 err = of_phy_register_fixed_link(dn); 1302 if (err) { 1303 dev_err(ds->dev, 1304 "failed to register the fixed PHY of port %d\n", 1305 port); 1306 return err; 1307 } 1308 1309 phydev = of_phy_find_device(dn); 1310 1311 err = of_get_phy_mode(dn, &mode); 1312 if (err) 1313 mode = PHY_INTERFACE_MODE_NA; 1314 phydev->interface = mode; 1315 1316 genphy_read_status(phydev); 1317 1318 if (ds->ops->adjust_link) 1319 ds->ops->adjust_link(ds, port, phydev); 1320 1321 put_device(&phydev->mdio.dev); 1322 1323 return 0; 1324 } 1325 1326 static int dsa_port_phylink_register(struct dsa_port *dp) 1327 { 1328 struct dsa_switch *ds = dp->ds; 1329 struct device_node *port_dn = dp->dn; 1330 int err; 1331 1332 dp->pl_config.dev = ds->dev; 1333 dp->pl_config.type = PHYLINK_DEV; 1334 1335 err = dsa_port_phylink_create(dp); 1336 if (err) 1337 return err; 1338 1339 err = phylink_of_phy_connect(dp->pl, port_dn, 0); 1340 if (err && err != -ENODEV) { 1341 pr_err("could not attach to PHY: %d\n", err); 1342 goto err_phy_connect; 1343 } 1344 1345 return 0; 1346 1347 err_phy_connect: 1348 phylink_destroy(dp->pl); 1349 return err; 1350 } 1351 1352 int dsa_port_link_register_of(struct dsa_port *dp) 1353 { 1354 struct dsa_switch *ds = dp->ds; 1355 struct device_node *phy_np; 1356 int port = dp->index; 1357 1358 if (!ds->ops->adjust_link) { 1359 phy_np = of_parse_phandle(dp->dn, "phy-handle", 0); 1360 if (of_phy_is_fixed_link(dp->dn) || phy_np) { 1361 if (ds->ops->phylink_mac_link_down) 1362 ds->ops->phylink_mac_link_down(ds, port, 1363 MLO_AN_FIXED, PHY_INTERFACE_MODE_NA); 1364 return dsa_port_phylink_register(dp); 1365 } 1366 return 0; 1367 } 1368 1369 dev_warn(ds->dev, 1370 "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n"); 1371 1372 if (of_phy_is_fixed_link(dp->dn)) 1373 return dsa_port_fixed_link_register_of(dp); 1374 else 1375 return dsa_port_setup_phy_of(dp, true); 1376 } 1377 1378 void dsa_port_link_unregister_of(struct dsa_port *dp) 1379 { 1380 struct dsa_switch *ds = dp->ds; 1381 1382 if (!ds->ops->adjust_link && dp->pl) { 1383 rtnl_lock(); 1384 phylink_disconnect_phy(dp->pl); 1385 rtnl_unlock(); 1386 phylink_destroy(dp->pl); 1387 dp->pl = NULL; 1388 return; 1389 } 1390 1391 if (of_phy_is_fixed_link(dp->dn)) 1392 of_phy_deregister_fixed_link(dp->dn); 1393 else 1394 dsa_port_setup_phy_of(dp, false); 1395 } 1396 1397 int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr) 1398 { 1399 struct dsa_switch *ds = dp->ds; 1400 int err; 1401 1402 if (!ds->ops->port_hsr_join) 1403 return -EOPNOTSUPP; 1404 1405 dp->hsr_dev = hsr; 1406 1407 err = ds->ops->port_hsr_join(ds, dp->index, hsr); 1408 if (err) 1409 dp->hsr_dev = NULL; 1410 1411 return err; 1412 } 1413 1414 void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr) 1415 { 1416 struct dsa_switch *ds = dp->ds; 1417 int err; 1418 1419 dp->hsr_dev = NULL; 1420 1421 if (ds->ops->port_hsr_leave) { 1422 err = ds->ops->port_hsr_leave(ds, dp->index, hsr); 1423 if (err) 1424 dev_err(dp->ds->dev, 1425 "port %d failed to leave HSR %s: %pe\n", 1426 dp->index, hsr->name, ERR_PTR(err)); 1427 } 1428 } 1429 1430 int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast) 1431 { 1432 struct dsa_notifier_tag_8021q_vlan_info info = { 1433 .tree_index = dp->ds->dst->index, 1434 .sw_index = dp->ds->index, 1435 .port = dp->index, 1436 .vid = vid, 1437 }; 1438 1439 if (broadcast) 1440 return dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info); 1441 1442 return dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info); 1443 } 1444 1445 void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast) 1446 { 1447 struct dsa_notifier_tag_8021q_vlan_info info = { 1448 .tree_index = dp->ds->dst->index, 1449 .sw_index = dp->ds->index, 1450 .port = dp->index, 1451 .vid = vid, 1452 }; 1453 int err; 1454 1455 if (broadcast) 1456 err = dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info); 1457 else 1458 err = dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info); 1459 if (err) 1460 dev_err(dp->ds->dev, 1461 "port %d failed to notify tag_8021q VLAN %d deletion: %pe\n", 1462 dp->index, vid, ERR_PTR(err)); 1463 } 1464