// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch port
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "dsa.h"
#include "port.h"
#include "switch.h"
#include "tag_8021q.h"
#include "user.h"

/**
 * dsa_port_notify - Notify the switching fabric of changes to a port
 * @dp: port on which change occurred
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Notify all switches in the DSA tree that this port's switch belongs to,
 * including this switch itself, of an event. Allows the other switches to
 * reconfigure themselves for cross-chip operations. Can also be used to
 * reconfigure ports without net_devices (CPU ports, DSA links) whenever
 * a user port's state changes.
 */
static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
{
	return dsa_tree_notify(dp->ds->dst, e, v);
}

/* Tell the bridge layer that the hardware FDB entries for @vid on this port
 * have been flushed, so it can drop the matching software entries. Callers
 * pass vid 0 when all VLANs were flushed (see dsa_port_fast_age()).
 */
static void dsa_port_notify_bridge_fdb_flush(const struct dsa_port *dp, u16 vid)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct switchdev_notifier_fdb_info info = {
		.vid = vid,
	};

	/* When the port becomes standalone it has already left the bridge.
	 * Don't notify the bridge in that case.
	 */
	if (!brport_dev)
		return;

	call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
				 brport_dev, &info.info, NULL);
}

/* Flush dynamically learned FDB entries on this port via the driver, then
 * propagate the flush to the bridge. No-op if the driver lacks the hook.
 */
static void dsa_port_fast_age(const struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_fast_age)
		return;

	ds->ops->port_fast_age(ds, dp->index);

	/* flush all VLANs */
	dsa_port_notify_bridge_fdb_flush(dp, 0);
}

/* Per-VLAN variant of dsa_port_fast_age(). Returns -EOPNOTSUPP when the
 * driver cannot fast-age a single VLAN; the bridge is only notified on
 * success.
 */
static int dsa_port_vlan_fast_age(const struct dsa_port *dp, u16 vid)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_vlan_fast_age)
		return -EOPNOTSUPP;

	err = ds->ops->port_vlan_fast_age(ds, dp->index, vid);

	if (!err)
		dsa_port_notify_bridge_fdb_flush(dp, vid);

	return err;
}

/* Fast-age every VLAN that the bridge maps to MST instance @msti, stopping
 * at the first error.
 */
static int dsa_port_msti_fast_age(const struct dsa_port *dp, u16 msti)
{
	DECLARE_BITMAP(vids, VLAN_N_VID) = { 0 };
	int err, vid;

	err = br_mst_get_info(dsa_port_bridge_dev_get(dp), msti, vids);
	if (err)
		return err;

	for_each_set_bit(vid, vids, VLAN_N_VID) {
		err = dsa_port_vlan_fast_age(dp, vid);
		if (err)
			return err;
	}

	return 0;
}

/* Probe (without committing anything) whether the driver can toggle address
 * learning on this port, by asking port_pre_bridge_flags about BR_LEARNING.
 */
static bool dsa_port_can_configure_learning(struct dsa_port *dp)
{
	struct switchdev_brport_flags flags = {
		.mask = BR_LEARNING,
	};
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_bridge_flags || !ds->ops->port_pre_bridge_flags)
		return false;

	err = ds->ops->port_pre_bridge_flags(ds, dp->index, flags, NULL);
	return !err;
}

/* Report whether the driver provides a usable hardware timestamping
 * implementation for this port (both get and set hooks, and a "get" that is
 * not a pass-through shim).
 */
bool dsa_port_supports_hwtstamp(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct ifreq ifr = {};
	int err;

	if (!ds->ops->port_hwtstamp_get || !ds->ops->port_hwtstamp_set)
		return false;

	/* "See through" shim implementations of the "get" method.
	 * Since we can't cook up a complete ioctl request structure, this will
	 * fail in copy_to_user() with -EFAULT, which hopefully is enough to
	 * detect a valid implementation.
	 */
	err = ds->ops->port_hwtstamp_get(ds, dp->index, &ifr);
	return err != -EOPNOTSUPP;
}

/* Program the STP state of the port into hardware, fast-aging the FDB when
 * the transition makes previously learned addresses stale. Returns
 * -EOPNOTSUPP when the driver cannot set STP states.
 */
int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_stp_state_set)
		return -EOPNOTSUPP;

	ds->ops->port_stp_state_set(ds, port, state);

	if (!dsa_port_can_configure_learning(dp) ||
	    (do_fast_age && dp->learning)) {
		/* Fast age FDB entries or flush appropriate forwarding database
		 * for the given port, if we are moving it from Learning or
		 * Forwarding state, to Disabled or Blocking or Listening state.
		 * Ports that were standalone before the STP state change don't
		 * need to fast age the FDB, since address learning is off in
		 * standalone mode.
		 */

		if ((dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING) &&
		    (state == BR_STATE_DISABLED ||
		     state == BR_STATE_BLOCKING ||
		     state == BR_STATE_LISTENING))
			dsa_port_fast_age(dp);
	}

	dp->stp_state = state;

	return 0;
}

/* Best-effort wrapper around dsa_port_set_state() for paths that cannot
 * propagate an error: failures other than -EOPNOTSUPP are only logged.
 */
static void dsa_port_set_state_now(struct dsa_port *dp, u8 state,
				   bool do_fast_age)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	err = dsa_port_set_state(dp, state, do_fast_age);
	if (err && err != -EOPNOTSUPP) {
		dev_err(ds->dev, "port %d failed to set STP state %u: %pe\n",
			dp->index, state, ERR_PTR(err));
	}
}

/* Offload a per-MSTI state change to the driver, fast-aging the affected
 * VLANs when leaving a learning/forwarding state while learning is on.
 * NOTE(review): a fast-age failure is only reported through extack and the
 * function still returns 0 — presumably deliberate, since the state change
 * itself already succeeded; confirm against callers.
 */
int dsa_port_set_mst_state(struct dsa_port *dp,
			   const struct switchdev_mst_state *state,
			   struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	u8 prev_state;
	int err;

	if (!ds->ops->port_mst_state_set)
		return -EOPNOTSUPP;

	err = br_mst_get_state(dsa_port_to_bridge_port(dp), state->msti,
			       &prev_state);
	if (err)
		return err;

	err = ds->ops->port_mst_state_set(ds, dp->index, state);
	if (err)
		return err;

	if (!(dp->learning &&
	      (prev_state == BR_STATE_LEARNING ||
	       prev_state == BR_STATE_FORWARDING) &&
	      (state->state == BR_STATE_DISABLED ||
	       state->state == BR_STATE_BLOCKING ||
	       state->state == BR_STATE_LISTENING)))
		return 0;

	err = dsa_port_msti_fast_age(dp, state->msti);
	if (err)
		NL_SET_ERR_MSG_MOD(extack,
				   "Unable to flush associated VLANs");

	return 0;
}

/* Enable the port: driver hook, STP forwarding for standalone ports, and
 * phylink start. Caller holds rtnl (see dsa_port_enable()).
 */
int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	int err;

	if (ds->ops->port_enable) {
		err = ds->ops->port_enable(ds, port, phy);
		if (err)
			return err;
	}

	if (!dp->bridge)
		dsa_port_set_state_now(dp, BR_STATE_FORWARDING, false);

	if (dp->pl)
		phylink_start(dp->pl);

	return 0;
}

/* rtnl-taking wrapper around dsa_port_enable_rt(). */
int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
{
	int err;

	rtnl_lock();
	err = dsa_port_enable_rt(dp, phy);
	rtnl_unlock();

	return err;
}

/* Disable the port: mirror image of dsa_port_enable_rt(), in reverse order.
 * Caller holds rtnl (see dsa_port_disable()).
 */
void dsa_port_disable_rt(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (dp->pl)
		phylink_stop(dp->pl);

	if (!dp->bridge)
		dsa_port_set_state_now(dp, BR_STATE_DISABLED, false);

	if (ds->ops->port_disable)
		ds->ops->port_disable(ds, port);
}

/* rtnl-taking wrapper around dsa_port_disable_rt(). */
void dsa_port_disable(struct dsa_port *dp)
{
	rtnl_lock();
	dsa_port_disable_rt(dp);
	rtnl_unlock();
}

/* Restore the port's VLAN filtering setting to its standalone value after it
 * has left @bridge, honoring switches where vlan_filtering is a global (not
 * per-port) knob. Errors are only logged, since the port has already left.
 */
static void dsa_port_reset_vlan_filtering(struct dsa_port *dp,
					  struct dsa_bridge bridge)
{
	struct netlink_ext_ack extack = {0};
	bool change_vlan_filtering = false;
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	bool vlan_filtering;
	int err;

	if (ds->needs_standalone_vlan_filtering &&
	    !br_vlan_enabled(bridge.dev)) {
		change_vlan_filtering = true;
		vlan_filtering = true;
	} else if (!ds->needs_standalone_vlan_filtering &&
		   br_vlan_enabled(bridge.dev)) {
		change_vlan_filtering = true;
		vlan_filtering = false;
	}

	/* If the bridge was vlan_filtering, the bridge core doesn't trigger an
	 * event for changing vlan_filtering setting upon user ports leaving
	 * it. That is a good thing, because that lets us handle it and also
	 * handle the case where the switch's vlan_filtering setting is global
	 * (not per port). When that happens, the correct moment to trigger the
	 * vlan_filtering callback is only when the last port leaves the last
	 * VLAN-aware bridge.
	 */
	if (change_vlan_filtering && ds->vlan_filtering_is_global) {
		dsa_switch_for_each_port(other_dp, ds) {
			struct net_device *br = dsa_port_bridge_dev_get(other_dp);

			if (br && br_vlan_enabled(br)) {
				change_vlan_filtering = false;
				break;
			}
		}
	}

	if (!change_vlan_filtering)
		return;

	err = dsa_port_vlan_filtering(dp, vlan_filtering, &extack);
	if (extack._msg) {
		dev_err(ds->dev, "port %d: %s\n", dp->index,
			extack._msg);
	}
	if (err && err != -EOPNOTSUPP) {
		dev_err(ds->dev,
			"port %d failed to reset VLAN filtering to %d: %pe\n",
			dp->index, vlan_filtering, ERR_PTR(err));
	}
}

/* At bridge join time, program into hardware the brport flags the bridge
 * already holds for this port, one flag at a time. -EOPNOTSUPP from the
 * driver is tolerated.
 */
static int dsa_port_inherit_brport_flags(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD | BR_PORT_LOCKED;
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);

		if (br_port_flag_is_set(brport_dev, BIT(flag)))
			flags.val = BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, extack);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return 0;
}

/* At bridge leave time, return the port to standalone defaults: flooding on,
 * learning and locking off. Failures are only logged.
 */
static void dsa_port_clear_brport_flags(struct dsa_port *dp)
{
	const unsigned long val = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD | BR_PORT_LOCKED;
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);
		flags.val = val & BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, NULL);
		if (err && err != -EOPNOTSUPP)
			dev_err(dp->ds->dev,
				"failed to clear bridge port flag %lu: %pe\n",
				flags.val, ERR_PTR(err));
	}
}

/* Sync the bridge's current view of this port (brport flags, STP state,
 * VLAN awareness, ageing time) into hardware after joining it.
 */
static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	int err;

	err = dsa_port_inherit_brport_flags(dp, extack);
	if (err)
		return err;

	err = dsa_port_set_state(dp, br_port_get_stp_state(brport_dev), false);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_vlan_filtering(dp, br_vlan_enabled(br), extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_ageing_time(dp, br_get_ageing_time(br));
	if (err && err != -EOPNOTSUPP)
		return err;

	return 0;
}

/* Undo dsa_port_switchdev_sync_attrs() after the port left @bridge. */
static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp,
					    struct dsa_bridge bridge)
{
	/* Configure the port for standalone mode (no address learning,
	 * flood everything).
	 * The bridge only emits SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS events
	 * when the user requests it through netlink or sysfs, but not
	 * automatically at port join or leave, so we need to handle resetting
	 * the brport flags ourselves. But we even prefer it that way, because
	 * otherwise, some setups might never get the notification they need,
	 * for example, when a port leaves a LAG that offloads the bridge,
	 * it becomes standalone, but as far as the bridge is concerned, no
	 * port ever left.
	 */
	dsa_port_clear_brport_flags(dp);

	/* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer,
	 * so allow it to be in BR_STATE_FORWARDING to be kept functional
	 */
	dsa_port_set_state_now(dp, BR_STATE_FORWARDING, true);

	dsa_port_reset_vlan_filtering(dp, bridge);

	/* Ageing time may be global to the switch chip, so don't change it
	 * here because we have no good reason (or value) to change it to.
	 */
}

/* Look up or allocate the refcounted dsa_bridge object shared by all ports
 * of this tree that are members of @br, and attach it to @dp. A bridge
 * number is reserved when the switch limits how many bridges it can offload.
 */
static int dsa_port_bridge_create(struct dsa_port *dp,
				  struct net_device *br,
				  struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_bridge *bridge;

	bridge = dsa_tree_bridge_find(ds->dst, br);
	if (bridge) {
		refcount_inc(&bridge->refcount);
		dp->bridge = bridge;
		return 0;
	}

	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return -ENOMEM;

	refcount_set(&bridge->refcount, 1);

	bridge->dev = br;

	bridge->num = dsa_bridge_num_get(br, ds->max_num_bridges);
	if (ds->max_num_bridges && !bridge->num) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Range of offloadable bridges exceeded");
		kfree(bridge);
		return -EOPNOTSUPP;
	}

	dp->bridge = bridge;

	return 0;
}

/* Drop this port's reference on its dsa_bridge, releasing the bridge number
 * and the object itself when the last port leaves.
 */
static void dsa_port_bridge_destroy(struct dsa_port *dp,
				    const struct net_device *br)
{
	struct dsa_bridge *bridge = dp->bridge;

	dp->bridge = NULL;

	if (!refcount_dec_and_test(&bridge->refcount))
		return;

	if (bridge->num)
		dsa_bridge_num_put(br, bridge->num);

	kfree(bridge);
}

/* MST offload needs per-VLAN MSTI mapping, per-MSTI state, per-VLAN fast
 * ageing, and configurable learning.
 */
static bool dsa_port_supports_mst(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	return ds->ops->vlan_msti_set &&
		ds->ops->port_mst_state_set &&
		ds->ops->port_vlan_fast_age &&
		dsa_port_can_configure_learning(dp);
}

/* Offload this port's membership of bridge @br: create/refcount the
 * dsa_bridge, notify the fabric, register the switchdev brport, then sync
 * bridge attributes. Each step is rolled back in reverse order on failure.
 */
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
			 struct netlink_ext_ack *extack)
{
	struct dsa_notifier_bridge_info info = {
		.dp = dp,
		.extack = extack,
	};
	struct net_device *dev = dp->user;
	struct net_device *brport_dev;
	int err;

	if (br_mst_enabled(br) && !dsa_port_supports_mst(dp))
		return -EOPNOTSUPP;

	/* Here the interface is already bridged. Reflect the current
	 * configuration so that drivers can program their chips accordingly.
	 */
	err = dsa_port_bridge_create(dp, br, extack);
	if (err)
		return err;

	brport_dev = dsa_port_to_bridge_port(dp);

	info.bridge = *dp->bridge;
	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);
	if (err)
		goto out_rollback;

	/* Drivers which support bridge TX forwarding should set this */
	dp->bridge->tx_fwd_offload = info.tx_fwd_offload;

	err = switchdev_bridge_port_offload(brport_dev, dev, dp,
					    &dsa_user_switchdev_notifier,
					    &dsa_user_switchdev_blocking_notifier,
					    dp->bridge->tx_fwd_offload, extack);
	if (err)
		goto out_rollback_unbridge;

	err = dsa_port_switchdev_sync_attrs(dp, extack);
	if (err)
		goto out_rollback_unoffload;

	return 0;

out_rollback_unoffload:
	switchdev_bridge_port_unoffload(brport_dev, dp,
					&dsa_user_switchdev_notifier,
					&dsa_user_switchdev_blocking_notifier);
	dsa_flush_workqueue();
out_rollback_unbridge:
	dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
out_rollback:
	dsa_port_bridge_destroy(dp, br);
	return err;
}

/* First half of leaving a bridge: unregister the switchdev brport while the
 * bridge port device still exists, and drain deferred switchdev work.
 */
void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);

	/* Don't try to unoffload something that is not offloaded */
	if (!brport_dev)
		return;

	switchdev_bridge_port_unoffload(brport_dev, dp,
					&dsa_user_switchdev_notifier,
					&dsa_user_switchdev_blocking_notifier);

	dsa_flush_workqueue();
}

/* Second half of leaving a bridge: drop the dsa_bridge reference, notify the
 * fabric, and restore standalone port attributes.
 */
void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct dsa_notifier_bridge_info info = {
		.dp = dp,
	};
	int err;

	/* If the port could not be offloaded to begin with, then
	 * there is nothing to do.
	 */
	if (!dp->bridge)
		return;

	info.bridge = *dp->bridge;

	/* Here the port is already unbridged. Reflect the current configuration
	 * so that drivers can program their chips accordingly.
	 */
	dsa_port_bridge_destroy(dp, br);

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify DSA_NOTIFIER_BRIDGE_LEAVE: %pe\n",
			dp->index, ERR_PTR(err));

	dsa_port_switchdev_unsync_attrs(dp, info.bridge);
}

/* React to a change in this port's LAG lower state, notifying the fabric
 * only when the effective tx_enabled state actually changes.
 */
int dsa_port_lag_change(struct dsa_port *dp,
			struct netdev_lag_lower_state_info *linfo)
{
	struct dsa_notifier_lag_info info = {
		.dp = dp,
	};
	bool tx_enabled;

	if (!dp->lag)
		return 0;

	/* On statically configured aggregates (e.g. loadbalance
	 * without LACP) ports will always be tx_enabled, even if the
	 * link is down. Thus we require both link_up and tx_enabled
	 * in order to include it in the tx set.
	 */
	tx_enabled = linfo->link_up && linfo->tx_enabled;

	if (tx_enabled == dp->lag_tx_enabled)
		return 0;

	dp->lag_tx_enabled = tx_enabled;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
}

/* Look up or allocate the refcounted dsa_lag object for @lag_dev and attach
 * it to @dp, mapping it into the tree on first use.
 */
static int dsa_port_lag_create(struct dsa_port *dp,
			       struct net_device *lag_dev)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_lag *lag;

	lag = dsa_tree_lag_find(ds->dst, lag_dev);
	if (lag) {
		refcount_inc(&lag->refcount);
		dp->lag = lag;
		return 0;
	}

	lag = kzalloc(sizeof(*lag), GFP_KERNEL);
	if (!lag)
		return -ENOMEM;

	refcount_set(&lag->refcount, 1);
	mutex_init(&lag->fdb_lock);
	INIT_LIST_HEAD(&lag->fdbs);
	lag->dev = lag_dev;
	dsa_lag_map(ds->dst, lag);
	dp->lag = lag;

	return 0;
}

/* Drop this port's reference on its dsa_lag, unmapping and freeing the LAG
 * when the last port leaves. The LAG FDB list must be empty by then.
 */
static void dsa_port_lag_destroy(struct dsa_port *dp)
{
	struct dsa_lag *lag = dp->lag;

	dp->lag = NULL;
	dp->lag_tx_enabled = false;

	if (!refcount_dec_and_test(&lag->refcount))
		return;

	WARN_ON(!list_empty(&lag->fdbs));
	dsa_lag_unmap(dp->ds->dst, lag);
	kfree(lag);
}

/* Offload this port's membership of @lag_dev, and also join the bridge on
 * top of the LAG, if any. Rolled back in reverse order on failure.
 */
int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
		      struct netdev_lag_upper_info *uinfo,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_lag_info info = {
		.dp = dp,
		.info = uinfo,
		.extack = extack,
	};
	struct net_device *bridge_dev;
	int err;

	err = dsa_port_lag_create(dp, lag_dev);
	if (err)
		goto err_lag_create;

	info.lag = *dp->lag;
	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
	if (err)
		goto err_lag_join;

	bridge_dev = netdev_master_upper_dev_get(lag_dev);
	if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
		return 0;

	err = dsa_port_bridge_join(dp, bridge_dev, extack);
	if (err)
		goto err_bridge_join;

	return 0;

err_bridge_join:
	dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
err_lag_join:
	dsa_port_lag_destroy(dp);
err_lag_create:
	return err;
}

/* Pre-leave for a LAG: unoffload the bridge on top of the LAG, if any. */
void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);

	if (br)
		dsa_port_pre_bridge_leave(dp, br);
}

/* Undo dsa_port_lag_join(): leave the bridge on top of the LAG (if any),
 * release the dsa_lag and notify the fabric.
 */
void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	struct dsa_notifier_lag_info info = {
		.dp = dp,
	};
	int err;

	if (!dp->lag)
		return;

	/* Port might have been part of a LAG that in turn was
	 * attached to a bridge.
	 */
	if (br)
		dsa_port_bridge_leave(dp, br);

	info.lag = *dp->lag;

	dsa_port_lag_destroy(dp);

	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify DSA_NOTIFIER_LAG_LEAVE: %pe\n",
			dp->index, ERR_PTR(err));
}

/* Must be called under rcu_read_lock() */
static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
					      bool vlan_filtering,
					      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	int err;

	/* VLAN awareness was off, so the question is "can we turn it on".
	 * We may have had 8021q uppers, those need to go. Make sure we don't
	 * enter an inconsistent state: deny changing the VLAN awareness state
	 * as long as we have 8021q uppers.
	 */
	if (vlan_filtering && dsa_port_is_user(dp)) {
		struct net_device *br = dsa_port_bridge_dev_get(dp);
		struct net_device *upper_dev, *user = dp->user;
		struct list_head *iter;

		netdev_for_each_upper_dev_rcu(user, upper_dev, iter) {
			struct bridge_vlan_info br_info;
			u16 vid;

			if (!is_vlan_dev(upper_dev))
				continue;

			vid = vlan_dev_vlan_id(upper_dev);

			/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
			 * device, respectively the VID is not found, returning
			 * 0 means success, which is a failure for us here.
			 */
			err = br_vlan_get_info(br, vid, &br_info);
			if (err == 0) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Must first remove VLAN uppers having VIDs also present in bridge");
				return false;
			}
		}
	}

	if (!ds->vlan_filtering_is_global)
		return true;

	/* For cases where enabling/disabling VLAN awareness is global to the
	 * switch, we need to handle the case where multiple bridges span
	 * different ports of the same switch device and one of them has a
	 * different setting than what is being requested.
	 */
	dsa_switch_for_each_port(other_dp, ds) {
		struct net_device *other_br = dsa_port_bridge_dev_get(other_dp);

		/* If it's the same bridge, it also has same
		 * vlan_filtering setting => no need to check
		 */
		if (!other_br || other_br == dsa_port_bridge_dev_get(dp))
			continue;

		if (br_vlan_enabled(other_br) != vlan_filtering) {
			NL_SET_ERR_MSG_MOD(extack,
					   "VLAN filtering is a global setting");
			return false;
		}
	}
	return true;
}

/* Change the VLAN awareness of this port (or of the whole switch when
 * vlan_filtering is global), keeping the user netdevs' 8021q offload state
 * in sync and restoring the old setting if that fails partway.
 */
int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
			    struct netlink_ext_ack *extack)
{
	bool old_vlan_filtering = dsa_port_is_vlan_filtering(dp);
	struct dsa_switch *ds = dp->ds;
	bool apply;
	int err;

	if (!ds->ops->port_vlan_filtering)
		return -EOPNOTSUPP;

	/* We are called from dsa_user_switchdev_blocking_event(),
	 * which is not under rcu_read_lock(), unlike
	 * dsa_user_switchdev_event().
	 */
	rcu_read_lock();
	apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering, extack);
	rcu_read_unlock();
	if (!apply)
		return -EINVAL;

	if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
		return 0;

	err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering,
					   extack);
	if (err)
		return err;

	if (ds->vlan_filtering_is_global) {
		struct dsa_port *other_dp;

		ds->vlan_filtering = vlan_filtering;

		dsa_switch_for_each_user_port(other_dp, ds) {
			struct net_device *user = other_dp->user;

			/* We might be called in the unbind path, so not
			 * all user devices might still be registered.
			 */
			if (!user)
				continue;

			err = dsa_user_manage_vlan_filtering(user,
							     vlan_filtering);
			if (err)
				goto restore;
		}
	} else {
		dp->vlan_filtering = vlan_filtering;

		err = dsa_user_manage_vlan_filtering(dp->user,
						     vlan_filtering);
		if (err)
			goto restore;
	}

	return 0;

restore:
	ds->ops->port_vlan_filtering(ds, dp->index, old_vlan_filtering, NULL);

	if (ds->vlan_filtering_is_global)
		ds->vlan_filtering = old_vlan_filtering;
	else
		dp->vlan_filtering = old_vlan_filtering;

	return err;
}

/* This enforces legacy behavior for switch drivers which assume they can't
 * receive VLAN configuration when joining a bridge with vlan_filtering=0
 */
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	struct dsa_switch *ds = dp->ds;

	if (!br)
		return false;

	return !ds->configure_vlan_while_not_filtering && !br_vlan_enabled(br);
}

/* Propagate the bridge's FDB ageing time (given in clock_t) to the fabric,
 * converted to milliseconds, and cache it on success.
 */
int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
	unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
	struct dsa_notifier_ageing_time_info info;
	int err;

	info.ageing_time = ageing_time;

	err = dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
	if (err)
		return err;

	dp->ageing_time = ageing_time;

	return 0;
}

/* Veto enabling MST on the bridge when this port's hardware can't offload
 * it; disabling MST is always accepted.
 */
int dsa_port_mst_enable(struct dsa_port *dp, bool on,
			struct netlink_ext_ack *extack)
{
	if (on && !dsa_port_supports_mst(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "Hardware does not support MST");
		return -EINVAL;
	}

	return 0;
}

/* Ask the driver whether it can offload the requested brport flags, without
 * committing them.
 * NOTE(review): returns -EINVAL (not -EOPNOTSUPP) when the hook is absent,
 * unlike dsa_port_bridge_flags() — presumably so the bridge layer rejects
 * rather than silently skips the request; confirm before changing.
 */
int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
			      struct switchdev_brport_flags flags,
			      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_pre_bridge_flags)
		return -EINVAL;

	return ds->ops->port_pre_bridge_flags(ds, dp->index, flags, extack);
}

/* Commit brport flags to hardware. When BR_LEARNING is turned off while the
 * port is in a learning/forwarding STP state, fast-age the stale entries,
 * and track the learning state in dp->learning.
 */
int dsa_port_bridge_flags(struct dsa_port *dp,
			  struct switchdev_brport_flags flags,
			  struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_bridge_flags)
		return -EOPNOTSUPP;

	err = ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
	if (err)
		return err;

	if (flags.mask & BR_LEARNING) {
		bool learning = flags.val & BR_LEARNING;

		if (learning == dp->learning)
			return 0;

		if ((dp->learning && !learning) &&
		    (dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING))
			dsa_port_fast_age(dp);

		dp->learning = learning;
	}

	return 0;
}

/* Configure flooding of unknown unicast/multicast towards the CPU port, if
 * the driver supports it.
 */
void dsa_port_set_host_flood(struct dsa_port *dp, bool uc, bool mc)
{
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->port_set_host_flood)
		ds->ops->port_set_host_flood(ds, dp->index, uc, mc);
}

/* Offload a VLAN-to-MSTI mapping for the bridge this port belongs to. */
int dsa_port_vlan_msti(struct dsa_port *dp,
		       const struct switchdev_vlan_msti *msti)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->vlan_msti_set)
		return -EOPNOTSUPP;

	return ds->ops->vlan_msti_set(ds, *dp->bridge, msti);
}

/* Propagate an MTU change on this port to the fabric. */
int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu)
{
	struct dsa_notifier_mtu_info info = {
		.dp = dp,
		.mtu = new_mtu,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
}

/* Install a bridge FDB entry on this port's hardware. */
int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	/* Refcounting takes bridge.num as a key, and should be global for all
	 * bridges in the absence of FDB isolation, and per bridge otherwise.
	 * Force the bridge.num to zero here in the absence of FDB isolation.
	 */
	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
}

/* Remove a bridge FDB entry from this port's hardware. */
int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
}

/* Common worker for installing an FDB entry trapped to the CPU, in the
 * given database.
 */
static int dsa_port_host_fdb_add(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = db,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
}

/* Host FDB entry for a standalone port (per-port database). */
int dsa_port_standalone_host_fdb_add(struct dsa_port *dp,
				     const unsigned char *addr, u16 vid)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_fdb_add(dp, addr, vid, db);
}

/* Host FDB entry for a bridged port, also making sure the conduit interface
 * accepts the address.
 */
int dsa_port_bridge_host_fdb_add(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid)
{
	struct net_device *conduit = dsa_port_to_conduit(dp);
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	if (!dp->ds->fdb_isolation)
		db.bridge.num = 0;

	/* Avoid a call to __dev_set_promiscuity() on the conduit, which
	 * requires rtnl_lock(), since we can't guarantee that is held here,
	 * and we can't take it either.
	 */
	if (conduit->priv_flags & IFF_UNICAST_FLT) {
		err = dev_uc_add(conduit, addr);
		if (err)
			return err;
	}

	return dsa_port_host_fdb_add(dp, addr, vid, db);
}

/* Common worker for removing a host FDB entry from the given database. */
static int dsa_port_host_fdb_del(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = db,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
}

/* Remove a standalone port's host FDB entry (per-port database). */
int dsa_port_standalone_host_fdb_del(struct dsa_port *dp,
				     const unsigned char *addr, u16 vid)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_fdb_del(dp, addr, vid, db);
}

/* Remove a bridged port's host FDB entry, mirroring the dev_uc_add() done
 * at add time.
 */
int dsa_port_bridge_host_fdb_del(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid)
{
	struct net_device *conduit = dsa_port_to_conduit(dp);
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	if (!dp->ds->fdb_isolation)
		db.bridge.num = 0;

	if (conduit->priv_flags & IFF_UNICAST_FLT) {
		err = dev_uc_del(conduit, addr);
		if (err)
			return err;
	}

	return dsa_port_host_fdb_del(dp, addr, vid, db);
}

/* Install a bridge FDB entry on the LAG this port belongs to. */
int dsa_port_lag_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			 u16 vid)
{
	struct dsa_notifier_lag_fdb_info info = {
		.lag = dp->lag,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_ADD, &info);
}

/* Remove a bridge FDB entry from the LAG this port belongs to. */
int dsa_port_lag_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			 u16 vid)
{
	struct dsa_notifier_lag_fdb_info info = {
		.lag = dp->lag,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_DEL, &info);
}

/* Dump the hardware FDB of this port through the driver's callback. */
int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_fdb_dump)
		return -EOPNOTSUPP;

	return ds->ops->port_fdb_dump(ds, port, cb, data);
}

/* Install a bridge MDB entry on this port's hardware. */
int dsa_port_mdb_add(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
}

/* Remove a bridge MDB entry from this port's hardware. */
int dsa_port_mdb_del(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
}

/* Common worker for installing a host MDB entry in the given database. */
static int dsa_port_host_mdb_add(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct dsa_db db)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = db,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_ADD, &info);
}

/* Host MDB entry for a standalone port (per-port database). */
int dsa_port_standalone_host_mdb_add(const struct dsa_port *dp,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_mdb_add(dp, mdb, db);
}

/* Host MDB entry for a bridged port, also subscribing the conduit interface
 * to the multicast address.
 */
int dsa_port_bridge_host_mdb_add(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct net_device *conduit = dsa_port_to_conduit(dp);
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	if (!dp->ds->fdb_isolation)
		db.bridge.num = 0;

	err = dev_mc_add(conduit, mdb->addr);
	if (err)
		return err;

	return dsa_port_host_mdb_add(dp, mdb, db);
}

/* Common worker for removing a host MDB entry from the given database. */
static int dsa_port_host_mdb_del(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct dsa_db db)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = db,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_DEL, &info);
}

/* Remove a standalone port's host MDB entry (per-port database). */
int dsa_port_standalone_host_mdb_del(const struct dsa_port *dp,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_mdb_del(dp, mdb, db);
}

/* Remove a bridged port's host MDB entry, mirroring the dev_mc_add() done
 * at add time.
 */
int dsa_port_bridge_host_mdb_del(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct net_device *conduit = dsa_port_to_conduit(dp);
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	if (!dp->ds->fdb_isolation)
		db.bridge.num = 0;

	err = dev_mc_del(conduit, mdb->addr);
	if (err)
		return err;

	return dsa_port_host_mdb_del(dp, mdb, db);
}

/* Propagate a switchdev VLAN add on this port to the fabric. */
int dsa_port_vlan_add(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
		.extack = extack,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
}

int dsa_port_vlan_del(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
	};
1313 1314 return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info); 1315 } 1316 1317 int dsa_port_host_vlan_add(struct dsa_port *dp, 1318 const struct switchdev_obj_port_vlan *vlan, 1319 struct netlink_ext_ack *extack) 1320 { 1321 struct net_device *conduit = dsa_port_to_conduit(dp); 1322 struct dsa_notifier_vlan_info info = { 1323 .dp = dp, 1324 .vlan = vlan, 1325 .extack = extack, 1326 }; 1327 int err; 1328 1329 err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_ADD, &info); 1330 if (err && err != -EOPNOTSUPP) 1331 return err; 1332 1333 vlan_vid_add(conduit, htons(ETH_P_8021Q), vlan->vid); 1334 1335 return err; 1336 } 1337 1338 int dsa_port_host_vlan_del(struct dsa_port *dp, 1339 const struct switchdev_obj_port_vlan *vlan) 1340 { 1341 struct net_device *conduit = dsa_port_to_conduit(dp); 1342 struct dsa_notifier_vlan_info info = { 1343 .dp = dp, 1344 .vlan = vlan, 1345 }; 1346 int err; 1347 1348 err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_DEL, &info); 1349 if (err && err != -EOPNOTSUPP) 1350 return err; 1351 1352 vlan_vid_del(conduit, htons(ETH_P_8021Q), vlan->vid); 1353 1354 return err; 1355 } 1356 1357 int dsa_port_mrp_add(const struct dsa_port *dp, 1358 const struct switchdev_obj_mrp *mrp) 1359 { 1360 struct dsa_switch *ds = dp->ds; 1361 1362 if (!ds->ops->port_mrp_add) 1363 return -EOPNOTSUPP; 1364 1365 return ds->ops->port_mrp_add(ds, dp->index, mrp); 1366 } 1367 1368 int dsa_port_mrp_del(const struct dsa_port *dp, 1369 const struct switchdev_obj_mrp *mrp) 1370 { 1371 struct dsa_switch *ds = dp->ds; 1372 1373 if (!ds->ops->port_mrp_del) 1374 return -EOPNOTSUPP; 1375 1376 return ds->ops->port_mrp_del(ds, dp->index, mrp); 1377 } 1378 1379 int dsa_port_mrp_add_ring_role(const struct dsa_port *dp, 1380 const struct switchdev_obj_ring_role_mrp *mrp) 1381 { 1382 struct dsa_switch *ds = dp->ds; 1383 1384 if (!ds->ops->port_mrp_add_ring_role) 1385 return -EOPNOTSUPP; 1386 1387 return ds->ops->port_mrp_add_ring_role(ds, dp->index, mrp); 1388 } 1389 1390 int 
dsa_port_mrp_del_ring_role(const struct dsa_port *dp, 1391 const struct switchdev_obj_ring_role_mrp *mrp) 1392 { 1393 struct dsa_switch *ds = dp->ds; 1394 1395 if (!ds->ops->port_mrp_del_ring_role) 1396 return -EOPNOTSUPP; 1397 1398 return ds->ops->port_mrp_del_ring_role(ds, dp->index, mrp); 1399 } 1400 1401 static int dsa_port_assign_conduit(struct dsa_port *dp, 1402 struct net_device *conduit, 1403 struct netlink_ext_ack *extack, 1404 bool fail_on_err) 1405 { 1406 struct dsa_switch *ds = dp->ds; 1407 int port = dp->index, err; 1408 1409 err = ds->ops->port_change_conduit(ds, port, conduit, extack); 1410 if (err && !fail_on_err) 1411 dev_err(ds->dev, "port %d failed to assign conduit %s: %pe\n", 1412 port, conduit->name, ERR_PTR(err)); 1413 1414 if (err && fail_on_err) 1415 return err; 1416 1417 dp->cpu_dp = conduit->dsa_ptr; 1418 dp->cpu_port_in_lag = netif_is_lag_master(conduit); 1419 1420 return 0; 1421 } 1422 1423 /* Change the dp->cpu_dp affinity for a user port. Note that both cross-chip 1424 * notifiers and drivers have implicit assumptions about user-to-CPU-port 1425 * mappings, so we unfortunately cannot delay the deletion of the objects 1426 * (switchdev, standalone addresses, standalone VLANs) on the old CPU port 1427 * until the new CPU port has been set up. So we need to completely tear down 1428 * the old CPU port before changing it, and restore it on errors during the 1429 * bringup of the new one. 1430 */ 1431 int dsa_port_change_conduit(struct dsa_port *dp, struct net_device *conduit, 1432 struct netlink_ext_ack *extack) 1433 { 1434 struct net_device *bridge_dev = dsa_port_bridge_dev_get(dp); 1435 struct net_device *old_conduit = dsa_port_to_conduit(dp); 1436 struct net_device *dev = dp->user; 1437 struct dsa_switch *ds = dp->ds; 1438 bool vlan_filtering; 1439 int err, tmp; 1440 1441 /* Bridges may hold host FDB, MDB and VLAN objects. These need to be 1442 * migrated, so dynamically unoffload and later reoffload the bridge 1443 * port. 
1444 */ 1445 if (bridge_dev) { 1446 dsa_port_pre_bridge_leave(dp, bridge_dev); 1447 dsa_port_bridge_leave(dp, bridge_dev); 1448 } 1449 1450 /* The port might still be VLAN filtering even if it's no longer 1451 * under a bridge, either due to ds->vlan_filtering_is_global or 1452 * ds->needs_standalone_vlan_filtering. In turn this means VLANs 1453 * on the CPU port. 1454 */ 1455 vlan_filtering = dsa_port_is_vlan_filtering(dp); 1456 if (vlan_filtering) { 1457 err = dsa_user_manage_vlan_filtering(dev, false); 1458 if (err) { 1459 NL_SET_ERR_MSG_MOD(extack, 1460 "Failed to remove standalone VLANs"); 1461 goto rewind_old_bridge; 1462 } 1463 } 1464 1465 /* Standalone addresses, and addresses of upper interfaces like 1466 * VLAN, LAG, HSR need to be migrated. 1467 */ 1468 dsa_user_unsync_ha(dev); 1469 1470 err = dsa_port_assign_conduit(dp, conduit, extack, true); 1471 if (err) 1472 goto rewind_old_addrs; 1473 1474 dsa_user_sync_ha(dev); 1475 1476 if (vlan_filtering) { 1477 err = dsa_user_manage_vlan_filtering(dev, true); 1478 if (err) { 1479 NL_SET_ERR_MSG_MOD(extack, 1480 "Failed to restore standalone VLANs"); 1481 goto rewind_new_addrs; 1482 } 1483 } 1484 1485 if (bridge_dev) { 1486 err = dsa_port_bridge_join(dp, bridge_dev, extack); 1487 if (err && err == -EOPNOTSUPP) { 1488 NL_SET_ERR_MSG_MOD(extack, 1489 "Failed to reoffload bridge"); 1490 goto rewind_new_vlan; 1491 } 1492 } 1493 1494 return 0; 1495 1496 rewind_new_vlan: 1497 if (vlan_filtering) 1498 dsa_user_manage_vlan_filtering(dev, false); 1499 1500 rewind_new_addrs: 1501 dsa_user_unsync_ha(dev); 1502 1503 dsa_port_assign_conduit(dp, old_conduit, NULL, false); 1504 1505 /* Restore the objects on the old CPU port */ 1506 rewind_old_addrs: 1507 dsa_user_sync_ha(dev); 1508 1509 if (vlan_filtering) { 1510 tmp = dsa_user_manage_vlan_filtering(dev, true); 1511 if (tmp) { 1512 dev_err(ds->dev, 1513 "port %d failed to restore standalone VLANs: %pe\n", 1514 dp->index, ERR_PTR(tmp)); 1515 } 1516 } 1517 1518 
rewind_old_bridge: 1519 if (bridge_dev) { 1520 tmp = dsa_port_bridge_join(dp, bridge_dev, extack); 1521 if (tmp) { 1522 dev_err(ds->dev, 1523 "port %d failed to rejoin bridge %s: %pe\n", 1524 dp->index, bridge_dev->name, ERR_PTR(tmp)); 1525 } 1526 } 1527 1528 return err; 1529 } 1530 1531 void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp, 1532 const struct dsa_device_ops *tag_ops) 1533 { 1534 cpu_dp->rcv = tag_ops->rcv; 1535 cpu_dp->tag_ops = tag_ops; 1536 } 1537 1538 static struct phylink_pcs * 1539 dsa_port_phylink_mac_select_pcs(struct phylink_config *config, 1540 phy_interface_t interface) 1541 { 1542 struct dsa_port *dp = dsa_phylink_to_port(config); 1543 struct phylink_pcs *pcs = ERR_PTR(-EOPNOTSUPP); 1544 struct dsa_switch *ds = dp->ds; 1545 1546 if (ds->ops->phylink_mac_select_pcs) 1547 pcs = ds->ops->phylink_mac_select_pcs(ds, dp->index, interface); 1548 1549 return pcs; 1550 } 1551 1552 static int dsa_port_phylink_mac_prepare(struct phylink_config *config, 1553 unsigned int mode, 1554 phy_interface_t interface) 1555 { 1556 struct dsa_port *dp = dsa_phylink_to_port(config); 1557 struct dsa_switch *ds = dp->ds; 1558 int err = 0; 1559 1560 if (ds->ops->phylink_mac_prepare) 1561 err = ds->ops->phylink_mac_prepare(ds, dp->index, mode, 1562 interface); 1563 1564 return err; 1565 } 1566 1567 static void dsa_port_phylink_mac_config(struct phylink_config *config, 1568 unsigned int mode, 1569 const struct phylink_link_state *state) 1570 { 1571 struct dsa_port *dp = dsa_phylink_to_port(config); 1572 struct dsa_switch *ds = dp->ds; 1573 1574 if (!ds->ops->phylink_mac_config) 1575 return; 1576 1577 ds->ops->phylink_mac_config(ds, dp->index, mode, state); 1578 } 1579 1580 static int dsa_port_phylink_mac_finish(struct phylink_config *config, 1581 unsigned int mode, 1582 phy_interface_t interface) 1583 { 1584 struct dsa_port *dp = dsa_phylink_to_port(config); 1585 struct dsa_switch *ds = dp->ds; 1586 int err = 0; 1587 1588 if (ds->ops->phylink_mac_finish) 1589 err = 
ds->ops->phylink_mac_finish(ds, dp->index, mode, 1590 interface); 1591 1592 return err; 1593 } 1594 1595 static void dsa_port_phylink_mac_link_down(struct phylink_config *config, 1596 unsigned int mode, 1597 phy_interface_t interface) 1598 { 1599 struct dsa_port *dp = dsa_phylink_to_port(config); 1600 struct dsa_switch *ds = dp->ds; 1601 1602 if (!ds->ops->phylink_mac_link_down) 1603 return; 1604 1605 ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface); 1606 } 1607 1608 static void dsa_port_phylink_mac_link_up(struct phylink_config *config, 1609 struct phy_device *phydev, 1610 unsigned int mode, 1611 phy_interface_t interface, 1612 int speed, int duplex, 1613 bool tx_pause, bool rx_pause) 1614 { 1615 struct dsa_port *dp = dsa_phylink_to_port(config); 1616 struct dsa_switch *ds = dp->ds; 1617 1618 if (!ds->ops->phylink_mac_link_up) 1619 return; 1620 1621 ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev, 1622 speed, duplex, tx_pause, rx_pause); 1623 } 1624 1625 static const struct phylink_mac_ops dsa_port_phylink_mac_ops = { 1626 .mac_select_pcs = dsa_port_phylink_mac_select_pcs, 1627 .mac_prepare = dsa_port_phylink_mac_prepare, 1628 .mac_config = dsa_port_phylink_mac_config, 1629 .mac_finish = dsa_port_phylink_mac_finish, 1630 .mac_link_down = dsa_port_phylink_mac_link_down, 1631 .mac_link_up = dsa_port_phylink_mac_link_up, 1632 }; 1633 1634 int dsa_port_phylink_create(struct dsa_port *dp) 1635 { 1636 const struct phylink_mac_ops *mac_ops; 1637 struct dsa_switch *ds = dp->ds; 1638 phy_interface_t mode; 1639 struct phylink *pl; 1640 int err; 1641 1642 err = of_get_phy_mode(dp->dn, &mode); 1643 if (err) 1644 mode = PHY_INTERFACE_MODE_NA; 1645 1646 if (ds->ops->phylink_get_caps) { 1647 ds->ops->phylink_get_caps(ds, dp->index, &dp->pl_config); 1648 } else { 1649 /* For legacy drivers */ 1650 if (mode != PHY_INTERFACE_MODE_NA) { 1651 __set_bit(mode, dp->pl_config.supported_interfaces); 1652 } else { 1653 
__set_bit(PHY_INTERFACE_MODE_INTERNAL, 1654 dp->pl_config.supported_interfaces); 1655 __set_bit(PHY_INTERFACE_MODE_GMII, 1656 dp->pl_config.supported_interfaces); 1657 } 1658 } 1659 1660 mac_ops = &dsa_port_phylink_mac_ops; 1661 if (ds->phylink_mac_ops) 1662 mac_ops = ds->phylink_mac_ops; 1663 1664 pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn), mode, 1665 mac_ops); 1666 if (IS_ERR(pl)) { 1667 pr_err("error creating PHYLINK: %ld\n", PTR_ERR(pl)); 1668 return PTR_ERR(pl); 1669 } 1670 1671 dp->pl = pl; 1672 1673 return 0; 1674 } 1675 1676 void dsa_port_phylink_destroy(struct dsa_port *dp) 1677 { 1678 phylink_destroy(dp->pl); 1679 dp->pl = NULL; 1680 } 1681 1682 static int dsa_shared_port_phylink_register(struct dsa_port *dp) 1683 { 1684 struct dsa_switch *ds = dp->ds; 1685 struct device_node *port_dn = dp->dn; 1686 int err; 1687 1688 dp->pl_config.dev = ds->dev; 1689 dp->pl_config.type = PHYLINK_DEV; 1690 1691 err = dsa_port_phylink_create(dp); 1692 if (err) 1693 return err; 1694 1695 err = phylink_of_phy_connect(dp->pl, port_dn, 0); 1696 if (err && err != -ENODEV) { 1697 pr_err("could not attach to PHY: %d\n", err); 1698 goto err_phy_connect; 1699 } 1700 1701 return 0; 1702 1703 err_phy_connect: 1704 dsa_port_phylink_destroy(dp); 1705 return err; 1706 } 1707 1708 /* During the initial DSA driver migration to OF, port nodes were sometimes 1709 * added to device trees with no indication of how they should operate from a 1710 * link management perspective (phy-handle, fixed-link, etc). Additionally, the 1711 * phy-mode may be absent. The interpretation of these port OF nodes depends on 1712 * their type. 1713 * 1714 * User ports with no phy-handle or fixed-link are expected to connect to an 1715 * internal PHY located on the ds->user_mii_bus at an MDIO address equal to 1716 * the port number. This description is still actively supported. 
1717 * 1718 * Shared (CPU and DSA) ports with no phy-handle or fixed-link are expected to 1719 * operate at the maximum speed that their phy-mode is capable of. If the 1720 * phy-mode is absent, they are expected to operate using the phy-mode 1721 * supported by the port that gives the highest link speed. It is unspecified 1722 * if the port should use flow control or not, half duplex or full duplex, or 1723 * if the phy-mode is a SERDES link, whether in-band autoneg is expected to be 1724 * enabled or not. 1725 * 1726 * In the latter case of shared ports, omitting the link management description 1727 * from the firmware node is deprecated and strongly discouraged. DSA uses 1728 * phylink, which rejects the firmware nodes of these ports for lacking 1729 * required properties. 1730 * 1731 * For switches in this table, DSA will skip enforcing validation and will 1732 * later omit registering a phylink instance for the shared ports, if they lack 1733 * a fixed-link, a phy-handle, or a managed = "in-band-status" property. 1734 * It becomes the responsibility of the driver to ensure that these ports 1735 * operate at the maximum speed (whatever this means) and will interoperate 1736 * with the DSA conduit or other cascade port, since phylink methods will not be 1737 * invoked for them. 1738 * 1739 * If you are considering expanding this table for newly introduced switches, 1740 * think again. It is OK to remove switches from this table if there aren't DT 1741 * blobs in circulation which rely on defaulting the shared ports. 
1742 */ 1743 static const char * const dsa_switches_apply_workarounds[] = { 1744 #if IS_ENABLED(CONFIG_NET_DSA_XRS700X) 1745 "arrow,xrs7003e", 1746 "arrow,xrs7003f", 1747 "arrow,xrs7004e", 1748 "arrow,xrs7004f", 1749 #endif 1750 #if IS_ENABLED(CONFIG_B53) 1751 "brcm,bcm5325", 1752 "brcm,bcm53115", 1753 "brcm,bcm53125", 1754 "brcm,bcm53128", 1755 "brcm,bcm5365", 1756 "brcm,bcm5389", 1757 "brcm,bcm5395", 1758 "brcm,bcm5397", 1759 "brcm,bcm5398", 1760 "brcm,bcm53010-srab", 1761 "brcm,bcm53011-srab", 1762 "brcm,bcm53012-srab", 1763 "brcm,bcm53018-srab", 1764 "brcm,bcm53019-srab", 1765 "brcm,bcm5301x-srab", 1766 "brcm,bcm11360-srab", 1767 "brcm,bcm58522-srab", 1768 "brcm,bcm58525-srab", 1769 "brcm,bcm58535-srab", 1770 "brcm,bcm58622-srab", 1771 "brcm,bcm58623-srab", 1772 "brcm,bcm58625-srab", 1773 "brcm,bcm88312-srab", 1774 "brcm,cygnus-srab", 1775 "brcm,nsp-srab", 1776 "brcm,omega-srab", 1777 "brcm,bcm3384-switch", 1778 "brcm,bcm6328-switch", 1779 "brcm,bcm6368-switch", 1780 "brcm,bcm63xx-switch", 1781 #endif 1782 #if IS_ENABLED(CONFIG_NET_DSA_BCM_SF2) 1783 "brcm,bcm7445-switch-v4.0", 1784 "brcm,bcm7278-switch-v4.0", 1785 "brcm,bcm7278-switch-v4.8", 1786 #endif 1787 #if IS_ENABLED(CONFIG_NET_DSA_LANTIQ_GSWIP) 1788 "lantiq,xrx200-gswip", 1789 "lantiq,xrx300-gswip", 1790 "lantiq,xrx330-gswip", 1791 #endif 1792 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6060) 1793 "marvell,mv88e6060", 1794 #endif 1795 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6XXX) 1796 "marvell,mv88e6085", 1797 "marvell,mv88e6190", 1798 "marvell,mv88e6250", 1799 #endif 1800 #if IS_ENABLED(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON) 1801 "microchip,ksz8765", 1802 "microchip,ksz8794", 1803 "microchip,ksz8795", 1804 "microchip,ksz8863", 1805 "microchip,ksz8873", 1806 "microchip,ksz9477", 1807 "microchip,ksz9897", 1808 "microchip,ksz9893", 1809 "microchip,ksz9563", 1810 "microchip,ksz8563", 1811 "microchip,ksz9567", 1812 #endif 1813 #if IS_ENABLED(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) 1814 "smsc,lan9303-mdio", 1815 #endif 1816 #if 
IS_ENABLED(CONFIG_NET_DSA_SMSC_LAN9303_I2C) 1817 "smsc,lan9303-i2c", 1818 #endif 1819 NULL, 1820 }; 1821 1822 static void dsa_shared_port_validate_of(struct dsa_port *dp, 1823 bool *missing_phy_mode, 1824 bool *missing_link_description) 1825 { 1826 struct device_node *dn = dp->dn, *phy_np; 1827 struct dsa_switch *ds = dp->ds; 1828 phy_interface_t mode; 1829 1830 *missing_phy_mode = false; 1831 *missing_link_description = false; 1832 1833 if (of_get_phy_mode(dn, &mode)) { 1834 *missing_phy_mode = true; 1835 dev_err(ds->dev, 1836 "OF node %pOF of %s port %d lacks the required \"phy-mode\" property\n", 1837 dn, dsa_port_is_cpu(dp) ? "CPU" : "DSA", dp->index); 1838 } 1839 1840 /* Note: of_phy_is_fixed_link() also returns true for 1841 * managed = "in-band-status" 1842 */ 1843 if (of_phy_is_fixed_link(dn)) 1844 return; 1845 1846 phy_np = of_parse_phandle(dn, "phy-handle", 0); 1847 if (phy_np) { 1848 of_node_put(phy_np); 1849 return; 1850 } 1851 1852 *missing_link_description = true; 1853 1854 dev_err(ds->dev, 1855 "OF node %pOF of %s port %d lacks the required \"phy-handle\", \"fixed-link\" or \"managed\" properties\n", 1856 dn, dsa_port_is_cpu(dp) ? 
"CPU" : "DSA", dp->index); 1857 } 1858 1859 static void dsa_shared_port_link_down(struct dsa_port *dp) 1860 { 1861 struct dsa_switch *ds = dp->ds; 1862 1863 if (ds->phylink_mac_ops && ds->phylink_mac_ops->mac_link_down) 1864 ds->phylink_mac_ops->mac_link_down(&dp->pl_config, MLO_AN_FIXED, 1865 PHY_INTERFACE_MODE_NA); 1866 else if (ds->ops->phylink_mac_link_down) 1867 ds->ops->phylink_mac_link_down(ds, dp->index, MLO_AN_FIXED, 1868 PHY_INTERFACE_MODE_NA); 1869 } 1870 1871 int dsa_shared_port_link_register_of(struct dsa_port *dp) 1872 { 1873 struct dsa_switch *ds = dp->ds; 1874 bool missing_link_description; 1875 bool missing_phy_mode; 1876 1877 dsa_shared_port_validate_of(dp, &missing_phy_mode, 1878 &missing_link_description); 1879 1880 if ((missing_phy_mode || missing_link_description) && 1881 !of_device_compatible_match(ds->dev->of_node, 1882 dsa_switches_apply_workarounds)) 1883 return -EINVAL; 1884 1885 if (missing_link_description) { 1886 dev_warn(ds->dev, 1887 "Skipping phylink registration for %s port %d\n", 1888 dsa_port_is_cpu(dp) ? 
"CPU" : "DSA", dp->index); 1889 } else { 1890 dsa_shared_port_link_down(dp); 1891 1892 return dsa_shared_port_phylink_register(dp); 1893 } 1894 1895 return 0; 1896 } 1897 1898 void dsa_shared_port_link_unregister_of(struct dsa_port *dp) 1899 { 1900 if (dp->pl) { 1901 rtnl_lock(); 1902 phylink_disconnect_phy(dp->pl); 1903 rtnl_unlock(); 1904 dsa_port_phylink_destroy(dp); 1905 return; 1906 } 1907 } 1908 1909 int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr, 1910 struct netlink_ext_ack *extack) 1911 { 1912 struct dsa_switch *ds = dp->ds; 1913 int err; 1914 1915 if (!ds->ops->port_hsr_join) 1916 return -EOPNOTSUPP; 1917 1918 dp->hsr_dev = hsr; 1919 1920 err = ds->ops->port_hsr_join(ds, dp->index, hsr, extack); 1921 if (err) 1922 dp->hsr_dev = NULL; 1923 1924 return err; 1925 } 1926 1927 void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr) 1928 { 1929 struct dsa_switch *ds = dp->ds; 1930 int err; 1931 1932 dp->hsr_dev = NULL; 1933 1934 if (ds->ops->port_hsr_leave) { 1935 err = ds->ops->port_hsr_leave(ds, dp->index, hsr); 1936 if (err) 1937 dev_err(dp->ds->dev, 1938 "port %d failed to leave HSR %s: %pe\n", 1939 dp->index, hsr->name, ERR_PTR(err)); 1940 } 1941 } 1942 1943 int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast) 1944 { 1945 struct dsa_notifier_tag_8021q_vlan_info info = { 1946 .dp = dp, 1947 .vid = vid, 1948 }; 1949 1950 if (broadcast) 1951 return dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info); 1952 1953 return dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info); 1954 } 1955 1956 void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast) 1957 { 1958 struct dsa_notifier_tag_8021q_vlan_info info = { 1959 .dp = dp, 1960 .vid = vid, 1961 }; 1962 int err; 1963 1964 if (broadcast) 1965 err = dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info); 1966 else 1967 err = dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info); 1968 if (err) 1969 dev_err(dp->ds->dev, 1970 
"port %d failed to notify tag_8021q VLAN %d deletion: %pe\n", 1971 dp->index, vid, ERR_PTR(err)); 1972 } 1973