// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch port
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "dsa.h"
#include "port.h"
#include "slave.h"
#include "switch.h"
#include "tag_8021q.h"

/**
 * dsa_port_notify - Notify the switching fabric of changes to a port
 * @dp: port on which change occurred
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Notify all switches in the DSA tree that this port's switch belongs to,
 * including this switch itself, of an event. Allows the other switches to
 * reconfigure themselves for cross-chip operations. Can also be used to
 * reconfigure ports without net_devices (CPU ports, DSA links) whenever
 * a user port's state changes.
 */
static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
{
	return dsa_tree_notify(dp->ds->dst, e, v);
}

static void dsa_port_notify_bridge_fdb_flush(const struct dsa_port *dp, u16 vid)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct switchdev_notifier_fdb_info info = {
		.vid = vid,
	};

	/* When the port becomes standalone it has already left the bridge.
	 * Don't notify the bridge in that case.
	 */
	if (!brport_dev)
		return;

	call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
				 brport_dev, &info.info, NULL);
}

static void dsa_port_fast_age(const struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_fast_age)
		return;

	ds->ops->port_fast_age(ds, dp->index);

	/* flush all VLANs */
	dsa_port_notify_bridge_fdb_flush(dp, 0);
}

static int dsa_port_vlan_fast_age(const struct dsa_port *dp, u16 vid)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_vlan_fast_age)
		return -EOPNOTSUPP;

	err = ds->ops->port_vlan_fast_age(ds, dp->index, vid);

	if (!err)
		dsa_port_notify_bridge_fdb_flush(dp, vid);

	return err;
}

static int dsa_port_msti_fast_age(const struct dsa_port *dp, u16 msti)
{
	DECLARE_BITMAP(vids, VLAN_N_VID) = { 0 };
	int err, vid;

	err = br_mst_get_info(dsa_port_bridge_dev_get(dp), msti, vids);
	if (err)
		return err;

	for_each_set_bit(vid, vids, VLAN_N_VID) {
		err = dsa_port_vlan_fast_age(dp, vid);
		if (err)
			return err;
	}

	return 0;
}

static bool dsa_port_can_configure_learning(struct dsa_port *dp)
{
	struct switchdev_brport_flags flags = {
		.mask = BR_LEARNING,
	};
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_bridge_flags || !ds->ops->port_pre_bridge_flags)
		return false;

	err = ds->ops->port_pre_bridge_flags(ds, dp->index, flags, NULL);
	return !err;
}
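
/* Illustrative sketch (hypothetical foo_* driver, not part of this file):
 * a driver advertises learning control by implementing both hooks checked
 * above, with the signatures mirroring the call sites in this file:
 *
 *	static int foo_port_pre_bridge_flags(struct dsa_switch *ds, int port,
 *					     struct switchdev_brport_flags flags,
 *					     struct netlink_ext_ack *extack)
 *	{
 *		if (flags.mask & ~(BR_LEARNING | BR_FLOOD))
 *			return -EINVAL;
 *
 *		return 0;
 *	}
 *
 *	static int foo_port_bridge_flags(struct dsa_switch *ds, int port,
 *					 struct switchdev_brport_flags flags,
 *					 struct netlink_ext_ack *extack)
 *	{
 *		if (flags.mask & BR_LEARNING)
 *			foo_port_set_learning(ds->priv, port,
 *					      !!(flags.val & BR_LEARNING));
 *
 *		return 0;
 *	}
 */
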
bool dsa_port_supports_hwtstamp(struct dsa_port *dp, struct ifreq *ifr)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_hwtstamp_get || !ds->ops->port_hwtstamp_set)
		return false;

	/* "See through" shim implementations of the "get" method.
	 * This will clobber the ifreq structure, but we will either return an
	 * error, or the master will overwrite it with proper values.
	 */
	err = ds->ops->port_hwtstamp_get(ds, dp->index, ifr);
	return err != -EOPNOTSUPP;
}

int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_stp_state_set)
		return -EOPNOTSUPP;

	ds->ops->port_stp_state_set(ds, port, state);

	if (!dsa_port_can_configure_learning(dp) ||
	    (do_fast_age && dp->learning)) {
		/* Fast age FDB entries or flush appropriate forwarding database
		 * for the given port, if we are moving it from Learning or
		 * Forwarding state, to Disabled or Blocking or Listening state.
		 * Ports that were standalone before the STP state change don't
		 * need to fast age the FDB, since address learning is off in
		 * standalone mode.
		 */

		if ((dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING) &&
		    (state == BR_STATE_DISABLED ||
		     state == BR_STATE_BLOCKING ||
		     state == BR_STATE_LISTENING))
			dsa_port_fast_age(dp);
	}

	dp->stp_state = state;

	return 0;
}
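
/* Illustrative sketch (hypothetical foo_* driver): .port_stp_state_set
 * typically maps the bridge port states onto a per-port forwarding
 * register, e.g.:
 *
 *	static void foo_port_stp_state_set(struct dsa_switch *ds, int port,
 *					   u8 state)
 *	{
 *		u8 hw_state;
 *
 *		switch (state) {
 *		case BR_STATE_DISABLED:
 *			hw_state = FOO_PORT_STATE_DISABLED;
 *			break;
 *		case BR_STATE_LISTENING:
 *		case BR_STATE_BLOCKING:
 *			hw_state = FOO_PORT_STATE_BLOCKING;
 *			break;
 *		case BR_STATE_LEARNING:
 *			hw_state = FOO_PORT_STATE_LEARNING;
 *			break;
 *		case BR_STATE_FORWARDING:
 *		default:
 *			hw_state = FOO_PORT_STATE_FORWARDING;
 *			break;
 *		}
 *
 *		foo_write_port_state(ds->priv, port, hw_state);
 *	}
 */
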
static void dsa_port_set_state_now(struct dsa_port *dp, u8 state,
				   bool do_fast_age)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	err = dsa_port_set_state(dp, state, do_fast_age);
	if (err && err != -EOPNOTSUPP) {
		dev_err(ds->dev, "port %d failed to set STP state %u: %pe\n",
			dp->index, state, ERR_PTR(err));
	}
}

int dsa_port_set_mst_state(struct dsa_port *dp,
			   const struct switchdev_mst_state *state,
			   struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	u8 prev_state;
	int err;

	if (!ds->ops->port_mst_state_set)
		return -EOPNOTSUPP;

	err = br_mst_get_state(dsa_port_to_bridge_port(dp), state->msti,
			       &prev_state);
	if (err)
		return err;

	err = ds->ops->port_mst_state_set(ds, dp->index, state);
	if (err)
		return err;

	if (!(dp->learning &&
	      (prev_state == BR_STATE_LEARNING ||
	       prev_state == BR_STATE_FORWARDING) &&
	      (state->state == BR_STATE_DISABLED ||
	       state->state == BR_STATE_BLOCKING ||
	       state->state == BR_STATE_LISTENING)))
		return 0;

	err = dsa_port_msti_fast_age(dp, state->msti);
	if (err)
		NL_SET_ERR_MSG_MOD(extack,
				   "Unable to flush associated VLANs");

	return 0;
}

int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	int err;

	if (ds->ops->port_enable) {
		err = ds->ops->port_enable(ds, port, phy);
		if (err)
			return err;
	}

	if (!dp->bridge)
		dsa_port_set_state_now(dp, BR_STATE_FORWARDING, false);

	if (dp->pl)
		phylink_start(dp->pl);

	return 0;
}

int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
{
	int err;

	rtnl_lock();
	err = dsa_port_enable_rt(dp, phy);
	rtnl_unlock();

	return err;
}

void dsa_port_disable_rt(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (dp->pl)
		phylink_stop(dp->pl);

	if (!dp->bridge)
		dsa_port_set_state_now(dp, BR_STATE_DISABLED, false);

	if (ds->ops->port_disable)
		ds->ops->port_disable(ds, port);
}

void dsa_port_disable(struct dsa_port *dp)
{
	rtnl_lock();
	dsa_port_disable_rt(dp);
	rtnl_unlock();
}
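
/* Illustrative sketch (hypothetical foo_* driver): .port_enable and
 * .port_disable usually just gate the port's MAC, e.g.:
 *
 *	static int foo_port_enable(struct dsa_switch *ds, int port,
 *				   struct phy_device *phy)
 *	{
 *		return foo_port_set_mac_enabled(ds->priv, port, true);
 *	}
 *
 *	static void foo_port_disable(struct dsa_switch *ds, int port)
 *	{
 *		foo_port_set_mac_enabled(ds->priv, port, false);
 *	}
 */
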
static void dsa_port_reset_vlan_filtering(struct dsa_port *dp,
					  struct dsa_bridge bridge)
{
	struct netlink_ext_ack extack = {0};
	bool change_vlan_filtering = false;
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	bool vlan_filtering;
	int err;

	if (ds->needs_standalone_vlan_filtering &&
	    !br_vlan_enabled(bridge.dev)) {
		change_vlan_filtering = true;
		vlan_filtering = true;
	} else if (!ds->needs_standalone_vlan_filtering &&
		   br_vlan_enabled(bridge.dev)) {
		change_vlan_filtering = true;
		vlan_filtering = false;
	}

	/* If the bridge was vlan_filtering, the bridge core doesn't trigger an
	 * event for changing vlan_filtering setting upon slave ports leaving
	 * it. That is a good thing, because that lets us handle it and also
	 * handle the case where the switch's vlan_filtering setting is global
	 * (not per port). When that happens, the correct moment to trigger the
	 * vlan_filtering callback is only when the last port leaves the last
	 * VLAN-aware bridge.
	 */
	if (change_vlan_filtering && ds->vlan_filtering_is_global) {
		dsa_switch_for_each_port(other_dp, ds) {
			struct net_device *br = dsa_port_bridge_dev_get(other_dp);

			if (br && br_vlan_enabled(br)) {
				change_vlan_filtering = false;
				break;
			}
		}
	}

	if (!change_vlan_filtering)
		return;

	err = dsa_port_vlan_filtering(dp, vlan_filtering, &extack);
	if (extack._msg) {
		dev_err(ds->dev, "port %d: %s\n", dp->index,
			extack._msg);
	}
	if (err && err != -EOPNOTSUPP) {
		dev_err(ds->dev,
			"port %d failed to reset VLAN filtering to %d: %pe\n",
			dp->index, vlan_filtering, ERR_PTR(err));
	}
}

static int dsa_port_inherit_brport_flags(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD | BR_PORT_LOCKED;
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);

		if (br_port_flag_is_set(brport_dev, BIT(flag)))
			flags.val = BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, extack);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return 0;
}

static void dsa_port_clear_brport_flags(struct dsa_port *dp)
{
	const unsigned long val = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD | BR_PORT_LOCKED;
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);
		flags.val = val & BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, NULL);
		if (err && err != -EOPNOTSUPP)
			dev_err(dp->ds->dev,
				"failed to clear bridge port flag %lu: %pe\n",
				flags.val, ERR_PTR(err));
	}
}

static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	int err;

	err = dsa_port_inherit_brport_flags(dp, extack);
	if (err)
		return err;

	err = dsa_port_set_state(dp, br_port_get_stp_state(brport_dev), false);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_vlan_filtering(dp, br_vlan_enabled(br), extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_ageing_time(dp, br_get_ageing_time(br));
	if (err && err != -EOPNOTSUPP)
		return err;

	return 0;
}

static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp,
					    struct dsa_bridge bridge)
{
	/* Configure the port for standalone mode (no address learning,
	 * flood everything).
	 * The bridge only emits SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS events
	 * when the user requests it through netlink or sysfs, but not
	 * automatically at port join or leave, so we need to handle resetting
	 * the brport flags ourselves. But we even prefer it that way, because
	 * otherwise, some setups might never get the notification they need,
	 * for example, when a port leaves a LAG that offloads the bridge,
	 * it becomes standalone, but as far as the bridge is concerned, no
	 * port ever left.
	 */
	dsa_port_clear_brport_flags(dp);

	/* The bridge layer put the port in BR_STATE_DISABLED when it left the
	 * bridge, so move it back to BR_STATE_FORWARDING to keep it functional
	 * in standalone mode.
	 */
	dsa_port_set_state_now(dp, BR_STATE_FORWARDING, true);

	dsa_port_reset_vlan_filtering(dp, bridge);

	/* Ageing time may be global to the switch chip, so don't change it
	 * here because we have no good reason (or value) to change it to.
	 */
}

static int dsa_port_bridge_create(struct dsa_port *dp,
				  struct net_device *br,
				  struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_bridge *bridge;

	bridge = dsa_tree_bridge_find(ds->dst, br);
	if (bridge) {
		refcount_inc(&bridge->refcount);
		dp->bridge = bridge;
		return 0;
	}

	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return -ENOMEM;

	refcount_set(&bridge->refcount, 1);

	bridge->dev = br;

	bridge->num = dsa_bridge_num_get(br, ds->max_num_bridges);
	if (ds->max_num_bridges && !bridge->num) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Range of offloadable bridges exceeded");
		kfree(bridge);
		return -EOPNOTSUPP;
	}

	dp->bridge = bridge;

	return 0;
}

static void dsa_port_bridge_destroy(struct dsa_port *dp,
				    const struct net_device *br)
{
	struct dsa_bridge *bridge = dp->bridge;

	dp->bridge = NULL;

	if (!refcount_dec_and_test(&bridge->refcount))
		return;

	if (bridge->num)
		dsa_bridge_num_put(br, bridge->num);

	kfree(bridge);
}

static bool dsa_port_supports_mst(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	return ds->ops->vlan_msti_set &&
		ds->ops->port_mst_state_set &&
		ds->ops->port_vlan_fast_age &&
		dsa_port_can_configure_learning(dp);
}
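
/* For reference, the ops checked above would be wired up in a hypothetical
 * driver like so; the member names and signatures mirror the call sites in
 * this file:
 *
 *	static const struct dsa_switch_ops foo_switch_ops = {
 *		...
 *		.vlan_msti_set		= foo_vlan_msti_set,
 *		.port_mst_state_set	= foo_port_mst_state_set,
 *		.port_vlan_fast_age	= foo_port_vlan_fast_age,
 *		.port_pre_bridge_flags	= foo_port_pre_bridge_flags,
 *		.port_bridge_flags	= foo_port_bridge_flags,
 *		...
 *	};
 */
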
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
			 struct netlink_ext_ack *extack)
{
	struct dsa_notifier_bridge_info info = {
		.dp = dp,
		.extack = extack,
	};
	struct net_device *dev = dp->slave;
	struct net_device *brport_dev;
	int err;

	if (br_mst_enabled(br) && !dsa_port_supports_mst(dp))
		return -EOPNOTSUPP;

	/* Here the interface is already bridged. Reflect the current
	 * configuration so that drivers can program their chips accordingly.
	 */
	err = dsa_port_bridge_create(dp, br, extack);
	if (err)
		return err;

	brport_dev = dsa_port_to_bridge_port(dp);

	info.bridge = *dp->bridge;
	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);
	if (err)
		goto out_rollback;

	/* Drivers which support bridge TX forwarding should set this */
	dp->bridge->tx_fwd_offload = info.tx_fwd_offload;

	err = switchdev_bridge_port_offload(brport_dev, dev, dp,
					    &dsa_slave_switchdev_notifier,
					    &dsa_slave_switchdev_blocking_notifier,
					    dp->bridge->tx_fwd_offload, extack);
	if (err)
		goto out_rollback_unbridge;

	err = dsa_port_switchdev_sync_attrs(dp, extack);
	if (err)
		goto out_rollback_unoffload;

	return 0;

out_rollback_unoffload:
	switchdev_bridge_port_unoffload(brport_dev, dp,
					&dsa_slave_switchdev_notifier,
					&dsa_slave_switchdev_blocking_notifier);
	dsa_flush_workqueue();
out_rollback_unbridge:
	dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
out_rollback:
	dsa_port_bridge_destroy(dp, br);
	return err;
}

void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);

	/* Don't try to unoffload something that is not offloaded */
	if (!brport_dev)
		return;

	switchdev_bridge_port_unoffload(brport_dev, dp,
					&dsa_slave_switchdev_notifier,
					&dsa_slave_switchdev_blocking_notifier);

	dsa_flush_workqueue();
}

void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct dsa_notifier_bridge_info info = {
		.dp = dp,
	};
	int err;

	/* If the port could not be offloaded to begin with, then
	 * there is nothing to do.
	 */
	if (!dp->bridge)
		return;

	info.bridge = *dp->bridge;

	/* Here the port is already unbridged. Reflect the current configuration
	 * so that drivers can program their chips accordingly.
	 */
	dsa_port_bridge_destroy(dp, br);

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify DSA_NOTIFIER_BRIDGE_LEAVE: %pe\n",
			dp->index, ERR_PTR(err));

	dsa_port_switchdev_unsync_attrs(dp, info.bridge);
}

int dsa_port_lag_change(struct dsa_port *dp,
			struct netdev_lag_lower_state_info *linfo)
{
	struct dsa_notifier_lag_info info = {
		.dp = dp,
	};
	bool tx_enabled;

	if (!dp->lag)
		return 0;

	/* On statically configured aggregates (e.g. loadbalance
	 * without LACP) ports will always be tx_enabled, even if the
	 * link is down. Thus we require both link_up and tx_enabled
	 * in order to include it in the tx set.
	 */
	tx_enabled = linfo->link_up && linfo->tx_enabled;

	if (tx_enabled == dp->lag_tx_enabled)
		return 0;

	dp->lag_tx_enabled = tx_enabled;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
}

static int dsa_port_lag_create(struct dsa_port *dp,
			       struct net_device *lag_dev)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_lag *lag;

	lag = dsa_tree_lag_find(ds->dst, lag_dev);
	if (lag) {
		refcount_inc(&lag->refcount);
		dp->lag = lag;
		return 0;
	}

	lag = kzalloc(sizeof(*lag), GFP_KERNEL);
	if (!lag)
		return -ENOMEM;

	refcount_set(&lag->refcount, 1);
	mutex_init(&lag->fdb_lock);
	INIT_LIST_HEAD(&lag->fdbs);
	lag->dev = lag_dev;
	dsa_lag_map(ds->dst, lag);
	dp->lag = lag;

	return 0;
}

static void dsa_port_lag_destroy(struct dsa_port *dp)
{
	struct dsa_lag *lag = dp->lag;

	dp->lag = NULL;
	dp->lag_tx_enabled = false;

	if (!refcount_dec_and_test(&lag->refcount))
		return;

	WARN_ON(!list_empty(&lag->fdbs));
	dsa_lag_unmap(dp->ds->dst, lag);
	kfree(lag);
}

int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
		      struct netdev_lag_upper_info *uinfo,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_lag_info info = {
		.dp = dp,
		.info = uinfo,
		.extack = extack,
	};
	struct net_device *bridge_dev;
	int err;

	err = dsa_port_lag_create(dp, lag_dev);
	if (err)
		goto err_lag_create;

	info.lag = *dp->lag;
	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
	if (err)
		goto err_lag_join;

	bridge_dev = netdev_master_upper_dev_get(lag_dev);
	if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
		return 0;

	err = dsa_port_bridge_join(dp, bridge_dev, extack);
	if (err)
		goto err_bridge_join;

	return 0;

err_bridge_join:
	dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
err_lag_join:
	dsa_port_lag_destroy(dp);
err_lag_create:
	return err;
}

void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);

	if (br)
		dsa_port_pre_bridge_leave(dp, br);
}

void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	struct dsa_notifier_lag_info info = {
		.dp = dp,
	};
	int err;

	if (!dp->lag)
		return;

	/* Port might have been part of a LAG that in turn was
	 * attached to a bridge.
	 */
	if (br)
		dsa_port_bridge_leave(dp, br);

	info.lag = *dp->lag;

	dsa_port_lag_destroy(dp);

	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify DSA_NOTIFIER_LAG_LEAVE: %pe\n",
			dp->index, ERR_PTR(err));
}

/* Must be called under rcu_read_lock() */
static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
					      bool vlan_filtering,
					      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	int err;

	/* VLAN awareness was off, so the question is "can we turn it on".
	 * We may have had 8021q uppers, those need to go. Make sure we don't
	 * enter an inconsistent state: deny changing the VLAN awareness state
	 * as long as we have 8021q uppers.
	 */
	if (vlan_filtering && dsa_port_is_user(dp)) {
		struct net_device *br = dsa_port_bridge_dev_get(dp);
		struct net_device *upper_dev, *slave = dp->slave;
		struct list_head *iter;

		netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
			struct bridge_vlan_info br_info;
			u16 vid;

			if (!is_vlan_dev(upper_dev))
				continue;

			vid = vlan_dev_vlan_id(upper_dev);

			/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
			 * device respectively the VID is not found. Returning
			 * 0 means the VID does exist in the bridge, which is a
			 * failure for us here.
			 */
			err = br_vlan_get_info(br, vid, &br_info);
			if (err == 0) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Must first remove VLAN uppers having VIDs also present in bridge");
				return false;
			}
		}
	}

	if (!ds->vlan_filtering_is_global)
		return true;

	/* For cases where enabling/disabling VLAN awareness is global to the
	 * switch, we need to handle the case where multiple bridges span
	 * different ports of the same switch device and one of them has a
	 * different setting than what is being requested.
	 */
	dsa_switch_for_each_port(other_dp, ds) {
		struct net_device *other_br = dsa_port_bridge_dev_get(other_dp);

		/* If it's the same bridge, it also has same
		 * vlan_filtering setting => no need to check
		 */
		if (!other_br || other_br == dsa_port_bridge_dev_get(dp))
			continue;

		if (br_vlan_enabled(other_br) != vlan_filtering) {
			NL_SET_ERR_MSG_MOD(extack,
					   "VLAN filtering is a global setting");
			return false;
		}
	}
	return true;
}

int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
			    struct netlink_ext_ack *extack)
{
	bool old_vlan_filtering = dsa_port_is_vlan_filtering(dp);
	struct dsa_switch *ds = dp->ds;
	bool apply;
	int err;

	if (!ds->ops->port_vlan_filtering)
		return -EOPNOTSUPP;

	/* We are called from dsa_slave_switchdev_blocking_event(),
	 * which is not under rcu_read_lock(), unlike
	 * dsa_slave_switchdev_event().
	 */
	rcu_read_lock();
	apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering, extack);
	rcu_read_unlock();
	if (!apply)
		return -EINVAL;

	if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
		return 0;

	err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering,
					   extack);
	if (err)
		return err;

	if (ds->vlan_filtering_is_global) {
		struct dsa_port *other_dp;

		ds->vlan_filtering = vlan_filtering;

		dsa_switch_for_each_user_port(other_dp, ds) {
			struct net_device *slave = other_dp->slave;

			/* We might be called in the unbind path, so not
			 * all slave devices might still be registered.
			 */
			if (!slave)
				continue;

			err = dsa_slave_manage_vlan_filtering(slave,
							      vlan_filtering);
			if (err)
				goto restore;
		}
	} else {
		dp->vlan_filtering = vlan_filtering;

		err = dsa_slave_manage_vlan_filtering(dp->slave,
						      vlan_filtering);
		if (err)
			goto restore;
	}

	return 0;

restore:
	ds->ops->port_vlan_filtering(ds, dp->index, old_vlan_filtering, NULL);

	if (ds->vlan_filtering_is_global)
		ds->vlan_filtering = old_vlan_filtering;
	else
		dp->vlan_filtering = old_vlan_filtering;

	return err;
}
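
/* Illustrative sketch (hypothetical foo_* driver): a chip whose VLAN
 * awareness is a single global knob would set ds->vlan_filtering_is_global
 * in its setup routine and program one switch-wide register here:
 *
 *	static int foo_port_vlan_filtering(struct dsa_switch *ds, int port,
 *					   bool vlan_filtering,
 *					   struct netlink_ext_ack *extack)
 *	{
 *		return foo_set_global_8021q_mode(ds->priv, vlan_filtering);
 *	}
 */
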
/* This enforces legacy behavior for switch drivers which assume they can't
 * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
 */
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	struct dsa_switch *ds = dp->ds;

	if (!br)
		return false;

	return !ds->configure_vlan_while_not_filtering && !br_vlan_enabled(br);
}

int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
	unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
	struct dsa_notifier_ageing_time_info info;
	int err;

	info.ageing_time = ageing_time;

	err = dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
	if (err)
		return err;

	dp->ageing_time = ageing_time;

	return 0;
}
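
/* Worked example of the conversion above (assuming the typical USER_HZ=100):
 * the bridge's default 300 s ageing time arrives as ageing_clock = 30000
 * clock_t ticks, which clock_t_to_jiffies()/jiffies_to_msecs() turn into
 * ageing_time = 300000 ms, the unit that DSA_NOTIFIER_AGEING_TIME carries
 * to drivers.
 */
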
int dsa_port_mst_enable(struct dsa_port *dp, bool on,
			struct netlink_ext_ack *extack)
{
	if (on && !dsa_port_supports_mst(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "Hardware does not support MST");
		return -EINVAL;
	}

	return 0;
}

int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
			      struct switchdev_brport_flags flags,
			      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_pre_bridge_flags)
		return -EINVAL;

	return ds->ops->port_pre_bridge_flags(ds, dp->index, flags, extack);
}

int dsa_port_bridge_flags(struct dsa_port *dp,
			  struct switchdev_brport_flags flags,
			  struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_bridge_flags)
		return -EOPNOTSUPP;

	err = ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
	if (err)
		return err;

	if (flags.mask & BR_LEARNING) {
		bool learning = flags.val & BR_LEARNING;

		if (learning == dp->learning)
			return 0;

		if ((dp->learning && !learning) &&
		    (dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING))
			dsa_port_fast_age(dp);

		dp->learning = learning;
	}

	return 0;
}

void dsa_port_set_host_flood(struct dsa_port *dp, bool uc, bool mc)
{
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->port_set_host_flood)
		ds->ops->port_set_host_flood(ds, dp->index, uc, mc);
}

int dsa_port_vlan_msti(struct dsa_port *dp,
		       const struct switchdev_vlan_msti *msti)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->vlan_msti_set)
		return -EOPNOTSUPP;

	return ds->ops->vlan_msti_set(ds, *dp->bridge, msti);
}

int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu)
{
	struct dsa_notifier_mtu_info info = {
		.dp = dp,
		.mtu = new_mtu,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
}

int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	/* Refcounting takes bridge.num as a key, and should be global for all
	 * bridges in the absence of FDB isolation, and per bridge otherwise.
	 * Force the bridge.num to zero here in the absence of FDB isolation.
	 */
	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
}
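
/* Example of the bridge.num forcing above: with two offloaded bridges
 * (bridge.num 1 and 2) on a switch without ds->fdb_isolation, both map to
 * database 0, so identical {addr, vid} entries installed from either bridge
 * share one refcount and one hardware entry. With FDB isolation, each bridge
 * keeps its own database and the entries are refcounted separately.
 */
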
int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
}

static int dsa_port_host_fdb_add(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
}

int dsa_port_standalone_host_fdb_add(struct dsa_port *dp,
				     const unsigned char *addr, u16 vid)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_fdb_add(dp, addr, vid, db);
}

int dsa_port_bridge_host_fdb_add(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid)
{
	struct net_device *master = dsa_port_to_master(dp);
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	/* Avoid a call to __dev_set_promiscuity() on the master, which
	 * requires rtnl_lock(), since we can't guarantee that is held here,
	 * and we can't take it either.
	 */
	if (master->priv_flags & IFF_UNICAST_FLT) {
		err = dev_uc_add(master, addr);
		if (err)
			return err;
	}

	return dsa_port_host_fdb_add(dp, addr, vid, db);
}

static int dsa_port_host_fdb_del(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
}

int dsa_port_standalone_host_fdb_del(struct dsa_port *dp,
				     const unsigned char *addr, u16 vid)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_fdb_del(dp, addr, vid, db);
}

int dsa_port_bridge_host_fdb_del(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid)
{
	struct net_device *master = dsa_port_to_master(dp);
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	if (master->priv_flags & IFF_UNICAST_FLT) {
		err = dev_uc_del(master, addr);
		if (err)
			return err;
	}

	return dsa_port_host_fdb_del(dp, addr, vid, db);
}

int dsa_port_lag_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			 u16 vid)
{
	struct dsa_notifier_lag_fdb_info info = {
		.lag = dp->lag,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_ADD, &info);
}

int dsa_port_lag_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			 u16 vid)
{
	struct dsa_notifier_lag_fdb_info info = {
		.lag = dp->lag,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_DEL, &info);
}

int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_fdb_dump)
		return -EOPNOTSUPP;

	return ds->ops->port_fdb_dump(ds, port, cb, data);
}

int dsa_port_mdb_add(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
}

int dsa_port_mdb_del(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
}

static int dsa_port_host_mdb_add(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct dsa_db db)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_ADD, &info);
}

int dsa_port_standalone_host_mdb_add(const struct dsa_port *dp,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_mdb_add(dp, mdb, db);
}

int dsa_port_bridge_host_mdb_add(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct net_device *master = dsa_port_to_master(dp);
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	err = dev_mc_add(master, mdb->addr);
	if (err)
		return err;

	return dsa_port_host_mdb_add(dp, mdb, db);
}

static int dsa_port_host_mdb_del(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct dsa_db db)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_DEL, &info);
}

int dsa_port_standalone_host_mdb_del(const struct dsa_port *dp,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_mdb_del(dp, mdb, db);
}

int dsa_port_bridge_host_mdb_del(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct net_device *master = dsa_port_to_master(dp);
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	err = dev_mc_del(master, mdb->addr);
	if (err)
		return err;

	return dsa_port_host_mdb_del(dp, mdb, db);
}

int dsa_port_vlan_add(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
		.extack = extack,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
}

int dsa_port_vlan_del(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
}

int dsa_port_host_vlan_add(struct dsa_port *dp,
			   const struct switchdev_obj_port_vlan *vlan,
			   struct netlink_ext_ack *extack)
{
	struct net_device *master = dsa_port_to_master(dp);
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
		.extack = extack,
	};
	int err;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_ADD, &info);
	if (err && err != -EOPNOTSUPP)
		return err;

	vlan_vid_add(master, htons(ETH_P_8021Q), vlan->vid);

	return err;
}

int dsa_port_host_vlan_del(struct dsa_port *dp,
			   const struct switchdev_obj_port_vlan *vlan)
{
	struct net_device *master = dsa_port_to_master(dp);
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
	};
	int err;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_DEL, &info);
	if (err && err != -EOPNOTSUPP)
		return err;

	vlan_vid_del(master, htons(ETH_P_8021Q), vlan->vid);

	return err;
}

int dsa_port_mrp_add(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_add)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_add(ds, dp->index, mrp);
}

int dsa_port_mrp_del(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_del)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_del(ds, dp->index, mrp);
}

int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_add_ring_role)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_add_ring_role(ds, dp->index, mrp);
}

int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_del_ring_role)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_del_ring_role(ds, dp->index, mrp);
}

static int dsa_port_assign_master(struct dsa_port *dp,
				  struct net_device *master,
				  struct netlink_ext_ack *extack,
				  bool fail_on_err)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index, err;

	err = ds->ops->port_change_master(ds, port, master, extack);
	if (err && !fail_on_err)
		dev_err(ds->dev, "port %d failed to assign master %s: %pe\n",
			port, master->name, ERR_PTR(err));

	if (err && fail_on_err)
		return err;

	dp->cpu_dp = master->dsa_ptr;
	dp->cpu_port_in_lag = netif_is_lag_master(master);

	return 0;
}

/* Change the dp->cpu_dp affinity for a user port. Note that both cross-chip
 * notifiers and drivers have implicit assumptions about user-to-CPU-port
 * mappings, so we unfortunately cannot delay the deletion of the objects
 * (switchdev, standalone addresses, standalone VLANs) on the old CPU port
 * until the new CPU port has been set up. So we need to completely tear down
 * the old CPU port before changing it, and restore it on errors during the
 * bringup of the new one.
 */
int dsa_port_change_master(struct dsa_port *dp, struct net_device *master,
			   struct netlink_ext_ack *extack)
{
	struct net_device *bridge_dev = dsa_port_bridge_dev_get(dp);
	struct net_device *old_master = dsa_port_to_master(dp);
	struct net_device *dev = dp->slave;
	struct dsa_switch *ds = dp->ds;
	bool vlan_filtering;
	int err, tmp;

	/* Bridges may hold host FDB, MDB and VLAN objects. These need to be
	 * migrated, so dynamically unoffload and later reoffload the bridge
	 * port.
	 */
	if (bridge_dev) {
		dsa_port_pre_bridge_leave(dp, bridge_dev);
		dsa_port_bridge_leave(dp, bridge_dev);
	}

	/* The port might still be VLAN filtering even if it's no longer
	 * under a bridge, either due to ds->vlan_filtering_is_global or
	 * ds->needs_standalone_vlan_filtering. In turn this means VLANs
	 * on the CPU port.
	 */
	vlan_filtering = dsa_port_is_vlan_filtering(dp);
	if (vlan_filtering) {
		err = dsa_slave_manage_vlan_filtering(dev, false);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to remove standalone VLANs");
			goto rewind_old_bridge;
		}
	}

	/* Standalone addresses, and addresses of upper interfaces like
	 * VLAN, LAG, HSR need to be migrated.
	 */
	dsa_slave_unsync_ha(dev);

	err = dsa_port_assign_master(dp, master, extack, true);
	if (err)
		goto rewind_old_addrs;

	dsa_slave_sync_ha(dev);

	if (vlan_filtering) {
		err = dsa_slave_manage_vlan_filtering(dev, true);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to restore standalone VLANs");
			goto rewind_new_addrs;
		}
	}

	if (bridge_dev) {
		err = dsa_port_bridge_join(dp, bridge_dev, extack);
		if (err && err == -EOPNOTSUPP) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to reoffload bridge");
			goto rewind_new_vlan;
		}
	}

	return 0;

rewind_new_vlan:
	if (vlan_filtering)
		dsa_slave_manage_vlan_filtering(dev, false);

rewind_new_addrs:
	dsa_slave_unsync_ha(dev);

	dsa_port_assign_master(dp, old_master, NULL, false);

/* Restore the objects on the old CPU port */
rewind_old_addrs:
	dsa_slave_sync_ha(dev);

	if (vlan_filtering) {
		tmp = dsa_slave_manage_vlan_filtering(dev, true);
		if (tmp) {
			dev_err(ds->dev,
				"port %d failed to restore standalone VLANs: %pe\n",
				dp->index, ERR_PTR(tmp));
		}
	}

rewind_old_bridge:
	if (bridge_dev) {
		tmp = dsa_port_bridge_join(dp, bridge_dev, extack);
		if (tmp) {
			dev_err(ds->dev,
				"port %d failed to rejoin bridge %s: %pe\n",
				dp->index, bridge_dev->name, ERR_PTR(tmp));
		}
	}

	return err;
}

void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
			       const struct dsa_device_ops *tag_ops)
{
	cpu_dp->rcv = tag_ops->rcv;
	cpu_dp->tag_ops = tag_ops;
}

static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
{
	struct device_node *phy_dn;
	struct phy_device *phydev;

	phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
	if (!phy_dn)
		return NULL;

	phydev = of_phy_find_device(phy_dn);
	if (!phydev) {
		of_node_put(phy_dn);
		return ERR_PTR(-EPROBE_DEFER);
	}

	of_node_put(phy_dn);
	return phydev;
}

static void dsa_port_phylink_validate(struct phylink_config *config,
				      unsigned long *supported,
				      struct phylink_link_state *state)
{
	/* Skip call for drivers which don't yet set mac_capabilities,
	 * since validating in that case would mean their PHY will advertise
	 * nothing. In turn, skipping validation makes them advertise
	 * everything that the PHY supports, so those drivers should be
	 * converted ASAP.
	 */
	if (config->mac_capabilities)
		phylink_generic_validate(config, supported, state);
}
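
/* Illustrative sketch (hypothetical foo_* driver): mac_capabilities is
 * filled in from the driver's .phylink_get_caps, e.g.:
 *
 *	static void foo_phylink_get_caps(struct dsa_switch *ds, int port,
 *					 struct phylink_config *config)
 *	{
 *		config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
 *					   MAC_10 | MAC_100 | MAC_1000FD;
 *
 *		__set_bit(PHY_INTERFACE_MODE_RGMII,
 *			  config->supported_interfaces);
 *	}
 */
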
static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
					       struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;
	int err;

	/* Only called for inband modes */
	if (!ds->ops->phylink_mac_link_state) {
		state->link = 0;
		return;
	}

	err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
	if (err < 0) {
		dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
			dp->index, err);
		state->link = 0;
	}
}

static struct phylink_pcs *
dsa_port_phylink_mac_select_pcs(struct phylink_config *config,
				phy_interface_t interface)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct phylink_pcs *pcs = ERR_PTR(-EOPNOTSUPP);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->phylink_mac_select_pcs)
		pcs = ds->ops->phylink_mac_select_pcs(ds, dp->index, interface);

	return pcs;
}

static void dsa_port_phylink_mac_config(struct phylink_config *config,
					unsigned int mode,
					const struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_config)
		return;

	ds->ops->phylink_mac_config(ds, dp->index, mode, state);
}

static void dsa_port_phylink_mac_an_restart(struct phylink_config *config)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_an_restart)
		return;

	ds->ops->phylink_mac_an_restart(ds, dp->index);
}

static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
					   unsigned int mode,
					   phy_interface_t interface)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct phy_device *phydev = NULL;
	struct dsa_switch *ds = dp->ds;

	if (dsa_port_is_user(dp))
		phydev = dp->slave->phydev;

	if (!ds->ops->phylink_mac_link_down) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
}

static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
					 struct phy_device *phydev,
					 unsigned int mode,
					 phy_interface_t interface,
					 int speed, int duplex,
					 bool tx_pause, bool rx_pause)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_link_up) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
				     speed, duplex, tx_pause, rx_pause);
}

static const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
	.validate = dsa_port_phylink_validate,
	.mac_select_pcs = dsa_port_phylink_mac_select_pcs,
	.mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
	.mac_config = dsa_port_phylink_mac_config,
	.mac_an_restart = dsa_port_phylink_mac_an_restart,
	.mac_link_down = dsa_port_phylink_mac_link_down,
	.mac_link_up = dsa_port_phylink_mac_link_up,
};
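
/* Illustrative sketch (hypothetical foo_* driver): each shim above dispatches
 * into the corresponding ds->ops hook, e.g. .phylink_mac_link_up is where a
 * driver forces the resolved link parameters into the MAC:
 *
 *	static void foo_phylink_mac_link_up(struct dsa_switch *ds, int port,
 *					    unsigned int mode,
 *					    phy_interface_t interface,
 *					    struct phy_device *phydev,
 *					    int speed, int duplex,
 *					    bool tx_pause, bool rx_pause)
 *	{
 *		foo_port_force_link(ds->priv, port, speed,
 *				    duplex == DUPLEX_FULL,
 *				    tx_pause, rx_pause);
 *	}
 */
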
int dsa_port_phylink_create(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	phy_interface_t mode;
	struct phylink *pl;
	int err;

	err = of_get_phy_mode(dp->dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;

	/* Presence of phylink_mac_link_state or phylink_mac_an_restart is
	 * an indicator of a legacy phylink driver.
	 */
	if (ds->ops->phylink_mac_link_state ||
	    ds->ops->phylink_mac_an_restart)
		dp->pl_config.legacy_pre_march2020 = true;

	if (ds->ops->phylink_get_caps)
		ds->ops->phylink_get_caps(ds, dp->index, &dp->pl_config);

	pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn),
			    mode, &dsa_port_phylink_mac_ops);
	if (IS_ERR(pl)) {
		pr_err("error creating PHYLINK: %ld\n", PTR_ERR(pl));
		return PTR_ERR(pl);
	}

	dp->pl = pl;

	return 0;
}

void dsa_port_phylink_destroy(struct dsa_port *dp)
{
	phylink_destroy(dp->pl);
	dp->pl = NULL;
}

static int dsa_shared_port_setup_phy_of(struct dsa_port *dp, bool enable)
{
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	int err = 0;

	phydev = dsa_port_get_phy_device(dp);
	if (!phydev)
		return 0;

	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	if (enable) {
		err = genphy_resume(phydev);
		if (err < 0)
			goto err_put_dev;

		err = genphy_read_status(phydev);
		if (err < 0)
			goto err_put_dev;
	} else {
		err = genphy_suspend(phydev);
		if (err < 0)
			goto err_put_dev;
	}

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	dev_dbg(ds->dev, "enabled port's phy: %s", phydev_name(phydev));

err_put_dev:
	put_device(&phydev->mdio.dev);
	return err;
}

static int dsa_shared_port_fixed_link_register_of(struct dsa_port *dp)
{
	struct device_node *dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	phy_interface_t mode;
	int err;

	err = of_phy_register_fixed_link(dn);
	if (err) {
		dev_err(ds->dev,
			"failed to register the fixed PHY of port %d\n",
			port);
		return err;
	}

	phydev = of_phy_find_device(dn);

	err = of_get_phy_mode(dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;
	phydev->interface = mode;

	genphy_read_status(phydev);

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	put_device(&phydev->mdio.dev);

	return 0;
}

static int dsa_shared_port_phylink_register(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *port_dn = dp->dn;
	int err;

	dp->pl_config.dev = ds->dev;
	dp->pl_config.type = PHYLINK_DEV;

	err = dsa_port_phylink_create(dp);
	if (err)
		return err;

	err = phylink_of_phy_connect(dp->pl, port_dn, 0);
	if (err && err != -ENODEV) {
		pr_err("could not attach to PHY: %d\n", err);
		goto err_phy_connect;
	}

	return 0;

err_phy_connect:
	dsa_port_phylink_destroy(dp);
	return err;
}

/* During the initial DSA driver migration to OF, port nodes were sometimes
 * added to device trees with no indication of how they should operate from a
 * link management perspective (phy-handle, fixed-link, etc). Additionally, the
 * phy-mode may be absent. The interpretation of these port OF nodes depends on
 * their type.
 *
 * User ports with no phy-handle or fixed-link are expected to connect to an
 * internal PHY located on the ds->slave_mii_bus at an MDIO address equal to
 * the port number. This description is still actively supported.
 *
 * Shared (CPU and DSA) ports with no phy-handle or fixed-link are expected to
 * operate at the maximum speed that their phy-mode is capable of. If the
 * phy-mode is absent, they are expected to operate using the phy-mode
 * supported by the port that gives the highest link speed. It is unspecified
 * if the port should use flow control or not, half duplex or full duplex, or
 * if the phy-mode is a SERDES link, whether in-band autoneg is expected to be
 * enabled or not.
 *
 * In the latter case of shared ports, omitting the link management description
 * from the firmware node is deprecated and strongly discouraged. DSA uses
 * phylink, which rejects the firmware nodes of these ports for lacking
 * required properties.
 *
 * For switches in this table, DSA will skip enforcing validation and will
 * later omit registering a phylink instance for the shared ports, if they lack
 * a fixed-link, a phy-handle, or a managed = "in-band-status" property.
 * It becomes the responsibility of the driver to ensure that these ports
 * operate at the maximum speed (whatever this means) and will interoperate
 * with the DSA master or other cascade port, since phylink methods will not be
 * invoked for them.
 *
 * If you are considering expanding this table for newly introduced switches,
 * think again. It is OK to remove switches from this table if there aren't DT
 * blobs in circulation which rely on defaulting the shared ports.
 */
static const char * const dsa_switches_apply_workarounds[] = {
#if IS_ENABLED(CONFIG_NET_DSA_XRS700X)
	"arrow,xrs7003e",
	"arrow,xrs7003f",
	"arrow,xrs7004e",
	"arrow,xrs7004f",
#endif
#if IS_ENABLED(CONFIG_B53)
	"brcm,bcm5325",
	"brcm,bcm53115",
	"brcm,bcm53125",
	"brcm,bcm53128",
	"brcm,bcm5365",
	"brcm,bcm5389",
	"brcm,bcm5395",
	"brcm,bcm5397",
	"brcm,bcm5398",
	"brcm,bcm53010-srab",
	"brcm,bcm53011-srab",
	"brcm,bcm53012-srab",
	"brcm,bcm53018-srab",
	"brcm,bcm53019-srab",
	"brcm,bcm5301x-srab",
	"brcm,bcm11360-srab",
	"brcm,bcm58522-srab",
	"brcm,bcm58525-srab",
	"brcm,bcm58535-srab",
	"brcm,bcm58622-srab",
	"brcm,bcm58623-srab",
	"brcm,bcm58625-srab",
	"brcm,bcm88312-srab",
	"brcm,cygnus-srab",
	"brcm,nsp-srab",
	"brcm,omega-srab",
	"brcm,bcm3384-switch",
	"brcm,bcm6328-switch",
	"brcm,bcm6368-switch",
	"brcm,bcm63xx-switch",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_BCM_SF2)
	"brcm,bcm7445-switch-v4.0",
	"brcm,bcm7278-switch-v4.0",
	"brcm,bcm7278-switch-v4.8",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_LANTIQ_GSWIP)
	"lantiq,xrx200-gswip",
	"lantiq,xrx300-gswip",
	"lantiq,xrx330-gswip",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6060)
	"marvell,mv88e6060",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6XXX)
	"marvell,mv88e6085",
	"marvell,mv88e6190",
	"marvell,mv88e6250",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON)
	"microchip,ksz8765",
	"microchip,ksz8794",
	"microchip,ksz8795",
	"microchip,ksz8863",
	"microchip,ksz8873",
	"microchip,ksz9477",
	"microchip,ksz9897",
	"microchip,ksz9893",
	"microchip,ksz9563",
	"microchip,ksz8563",
	"microchip,ksz9567",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_SMSC_LAN9303_MDIO)
	"smsc,lan9303-mdio",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_SMSC_LAN9303_I2C)
	"smsc,lan9303-i2c",
#endif
	NULL,
};

static void dsa_shared_port_validate_of(struct dsa_port *dp,
					bool *missing_phy_mode,
					bool *missing_link_description)
{
	struct device_node *dn = dp->dn, *phy_np;
	struct dsa_switch *ds = dp->ds;
	phy_interface_t mode;

	*missing_phy_mode = false;
	*missing_link_description = false;

	if (of_get_phy_mode(dn, &mode)) {
		*missing_phy_mode = true;
		dev_err(ds->dev,
			"OF node %pOF of %s port %d lacks the required \"phy-mode\" property\n",
			dn, dsa_port_is_cpu(dp) ? "CPU" : "DSA", dp->index);
	}

	/* Note: of_phy_is_fixed_link() also returns true for
	 * managed = "in-band-status"
	 */
	if (of_phy_is_fixed_link(dn))
		return;

	phy_np = of_parse_phandle(dn, "phy-handle", 0);
	if (phy_np) {
		of_node_put(phy_np);
		return;
	}

	*missing_link_description = true;

	dev_err(ds->dev,
		"OF node %pOF of %s port %d lacks the required \"phy-handle\", \"fixed-link\" or \"managed\" properties\n",
		dn, dsa_port_is_cpu(dp) ? "CPU" : "DSA", dp->index);
}

int dsa_shared_port_link_register_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	bool missing_link_description;
	bool missing_phy_mode;
	int port = dp->index;

	dsa_shared_port_validate_of(dp, &missing_phy_mode,
				    &missing_link_description);

	if ((missing_phy_mode || missing_link_description) &&
	    !of_device_compatible_match(ds->dev->of_node,
					dsa_switches_apply_workarounds))
		return -EINVAL;

	if (!ds->ops->adjust_link) {
		if (missing_link_description) {
			dev_warn(ds->dev,
				 "Skipping phylink registration for %s port %d\n",
				 dsa_port_is_cpu(dp) ? "CPU" : "DSA", dp->index);
		} else {
			if (ds->ops->phylink_mac_link_down)
				ds->ops->phylink_mac_link_down(ds, port,
					MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);

			return dsa_shared_port_phylink_register(dp);
		}
		return 0;
	}

	dev_warn(ds->dev,
		 "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");

	if (of_phy_is_fixed_link(dp->dn))
		return dsa_shared_port_fixed_link_register_of(dp);
	else
		return dsa_shared_port_setup_phy_of(dp, true);
}

void dsa_shared_port_link_unregister_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->adjust_link && dp->pl) {
		rtnl_lock();
		phylink_disconnect_phy(dp->pl);
		rtnl_unlock();
		dsa_port_phylink_destroy(dp);
		return;
	}

	if (of_phy_is_fixed_link(dp->dn))
		of_phy_deregister_fixed_link(dp->dn);
	else
		dsa_shared_port_setup_phy_of(dp, false);
}

int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_hsr_join)
		return -EOPNOTSUPP;

	dp->hsr_dev = hsr;

	err = ds->ops->port_hsr_join(ds, dp->index, hsr);
	if (err)
		dp->hsr_dev = NULL;

	return err;
}

void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	dp->hsr_dev = NULL;

	if (ds->ops->port_hsr_leave) {
		err = ds->ops->port_hsr_leave(ds, dp->index, hsr);
		if (err)
			dev_err(dp->ds->dev,
				"port %d failed to leave HSR %s: %pe\n",
				dp->index, hsr->name, ERR_PTR(err));
	}
}
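
/* Note on the broadcast argument of the two functions below: dsa_broadcast()
 * differs from dsa_port_notify() in that it notifies every DSA switch tree in
 * the system, not just the tree that @dp belongs to; per the dsa_broadcast()
 * documentation, this supports events such as cross-chip bridging between
 * disjoint trees of tagger-compatible switches.
 */
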
"CPU" : "DSA", dp->index); 1964 } 1965 1966 int dsa_shared_port_link_register_of(struct dsa_port *dp) 1967 { 1968 struct dsa_switch *ds = dp->ds; 1969 bool missing_link_description; 1970 bool missing_phy_mode; 1971 int port = dp->index; 1972 1973 dsa_shared_port_validate_of(dp, &missing_phy_mode, 1974 &missing_link_description); 1975 1976 if ((missing_phy_mode || missing_link_description) && 1977 !of_device_compatible_match(ds->dev->of_node, 1978 dsa_switches_apply_workarounds)) 1979 return -EINVAL; 1980 1981 if (!ds->ops->adjust_link) { 1982 if (missing_link_description) { 1983 dev_warn(ds->dev, 1984 "Skipping phylink registration for %s port %d\n", 1985 dsa_port_is_cpu(dp) ? "CPU" : "DSA", dp->index); 1986 } else { 1987 if (ds->ops->phylink_mac_link_down) 1988 ds->ops->phylink_mac_link_down(ds, port, 1989 MLO_AN_FIXED, PHY_INTERFACE_MODE_NA); 1990 1991 return dsa_shared_port_phylink_register(dp); 1992 } 1993 return 0; 1994 } 1995 1996 dev_warn(ds->dev, 1997 "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n"); 1998 1999 if (of_phy_is_fixed_link(dp->dn)) 2000 return dsa_shared_port_fixed_link_register_of(dp); 2001 else 2002 return dsa_shared_port_setup_phy_of(dp, true); 2003 } 2004 2005 void dsa_shared_port_link_unregister_of(struct dsa_port *dp) 2006 { 2007 struct dsa_switch *ds = dp->ds; 2008 2009 if (!ds->ops->adjust_link && dp->pl) { 2010 rtnl_lock(); 2011 phylink_disconnect_phy(dp->pl); 2012 rtnl_unlock(); 2013 dsa_port_phylink_destroy(dp); 2014 return; 2015 } 2016 2017 if (of_phy_is_fixed_link(dp->dn)) 2018 of_phy_deregister_fixed_link(dp->dn); 2019 else 2020 dsa_shared_port_setup_phy_of(dp, false); 2021 } 2022 2023 int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr) 2024 { 2025 struct dsa_switch *ds = dp->ds; 2026 int err; 2027 2028 if (!ds->ops->port_hsr_join) 2029 return -EOPNOTSUPP; 2030 2031 dp->hsr_dev = hsr; 2032 2033 err = ds->ops->port_hsr_join(ds, dp->index, hsr); 2034 if (err) 2035 dp->hsr_dev = NULL; 2036 2037 return err; 2038 } 2039 2040 void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr) 2041 { 2042 struct dsa_switch *ds = dp->ds; 2043 int err; 2044 2045 dp->hsr_dev = NULL; 2046 2047 if (ds->ops->port_hsr_leave) { 2048 err = ds->ops->port_hsr_leave(ds, dp->index, hsr); 2049 if (err) 2050 dev_err(dp->ds->dev, 2051 "port %d failed to leave HSR %s: %pe\n", 2052 dp->index, hsr->name, ERR_PTR(err)); 2053 } 2054 } 2055 2056 int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast) 2057 { 2058 struct dsa_notifier_tag_8021q_vlan_info info = { 2059 .dp = dp, 2060 .vid = vid, 2061 }; 2062 2063 if (broadcast) 2064 return dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info); 2065 2066 return dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info); 2067 } 2068 2069 void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast) 2070 { 2071 struct dsa_notifier_tag_8021q_vlan_info info = { 2072 .dp = dp, 2073 .vid = vid, 2074 }; 2075 int err; 2076 2077 if (broadcast) 2078 err = dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info); 2079 else 2080 err = dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info); 2081 if (err) 2082 dev_err(dp->ds->dev, 2083 "port %d failed to notify tag_8021q VLAN %d deletion: %pe\n", 2084 dp->index, vid, ERR_PTR(err)); 2085 } 2086