// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch port
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/notifier.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "dsa_priv.h"

/**
 * dsa_port_notify - Notify the switching fabric of changes to a port
 * @dp: port on which change occurred
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Notify all switches in the DSA tree that this port's switch belongs to,
 * including this switch itself, of an event. Allows the other switches to
 * reconfigure themselves for cross-chip operations. Can also be used to
 * reconfigure ports without net_devices (CPU ports, DSA links) whenever
 * a user port's state changes.
 */
static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
{
	return dsa_tree_notify(dp->ds->dst, e, v);
}

static void dsa_port_notify_bridge_fdb_flush(const struct dsa_port *dp, u16 vid)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct switchdev_notifier_fdb_info info = {
		.vid = vid,
	};

	/* When the port becomes standalone it has already left the bridge.
	 * Don't notify the bridge in that case.
	 */
	if (!brport_dev)
		return;

	call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
				 brport_dev, &info.info, NULL);
}

static void dsa_port_fast_age(const struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_fast_age)
		return;

	ds->ops->port_fast_age(ds, dp->index);

	/* flush all VLANs */
	dsa_port_notify_bridge_fdb_flush(dp, 0);
}

static int dsa_port_vlan_fast_age(const struct dsa_port *dp, u16 vid)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_vlan_fast_age)
		return -EOPNOTSUPP;

	err = ds->ops->port_vlan_fast_age(ds, dp->index, vid);

	if (!err)
		dsa_port_notify_bridge_fdb_flush(dp, vid);

	return err;
}

static int dsa_port_msti_fast_age(const struct dsa_port *dp, u16 msti)
{
	DECLARE_BITMAP(vids, VLAN_N_VID) = { 0 };
	int err, vid;

	err = br_mst_get_info(dsa_port_bridge_dev_get(dp), msti, vids);
	if (err)
		return err;

	for_each_set_bit(vid, vids, VLAN_N_VID) {
		err = dsa_port_vlan_fast_age(dp, vid);
		if (err)
			return err;
	}

	return 0;
}

static bool dsa_port_can_configure_learning(struct dsa_port *dp)
{
	struct switchdev_brport_flags flags = {
		.mask = BR_LEARNING,
	};
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_bridge_flags || !ds->ops->port_pre_bridge_flags)
		return false;

	err = ds->ops->port_pre_bridge_flags(ds, dp->index, flags, NULL);
	return !err;
}

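/* A minimal sketch (not part of this file) of how a driver might answer the
 * probe above: dsa_port_can_configure_learning() asks the driver, via
 * ->port_pre_bridge_flags() with a BR_LEARNING-only mask, whether learning
 * is configurable at all. A hypothetical driver simply rejects any bit it
 * cannot offload. All "foo" names below are illustrative assumptions.
 */
#if 0	/* example sketch, not compiled */
static int foo_port_pre_bridge_flags(struct dsa_switch *ds, int port,
				     struct switchdev_brport_flags flags,
				     struct netlink_ext_ack *extack)
{
	/* Only learning and unicast flooding are offloadable on this
	 * hypothetical chip.
	 */
	if (flags.mask & ~(BR_LEARNING | BR_FLOOD))
		return -EINVAL;

	return 0;
}
#endif
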
int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_stp_state_set)
		return -EOPNOTSUPP;

	ds->ops->port_stp_state_set(ds, port, state);

	if (!dsa_port_can_configure_learning(dp) ||
	    (do_fast_age && dp->learning)) {
		/* Fast age FDB entries or flush appropriate forwarding database
		 * for the given port, if we are moving it from Learning or
		 * Forwarding state, to Disabled or Blocking or Listening state.
		 * Ports that were standalone before the STP state change don't
		 * need to fast age the FDB, since address learning is off in
		 * standalone mode.
		 */

		if ((dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING) &&
		    (state == BR_STATE_DISABLED ||
		     state == BR_STATE_BLOCKING ||
		     state == BR_STATE_LISTENING))
			dsa_port_fast_age(dp);
	}

	dp->stp_state = state;

	return 0;
}

static void dsa_port_set_state_now(struct dsa_port *dp, u8 state,
				   bool do_fast_age)
{
	int err;

	err = dsa_port_set_state(dp, state, do_fast_age);
	if (err)
		pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
}

int dsa_port_set_mst_state(struct dsa_port *dp,
			   const struct switchdev_mst_state *state,
			   struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	u8 prev_state;
	int err;

	if (!ds->ops->port_mst_state_set)
		return -EOPNOTSUPP;

	err = br_mst_get_state(dsa_port_to_bridge_port(dp), state->msti,
			       &prev_state);
	if (err)
		return err;

	err = ds->ops->port_mst_state_set(ds, dp->index, state);
	if (err)
		return err;

	if (!(dp->learning &&
	      (prev_state == BR_STATE_LEARNING ||
	       prev_state == BR_STATE_FORWARDING) &&
	      (state->state == BR_STATE_DISABLED ||
	       state->state == BR_STATE_BLOCKING ||
	       state->state == BR_STATE_LISTENING)))
		return 0;

	err = dsa_port_msti_fast_age(dp, state->msti);
	if (err)
		NL_SET_ERR_MSG_MOD(extack,
				   "Unable to flush associated VLANs");

	return 0;
}

int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	int err;

	if (ds->ops->port_enable) {
		err = ds->ops->port_enable(ds, port, phy);
		if (err)
			return err;
	}

	if (!dp->bridge)
		dsa_port_set_state_now(dp, BR_STATE_FORWARDING, false);

	if (dp->pl)
		phylink_start(dp->pl);

	return 0;
}

int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
{
	int err;

	rtnl_lock();
	err = dsa_port_enable_rt(dp, phy);
	rtnl_unlock();

	return err;
}

void dsa_port_disable_rt(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (dp->pl)
		phylink_stop(dp->pl);

	if (!dp->bridge)
		dsa_port_set_state_now(dp, BR_STATE_DISABLED, false);

	if (ds->ops->port_disable)
		ds->ops->port_disable(ds, port);
}

void dsa_port_disable(struct dsa_port *dp)
{
	rtnl_lock();
	dsa_port_disable_rt(dp);
	rtnl_unlock();
}

static void dsa_port_reset_vlan_filtering(struct dsa_port *dp,
					  struct dsa_bridge bridge)
{
	struct netlink_ext_ack extack = {0};
	bool change_vlan_filtering = false;
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	bool vlan_filtering;
	int err;

	if (ds->needs_standalone_vlan_filtering &&
	    !br_vlan_enabled(bridge.dev)) {
		change_vlan_filtering = true;
		vlan_filtering = true;
	} else if (!ds->needs_standalone_vlan_filtering &&
		   br_vlan_enabled(bridge.dev)) {
		change_vlan_filtering = true;
		vlan_filtering = false;
	}

	/* If the bridge was vlan_filtering, the bridge core doesn't trigger an
	 * event for changing vlan_filtering setting upon slave ports leaving
	 * it. That is a good thing, because that lets us handle it and also
	 * handle the case where the switch's vlan_filtering setting is global
	 * (not per port). When that happens, the correct moment to trigger the
	 * vlan_filtering callback is only when the last port leaves the last
	 * VLAN-aware bridge.
	 */
	if (change_vlan_filtering && ds->vlan_filtering_is_global) {
		dsa_switch_for_each_port(other_dp, ds) {
			struct net_device *br = dsa_port_bridge_dev_get(other_dp);

			if (br && br_vlan_enabled(br)) {
				change_vlan_filtering = false;
				break;
			}
		}
	}

	if (!change_vlan_filtering)
		return;

	err = dsa_port_vlan_filtering(dp, vlan_filtering, &extack);
	if (extack._msg) {
		dev_err(ds->dev, "port %d: %s\n", dp->index,
			extack._msg);
	}
	if (err && err != -EOPNOTSUPP) {
		dev_err(ds->dev,
			"port %d failed to reset VLAN filtering to %d: %pe\n",
			dp->index, vlan_filtering, ERR_PTR(err));
	}
}

static int dsa_port_inherit_brport_flags(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD | BR_PORT_LOCKED;
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);

		if (br_port_flag_is_set(brport_dev, BIT(flag)))
			flags.val = BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, extack);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return 0;
}

static void dsa_port_clear_brport_flags(struct dsa_port *dp)
{
	const unsigned long val = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD | BR_PORT_LOCKED;
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);
		flags.val = val & BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, NULL);
		if (err && err != -EOPNOTSUPP)
			dev_err(dp->ds->dev,
				"failed to clear bridge port flag %lu: %pe\n",
				flags.val, ERR_PTR(err));
	}
}

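/* Both helpers above push flags down one bit at a time, so a driver's
 * ->port_bridge_flags() only ever sees a single bit set in flags.mask.
 * A minimal sketch of such a callback for a hypothetical "foo" driver
 * (the foo_* helpers are illustrative assumptions, not real functions):
 */
#if 0	/* example sketch, not compiled */
static int foo_port_bridge_flags(struct dsa_switch *ds, int port,
				 struct switchdev_brport_flags flags,
				 struct netlink_ext_ack *extack)
{
	/* Act only on the bit being changed; flags.val carries its value */
	if (flags.mask & BR_LEARNING)
		foo_port_set_learning(ds, port, !!(flags.val & BR_LEARNING));

	if (flags.mask & BR_FLOOD)
		foo_port_set_ucast_flood(ds, port, !!(flags.val & BR_FLOOD));

	return 0;
}
#endif
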
static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	int err;

	err = dsa_port_inherit_brport_flags(dp, extack);
	if (err)
		return err;

	err = dsa_port_set_state(dp, br_port_get_stp_state(brport_dev), false);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_vlan_filtering(dp, br_vlan_enabled(br), extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_ageing_time(dp, br_get_ageing_time(br));
	if (err && err != -EOPNOTSUPP)
		return err;

	return 0;
}

static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp,
					    struct dsa_bridge bridge)
{
	/* Configure the port for standalone mode (no address learning,
	 * flood everything).
	 * The bridge only emits SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS events
	 * when the user requests it through netlink or sysfs, but not
	 * automatically at port join or leave, so we need to handle resetting
	 * the brport flags ourselves. We actually prefer it that way, because
	 * otherwise some setups might never get the notification they need:
	 * for example, when a port leaves a LAG that offloads the bridge, the
	 * port becomes standalone, but as far as the bridge is concerned, no
	 * port ever left.
	 */
	dsa_port_clear_brport_flags(dp);

	/* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer,
	 * so allow it to be in BR_STATE_FORWARDING to be kept functional
	 */
	dsa_port_set_state_now(dp, BR_STATE_FORWARDING, true);

	dsa_port_reset_vlan_filtering(dp, bridge);

	/* Ageing time may be global to the switch chip, so don't change it
	 * here because we have no good reason (or value) to change it to.
	 */
}

static int dsa_port_bridge_create(struct dsa_port *dp,
				  struct net_device *br,
				  struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_bridge *bridge;

	bridge = dsa_tree_bridge_find(ds->dst, br);
	if (bridge) {
		refcount_inc(&bridge->refcount);
		dp->bridge = bridge;
		return 0;
	}

	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return -ENOMEM;

	refcount_set(&bridge->refcount, 1);

	bridge->dev = br;

	bridge->num = dsa_bridge_num_get(br, ds->max_num_bridges);
	if (ds->max_num_bridges && !bridge->num) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Range of offloadable bridges exceeded");
		kfree(bridge);
		return -EOPNOTSUPP;
	}

	dp->bridge = bridge;

	return 0;
}

static void dsa_port_bridge_destroy(struct dsa_port *dp,
				    const struct net_device *br)
{
	struct dsa_bridge *bridge = dp->bridge;

	dp->bridge = NULL;

	if (!refcount_dec_and_test(&bridge->refcount))
		return;

	if (bridge->num)
		dsa_bridge_num_put(br, bridge->num);

	kfree(bridge);
}

static bool dsa_port_supports_mst(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	return ds->ops->vlan_msti_set &&
		ds->ops->port_mst_state_set &&
		ds->ops->port_vlan_fast_age &&
		dsa_port_can_configure_learning(dp);
}

int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
			 struct netlink_ext_ack *extack)
{
	struct dsa_notifier_bridge_info info = {
		.dp = dp,
		.extack = extack,
	};
	struct net_device *dev = dp->slave;
	struct net_device *brport_dev;
	int err;

	if (br_mst_enabled(br) && !dsa_port_supports_mst(dp))
		return -EOPNOTSUPP;

	/* Here the interface is already bridged. Reflect the current
	 * configuration so that drivers can program their chips accordingly.
	 */
	err = dsa_port_bridge_create(dp, br, extack);
	if (err)
		return err;

	brport_dev = dsa_port_to_bridge_port(dp);

	info.bridge = *dp->bridge;
	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);
	if (err)
		goto out_rollback;

	/* Drivers which support bridge TX forwarding should set this */
	dp->bridge->tx_fwd_offload = info.tx_fwd_offload;

	err = switchdev_bridge_port_offload(brport_dev, dev, dp,
					    &dsa_slave_switchdev_notifier,
					    &dsa_slave_switchdev_blocking_notifier,
					    dp->bridge->tx_fwd_offload, extack);
	if (err)
		goto out_rollback_unbridge;

	err = dsa_port_switchdev_sync_attrs(dp, extack);
	if (err)
		goto out_rollback_unoffload;

	return 0;

out_rollback_unoffload:
	switchdev_bridge_port_unoffload(brport_dev, dp,
					&dsa_slave_switchdev_notifier,
					&dsa_slave_switchdev_blocking_notifier);
	dsa_flush_workqueue();
out_rollback_unbridge:
	dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
out_rollback:
	dsa_port_bridge_destroy(dp, br);
	return err;
}

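/* The dsa_broadcast() above reaches the cross-chip notifier in switch.c,
 * which in turn invokes the driver's ->port_bridge_join() for the affected
 * port. A rough sketch of a driver that opts in to bridge TX forwarding
 * offload, assuming the dsa_switch_ops signature of roughly this kernel
 * version; the "foo" names are illustrative assumptions:
 */
#if 0	/* example sketch, not compiled */
static int foo_port_bridge_join(struct dsa_switch *ds, int port,
				struct dsa_bridge bridge,
				bool *tx_fwd_offload,
				struct netlink_ext_ack *extack)
{
	int err;

	/* Make the port a member of the hardware forwarding domain that
	 * represents this bridge (chip-specific, elided here).
	 */
	err = foo_port_set_forwarding_domain(ds, port, bridge.num);
	if (err)
		return err;

	/* Tell DSA that skbs may be sent with a single "forward from
	 * bridge" tag instead of one from_cpu copy per destination port.
	 */
	*tx_fwd_offload = true;

	return 0;
}
#endif
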
void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);

	/* Don't try to unoffload something that is not offloaded */
	if (!brport_dev)
		return;

	switchdev_bridge_port_unoffload(brport_dev, dp,
					&dsa_slave_switchdev_notifier,
					&dsa_slave_switchdev_blocking_notifier);

	dsa_flush_workqueue();
}

void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct dsa_notifier_bridge_info info = {
		.dp = dp,
	};
	int err;

	/* If the port could not be offloaded to begin with, then
	 * there is nothing to do.
	 */
	if (!dp->bridge)
		return;

	info.bridge = *dp->bridge;

	/* Here the port is already unbridged. Reflect the current configuration
	 * so that drivers can program their chips accordingly.
	 */
	dsa_port_bridge_destroy(dp, br);

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify DSA_NOTIFIER_BRIDGE_LEAVE: %pe\n",
			dp->index, ERR_PTR(err));

	dsa_port_switchdev_unsync_attrs(dp, info.bridge);
}

int dsa_port_lag_change(struct dsa_port *dp,
			struct netdev_lag_lower_state_info *linfo)
{
	struct dsa_notifier_lag_info info = {
		.dp = dp,
	};
	bool tx_enabled;

	if (!dp->lag)
		return 0;

	/* On statically configured aggregates (e.g. loadbalance
	 * without LACP) ports will always be tx_enabled, even if the
	 * link is down. Thus we require both link_up and tx_enabled
	 * in order to include it in the tx set.
	 */
	tx_enabled = linfo->link_up && linfo->tx_enabled;

	if (tx_enabled == dp->lag_tx_enabled)
		return 0;

	dp->lag_tx_enabled = tx_enabled;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
}

static int dsa_port_lag_create(struct dsa_port *dp,
			       struct net_device *lag_dev)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_lag *lag;

	lag = dsa_tree_lag_find(ds->dst, lag_dev);
	if (lag) {
		refcount_inc(&lag->refcount);
		dp->lag = lag;
		return 0;
	}

	lag = kzalloc(sizeof(*lag), GFP_KERNEL);
	if (!lag)
		return -ENOMEM;

	refcount_set(&lag->refcount, 1);
	mutex_init(&lag->fdb_lock);
	INIT_LIST_HEAD(&lag->fdbs);
	lag->dev = lag_dev;
	dsa_lag_map(ds->dst, lag);
	dp->lag = lag;

	return 0;
}

static void dsa_port_lag_destroy(struct dsa_port *dp)
{
	struct dsa_lag *lag = dp->lag;

	dp->lag = NULL;
	dp->lag_tx_enabled = false;

	if (!refcount_dec_and_test(&lag->refcount))
		return;

	WARN_ON(!list_empty(&lag->fdbs));
	dsa_lag_unmap(dp->ds->dst, lag);
	kfree(lag);
}

int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
		      struct netdev_lag_upper_info *uinfo,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_lag_info info = {
		.dp = dp,
		.info = uinfo,
	};
	struct net_device *bridge_dev;
	int err;

	err = dsa_port_lag_create(dp, lag_dev);
	if (err)
		goto err_lag_create;

	info.lag = *dp->lag;
	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
	if (err)
		goto err_lag_join;

	bridge_dev = netdev_master_upper_dev_get(lag_dev);
	if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
		return 0;

	err = dsa_port_bridge_join(dp, bridge_dev, extack);
	if (err)
		goto err_bridge_join;

	return 0;

err_bridge_join:
	dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
err_lag_join:
	dsa_port_lag_destroy(dp);
err_lag_create:
	return err;
}

void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);

	if (br)
		dsa_port_pre_bridge_leave(dp, br);
}

void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	struct dsa_notifier_lag_info info = {
		.dp = dp,
	};
	int err;

	if (!dp->lag)
		return;

	/* Port might have been part of a LAG that in turn was
	 * attached to a bridge.
	 */
	if (br)
		dsa_port_bridge_leave(dp, br);

	info.lag = *dp->lag;

	dsa_port_lag_destroy(dp);

	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify DSA_NOTIFIER_LAG_LEAVE: %pe\n",
			dp->index, ERR_PTR(err));
}

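/* Drivers see the LAG join via ->port_lag_join() and typically refuse TX
 * balancing policies their hardware cannot implement. A rough, hypothetical
 * sketch; the exact dsa_switch_ops signature varies between kernel versions,
 * and the "foo" names are illustrative assumptions:
 */
#if 0	/* example sketch, not compiled */
static int foo_port_lag_join(struct dsa_switch *ds, int port,
			     struct dsa_lag lag,
			     struct netdev_lag_upper_info *info)
{
	/* Only hash-based balancing can be offloaded by this chip */
	if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
		return -EOPNOTSUPP;

	return foo_port_set_trunk(ds, port, lag.id);
}
#endif
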
/* Must be called under rcu_read_lock() */
static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
					      bool vlan_filtering,
					      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	int err;

	/* VLAN awareness was off, so the question is "can we turn it on".
	 * We may have had 8021q uppers, those need to go. Make sure we don't
	 * enter an inconsistent state: deny changing the VLAN awareness state
	 * as long as we have 8021q uppers.
	 */
	if (vlan_filtering && dsa_port_is_user(dp)) {
		struct net_device *br = dsa_port_bridge_dev_get(dp);
		struct net_device *upper_dev, *slave = dp->slave;
		struct list_head *iter;

		netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
			struct bridge_vlan_info br_info;
			u16 vid;

			if (!is_vlan_dev(upper_dev))
				continue;

			vid = vlan_dev_vlan_id(upper_dev);

			/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
			 * device, respectively the VID, is not found. Returning
			 * 0 means success, which for us here is a failure.
			 */
			err = br_vlan_get_info(br, vid, &br_info);
			if (err == 0) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Must first remove VLAN uppers having VIDs also present in bridge");
				return false;
			}
		}
	}

	if (!ds->vlan_filtering_is_global)
		return true;

	/* For cases where enabling/disabling VLAN awareness is global to the
	 * switch, we need to handle the case where multiple bridges span
	 * different ports of the same switch device and one of them has a
	 * different setting than what is being requested.
	 */
	dsa_switch_for_each_port(other_dp, ds) {
		struct net_device *other_br = dsa_port_bridge_dev_get(other_dp);

		/* If it's the same bridge, it also has same
		 * vlan_filtering setting => no need to check
		 */
		if (!other_br || other_br == dsa_port_bridge_dev_get(dp))
			continue;

		if (br_vlan_enabled(other_br) != vlan_filtering) {
			NL_SET_ERR_MSG_MOD(extack,
					   "VLAN filtering is a global setting");
			return false;
		}
	}
	return true;
}

int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
			    struct netlink_ext_ack *extack)
{
	bool old_vlan_filtering = dsa_port_is_vlan_filtering(dp);
	struct dsa_switch *ds = dp->ds;
	bool apply;
	int err;

	if (!ds->ops->port_vlan_filtering)
		return -EOPNOTSUPP;

	/* We are called from dsa_slave_switchdev_blocking_event(),
	 * which is not under rcu_read_lock(), unlike
	 * dsa_slave_switchdev_event().
	 */
	rcu_read_lock();
	apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering, extack);
	rcu_read_unlock();
	if (!apply)
		return -EINVAL;

	if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
		return 0;

	err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering,
					   extack);
	if (err)
		return err;

	if (ds->vlan_filtering_is_global) {
		struct dsa_port *other_dp;

		ds->vlan_filtering = vlan_filtering;

		dsa_switch_for_each_user_port(other_dp, ds) {
			struct net_device *slave = other_dp->slave;

			/* We might be called in the unbind path, so not
			 * all slave devices might still be registered.
			 */
			if (!slave)
				continue;

			err = dsa_slave_manage_vlan_filtering(slave,
							      vlan_filtering);
			if (err)
				goto restore;
		}
	} else {
		dp->vlan_filtering = vlan_filtering;

		err = dsa_slave_manage_vlan_filtering(dp->slave,
						      vlan_filtering);
		if (err)
			goto restore;
	}

	return 0;

restore:
	ds->ops->port_vlan_filtering(ds, dp->index, old_vlan_filtering, NULL);

	if (ds->vlan_filtering_is_global)
		ds->vlan_filtering = old_vlan_filtering;
	else
		dp->vlan_filtering = old_vlan_filtering;

	return err;
}

/* This enforces legacy behavior for switch drivers which assume they can't
 * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
 */
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	struct dsa_switch *ds = dp->ds;

	if (!br)
		return false;

	return !ds->configure_vlan_while_not_filtering && !br_vlan_enabled(br);
}

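/* For chips where VLAN awareness is a single global knob, a driver sets
 * ds->vlan_filtering_is_global = true at setup time and implements
 * ->port_vlan_filtering() by ignoring the port argument. A minimal,
 * hypothetical sketch (illustrative "foo" names):
 */
#if 0	/* example sketch, not compiled */
static int foo_port_vlan_filtering(struct dsa_switch *ds, int port,
				   bool vlan_filtering,
				   struct netlink_ext_ack *extack)
{
	/* One register controls 802.1Q awareness for the whole switch;
	 * dsa_port_vlan_filtering() has already verified that all bridges
	 * spanning this switch agree on the setting.
	 */
	return foo_write_global_8021q_mode(ds, vlan_filtering);
}
#endif
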
int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
	unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
	struct dsa_notifier_ageing_time_info info;
	int err;

	info.ageing_time = ageing_time;

	err = dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
	if (err)
		return err;

	dp->ageing_time = ageing_time;

	return 0;
}

int dsa_port_mst_enable(struct dsa_port *dp, bool on,
			struct netlink_ext_ack *extack)
{
	if (on && !dsa_port_supports_mst(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "Hardware does not support MST");
		return -EINVAL;
	}

	return 0;
}

int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
			      struct switchdev_brport_flags flags,
			      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_pre_bridge_flags)
		return -EINVAL;

	return ds->ops->port_pre_bridge_flags(ds, dp->index, flags, extack);
}

int dsa_port_bridge_flags(struct dsa_port *dp,
			  struct switchdev_brport_flags flags,
			  struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_bridge_flags)
		return -EOPNOTSUPP;

	err = ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
	if (err)
		return err;

	if (flags.mask & BR_LEARNING) {
		bool learning = flags.val & BR_LEARNING;

		if (learning == dp->learning)
			return 0;

		if ((dp->learning && !learning) &&
		    (dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING))
			dsa_port_fast_age(dp);

		dp->learning = learning;
	}

	return 0;
}

void dsa_port_set_host_flood(struct dsa_port *dp, bool uc, bool mc)
{
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->port_set_host_flood)
		ds->ops->port_set_host_flood(ds, dp->index, uc, mc);
}

int dsa_port_vlan_msti(struct dsa_port *dp,
		       const struct switchdev_vlan_msti *msti)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->vlan_msti_set)
		return -EOPNOTSUPP;

	return ds->ops->vlan_msti_set(ds, *dp->bridge, msti);
}

int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu)
{
	struct dsa_notifier_mtu_info info = {
		.dp = dp,
		.mtu = new_mtu,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
}

int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	/* Refcounting takes bridge.num as a key, and should be global for all
	 * bridges in the absence of FDB isolation, and per bridge otherwise.
	 * Force the bridge.num to zero here in the absence of FDB isolation.
	 */
	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
}

int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
}

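/* The dsa_db argument is what lets drivers implement FDB isolation: the
 * same {addr, vid} pair may be installed once per bridge (keyed by
 * db.bridge.num) or once globally when the driver does not isolate.
 * A hypothetical driver-side consumer might look like this (illustrative
 * "foo" names, not real functions):
 */
#if 0	/* example sketch, not compiled */
static int foo_port_fdb_add(struct dsa_switch *ds, int port,
			    const unsigned char *addr, u16 vid,
			    struct dsa_db db)
{
	/* Without FDB isolation, DSA already forced db.bridge.num to 0,
	 * so all bridges share one address database in hardware.
	 */
	return foo_fdb_write(ds, port, addr, vid, db.bridge.num);
}
#endif
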
static int dsa_port_host_fdb_add(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
}

int dsa_port_standalone_host_fdb_add(struct dsa_port *dp,
				     const unsigned char *addr, u16 vid)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_fdb_add(dp, addr, vid, db);
}

int dsa_port_bridge_host_fdb_add(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid)
{
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	/* Avoid a call to __dev_set_promiscuity() on the master, which
	 * requires rtnl_lock(), since we can't guarantee that is held here,
	 * and we can't take it either.
	 */
	if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) {
		err = dev_uc_add(cpu_dp->master, addr);
		if (err)
			return err;
	}

	return dsa_port_host_fdb_add(dp, addr, vid, db);
}

static int dsa_port_host_fdb_del(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
}

int dsa_port_standalone_host_fdb_del(struct dsa_port *dp,
				     const unsigned char *addr, u16 vid)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_fdb_del(dp, addr, vid, db);
}

int dsa_port_bridge_host_fdb_del(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid)
{
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) {
		err = dev_uc_del(cpu_dp->master, addr);
		if (err)
			return err;
	}

	return dsa_port_host_fdb_del(dp, addr, vid, db);
}

int dsa_port_lag_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			 u16 vid)
{
	struct dsa_notifier_lag_fdb_info info = {
		.lag = dp->lag,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_ADD, &info);
}

int dsa_port_lag_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			 u16 vid)
{
	struct dsa_notifier_lag_fdb_info info = {
		.lag = dp->lag,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_DEL, &info);
}

int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_fdb_dump)
		return -EOPNOTSUPP;

	return ds->ops->port_fdb_dump(ds, port, cb, data);
}

int dsa_port_mdb_add(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
}

int dsa_port_mdb_del(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
}

static int dsa_port_host_mdb_add(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct dsa_db db)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_ADD, &info);
}

int dsa_port_standalone_host_mdb_add(const struct dsa_port *dp,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_mdb_add(dp, mdb, db);
}

int dsa_port_bridge_host_mdb_add(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	err = dev_mc_add(cpu_dp->master, mdb->addr);
	if (err)
		return err;

	return dsa_port_host_mdb_add(dp, mdb, db);
}

static int dsa_port_host_mdb_del(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct dsa_db db)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_DEL, &info);
}

int dsa_port_standalone_host_mdb_del(const struct dsa_port *dp,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_mdb_del(dp, mdb, db);
}

int dsa_port_bridge_host_mdb_del(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	err = dev_mc_del(cpu_dp->master, mdb->addr);
	if (err)
		return err;

	return dsa_port_host_mdb_del(dp, mdb, db);
}

int dsa_port_vlan_add(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
		.extack = extack,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
}

int dsa_port_vlan_del(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
}

int dsa_port_host_vlan_add(struct dsa_port *dp,
			   const struct switchdev_obj_port_vlan *vlan,
			   struct netlink_ext_ack *extack)
{
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
		.extack = extack,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_ADD, &info);
	if (err && err != -EOPNOTSUPP)
		return err;

	vlan_vid_add(cpu_dp->master, htons(ETH_P_8021Q), vlan->vid);

	return err;
}

int dsa_port_host_vlan_del(struct dsa_port *dp,
			   const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_DEL, &info);
	if (err && err != -EOPNOTSUPP)
		return err;

	vlan_vid_del(cpu_dp->master, htons(ETH_P_8021Q), vlan->vid);

	return err;
}

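/* VLAN objects also reach the driver through the notifier chain, as
 * ->port_vlan_add(). A rough, hypothetical sketch of the receiving end;
 * the "foo" names are illustrative assumptions, not part of this file:
 */
#if 0	/* example sketch, not compiled */
static int foo_port_vlan_add(struct dsa_switch *ds, int port,
			     const struct switchdev_obj_port_vlan *vlan,
			     struct netlink_ext_ack *extack)
{
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;

	/* Join the port to the VLAN's member set, with egress tagging
	 * and default VID derived from the bridge-provided flags.
	 */
	return foo_vlan_table_join(ds, port, vlan->vid, untagged, pvid);
}
#endif
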
int dsa_port_mrp_add(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_add)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_add(ds, dp->index, mrp);
}

int dsa_port_mrp_del(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_del)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_del(ds, dp->index, mrp);
}

int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_add_ring_role)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_add_ring_role(ds, dp->index, mrp);
}

int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_del_ring_role)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_del_ring_role(ds, dp->index, mrp);
}

void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
			       const struct dsa_device_ops *tag_ops)
{
	cpu_dp->rcv = tag_ops->rcv;
	cpu_dp->tag_ops = tag_ops;
}

static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
{
	struct device_node *phy_dn;
	struct phy_device *phydev;

	phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
	if (!phy_dn)
		return NULL;

	phydev = of_phy_find_device(phy_dn);
	if (!phydev) {
		of_node_put(phy_dn);
		return ERR_PTR(-EPROBE_DEFER);
	}

	of_node_put(phy_dn);
	return phydev;
}

static void dsa_port_phylink_validate(struct phylink_config *config,
				      unsigned long *supported,
				      struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_validate) {
		if (config->mac_capabilities)
			phylink_generic_validate(config, supported, state);
		return;
	}

	ds->ops->phylink_validate(ds, dp->index, supported, state);
}

static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
					       struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;
	int err;

	/* Only called for inband modes */
	if (!ds->ops->phylink_mac_link_state) {
		state->link = 0;
		return;
	}

	err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
	if (err < 0) {
		dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
			dp->index, err);
		state->link = 0;
	}
}

static struct phylink_pcs *
dsa_port_phylink_mac_select_pcs(struct phylink_config *config,
				phy_interface_t interface)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct phylink_pcs *pcs = ERR_PTR(-EOPNOTSUPP);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->phylink_mac_select_pcs)
		pcs = ds->ops->phylink_mac_select_pcs(ds, dp->index, interface);

	return pcs;
}

static void dsa_port_phylink_mac_config(struct phylink_config *config,
					unsigned int mode,
					const struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_config)
		return;

	ds->ops->phylink_mac_config(ds, dp->index, mode, state);
}

static void dsa_port_phylink_mac_an_restart(struct phylink_config *config)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_an_restart)
		return;

	ds->ops->phylink_mac_an_restart(ds, dp->index);
}

static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
					   unsigned int mode,
					   phy_interface_t interface)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct phy_device *phydev = NULL;
	struct dsa_switch *ds = dp->ds;

	if (dsa_port_is_user(dp))
		phydev = dp->slave->phydev;

	if (!ds->ops->phylink_mac_link_down) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
}

static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
					 struct phy_device *phydev,
					 unsigned int mode,
					 phy_interface_t interface,
					 int speed, int duplex,
					 bool tx_pause, bool rx_pause)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_link_up) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
				     speed, duplex, tx_pause, rx_pause);
}

static const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
	.validate = dsa_port_phylink_validate,
	.mac_select_pcs = dsa_port_phylink_mac_select_pcs,
	.mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
	.mac_config = dsa_port_phylink_mac_config,
	.mac_an_restart = dsa_port_phylink_mac_an_restart,
	.mac_link_down = dsa_port_phylink_mac_link_down,
	.mac_link_up = dsa_port_phylink_mac_link_up,
};

int dsa_port_phylink_create(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	phy_interface_t mode;
	int err;

	err = of_get_phy_mode(dp->dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;

	/* Presence of phylink_mac_link_state or phylink_mac_an_restart is
	 * an indicator of a legacy phylink driver.
	 */
	if (ds->ops->phylink_mac_link_state ||
	    ds->ops->phylink_mac_an_restart)
		dp->pl_config.legacy_pre_march2020 = true;

	if (ds->ops->phylink_get_caps)
		ds->ops->phylink_get_caps(ds, dp->index, &dp->pl_config);

	dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn),
				mode, &dsa_port_phylink_mac_ops);
	if (IS_ERR(dp->pl)) {
		pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
		return PTR_ERR(dp->pl);
	}

	return 0;
}

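/* Modern (non-legacy) drivers describe their MACs declaratively through
 * ->phylink_get_caps() instead of ->phylink_validate(), which lets the
 * phylink_generic_validate() fallback above do the filtering. A hypothetical
 * sketch with made-up capabilities (illustrative "foo" driver):
 */
#if 0	/* example sketch, not compiled */
static void foo_phylink_get_caps(struct dsa_switch *ds, int port,
				 struct phylink_config *config)
{
	/* This imaginary chip does 10/100 half+full duplex and gigabit
	 * full duplex, with pause, on RGMII or SGMII ports.
	 */
	config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
				   MAC_10 | MAC_100 | MAC_1000FD;

	__set_bit(PHY_INTERFACE_MODE_RGMII_ID,
		  config->supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_SGMII,
		  config->supported_interfaces);
}
#endif
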
static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable)
{
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	int err = 0;

	phydev = dsa_port_get_phy_device(dp);
	if (!phydev)
		return 0;

	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	if (enable) {
		err = genphy_resume(phydev);
		if (err < 0)
			goto err_put_dev;

		err = genphy_read_status(phydev);
		if (err < 0)
			goto err_put_dev;
	} else {
		err = genphy_suspend(phydev);
		if (err < 0)
			goto err_put_dev;
	}

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	dev_dbg(ds->dev, "enabled port's phy: %s", phydev_name(phydev));

err_put_dev:
	put_device(&phydev->mdio.dev);
	return err;
}

static int dsa_port_fixed_link_register_of(struct dsa_port *dp)
{
	struct device_node *dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	phy_interface_t mode;
	int err;

	err = of_phy_register_fixed_link(dn);
	if (err) {
		dev_err(ds->dev,
			"failed to register the fixed PHY of port %d\n",
			port);
		return err;
	}

	phydev = of_phy_find_device(dn);

	err = of_get_phy_mode(dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;
	phydev->interface = mode;

	genphy_read_status(phydev);

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	put_device(&phydev->mdio.dev);

	return 0;
}

static int dsa_port_phylink_register(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *port_dn = dp->dn;
	int err;

	dp->pl_config.dev = ds->dev;
	dp->pl_config.type = PHYLINK_DEV;

	err = dsa_port_phylink_create(dp);
	if (err)
		return err;

	err = phylink_of_phy_connect(dp->pl, port_dn, 0);
	if (err && err != -ENODEV) {
		pr_err("could not attach to PHY: %d\n", err);
		goto err_phy_connect;
	}

	return 0;

err_phy_connect:
	phylink_destroy(dp->pl);
	return err;
}

int dsa_port_link_register_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *phy_np;
	int port = dp->index;

	if (!ds->ops->adjust_link) {
		phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
		if (of_phy_is_fixed_link(dp->dn) || phy_np) {
			if (ds->ops->phylink_mac_link_down)
				ds->ops->phylink_mac_link_down(ds, port,
					MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
			of_node_put(phy_np);
			return dsa_port_phylink_register(dp);
		}
		of_node_put(phy_np);
		return 0;
	}

	dev_warn(ds->dev,
		 "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");

	if (of_phy_is_fixed_link(dp->dn))
		return dsa_port_fixed_link_register_of(dp);
	else
		return dsa_port_setup_phy_of(dp, true);
}

void dsa_port_link_unregister_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->adjust_link && dp->pl) {
		rtnl_lock();
		phylink_disconnect_phy(dp->pl);
		rtnl_unlock();
		phylink_destroy(dp->pl);
		dp->pl = NULL;
		return;
	}

	if (of_phy_is_fixed_link(dp->dn))
		of_phy_deregister_fixed_link(dp->dn);
	else
		dsa_port_setup_phy_of(dp, false);
}

int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_hsr_join)
		return -EOPNOTSUPP;

	dp->hsr_dev = hsr;

	err = ds->ops->port_hsr_join(ds, dp->index, hsr);
	if (err)
		dp->hsr_dev = NULL;

	return err;
}

void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	dp->hsr_dev = NULL;

	if (ds->ops->port_hsr_leave) {
		err = ds->ops->port_hsr_leave(ds, dp->index, hsr);
		if (err)
			dev_err(dp->ds->dev,
				"port %d failed to leave HSR %s: %pe\n",
				dp->index, hsr->name, ERR_PTR(err));
	}
}

int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast)
{
	struct dsa_notifier_tag_8021q_vlan_info info = {
		.dp = dp,
		.vid = vid,
	};

	if (broadcast)
		return dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);

	return dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);
}

void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast)
{
	struct dsa_notifier_tag_8021q_vlan_info info = {
		.dp = dp,
		.vid = vid,
	};
	int err;

	if (broadcast)
		err = dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
	else
		err = dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify tag_8021q VLAN %d deletion: %pe\n",
			dp->index, vid, ERR_PTR(err));
}