// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch port
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/notifier.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "dsa_priv.h"

/**
 * dsa_port_notify - Notify the switching fabric of changes to a port
 * @dp: port on which change occurred
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Notify all switches in the DSA tree that this port's switch belongs to,
 * including this switch itself, of an event. Allows the other switches to
 * reconfigure themselves for cross-chip operations. Can also be used to
 * reconfigure ports without net_devices (CPU ports, DSA links) whenever
 * a user port's state changes.
 */
static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
{
	return dsa_tree_notify(dp->ds->dst, e, v);
}

static void dsa_port_notify_bridge_fdb_flush(const struct dsa_port *dp)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct switchdev_notifier_fdb_info info = {
		/* flush all VLANs */
		.vid = 0,
	};

	/* When the port becomes standalone it has already left the bridge.
	 * Don't notify the bridge in that case.
	 */
	if (!brport_dev)
		return;

	call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
				 brport_dev, &info.info, NULL);
}

static void dsa_port_fast_age(const struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_fast_age)
		return;

	ds->ops->port_fast_age(ds, dp->index);

	dsa_port_notify_bridge_fdb_flush(dp);
}

static bool dsa_port_can_configure_learning(struct dsa_port *dp)
{
	struct switchdev_brport_flags flags = {
		.mask = BR_LEARNING,
	};
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_bridge_flags || !ds->ops->port_pre_bridge_flags)
		return false;

	err = ds->ops->port_pre_bridge_flags(ds, dp->index, flags, NULL);
	return !err;
}

int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_stp_state_set)
		return -EOPNOTSUPP;

	ds->ops->port_stp_state_set(ds, port, state);

	if (!dsa_port_can_configure_learning(dp) ||
	    (do_fast_age && dp->learning)) {
		/* Fast age FDB entries or flush appropriate forwarding database
		 * for the given port, if we are moving it from Learning or
		 * Forwarding state, to Disabled or Blocking or Listening state.
		 * Ports that were standalone before the STP state change don't
		 * need to fast age the FDB, since address learning is off in
		 * standalone mode.
		 */
		if ((dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING) &&
		    (state == BR_STATE_DISABLED ||
		     state == BR_STATE_BLOCKING ||
		     state == BR_STATE_LISTENING))
			dsa_port_fast_age(dp);
	}

	dp->stp_state = state;

	return 0;
}
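
/* Example: dsa_port_set_state() above funnels the bridge's STP state into
 * the driver's ->port_stp_state_set() op. A minimal sketch of a driver
 * implementation is shown below; the "foo" names and register layout are
 * hypothetical, not a real driver:
 *
 *	static void foo_port_stp_state_set(struct dsa_switch *ds, int port,
 *					   u8 state)
 *	{
 *		struct foo_priv *priv = ds->priv;
 *		u8 hw_state;
 *
 *		switch (state) {
 *		case BR_STATE_DISABLED:
 *			hw_state = FOO_PORT_STATE_DISABLED;
 *			break;
 *		case BR_STATE_BLOCKING:
 *		case BR_STATE_LISTENING:
 *			hw_state = FOO_PORT_STATE_BLOCKING;
 *			break;
 *		case BR_STATE_LEARNING:
 *			hw_state = FOO_PORT_STATE_LEARNING;
 *			break;
 *		case BR_STATE_FORWARDING:
 *		default:
 *			hw_state = FOO_PORT_STATE_FORWARDING;
 *			break;
 *		}
 *
 *		foo_write(priv, FOO_PORT_STATE(port), hw_state);
 *	}
 */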

static void dsa_port_set_state_now(struct dsa_port *dp, u8 state,
				   bool do_fast_age)
{
	int err;

	err = dsa_port_set_state(dp, state, do_fast_age);
	if (err)
		pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
}

int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	int err;

	if (ds->ops->port_enable) {
		err = ds->ops->port_enable(ds, port, phy);
		if (err)
			return err;
	}

	if (!dp->bridge_dev)
		dsa_port_set_state_now(dp, BR_STATE_FORWARDING, false);

	if (dp->pl)
		phylink_start(dp->pl);

	return 0;
}

int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
{
	int err;

	rtnl_lock();
	err = dsa_port_enable_rt(dp, phy);
	rtnl_unlock();

	return err;
}

void dsa_port_disable_rt(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (dp->pl)
		phylink_stop(dp->pl);

	if (!dp->bridge_dev)
		dsa_port_set_state_now(dp, BR_STATE_DISABLED, false);

	if (ds->ops->port_disable)
		ds->ops->port_disable(ds, port);
}

void dsa_port_disable(struct dsa_port *dp)
{
	rtnl_lock();
	dsa_port_disable_rt(dp);
	rtnl_unlock();
}

static int dsa_port_inherit_brport_flags(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD;
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);

		if (br_port_flag_is_set(brport_dev, BIT(flag)))
			flags.val = BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, extack);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return 0;
}

static void dsa_port_clear_brport_flags(struct dsa_port *dp)
{
	const unsigned long val = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD;
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);
		flags.val = val & BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, NULL);
		if (err && err != -EOPNOTSUPP)
			dev_err(dp->ds->dev,
				"failed to clear bridge port flag %lu: %pe\n",
				flags.val, ERR_PTR(err));
	}
}

static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	int err;

	err = dsa_port_inherit_brport_flags(dp, extack);
	if (err)
		return err;

	err = dsa_port_set_state(dp, br_port_get_stp_state(brport_dev), false);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_vlan_filtering(dp, br_vlan_enabled(br), extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_ageing_time(dp, br_get_ageing_time(br));
	if (err && err != -EOPNOTSUPP)
		return err;

	return 0;
}
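
/* Example: dsa_port_inherit_brport_flags()/dsa_port_clear_brport_flags()
 * above reach the driver through ->port_pre_bridge_flags() (can this flag
 * be offloaded at all?) and ->port_bridge_flags() (actually apply it).
 * A minimal sketch for a hypothetical "foo" driver that can only control
 * address learning:
 *
 *	static int foo_port_pre_bridge_flags(struct dsa_switch *ds, int port,
 *					     struct switchdev_brport_flags flags,
 *					     struct netlink_ext_ack *extack)
 *	{
 *		if (flags.mask & ~BR_LEARNING)
 *			return -EINVAL;
 *
 *		return 0;
 *	}
 *
 *	static int foo_port_bridge_flags(struct dsa_switch *ds, int port,
 *					 struct switchdev_brport_flags flags,
 *					 struct netlink_ext_ack *extack)
 *	{
 *		struct foo_priv *priv = ds->priv;
 *
 *		if (flags.mask & BR_LEARNING)
 *			foo_port_set_learning(priv, port,
 *					      !!(flags.val & BR_LEARNING));
 *
 *		return 0;
 *	}
 */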

static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp)
{
	/* Configure the port for standalone mode (no address learning,
	 * flood everything).
	 * The bridge only emits SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS events
	 * when the user requests it through netlink or sysfs, but not
	 * automatically at port join or leave, so we need to handle resetting
	 * the brport flags ourselves. But we even prefer it that way, because
	 * otherwise, some setups might never get the notification they need,
	 * for example, when a port leaves a LAG that offloads the bridge,
	 * it becomes standalone, but as far as the bridge is concerned, no
	 * port ever left.
	 */
	dsa_port_clear_brport_flags(dp);

	/* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer,
	 * so allow it to be in BR_STATE_FORWARDING to be kept functional
	 */
	dsa_port_set_state_now(dp, BR_STATE_FORWARDING, true);

	/* VLAN filtering is handled by dsa_switch_bridge_leave */

	/* Ageing time may be global to the switch chip, so don't change it
	 * here because we have no good reason (or value) to change it to.
	 */
}

static void dsa_port_bridge_tx_fwd_unoffload(struct dsa_port *dp,
					     struct net_device *bridge_dev,
					     unsigned int bridge_num)
{
	struct dsa_switch *ds = dp->ds;

	/* No bridge TX forwarding offload => do nothing */
	if (!ds->ops->port_bridge_tx_fwd_unoffload || !bridge_num)
		return;

	/* Notify the chips only once the offload has been deactivated, so
	 * that they can update their configuration accordingly.
	 */
	ds->ops->port_bridge_tx_fwd_unoffload(ds, dp->index, bridge_dev,
					      bridge_num);
}

static bool dsa_port_bridge_tx_fwd_offload(struct dsa_port *dp,
					   struct net_device *bridge_dev,
					   unsigned int bridge_num)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	/* No bridge TX forwarding offload => do nothing */
	if (!ds->ops->port_bridge_tx_fwd_offload || !bridge_num)
		return false;

	/* Notify the driver */
	err = ds->ops->port_bridge_tx_fwd_offload(ds, dp->index, bridge_dev,
						  bridge_num);

	return err ? false : true;
}
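
/* Assisted TX forwarding offload: when a driver implements
 * ->port_bridge_tx_fwd_offload(), packets that the software bridge would
 * otherwise clone once per port (flooded or multicast traffic) can cross
 * the CPU port as a single "data plane" packet, with the switch replicating
 * it towards all ports of the bridge. The bridge_num assigned at join time
 * identifies the bridge to the tagger so it can request this treatment in
 * the DSA tag.
 */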

static int dsa_port_bridge_create(struct dsa_port *dp,
				  struct net_device *br,
				  struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	unsigned int bridge_num;

	dp->bridge_dev = br;

	if (!ds->max_num_bridges)
		return 0;

	bridge_num = dsa_bridge_num_get(br, ds->max_num_bridges);
	if (!bridge_num) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Range of offloadable bridges exceeded");
		return -EOPNOTSUPP;
	}

	dp->bridge_num = bridge_num;

	return 0;
}

static void dsa_port_bridge_destroy(struct dsa_port *dp,
				    const struct net_device *br)
{
	struct dsa_switch *ds = dp->ds;

	dp->bridge_dev = NULL;

	if (ds->max_num_bridges) {
		int bridge_num = dp->bridge_num;

		dp->bridge_num = 0;
		dsa_bridge_num_put(br, bridge_num);
	}
}

int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
			 struct netlink_ext_ack *extack)
{
	struct dsa_notifier_bridge_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.br = br,
	};
	struct net_device *dev = dp->slave;
	struct net_device *brport_dev;
	bool tx_fwd_offload;
	int err;

	/* Here the interface is already bridged. Reflect the current
	 * configuration so that drivers can program their chips accordingly.
	 */
	err = dsa_port_bridge_create(dp, br, extack);
	if (err)
		return err;

	brport_dev = dsa_port_to_bridge_port(dp);

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);
	if (err)
		goto out_rollback;

	tx_fwd_offload = dsa_port_bridge_tx_fwd_offload(dp, br,
							dsa_port_bridge_num_get(dp));

	err = switchdev_bridge_port_offload(brport_dev, dev, dp,
					    &dsa_slave_switchdev_notifier,
					    &dsa_slave_switchdev_blocking_notifier,
					    tx_fwd_offload, extack);
	if (err)
		goto out_rollback_unbridge;

	err = dsa_port_switchdev_sync_attrs(dp, extack);
	if (err)
		goto out_rollback_unoffload;

	return 0;

out_rollback_unoffload:
	switchdev_bridge_port_unoffload(brport_dev, dp,
					&dsa_slave_switchdev_notifier,
					&dsa_slave_switchdev_blocking_notifier);
out_rollback_unbridge:
	dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
out_rollback:
	dsa_port_bridge_destroy(dp, br);
	return err;
}
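
/* Example: the DSA_NOTIFIER_BRIDGE_JOIN broadcast above ends up calling the
 * driver's ->port_bridge_join() op (and the cross-chip equivalent on the
 * other switches of the tree). A minimal sketch for a hypothetical "foo"
 * switch that keeps a per-port forwarding domain mask:
 *
 *	static int foo_port_bridge_join(struct dsa_switch *ds, int port,
 *					struct net_device *br)
 *	{
 *		struct foo_priv *priv = ds->priv;
 *		struct dsa_port *other_dp;
 *
 *		dsa_switch_for_each_user_port(other_dp, ds)
 *			if (dsa_port_bridge_dev_get(other_dp) == br)
 *				priv->fwd_mask[port] |= BIT(other_dp->index);
 *
 *		return foo_apply_fwd_mask(priv, port);
 *	}
 */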

void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);

	/* Don't try to unoffload something that is not offloaded */
	if (!brport_dev)
		return;

	switchdev_bridge_port_unoffload(brport_dev, dp,
					&dsa_slave_switchdev_notifier,
					&dsa_slave_switchdev_blocking_notifier);

	dsa_flush_workqueue();
}

void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	unsigned int bridge_num = dsa_port_bridge_num_get(dp);
	struct dsa_notifier_bridge_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.br = br,
	};
	int err;

	/* Here the port is already unbridged. Reflect the current
	 * configuration so that drivers can program their chips accordingly.
	 */
	dsa_port_bridge_destroy(dp, br);

	dsa_port_bridge_tx_fwd_unoffload(dp, br, bridge_num);

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify DSA_NOTIFIER_BRIDGE_LEAVE: %pe\n",
			dp->index, ERR_PTR(err));

	dsa_port_switchdev_unsync_attrs(dp);
}

int dsa_port_lag_change(struct dsa_port *dp,
			struct netdev_lag_lower_state_info *linfo)
{
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
	};
	bool tx_enabled;

	if (!dp->lag_dev)
		return 0;

	/* On statically configured aggregates (e.g. loadbalance
	 * without LACP) ports will always be tx_enabled, even if the
	 * link is down. Thus we require both link_up and tx_enabled
	 * in order to include it in the tx set.
	 */
	tx_enabled = linfo->link_up && linfo->tx_enabled;

	if (tx_enabled == dp->lag_tx_enabled)
		return 0;

	dp->lag_tx_enabled = tx_enabled;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
}

int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag,
		      struct netdev_lag_upper_info *uinfo,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.lag = lag,
		.info = uinfo,
	};
	struct net_device *bridge_dev;
	int err;

	dsa_lag_map(dp->ds->dst, lag);
	dp->lag_dev = lag;

	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
	if (err)
		goto err_lag_join;

	bridge_dev = netdev_master_upper_dev_get(lag);
	if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
		return 0;

	err = dsa_port_bridge_join(dp, bridge_dev, extack);
	if (err)
		goto err_bridge_join;

	return 0;

err_bridge_join:
	dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
err_lag_join:
	dp->lag_dev = NULL;
	dsa_lag_unmap(dp->ds->dst, lag);
	return err;
}

void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);

	if (br)
		dsa_port_pre_bridge_leave(dp, br);
}

void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.lag = lag,
	};
	int err;

	if (!dp->lag_dev)
		return;

	/* Port might have been part of a LAG that in turn was
	 * attached to a bridge.
	 */
	if (br)
		dsa_port_bridge_leave(dp, br);

	dp->lag_tx_enabled = false;
	dp->lag_dev = NULL;

	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify DSA_NOTIFIER_LAG_LEAVE: %pe\n",
			dp->index, ERR_PTR(err));

	dsa_lag_unmap(dp->ds->dst, lag);
}
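
/* Example: DSA_NOTIFIER_LAG_JOIN (sent above) reaches the driver through
 * ->port_lag_join(). A common pattern is to refuse LAG modes the hardware
 * cannot balance; a minimal sketch for a hypothetical "foo" switch:
 *
 *	static int foo_port_lag_join(struct dsa_switch *ds, int port,
 *				     struct net_device *lag,
 *				     struct netdev_lag_upper_info *info)
 *	{
 *		struct foo_priv *priv = ds->priv;
 *
 *		if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
 *			return -EOPNOTSUPP;
 *
 *		return foo_set_port_lag(priv, port, dsa_lag_id(ds->dst, lag));
 *	}
 *
 * Returning -EOPNOTSUPP makes the DSA core fall back to a software LAG on
 * top of the user ports.
 */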

/* Must be called under rcu_read_lock() */
static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
					      bool vlan_filtering,
					      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	int err;

	/* VLAN awareness was off, so the question is "can we turn it on".
	 * We may have had 8021q uppers, those need to go. Make sure we don't
	 * enter an inconsistent state: deny changing the VLAN awareness state
	 * as long as we have 8021q uppers.
	 */
	if (vlan_filtering && dsa_port_is_user(dp)) {
		struct net_device *br = dsa_port_bridge_dev_get(dp);
		struct net_device *upper_dev, *slave = dp->slave;
		struct list_head *iter;

		netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
			struct bridge_vlan_info br_info;
			u16 vid;

			if (!is_vlan_dev(upper_dev))
				continue;

			vid = vlan_dev_vlan_id(upper_dev);

			/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
			 * device, respectively the VID is not found, returning
			 * 0 means success, which is a failure for us here.
			 */
			err = br_vlan_get_info(br, vid, &br_info);
			if (err == 0) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Must first remove VLAN uppers having VIDs also present in bridge");
				return false;
			}
		}
	}

	if (!ds->vlan_filtering_is_global)
		return true;

	/* For cases where enabling/disabling VLAN awareness is global to the
	 * switch, we need to handle the case where multiple bridges span
	 * different ports of the same switch device and one of them has a
	 * different setting than what is being requested.
	 */
	dsa_switch_for_each_port(other_dp, ds) {
		struct net_device *other_br = dsa_port_bridge_dev_get(other_dp);

		/* If it's the same bridge, it also has same
		 * vlan_filtering setting => no need to check
		 */
		if (!other_br || other_br == dsa_port_bridge_dev_get(dp))
			continue;

		if (br_vlan_enabled(other_br) != vlan_filtering) {
			NL_SET_ERR_MSG_MOD(extack,
					   "VLAN filtering is a global setting");
			return false;
		}
	}

	return true;
}
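
/* Example: once the checks above pass, dsa_port_vlan_filtering() below calls
 * the driver's ->port_vlan_filtering() op. A minimal sketch for a
 * hypothetical "foo" switch with a per-port 802.1Q enable bit:
 *
 *	static int foo_port_vlan_filtering(struct dsa_switch *ds, int port,
 *					   bool vlan_filtering,
 *					   struct netlink_ext_ack *extack)
 *	{
 *		struct foo_priv *priv = ds->priv;
 *
 *		return foo_rmw(priv, FOO_PORT_CTRL(port), FOO_8021Q_EN,
 *			       vlan_filtering ? FOO_8021Q_EN : 0);
 *	}
 *
 * Switches where this bit is global to the chip set
 * ds->vlan_filtering_is_global instead, and the core then enforces a
 * consistent setting across all bridges as done above.
 */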

int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
			    struct netlink_ext_ack *extack)
{
	bool old_vlan_filtering = dsa_port_is_vlan_filtering(dp);
	struct dsa_switch *ds = dp->ds;
	bool apply;
	int err;

	if (!ds->ops->port_vlan_filtering)
		return -EOPNOTSUPP;

	/* We are called from dsa_slave_switchdev_blocking_event(),
	 * which is not under rcu_read_lock(), unlike
	 * dsa_slave_switchdev_event().
	 */
	rcu_read_lock();
	apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering, extack);
	rcu_read_unlock();
	if (!apply)
		return -EINVAL;

	if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
		return 0;

	err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering,
					   extack);
	if (err)
		return err;

	if (ds->vlan_filtering_is_global) {
		struct dsa_port *other_dp;

		ds->vlan_filtering = vlan_filtering;

		dsa_switch_for_each_user_port(other_dp, ds) {
			struct net_device *slave = other_dp->slave;

			/* We might be called in the unbind path, so not
			 * all slave devices might still be registered.
			 */
			if (!slave)
				continue;

			err = dsa_slave_manage_vlan_filtering(slave,
							      vlan_filtering);
			if (err)
				goto restore;
		}
	} else {
		dp->vlan_filtering = vlan_filtering;

		err = dsa_slave_manage_vlan_filtering(dp->slave,
						      vlan_filtering);
		if (err)
			goto restore;
	}

	return 0;

restore:
	ds->ops->port_vlan_filtering(ds, dp->index, old_vlan_filtering, NULL);

	if (ds->vlan_filtering_is_global)
		ds->vlan_filtering = old_vlan_filtering;
	else
		dp->vlan_filtering = old_vlan_filtering;

	return err;
}

/* This enforces legacy behavior for switch drivers which assume they can't
 * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
 */
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	struct dsa_switch *ds = dp->ds;

	if (!br)
		return false;

	return !ds->configure_vlan_while_not_filtering && !br_vlan_enabled(br);
}

int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
	unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
	struct dsa_notifier_ageing_time_info info;
	int err;

	info.ageing_time = ageing_time;

	err = dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
	if (err)
		return err;

	dp->ageing_time = ageing_time;

	return 0;
}

int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
			      struct switchdev_brport_flags flags,
			      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_pre_bridge_flags)
		return -EINVAL;

	return ds->ops->port_pre_bridge_flags(ds, dp->index, flags, extack);
}

int dsa_port_bridge_flags(struct dsa_port *dp,
			  struct switchdev_brport_flags flags,
			  struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_bridge_flags)
		return -EOPNOTSUPP;

	err = ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
	if (err)
		return err;

	if (flags.mask & BR_LEARNING) {
		bool learning = flags.val & BR_LEARNING;

		if (learning == dp->learning)
			return 0;

		if ((dp->learning && !learning) &&
		    (dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING))
			dsa_port_fast_age(dp);

		dp->learning = learning;
	}

	return 0;
}

int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
			bool targeted_match)
{
	struct dsa_notifier_mtu_info info = {
		.sw_index = dp->ds->index,
		.targeted_match = targeted_match,
		.port = dp->index,
		.mtu = new_mtu,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
}

int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.addr = addr,
		.vid = vid,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
}

int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.addr = addr,
		.vid = vid,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
}
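
/* Host addresses (the port's own MAC address, addresses of upper interfaces,
 * etc.) must be trapped towards the CPU port. Since the host FDB/MDB helpers
 * below install the entry towards the CPU port, the address is also added to
 * the DSA master's RX filter with dev_uc_add()/dev_mc_add(), so that the
 * master does not drop those frames before they reach the network stack.
 */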

int dsa_port_host_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			  u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.addr = addr,
		.vid = vid,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dev_uc_add(cpu_dp->master, addr);
	if (err)
		return err;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
}

int dsa_port_host_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			  u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.addr = addr,
		.vid = vid,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dev_uc_del(cpu_dp->master, addr);
	if (err)
		return err;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
}

int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_fdb_dump)
		return -EOPNOTSUPP;

	return ds->ops->port_fdb_dump(ds, port, cb, data);
}

int dsa_port_mdb_add(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
}

int dsa_port_mdb_del(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
}

int dsa_port_host_mdb_add(const struct dsa_port *dp,
			  const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dev_mc_add(cpu_dp->master, mdb->addr);
	if (err)
		return err;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_ADD, &info);
}

int dsa_port_host_mdb_del(const struct dsa_port *dp,
			  const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dev_mc_del(cpu_dp->master, mdb->addr);
	if (err)
		return err;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_DEL, &info);
}

int dsa_port_vlan_add(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_vlan_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vlan = vlan,
		.extack = extack,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
}

int dsa_port_vlan_del(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_notifier_vlan_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vlan = vlan,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
}
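
/* MRP (Media Redundancy Protocol, IEC 62439-2) ring configuration follows
 * the same pattern as the FDB/MDB/VLAN helpers above: the switchdev object
 * is wrapped in a notifier info structure and forwarded to the driver
 * through the switch fabric notifier chain.
 */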

int dsa_port_mrp_add(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp)
{
	struct dsa_notifier_mrp_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mrp = mrp,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_ADD, &info);
}

int dsa_port_mrp_del(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp)
{
	struct dsa_notifier_mrp_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mrp = mrp,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_DEL, &info);
}

int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct dsa_notifier_mrp_ring_role_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mrp = mrp,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_ADD_RING_ROLE, &info);
}

int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct dsa_notifier_mrp_ring_role_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mrp = mrp,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_DEL_RING_ROLE, &info);
}

void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
			       const struct dsa_device_ops *tag_ops)
{
	cpu_dp->rcv = tag_ops->rcv;
	cpu_dp->tag_ops = tag_ops;
}

static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
{
	struct device_node *phy_dn;
	struct phy_device *phydev;

	phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
	if (!phy_dn)
		return NULL;

	phydev = of_phy_find_device(phy_dn);
	if (!phydev) {
		of_node_put(phy_dn);
		return ERR_PTR(-EPROBE_DEFER);
	}

	of_node_put(phy_dn);
	return phydev;
}

static void dsa_port_phylink_validate(struct phylink_config *config,
				      unsigned long *supported,
				      struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_validate) {
		if (config->mac_capabilities)
			phylink_generic_validate(config, supported, state);
		return;
	}

	ds->ops->phylink_validate(ds, dp->index, supported, state);
}

static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
					       struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;
	int err;

	/* Only called for inband modes */
	if (!ds->ops->phylink_mac_link_state) {
		state->link = 0;
		return;
	}

	err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
	if (err < 0) {
		dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
			dp->index, err);
		state->link = 0;
	}
}

static void dsa_port_phylink_mac_config(struct phylink_config *config,
					unsigned int mode,
					const struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_config)
		return;

	ds->ops->phylink_mac_config(ds, dp->index, mode, state);
}

static void dsa_port_phylink_mac_an_restart(struct phylink_config *config)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_an_restart)
		return;

	ds->ops->phylink_mac_an_restart(ds, dp->index);
}
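
/* The remaining phylink_mac_ops shims dispatch into the driver's optional
 * phylink_mac_* ops. For drivers that still use the legacy PHYLIB
 * ->adjust_link() callback, the link_down/link_up shims below fall back to
 * calling it with the port's PHY, so that old drivers keep working while
 * they are migrated to phylink.
 */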

static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
					   unsigned int mode,
					   phy_interface_t interface)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct phy_device *phydev = NULL;
	struct dsa_switch *ds = dp->ds;

	if (dsa_port_is_user(dp))
		phydev = dp->slave->phydev;

	if (!ds->ops->phylink_mac_link_down) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
}

static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
					 struct phy_device *phydev,
					 unsigned int mode,
					 phy_interface_t interface,
					 int speed, int duplex,
					 bool tx_pause, bool rx_pause)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_link_up) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
				     speed, duplex, tx_pause, rx_pause);
}

static const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
	.validate = dsa_port_phylink_validate,
	.mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
	.mac_config = dsa_port_phylink_mac_config,
	.mac_an_restart = dsa_port_phylink_mac_an_restart,
	.mac_link_down = dsa_port_phylink_mac_link_down,
	.mac_link_up = dsa_port_phylink_mac_link_up,
};

int dsa_port_phylink_create(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	phy_interface_t mode;
	int err;

	err = of_get_phy_mode(dp->dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;

	if (ds->ops->phylink_get_caps)
		ds->ops->phylink_get_caps(ds, dp->index, &dp->pl_config);

	dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn),
				mode, &dsa_port_phylink_mac_ops);
	if (IS_ERR(dp->pl)) {
		pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
		return PTR_ERR(dp->pl);
	}

	return 0;
}

static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable)
{
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	int err = 0;

	phydev = dsa_port_get_phy_device(dp);
	if (!phydev)
		return 0;

	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	if (enable) {
		err = genphy_resume(phydev);
		if (err < 0)
			goto err_put_dev;

		err = genphy_read_status(phydev);
		if (err < 0)
			goto err_put_dev;
	} else {
		err = genphy_suspend(phydev);
		if (err < 0)
			goto err_put_dev;
	}

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	dev_dbg(ds->dev, "enabled port's phy: %s", phydev_name(phydev));

err_put_dev:
	put_device(&phydev->mdio.dev);
	return err;
}
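
/* Fixed links model a MAC-to-MAC connection with no attached PHY, described
 * in the device tree with a "fixed-link" subnode of the port; for example
 * (port address, label and speed are illustrative):
 *
 *	port@5 {
 *		reg = <5>;
 *		label = "wan";
 *		phy-mode = "rgmii-id";
 *		fixed-link {
 *			speed = <1000>;
 *			full-duplex;
 *		};
 *	};
 *
 * of_phy_register_fixed_link() below creates a software PHY that always
 * reports this link state.
 */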

static int dsa_port_fixed_link_register_of(struct dsa_port *dp)
{
	struct device_node *dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	phy_interface_t mode;
	int err;

	err = of_phy_register_fixed_link(dn);
	if (err) {
		dev_err(ds->dev,
			"failed to register the fixed PHY of port %d\n",
			port);
		return err;
	}

	phydev = of_phy_find_device(dn);

	err = of_get_phy_mode(dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;
	phydev->interface = mode;

	genphy_read_status(phydev);

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	put_device(&phydev->mdio.dev);

	return 0;
}

static int dsa_port_phylink_register(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *port_dn = dp->dn;
	int err;

	dp->pl_config.dev = ds->dev;
	dp->pl_config.type = PHYLINK_DEV;
	dp->pl_config.pcs_poll = ds->pcs_poll;

	err = dsa_port_phylink_create(dp);
	if (err)
		return err;

	err = phylink_of_phy_connect(dp->pl, port_dn, 0);
	if (err && err != -ENODEV) {
		pr_err("could not attach to PHY: %d\n", err);
		goto err_phy_connect;
	}

	return 0;

err_phy_connect:
	phylink_destroy(dp->pl);
	return err;
}

int dsa_port_link_register_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *phy_np;
	int port = dp->index;

	if (!ds->ops->adjust_link) {
		phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
		if (of_phy_is_fixed_link(dp->dn) || phy_np) {
			if (ds->ops->phylink_mac_link_down)
				ds->ops->phylink_mac_link_down(ds, port,
					MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
			/* of_parse_phandle() took a reference, drop it */
			of_node_put(phy_np);
			return dsa_port_phylink_register(dp);
		}
		return 0;
	}

	dev_warn(ds->dev,
		 "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");

	if (of_phy_is_fixed_link(dp->dn))
		return dsa_port_fixed_link_register_of(dp);
	else
		return dsa_port_setup_phy_of(dp, true);
}

void dsa_port_link_unregister_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->adjust_link && dp->pl) {
		rtnl_lock();
		phylink_disconnect_phy(dp->pl);
		rtnl_unlock();
		phylink_destroy(dp->pl);
		dp->pl = NULL;
		return;
	}

	if (of_phy_is_fixed_link(dp->dn))
		of_phy_deregister_fixed_link(dp->dn);
	else
		dsa_port_setup_phy_of(dp, false);
}

int dsa_port_get_phy_strings(struct dsa_port *dp, uint8_t *data)
{
	struct phy_device *phydev;
	int ret = -EOPNOTSUPP;

	if (of_phy_is_fixed_link(dp->dn))
		return ret;

	phydev = dsa_port_get_phy_device(dp);
	if (IS_ERR_OR_NULL(phydev))
		return ret;

	ret = phy_ethtool_get_strings(phydev, data);
	put_device(&phydev->mdio.dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_phy_strings);

int dsa_port_get_ethtool_phy_stats(struct dsa_port *dp, uint64_t *data)
{
	struct phy_device *phydev;
	int ret = -EOPNOTSUPP;

	if (of_phy_is_fixed_link(dp->dn))
		return ret;

	phydev = dsa_port_get_phy_device(dp);
	if (IS_ERR_OR_NULL(phydev))
		return ret;

	ret = phy_ethtool_get_stats(phydev, NULL, data);
	put_device(&phydev->mdio.dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_ethtool_phy_stats);

int dsa_port_get_phy_sset_count(struct dsa_port *dp)
{
	struct phy_device *phydev;
	int ret = -EOPNOTSUPP;

	if (of_phy_is_fixed_link(dp->dn))
		return ret;

	phydev = dsa_port_get_phy_device(dp);
	if (IS_ERR_OR_NULL(phydev))
		return ret;

	ret = phy_ethtool_get_sset_count(phydev);
	put_device(&phydev->mdio.dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_phy_sset_count);
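
/* HSR (High-availability Seamless Redundancy) and PRP (Parallel Redundancy
 * Protocol), both from IEC 62439-3: the port joins or leaves an hsr device,
 * and the driver is notified so it can offload duplication and elimination
 * of the redundant traffic.
 */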

int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
{
	struct dsa_notifier_hsr_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.hsr = hsr,
	};
	int err;

	dp->hsr_dev = hsr;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_JOIN, &info);
	if (err)
		dp->hsr_dev = NULL;

	return err;
}

void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr)
{
	struct dsa_notifier_hsr_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.hsr = hsr,
	};
	int err;

	dp->hsr_dev = NULL;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_LEAVE, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify DSA_NOTIFIER_HSR_LEAVE: %pe\n",
			dp->index, ERR_PTR(err));
}

int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast)
{
	struct dsa_notifier_tag_8021q_vlan_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vid = vid,
	};

	if (broadcast)
		return dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);

	return dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);
}

void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast)
{
	struct dsa_notifier_tag_8021q_vlan_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vid = vid,
	};
	int err;

	if (broadcast)
		err = dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
	else
		err = dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify tag_8021q VLAN %d deletion: %pe\n",
			dp->index, vid, ERR_PTR(err));
}