/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * include/net/dsa.h - Driver for Distributed Switch Architecture switch chips
 * Copyright (c) 2008-2009 Marvell Semiconductor
 */

#ifndef __LINUX_NET_DSA_H
#define __LINUX_NET_DSA_H

#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/ethtool.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/platform_data/dsa.h>
#include <linux/phylink.h>
#include <net/devlink.h>
#include <net/switchdev.h>

struct tc_action;
struct phy_device;
struct fixed_phy_status;
struct phylink_link_state;

#define DSA_TAG_PROTO_NONE_VALUE		0
#define DSA_TAG_PROTO_BRCM_VALUE		1
#define DSA_TAG_PROTO_BRCM_PREPEND_VALUE	2
#define DSA_TAG_PROTO_DSA_VALUE			3
#define DSA_TAG_PROTO_EDSA_VALUE		4
#define DSA_TAG_PROTO_GSWIP_VALUE		5
#define DSA_TAG_PROTO_KSZ9477_VALUE		6
#define DSA_TAG_PROTO_KSZ9893_VALUE		7
#define DSA_TAG_PROTO_LAN9303_VALUE		8
#define DSA_TAG_PROTO_MTK_VALUE			9
#define DSA_TAG_PROTO_QCA_VALUE			10
#define DSA_TAG_PROTO_TRAILER_VALUE		11
#define DSA_TAG_PROTO_8021Q_VALUE		12
#define DSA_TAG_PROTO_SJA1105_VALUE		13
#define DSA_TAG_PROTO_KSZ8795_VALUE		14
#define DSA_TAG_PROTO_OCELOT_VALUE		15
#define DSA_TAG_PROTO_AR9331_VALUE		16
#define DSA_TAG_PROTO_RTL4_A_VALUE		17
#define DSA_TAG_PROTO_HELLCREEK_VALUE		18
#define DSA_TAG_PROTO_XRS700X_VALUE		19
#define DSA_TAG_PROTO_OCELOT_8021Q_VALUE	20
#define DSA_TAG_PROTO_SEVILLE_VALUE		21
#define DSA_TAG_PROTO_BRCM_LEGACY_VALUE		22
#define DSA_TAG_PROTO_SJA1110_VALUE		23

enum dsa_tag_protocol {
	DSA_TAG_PROTO_NONE		= DSA_TAG_PROTO_NONE_VALUE,
	DSA_TAG_PROTO_BRCM		= DSA_TAG_PROTO_BRCM_VALUE,
	DSA_TAG_PROTO_BRCM_LEGACY	= DSA_TAG_PROTO_BRCM_LEGACY_VALUE,
	DSA_TAG_PROTO_BRCM_PREPEND	= DSA_TAG_PROTO_BRCM_PREPEND_VALUE,
	DSA_TAG_PROTO_DSA		= DSA_TAG_PROTO_DSA_VALUE,
	DSA_TAG_PROTO_EDSA		= DSA_TAG_PROTO_EDSA_VALUE,
	DSA_TAG_PROTO_GSWIP		= DSA_TAG_PROTO_GSWIP_VALUE,
	DSA_TAG_PROTO_KSZ9477		= DSA_TAG_PROTO_KSZ9477_VALUE,
	DSA_TAG_PROTO_KSZ9893		= DSA_TAG_PROTO_KSZ9893_VALUE,
	DSA_TAG_PROTO_LAN9303		= DSA_TAG_PROTO_LAN9303_VALUE,
	DSA_TAG_PROTO_MTK		= DSA_TAG_PROTO_MTK_VALUE,
	DSA_TAG_PROTO_QCA		= DSA_TAG_PROTO_QCA_VALUE,
	DSA_TAG_PROTO_TRAILER		= DSA_TAG_PROTO_TRAILER_VALUE,
	DSA_TAG_PROTO_8021Q		= DSA_TAG_PROTO_8021Q_VALUE,
	DSA_TAG_PROTO_SJA1105		= DSA_TAG_PROTO_SJA1105_VALUE,
	DSA_TAG_PROTO_KSZ8795		= DSA_TAG_PROTO_KSZ8795_VALUE,
	DSA_TAG_PROTO_OCELOT		= DSA_TAG_PROTO_OCELOT_VALUE,
	DSA_TAG_PROTO_AR9331		= DSA_TAG_PROTO_AR9331_VALUE,
	DSA_TAG_PROTO_RTL4_A		= DSA_TAG_PROTO_RTL4_A_VALUE,
	DSA_TAG_PROTO_HELLCREEK		= DSA_TAG_PROTO_HELLCREEK_VALUE,
	DSA_TAG_PROTO_XRS700X		= DSA_TAG_PROTO_XRS700X_VALUE,
	DSA_TAG_PROTO_OCELOT_8021Q	= DSA_TAG_PROTO_OCELOT_8021Q_VALUE,
	DSA_TAG_PROTO_SEVILLE		= DSA_TAG_PROTO_SEVILLE_VALUE,
	DSA_TAG_PROTO_SJA1110		= DSA_TAG_PROTO_SJA1110_VALUE,
};

struct packet_type;
struct dsa_switch;

struct dsa_device_ops {
	struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev);
	struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev,
			       struct packet_type *pt);
	void (*flow_dissect)(const struct sk_buff *skb, __be16 *proto,
			     int *offset);
	/* Used to determine which traffic should match the DSA filter in
	 * eth_type_trans, and which, if any, should bypass it and be
	 * processed as regular on the master net device.
	 */
	bool (*filter)(const struct sk_buff *skb, struct net_device *dev);
	unsigned int needed_headroom;
	unsigned int needed_tailroom;
	const char *name;
	enum dsa_tag_protocol proto;
	/* Some tagging protocols either mangle or shift the destination MAC
	 * address, in which case the DSA master would drop packets on ingress
	 * if what it understands out of the destination MAC address is not in
	 * its RX filter.
	 */
	bool promisc_on_master;
};

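/* To illustrate how these hooks fit together (a hedged sketch, not a real
 * tagger): a hypothetical "foo" tagger that inserts a fixed-size header in
 * front of the EtherType might fill out its ops roughly like this, and
 * register them with module_dsa_tag_driver() near the end of this header.
 * DSA_TAG_PROTO_FOO, FOO_TAG_LEN and the foo_* functions are made up:
 *
 *	static const struct dsa_device_ops foo_netdev_ops = {
 *		.name			= "foo",
 *		.proto			= DSA_TAG_PROTO_FOO,
 *		.xmit			= foo_tag_xmit,
 *		.rcv			= foo_tag_rcv,
 *		.needed_headroom	= FOO_TAG_LEN,
 *	};
 */
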
/* This structure defines the control interfaces that are overlaid by the
 * DSA layer on top of the DSA CPU/management net_device instance. This is
 * used by the core net_device layer while calling various net_device_ops
 * function pointers.
 */
struct dsa_netdevice_ops {
	int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr,
			    int cmd);
};

#define DSA_TAG_DRIVER_ALIAS "dsa_tag-"
#define MODULE_ALIAS_DSA_TAG_DRIVER(__proto)				\
	MODULE_ALIAS(DSA_TAG_DRIVER_ALIAS __stringify(__proto##_VALUE))

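/* For example, MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM) expands to
 * MODULE_ALIAS("dsa_tag-1"), since __stringify() resolves the _VALUE macro
 * first; this "dsa_tag-<proto number>" alias is what allows the DSA core to
 * autoload the right tagger module for a given protocol.
 */
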
struct dsa_switch_tree {
	struct list_head list;

	/* Notifier chain for switch-wide events */
	struct raw_notifier_head nh;

	/* Tree identifier */
	unsigned int index;

	/* Number of switches attached to this tree */
	struct kref refcount;

	/* Has this tree been applied to the hardware? */
	bool setup;

	/* Tagging protocol operations */
	const struct dsa_device_ops *tag_ops;

	/* Default tagging protocol preferred by the switches in this
	 * tree.
	 */
	enum dsa_tag_protocol default_proto;

	/*
	 * Configuration data for the platform device that owns
	 * this dsa switch tree instance.
	 */
	struct dsa_platform_data *pd;

	/* List of switch ports */
	struct list_head ports;

	/* List of DSA links composing the routing table */
	struct list_head rtable;

	/* Maps offloaded LAG netdevs to a zero-based linear ID for
	 * drivers that need it.
	 */
	struct net_device **lags;
	unsigned int lags_len;

	/* Track the largest switch index within a tree */
	unsigned int last_switch;

	/* Track the bridges with forwarding offload enabled */
	unsigned long fwd_offloading_bridges;
};

#define dsa_lags_foreach_id(_id, _dst)				\
	for ((_id) = 0; (_id) < (_dst)->lags_len; (_id)++)	\
		if ((_dst)->lags[(_id)])

#define dsa_lag_foreach_port(_dp, _dst, _lag)			\
	list_for_each_entry((_dp), &(_dst)->ports, list)	\
		if ((_dp)->lag_dev == (_lag))

#define dsa_hsr_foreach_port(_dp, _ds, _hsr)			\
	list_for_each_entry((_dp), &(_ds)->dst->ports, list)	\
		if ((_dp)->ds == (_ds) && (_dp)->hsr_dev == (_hsr))

static inline struct net_device *dsa_lag_dev(struct dsa_switch_tree *dst,
					     unsigned int id)
{
	return dst->lags[id];
}

static inline int dsa_lag_id(struct dsa_switch_tree *dst,
			     struct net_device *lag)
{
	unsigned int id;

	dsa_lags_foreach_id(id, dst) {
		if (dsa_lag_dev(dst, id) == lag)
			return id;
	}

	return -ENODEV;
}

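/* A hedged usage sketch: drivers that set num_lag_ids (see struct dsa_switch
 * further down) typically resolve the LAG netdev passed to their
 * ->port_lag_join() back to a linear hardware index with dsa_lag_id():
 *
 *	int id = dsa_lag_id(ds->dst, lag);
 *
 *	if (id < 0)
 *		return id;
 *	// ... program hardware trunk "id" ...
 */
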
/* TC matchall action types */
enum dsa_port_mall_action_type {
	DSA_PORT_MALL_MIRROR,
	DSA_PORT_MALL_POLICER,
};

/* TC mirroring entry */
struct dsa_mall_mirror_tc_entry {
	u8 to_local_port;
	bool ingress;
};

/* TC port policer entry */
struct dsa_mall_policer_tc_entry {
	u32 burst;
	u64 rate_bytes_per_sec;
};

/* TC matchall entry */
struct dsa_mall_tc_entry {
	struct list_head list;
	unsigned long cookie;
	enum dsa_port_mall_action_type type;
	union {
		struct dsa_mall_mirror_tc_entry mirror;
		struct dsa_mall_policer_tc_entry policer;
	};
};

struct dsa_port {
	/* A CPU port is physically connected to a master device.
	 * A user port exposed to userspace has a slave device.
	 */
	union {
		struct net_device *master;
		struct net_device *slave;
	};

	/* Copy of the tagging protocol operations, for quicker access
	 * in the data path. Valid only for the CPU ports.
	 */
	const struct dsa_device_ops *tag_ops;

	/* Copies for faster access in master receive hot path */
	struct dsa_switch_tree *dst;
	struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev,
			       struct packet_type *pt);
	bool (*filter)(const struct sk_buff *skb, struct net_device *dev);

	enum {
		DSA_PORT_TYPE_UNUSED = 0,
		DSA_PORT_TYPE_CPU,
		DSA_PORT_TYPE_DSA,
		DSA_PORT_TYPE_USER,
	} type;

	struct dsa_switch *ds;
	unsigned int index;
	const char *name;
	struct dsa_port *cpu_dp;
	u8 mac[ETH_ALEN];
	struct device_node *dn;
	unsigned int ageing_time;
	bool vlan_filtering;
	u8 stp_state;
	struct net_device *bridge_dev;
	int bridge_num;
	struct devlink_port devlink_port;
	bool devlink_port_setup;
	struct phylink *pl;
	struct phylink_config pl_config;
	struct net_device *lag_dev;
	bool lag_tx_enabled;
	struct net_device *hsr_dev;

	struct list_head list;

	/*
	 * Give the switch driver somewhere to hang its per-port private data
	 * structures (accessible from the tagger).
	 */
	void *priv;

	/*
	 * Original copy of the master netdev ethtool_ops
	 */
	const struct ethtool_ops *orig_ethtool_ops;

	/*
	 * Original copy of the master netdev net_device_ops
	 */
	const struct dsa_netdevice_ops *netdev_ops;

	/* List of MAC addresses that must be forwarded on this port.
	 * These are only valid on CPU ports and DSA links.
	 */
	struct list_head fdbs;
	struct list_head mdbs;

	bool setup;
};

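/* Illustrative only (not core API): ports live on the tree-wide dst->ports
 * list, so finding e.g. the first CPU port is a plain list walk, similar to
 * what the DSA core does when it assigns dp->cpu_dp to user ports.
 */
static inline struct dsa_port *dsa_example_first_cpu_port(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dp->type == DSA_PORT_TYPE_CPU)
			return dp;

	return NULL;
}
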
/* TODO: ideally DSA ports would have a single dp->link_dp member,
 * and no dst->rtable nor this struct dsa_link would be needed,
 * but this would require some more complex tree walking,
 * so keep it stupid at the moment and list them all.
 */
struct dsa_link {
	struct dsa_port *dp;
	struct dsa_port *link_dp;
	struct list_head list;
};

struct dsa_mac_addr {
	unsigned char addr[ETH_ALEN];
	u16 vid;
	refcount_t refcount;
	struct list_head list;
};

struct dsa_switch {
	bool setup;

	struct device *dev;

	/*
	 * Parent switch tree, and switch index.
	 */
	struct dsa_switch_tree *dst;
	unsigned int index;

	/* Listener for switch fabric events */
	struct notifier_block nb;

	/*
	 * Give the switch driver somewhere to hang its private data
	 * structure.
	 */
	void *priv;

	/*
	 * Configuration data for this switch.
	 */
	struct dsa_chip_data *cd;

	/*
	 * The switch operations.
	 */
	const struct dsa_switch_ops *ops;

	/*
	 * Slave mii_bus and devices for the individual ports.
	 */
	u32 phys_mii_mask;
	struct mii_bus *slave_mii_bus;

	/* Ageing Time limits in msecs */
	unsigned int ageing_time_min;
	unsigned int ageing_time_max;

	/* Storage for drivers using tag_8021q */
	struct dsa_8021q_context *tag_8021q_ctx;

	/* devlink used to represent this switch device */
	struct devlink *devlink;

	/* Number of switch port queues */
	unsigned int num_tx_queues;

	/* Disallow bridge core from requesting different VLAN awareness
	 * settings on ports if not hardware-supported
	 */
	bool vlan_filtering_is_global;

	/* Pass .port_vlan_add and .port_vlan_del to drivers even for bridges
	 * that have vlan_filtering=0. All drivers should ideally set this (and
	 * then the option would get removed), but it is unknown whether this
	 * would break things or not.
	 */
	bool configure_vlan_while_not_filtering;

	/* If the switch driver always programs the CPU port as egress tagged
	 * despite the VLAN configuration indicating otherwise, then setting
	 * @untag_bridge_pvid will force the DSA receive path to pop the
	 * bridge's default_pvid VLAN tagged frames to offer a consistent
	 * behavior between a vlan_filtering=0 and vlan_filtering=1 bridge
	 * device.
	 */
	bool untag_bridge_pvid;

	/* Let DSA manage the FDB entries towards the CPU, based on the
	 * software bridge database.
	 */
	bool assisted_learning_on_cpu_port;

	/* In case vlan_filtering_is_global is set, the VLAN awareness state
	 * should be retrieved from here and not from the per-port settings.
	 */
	bool vlan_filtering;

	/* MAC PCS does not provide link state change interrupt, and requires
	 * polling. Flag passed on to PHYLINK.
	 */
	bool pcs_poll;

	/* For switches that only have the MRU configurable. To ensure the
	 * configured MTU is not exceeded, normalization of MRU on all bridged
	 * interfaces is needed.
	 */
	bool mtu_enforcement_ingress;

	/* Drivers that benefit from having an ID associated with each
	 * offloaded LAG should set this to the maximum number of
	 * supported IDs. DSA will then maintain a mapping of _at
	 * least_ this many IDs, accessible to drivers via
	 * dsa_lag_id().
	 */
	unsigned int num_lag_ids;

	/* Drivers that support bridge forwarding offload should set this to
	 * the maximum number of bridges spanning the same switch tree that can
	 * be offloaded.
	 */
	unsigned int num_fwd_offloading_bridges;

	size_t num_ports;
};

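/* A hedged probe-time sketch (the foo_* names are made up): a switch driver
 * normally allocates this structure, points it at its ops and private data,
 * and hands it to the core with dsa_register_switch(), declared further down:
 *
 *	ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
 *	if (!ds)
 *		return -ENOMEM;
 *
 *	ds->dev = dev;
 *	ds->num_ports = FOO_NUM_PORTS;
 *	ds->ops = &foo_switch_ops;
 *	ds->priv = priv;
 *
 *	return dsa_register_switch(ds);
 */
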
static inline struct dsa_port *dsa_to_port(struct dsa_switch *ds, int p)
{
	struct dsa_switch_tree *dst = ds->dst;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dp->ds == ds && dp->index == p)
			return dp;

	return NULL;
}

static inline bool dsa_port_is_dsa(struct dsa_port *port)
{
	return port->type == DSA_PORT_TYPE_DSA;
}

static inline bool dsa_port_is_cpu(struct dsa_port *port)
{
	return port->type == DSA_PORT_TYPE_CPU;
}

static inline bool dsa_port_is_user(struct dsa_port *dp)
{
	return dp->type == DSA_PORT_TYPE_USER;
}

static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p)
{
	return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_UNUSED;
}

static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
{
	return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_CPU;
}

static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p)
{
	return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_DSA;
}

static inline bool dsa_is_user_port(struct dsa_switch *ds, int p)
{
	return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_USER;
}

static inline u32 dsa_user_ports(struct dsa_switch *ds)
{
	u32 mask = 0;
	int p;

	for (p = 0; p < ds->num_ports; p++)
		if (dsa_is_user_port(ds, p))
			mask |= BIT(p);

	return mask;
}

/* Return the local port used to reach an arbitrary switch device */
static inline unsigned int dsa_routing_port(struct dsa_switch *ds, int device)
{
	struct dsa_switch_tree *dst = ds->dst;
	struct dsa_link *dl;

	list_for_each_entry(dl, &dst->rtable, list)
		if (dl->dp->ds == ds && dl->link_dp->ds->index == device)
			return dl->dp->index;

	return ds->num_ports;
}

/* Return the local port used to reach an arbitrary switch port */
static inline unsigned int dsa_towards_port(struct dsa_switch *ds, int device,
					    int port)
{
	if (device == ds->index)
		return port;
	else
		return dsa_routing_port(ds, device);
}

/* Return the local port used to reach the dedicated CPU port */
static inline unsigned int dsa_upstream_port(struct dsa_switch *ds, int port)
{
	const struct dsa_port *dp = dsa_to_port(ds, port);
	const struct dsa_port *cpu_dp = dp->cpu_dp;

	if (!cpu_dp)
		return port;

	return dsa_towards_port(ds, cpu_dp->ds->index, cpu_dp->index);
}

/* Return true if this is the local port used to reach the CPU port */
static inline bool dsa_is_upstream_port(struct dsa_switch *ds, int port)
{
	if (dsa_is_unused_port(ds, port))
		return false;

	return port == dsa_upstream_port(ds, port);
}

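/* Worked example (hypothetical topology): if switch 1 reaches the CPU through
 * switch 0 and its port 5 is the DSA link cabled towards switch 0, then
 * dsa_routing_port(ds1, 0) == 5, dsa_upstream_port(ds1, port) == 5 for every
 * port of switch 1 that has a cpu_dp assigned, and
 * dsa_is_upstream_port(ds1, 5) is true.
 */
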
/* Return true if @upstream_ds is an upstream switch of @downstream_ds, meaning
 * that the routing port from @downstream_ds to @upstream_ds is also the port
 * which @downstream_ds uses to reach its dedicated CPU.
 */
static inline bool dsa_switch_is_upstream_of(struct dsa_switch *upstream_ds,
					     struct dsa_switch *downstream_ds)
{
	int routing_port;

	if (upstream_ds == downstream_ds)
		return true;

	routing_port = dsa_routing_port(downstream_ds, upstream_ds->index);

	return dsa_is_upstream_port(downstream_ds, routing_port);
}

static inline bool dsa_port_is_vlan_filtering(const struct dsa_port *dp)
{
	const struct dsa_switch *ds = dp->ds;

	if (ds->vlan_filtering_is_global)
		return ds->vlan_filtering;
	else
		return dp->vlan_filtering;
}

static inline
struct net_device *dsa_port_to_bridge_port(const struct dsa_port *dp)
{
	if (!dp->bridge_dev)
		return NULL;

	if (dp->lag_dev)
		return dp->lag_dev;
	else if (dp->hsr_dev)
		return dp->hsr_dev;

	return dp->slave;
}

typedef int dsa_fdb_dump_cb_t(const unsigned char *addr, u16 vid,
			      bool is_static, void *data);
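
/* Illustrative only: a minimal dsa_fdb_dump_cb_t that counts the entries a
 * driver reports through its ->port_fdb_dump() op (declared below).
 */
static inline int dsa_example_fdb_count_cb(const unsigned char *addr, u16 vid,
					   bool is_static, void *data)
{
	unsigned int *count = data;

	(*count)++;

	return 0;
}
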
struct dsa_switch_ops {
	/*
	 * Tagging protocol helpers called for the CPU ports and DSA links.
	 * @get_tag_protocol retrieves the initial tagging protocol and is
	 * mandatory. Switches which can operate using multiple tagging
	 * protocols should implement @change_tag_protocol and report in
	 * @get_tag_protocol the tagger in current use.
	 */
	enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *ds,
						  int port,
						  enum dsa_tag_protocol mprot);
	int (*change_tag_protocol)(struct dsa_switch *ds, int port,
				   enum dsa_tag_protocol proto);

	int (*setup)(struct dsa_switch *ds);
	void (*teardown)(struct dsa_switch *ds);
	u32 (*get_phy_flags)(struct dsa_switch *ds, int port);

	/*
	 * Access to the switch's PHY registers.
	 */
	int (*phy_read)(struct dsa_switch *ds, int port, int regnum);
	int (*phy_write)(struct dsa_switch *ds, int port,
			 int regnum, u16 val);

	/*
	 * Link state adjustment (called from libphy)
	 */
	void (*adjust_link)(struct dsa_switch *ds, int port,
			    struct phy_device *phydev);
	void (*fixed_link_update)(struct dsa_switch *ds, int port,
				  struct fixed_phy_status *st);

	/*
	 * PHYLINK integration
	 */
	void (*phylink_validate)(struct dsa_switch *ds, int port,
				 unsigned long *supported,
				 struct phylink_link_state *state);
	int (*phylink_mac_link_state)(struct dsa_switch *ds, int port,
				      struct phylink_link_state *state);
	void (*phylink_mac_config)(struct dsa_switch *ds, int port,
				   unsigned int mode,
				   const struct phylink_link_state *state);
	void (*phylink_mac_an_restart)(struct dsa_switch *ds, int port);
	void (*phylink_mac_link_down)(struct dsa_switch *ds, int port,
				      unsigned int mode,
				      phy_interface_t interface);
	void (*phylink_mac_link_up)(struct dsa_switch *ds, int port,
				    unsigned int mode,
				    phy_interface_t interface,
				    struct phy_device *phydev,
				    int speed, int duplex,
				    bool tx_pause, bool rx_pause);
	void (*phylink_fixed_state)(struct dsa_switch *ds, int port,
				    struct phylink_link_state *state);
	/*
	 * Port statistics counters.
	 */
	void (*get_strings)(struct dsa_switch *ds, int port,
			    u32 stringset, uint8_t *data);
	void (*get_ethtool_stats)(struct dsa_switch *ds,
				  int port, uint64_t *data);
	int (*get_sset_count)(struct dsa_switch *ds, int port, int sset);
	void (*get_ethtool_phy_stats)(struct dsa_switch *ds,
				      int port, uint64_t *data);
	void (*get_stats64)(struct dsa_switch *ds, int port,
			    struct rtnl_link_stats64 *s);
	void (*self_test)(struct dsa_switch *ds, int port,
			  struct ethtool_test *etest, u64 *data);

	/*
	 * ethtool Wake-on-LAN
	 */
	void (*get_wol)(struct dsa_switch *ds, int port,
			struct ethtool_wolinfo *w);
	int (*set_wol)(struct dsa_switch *ds, int port,
		       struct ethtool_wolinfo *w);

	/*
	 * ethtool timestamp info
	 */
	int (*get_ts_info)(struct dsa_switch *ds, int port,
			   struct ethtool_ts_info *ts);

	/*
	 * Suspend and resume
	 */
	int (*suspend)(struct dsa_switch *ds);
	int (*resume)(struct dsa_switch *ds);

	/*
	 * Port enable/disable
	 */
	int (*port_enable)(struct dsa_switch *ds, int port,
			   struct phy_device *phy);
	void (*port_disable)(struct dsa_switch *ds, int port);

	/*
	 * Port's MAC EEE settings
	 */
	int (*set_mac_eee)(struct dsa_switch *ds, int port,
			   struct ethtool_eee *e);
	int (*get_mac_eee)(struct dsa_switch *ds, int port,
			   struct ethtool_eee *e);

	/* EEPROM access */
	int (*get_eeprom_len)(struct dsa_switch *ds);
	int (*get_eeprom)(struct dsa_switch *ds,
			  struct ethtool_eeprom *eeprom, u8 *data);
	int (*set_eeprom)(struct dsa_switch *ds,
			  struct ethtool_eeprom *eeprom, u8 *data);

	/*
	 * Register access.
	 */
	int (*get_regs_len)(struct dsa_switch *ds, int port);
	void (*get_regs)(struct dsa_switch *ds, int port,
			 struct ethtool_regs *regs, void *p);

	/*
	 * Upper device tracking.
	 */
	int (*port_prechangeupper)(struct dsa_switch *ds, int port,
				   struct netdev_notifier_changeupper_info *info);

	/*
	 * Bridge integration
	 */
	int (*set_ageing_time)(struct dsa_switch *ds, unsigned int msecs);
	int (*port_bridge_join)(struct dsa_switch *ds, int port,
				struct net_device *bridge);
	void (*port_bridge_leave)(struct dsa_switch *ds, int port,
				  struct net_device *bridge);
	/* Called right after .port_bridge_join() */
	int (*port_bridge_tx_fwd_offload)(struct dsa_switch *ds, int port,
					  struct net_device *bridge,
					  int bridge_num);
	/* Called right before .port_bridge_leave() */
	void (*port_bridge_tx_fwd_unoffload)(struct dsa_switch *ds, int port,
					     struct net_device *bridge,
					     int bridge_num);
	void (*port_stp_state_set)(struct dsa_switch *ds, int port,
				   u8 state);
	void (*port_fast_age)(struct dsa_switch *ds, int port);
	int (*port_pre_bridge_flags)(struct dsa_switch *ds, int port,
				     struct switchdev_brport_flags flags,
				     struct netlink_ext_ack *extack);
	int (*port_bridge_flags)(struct dsa_switch *ds, int port,
				 struct switchdev_brport_flags flags,
				 struct netlink_ext_ack *extack);
	int (*port_set_mrouter)(struct dsa_switch *ds, int port, bool mrouter,
				struct netlink_ext_ack *extack);

	/*
	 * VLAN support
	 */
	int (*port_vlan_filtering)(struct dsa_switch *ds, int port,
				   bool vlan_filtering,
				   struct netlink_ext_ack *extack);
	int (*port_vlan_add)(struct dsa_switch *ds, int port,
			     const struct switchdev_obj_port_vlan *vlan,
			     struct netlink_ext_ack *extack);
	int (*port_vlan_del)(struct dsa_switch *ds, int port,
			     const struct switchdev_obj_port_vlan *vlan);
	/*
	 * Forwarding database
	 */
	int (*port_fdb_add)(struct dsa_switch *ds, int port,
			    const unsigned char *addr, u16 vid);
	int (*port_fdb_del)(struct dsa_switch *ds, int port,
			    const unsigned char *addr, u16 vid);
	int (*port_fdb_dump)(struct dsa_switch *ds, int port,
			     dsa_fdb_dump_cb_t *cb, void *data);

	/*
	 * Multicast database
	 */
	int (*port_mdb_add)(struct dsa_switch *ds, int port,
			    const struct switchdev_obj_port_mdb *mdb);
	int (*port_mdb_del)(struct dsa_switch *ds, int port,
			    const struct switchdev_obj_port_mdb *mdb);
	/*
	 * RXNFC
	 */
	int (*get_rxnfc)(struct dsa_switch *ds, int port,
			 struct ethtool_rxnfc *nfc, u32 *rule_locs);
	int (*set_rxnfc)(struct dsa_switch *ds, int port,
			 struct ethtool_rxnfc *nfc);

	/*
	 * TC integration
	 */
	int (*cls_flower_add)(struct dsa_switch *ds, int port,
			      struct flow_cls_offload *cls, bool ingress);
	int (*cls_flower_del)(struct dsa_switch *ds, int port,
			      struct flow_cls_offload *cls, bool ingress);
	int (*cls_flower_stats)(struct dsa_switch *ds, int port,
				struct flow_cls_offload *cls, bool ingress);
	int (*port_mirror_add)(struct dsa_switch *ds, int port,
			       struct dsa_mall_mirror_tc_entry *mirror,
			       bool ingress);
	void (*port_mirror_del)(struct dsa_switch *ds, int port,
				struct dsa_mall_mirror_tc_entry *mirror);
	int (*port_policer_add)(struct dsa_switch *ds, int port,
				struct dsa_mall_policer_tc_entry *policer);
	void (*port_policer_del)(struct dsa_switch *ds, int port);
	int (*port_setup_tc)(struct dsa_switch *ds, int port,
			     enum tc_setup_type type, void *type_data);

	/*
	 * Cross-chip operations
	 */
	int (*crosschip_bridge_join)(struct dsa_switch *ds, int tree_index,
				     int sw_index, int port,
				     struct net_device *br);
	void (*crosschip_bridge_leave)(struct dsa_switch *ds, int tree_index,
				       int sw_index, int port,
				       struct net_device *br);
	int (*crosschip_lag_change)(struct dsa_switch *ds, int sw_index,
				    int port);
	int (*crosschip_lag_join)(struct dsa_switch *ds, int sw_index,
				  int port, struct net_device *lag,
				  struct netdev_lag_upper_info *info);
	int (*crosschip_lag_leave)(struct dsa_switch *ds, int sw_index,
				   int port, struct net_device *lag);

	/*
	 * PTP functionality
	 */
	int (*port_hwtstamp_get)(struct dsa_switch *ds, int port,
				 struct ifreq *ifr);
	int (*port_hwtstamp_set)(struct dsa_switch *ds, int port,
				 struct ifreq *ifr);
	void (*port_txtstamp)(struct dsa_switch *ds, int port,
			      struct sk_buff *skb);
	bool (*port_rxtstamp)(struct dsa_switch *ds, int port,
			      struct sk_buff *skb, unsigned int type);

	/* Devlink parameters, etc */
	int (*devlink_param_get)(struct dsa_switch *ds, u32 id,
				 struct devlink_param_gset_ctx *ctx);
	int (*devlink_param_set)(struct dsa_switch *ds, u32 id,
				 struct devlink_param_gset_ctx *ctx);
	int (*devlink_info_get)(struct dsa_switch *ds,
				struct devlink_info_req *req,
				struct netlink_ext_ack *extack);
	int (*devlink_sb_pool_get)(struct dsa_switch *ds,
				   unsigned int sb_index, u16 pool_index,
				   struct devlink_sb_pool_info *pool_info);
	int (*devlink_sb_pool_set)(struct dsa_switch *ds, unsigned int sb_index,
				   u16 pool_index, u32 size,
				   enum devlink_sb_threshold_type threshold_type,
				   struct netlink_ext_ack *extack);
	int (*devlink_sb_port_pool_get)(struct dsa_switch *ds, int port,
					unsigned int sb_index, u16 pool_index,
					u32 *p_threshold);
	int (*devlink_sb_port_pool_set)(struct dsa_switch *ds, int port,
					unsigned int sb_index, u16 pool_index,
					u32 threshold,
					struct netlink_ext_ack *extack);
	int (*devlink_sb_tc_pool_bind_get)(struct dsa_switch *ds, int port,
					   unsigned int sb_index, u16 tc_index,
					   enum devlink_sb_pool_type pool_type,
					   u16 *p_pool_index, u32 *p_threshold);
	int (*devlink_sb_tc_pool_bind_set)(struct dsa_switch *ds, int port,
					   unsigned int sb_index, u16 tc_index,
					   enum devlink_sb_pool_type pool_type,
					   u16 pool_index, u32 threshold,
					   struct netlink_ext_ack *extack);
	int (*devlink_sb_occ_snapshot)(struct dsa_switch *ds,
				       unsigned int sb_index);
	int (*devlink_sb_occ_max_clear)(struct dsa_switch *ds,
					unsigned int sb_index);
	int (*devlink_sb_occ_port_pool_get)(struct dsa_switch *ds, int port,
					    unsigned int sb_index, u16 pool_index,
					    u32 *p_cur, u32 *p_max);
	int (*devlink_sb_occ_tc_port_bind_get)(struct dsa_switch *ds, int port,
					       unsigned int sb_index, u16 tc_index,
					       enum devlink_sb_pool_type pool_type,
					       u32 *p_cur, u32 *p_max);

	/*
	 * MTU change functionality. Switches can also adjust their MRU through
	 * this method. By MTU, one understands the SDU (L2 payload) length.
	 * If the switch needs to account for the DSA tag on the CPU port, this
	 * method needs to do so privately.
	 */
	int (*port_change_mtu)(struct dsa_switch *ds, int port,
			       int new_mtu);
	int (*port_max_mtu)(struct dsa_switch *ds, int port);

	/*
	 * LAG integration
	 */
	int (*port_lag_change)(struct dsa_switch *ds, int port);
	int (*port_lag_join)(struct dsa_switch *ds, int port,
			     struct net_device *lag,
			     struct netdev_lag_upper_info *info);
	int (*port_lag_leave)(struct dsa_switch *ds, int port,
			      struct net_device *lag);

	/*
	 * HSR integration
	 */
	int (*port_hsr_join)(struct dsa_switch *ds, int port,
			     struct net_device *hsr);
	int (*port_hsr_leave)(struct dsa_switch *ds, int port,
			      struct net_device *hsr);

	/*
	 * MRP integration
	 */
	int (*port_mrp_add)(struct dsa_switch *ds, int port,
			    const struct switchdev_obj_mrp *mrp);
	int (*port_mrp_del)(struct dsa_switch *ds, int port,
			    const struct switchdev_obj_mrp *mrp);
	int (*port_mrp_add_ring_role)(struct dsa_switch *ds, int port,
				      const struct switchdev_obj_ring_role_mrp *mrp);
	int (*port_mrp_del_ring_role)(struct dsa_switch *ds, int port,
				      const struct switchdev_obj_ring_role_mrp *mrp);

	/*
	 * tag_8021q operations
	 */
	int (*tag_8021q_vlan_add)(struct dsa_switch *ds, int port, u16 vid,
				  u16 flags);
	int (*tag_8021q_vlan_del)(struct dsa_switch *ds, int port, u16 vid);
};

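/* A hedged sketch of what a small driver's ops table might look like;
 * ->get_tag_protocol is the only hook documented as mandatory above, the
 * rest is hardware-dependent. The foo_* symbols are hypothetical:
 *
 *	static const struct dsa_switch_ops foo_switch_ops = {
 *		.get_tag_protocol	= foo_get_tag_protocol,
 *		.setup			= foo_setup,
 *		.port_enable		= foo_port_enable,
 *		.port_disable		= foo_port_disable,
 *		.phylink_mac_config	= foo_phylink_mac_config,
 *		.phylink_mac_link_up	= foo_phylink_mac_link_up,
 *		.phylink_mac_link_down	= foo_phylink_mac_link_down,
 *	};
 */
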
#define DSA_DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes)		\
	DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes,		\
			     dsa_devlink_param_get, dsa_devlink_param_set, NULL)

int dsa_devlink_param_get(struct devlink *dl, u32 id,
			  struct devlink_param_gset_ctx *ctx);
int dsa_devlink_param_set(struct devlink *dl, u32 id,
			  struct devlink_param_gset_ctx *ctx);
int dsa_devlink_params_register(struct dsa_switch *ds,
				const struct devlink_param *params,
				size_t params_count);
void dsa_devlink_params_unregister(struct dsa_switch *ds,
				   const struct devlink_param *params,
				   size_t params_count);
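
/* A hedged usage sketch: a driver-private devlink parameter table built on
 * the wrapper above (the ID enum and "example" name are hypothetical),
 * typically registered from the driver's ->setup():
 *
 *	static const struct devlink_param foo_devlink_params[] = {
 *		DSA_DEVLINK_PARAM_DRIVER(FOO_DEVLINK_PARAM_ID_EXAMPLE,
 *					 "example", DEVLINK_PARAM_TYPE_BOOL,
 *					 BIT(DEVLINK_PARAM_CMODE_RUNTIME)),
 *	};
 *
 *	err = dsa_devlink_params_register(ds, foo_devlink_params,
 *					  ARRAY_SIZE(foo_devlink_params));
 */
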
int dsa_devlink_resource_register(struct dsa_switch *ds,
				  const char *resource_name,
				  u64 resource_size,
				  u64 resource_id,
				  u64 parent_resource_id,
				  const struct devlink_resource_size_params *size_params);

void dsa_devlink_resources_unregister(struct dsa_switch *ds);

void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds,
					   u64 resource_id,
					   devlink_resource_occ_get_t *occ_get,
					   void *occ_get_priv);
void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
					     u64 resource_id);
struct devlink_region *
dsa_devlink_region_create(struct dsa_switch *ds,
			  const struct devlink_region_ops *ops,
			  u32 region_max_snapshots, u64 region_size);
struct devlink_region *
dsa_devlink_port_region_create(struct dsa_switch *ds,
			       int port,
			       const struct devlink_port_region_ops *ops,
			       u32 region_max_snapshots, u64 region_size);
void dsa_devlink_region_destroy(struct devlink_region *region);

struct dsa_port *dsa_port_from_netdev(struct net_device *netdev);

struct dsa_devlink_priv {
	struct dsa_switch *ds;
};

static inline struct dsa_switch *dsa_devlink_to_ds(struct devlink *dl)
{
	struct dsa_devlink_priv *dl_priv = devlink_priv(dl);

	return dl_priv->ds;
}

static inline
struct dsa_switch *dsa_devlink_port_to_ds(struct devlink_port *port)
{
	struct devlink *dl = port->devlink;
	struct dsa_devlink_priv *dl_priv = devlink_priv(dl);

	return dl_priv->ds;
}

static inline int dsa_devlink_port_to_port(struct devlink_port *port)
{
	return port->index;
}

struct dsa_switch_driver {
	struct list_head list;
	const struct dsa_switch_ops *ops;
};

struct net_device *dsa_dev_to_net_device(struct device *dev);

/* Keep inline for faster access in hot path */
static inline bool netdev_uses_dsa(const struct net_device *dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	return dev->dsa_ptr && dev->dsa_ptr->rcv;
#endif
	return false;
}

static inline bool dsa_can_decode(const struct sk_buff *skb,
				  struct net_device *dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	return !dev->dsa_ptr->filter || dev->dsa_ptr->filter(skb, dev);
#endif
	return false;
}

/* All DSA tags that push the EtherType to the right (basically all except tail
 * tags, which don't break dissection) can be treated the same from the
 * perspective of the flow dissector.
 *
 * We need to return:
 *  - offset: the (B - A) difference between:
 *    A. the position of the real EtherType and
 *    B. the current skb->data (aka ETH_HLEN bytes into the frame, aka 2 bytes
 *       after the normal EtherType was supposed to be)
 *    The offset in bytes is exactly equal to the tagger overhead (and half of
 *    that, in __be16 shorts).
 *
 *  - proto: the value of the real EtherType.
 */
static inline void dsa_tag_generic_flow_dissect(const struct sk_buff *skb,
						__be16 *proto, int *offset)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	const struct dsa_device_ops *ops = skb->dev->dsa_ptr->tag_ops;
	int tag_len = ops->needed_headroom;

	*offset = tag_len;
	*proto = ((__be16 *)skb->data)[(tag_len / 2) - 1];
#endif
}

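/* For instance, with needed_headroom == 4 the helper above reports
 * *offset = 4 and reads *proto from skb->data + 2: the 4-byte tag sits where
 * the EtherType used to start (2 bytes before skb->data), so the real
 * EtherType now begins 2 bytes past skb->data.
 */
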
#if IS_ENABLED(CONFIG_NET_DSA)
static inline int __dsa_netdevice_ops_check(struct net_device *dev)
{
	int err = -EOPNOTSUPP;

	if (!dev->dsa_ptr)
		return err;

	if (!dev->dsa_ptr->netdev_ops)
		return err;

	return 0;
}

static inline int dsa_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr,
				   int cmd)
{
	const struct dsa_netdevice_ops *ops;
	int err;

	err = __dsa_netdevice_ops_check(dev);
	if (err)
		return err;

	ops = dev->dsa_ptr->netdev_ops;

	return ops->ndo_do_ioctl(dev, ifr, cmd);
}
#else
static inline int dsa_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr,
				   int cmd)
{
	return -EOPNOTSUPP;
}
#endif

void dsa_unregister_switch(struct dsa_switch *ds);
int dsa_register_switch(struct dsa_switch *ds);
struct dsa_switch *dsa_switch_find(int tree_index, int sw_index);
#ifdef CONFIG_PM_SLEEP
int dsa_switch_suspend(struct dsa_switch *ds);
int dsa_switch_resume(struct dsa_switch *ds);
#else
static inline int dsa_switch_suspend(struct dsa_switch *ds)
{
	return 0;
}
static inline int dsa_switch_resume(struct dsa_switch *ds)
{
	return 0;
}
#endif /* CONFIG_PM_SLEEP */

#if IS_ENABLED(CONFIG_NET_DSA)
bool dsa_slave_dev_check(const struct net_device *dev);
#else
static inline bool dsa_slave_dev_check(const struct net_device *dev)
{
	return false;
}
#endif

netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev);
int dsa_port_get_phy_strings(struct dsa_port *dp, uint8_t *data);
int dsa_port_get_ethtool_phy_stats(struct dsa_port *dp, uint64_t *data);
int dsa_port_get_phy_sset_count(struct dsa_port *dp);
void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up);

struct dsa_tag_driver {
	const struct dsa_device_ops *ops;
	struct list_head list;
	struct module *owner;
};

void dsa_tag_drivers_register(struct dsa_tag_driver *dsa_tag_driver_array[],
			      unsigned int count,
			      struct module *owner);
void dsa_tag_drivers_unregister(struct dsa_tag_driver *dsa_tag_driver_array[],
				unsigned int count);

#define dsa_tag_driver_module_drivers(__dsa_tag_drivers_array, __count)	\
static int __init dsa_tag_driver_module_init(void)			\
{									\
	dsa_tag_drivers_register(__dsa_tag_drivers_array, __count,	\
				 THIS_MODULE);				\
	return 0;							\
}									\
module_init(dsa_tag_driver_module_init);				\
									\
static void __exit dsa_tag_driver_module_exit(void)			\
{									\
	dsa_tag_drivers_unregister(__dsa_tag_drivers_array, __count);	\
}									\
module_exit(dsa_tag_driver_module_exit)

/**
 * module_dsa_tag_drivers() - Helper macro for registering DSA tag
 * drivers
 * @__ops_array: Array of tag driver structures
 *
 * Helper macro for DSA tag drivers which do not do anything special
 * in module init/exit. Each module may only use this macro once, and
 * calling it replaces module_init() and module_exit().
 */
#define module_dsa_tag_drivers(__ops_array)				\
dsa_tag_driver_module_drivers(__ops_array, ARRAY_SIZE(__ops_array))

#define DSA_TAG_DRIVER_NAME(__ops) dsa_tag_driver ## _ ## __ops

/* Create a static structure we can build a linked list of dsa_tag
 * drivers
 */
#define DSA_TAG_DRIVER(__ops)						\
static struct dsa_tag_driver DSA_TAG_DRIVER_NAME(__ops) = {		\
	.ops = &__ops,							\
}

/**
 * module_dsa_tag_driver() - Helper macro for registering a single DSA tag
 * driver
 * @__ops: Single tag driver structure
 *
 * Helper macro for DSA tag drivers which do not do anything special
 * in module init/exit. Each module may only use this macro once, and
 * calling it replaces module_init() and module_exit().
 */
#define module_dsa_tag_driver(__ops)					\
DSA_TAG_DRIVER(__ops);							\
									\
static struct dsa_tag_driver *dsa_tag_driver_array[] = {		\
	&DSA_TAG_DRIVER_NAME(__ops)					\
};									\
module_dsa_tag_drivers(dsa_tag_driver_array)
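
/* Putting the tagger-side pieces together, a tag driver module typically
 * ends with something like the following (hedged sketch; "foo" and
 * DSA_TAG_PROTO_FOO are hypothetical, foo_netdev_ops being the
 * struct dsa_device_ops the module defines):
 *
 *	MODULE_LICENSE("GPL");
 *	MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_FOO);
 *
 *	module_dsa_tag_driver(foo_netdev_ops);
 */
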
#endif