// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/user.c - user device handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 */

#include <linux/list.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/phylink.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/mdio.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/selftests.h>
#include <net/tc_act/tc_mirred.h>
#include <linux/if_bridge.h>
#include <linux/if_hsr.h>
#include <net/dcbnl.h>
#include <linux/netpoll.h>
#include <linux/string.h>

#include "conduit.h"
#include "dsa.h"
#include "netlink.h"
#include "port.h"
#include "switch.h"
#include "tag.h"
#include "user.h"

struct dsa_switchdev_event_work {
	struct net_device *dev;
	struct net_device *orig_dev;
	struct work_struct work;
	unsigned long event;
	/* Specific for SWITCHDEV_FDB_ADD_TO_DEVICE and
	 * SWITCHDEV_FDB_DEL_TO_DEVICE
	 */
	unsigned char addr[ETH_ALEN];
	u16 vid;
	bool host_addr;
};

enum dsa_standalone_event {
	DSA_UC_ADD,
	DSA_UC_DEL,
	DSA_MC_ADD,
	DSA_MC_DEL,
};

struct dsa_standalone_event_work {
	struct work_struct work;
	struct net_device *dev;
	enum dsa_standalone_event event;
	unsigned char addr[ETH_ALEN];
	u16 vid;
};

struct dsa_host_vlan_rx_filtering_ctx {
	struct net_device *dev;
	const unsigned char *addr;
	enum dsa_standalone_event event;
};

static bool dsa_switch_supports_uc_filtering(struct dsa_switch *ds)
{
	return ds->ops->port_fdb_add && ds->ops->port_fdb_del &&
	       ds->fdb_isolation && !ds->vlan_filtering_is_global &&
	       !ds->needs_standalone_vlan_filtering;
}

static bool dsa_switch_supports_mc_filtering(struct dsa_switch *ds)
{
	return ds->ops->port_mdb_add && ds->ops->port_mdb_del &&
	       ds->fdb_isolation && !ds->vlan_filtering_is_global &&
	       !ds->needs_standalone_vlan_filtering;
}

static void dsa_user_standalone_event_work(struct work_struct *work)
{
	struct dsa_standalone_event_work *standalone_work =
		container_of(work, struct dsa_standalone_event_work, work);
	const unsigned char *addr = standalone_work->addr;
	struct net_device *dev = standalone_work->dev;
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct switchdev_obj_port_mdb mdb;
	struct dsa_switch *ds = dp->ds;
	u16 vid = standalone_work->vid;
	int err;

	switch (standalone_work->event) {
	case DSA_UC_ADD:
		err = dsa_port_standalone_host_fdb_add(dp, addr, vid);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to add %pM vid %d to fdb: %d\n",
				dp->index, addr, vid, err);
			break;
		}
		break;

	case DSA_UC_DEL:
		err = dsa_port_standalone_host_fdb_del(dp, addr, vid);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to delete %pM vid %d from fdb: %d\n",
				dp->index, addr, vid, err);
		}

		break;
	case DSA_MC_ADD:
		ether_addr_copy(mdb.addr, addr);
		mdb.vid = vid;

		err = dsa_port_standalone_host_mdb_add(dp, &mdb);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to add %pM vid %d to mdb: %d\n",
				dp->index, addr, vid, err);
			break;
		}
		break;
	case DSA_MC_DEL:
		ether_addr_copy(mdb.addr, addr);
		mdb.vid = vid;

		err = dsa_port_standalone_host_mdb_del(dp, &mdb);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to delete %pM vid %d from mdb: %d\n",
				dp->index, addr, vid, err);
		}

		break;
	}

	kfree(standalone_work);
}

static int dsa_user_schedule_standalone_work(struct net_device *dev,
					     enum dsa_standalone_event event,
					     const unsigned char *addr,
					     u16 vid)
{
	struct dsa_standalone_event_work *standalone_work;

	standalone_work = kzalloc(sizeof(*standalone_work), GFP_ATOMIC);
	if (!standalone_work)
		return -ENOMEM;

	INIT_WORK(&standalone_work->work, dsa_user_standalone_event_work);
	standalone_work->event = event;
	standalone_work->dev = dev;

	ether_addr_copy(standalone_work->addr, addr);
	standalone_work->vid = vid;

	dsa_schedule_work(&standalone_work->work);

	return 0;
}

static int dsa_user_host_vlan_rx_filtering(void *arg, int vid)
{
	struct dsa_host_vlan_rx_filtering_ctx *ctx = arg;

	return dsa_user_schedule_standalone_work(ctx->dev, ctx->event,
						 ctx->addr, vid);
}

static int dsa_user_vlan_for_each(struct net_device *dev,
				  int (*cb)(void *arg, int vid), void *arg)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_vlan *v;
	int err;

	lockdep_assert_held(&dev->addr_list_lock);

	err = cb(arg, 0);
	if (err)
		return err;

	list_for_each_entry(v, &dp->user_vlans, list) {
		err = cb(arg, v->vid);
		if (err)
			return err;
	}

	return 0;
}

static int dsa_user_sync_uc(struct net_device *dev,
			    const unsigned char *addr)
{
	struct net_device *conduit = dsa_user_to_conduit(dev);
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_host_vlan_rx_filtering_ctx ctx = {
		.dev = dev,
		.addr = addr,
		.event = DSA_UC_ADD,
	};

	dev_uc_add(conduit, addr);

	if (!dsa_switch_supports_uc_filtering(dp->ds))
		return 0;

	return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
				      &ctx);
}

static int dsa_user_unsync_uc(struct net_device *dev,
			      const unsigned char *addr)
{
	struct net_device *conduit = dsa_user_to_conduit(dev);
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_host_vlan_rx_filtering_ctx ctx = {
		.dev = dev,
		.addr = addr,
		.event = DSA_UC_DEL,
	};

	dev_uc_del(conduit, addr);

	if (!dsa_switch_supports_uc_filtering(dp->ds))
		return 0;

	return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
				      &ctx);
}

static int dsa_user_sync_mc(struct net_device *dev,
			    const unsigned char *addr)
{
	struct net_device *conduit = dsa_user_to_conduit(dev);
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_host_vlan_rx_filtering_ctx ctx = {
		.dev = dev,
		.addr = addr,
		.event = DSA_MC_ADD,
	};

	dev_mc_add(conduit, addr);

	if (!dsa_switch_supports_mc_filtering(dp->ds))
		return 0;

	return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
				      &ctx);
}

static int dsa_user_unsync_mc(struct net_device *dev,
			      const unsigned char *addr)
{
	struct net_device *conduit = dsa_user_to_conduit(dev);
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_host_vlan_rx_filtering_ctx ctx = {
		.dev = dev,
		.addr = addr,
		.event = DSA_MC_DEL,
	};

	dev_mc_del(conduit, addr);

	if (!dsa_switch_supports_mc_filtering(dp->ds))
		return 0;

	return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
				      &ctx);
}

void dsa_user_sync_ha(struct net_device *dev)
{
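	/* Replay all currently synced unicast and multicast addresses of
	 * @dev towards the conduit and, where supported, towards the
	 * standalone host FDB/MDB, then wait for the deferred work items
	 * to complete.
	 */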
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(dev);

	netdev_for_each_synced_mc_addr(ha, dev)
		dsa_user_sync_mc(dev, ha->addr);

	netdev_for_each_synced_uc_addr(ha, dev)
		dsa_user_sync_uc(dev, ha->addr);

	netif_addr_unlock_bh(dev);

	if (dsa_switch_supports_uc_filtering(ds) ||
	    dsa_switch_supports_mc_filtering(ds))
		dsa_flush_workqueue();
}

void dsa_user_unsync_ha(struct net_device *dev)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(dev);

	netdev_for_each_synced_uc_addr(ha, dev)
		dsa_user_unsync_uc(dev, ha->addr);

	netdev_for_each_synced_mc_addr(ha, dev)
		dsa_user_unsync_mc(dev, ha->addr);

	netif_addr_unlock_bh(dev);

	if (dsa_switch_supports_uc_filtering(ds) ||
	    dsa_switch_supports_mc_filtering(ds))
		dsa_flush_workqueue();
}

/* user mii_bus handling ***************************************************/
static int dsa_user_phy_read(struct mii_bus *bus, int addr, int reg)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_read(ds, addr, reg);

	return 0xffff;
}

static int dsa_user_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_write(ds, addr, reg, val);

	return 0;
}

void dsa_user_mii_bus_init(struct dsa_switch *ds)
{
	ds->user_mii_bus->priv = (void *)ds;
	ds->user_mii_bus->name = "dsa user smi";
	ds->user_mii_bus->read = dsa_user_phy_read;
	ds->user_mii_bus->write = dsa_user_phy_write;
	snprintf(ds->user_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
		 ds->dst->index, ds->index);
	ds->user_mii_bus->parent = ds->dev;
	ds->user_mii_bus->phy_mask = ~ds->phys_mii_mask;
}


/* user device handling ****************************************************/
static int dsa_user_get_iflink(const struct net_device *dev)
{
	return READ_ONCE(dsa_user_to_conduit(dev)->ifindex);
}

static int dsa_user_open(struct net_device *dev)
{
	struct net_device *conduit = dsa_user_to_conduit(dev);
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int err;

	err = dev_open(conduit, NULL);
	if (err < 0) {
		netdev_err(dev, "failed to open conduit %s\n", conduit->name);
		goto out;
	}

	if (dsa_switch_supports_uc_filtering(ds)) {
		err = dsa_port_standalone_host_fdb_add(dp, dev->dev_addr, 0);
		if (err)
			goto out;
	}

	if (!ether_addr_equal(dev->dev_addr, conduit->dev_addr)) {
		err = dev_uc_add(conduit, dev->dev_addr);
		if (err < 0)
			goto del_host_addr;
	}

	err = dsa_port_enable_rt(dp, dev->phydev);
	if (err)
		goto del_unicast;

	return 0;

del_unicast:
	if (!ether_addr_equal(dev->dev_addr, conduit->dev_addr))
		dev_uc_del(conduit, dev->dev_addr);
del_host_addr:
	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
out:
	return err;
}

static int dsa_user_close(struct net_device *dev)
{
	struct net_device *conduit = dsa_user_to_conduit(dev);
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

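	/* Tear down in the reverse order of dsa_user_open(): disable the
	 * port first, then remove the address filters installed on the
	 * conduit and in the standalone host FDB.
	 */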
	dsa_port_disable_rt(dp);

	if (!ether_addr_equal(dev->dev_addr, conduit->dev_addr))
		dev_uc_del(conduit, dev->dev_addr);

	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);

	return 0;
}

static void dsa_user_manage_host_flood(struct net_device *dev)
{
	bool mc = dev->flags & (IFF_PROMISC | IFF_ALLMULTI);
	struct dsa_port *dp = dsa_user_to_port(dev);
	bool uc = dev->flags & IFF_PROMISC;

	dsa_port_set_host_flood(dp, uc, mc);
}

static void dsa_user_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *conduit = dsa_user_to_conduit(dev);
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(conduit,
				 dev->flags & IFF_ALLMULTI ? 1 : -1);
	if (change & IFF_PROMISC)
		dev_set_promiscuity(conduit,
				    dev->flags & IFF_PROMISC ? 1 : -1);

	if (dsa_switch_supports_uc_filtering(ds) &&
	    dsa_switch_supports_mc_filtering(ds))
		dsa_user_manage_host_flood(dev);
}

static void dsa_user_set_rx_mode(struct net_device *dev)
{
	__dev_mc_sync(dev, dsa_user_sync_mc, dsa_user_unsync_mc);
	__dev_uc_sync(dev, dsa_user_sync_uc, dsa_user_unsync_uc);
}

static int dsa_user_set_mac_address(struct net_device *dev, void *a)
{
	struct net_device *conduit = dsa_user_to_conduit(dev);
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct sockaddr *addr = a;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ds->ops->port_set_mac_address) {
		err = ds->ops->port_set_mac_address(ds, dp->index,
						    addr->sa_data);
		if (err)
			return err;
	}

	/* If the port is down, the address isn't synced yet to hardware or
	 * to the DSA conduit, so there is nothing to change.
	 */
	if (!(dev->flags & IFF_UP))
		goto out_change_dev_addr;

	if (dsa_switch_supports_uc_filtering(ds)) {
		err = dsa_port_standalone_host_fdb_add(dp, addr->sa_data, 0);
		if (err)
			return err;
	}

	if (!ether_addr_equal(addr->sa_data, conduit->dev_addr)) {
		err = dev_uc_add(conduit, addr->sa_data);
		if (err < 0)
			goto del_unicast;
	}

	if (!ether_addr_equal(dev->dev_addr, conduit->dev_addr))
		dev_uc_del(conduit, dev->dev_addr);

	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);

out_change_dev_addr:
	eth_hw_addr_set(dev, addr->sa_data);

	return 0;

del_unicast:
	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, addr->sa_data, 0);

	return err;
}

struct dsa_user_dump_ctx {
	struct net_device *dev;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
};

static int
dsa_user_port_fdb_do_dump(const unsigned char *addr, u16 vid,
			  bool is_static, void *data)
{
	struct dsa_user_dump_ctx *dump = data;
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[2])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family = AF_BRIDGE;
	ndm->ndm_pad1 = 0;
	ndm->ndm_pad2 = 0;
	ndm->ndm_flags = NTF_SELF;
	ndm->ndm_type = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
		goto nla_put_failure;

	if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}

static int
dsa_user_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
		  struct net_device *dev, struct net_device *filter_dev,
		  int *idx)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_user_dump_ctx dump = {
		.dev = dev,
		.skb = skb,
		.cb = cb,
		.idx = *idx,
	};
	int err;

	err = dsa_port_fdb_dump(dp, dsa_user_port_fdb_do_dump, &dump);
	*idx = dump.idx;

	return err;
}

static int dsa_user_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct dsa_user_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;
	int port = p->dp->index;

	/* Pass through to switch driver if it supports timestamping */
	switch (cmd) {
	case SIOCGHWTSTAMP:
		if (ds->ops->port_hwtstamp_get)
			return ds->ops->port_hwtstamp_get(ds, port, ifr);
		break;
	case SIOCSHWTSTAMP:
		if (ds->ops->port_hwtstamp_set)
			return ds->ops->port_hwtstamp_set(ds, port, ifr);
		break;
	}

	return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
}

static int dsa_user_port_attr_set(struct net_device *dev, const void *ctx,
				  const struct switchdev_attr *attr,
				  struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	int ret;

	if (ctx && ctx != dp)
		return 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_set_state(dp, attr->u.stp_state, true);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MST_STATE:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_set_mst_state(dp, &attr->u.mst_state, extack);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
					      extack);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_ageing_time(dp, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MST:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_mst_enable(dp, attr->u.mst, extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
						extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, extack);
		break;
	case SWITCHDEV_ATTR_ID_VLAN_MSTI:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_vlan_msti(dp, &attr->u.vlan_msti);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

/* Must be called under rcu_read_lock() */
static int
dsa_user_vlan_check_for_8021q_uppers(struct net_device *user,
				     const struct switchdev_obj_port_vlan *vlan)
{
	struct net_device *upper_dev;
	struct list_head *iter;

	netdev_for_each_upper_dev_rcu(user, upper_dev, iter) {
		u16 vid;

		if (!is_vlan_dev(upper_dev))
			continue;

		vid = vlan_dev_vlan_id(upper_dev);
		if (vid == vlan->vid)
			return -EBUSY;
	}

	return 0;
}

static int dsa_user_vlan_add(struct net_device *dev,
			     const struct switchdev_obj *obj,
			     struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;
	int err;

	if (dsa_port_skip_vlan_configuration(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
		return 0;
	}

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	/* Deny adding a bridge VLAN when there is already an 802.1Q upper with
	 * the same VID.
	 */
	if (br_vlan_enabled(dsa_port_bridge_dev_get(dp))) {
		rcu_read_lock();
		err = dsa_user_vlan_check_for_8021q_uppers(dev, vlan);
		rcu_read_unlock();
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Port already has a VLAN upper with this VID");
			return err;
		}
	}

	return dsa_port_vlan_add(dp, vlan, extack);
}

/* Offload a VLAN installed on the bridge or on a foreign interface by
 * installing it as a VLAN towards the CPU port.
 */
static int dsa_user_host_vlan_add(struct net_device *dev,
				  const struct switchdev_obj *obj,
				  struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct switchdev_obj_port_vlan vlan;

	/* Do nothing if this is a software bridge */
	if (!dp->bridge)
		return -EOPNOTSUPP;

	if (dsa_port_skip_vlan_configuration(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
		return 0;
	}

	vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);

	/* Even though drivers often handle CPU membership in special ways,
	 * it doesn't make sense to program a PVID, so clear this flag.
	 */
	vlan.flags &= ~BRIDGE_VLAN_INFO_PVID;

	return dsa_port_host_vlan_add(dp, &vlan, extack);
}

static int dsa_user_port_obj_add(struct net_device *dev, const void *ctx,
				 const struct switchdev_obj *obj,
				 struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	int err;

	if (ctx && ctx != dp)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_bridge_host_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			err = dsa_user_vlan_add(dev, obj, extack);
		else
			err = dsa_user_host_vlan_add(dev, obj, extack);
		break;
	case SWITCHDEV_OBJ_ID_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_add(dp, SWITCHDEV_OBJ_MRP(obj));
		break;
	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_add_ring_role(dp,
						 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int dsa_user_vlan_del(struct net_device *dev,
			     const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;

	if (dsa_port_skip_vlan_configuration(dp))
		return 0;

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	return dsa_port_vlan_del(dp, vlan);
}

static int dsa_user_host_vlan_del(struct net_device *dev,
				  const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;

	/* Do nothing if this is a software bridge */
	if (!dp->bridge)
		return -EOPNOTSUPP;

	if (dsa_port_skip_vlan_configuration(dp))
		return 0;

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	return dsa_port_host_vlan_del(dp, vlan);
}

static int dsa_user_port_obj_del(struct net_device *dev, const void *ctx,
				 const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	int err;

	if (ctx && ctx != dp)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_bridge_host_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			err = dsa_user_vlan_del(dev, obj);
		else
			err = dsa_user_host_vlan_del(dev, obj);
		break;
	case SWITCHDEV_OBJ_ID_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_del(dp, SWITCHDEV_OBJ_MRP(obj));
		break;
	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_del_ring_role(dp,
						 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static netdev_tx_t dsa_user_netpoll_send_skb(struct net_device *dev,
					     struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct dsa_user_priv *p = netdev_priv(dev);

	return netpoll_send_skb(p->netpoll, skb);
#else
	BUG();
	return NETDEV_TX_OK;
#endif
}

static void dsa_skb_tx_timestamp(struct dsa_user_priv *p,
				 struct sk_buff *skb)
{
	struct dsa_switch *ds = p->dp->ds;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		return;

	if (!ds->ops->port_txtstamp)
		return;

	ds->ops->port_txtstamp(ds, p->dp->index, skb);
}

netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
{
	/* SKBs for netpoll still need to be mangled with the protocol-specific
	 * tag to be successfully transmitted
	 */
	if (unlikely(netpoll_tx_running(dev)))
		return dsa_user_netpoll_send_skb(dev, skb);

	/* Queue the SKB for transmission on the parent interface, but
	 * do not modify its EtherType
	 */
	skb->dev = dsa_user_to_conduit(dev);
	dev_queue_xmit(skb);

	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(dsa_enqueue_skb);

static netdev_tx_t dsa_user_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dsa_user_priv *p = netdev_priv(dev);
	struct sk_buff *nskb;

	dev_sw_netstats_tx_add(dev, 1, skb->len);

	memset(skb->cb, 0, sizeof(skb->cb));

	/* Handle tx timestamp if any */
	dsa_skb_tx_timestamp(p, skb);

	if (skb_ensure_writable_head_tail(skb, dev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* needed_tailroom should still be 'warm' in the cache line from
	 * skb_ensure_writable_head_tail(), which has also ensured that
	 * padding is safe.
	 */
	if (dev->needed_tailroom)
		eth_skb_pad(skb);

	/* Transmit function may have to reallocate the original SKB,
	 * in which case it must have freed it. Only free it here on error.
	 */
	nskb = p->xmit(skb, dev);
	if (!nskb) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	return dsa_enqueue_skb(nskb, dev);
}

/* ethtool operations *******************************************************/

static void dsa_user_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *drvinfo)
{
	strscpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
	strscpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strscpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}

static int dsa_user_get_regs_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs_len)
		return ds->ops->get_regs_len(ds, dp->index);

	return -EOPNOTSUPP;
}

static void
dsa_user_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs)
		ds->ops->get_regs(ds, dp->index, regs, _p);
}

static int dsa_user_nway_reset(struct net_device *dev)
{
	struct dsa_port *dp = dsa_user_to_port(dev);

	return phylink_ethtool_nway_reset(dp->pl);
}

static int dsa_user_get_eeprom_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->cd && ds->cd->eeprom_len)
		return ds->cd->eeprom_len;

	if (ds->ops->get_eeprom_len)
		return ds->ops->get_eeprom_len(ds);

	return 0;
}

static int dsa_user_get_eeprom(struct net_device *dev,
			       struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eeprom)
		return ds->ops->get_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static int dsa_user_set_eeprom(struct net_device *dev,
			       struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->set_eeprom)
		return ds->ops->set_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static void dsa_user_get_strings(struct net_device *dev,
				 uint32_t stringset, uint8_t *data)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (stringset == ETH_SS_STATS) {
		int len = ETH_GSTRING_LEN;

		strscpy_pad(data, "tx_packets", len);
		strscpy_pad(data + len, "tx_bytes", len);
		strscpy_pad(data + 2 * len, "rx_packets", len);
		strscpy_pad(data + 3 * len, "rx_bytes", len);
		if (ds->ops->get_strings)
			ds->ops->get_strings(ds, dp->index, stringset,
					     data + 4 * len);
	} else if (stringset == ETH_SS_TEST) {
		net_selftest_get_strings(data);
	}
}

static void dsa_user_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats,
				       uint64_t *data)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct pcpu_sw_netstats *s;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

		s = per_cpu_ptr(dev->tstats, i);
		do {
			start = u64_stats_fetch_begin(&s->syncp);
			tx_packets = u64_stats_read(&s->tx_packets);
			tx_bytes = u64_stats_read(&s->tx_bytes);
			rx_packets = u64_stats_read(&s->rx_packets);
			rx_bytes = u64_stats_read(&s->rx_bytes);
		} while (u64_stats_fetch_retry(&s->syncp, start));
		data[0] += tx_packets;
		data[1] += tx_bytes;
		data[2] += rx_packets;
		data[3] += rx_bytes;
	}
	if (ds->ops->get_ethtool_stats)
		ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
}

static int dsa_user_get_sset_count(struct net_device *dev, int sset)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (sset == ETH_SS_STATS) {
		int count = 0;

		if (ds->ops->get_sset_count) {
			count = ds->ops->get_sset_count(ds, dp->index, sset);
			if (count < 0)
				return count;
		}

		return count + 4;
	} else if (sset == ETH_SS_TEST) {
		return net_selftest_get_count();
	}

	return -EOPNOTSUPP;
}

static void dsa_user_get_eth_phy_stats(struct net_device *dev,
				       struct ethtool_eth_phy_stats *phy_stats)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eth_phy_stats)
		ds->ops->get_eth_phy_stats(ds, dp->index, phy_stats);
}

static void dsa_user_get_eth_mac_stats(struct net_device *dev,
				       struct ethtool_eth_mac_stats *mac_stats)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eth_mac_stats)
		ds->ops->get_eth_mac_stats(ds, dp->index, mac_stats);
}

static void
dsa_user_get_eth_ctrl_stats(struct net_device *dev,
			    struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eth_ctrl_stats)
		ds->ops->get_eth_ctrl_stats(ds, dp->index, ctrl_stats);
}

static void
dsa_user_get_rmon_stats(struct net_device *dev,
			struct ethtool_rmon_stats *rmon_stats,
			const struct ethtool_rmon_hist_range **ranges)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_rmon_stats)
		ds->ops->get_rmon_stats(ds, dp->index, rmon_stats, ranges);
}

static void dsa_user_net_selftest(struct net_device *ndev,
				  struct ethtool_test *etest, u64 *buf)
{
	struct dsa_port *dp = dsa_user_to_port(ndev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->self_test) {
		ds->ops->self_test(ds, dp->index, etest, buf);
		return;
	}

	net_selftest(ndev, etest, buf);
}

static int dsa_user_get_mm(struct net_device *dev,
			   struct ethtool_mm_state *state)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->get_mm)
		return -EOPNOTSUPP;

	return ds->ops->get_mm(ds, dp->index, state);
}

static int dsa_user_set_mm(struct net_device *dev, struct ethtool_mm_cfg *cfg,
			   struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->set_mm)
		return -EOPNOTSUPP;

	return ds->ops->set_mm(ds, dp->index, cfg, extack);
}

static void dsa_user_get_mm_stats(struct net_device *dev,
				  struct ethtool_mm_stats *stats)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_mm_stats)
		ds->ops->get_mm_stats(ds, dp->index, stats);
}
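
/* Wake-on-LAN requests are passed to phylink and additionally to the
 * switch driver where ds->ops->get_wol/set_wol are implemented. EEE
 * requires both an EEE-capable PHY and MAC on the port.
 */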

static void dsa_user_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	phylink_ethtool_get_wol(dp->pl, w);

	if (ds->ops->get_wol)
		ds->ops->get_wol(ds, dp->index, w);
}

static int dsa_user_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret = -EOPNOTSUPP;

	phylink_ethtool_set_wol(dp->pl, w);

	if (ds->ops->set_wol)
		ret = ds->ops->set_wol(ds, dp->index, w);

	return ret;
}

static int dsa_user_set_eee(struct net_device *dev, struct ethtool_keee *e)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->set_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->set_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_set_eee(dp->pl, e);
}

static int dsa_user_get_eee(struct net_device *dev, struct ethtool_keee *e)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->get_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->get_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_get_eee(dp->pl, e);
}

static int dsa_user_get_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_user_to_port(dev);

	return phylink_ethtool_ksettings_get(dp->pl, cmd);
}

static int dsa_user_set_link_ksettings(struct net_device *dev,
				       const struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_user_to_port(dev);

	return phylink_ethtool_ksettings_set(dp->pl, cmd);
}

static void dsa_user_get_pause_stats(struct net_device *dev,
				     struct ethtool_pause_stats *pause_stats)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_pause_stats)
		ds->ops->get_pause_stats(ds, dp->index, pause_stats);
}

static void dsa_user_get_pauseparam(struct net_device *dev,
				    struct ethtool_pauseparam *pause)
{
	struct dsa_port *dp = dsa_user_to_port(dev);

	phylink_ethtool_get_pauseparam(dp->pl, pause);
}

static int dsa_user_set_pauseparam(struct net_device *dev,
				   struct ethtool_pauseparam *pause)
{
	struct dsa_port *dp = dsa_user_to_port(dev);

	return phylink_ethtool_set_pauseparam(dp->pl, pause);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static int dsa_user_netpoll_setup(struct net_device *dev,
				  struct netpoll_info *ni)
{
	struct net_device *conduit = dsa_user_to_conduit(dev);
	struct dsa_user_priv *p = netdev_priv(dev);
	struct netpoll *netpoll;
	int err = 0;

	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
	if (!netpoll)
		return -ENOMEM;

	err = __netpoll_setup(netpoll, conduit);
	if (err) {
		kfree(netpoll);
		goto out;
	}

	p->netpoll = netpoll;
out:
	return err;
}

static void dsa_user_netpoll_cleanup(struct net_device *dev)
{
	struct dsa_user_priv *p = netdev_priv(dev);
	struct netpoll *netpoll = p->netpoll;

	if (!netpoll)
		return;

	p->netpoll = NULL;

	__netpoll_free(netpoll);
}

static void dsa_user_poll_controller(struct net_device *dev)
{
}
#endif

static struct dsa_mall_tc_entry *
dsa_user_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
{
	struct dsa_user_priv *p = netdev_priv(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

static int
dsa_user_add_cls_matchall_mirred(struct net_device *dev,
				 struct tc_cls_matchall_offload *cls,
				 bool ingress)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_user_priv *p = netdev_priv(dev);
	struct dsa_mall_mirror_tc_entry *mirror;
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	struct dsa_port *to_dp;
	int err;

	if (!ds->ops->port_mirror_add)
		return -EOPNOTSUPP;

	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
					      cls->common.extack))
		return -EOPNOTSUPP;

	act = &cls->rule->action.entries[0];

	if (!act->dev)
		return -EINVAL;

	if (!dsa_user_dev_check(act->dev))
		return -EOPNOTSUPP;

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
	mirror = &mall_tc_entry->mirror;

	to_dp = dsa_user_to_port(act->dev);

	mirror->to_local_port = to_dp->index;
	mirror->ingress = ingress;

	err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress, extack);
	if (err) {
		kfree(mall_tc_entry);
		return err;
	}

	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);

	return err;
}

static int
dsa_user_add_cls_matchall_police(struct net_device *dev,
				 struct tc_cls_matchall_offload *cls,
				 bool ingress)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_user_priv *p = netdev_priv(dev);
	struct dsa_mall_policer_tc_entry *policer;
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	int err;

	if (!ds->ops->port_policer_add) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Policing offload not implemented");
		return -EOPNOTSUPP;
	}

	if (!ingress) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only supported on ingress qdisc");
		return -EOPNOTSUPP;
	}

	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
					      cls->common.extack))
		return -EOPNOTSUPP;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) {
		if (mall_tc_entry->type == DSA_PORT_MALL_POLICER) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only one port policer allowed");
			return -EEXIST;
		}
	}

	act = &cls->rule->action.entries[0];

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

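	/* Fill the policer entry from the tc police action; the entry is
	 * only tracked if committing it to hardware succeeds.
	 */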
	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = DSA_PORT_MALL_POLICER;
	policer = &mall_tc_entry->policer;
	policer->rate_bytes_per_sec = act->police.rate_bytes_ps;
	policer->burst = act->police.burst;

	err = ds->ops->port_policer_add(ds, dp->index, policer);
	if (err) {
		kfree(mall_tc_entry);
		return err;
	}

	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);

	return err;
}

static int dsa_user_add_cls_matchall(struct net_device *dev,
				     struct tc_cls_matchall_offload *cls,
				     bool ingress)
{
	int err = -EOPNOTSUPP;

	if (cls->common.protocol == htons(ETH_P_ALL) &&
	    flow_offload_has_one_action(&cls->rule->action) &&
	    cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED)
		err = dsa_user_add_cls_matchall_mirred(dev, cls, ingress);
	else if (flow_offload_has_one_action(&cls->rule->action) &&
		 cls->rule->action.entries[0].id == FLOW_ACTION_POLICE)
		err = dsa_user_add_cls_matchall_police(dev, cls, ingress);

	return err;
}

static void dsa_user_del_cls_matchall(struct net_device *dev,
				      struct tc_cls_matchall_offload *cls)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;

	mall_tc_entry = dsa_user_mall_tc_entry_find(dev, cls->cookie);
	if (!mall_tc_entry)
		return;

	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case DSA_PORT_MALL_MIRROR:
		if (ds->ops->port_mirror_del)
			ds->ops->port_mirror_del(ds, dp->index,
						 &mall_tc_entry->mirror);
		break;
	case DSA_PORT_MALL_POLICER:
		if (ds->ops->port_policer_del)
			ds->ops->port_policer_del(ds, dp->index);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

static int dsa_user_setup_tc_cls_matchall(struct net_device *dev,
					  struct tc_cls_matchall_offload *cls,
					  bool ingress)
{
	if (cls->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls->command) {
	case TC_CLSMATCHALL_REPLACE:
		return dsa_user_add_cls_matchall(dev, cls, ingress);
	case TC_CLSMATCHALL_DESTROY:
		dsa_user_del_cls_matchall(dev, cls);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_user_add_cls_flower(struct net_device *dev,
				   struct flow_cls_offload *cls,
				   bool ingress)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_add)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_add(ds, port, cls, ingress);
}

static int dsa_user_del_cls_flower(struct net_device *dev,
				   struct flow_cls_offload *cls,
				   bool ingress)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_del)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_del(ds, port, cls, ingress);
}

static int dsa_user_stats_cls_flower(struct net_device *dev,
				     struct flow_cls_offload *cls,
				     bool ingress)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_stats)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_stats(ds, port, cls, ingress);
}

static int
dsa_user_setup_tc_cls_flower(struct net_device *dev,
			     struct flow_cls_offload *cls,
			     bool ingress)
{
	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		return dsa_user_add_cls_flower(dev, cls, ingress);
	case FLOW_CLS_DESTROY:
		return dsa_user_del_cls_flower(dev, cls, ingress);
	case FLOW_CLS_STATS:
		return dsa_user_stats_cls_flower(dev, cls, ingress);
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_user_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				      void *cb_priv, bool ingress)
{
	struct net_device *dev = cb_priv;

	if (!tc_can_offload(dev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return dsa_user_setup_tc_cls_matchall(dev, type_data, ingress);
	case TC_SETUP_CLSFLOWER:
		return dsa_user_setup_tc_cls_flower(dev, type_data, ingress);
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_user_setup_tc_block_cb_ig(enum tc_setup_type type,
					 void *type_data, void *cb_priv)
{
	return dsa_user_setup_tc_block_cb(type, type_data, cb_priv, true);
}

static int dsa_user_setup_tc_block_cb_eg(enum tc_setup_type type,
					 void *type_data, void *cb_priv)
{
	return dsa_user_setup_tc_block_cb(type, type_data, cb_priv, false);
}

static LIST_HEAD(dsa_user_block_cb_list);

static int dsa_user_setup_tc_block(struct net_device *dev,
				   struct flow_block_offload *f)
{
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		cb = dsa_user_setup_tc_block_cb_ig;
	else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		cb = dsa_user_setup_tc_block_cb_eg;
	else
		return -EOPNOTSUPP;

	f->driver_block_list = &dsa_user_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, dev, &dsa_user_block_cb_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &dsa_user_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_user_setup_ft_block(struct dsa_switch *ds, int port,
				   void *type_data)
{
	struct net_device *conduit = dsa_port_to_conduit(dsa_to_port(ds, port));

	if (!conduit->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	return conduit->netdev_ops->ndo_setup_tc(conduit, TC_SETUP_FT, type_data);
}

static int dsa_user_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	switch (type) {
	case TC_SETUP_BLOCK:
		return dsa_user_setup_tc_block(dev, type_data);
	case TC_SETUP_FT:
		return dsa_user_setup_ft_block(ds, dp->index, type_data);
	default:
		break;
	}

	if (!ds->ops->port_setup_tc)
		return -EOPNOTSUPP;

	return ds->ops->port_setup_tc(ds, dp->index, type, type_data);
}

static int dsa_user_get_rxnfc(struct net_device *dev,
			      struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->get_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
}

static int dsa_user_set_rxnfc(struct net_device *dev,
			      struct ethtool_rxnfc *nfc)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->set_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->set_rxnfc(ds, dp->index, nfc);
}

static int dsa_user_get_ts_info(struct net_device *dev,
				struct ethtool_ts_info *ts)
{
	struct dsa_user_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;

	if (!ds->ops->get_ts_info)
		return -EOPNOTSUPP;

	return ds->ops->get_ts_info(ds, p->dp->index, ts);
}

static int dsa_user_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
				    u16 vid)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};
	struct netlink_ext_ack extack = {0};
	struct dsa_switch *ds = dp->ds;
	struct netdev_hw_addr *ha;
	struct dsa_vlan *v;
	int ret;

	/* User port... */
	ret = dsa_port_vlan_add(dp, &vlan, &extack);
	if (ret) {
		if (extack._msg)
			netdev_err(dev, "%s\n", extack._msg);
		return ret;
	}

	/* And CPU port... */
	ret = dsa_port_host_vlan_add(dp, &vlan, &extack);
	if (ret) {
		if (extack._msg)
			netdev_err(dev, "CPU port %d: %s\n", dp->cpu_dp->index,
				   extack._msg);
		return ret;
	}

	if (!dsa_switch_supports_uc_filtering(ds) &&
	    !dsa_switch_supports_mc_filtering(ds))
		return 0;

	v = kzalloc(sizeof(*v), GFP_KERNEL);
	if (!v) {
		ret = -ENOMEM;
		goto rollback;
	}

	netif_addr_lock_bh(dev);

	v->vid = vid;
	list_add_tail(&v->list, &dp->user_vlans);

	if (dsa_switch_supports_mc_filtering(ds)) {
		netdev_for_each_synced_mc_addr(ha, dev) {
			dsa_user_schedule_standalone_work(dev, DSA_MC_ADD,
							  ha->addr, vid);
		}
	}

	if (dsa_switch_supports_uc_filtering(ds)) {
		netdev_for_each_synced_uc_addr(ha, dev) {
			dsa_user_schedule_standalone_work(dev, DSA_UC_ADD,
							  ha->addr, vid);
		}
	}

	netif_addr_unlock_bh(dev);

	dsa_flush_workqueue();

	return 0;

rollback:
	dsa_port_host_vlan_del(dp, &vlan);
	dsa_port_vlan_del(dp, &vlan);

	return ret;
}

static int dsa_user_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
				     u16 vid)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct switchdev_obj_port_vlan vlan = {
		.vid = vid,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};
	struct dsa_switch *ds = dp->ds;
	struct netdev_hw_addr *ha;
	struct dsa_vlan *v;
	int err;

	err = dsa_port_vlan_del(dp, &vlan);
	if (err)
		return err;

	err = dsa_port_host_vlan_del(dp, &vlan);
	if (err)
		return err;

	if (!dsa_switch_supports_uc_filtering(ds) &&
	    !dsa_switch_supports_mc_filtering(ds))
		return 0;

	netif_addr_lock_bh(dev);

	v = dsa_vlan_find(&dp->user_vlans, &vlan);
	if (!v) {
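		/* This VID was never tracked for standalone address filtering */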
		netif_addr_unlock_bh(dev);
		return -ENOENT;
	}

	list_del(&v->list);
	kfree(v);

	if (dsa_switch_supports_mc_filtering(ds)) {
		netdev_for_each_synced_mc_addr(ha, dev) {
			dsa_user_schedule_standalone_work(dev, DSA_MC_DEL,
							  ha->addr, vid);
		}
	}

	if (dsa_switch_supports_uc_filtering(ds)) {
		netdev_for_each_synced_uc_addr(ha, dev) {
			dsa_user_schedule_standalone_work(dev, DSA_UC_DEL,
							  ha->addr, vid);
		}
	}

	netif_addr_unlock_bh(dev);

	dsa_flush_workqueue();

	return 0;
}

static int dsa_user_restore_vlan(struct net_device *vdev, int vid, void *arg)
{
	__be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);

	return dsa_user_vlan_rx_add_vid(arg, proto, vid);
}

static int dsa_user_clear_vlan(struct net_device *vdev, int vid, void *arg)
{
	__be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);

	return dsa_user_vlan_rx_kill_vid(arg, proto, vid);
}

/* Keep the VLAN RX filtering list in sync with the hardware only if VLAN
 * filtering is enabled. The baseline is that only ports that offload a
 * VLAN-aware bridge are VLAN-aware, and standalone ports are VLAN-unaware,
 * but there are exceptions for quirky hardware.
 *
 * If ds->vlan_filtering_is_global = true, then standalone ports which share
 * the same switch with other ports that offload a VLAN-aware bridge are also
 * inevitably VLAN-aware.
 *
 * To summarize, a DSA switch port offloads:
 *
 * - If standalone (this includes software bridge, software LAG):
 *     - if ds->needs_standalone_vlan_filtering = true, OR if
 *       (ds->vlan_filtering_is_global = true AND there are bridges spanning
 *       this switch chip which have vlan_filtering=1)
 *         - the 8021q upper VLANs
 *     - else (standalone VLAN filtering is not needed, VLAN filtering is not
 *       global, or it is, but no port is under a VLAN-aware bridge):
 *         - no VLAN (any 8021q upper is a software VLAN)
 *
 * - If under a vlan_filtering=0 bridge which it offloads:
 *     - if ds->configure_vlan_while_not_filtering = true (default):
 *         - the bridge VLANs. These VLANs are committed to hardware but
 *           inactive.
 *     - else (deprecated):
 *         - no VLAN. The bridge VLANs are not restored when VLAN awareness is
 *           enabled, so this behavior is broken and discouraged.
 *
 * - If under a vlan_filtering=1 bridge which it offloads:
 *     - the bridge VLANs
 *     - the 8021q upper VLANs
 */
int dsa_user_manage_vlan_filtering(struct net_device *user,
				   bool vlan_filtering)
{
	int err;

	if (vlan_filtering) {
		user->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

		err = vlan_for_each(user, dsa_user_restore_vlan, user);
		if (err) {
			vlan_for_each(user, dsa_user_clear_vlan, user);
			user->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
			return err;
		}
	} else {
		err = vlan_for_each(user, dsa_user_clear_vlan, user);
		if (err)
			return err;

		user->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	return 0;
}

struct dsa_hw_port {
	struct list_head list;
	struct net_device *dev;
	int old_mtu;
};

static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu)
{
	const struct dsa_hw_port *p;
	int err;

	list_for_each_entry(p, hw_port_list, list) {
		if (p->dev->mtu == mtu)
			continue;

		err = dev_set_mtu(p->dev, mtu);
		if (err)
			goto rollback;
	}

	return 0;

rollback:
	list_for_each_entry_continue_reverse(p, hw_port_list, list) {
		if (p->dev->mtu == p->old_mtu)
			continue;

		if (dev_set_mtu(p->dev, p->old_mtu))
			netdev_err(p->dev, "Failed to restore MTU\n");
	}

	return err;
}

static void dsa_hw_port_list_free(struct list_head *hw_port_list)
{
	struct dsa_hw_port *p, *n;

	list_for_each_entry_safe(p, n, hw_port_list, list)
		kfree(p);
}

/* Make the hardware datapath to/from @dev limited to a common MTU */
static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
{
	struct list_head hw_port_list;
	struct dsa_switch_tree *dst;
	int min_mtu = ETH_MAX_MTU;
	struct dsa_port *other_dp;
	int err;

	if (!dp->ds->mtu_enforcement_ingress)
		return;

	if (!dp->bridge)
		return;

	INIT_LIST_HEAD(&hw_port_list);

	/* Populate the list of ports that are part of the same bridge
	 * as the newly added/modified port
	 */
	list_for_each_entry(dst, &dsa_tree_list, list) {
		list_for_each_entry(other_dp, &dst->ports, list) {
			struct dsa_hw_port *hw_port;
			struct net_device *user;

			if (other_dp->type != DSA_PORT_TYPE_USER)
				continue;

			if (!dsa_port_bridge_same(dp, other_dp))
				continue;

			if (!other_dp->ds->mtu_enforcement_ingress)
				continue;

			user = other_dp->user;

			if (min_mtu > user->mtu)
				min_mtu = user->mtu;

			hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL);
			if (!hw_port)
				goto out;

			hw_port->dev = user;
			hw_port->old_mtu = user->mtu;

			list_add(&hw_port->list, &hw_port_list);
		}
	}

	/* Attempt to configure the entire hardware bridge to the newly added
	 * interface's MTU first, regardless of whether the intention of the
	 * user was to raise or lower it.
	 */
	err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->user->mtu);
	if (!err)
		goto out;

	/* Clearly that didn't work out so well, so just set the minimum MTU on
	 * all hardware bridge ports now. If this fails too, then all ports will
	 * still have their old MTU rolled back anyway.
	 */
	dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu);

out:
	dsa_hw_port_list_free(&hw_port_list);
}

int dsa_user_change_mtu(struct net_device *dev, int new_mtu)
{
	struct net_device *conduit = dsa_user_to_conduit(dev);
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	int largest_mtu = 0;
	int new_conduit_mtu;
	int old_conduit_mtu;
	int mtu_limit;
	int overhead;
	int cpu_mtu;
	int err;

	if (!ds->ops->port_change_mtu)
		return -EOPNOTSUPP;

	dsa_tree_for_each_user_port(other_dp, ds->dst) {
		int user_mtu;

		/* During probe, this function will be called for each user
		 * device, while not all of them have been allocated. That's
		 * ok, it doesn't change what the maximum is, so ignore it.
		 */
		if (!other_dp->user)
			continue;

		/* Pretend that we already applied the setting, which we
		 * actually haven't (still haven't done all integrity checks)
		 */
		if (dp == other_dp)
			user_mtu = new_mtu;
		else
			user_mtu = other_dp->user->mtu;

		if (largest_mtu < user_mtu)
			largest_mtu = user_mtu;
	}

	overhead = dsa_tag_protocol_overhead(cpu_dp->tag_ops);
	mtu_limit = min_t(int, conduit->max_mtu, dev->max_mtu + overhead);
	old_conduit_mtu = conduit->mtu;
	new_conduit_mtu = largest_mtu + overhead;
	if (new_conduit_mtu > mtu_limit)
		return -ERANGE;

	/* If the conduit MTU isn't over limit, there's no need to check the CPU
	 * MTU, since that surely isn't either.
	 */
	cpu_mtu = largest_mtu;

	/* Start applying stuff */
	if (new_conduit_mtu != old_conduit_mtu) {
		err = dev_set_mtu(conduit, new_conduit_mtu);
		if (err < 0)
			goto out_conduit_failed;

		/* We only need to propagate the MTU of the CPU port to
		 * upstream switches, so emit a notifier which updates them.
int dsa_user_change_mtu(struct net_device *dev, int new_mtu)
{
	struct net_device *conduit = dsa_user_to_conduit(dev);
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	int largest_mtu = 0;
	int new_conduit_mtu;
	int old_conduit_mtu;
	int mtu_limit;
	int overhead;
	int cpu_mtu;
	int err;

	if (!ds->ops->port_change_mtu)
		return -EOPNOTSUPP;

	dsa_tree_for_each_user_port(other_dp, ds->dst) {
		int user_mtu;

		/* During probe, this function will be called for each user
		 * device, while not all of them have been allocated. That's
		 * ok, it doesn't change what the maximum is, so ignore it.
		 */
		if (!other_dp->user)
			continue;

		/* Pretend that we already applied the setting, which we
		 * actually haven't (still haven't done all integrity checks)
		 */
		if (dp == other_dp)
			user_mtu = new_mtu;
		else
			user_mtu = other_dp->user->mtu;

		if (largest_mtu < user_mtu)
			largest_mtu = user_mtu;
	}

	overhead = dsa_tag_protocol_overhead(cpu_dp->tag_ops);
	mtu_limit = min_t(int, conduit->max_mtu, dev->max_mtu + overhead);
	old_conduit_mtu = conduit->mtu;
	new_conduit_mtu = largest_mtu + overhead;
	if (new_conduit_mtu > mtu_limit)
		return -ERANGE;

	/* If the conduit MTU isn't over limit, there's no need to check the CPU
	 * MTU, since that surely isn't either.
	 */
	cpu_mtu = largest_mtu;

	/* Start applying stuff */
	if (new_conduit_mtu != old_conduit_mtu) {
		err = dev_set_mtu(conduit, new_conduit_mtu);
		if (err < 0)
			goto out_conduit_failed;

		/* We only need to propagate the MTU of the CPU port to
		 * upstream switches, so emit a notifier which updates them.
		 */
		err = dsa_port_mtu_change(cpu_dp, cpu_mtu);
		if (err)
			goto out_cpu_failed;
	}

	err = ds->ops->port_change_mtu(ds, dp->index, new_mtu);
	if (err)
		goto out_port_failed;

	WRITE_ONCE(dev->mtu, new_mtu);

	dsa_bridge_mtu_normalization(dp);

	return 0;

out_port_failed:
	if (new_conduit_mtu != old_conduit_mtu)
		dsa_port_mtu_change(cpu_dp, old_conduit_mtu - overhead);
out_cpu_failed:
	if (new_conduit_mtu != old_conduit_mtu)
		dev_set_mtu(conduit, old_conduit_mtu);
out_conduit_failed:
	return err;
}

static int __maybe_unused
dsa_user_dcbnl_set_apptrust(struct net_device *dev, u8 *sel, int nsel)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_set_apptrust)
		return -EOPNOTSUPP;

	return ds->ops->port_set_apptrust(ds, port, sel, nsel);
}

static int __maybe_unused
dsa_user_dcbnl_get_apptrust(struct net_device *dev, u8 *sel, int *nsel)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_get_apptrust)
		return -EOPNOTSUPP;

	return ds->ops->port_get_apptrust(ds, port, sel, nsel);
}
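/* Illustrative userspace usage for the apptrust handlers above (iproute2
 * "dcb" tool syntax, which may differ between versions; "lan0" is a
 * hypothetical port name):
 *
 *	dcb apptrust set dev lan0 order dscp pcp
 *	dcb apptrust show dev lan0
 *
 * These requests reach the handlers through the dcbnl_setapptrust /
 * dcbnl_getapptrust ops in dsa_user_dcbnl_ops.
 */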
static int __maybe_unused
dsa_user_dcbnl_set_default_prio(struct net_device *dev, struct dcb_app *app)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	unsigned long mask, new_prio;
	int err, port = dp->index;

	if (!ds->ops->port_set_default_prio)
		return -EOPNOTSUPP;

	err = dcb_ieee_setapp(dev, app);
	if (err)
		return err;

	mask = dcb_ieee_getapp_mask(dev, app);
	new_prio = __fls(mask);

	err = ds->ops->port_set_default_prio(ds, port, new_prio);
	if (err) {
		dcb_ieee_delapp(dev, app);
		return err;
	}

	return 0;
}

/* Update the DSCP prio entries on all user ports of the switch in case
 * the switch supports global DSCP prio instead of per-port DSCP prios.
 */
static int dsa_user_dcbnl_ieee_global_dscp_setdel(struct net_device *dev,
						  struct dcb_app *app, bool del)
{
	int (*setdel)(struct net_device *dev, struct dcb_app *app);
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	int err, restore_err;

	if (del)
		setdel = dcb_ieee_delapp;
	else
		setdel = dcb_ieee_setapp;

	dsa_switch_for_each_user_port(other_dp, ds) {
		struct net_device *user = other_dp->user;

		if (!user || user == dev)
			continue;

		err = setdel(user, app);
		if (err)
			goto err_try_to_restore;
	}

	return 0;

err_try_to_restore:

	/* Revert logic to restore previous state of app entries */
	if (!del)
		setdel = dcb_ieee_delapp;
	else
		setdel = dcb_ieee_setapp;

	dsa_switch_for_each_user_port_continue_reverse(other_dp, ds) {
		struct net_device *user = other_dp->user;

		if (!user || user == dev)
			continue;

		restore_err = setdel(user, app);
		if (restore_err)
			netdev_err(user, "Failed to restore DSCP prio entry configuration\n");
	}

	return err;
}

static int __maybe_unused
dsa_user_dcbnl_add_dscp_prio(struct net_device *dev, struct dcb_app *app)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	unsigned long mask, new_prio;
	int err, port = dp->index;
	u8 dscp = app->protocol;

	if (!ds->ops->port_add_dscp_prio)
		return -EOPNOTSUPP;

	if (dscp >= 64) {
		netdev_err(dev, "DSCP APP entry with protocol value %u is invalid\n",
			   dscp);
		return -EINVAL;
	}

	err = dcb_ieee_setapp(dev, app);
	if (err)
		return err;

	mask = dcb_ieee_getapp_mask(dev, app);
	new_prio = __fls(mask);

	err = ds->ops->port_add_dscp_prio(ds, port, dscp, new_prio);
	if (err) {
		dcb_ieee_delapp(dev, app);
		return err;
	}

	if (!ds->dscp_prio_mapping_is_global)
		return 0;

	err = dsa_user_dcbnl_ieee_global_dscp_setdel(dev, app, false);
	if (err) {
		if (ds->ops->port_del_dscp_prio)
			ds->ops->port_del_dscp_prio(ds, port, dscp, new_prio);
		dcb_ieee_delapp(dev, app);
		return err;
	}

	return 0;
}

static int __maybe_unused dsa_user_dcbnl_ieee_setapp(struct net_device *dev,
						     struct dcb_app *app)
{
	switch (app->selector) {
	case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
		switch (app->protocol) {
		case 0:
			return dsa_user_dcbnl_set_default_prio(dev, app);
		default:
			return -EOPNOTSUPP;
		}
		break;
	case IEEE_8021QAZ_APP_SEL_DSCP:
		return dsa_user_dcbnl_add_dscp_prio(dev, app);
	default:
		return -EOPNOTSUPP;
	}
}
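/* Illustrative userspace usage for the APP table handlers above (iproute2
 * "dcb" tool syntax, which may vary by version; "lan0" is a hypothetical
 * port name):
 *
 *	dcb app add dev lan0 default-prio 3
 *	dcb app add dev lan0 dscp-prio 24:4
 *
 * The first request maps to selector IEEE_8021QAZ_APP_SEL_ETHERTYPE with
 * protocol 0, the second to IEEE_8021QAZ_APP_SEL_DSCP.
 */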
static int __maybe_unused
dsa_user_dcbnl_del_default_prio(struct net_device *dev, struct dcb_app *app)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	unsigned long mask, new_prio;
	int err, port = dp->index;

	if (!ds->ops->port_set_default_prio)
		return -EOPNOTSUPP;

	err = dcb_ieee_delapp(dev, app);
	if (err)
		return err;

	mask = dcb_ieee_getapp_mask(dev, app);
	new_prio = mask ? __fls(mask) : 0;

	err = ds->ops->port_set_default_prio(ds, port, new_prio);
	if (err) {
		dcb_ieee_setapp(dev, app);
		return err;
	}

	return 0;
}

static int __maybe_unused
dsa_user_dcbnl_del_dscp_prio(struct net_device *dev, struct dcb_app *app)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int err, port = dp->index;
	u8 dscp = app->protocol;

	if (!ds->ops->port_del_dscp_prio)
		return -EOPNOTSUPP;

	err = dcb_ieee_delapp(dev, app);
	if (err)
		return err;

	err = ds->ops->port_del_dscp_prio(ds, port, dscp, app->priority);
	if (err) {
		dcb_ieee_setapp(dev, app);
		return err;
	}

	if (!ds->dscp_prio_mapping_is_global)
		return 0;

	err = dsa_user_dcbnl_ieee_global_dscp_setdel(dev, app, true);
	if (err) {
		if (ds->ops->port_add_dscp_prio)
			ds->ops->port_add_dscp_prio(ds, port, dscp,
						    app->priority);
		dcb_ieee_setapp(dev, app);
		return err;
	}

	return 0;
}

static int __maybe_unused dsa_user_dcbnl_ieee_delapp(struct net_device *dev,
						     struct dcb_app *app)
{
	switch (app->selector) {
	case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
		switch (app->protocol) {
		case 0:
			return dsa_user_dcbnl_del_default_prio(dev, app);
		default:
			return -EOPNOTSUPP;
		}
		break;
	case IEEE_8021QAZ_APP_SEL_DSCP:
		return dsa_user_dcbnl_del_dscp_prio(dev, app);
	default:
		return -EOPNOTSUPP;
	}
}

/* Pre-populate the DCB application priority table with the priorities
 * configured during switch setup, which we read from hardware here.
 */
static int dsa_user_dcbnl_init(struct net_device *dev)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	int err;

	if (ds->ops->port_get_default_prio) {
		int prio = ds->ops->port_get_default_prio(ds, port);
		struct dcb_app app = {
			.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
			.protocol = 0,
			.priority = prio,
		};

		if (prio < 0)
			return prio;

		err = dcb_ieee_setapp(dev, &app);
		if (err)
			return err;
	}

	if (ds->ops->port_get_dscp_prio) {
		int protocol;

		for (protocol = 0; protocol < 64; protocol++) {
			struct dcb_app app = {
				.selector = IEEE_8021QAZ_APP_SEL_DSCP,
				.protocol = protocol,
			};
			int prio;

			prio = ds->ops->port_get_dscp_prio(ds, port, protocol);
			if (prio == -EOPNOTSUPP)
				continue;
			if (prio < 0)
				return prio;

			app.priority = prio;

			err = dcb_ieee_setapp(dev, &app);
			if (err)
				return err;
		}
	}

	return 0;
}
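/* Sketch of the driver-side contract assumed by dsa_user_dcbnl_init()
 * above ("foo" names are hypothetical, illustrative only):
 * ->port_get_dscp_prio() returns the priority for a DSCP value,
 * -EOPNOTSUPP for table entries it does not implement, or another
 * negative errno on failure.
 *
 *	static int foo_port_get_dscp_prio(struct dsa_switch *ds, int port,
 *					  u8 dscp)
 *	{
 *		if (!foo_dscp_entry_valid(ds, dscp))
 *			return -EOPNOTSUPP;
 *
 *		return foo_read_dscp_prio(ds, port, dscp);
 *	}
 */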
static const struct ethtool_ops dsa_user_ethtool_ops = {
	.get_drvinfo		= dsa_user_get_drvinfo,
	.get_regs_len		= dsa_user_get_regs_len,
	.get_regs		= dsa_user_get_regs,
	.nway_reset		= dsa_user_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= dsa_user_get_eeprom_len,
	.get_eeprom		= dsa_user_get_eeprom,
	.set_eeprom		= dsa_user_set_eeprom,
	.get_strings		= dsa_user_get_strings,
	.get_ethtool_stats	= dsa_user_get_ethtool_stats,
	.get_sset_count		= dsa_user_get_sset_count,
	.get_eth_phy_stats	= dsa_user_get_eth_phy_stats,
	.get_eth_mac_stats	= dsa_user_get_eth_mac_stats,
	.get_eth_ctrl_stats	= dsa_user_get_eth_ctrl_stats,
	.get_rmon_stats		= dsa_user_get_rmon_stats,
	.set_wol		= dsa_user_set_wol,
	.get_wol		= dsa_user_get_wol,
	.set_eee		= dsa_user_set_eee,
	.get_eee		= dsa_user_get_eee,
	.get_link_ksettings	= dsa_user_get_link_ksettings,
	.set_link_ksettings	= dsa_user_set_link_ksettings,
	.get_pause_stats	= dsa_user_get_pause_stats,
	.get_pauseparam		= dsa_user_get_pauseparam,
	.set_pauseparam		= dsa_user_set_pauseparam,
	.get_rxnfc		= dsa_user_get_rxnfc,
	.set_rxnfc		= dsa_user_set_rxnfc,
	.get_ts_info		= dsa_user_get_ts_info,
	.self_test		= dsa_user_net_selftest,
	.get_mm			= dsa_user_get_mm,
	.set_mm			= dsa_user_set_mm,
	.get_mm_stats		= dsa_user_get_mm_stats,
};

static const struct dcbnl_rtnl_ops __maybe_unused dsa_user_dcbnl_ops = {
	.ieee_setapp		= dsa_user_dcbnl_ieee_setapp,
	.ieee_delapp		= dsa_user_dcbnl_ieee_delapp,
	.dcbnl_setapptrust	= dsa_user_dcbnl_set_apptrust,
	.dcbnl_getapptrust	= dsa_user_dcbnl_get_apptrust,
};

static void dsa_user_get_stats64(struct net_device *dev,
				 struct rtnl_link_stats64 *s)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_stats64)
		ds->ops->get_stats64(ds, dp->index, s);
	else
		dev_get_tstats64(dev, s);
}

static int dsa_user_fill_forward_path(struct net_device_path_ctx *ctx,
				      struct net_device_path *path)
{
	struct dsa_port *dp = dsa_user_to_port(ctx->dev);
	struct net_device *conduit = dsa_port_to_conduit(dp);
	struct dsa_port *cpu_dp = dp->cpu_dp;

	path->dev = ctx->dev;
	path->type = DEV_PATH_DSA;
	path->dsa.proto = cpu_dp->tag_ops->proto;
	path->dsa.port = dp->index;
	ctx->dev = conduit;

	return 0;
}

static const struct net_device_ops dsa_user_netdev_ops = {
	.ndo_open		= dsa_user_open,
	.ndo_stop		= dsa_user_close,
	.ndo_start_xmit		= dsa_user_xmit,
	.ndo_change_rx_flags	= dsa_user_change_rx_flags,
	.ndo_set_rx_mode	= dsa_user_set_rx_mode,
	.ndo_set_mac_address	= dsa_user_set_mac_address,
	.ndo_fdb_dump		= dsa_user_fdb_dump,
	.ndo_eth_ioctl		= dsa_user_ioctl,
	.ndo_get_iflink		= dsa_user_get_iflink,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup	= dsa_user_netpoll_setup,
	.ndo_netpoll_cleanup	= dsa_user_netpoll_cleanup,
	.ndo_poll_controller	= dsa_user_poll_controller,
#endif
	.ndo_setup_tc		= dsa_user_setup_tc,
	.ndo_get_stats64	= dsa_user_get_stats64,
	.ndo_vlan_rx_add_vid	= dsa_user_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= dsa_user_vlan_rx_kill_vid,
	.ndo_change_mtu		= dsa_user_change_mtu,
	.ndo_fill_forward_path	= dsa_user_fill_forward_path,
};

static const struct device_type dsa_type = {
	.name	= "dsa",
};

void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
{
	const struct dsa_port *dp = dsa_to_port(ds, port);

	if (dp->pl)
		phylink_mac_change(dp->pl, up);
}
EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);

static void dsa_user_phylink_fixed_state(struct phylink_config *config,
					 struct phylink_link_state *state)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);
	struct dsa_switch *ds = dp->ds;

	/* No need to check that this operation is valid; the callback would
	 * not be called if it was not.
	 */
	ds->ops->phylink_fixed_state(ds, dp->index, state);
}
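/* Sketch of a driver-provided ->phylink_fixed_state() callback ("foo"
 * names are hypothetical): the hook only needs to fill in the link state
 * that phylink should assume for the fixed link, which the wrapper above
 * forwards verbatim.
 *
 *	static void foo_phylink_fixed_state(struct dsa_switch *ds, int port,
 *					    struct phylink_link_state *state)
 *	{
 *		state->link = foo_port_link_up(ds, port);
 *	}
 */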
/* user device setup *******************************************************/
static int dsa_user_phy_connect(struct net_device *user_dev, int addr,
				u32 flags)
{
	struct dsa_port *dp = dsa_user_to_port(user_dev);
	struct dsa_switch *ds = dp->ds;

	user_dev->phydev = mdiobus_get_phy(ds->user_mii_bus, addr);
	if (!user_dev->phydev) {
		netdev_err(user_dev, "no phy at %d\n", addr);
		return -ENODEV;
	}

	user_dev->phydev->dev_flags |= flags;

	return phylink_connect_phy(dp->pl, user_dev->phydev);
}

static int dsa_user_phy_setup(struct net_device *user_dev)
{
	struct dsa_port *dp = dsa_user_to_port(user_dev);
	struct device_node *port_dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	u32 phy_flags = 0;
	int ret;

	dp->pl_config.dev = &user_dev->dev;
	dp->pl_config.type = PHYLINK_NETDEV;

	/* The get_fixed_state callback takes precedence over polling the
	 * link GPIO in PHYLINK (see phylink_get_fixed_state). Only set
	 * this if the switch provides such a callback.
	 */
	if (ds->ops->phylink_fixed_state) {
		dp->pl_config.get_fixed_state = dsa_user_phylink_fixed_state;
		dp->pl_config.poll_fixed_state = true;
	}

	ret = dsa_port_phylink_create(dp);
	if (ret)
		return ret;

	if (ds->ops->get_phy_flags)
		phy_flags = ds->ops->get_phy_flags(ds, dp->index);

	ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
	if (ret == -ENODEV && ds->user_mii_bus) {
		/* We could not connect to a designated PHY or SFP, so try to
		 * use the switch internal MDIO bus instead
		 */
		ret = dsa_user_phy_connect(user_dev, dp->index, phy_flags);
	}
	if (ret) {
		netdev_err(user_dev, "failed to connect to PHY: %pe\n",
			   ERR_PTR(ret));
		dsa_port_phylink_destroy(dp);
	}

	return ret;
}
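/* Illustrative device tree fragments for the PHY fallback above (node and
 * property names follow the usual DSA port bindings, but the values are
 * made up): a port with an explicit phy-handle connects through OF, while
 * a port without one falls back to the switch's internal MDIO bus at the
 * address equal to the port index.
 *
 *	port@0 { reg = <0>; label = "lan1"; phy-handle = <&extphy>; };
 *	port@1 { reg = <1>; label = "lan2"; };
 */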
void dsa_user_setup_tagger(struct net_device *user)
{
	struct dsa_port *dp = dsa_user_to_port(user);
	struct net_device *conduit = dsa_port_to_conduit(dp);
	struct dsa_user_priv *p = netdev_priv(user);
	const struct dsa_port *cpu_dp = dp->cpu_dp;
	const struct dsa_switch *ds = dp->ds;

	user->needed_headroom = cpu_dp->tag_ops->needed_headroom;
	user->needed_tailroom = cpu_dp->tag_ops->needed_tailroom;
	/* Try to save one extra realloc later in the TX path (in the conduit)
	 * by also inheriting the conduit's needed headroom and tailroom.
	 * The 8021q driver also does this.
	 */
	user->needed_headroom += conduit->needed_headroom;
	user->needed_tailroom += conduit->needed_tailroom;

	p->xmit = cpu_dp->tag_ops->xmit;

	user->features = conduit->vlan_features | NETIF_F_HW_TC;
	user->hw_features |= NETIF_F_HW_TC;
	user->features |= NETIF_F_LLTX;
	if (user->needed_tailroom)
		user->features &= ~(NETIF_F_SG | NETIF_F_FRAGLIST);
	if (ds->needs_standalone_vlan_filtering)
		user->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}
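/* Worked example for the headroom math above (illustrative numbers): a
 * tagger that prepends a 4-byte tag sets needed_headroom = 4; if the
 * conduit itself advertises needed_headroom = 2, the user port ends up
 * requesting 4 + 2 = 6 bytes, so a well-sized skb never needs to be
 * reallocated at either level of the TX path.
 */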
int dsa_user_suspend(struct net_device *user_dev)
{
	struct dsa_port *dp = dsa_user_to_port(user_dev);

	if (!netif_running(user_dev))
		return 0;

	netif_device_detach(user_dev);

	rtnl_lock();
	phylink_stop(dp->pl);
	rtnl_unlock();

	return 0;
}

int dsa_user_resume(struct net_device *user_dev)
{
	struct dsa_port *dp = dsa_user_to_port(user_dev);

	if (!netif_running(user_dev))
		return 0;

	netif_device_attach(user_dev);

	rtnl_lock();
	phylink_start(dp->pl);
	rtnl_unlock();

	return 0;
}

int dsa_user_create(struct dsa_port *port)
{
	struct net_device *conduit = dsa_port_to_conduit(port);
	struct dsa_switch *ds = port->ds;
	struct net_device *user_dev;
	struct dsa_user_priv *p;
	const char *name;
	int assign_type;
	int ret;

	if (!ds->num_tx_queues)
		ds->num_tx_queues = 1;

	if (port->name) {
		name = port->name;
		assign_type = NET_NAME_PREDICTABLE;
	} else {
		name = "eth%d";
		assign_type = NET_NAME_ENUM;
	}

	user_dev = alloc_netdev_mqs(sizeof(struct dsa_user_priv), name,
				    assign_type, ether_setup,
				    ds->num_tx_queues, 1);
	if (user_dev == NULL)
		return -ENOMEM;

	user_dev->rtnl_link_ops = &dsa_link_ops;
	user_dev->ethtool_ops = &dsa_user_ethtool_ops;
#if IS_ENABLED(CONFIG_DCB)
	user_dev->dcbnl_ops = &dsa_user_dcbnl_ops;
#endif
	if (!is_zero_ether_addr(port->mac))
		eth_hw_addr_set(user_dev, port->mac);
	else
		eth_hw_addr_inherit(user_dev, conduit);
	user_dev->priv_flags |= IFF_NO_QUEUE;
	if (dsa_switch_supports_uc_filtering(ds))
		user_dev->priv_flags |= IFF_UNICAST_FLT;
	user_dev->netdev_ops = &dsa_user_netdev_ops;
	if (ds->ops->port_max_mtu)
		user_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
	SET_NETDEV_DEVTYPE(user_dev, &dsa_type);

	SET_NETDEV_DEV(user_dev, port->ds->dev);
	SET_NETDEV_DEVLINK_PORT(user_dev, &port->devlink_port);
	user_dev->dev.of_node = port->dn;
	user_dev->vlan_features = conduit->vlan_features;

	p = netdev_priv(user_dev);
	user_dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;

	ret = gro_cells_init(&p->gcells, user_dev);
	if (ret)
		goto out_free;

	p->dp = port;
	INIT_LIST_HEAD(&p->mall_tc_list);
	port->user = user_dev;
	dsa_user_setup_tagger(user_dev);

	netif_carrier_off(user_dev);

	ret = dsa_user_phy_setup(user_dev);
	if (ret) {
		netdev_err(user_dev,
			   "error %d setting up PHY for tree %d, switch %d, port %d\n",
			   ret, ds->dst->index, ds->index, port->index);
		goto out_gcells;
	}

	rtnl_lock();

	ret = dsa_user_change_mtu(user_dev, ETH_DATA_LEN);
	if (ret && ret != -EOPNOTSUPP)
		dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n",
			 ret, ETH_DATA_LEN, port->index);

	ret = register_netdevice(user_dev);
	if (ret) {
		netdev_err(conduit, "error %d registering interface %s\n",
			   ret, user_dev->name);
		rtnl_unlock();
		goto out_phy;
	}

	if (IS_ENABLED(CONFIG_DCB)) {
		ret = dsa_user_dcbnl_init(user_dev);
		if (ret) {
			netdev_err(user_dev,
				   "failed to initialize DCB: %pe\n",
				   ERR_PTR(ret));
			rtnl_unlock();
			goto out_unregister;
		}
	}

	ret = netdev_upper_dev_link(conduit, user_dev, NULL);

	rtnl_unlock();

	if (ret)
		goto out_unregister;

	return 0;

out_unregister:
	unregister_netdev(user_dev);
out_phy:
	rtnl_lock();
	phylink_disconnect_phy(p->dp->pl);
	rtnl_unlock();
	dsa_port_phylink_destroy(p->dp);
out_gcells:
	gro_cells_destroy(&p->gcells);
out_free:
	free_netdev(user_dev);
	port->user = NULL;
	return ret;
}

void dsa_user_destroy(struct net_device *user_dev)
{
	struct net_device *conduit = dsa_user_to_conduit(user_dev);
	struct dsa_port *dp = dsa_user_to_port(user_dev);
	struct dsa_user_priv *p = netdev_priv(user_dev);

	netif_carrier_off(user_dev);
	rtnl_lock();
	netdev_upper_dev_unlink(conduit, user_dev);
	unregister_netdevice(user_dev);
	phylink_disconnect_phy(dp->pl);
	rtnl_unlock();

	dsa_port_phylink_destroy(dp);
	gro_cells_destroy(&p->gcells);
	free_netdev(user_dev);
}
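/* Illustrative userspace trigger for the function below (iproute2 link
 * syntax for the DSA conduit attribute; the exact keyword depends on the
 * iproute2 version, and "lan0"/"eth1" are hypothetical names):
 *
 *	ip link set lan0 type dsa conduit eth1
 */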
int dsa_user_change_conduit(struct net_device *dev, struct net_device *conduit,
			    struct netlink_ext_ack *extack)
{
	struct net_device *old_conduit = dsa_user_to_conduit(dev);
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct net_device *upper;
	struct list_head *iter;
	int err;

	if (conduit == old_conduit)
		return 0;

	if (!ds->ops->port_change_conduit) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Driver does not support changing DSA conduit");
		return -EOPNOTSUPP;
	}

	if (!netdev_uses_dsa(conduit)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Interface not eligible as DSA conduit");
		return -EOPNOTSUPP;
	}

	netdev_for_each_upper_dev_rcu(conduit, upper, iter) {
		if (dsa_user_dev_check(upper))
			continue;
		if (netif_is_bridge_master(upper))
			continue;
		NL_SET_ERR_MSG_MOD(extack, "Cannot join conduit with unknown uppers");
		return -EOPNOTSUPP;
	}

	/* Since we allow live-changing the DSA conduit, and we auto-open the
	 * DSA conduit when the user port opens, we need to ensure that the
	 * new DSA conduit is open too.
	 */
	if (dev->flags & IFF_UP) {
		err = dev_open(conduit, extack);
		if (err)
			return err;
	}

	netdev_upper_dev_unlink(old_conduit, dev);

	err = netdev_upper_dev_link(conduit, dev, extack);
	if (err)
		goto out_revert_old_conduit_unlink;

	err = dsa_port_change_conduit(dp, conduit, extack);
	if (err)
		goto out_revert_conduit_link;

	/* Update the MTU of the new CPU port through cross-chip notifiers */
	err = dsa_user_change_mtu(dev, dev->mtu);
	if (err && err != -EOPNOTSUPP) {
		netdev_warn(dev,
			    "nonfatal error updating MTU with new conduit: %pe\n",
			    ERR_PTR(err));
	}

	/* If the port doesn't have its own MAC address and relies on the DSA
	 * conduit's one, inherit it again from the new DSA conduit.
	 */
	if (is_zero_ether_addr(dp->mac))
		eth_hw_addr_inherit(dev, conduit);

	return 0;

out_revert_conduit_link:
	netdev_upper_dev_unlink(conduit, dev);
out_revert_old_conduit_unlink:
	netdev_upper_dev_link(old_conduit, dev, NULL);
	return err;
}

bool dsa_user_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &dsa_user_netdev_ops;
}
EXPORT_SYMBOL_GPL(dsa_user_dev_check);
static int dsa_user_changeupper(struct net_device *dev,
				struct netdev_notifier_changeupper_info *info)
{
	struct netlink_ext_ack *extack;
	int err = NOTIFY_DONE;
	struct dsa_port *dp;

	if (!dsa_user_dev_check(dev))
		return err;

	dp = dsa_user_to_port(dev);
	extack = netdev_notifier_info_to_extack(&info->info);

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_port_bridge_join(dp, info->upper_dev, extack);
			if (!err)
				dsa_bridge_mtu_normalization(dp);
			if (err == -EOPNOTSUPP) {
				NL_SET_ERR_MSG_WEAK_MOD(extack,
							"Offloading not supported");
				err = 0;
			}
			err = notifier_from_errno(err);
		} else {
			dsa_port_bridge_leave(dp, info->upper_dev);
			err = NOTIFY_OK;
		}
	} else if (netif_is_lag_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_port_lag_join(dp, info->upper_dev,
						info->upper_info, extack);
			if (err == -EOPNOTSUPP) {
				NL_SET_ERR_MSG_WEAK_MOD(extack,
							"Offloading not supported");
				err = 0;
			}
			err = notifier_from_errno(err);
		} else {
			dsa_port_lag_leave(dp, info->upper_dev);
			err = NOTIFY_OK;
		}
	} else if (is_hsr_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_port_hsr_join(dp, info->upper_dev, extack);
			if (err == -EOPNOTSUPP) {
				NL_SET_ERR_MSG_WEAK_MOD(extack,
							"Offloading not supported");
				err = 0;
			}
			err = notifier_from_errno(err);
		} else {
			dsa_port_hsr_leave(dp, info->upper_dev);
			err = NOTIFY_OK;
		}
	}

	return err;
}

static int dsa_user_prechangeupper(struct net_device *dev,
				   struct netdev_notifier_changeupper_info *info)
{
	struct dsa_port *dp;

	if (!dsa_user_dev_check(dev))
		return NOTIFY_DONE;

	dp = dsa_user_to_port(dev);

	if (netif_is_bridge_master(info->upper_dev) && !info->linking)
		dsa_port_pre_bridge_leave(dp, info->upper_dev);
	else if (netif_is_lag_master(info->upper_dev) && !info->linking)
		dsa_port_pre_lag_leave(dp, info->upper_dev);
	/* dsa_port_pre_hsr_leave is not yet necessary since hsr devices
	 * cannot be meaningfully placed under a bridge yet
	 */

	return NOTIFY_DONE;
}

static int
dsa_user_lag_changeupper(struct net_device *dev,
			 struct netdev_notifier_changeupper_info *info)
{
	struct net_device *lower;
	struct list_head *iter;
	int err = NOTIFY_DONE;
	struct dsa_port *dp;

	if (!netif_is_lag_master(dev))
		return err;

	netdev_for_each_lower_dev(dev, lower, iter) {
		if (!dsa_user_dev_check(lower))
			continue;

		dp = dsa_user_to_port(lower);
		if (!dp->lag)
			/* Software LAG */
			continue;

		err = dsa_user_changeupper(lower, info);
		if (notifier_to_errno(err))
			break;
	}

	return err;
}

/* Same as dsa_user_lag_changeupper() except that it calls
 * dsa_user_prechangeupper()
 */
static int
dsa_user_lag_prechangeupper(struct net_device *dev,
			    struct netdev_notifier_changeupper_info *info)
{
	struct net_device *lower;
	struct list_head *iter;
	int err = NOTIFY_DONE;
	struct dsa_port *dp;

	if (!netif_is_lag_master(dev))
		return err;

	netdev_for_each_lower_dev(dev, lower, iter) {
		if (!dsa_user_dev_check(lower))
			continue;

		dp = dsa_user_to_port(lower);
		if (!dp->lag)
			/* Software LAG */
			continue;

		err = dsa_user_prechangeupper(lower, info);
		if (notifier_to_errno(err))
			break;
	}

	return err;
}

static int
dsa_prevent_bridging_8021q_upper(struct net_device *dev,
				 struct netdev_notifier_changeupper_info *info)
{
	struct netlink_ext_ack *ext_ack;
	struct net_device *user, *br;
	struct dsa_port *dp;

	ext_ack = netdev_notifier_info_to_extack(&info->info);

	if (!is_vlan_dev(dev))
		return NOTIFY_DONE;

	user = vlan_dev_real_dev(dev);
	if (!dsa_user_dev_check(user))
		return NOTIFY_DONE;

	dp = dsa_user_to_port(user);
	br = dsa_port_bridge_dev_get(dp);
	if (!br)
		return NOTIFY_DONE;

	/* Deny enslaving a VLAN device into a VLAN-aware bridge */
	if (br_vlan_enabled(br) &&
	    netif_is_bridge_master(info->upper_dev) && info->linking) {
		NL_SET_ERR_MSG_MOD(ext_ack,
				   "Cannot make VLAN device join VLAN-aware bridge");
		return notifier_from_errno(-EINVAL);
	}

	return NOTIFY_DONE;
}

static int
dsa_user_check_8021q_upper(struct net_device *dev,
			   struct netdev_notifier_changeupper_info *info)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	struct bridge_vlan_info br_info;
	struct netlink_ext_ack *extack;
	int err = NOTIFY_DONE;
	u16 vid;

	if (!br || !br_vlan_enabled(br))
		return NOTIFY_DONE;

	extack = netdev_notifier_info_to_extack(&info->info);
	vid = vlan_dev_vlan_id(info->upper_dev);

	/* br_vlan_get_info() returns -EINVAL or -ENOENT if the device
	 * respectively the VID is not found. Returning 0 means the VID is
	 * already configured by the bridge, which is a failure for us here.
	 */
	err = br_vlan_get_info(br, vid, &br_info);
	if (err == 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "This VLAN is already configured by the bridge");
		return notifier_from_errno(-EBUSY);
	}

	return NOTIFY_DONE;
}

static int
dsa_user_prechangeupper_sanity_check(struct net_device *dev,
				     struct netdev_notifier_changeupper_info *info)
{
	struct dsa_switch *ds;
	struct dsa_port *dp;
	int err;

	if (!dsa_user_dev_check(dev))
		return dsa_prevent_bridging_8021q_upper(dev, info);

	dp = dsa_user_to_port(dev);
	ds = dp->ds;

	if (ds->ops->port_prechangeupper) {
		err = ds->ops->port_prechangeupper(ds, dp->index, info);
		if (err)
			return notifier_from_errno(err);
	}

	if (is_vlan_dev(info->upper_dev))
		return dsa_user_check_8021q_upper(dev, info);

	return NOTIFY_DONE;
}
/* To be eligible as a DSA conduit, a LAG must have all lower interfaces be
 * eligible DSA conduits. Additionally, all LAG slaves must be DSA conduits of
 * switches in the same switch tree.
 */
static int dsa_lag_conduit_validate(struct net_device *lag_dev,
				    struct netlink_ext_ack *extack)
{
	struct net_device *lower1, *lower2;
	struct list_head *iter1, *iter2;

	netdev_for_each_lower_dev(lag_dev, lower1, iter1) {
		netdev_for_each_lower_dev(lag_dev, lower2, iter2) {
			if (!netdev_uses_dsa(lower1) ||
			    !netdev_uses_dsa(lower2)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "All LAG ports must be eligible as DSA conduits");
				return notifier_from_errno(-EINVAL);
			}

			if (lower1 == lower2)
				continue;

			if (!dsa_port_tree_same(lower1->dsa_ptr,
						lower2->dsa_ptr)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "LAG contains DSA conduits of disjoint switch trees");
				return notifier_from_errno(-EINVAL);
			}
		}
	}

	return NOTIFY_DONE;
}

static int
dsa_conduit_prechangeupper_sanity_check(struct net_device *conduit,
					struct netdev_notifier_changeupper_info *info)
{
	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info);

	if (!netdev_uses_dsa(conduit))
		return NOTIFY_DONE;

	if (!info->linking)
		return NOTIFY_DONE;

	/* Allow DSA switch uppers */
	if (dsa_user_dev_check(info->upper_dev))
		return NOTIFY_DONE;

	/* Allow bridge uppers of DSA conduits, subject to further
	 * restrictions in dsa_bridge_prechangelower_sanity_check()
	 */
	if (netif_is_bridge_master(info->upper_dev))
		return NOTIFY_DONE;

	/* Allow LAG uppers, subject to further restrictions in
	 * dsa_lag_conduit_prechangelower_sanity_check()
	 */
	if (netif_is_lag_master(info->upper_dev))
		return dsa_lag_conduit_validate(info->upper_dev, extack);

	NL_SET_ERR_MSG_MOD(extack,
			   "DSA conduit cannot join unknown upper interfaces");
	return notifier_from_errno(-EBUSY);
}

static int
dsa_lag_conduit_prechangelower_sanity_check(struct net_device *dev,
					    struct netdev_notifier_changeupper_info *info)
{
	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info);
	struct net_device *lag_dev = info->upper_dev;
	struct net_device *lower;
	struct list_head *iter;

	if (!netdev_uses_dsa(lag_dev) || !netif_is_lag_master(lag_dev))
		return NOTIFY_DONE;

	if (!info->linking)
		return NOTIFY_DONE;

	if (!netdev_uses_dsa(dev)) {
		NL_SET_ERR_MSG(extack,
			       "Only DSA conduits can join a LAG DSA conduit");
		return notifier_from_errno(-EINVAL);
	}

	netdev_for_each_lower_dev(lag_dev, lower, iter) {
		if (!dsa_port_tree_same(dev->dsa_ptr, lower->dsa_ptr)) {
			NL_SET_ERR_MSG(extack,
				       "Interface is DSA conduit for a different switch tree than this LAG");
			return notifier_from_errno(-EINVAL);
		}

		break;
	}

	return NOTIFY_DONE;
}
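/* Example of a topology accepted by the checks above (interface names are
 * made up): eth0 and eth1 are both DSA conduits of the same switch tree
 * and are aggregated under bond0, which then acts as the LAG DSA conduit.
 * A bond mixing eth0 with a NIC that is not a DSA conduit, or with a
 * conduit from a different switch tree, is rejected.
 */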
/* Don't allow bridging of DSA conduits, since the bridge layer rx_handler
 * prevents the DSA fake ethertype handler from being invoked, so we don't
 * get the chance to strip off and parse the DSA switch tag protocol header
 * (the bridge layer just returns RX_HANDLER_CONSUMED, stopping RX processing
 * for these frames).
 * The only case where that would not be an issue is when bridging can already
 * be offloaded, such as when the DSA conduit is itself a DSA or plain switchdev
 * port, and is bridged only with other ports from the same hardware device.
 */
static int
dsa_bridge_prechangelower_sanity_check(struct net_device *new_lower,
				       struct netdev_notifier_changeupper_info *info)
{
	struct net_device *br = info->upper_dev;
	struct netlink_ext_ack *extack;
	struct net_device *lower;
	struct list_head *iter;

	if (!netif_is_bridge_master(br))
		return NOTIFY_DONE;

	if (!info->linking)
		return NOTIFY_DONE;

	extack = netdev_notifier_info_to_extack(&info->info);

	netdev_for_each_lower_dev(br, lower, iter) {
		if (!netdev_uses_dsa(new_lower) && !netdev_uses_dsa(lower))
			continue;

		if (!netdev_port_same_parent_id(lower, new_lower)) {
			NL_SET_ERR_MSG(extack,
				       "Cannot do software bridging with a DSA conduit");
			return notifier_from_errno(-EINVAL);
		}
	}

	return NOTIFY_DONE;
}

static void dsa_tree_migrate_ports_from_lag_conduit(struct dsa_switch_tree *dst,
						    struct net_device *lag_dev)
{
	struct net_device *new_conduit = dsa_tree_find_first_conduit(dst);
	struct dsa_port *dp;
	int err;

	dsa_tree_for_each_user_port(dp, dst) {
		if (dsa_port_to_conduit(dp) != lag_dev)
			continue;

		err = dsa_user_change_conduit(dp->user, new_conduit, NULL);
		if (err) {
			netdev_err(dp->user,
				   "failed to restore conduit to %s: %pe\n",
				   new_conduit->name, ERR_PTR(err));
		}
	}
}

static int dsa_conduit_lag_join(struct net_device *conduit,
				struct net_device *lag_dev,
				struct netdev_lag_upper_info *uinfo,
				struct netlink_ext_ack *extack)
{
	struct dsa_port *cpu_dp = conduit->dsa_ptr;
	struct dsa_switch_tree *dst = cpu_dp->dst;
	struct dsa_port *dp;
	int err;

	err = dsa_conduit_lag_setup(lag_dev, cpu_dp, uinfo, extack);
	if (err)
		return err;

	dsa_tree_for_each_user_port(dp, dst) {
		if (dsa_port_to_conduit(dp) != conduit)
			continue;

		err = dsa_user_change_conduit(dp->user, lag_dev, extack);
		if (err)
			goto restore;
	}

	return 0;

restore:
	dsa_tree_for_each_user_port_continue_reverse(dp, dst) {
		if (dsa_port_to_conduit(dp) != lag_dev)
			continue;

		err = dsa_user_change_conduit(dp->user, conduit, NULL);
		if (err) {
			netdev_err(dp->user,
				   "failed to restore conduit to %s: %pe\n",
				   conduit->name, ERR_PTR(err));
		}
	}

	dsa_conduit_lag_teardown(lag_dev, conduit->dsa_ptr);

	return err;
}
static void dsa_conduit_lag_leave(struct net_device *conduit,
				  struct net_device *lag_dev)
{
	struct dsa_port *dp, *cpu_dp = lag_dev->dsa_ptr;
	struct dsa_switch_tree *dst = cpu_dp->dst;
	struct dsa_port *new_cpu_dp = NULL;
	struct net_device *lower;
	struct list_head *iter;

	netdev_for_each_lower_dev(lag_dev, lower, iter) {
		if (netdev_uses_dsa(lower)) {
			new_cpu_dp = lower->dsa_ptr;
			break;
		}
	}

	if (new_cpu_dp) {
		/* Update the CPU port of the user ports still under the LAG
		 * so that dsa_port_to_conduit() continues to work properly
		 */
		dsa_tree_for_each_user_port(dp, dst)
			if (dsa_port_to_conduit(dp) == lag_dev)
				dp->cpu_dp = new_cpu_dp;

		/* Update the index of the virtual CPU port to match the lowest
		 * physical CPU port
		 */
		lag_dev->dsa_ptr = new_cpu_dp;
		wmb();
	} else {
		/* If the LAG DSA conduit has no ports left, migrate back all
		 * user ports to the first physical CPU port
		 */
		dsa_tree_migrate_ports_from_lag_conduit(dst, lag_dev);
	}

	/* This DSA conduit has left its LAG in any case, so let
	 * the CPU port leave the hardware LAG as well
	 */
	dsa_conduit_lag_teardown(lag_dev, conduit->dsa_ptr);
}

static int dsa_conduit_changeupper(struct net_device *dev,
				   struct netdev_notifier_changeupper_info *info)
{
	struct netlink_ext_ack *extack;
	int err = NOTIFY_DONE;

	if (!netdev_uses_dsa(dev))
		return err;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (netif_is_lag_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_conduit_lag_join(dev, info->upper_dev,
						   info->upper_info, extack);
			err = notifier_from_errno(err);
		} else {
			dsa_conduit_lag_leave(dev, info->upper_dev);
			err = NOTIFY_OK;
		}
	}

	return err;
}
static int dsa_user_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_PRECHANGEUPPER: {
		struct netdev_notifier_changeupper_info *info = ptr;
		int err;

		err = dsa_user_prechangeupper_sanity_check(dev, info);
		if (notifier_to_errno(err))
			return err;

		err = dsa_conduit_prechangeupper_sanity_check(dev, info);
		if (notifier_to_errno(err))
			return err;

		err = dsa_lag_conduit_prechangelower_sanity_check(dev, info);
		if (notifier_to_errno(err))
			return err;

		err = dsa_bridge_prechangelower_sanity_check(dev, info);
		if (notifier_to_errno(err))
			return err;

		err = dsa_user_prechangeupper(dev, ptr);
		if (notifier_to_errno(err))
			return err;

		err = dsa_user_lag_prechangeupper(dev, ptr);
		if (notifier_to_errno(err))
			return err;

		break;
	}
	case NETDEV_CHANGEUPPER: {
		int err;

		err = dsa_user_changeupper(dev, ptr);
		if (notifier_to_errno(err))
			return err;

		err = dsa_user_lag_changeupper(dev, ptr);
		if (notifier_to_errno(err))
			return err;

		err = dsa_conduit_changeupper(dev, ptr);
		if (notifier_to_errno(err))
			return err;

		break;
	}
	case NETDEV_CHANGELOWERSTATE: {
		struct netdev_notifier_changelowerstate_info *info = ptr;
		struct dsa_port *dp;
		int err = 0;

		if (dsa_user_dev_check(dev)) {
			dp = dsa_user_to_port(dev);

			err = dsa_port_lag_change(dp, info->lower_state_info);
		}

		/* Mirror LAG port events on DSA conduits that are in
		 * a LAG towards their respective switch CPU ports
		 */
		if (netdev_uses_dsa(dev)) {
			dp = dev->dsa_ptr;

			err = dsa_port_lag_change(dp, info->lower_state_info);
		}

		return notifier_from_errno(err);
	}
	case NETDEV_CHANGE:
	case NETDEV_UP: {
		/* Track state of conduit port.
		 * DSA driver may require the conduit port (and indirectly
		 * the tagger) to be available for some special operation.
		 */
		if (netdev_uses_dsa(dev)) {
			struct dsa_port *cpu_dp = dev->dsa_ptr;
			struct dsa_switch_tree *dst = cpu_dp->ds->dst;

			/* Track when the conduit port is UP */
			dsa_tree_conduit_oper_state_change(dst, dev,
							   netif_oper_up(dev));

			/* Track when the conduit port is ready and can accept
			 * packets.
			 * The NETDEV_UP event is not enough to flag a port as
			 * ready. We also have to wait for linkwatch_do_dev to
			 * dev_activate and emit a NETDEV_CHANGE event.
			 * We check if a conduit port is ready by checking that
			 * the dev has a qdisc assigned and it is not the noop
			 * qdisc.
			 */
			dsa_tree_conduit_admin_state_change(dst, dev,
							    !qdisc_tx_is_noop(dev));

			return NOTIFY_OK;
		}

		return NOTIFY_DONE;
	}
	case NETDEV_GOING_DOWN: {
		struct dsa_port *dp, *cpu_dp;
		struct dsa_switch_tree *dst;
		LIST_HEAD(close_list);

		if (!netdev_uses_dsa(dev))
			return NOTIFY_DONE;

		cpu_dp = dev->dsa_ptr;
		dst = cpu_dp->ds->dst;

		dsa_tree_conduit_admin_state_change(dst, dev, false);

		list_for_each_entry(dp, &dst->ports, list) {
			if (!dsa_port_is_user(dp))
				continue;

			if (dp->cpu_dp != cpu_dp)
				continue;

			list_add(&dp->user->close_list, &close_list);
		}

		dev_close_many(&close_list, true);

		return NOTIFY_OK;
	}
	default:
		break;
	}

	return NOTIFY_DONE;
}

static void
dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work)
{
	struct switchdev_notifier_fdb_info info = {};

	info.addr = switchdev_work->addr;
	info.vid = switchdev_work->vid;
	info.offloaded = true;
	call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
				 switchdev_work->orig_dev, &info.info, NULL);
}

static void dsa_user_switchdev_event_work(struct work_struct *work)
{
	struct dsa_switchdev_event_work *switchdev_work =
		container_of(work, struct dsa_switchdev_event_work, work);
	const unsigned char *addr = switchdev_work->addr;
	struct net_device *dev = switchdev_work->dev;
	u16 vid = switchdev_work->vid;
	struct dsa_switch *ds;
	struct dsa_port *dp;
	int err;

	dp = dsa_user_to_port(dev);
	ds = dp->ds;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		if (switchdev_work->host_addr)
			err = dsa_port_bridge_host_fdb_add(dp, addr, vid);
		else if (dp->lag)
			err = dsa_port_lag_fdb_add(dp, addr, vid);
		else
			err = dsa_port_fdb_add(dp, addr, vid);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to add %pM vid %d to fdb: %d\n",
				dp->index, addr, vid, err);
			break;
		}
		dsa_fdb_offload_notify(switchdev_work);
		break;

	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		if (switchdev_work->host_addr)
			err = dsa_port_bridge_host_fdb_del(dp, addr, vid);
		else if (dp->lag)
			err = dsa_port_lag_fdb_del(dp, addr, vid);
		else
			err = dsa_port_fdb_del(dp, addr, vid);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to delete %pM vid %d from fdb: %d\n",
				dp->index, addr, vid, err);
		}

		break;
	}

	kfree(switchdev_work);
}

static bool dsa_foreign_dev_check(const struct net_device *dev,
				  const struct net_device *foreign_dev)
{
	const struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch_tree *dst = dp->ds->dst;

	if (netif_is_bridge_master(foreign_dev))
		return !dsa_tree_offloads_bridge_dev(dst, foreign_dev);

	if (netif_is_bridge_port(foreign_dev))
		return !dsa_tree_offloads_bridge_port(dst, foreign_dev);

	/* Everything else is foreign */
	return true;
}
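/* Example of the "foreign interface" case handled below (the topology is
 * illustrative): a DSA user port lan0 bridged with a non-DSA NIC eth2.
 * Addresses that the software bridge learns on eth2 are foreign to the
 * switch, so with ds->assisted_learning_on_cpu_port set they are
 * installed as host FDB entries, making the switch forward traffic for
 * those stations towards the CPU port instead of flooding it.
 */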
static int dsa_user_fdb_event(struct net_device *dev,
			      struct net_device *orig_dev,
			      unsigned long event, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info)
{
	struct dsa_switchdev_event_work *switchdev_work;
	struct dsa_port *dp = dsa_user_to_port(dev);
	bool host_addr = fdb_info->is_local;
	struct dsa_switch *ds = dp->ds;

	if (ctx && ctx != dp)
		return 0;

	if (!dp->bridge)
		return 0;

	if (switchdev_fdb_is_dynamically_learned(fdb_info)) {
		if (dsa_port_offloads_bridge_port(dp, orig_dev))
			return 0;

		/* FDB entries learned by the software bridge or by foreign
		 * bridge ports should be installed as host addresses only if
		 * the driver requests assisted learning.
		 */
		if (!ds->assisted_learning_on_cpu_port)
			return 0;
	}

	/* Also treat FDB entries on foreign interfaces bridged with us as host
	 * addresses.
	 */
	if (dsa_foreign_dev_check(dev, orig_dev))
		host_addr = true;

	/* Check early that we're not doing work in vain.
	 * Host addresses on LAG ports still require regular FDB ops,
	 * since the CPU port isn't in a LAG.
	 */
	if (dp->lag && !host_addr) {
		if (!ds->ops->lag_fdb_add || !ds->ops->lag_fdb_del)
			return -EOPNOTSUPP;
	} else {
		if (!ds->ops->port_fdb_add || !ds->ops->port_fdb_del)
			return -EOPNOTSUPP;
	}

	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return -ENOMEM;

	netdev_dbg(dev, "%s FDB entry towards %s, addr %pM vid %d%s\n",
		   event == SWITCHDEV_FDB_ADD_TO_DEVICE ? "Adding" : "Deleting",
		   orig_dev->name, fdb_info->addr, fdb_info->vid,
		   host_addr ? " as host address" : "");

	INIT_WORK(&switchdev_work->work, dsa_user_switchdev_event_work);
	switchdev_work->event = event;
	switchdev_work->dev = dev;
	switchdev_work->orig_dev = orig_dev;

	ether_addr_copy(switchdev_work->addr, fdb_info->addr);
	switchdev_work->vid = fdb_info->vid;
	switchdev_work->host_addr = host_addr;

	dsa_schedule_work(&switchdev_work->work);

	return 0;
}

/* Called under rcu_read_lock() */
static int dsa_user_switchdev_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     dsa_user_dev_check,
						     dsa_user_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
							   dsa_user_dev_check,
							   dsa_foreign_dev_check,
							   dsa_user_fdb_event);
		return notifier_from_errno(err);
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}
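/* Design note on the split above: dsa_user_switchdev_event() runs in
 * atomic context (under rcu_read_lock()), which is why FDB programming is
 * deferred to dsa_user_switchdev_event_work() with a GFP_ATOMIC
 * allocation. The blocking notifier chain handled below is allowed to
 * sleep, so port object add/del can be serviced synchronously.
 */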
static int dsa_user_switchdev_blocking_event(struct notifier_block *unused,
					     unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add_foreign(dev, ptr,
							    dsa_user_dev_check,
							    dsa_foreign_dev_check,
							    dsa_user_port_obj_add);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = switchdev_handle_port_obj_del_foreign(dev, ptr,
							    dsa_user_dev_check,
							    dsa_foreign_dev_check,
							    dsa_user_port_obj_del);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     dsa_user_dev_check,
						     dsa_user_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

static struct notifier_block dsa_user_nb __read_mostly = {
	.notifier_call	= dsa_user_netdevice_event,
};

struct notifier_block dsa_user_switchdev_notifier = {
	.notifier_call	= dsa_user_switchdev_event,
};

struct notifier_block dsa_user_switchdev_blocking_notifier = {
	.notifier_call	= dsa_user_switchdev_blocking_event,
};

int dsa_user_register_notifier(void)
{
	struct notifier_block *nb;
	int err;

	err = register_netdevice_notifier(&dsa_user_nb);
	if (err)
		return err;

	err = register_switchdev_notifier(&dsa_user_switchdev_notifier);
	if (err)
		goto err_switchdev_nb;

	nb = &dsa_user_switchdev_blocking_notifier;
	err = register_switchdev_blocking_notifier(nb);
	if (err)
		goto err_switchdev_blocking_nb;

	return 0;

err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&dsa_user_switchdev_notifier);
err_switchdev_nb:
	unregister_netdevice_notifier(&dsa_user_nb);
	return err;
}

void dsa_user_unregister_notifier(void)
{
	struct notifier_block *nb;
	int err;

	nb = &dsa_user_switchdev_blocking_notifier;
	err = unregister_switchdev_blocking_notifier(nb);
	if (err)
		pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);

	err = unregister_switchdev_notifier(&dsa_user_switchdev_notifier);
	if (err)
		pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);

	err = unregister_netdevice_notifier(&dsa_user_nb);
	if (err)
		pr_err("DSA: failed to unregister user notifier (%d)\n", err);
}