// SPDX-License-Identifier: GPL-2.0
/*
 * DPAA2 Ethernet Switch driver
 *
 * Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2021 NXP
 *
 */

#include <linux/module.h>

#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/iommu.h>
#include <net/pkt_cls.h>

#include <linux/fsl/mc.h>

#include "dpaa2-switch.h"

/* Minimal supported DPSW version */
#define DPSW_MIN_VER_MAJOR	8
#define DPSW_MIN_VER_MINOR	9

#define DEFAULT_VLAN_ID		1

static u16 dpaa2_switch_port_get_fdb_id(struct ethsw_port_priv *port_priv)
{
	return port_priv->fdb->fdb_id;
}

static struct dpaa2_switch_fdb *dpaa2_switch_fdb_get_unused(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
		if (!ethsw->fdbs[i].in_use)
			return &ethsw->fdbs[i];
	return NULL;
}

static struct dpaa2_switch_filter_block *
dpaa2_switch_filter_block_get_unused(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
		if (!ethsw->filter_blocks[i].in_use)
			return &ethsw->filter_blocks[i];
	return NULL;
}

static u16 dpaa2_switch_port_set_fdb(struct ethsw_port_priv *port_priv,
				     struct net_device *bridge_dev)
{
	struct ethsw_port_priv *other_port_priv = NULL;
	struct dpaa2_switch_fdb *fdb;
	struct net_device *other_dev;
	struct list_head *iter;

	/* If we leave a bridge (bridge_dev is NULL), find an unused
	 * FDB and use that.
	 */
	if (!bridge_dev) {
		fdb = dpaa2_switch_fdb_get_unused(port_priv->ethsw_data);

		/* If there is no unused FDB, we must be the last port that
		 * leaves the last bridge, all the others are standalone. We
		 * can just keep the FDB that we already have.
		 */

		if (!fdb) {
			port_priv->fdb->bridge_dev = NULL;
			return 0;
		}

		port_priv->fdb = fdb;
		port_priv->fdb->in_use = true;
		port_priv->fdb->bridge_dev = NULL;
		return 0;
	}

	/* The below call to netdev_for_each_lower_dev() demands the RTNL lock
	 * being held. Assert on it so that it's easier to catch new code
	 * paths that reach this point without the RTNL lock.
	 */
	ASSERT_RTNL();

	/* If part of a bridge, use the FDB of the first dpaa2 switch interface
	 * to be present in that bridge
	 */
	netdev_for_each_lower_dev(bridge_dev, other_dev, iter) {
		if (!dpaa2_switch_port_dev_check(other_dev))
			continue;

		if (other_dev == port_priv->netdev)
			continue;

		other_port_priv = netdev_priv(other_dev);
		break;
	}

	/* The current port is about to change its FDB to the one used by the
	 * first port that joined the bridge.
	 */
	if (other_port_priv) {
		/* The previous FDB is about to become unused, since the
		 * interface is no longer standalone.
110 */ 111 port_priv->fdb->in_use = false; 112 port_priv->fdb->bridge_dev = NULL; 113 114 /* Get a reference to the new FDB */ 115 port_priv->fdb = other_port_priv->fdb; 116 } 117 118 /* Keep track of the new upper bridge device */ 119 port_priv->fdb->bridge_dev = bridge_dev; 120 121 return 0; 122 } 123 124 static void dpaa2_switch_fdb_get_flood_cfg(struct ethsw_core *ethsw, u16 fdb_id, 125 enum dpsw_flood_type type, 126 struct dpsw_egress_flood_cfg *cfg) 127 { 128 int i = 0, j; 129 130 memset(cfg, 0, sizeof(*cfg)); 131 132 /* Add all the DPAA2 switch ports found in the same bridging domain to 133 * the egress flooding domain 134 */ 135 for (j = 0; j < ethsw->sw_attr.num_ifs; j++) { 136 if (!ethsw->ports[j]) 137 continue; 138 if (ethsw->ports[j]->fdb->fdb_id != fdb_id) 139 continue; 140 141 if (type == DPSW_BROADCAST && ethsw->ports[j]->bcast_flood) 142 cfg->if_id[i++] = ethsw->ports[j]->idx; 143 else if (type == DPSW_FLOODING && ethsw->ports[j]->ucast_flood) 144 cfg->if_id[i++] = ethsw->ports[j]->idx; 145 } 146 147 /* Add the CTRL interface to the egress flooding domain */ 148 cfg->if_id[i++] = ethsw->sw_attr.num_ifs; 149 150 cfg->fdb_id = fdb_id; 151 cfg->flood_type = type; 152 cfg->num_ifs = i; 153 } 154 155 static int dpaa2_switch_fdb_set_egress_flood(struct ethsw_core *ethsw, u16 fdb_id) 156 { 157 struct dpsw_egress_flood_cfg flood_cfg; 158 int err; 159 160 /* Setup broadcast flooding domain */ 161 dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_BROADCAST, &flood_cfg); 162 err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle, 163 &flood_cfg); 164 if (err) { 165 dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err); 166 return err; 167 } 168 169 /* Setup unknown flooding domain */ 170 dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_FLOODING, &flood_cfg); 171 err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle, 172 &flood_cfg); 173 if (err) { 174 dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err); 175 return err; 176 } 177 178 return 0; 179 } 180 181 static void *dpaa2_iova_to_virt(struct iommu_domain *domain, 182 dma_addr_t iova_addr) 183 { 184 phys_addr_t phys_addr; 185 186 phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr; 187 188 return phys_to_virt(phys_addr); 189 } 190 191 static int dpaa2_switch_add_vlan(struct ethsw_port_priv *port_priv, u16 vid) 192 { 193 struct ethsw_core *ethsw = port_priv->ethsw_data; 194 struct dpsw_vlan_cfg vcfg = {0}; 195 int err; 196 197 vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); 198 err = dpsw_vlan_add(ethsw->mc_io, 0, 199 ethsw->dpsw_handle, vid, &vcfg); 200 if (err) { 201 dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err); 202 return err; 203 } 204 ethsw->vlans[vid] = ETHSW_VLAN_MEMBER; 205 206 return 0; 207 } 208 209 static bool dpaa2_switch_port_is_up(struct ethsw_port_priv *port_priv) 210 { 211 struct net_device *netdev = port_priv->netdev; 212 struct dpsw_link_state state; 213 int err; 214 215 err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0, 216 port_priv->ethsw_data->dpsw_handle, 217 port_priv->idx, &state); 218 if (err) { 219 netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err); 220 return true; 221 } 222 223 WARN_ONCE(state.up > 1, "Garbage read into link_state"); 224 225 return state.up ? 
true : false; 226 } 227 228 static int dpaa2_switch_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid) 229 { 230 struct ethsw_core *ethsw = port_priv->ethsw_data; 231 struct net_device *netdev = port_priv->netdev; 232 struct dpsw_tci_cfg tci_cfg = { 0 }; 233 bool up; 234 int err, ret; 235 236 err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, 237 port_priv->idx, &tci_cfg); 238 if (err) { 239 netdev_err(netdev, "dpsw_if_get_tci err %d\n", err); 240 return err; 241 } 242 243 tci_cfg.vlan_id = pvid; 244 245 /* Interface needs to be down to change PVID */ 246 up = dpaa2_switch_port_is_up(port_priv); 247 if (up) { 248 err = dpsw_if_disable(ethsw->mc_io, 0, 249 ethsw->dpsw_handle, 250 port_priv->idx); 251 if (err) { 252 netdev_err(netdev, "dpsw_if_disable err %d\n", err); 253 return err; 254 } 255 } 256 257 err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, 258 port_priv->idx, &tci_cfg); 259 if (err) { 260 netdev_err(netdev, "dpsw_if_set_tci err %d\n", err); 261 goto set_tci_error; 262 } 263 264 /* Delete previous PVID info and mark the new one */ 265 port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID; 266 port_priv->vlans[pvid] |= ETHSW_VLAN_PVID; 267 port_priv->pvid = pvid; 268 269 set_tci_error: 270 if (up) { 271 ret = dpsw_if_enable(ethsw->mc_io, 0, 272 ethsw->dpsw_handle, 273 port_priv->idx); 274 if (ret) { 275 netdev_err(netdev, "dpsw_if_enable err %d\n", ret); 276 return ret; 277 } 278 } 279 280 return err; 281 } 282 283 static int dpaa2_switch_port_add_vlan(struct ethsw_port_priv *port_priv, 284 u16 vid, u16 flags) 285 { 286 struct ethsw_core *ethsw = port_priv->ethsw_data; 287 struct net_device *netdev = port_priv->netdev; 288 struct dpsw_vlan_if_cfg vcfg = {0}; 289 int err; 290 291 if (port_priv->vlans[vid]) { 292 netdev_err(netdev, "VLAN %d already configured\n", vid); 293 return -EEXIST; 294 } 295 296 /* If hit, this VLAN rule will lead the packet into the FDB table 297 * specified in the vlan configuration below 298 */ 299 vcfg.num_ifs = 1; 300 vcfg.if_id[0] = port_priv->idx; 301 vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); 302 vcfg.options |= DPSW_VLAN_ADD_IF_OPT_FDB_ID; 303 err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg); 304 if (err) { 305 netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err); 306 return err; 307 } 308 309 port_priv->vlans[vid] = ETHSW_VLAN_MEMBER; 310 311 if (flags & BRIDGE_VLAN_INFO_UNTAGGED) { 312 err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0, 313 ethsw->dpsw_handle, 314 vid, &vcfg); 315 if (err) { 316 netdev_err(netdev, 317 "dpsw_vlan_add_if_untagged err %d\n", err); 318 return err; 319 } 320 port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED; 321 } 322 323 if (flags & BRIDGE_VLAN_INFO_PVID) { 324 err = dpaa2_switch_port_set_pvid(port_priv, vid); 325 if (err) 326 return err; 327 } 328 329 return 0; 330 } 331 332 static enum dpsw_stp_state br_stp_state_to_dpsw(u8 state) 333 { 334 switch (state) { 335 case BR_STATE_DISABLED: 336 return DPSW_STP_STATE_DISABLED; 337 case BR_STATE_LISTENING: 338 return DPSW_STP_STATE_LISTENING; 339 case BR_STATE_LEARNING: 340 return DPSW_STP_STATE_LEARNING; 341 case BR_STATE_FORWARDING: 342 return DPSW_STP_STATE_FORWARDING; 343 case BR_STATE_BLOCKING: 344 return DPSW_STP_STATE_BLOCKING; 345 default: 346 return DPSW_STP_STATE_DISABLED; 347 } 348 } 349 350 static int dpaa2_switch_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state) 351 { 352 struct dpsw_stp_cfg stp_cfg = {0}; 353 int err; 354 u16 vid; 355 356 if (!netif_running(port_priv->netdev) || state == 
port_priv->stp_state) 357 return 0; /* Nothing to do */ 358 359 stp_cfg.state = br_stp_state_to_dpsw(state); 360 for (vid = 0; vid <= VLAN_VID_MASK; vid++) { 361 if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) { 362 stp_cfg.vlan_id = vid; 363 err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0, 364 port_priv->ethsw_data->dpsw_handle, 365 port_priv->idx, &stp_cfg); 366 if (err) { 367 netdev_err(port_priv->netdev, 368 "dpsw_if_set_stp err %d\n", err); 369 return err; 370 } 371 } 372 } 373 374 port_priv->stp_state = state; 375 376 return 0; 377 } 378 379 static int dpaa2_switch_dellink(struct ethsw_core *ethsw, u16 vid) 380 { 381 struct ethsw_port_priv *ppriv_local = NULL; 382 int i, err; 383 384 if (!ethsw->vlans[vid]) 385 return -ENOENT; 386 387 err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid); 388 if (err) { 389 dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err); 390 return err; 391 } 392 ethsw->vlans[vid] = 0; 393 394 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { 395 ppriv_local = ethsw->ports[i]; 396 if (ppriv_local) 397 ppriv_local->vlans[vid] = 0; 398 } 399 400 return 0; 401 } 402 403 static int dpaa2_switch_port_fdb_add_uc(struct ethsw_port_priv *port_priv, 404 const unsigned char *addr) 405 { 406 struct dpsw_fdb_unicast_cfg entry = {0}; 407 u16 fdb_id; 408 int err; 409 410 entry.if_egress = port_priv->idx; 411 entry.type = DPSW_FDB_ENTRY_STATIC; 412 ether_addr_copy(entry.mac_addr, addr); 413 414 fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); 415 err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0, 416 port_priv->ethsw_data->dpsw_handle, 417 fdb_id, &entry); 418 if (err) 419 netdev_err(port_priv->netdev, 420 "dpsw_fdb_add_unicast err %d\n", err); 421 return err; 422 } 423 424 static int dpaa2_switch_port_fdb_del_uc(struct ethsw_port_priv *port_priv, 425 const unsigned char *addr) 426 { 427 struct dpsw_fdb_unicast_cfg entry = {0}; 428 u16 fdb_id; 429 int err; 430 431 entry.if_egress = port_priv->idx; 432 entry.type = DPSW_FDB_ENTRY_STATIC; 433 ether_addr_copy(entry.mac_addr, addr); 434 435 fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); 436 err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0, 437 port_priv->ethsw_data->dpsw_handle, 438 fdb_id, &entry); 439 /* Silently discard error for calling multiple times the del command */ 440 if (err && err != -ENXIO) 441 netdev_err(port_priv->netdev, 442 "dpsw_fdb_remove_unicast err %d\n", err); 443 return err; 444 } 445 446 static int dpaa2_switch_port_fdb_add_mc(struct ethsw_port_priv *port_priv, 447 const unsigned char *addr) 448 { 449 struct dpsw_fdb_multicast_cfg entry = {0}; 450 u16 fdb_id; 451 int err; 452 453 ether_addr_copy(entry.mac_addr, addr); 454 entry.type = DPSW_FDB_ENTRY_STATIC; 455 entry.num_ifs = 1; 456 entry.if_id[0] = port_priv->idx; 457 458 fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); 459 err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0, 460 port_priv->ethsw_data->dpsw_handle, 461 fdb_id, &entry); 462 /* Silently discard error for calling multiple times the add command */ 463 if (err && err != -ENXIO) 464 netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n", 465 err); 466 return err; 467 } 468 469 static int dpaa2_switch_port_fdb_del_mc(struct ethsw_port_priv *port_priv, 470 const unsigned char *addr) 471 { 472 struct dpsw_fdb_multicast_cfg entry = {0}; 473 u16 fdb_id; 474 int err; 475 476 ether_addr_copy(entry.mac_addr, addr); 477 entry.type = DPSW_FDB_ENTRY_STATIC; 478 entry.num_ifs = 1; 479 entry.if_id[0] = port_priv->idx; 480 481 fdb_id = 
dpaa2_switch_port_get_fdb_id(port_priv); 482 err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0, 483 port_priv->ethsw_data->dpsw_handle, 484 fdb_id, &entry); 485 /* Silently discard error for calling multiple times the del command */ 486 if (err && err != -ENAVAIL) 487 netdev_err(port_priv->netdev, 488 "dpsw_fdb_remove_multicast err %d\n", err); 489 return err; 490 } 491 492 static void dpaa2_switch_port_get_stats(struct net_device *netdev, 493 struct rtnl_link_stats64 *stats) 494 { 495 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 496 u64 tmp; 497 int err; 498 499 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 500 port_priv->ethsw_data->dpsw_handle, 501 port_priv->idx, 502 DPSW_CNT_ING_FRAME, &stats->rx_packets); 503 if (err) 504 goto error; 505 506 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 507 port_priv->ethsw_data->dpsw_handle, 508 port_priv->idx, 509 DPSW_CNT_EGR_FRAME, &stats->tx_packets); 510 if (err) 511 goto error; 512 513 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 514 port_priv->ethsw_data->dpsw_handle, 515 port_priv->idx, 516 DPSW_CNT_ING_BYTE, &stats->rx_bytes); 517 if (err) 518 goto error; 519 520 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 521 port_priv->ethsw_data->dpsw_handle, 522 port_priv->idx, 523 DPSW_CNT_EGR_BYTE, &stats->tx_bytes); 524 if (err) 525 goto error; 526 527 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 528 port_priv->ethsw_data->dpsw_handle, 529 port_priv->idx, 530 DPSW_CNT_ING_FRAME_DISCARD, 531 &stats->rx_dropped); 532 if (err) 533 goto error; 534 535 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 536 port_priv->ethsw_data->dpsw_handle, 537 port_priv->idx, 538 DPSW_CNT_ING_FLTR_FRAME, 539 &tmp); 540 if (err) 541 goto error; 542 stats->rx_dropped += tmp; 543 544 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 545 port_priv->ethsw_data->dpsw_handle, 546 port_priv->idx, 547 DPSW_CNT_EGR_FRAME_DISCARD, 548 &stats->tx_dropped); 549 if (err) 550 goto error; 551 552 return; 553 554 error: 555 netdev_err(netdev, "dpsw_if_get_counter err %d\n", err); 556 } 557 558 static bool dpaa2_switch_port_has_offload_stats(const struct net_device *netdev, 559 int attr_id) 560 { 561 return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT); 562 } 563 564 static int dpaa2_switch_port_get_offload_stats(int attr_id, 565 const struct net_device *netdev, 566 void *sp) 567 { 568 switch (attr_id) { 569 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 570 dpaa2_switch_port_get_stats((struct net_device *)netdev, sp); 571 return 0; 572 } 573 574 return -EINVAL; 575 } 576 577 static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu) 578 { 579 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 580 int err; 581 582 err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io, 583 0, 584 port_priv->ethsw_data->dpsw_handle, 585 port_priv->idx, 586 (u16)ETHSW_L2_MAX_FRM(mtu)); 587 if (err) { 588 netdev_err(netdev, 589 "dpsw_if_set_max_frame_length() err %d\n", err); 590 return err; 591 } 592 593 WRITE_ONCE(netdev->mtu, mtu); 594 return 0; 595 } 596 597 static int dpaa2_switch_port_link_state_update(struct net_device *netdev) 598 { 599 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 600 struct dpsw_link_state state; 601 int err; 602 603 /* When we manage the MAC/PHY using phylink there is no need 604 * to manually update the netif_carrier. 
	 * We can avoid locking because we are called from the "link changed"
	 * IRQ handler, which is the same as the "endpoint changed" IRQ handler
	 * (the writer to port_priv->mac), so we cannot race with it.
	 */
	if (dpaa2_mac_is_type_phy(port_priv->mac))
		return 0;

	/* Interrupts are received even though no one issued an 'ifconfig up'
	 * on the switch interface. Ignore these link state update interrupts
	 */
	if (!netif_running(netdev))
		return 0;

	err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
				     port_priv->ethsw_data->dpsw_handle,
				     port_priv->idx, &state);
	if (err) {
		netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
		return err;
	}

	WARN_ONCE(state.up > 1, "Garbage read into link_state");

	if (state.up != port_priv->link_state) {
		if (state.up) {
			netif_carrier_on(netdev);
			netif_tx_start_all_queues(netdev);
		} else {
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
		port_priv->link_state = state.up;
	}

	return 0;
}

/* Manage all NAPI instances for the control interface.
 *
 * We only have one RX queue and one Tx Conf queue for all
 * switch ports. Therefore, we only need to enable the NAPI instance once, the
 * first time one of the switch ports runs .dev_open().
 */

static void dpaa2_switch_enable_ctrl_if_napi(struct ethsw_core *ethsw)
{
	int i;

	/* Access to the ethsw->napi_users relies on the RTNL lock */
	ASSERT_RTNL();

	/* a new interface is using the NAPI instance */
	ethsw->napi_users++;

	/* if there is already a user of the instance, return */
	if (ethsw->napi_users > 1)
		return;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
		napi_enable(&ethsw->fq[i].napi);
}

static void dpaa2_switch_disable_ctrl_if_napi(struct ethsw_core *ethsw)
{
	int i;

	/* Access to the ethsw->napi_users relies on the RTNL lock */
	ASSERT_RTNL();

	/* If we are not the last interface using the NAPI, return */
	ethsw->napi_users--;
	if (ethsw->napi_users)
		return;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
		napi_disable(&ethsw->fq[i].napi);
}

static int dpaa2_switch_port_open(struct net_device *netdev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	int err;

	mutex_lock(&port_priv->mac_lock);

	if (!dpaa2_switch_port_is_type_phy(port_priv)) {
		/* Explicitly set carrier off, otherwise
		 * netif_carrier_ok() will return true and cause 'ip link show'
		 * to report the LOWER_UP flag, even though the link
		 * notification wasn't even received.
696 */ 697 netif_carrier_off(netdev); 698 } 699 700 err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0, 701 port_priv->ethsw_data->dpsw_handle, 702 port_priv->idx); 703 if (err) { 704 mutex_unlock(&port_priv->mac_lock); 705 netdev_err(netdev, "dpsw_if_enable err %d\n", err); 706 return err; 707 } 708 709 dpaa2_switch_enable_ctrl_if_napi(ethsw); 710 711 if (dpaa2_switch_port_is_type_phy(port_priv)) 712 dpaa2_mac_start(port_priv->mac); 713 714 mutex_unlock(&port_priv->mac_lock); 715 716 return 0; 717 } 718 719 static int dpaa2_switch_port_stop(struct net_device *netdev) 720 { 721 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 722 struct ethsw_core *ethsw = port_priv->ethsw_data; 723 int err; 724 725 mutex_lock(&port_priv->mac_lock); 726 727 if (dpaa2_switch_port_is_type_phy(port_priv)) { 728 dpaa2_mac_stop(port_priv->mac); 729 } else { 730 netif_tx_stop_all_queues(netdev); 731 netif_carrier_off(netdev); 732 } 733 734 mutex_unlock(&port_priv->mac_lock); 735 736 err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0, 737 port_priv->ethsw_data->dpsw_handle, 738 port_priv->idx); 739 if (err) { 740 netdev_err(netdev, "dpsw_if_disable err %d\n", err); 741 return err; 742 } 743 744 dpaa2_switch_disable_ctrl_if_napi(ethsw); 745 746 return 0; 747 } 748 749 static int dpaa2_switch_port_parent_id(struct net_device *dev, 750 struct netdev_phys_item_id *ppid) 751 { 752 struct ethsw_port_priv *port_priv = netdev_priv(dev); 753 754 ppid->id_len = 1; 755 ppid->id[0] = port_priv->ethsw_data->dev_id; 756 757 return 0; 758 } 759 760 static int dpaa2_switch_port_get_phys_name(struct net_device *netdev, char *name, 761 size_t len) 762 { 763 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 764 int err; 765 766 err = snprintf(name, len, "p%d", port_priv->idx); 767 if (err >= len) 768 return -EINVAL; 769 770 return 0; 771 } 772 773 struct ethsw_dump_ctx { 774 struct net_device *dev; 775 struct sk_buff *skb; 776 struct netlink_callback *cb; 777 int idx; 778 }; 779 780 static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry, 781 struct ethsw_dump_ctx *dump) 782 { 783 struct ndo_fdb_dump_context *ctx = (void *)dump->cb->ctx; 784 int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC; 785 u32 portid = NETLINK_CB(dump->cb->skb).portid; 786 u32 seq = dump->cb->nlh->nlmsg_seq; 787 struct nlmsghdr *nlh; 788 struct ndmsg *ndm; 789 790 if (dump->idx < ctx->fdb_idx) 791 goto skip; 792 793 nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH, 794 sizeof(*ndm), NLM_F_MULTI); 795 if (!nlh) 796 return -EMSGSIZE; 797 798 ndm = nlmsg_data(nlh); 799 ndm->ndm_family = AF_BRIDGE; 800 ndm->ndm_pad1 = 0; 801 ndm->ndm_pad2 = 0; 802 ndm->ndm_flags = NTF_SELF; 803 ndm->ndm_type = 0; 804 ndm->ndm_ifindex = dump->dev->ifindex; 805 ndm->ndm_state = is_dynamic ? 
					     NUD_REACHABLE : NUD_NOARP;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac_addr))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}

static int dpaa2_switch_port_fdb_valid_entry(struct fdb_dump_entry *entry,
					     struct ethsw_port_priv *port_priv)
{
	int idx = port_priv->idx;
	int valid;

	if (entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
		valid = entry->if_info == port_priv->idx;
	else
		valid = entry->if_mask[idx / 8] & BIT(idx % 8);

	return valid;
}

static int dpaa2_switch_fdb_iterate(struct ethsw_port_priv *port_priv,
				    dpaa2_switch_fdb_cb_t cb, void *data)
{
	struct net_device *net_dev = port_priv->netdev;
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct device *dev = net_dev->dev.parent;
	struct fdb_dump_entry *fdb_entries;
	struct fdb_dump_entry fdb_entry;
	dma_addr_t fdb_dump_iova;
	u16 num_fdb_entries;
	u32 fdb_dump_size;
	int err = 0, i;
	u8 *dma_mem;
	u16 fdb_id;

	fdb_dump_size = ethsw->sw_attr.max_fdb_entries * sizeof(fdb_entry);
	dma_mem = kzalloc(fdb_dump_size, GFP_KERNEL);
	if (!dma_mem)
		return -ENOMEM;

	fdb_dump_iova = dma_map_single(dev, dma_mem, fdb_dump_size,
				       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, fdb_dump_iova)) {
		netdev_err(net_dev, "dma_map_single() failed\n");
		err = -ENOMEM;
		goto err_map;
	}

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_dump(ethsw->mc_io, 0, ethsw->dpsw_handle, fdb_id,
			    fdb_dump_iova, fdb_dump_size, &num_fdb_entries);
	if (err) {
		netdev_err(net_dev, "dpsw_fdb_dump() = %d\n", err);
		goto err_dump;
	}

	dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);

	fdb_entries = (struct fdb_dump_entry *)dma_mem;
	for (i = 0; i < num_fdb_entries; i++) {
		fdb_entry = fdb_entries[i];

		err = cb(port_priv, &fdb_entry, data);
		if (err)
			goto end;
	}

end:
	kfree(dma_mem);

	return 0;

err_dump:
	/* The dump buffer was mapped DMA_FROM_DEVICE, unmap it the same way */
	dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);
err_map:
	kfree(dma_mem);
	return err;
}

static int dpaa2_switch_fdb_entry_dump(struct ethsw_port_priv *port_priv,
				       struct fdb_dump_entry *fdb_entry,
				       void *data)
{
	if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv))
		return 0;

	return dpaa2_switch_fdb_dump_nl(fdb_entry, data);
}

static int dpaa2_switch_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
				      struct net_device *net_dev,
				      struct net_device *filter_dev, int *idx)
{
	struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
	struct ethsw_dump_ctx dump = {
		.dev = net_dev,
		.skb = skb,
		.cb = cb,
		.idx = *idx,
	};
	int err;

	err = dpaa2_switch_fdb_iterate(port_priv, dpaa2_switch_fdb_entry_dump, &dump);
	*idx = dump.idx;

	return err;
}

static int dpaa2_switch_fdb_entry_fast_age(struct ethsw_port_priv *port_priv,
					   struct fdb_dump_entry *fdb_entry,
					   void *data __always_unused)
{
	if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv))
		return 0;

	if (!(fdb_entry->type & DPSW_FDB_ENTRY_TYPE_DYNAMIC))
		return 0;

	if (fdb_entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
		dpaa2_switch_port_fdb_del_uc(port_priv, fdb_entry->mac_addr);
	else
dpaa2_switch_port_fdb_del_mc(port_priv, fdb_entry->mac_addr); 937 938 return 0; 939 } 940 941 static void dpaa2_switch_port_fast_age(struct ethsw_port_priv *port_priv) 942 { 943 dpaa2_switch_fdb_iterate(port_priv, 944 dpaa2_switch_fdb_entry_fast_age, NULL); 945 } 946 947 static int dpaa2_switch_port_vlan_add(struct net_device *netdev, __be16 proto, 948 u16 vid) 949 { 950 struct switchdev_obj_port_vlan vlan = { 951 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, 952 .vid = vid, 953 .obj.orig_dev = netdev, 954 /* This API only allows programming tagged, non-PVID VIDs */ 955 .flags = 0, 956 }; 957 958 return dpaa2_switch_port_vlans_add(netdev, &vlan); 959 } 960 961 static int dpaa2_switch_port_vlan_kill(struct net_device *netdev, __be16 proto, 962 u16 vid) 963 { 964 struct switchdev_obj_port_vlan vlan = { 965 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, 966 .vid = vid, 967 .obj.orig_dev = netdev, 968 /* This API only allows programming tagged, non-PVID VIDs */ 969 .flags = 0, 970 }; 971 972 return dpaa2_switch_port_vlans_del(netdev, &vlan); 973 } 974 975 static int dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv *port_priv) 976 { 977 struct ethsw_core *ethsw = port_priv->ethsw_data; 978 struct net_device *net_dev = port_priv->netdev; 979 struct device *dev = net_dev->dev.parent; 980 u8 mac_addr[ETH_ALEN]; 981 int err; 982 983 if (!(ethsw->features & ETHSW_FEATURE_MAC_ADDR)) 984 return 0; 985 986 /* Get firmware address, if any */ 987 err = dpsw_if_get_port_mac_addr(ethsw->mc_io, 0, ethsw->dpsw_handle, 988 port_priv->idx, mac_addr); 989 if (err) { 990 dev_err(dev, "dpsw_if_get_port_mac_addr() failed\n"); 991 return err; 992 } 993 994 /* First check if firmware has any address configured by bootloader */ 995 if (!is_zero_ether_addr(mac_addr)) { 996 eth_hw_addr_set(net_dev, mac_addr); 997 } else { 998 /* No MAC address configured, fill in net_dev->dev_addr 999 * with a random one 1000 */ 1001 eth_hw_addr_random(net_dev); 1002 dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n"); 1003 1004 /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all 1005 * practical purposes, this will be our "permanent" mac address, 1006 * at least until the next reboot. This move will also permit 1007 * register_netdevice() to properly fill up net_dev->perm_addr. 1008 */ 1009 net_dev->addr_assign_type = NET_ADDR_PERM; 1010 } 1011 1012 return 0; 1013 } 1014 1015 static void dpaa2_switch_free_fd(const struct ethsw_core *ethsw, 1016 const struct dpaa2_fd *fd) 1017 { 1018 struct device *dev = ethsw->dev; 1019 unsigned char *buffer_start; 1020 struct sk_buff **skbh, *skb; 1021 dma_addr_t fd_addr; 1022 1023 fd_addr = dpaa2_fd_get_addr(fd); 1024 skbh = dpaa2_iova_to_virt(ethsw->iommu_domain, fd_addr); 1025 1026 skb = *skbh; 1027 buffer_start = (unsigned char *)skbh; 1028 1029 dma_unmap_single(dev, fd_addr, 1030 skb_tail_pointer(skb) - buffer_start, 1031 DMA_TO_DEVICE); 1032 1033 /* Move on with skb release */ 1034 dev_kfree_skb(skb); 1035 } 1036 1037 static int dpaa2_switch_build_single_fd(struct ethsw_core *ethsw, 1038 struct sk_buff *skb, 1039 struct dpaa2_fd *fd) 1040 { 1041 struct device *dev = ethsw->dev; 1042 struct sk_buff **skbh; 1043 dma_addr_t addr; 1044 u8 *buff_start; 1045 void *hwa; 1046 1047 buff_start = PTR_ALIGN(skb->data - DPAA2_SWITCH_TX_DATA_OFFSET - 1048 DPAA2_SWITCH_TX_BUF_ALIGN, 1049 DPAA2_SWITCH_TX_BUF_ALIGN); 1050 1051 /* Clear FAS to have consistent values for TX confirmation. 
It is 1052 * located in the first 8 bytes of the buffer's hardware annotation 1053 * area 1054 */ 1055 hwa = buff_start + DPAA2_SWITCH_SWA_SIZE; 1056 memset(hwa, 0, 8); 1057 1058 /* Store a backpointer to the skb at the beginning of the buffer 1059 * (in the private data area) such that we can release it 1060 * on Tx confirm 1061 */ 1062 skbh = (struct sk_buff **)buff_start; 1063 *skbh = skb; 1064 1065 addr = dma_map_single(dev, buff_start, 1066 skb_tail_pointer(skb) - buff_start, 1067 DMA_TO_DEVICE); 1068 if (unlikely(dma_mapping_error(dev, addr))) 1069 return -ENOMEM; 1070 1071 /* Setup the FD fields */ 1072 memset(fd, 0, sizeof(*fd)); 1073 1074 dpaa2_fd_set_addr(fd, addr); 1075 dpaa2_fd_set_offset(fd, (u16)(skb->data - buff_start)); 1076 dpaa2_fd_set_len(fd, skb->len); 1077 dpaa2_fd_set_format(fd, dpaa2_fd_single); 1078 1079 return 0; 1080 } 1081 1082 static netdev_tx_t dpaa2_switch_port_tx(struct sk_buff *skb, 1083 struct net_device *net_dev) 1084 { 1085 struct ethsw_port_priv *port_priv = netdev_priv(net_dev); 1086 struct ethsw_core *ethsw = port_priv->ethsw_data; 1087 int retries = DPAA2_SWITCH_SWP_BUSY_RETRIES; 1088 struct dpaa2_fd fd; 1089 int err; 1090 1091 if (unlikely(skb_headroom(skb) < DPAA2_SWITCH_NEEDED_HEADROOM)) { 1092 struct sk_buff *ns; 1093 1094 ns = skb_realloc_headroom(skb, DPAA2_SWITCH_NEEDED_HEADROOM); 1095 if (unlikely(!ns)) { 1096 net_err_ratelimited("%s: Error reallocating skb headroom\n", net_dev->name); 1097 goto err_free_skb; 1098 } 1099 dev_consume_skb_any(skb); 1100 skb = ns; 1101 } 1102 1103 /* We'll be holding a back-reference to the skb until Tx confirmation */ 1104 skb = skb_unshare(skb, GFP_ATOMIC); 1105 if (unlikely(!skb)) { 1106 /* skb_unshare() has already freed the skb */ 1107 net_err_ratelimited("%s: Error copying the socket buffer\n", net_dev->name); 1108 goto err_exit; 1109 } 1110 1111 /* At this stage, we do not support non-linear skbs so just try to 1112 * linearize the skb and if that's not working, just drop the packet. 
	 */
	err = skb_linearize(skb);
	if (err) {
		net_err_ratelimited("%s: skb_linearize error (%d)!\n", net_dev->name, err);
		goto err_free_skb;
	}

	err = dpaa2_switch_build_single_fd(ethsw, skb, &fd);
	if (unlikely(err)) {
		net_err_ratelimited("%s: ethsw_build_*_fd() %d\n", net_dev->name, err);
		goto err_free_skb;
	}

	do {
		err = dpaa2_io_service_enqueue_qd(NULL,
						  port_priv->tx_qdid,
						  8, 0, &fd);
		retries--;
	} while (err == -EBUSY && retries);

	if (unlikely(err < 0)) {
		dpaa2_switch_free_fd(ethsw, &fd);
		goto err_exit;
	}

	return NETDEV_TX_OK;

err_free_skb:
	dev_kfree_skb(skb);
err_exit:
	return NETDEV_TX_OK;
}

static int
dpaa2_switch_setup_tc_cls_flower(struct dpaa2_switch_filter_block *filter_block,
				 struct flow_cls_offload *f)
{
	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return dpaa2_switch_cls_flower_replace(filter_block, f);
	case FLOW_CLS_DESTROY:
		return dpaa2_switch_cls_flower_destroy(filter_block, f);
	default:
		return -EOPNOTSUPP;
	}
}

static int
dpaa2_switch_setup_tc_cls_matchall(struct dpaa2_switch_filter_block *block,
				   struct tc_cls_matchall_offload *f)
{
	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return dpaa2_switch_cls_matchall_replace(block, f);
	case TC_CLSMATCHALL_DESTROY:
		return dpaa2_switch_cls_matchall_destroy(block, f);
	default:
		return -EOPNOTSUPP;
	}
}

static int dpaa2_switch_port_setup_tc_block_cb_ig(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return dpaa2_switch_setup_tc_cls_flower(cb_priv, type_data);
	case TC_SETUP_CLSMATCHALL:
		return dpaa2_switch_setup_tc_cls_matchall(cb_priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(dpaa2_switch_block_cb_list);

static int
dpaa2_switch_port_acl_tbl_bind(struct ethsw_port_priv *port_priv,
			       struct dpaa2_switch_filter_block *block)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_acl_if_cfg acl_if_cfg;
	int err;

	if (port_priv->filter_block)
		return -EINVAL;

	acl_if_cfg.if_id[0] = port_priv->idx;
	acl_if_cfg.num_ifs = 1;
	err = dpsw_acl_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
			      block->acl_id, &acl_if_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_acl_add_if err %d\n", err);
		return err;
	}

	block->ports |= BIT(port_priv->idx);
	port_priv->filter_block = block;

	return 0;
}

static int
dpaa2_switch_port_acl_tbl_unbind(struct ethsw_port_priv *port_priv,
				 struct dpaa2_switch_filter_block *block)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_acl_if_cfg acl_if_cfg;
	int err;

	if (port_priv->filter_block != block)
		return -EINVAL;

	acl_if_cfg.if_id[0] = port_priv->idx;
	acl_if_cfg.num_ifs = 1;
	err = dpsw_acl_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
				 block->acl_id, &acl_if_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_acl_remove_if err %d\n", err);
		return err;
	}

	block->ports &= ~BIT(port_priv->idx);
	port_priv->filter_block = NULL;
	return 0;
}

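/* A tc filter block maps to one hardware ACL table plus its mirror entries.
 * The bind/unbind helpers below keep block->ports and each port's
 * filter_block pointer in sync so that a block shared by several switch
 * ports is backed by a single table.
 */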
static int dpaa2_switch_port_block_bind(struct ethsw_port_priv *port_priv, 1244 struct dpaa2_switch_filter_block *block) 1245 { 1246 struct dpaa2_switch_filter_block *old_block = port_priv->filter_block; 1247 int err; 1248 1249 /* Offload all the mirror entries found in the block on this new port 1250 * joining it. 1251 */ 1252 err = dpaa2_switch_block_offload_mirror(block, port_priv); 1253 if (err) 1254 return err; 1255 1256 /* If the port is already bound to this ACL table then do nothing. This 1257 * can happen when this port is the first one to join a tc block 1258 */ 1259 if (port_priv->filter_block == block) 1260 return 0; 1261 1262 err = dpaa2_switch_port_acl_tbl_unbind(port_priv, old_block); 1263 if (err) 1264 return err; 1265 1266 /* Mark the previous ACL table as being unused if this was the last 1267 * port that was using it. 1268 */ 1269 if (old_block->ports == 0) 1270 old_block->in_use = false; 1271 1272 return dpaa2_switch_port_acl_tbl_bind(port_priv, block); 1273 } 1274 1275 static int 1276 dpaa2_switch_port_block_unbind(struct ethsw_port_priv *port_priv, 1277 struct dpaa2_switch_filter_block *block) 1278 { 1279 struct ethsw_core *ethsw = port_priv->ethsw_data; 1280 struct dpaa2_switch_filter_block *new_block; 1281 int err; 1282 1283 /* Unoffload all the mirror entries found in the block from the 1284 * port leaving it. 1285 */ 1286 err = dpaa2_switch_block_unoffload_mirror(block, port_priv); 1287 if (err) 1288 return err; 1289 1290 /* We are the last port that leaves a block (an ACL table). 1291 * We'll continue to use this table. 1292 */ 1293 if (block->ports == BIT(port_priv->idx)) 1294 return 0; 1295 1296 err = dpaa2_switch_port_acl_tbl_unbind(port_priv, block); 1297 if (err) 1298 return err; 1299 1300 if (block->ports == 0) 1301 block->in_use = false; 1302 1303 new_block = dpaa2_switch_filter_block_get_unused(ethsw); 1304 new_block->in_use = true; 1305 return dpaa2_switch_port_acl_tbl_bind(port_priv, new_block); 1306 } 1307 1308 static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev, 1309 struct flow_block_offload *f) 1310 { 1311 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 1312 struct ethsw_core *ethsw = port_priv->ethsw_data; 1313 struct dpaa2_switch_filter_block *filter_block; 1314 struct flow_block_cb *block_cb; 1315 bool register_block = false; 1316 int err; 1317 1318 block_cb = flow_block_cb_lookup(f->block, 1319 dpaa2_switch_port_setup_tc_block_cb_ig, 1320 ethsw); 1321 1322 if (!block_cb) { 1323 /* If the filter block is not already known, then this port 1324 * must be the first to join it. 
In this case, we can just 1325 * continue to use our private table 1326 */ 1327 filter_block = port_priv->filter_block; 1328 1329 block_cb = flow_block_cb_alloc(dpaa2_switch_port_setup_tc_block_cb_ig, 1330 ethsw, filter_block, NULL); 1331 if (IS_ERR(block_cb)) 1332 return PTR_ERR(block_cb); 1333 1334 register_block = true; 1335 } else { 1336 filter_block = flow_block_cb_priv(block_cb); 1337 } 1338 1339 flow_block_cb_incref(block_cb); 1340 err = dpaa2_switch_port_block_bind(port_priv, filter_block); 1341 if (err) 1342 goto err_block_bind; 1343 1344 if (register_block) { 1345 flow_block_cb_add(block_cb, f); 1346 list_add_tail(&block_cb->driver_list, 1347 &dpaa2_switch_block_cb_list); 1348 } 1349 1350 return 0; 1351 1352 err_block_bind: 1353 if (!flow_block_cb_decref(block_cb)) 1354 flow_block_cb_free(block_cb); 1355 return err; 1356 } 1357 1358 static void dpaa2_switch_setup_tc_block_unbind(struct net_device *netdev, 1359 struct flow_block_offload *f) 1360 { 1361 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 1362 struct ethsw_core *ethsw = port_priv->ethsw_data; 1363 struct dpaa2_switch_filter_block *filter_block; 1364 struct flow_block_cb *block_cb; 1365 int err; 1366 1367 block_cb = flow_block_cb_lookup(f->block, 1368 dpaa2_switch_port_setup_tc_block_cb_ig, 1369 ethsw); 1370 if (!block_cb) 1371 return; 1372 1373 filter_block = flow_block_cb_priv(block_cb); 1374 err = dpaa2_switch_port_block_unbind(port_priv, filter_block); 1375 if (!err && !flow_block_cb_decref(block_cb)) { 1376 flow_block_cb_remove(block_cb, f); 1377 list_del(&block_cb->driver_list); 1378 } 1379 } 1380 1381 static int dpaa2_switch_setup_tc_block(struct net_device *netdev, 1382 struct flow_block_offload *f) 1383 { 1384 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) 1385 return -EOPNOTSUPP; 1386 1387 f->driver_block_list = &dpaa2_switch_block_cb_list; 1388 1389 switch (f->command) { 1390 case FLOW_BLOCK_BIND: 1391 return dpaa2_switch_setup_tc_block_bind(netdev, f); 1392 case FLOW_BLOCK_UNBIND: 1393 dpaa2_switch_setup_tc_block_unbind(netdev, f); 1394 return 0; 1395 default: 1396 return -EOPNOTSUPP; 1397 } 1398 } 1399 1400 static int dpaa2_switch_port_setup_tc(struct net_device *netdev, 1401 enum tc_setup_type type, 1402 void *type_data) 1403 { 1404 switch (type) { 1405 case TC_SETUP_BLOCK: { 1406 return dpaa2_switch_setup_tc_block(netdev, type_data); 1407 } 1408 default: 1409 return -EOPNOTSUPP; 1410 } 1411 1412 return 0; 1413 } 1414 1415 static const struct net_device_ops dpaa2_switch_port_ops = { 1416 .ndo_open = dpaa2_switch_port_open, 1417 .ndo_stop = dpaa2_switch_port_stop, 1418 1419 .ndo_set_mac_address = eth_mac_addr, 1420 .ndo_get_stats64 = dpaa2_switch_port_get_stats, 1421 .ndo_change_mtu = dpaa2_switch_port_change_mtu, 1422 .ndo_has_offload_stats = dpaa2_switch_port_has_offload_stats, 1423 .ndo_get_offload_stats = dpaa2_switch_port_get_offload_stats, 1424 .ndo_fdb_dump = dpaa2_switch_port_fdb_dump, 1425 .ndo_vlan_rx_add_vid = dpaa2_switch_port_vlan_add, 1426 .ndo_vlan_rx_kill_vid = dpaa2_switch_port_vlan_kill, 1427 1428 .ndo_start_xmit = dpaa2_switch_port_tx, 1429 .ndo_get_port_parent_id = dpaa2_switch_port_parent_id, 1430 .ndo_get_phys_port_name = dpaa2_switch_port_get_phys_name, 1431 .ndo_setup_tc = dpaa2_switch_port_setup_tc, 1432 }; 1433 1434 bool dpaa2_switch_port_dev_check(const struct net_device *netdev) 1435 { 1436 return netdev->netdev_ops == &dpaa2_switch_port_ops; 1437 } 1438 1439 static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv) 1440 { 1441 struct 
fsl_mc_device *dpsw_port_dev, *dpmac_dev; 1442 struct dpaa2_mac *mac; 1443 int err; 1444 1445 dpsw_port_dev = to_fsl_mc_device(port_priv->netdev->dev.parent); 1446 dpmac_dev = fsl_mc_get_endpoint(dpsw_port_dev, port_priv->idx); 1447 1448 if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER) 1449 return PTR_ERR(dpmac_dev); 1450 1451 if (IS_ERR(dpmac_dev)) 1452 return 0; 1453 1454 if (dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) { 1455 err = 0; 1456 goto out_put_device; 1457 } 1458 1459 mac = kzalloc(sizeof(*mac), GFP_KERNEL); 1460 if (!mac) { 1461 err = -ENOMEM; 1462 goto out_put_device; 1463 } 1464 1465 mac->mc_dev = dpmac_dev; 1466 mac->mc_io = port_priv->ethsw_data->mc_io; 1467 mac->net_dev = port_priv->netdev; 1468 1469 err = dpaa2_mac_open(mac); 1470 if (err) 1471 goto err_free_mac; 1472 1473 if (dpaa2_mac_is_type_phy(mac)) { 1474 err = dpaa2_mac_connect(mac); 1475 if (err) { 1476 netdev_err(port_priv->netdev, 1477 "Error connecting to the MAC endpoint %pe\n", 1478 ERR_PTR(err)); 1479 goto err_close_mac; 1480 } 1481 } 1482 1483 mutex_lock(&port_priv->mac_lock); 1484 port_priv->mac = mac; 1485 mutex_unlock(&port_priv->mac_lock); 1486 1487 return 0; 1488 1489 err_close_mac: 1490 dpaa2_mac_close(mac); 1491 err_free_mac: 1492 kfree(mac); 1493 out_put_device: 1494 put_device(&dpmac_dev->dev); 1495 return err; 1496 } 1497 1498 static void dpaa2_switch_port_disconnect_mac(struct ethsw_port_priv *port_priv) 1499 { 1500 struct dpaa2_mac *mac; 1501 1502 mutex_lock(&port_priv->mac_lock); 1503 mac = port_priv->mac; 1504 port_priv->mac = NULL; 1505 mutex_unlock(&port_priv->mac_lock); 1506 1507 if (!mac) 1508 return; 1509 1510 if (dpaa2_mac_is_type_phy(mac)) 1511 dpaa2_mac_disconnect(mac); 1512 1513 dpaa2_mac_close(mac); 1514 kfree(mac); 1515 } 1516 1517 static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg) 1518 { 1519 struct device *dev = (struct device *)arg; 1520 struct ethsw_core *ethsw = dev_get_drvdata(dev); 1521 struct ethsw_port_priv *port_priv; 1522 int err, if_id; 1523 bool had_mac; 1524 u32 status; 1525 1526 err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle, 1527 DPSW_IRQ_INDEX_IF, &status); 1528 if (err) { 1529 dev_err(dev, "Can't get irq status (err %d)\n", err); 1530 goto out; 1531 } 1532 1533 if_id = (status & 0xFFFF0000) >> 16; 1534 port_priv = ethsw->ports[if_id]; 1535 1536 if (status & DPSW_IRQ_EVENT_LINK_CHANGED) 1537 dpaa2_switch_port_link_state_update(port_priv->netdev); 1538 1539 if (status & DPSW_IRQ_EVENT_ENDPOINT_CHANGED) { 1540 dpaa2_switch_port_set_mac_addr(port_priv); 1541 /* We can avoid locking because the "endpoint changed" IRQ 1542 * handler is the only one who changes priv->mac at runtime, 1543 * so we are not racing with anyone. 
1544 */ 1545 had_mac = !!port_priv->mac; 1546 if (had_mac) 1547 dpaa2_switch_port_disconnect_mac(port_priv); 1548 else 1549 dpaa2_switch_port_connect_mac(port_priv); 1550 } 1551 1552 err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle, 1553 DPSW_IRQ_INDEX_IF, status); 1554 if (err) 1555 dev_err(dev, "Can't clear irq status (err %d)\n", err); 1556 1557 out: 1558 return IRQ_HANDLED; 1559 } 1560 1561 static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev) 1562 { 1563 u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED | DPSW_IRQ_EVENT_ENDPOINT_CHANGED; 1564 struct device *dev = &sw_dev->dev; 1565 struct ethsw_core *ethsw = dev_get_drvdata(dev); 1566 struct fsl_mc_device_irq *irq; 1567 int err; 1568 1569 err = fsl_mc_allocate_irqs(sw_dev); 1570 if (err) { 1571 dev_err(dev, "MC irqs allocation failed\n"); 1572 return err; 1573 } 1574 1575 if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) { 1576 err = -EINVAL; 1577 goto free_irq; 1578 } 1579 1580 err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle, 1581 DPSW_IRQ_INDEX_IF, 0); 1582 if (err) { 1583 dev_err(dev, "dpsw_set_irq_enable err %d\n", err); 1584 goto free_irq; 1585 } 1586 1587 irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF]; 1588 1589 err = devm_request_threaded_irq(dev, irq->virq, NULL, 1590 dpaa2_switch_irq0_handler_thread, 1591 IRQF_NO_SUSPEND | IRQF_ONESHOT, 1592 dev_name(dev), dev); 1593 if (err) { 1594 dev_err(dev, "devm_request_threaded_irq(): %d\n", err); 1595 goto free_irq; 1596 } 1597 1598 err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle, 1599 DPSW_IRQ_INDEX_IF, mask); 1600 if (err) { 1601 dev_err(dev, "dpsw_set_irq_mask(): %d\n", err); 1602 goto free_devm_irq; 1603 } 1604 1605 err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle, 1606 DPSW_IRQ_INDEX_IF, 1); 1607 if (err) { 1608 dev_err(dev, "dpsw_set_irq_enable(): %d\n", err); 1609 goto free_devm_irq; 1610 } 1611 1612 return 0; 1613 1614 free_devm_irq: 1615 devm_free_irq(dev, irq->virq, dev); 1616 free_irq: 1617 fsl_mc_free_irqs(sw_dev); 1618 return err; 1619 } 1620 1621 static void dpaa2_switch_teardown_irqs(struct fsl_mc_device *sw_dev) 1622 { 1623 struct device *dev = &sw_dev->dev; 1624 struct ethsw_core *ethsw = dev_get_drvdata(dev); 1625 int err; 1626 1627 err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle, 1628 DPSW_IRQ_INDEX_IF, 0); 1629 if (err) 1630 dev_err(dev, "dpsw_set_irq_enable err %d\n", err); 1631 1632 fsl_mc_free_irqs(sw_dev); 1633 } 1634 1635 static int dpaa2_switch_port_set_learning(struct ethsw_port_priv *port_priv, bool enable) 1636 { 1637 struct ethsw_core *ethsw = port_priv->ethsw_data; 1638 enum dpsw_learning_mode learn_mode; 1639 int err; 1640 1641 if (enable) 1642 learn_mode = DPSW_LEARNING_MODE_HW; 1643 else 1644 learn_mode = DPSW_LEARNING_MODE_DIS; 1645 1646 err = dpsw_if_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 1647 port_priv->idx, learn_mode); 1648 if (err) 1649 netdev_err(port_priv->netdev, "dpsw_if_set_learning_mode err %d\n", err); 1650 1651 if (!enable) 1652 dpaa2_switch_port_fast_age(port_priv); 1653 1654 return err; 1655 } 1656 1657 static int dpaa2_switch_port_attr_stp_state_set(struct net_device *netdev, 1658 u8 state) 1659 { 1660 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 1661 int err; 1662 1663 err = dpaa2_switch_port_set_stp_state(port_priv, state); 1664 if (err) 1665 return err; 1666 1667 switch (state) { 1668 case BR_STATE_DISABLED: 1669 case BR_STATE_BLOCKING: 1670 case BR_STATE_LISTENING: 1671 err = dpaa2_switch_port_set_learning(port_priv, false); 1672 break; 
	case BR_STATE_LEARNING:
	case BR_STATE_FORWARDING:
		err = dpaa2_switch_port_set_learning(port_priv,
						     port_priv->learn_ena);
		break;
	}

	return err;
}

static int dpaa2_switch_port_flood(struct ethsw_port_priv *port_priv,
				   struct switchdev_brport_flags flags)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;

	if (flags.mask & BR_BCAST_FLOOD)
		port_priv->bcast_flood = !!(flags.val & BR_BCAST_FLOOD);

	if (flags.mask & BR_FLOOD)
		port_priv->ucast_flood = !!(flags.val & BR_FLOOD);

	return dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
}

static int dpaa2_switch_port_pre_bridge_flags(struct net_device *netdev,
					      struct switchdev_brport_flags flags,
					      struct netlink_ext_ack *extack)
{
	if (flags.mask & ~(BR_LEARNING | BR_BCAST_FLOOD | BR_FLOOD |
			   BR_MCAST_FLOOD))
		return -EINVAL;

	if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD)) {
		bool multicast = !!(flags.val & BR_MCAST_FLOOD);
		bool unicast = !!(flags.val & BR_FLOOD);

		if (unicast != multicast) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Cannot configure multicast flooding independently of unicast");
			return -EINVAL;
		}
	}

	return 0;
}

static int dpaa2_switch_port_bridge_flags(struct net_device *netdev,
					  struct switchdev_brport_flags flags,
					  struct netlink_ext_ack *extack)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	if (flags.mask & BR_LEARNING) {
		bool learn_ena = !!(flags.val & BR_LEARNING);

		err = dpaa2_switch_port_set_learning(port_priv, learn_ena);
		if (err)
			return err;
		port_priv->learn_ena = learn_ena;
	}

	if (flags.mask & (BR_BCAST_FLOOD | BR_FLOOD | BR_MCAST_FLOOD)) {
		err = dpaa2_switch_port_flood(port_priv, flags);
		if (err)
			return err;
	}

	return 0;
}

static int dpaa2_switch_port_attr_set(struct net_device *netdev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack)
{
	int err = 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = dpaa2_switch_port_attr_stp_state_set(netdev,
							   attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		if (!attr->u.vlan_filtering) {
			NL_SET_ERR_MSG_MOD(extack,
					   "The DPAA2 switch does not support VLAN-unaware operation");
			return -EOPNOTSUPP;
		}
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		err = dpaa2_switch_port_pre_bridge_flags(netdev, attr->u.brport_flags, extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = dpaa2_switch_port_bridge_flags(netdev, attr->u.brport_flags, extack);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

int dpaa2_switch_port_vlans_add(struct net_device *netdev,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpsw_attr *attr = &ethsw->sw_attr;
	int err = 0;

	/* Make sure that the VLAN is not already configured
	 * on the switch port
	 */
	if (port_priv->vlans[vlan->vid] & ETHSW_VLAN_MEMBER) {
		netdev_err(netdev, "VLAN %d already configured\n", vlan->vid);
		return -EEXIST;
	}

	/* Check if there is space for a new VLAN */
	err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  &ethsw->sw_attr);
	if (err) {
		netdev_err(netdev, "dpsw_get_attributes err %d\n", err);
		return err;
	}
	if (attr->max_vlans - attr->num_vlans < 1)
		return -ENOSPC;

	if (!port_priv->ethsw_data->vlans[vlan->vid]) {
		/* this is a new VLAN */
		err = dpaa2_switch_add_vlan(port_priv, vlan->vid);
		if (err)
			return err;

		port_priv->ethsw_data->vlans[vlan->vid] |= ETHSW_VLAN_GLOBAL;
	}

	return dpaa2_switch_port_add_vlan(port_priv, vlan->vid, vlan->flags);
}

static int dpaa2_switch_port_lookup_address(struct net_device *netdev, int is_uc,
					    const unsigned char *addr)
{
	struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);
	list_for_each_entry(ha, &list->list, list) {
		if (ether_addr_equal(ha->addr, addr)) {
			netif_addr_unlock_bh(netdev);
			return 1;
		}
	}
	netif_addr_unlock_bh(netdev);
	return 0;
}

static int dpaa2_switch_port_mdb_add(struct net_device *netdev,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	/* Check if address is already set on this port */
	if (dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
		return -EEXIST;

	err = dpaa2_switch_port_fdb_add_mc(port_priv, mdb->addr);
	if (err)
		return err;

	err = dev_mc_add(netdev, mdb->addr);
	if (err) {
		netdev_err(netdev, "dev_mc_add err %d\n", err);
		dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
	}

	return err;
}

static int dpaa2_switch_port_obj_add(struct net_device *netdev,
				     const struct switchdev_obj *obj)
{
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dpaa2_switch_port_vlans_add(netdev,
						  SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = dpaa2_switch_port_mdb_add(netdev,
						SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int dpaa2_switch_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_vlan_if_cfg vcfg;
	int i, err;

	if (!port_priv->vlans[vid])
		return -ENOENT;

	if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) {
		/* If we are deleting the PVID of a port, use VLAN 4095 instead
		 * as we are sure that neither the bridge nor the 8021q module
		 * will use it
		 */
		err = dpaa2_switch_port_set_pvid(port_priv, 4095);
		if (err)
			return err;
	}

	vcfg.num_ifs = 1;
	vcfg.if_id[0] = port_priv->idx;
	if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) {
		err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0,
						   ethsw->dpsw_handle,
						   vid, &vcfg);
		if (err) {
			netdev_err(netdev,
				   "dpsw_vlan_remove_if_untagged err %d\n",
				   err);
		}
		port_priv->vlans[vid]
&= ~ETHSW_VLAN_UNTAGGED; 1918 } 1919 1920 if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) { 1921 err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle, 1922 vid, &vcfg); 1923 if (err) { 1924 netdev_err(netdev, 1925 "dpsw_vlan_remove_if err %d\n", err); 1926 return err; 1927 } 1928 port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER; 1929 1930 /* Delete VLAN from switch if it is no longer configured on 1931 * any port 1932 */ 1933 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { 1934 if (ethsw->ports[i] && 1935 ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER) 1936 return 0; /* Found a port member in VID */ 1937 } 1938 1939 ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL; 1940 1941 err = dpaa2_switch_dellink(ethsw, vid); 1942 if (err) 1943 return err; 1944 } 1945 1946 return 0; 1947 } 1948 1949 int dpaa2_switch_port_vlans_del(struct net_device *netdev, 1950 const struct switchdev_obj_port_vlan *vlan) 1951 { 1952 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 1953 1954 if (netif_is_bridge_master(vlan->obj.orig_dev)) 1955 return -EOPNOTSUPP; 1956 1957 return dpaa2_switch_port_del_vlan(port_priv, vlan->vid); 1958 } 1959 1960 static int dpaa2_switch_port_mdb_del(struct net_device *netdev, 1961 const struct switchdev_obj_port_mdb *mdb) 1962 { 1963 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 1964 int err; 1965 1966 if (!dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr)) 1967 return -ENOENT; 1968 1969 err = dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr); 1970 if (err) 1971 return err; 1972 1973 err = dev_mc_del(netdev, mdb->addr); 1974 if (err) { 1975 netdev_err(netdev, "dev_mc_del err %d\n", err); 1976 return err; 1977 } 1978 1979 return err; 1980 } 1981 1982 static int dpaa2_switch_port_obj_del(struct net_device *netdev, 1983 const struct switchdev_obj *obj) 1984 { 1985 int err; 1986 1987 switch (obj->id) { 1988 case SWITCHDEV_OBJ_ID_PORT_VLAN: 1989 err = dpaa2_switch_port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj)); 1990 break; 1991 case SWITCHDEV_OBJ_ID_PORT_MDB: 1992 err = dpaa2_switch_port_mdb_del(netdev, SWITCHDEV_OBJ_PORT_MDB(obj)); 1993 break; 1994 default: 1995 err = -EOPNOTSUPP; 1996 break; 1997 } 1998 return err; 1999 } 2000 2001 static int dpaa2_switch_port_attr_set_event(struct net_device *netdev, 2002 struct switchdev_notifier_port_attr_info *ptr) 2003 { 2004 int err; 2005 2006 err = switchdev_handle_port_attr_set(netdev, ptr, 2007 dpaa2_switch_port_dev_check, 2008 dpaa2_switch_port_attr_set); 2009 return notifier_from_errno(err); 2010 } 2011 2012 static int dpaa2_switch_port_bridge_join(struct net_device *netdev, 2013 struct net_device *upper_dev, 2014 struct netlink_ext_ack *extack) 2015 { 2016 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 2017 struct dpaa2_switch_fdb *old_fdb = port_priv->fdb; 2018 struct ethsw_core *ethsw = port_priv->ethsw_data; 2019 bool learn_ena; 2020 int err; 2021 2022 /* Delete the previously manually installed VLAN 1 */ 2023 err = dpaa2_switch_port_del_vlan(port_priv, 1); 2024 if (err) 2025 return err; 2026 2027 dpaa2_switch_port_set_fdb(port_priv, upper_dev); 2028 2029 /* Inherit the initial bridge port learning state */ 2030 learn_ena = br_port_flag_is_set(netdev, BR_LEARNING); 2031 err = dpaa2_switch_port_set_learning(port_priv, learn_ena); 2032 port_priv->learn_ena = learn_ena; 2033 2034 /* Setup the egress flood policy (broadcast, unknown unicast) */ 2035 err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id); 2036 if (err) 2037 goto err_egress_flood; 2038 2039 /* Recreate the egress flood domain of 
the FDB that we just left. */ 2040 err = dpaa2_switch_fdb_set_egress_flood(ethsw, old_fdb->fdb_id); 2041 if (err) 2042 goto err_egress_flood; 2043 2044 err = switchdev_bridge_port_offload(netdev, netdev, NULL, 2045 NULL, NULL, false, extack); 2046 if (err) 2047 goto err_switchdev_offload; 2048 2049 return 0; 2050 2051 err_switchdev_offload: 2052 err_egress_flood: 2053 dpaa2_switch_port_set_fdb(port_priv, NULL); 2054 return err; 2055 } 2056 2057 static int dpaa2_switch_port_clear_rxvlan(struct net_device *vdev, int vid, void *arg) 2058 { 2059 __be16 vlan_proto = htons(ETH_P_8021Q); 2060 2061 if (vdev) 2062 vlan_proto = vlan_dev_vlan_proto(vdev); 2063 2064 return dpaa2_switch_port_vlan_kill(arg, vlan_proto, vid); 2065 } 2066 2067 static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, void *arg) 2068 { 2069 __be16 vlan_proto = htons(ETH_P_8021Q); 2070 2071 if (vdev) 2072 vlan_proto = vlan_dev_vlan_proto(vdev); 2073 2074 return dpaa2_switch_port_vlan_add(arg, vlan_proto, vid); 2075 } 2076 2077 static void dpaa2_switch_port_pre_bridge_leave(struct net_device *netdev) 2078 { 2079 switchdev_bridge_port_unoffload(netdev, NULL, NULL, NULL); 2080 } 2081 2082 static int dpaa2_switch_port_bridge_leave(struct net_device *netdev) 2083 { 2084 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 2085 struct dpaa2_switch_fdb *old_fdb = port_priv->fdb; 2086 struct ethsw_core *ethsw = port_priv->ethsw_data; 2087 int err; 2088 2089 /* First of all, fast age any learn FDB addresses on this switch port */ 2090 dpaa2_switch_port_fast_age(port_priv); 2091 2092 /* Clear all RX VLANs installed through vlan_vid_add() either as VLAN 2093 * upper devices or otherwise from the FDB table that we are about to 2094 * leave 2095 */ 2096 err = vlan_for_each(netdev, dpaa2_switch_port_clear_rxvlan, netdev); 2097 if (err) 2098 netdev_err(netdev, "Unable to clear RX VLANs from old FDB table, err (%d)\n", err); 2099 2100 dpaa2_switch_port_set_fdb(port_priv, NULL); 2101 2102 /* Restore all RX VLANs into the new FDB table that we just joined */ 2103 err = vlan_for_each(netdev, dpaa2_switch_port_restore_rxvlan, netdev); 2104 if (err) 2105 netdev_err(netdev, "Unable to restore RX VLANs to the new FDB, err (%d)\n", err); 2106 2107 /* Reset the flooding state to denote that this port can send any 2108 * packet in standalone mode. With this, we are also ensuring that any 2109 * later bridge join will have the flooding flag on. 2110 */ 2111 port_priv->bcast_flood = true; 2112 port_priv->ucast_flood = true; 2113 2114 /* Setup the egress flood policy (broadcast, unknown unicast). 2115 * When the port is not under a bridge, only the CTRL interface is part 2116 * of the flooding domain besides the actual port 2117 */ 2118 err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id); 2119 if (err) 2120 return err; 2121 2122 /* Recreate the egress flood domain of the FDB that we just left */ 2123 err = dpaa2_switch_fdb_set_egress_flood(ethsw, old_fdb->fdb_id); 2124 if (err) 2125 return err; 2126 2127 /* No HW learning when not under a bridge */ 2128 err = dpaa2_switch_port_set_learning(port_priv, false); 2129 if (err) 2130 return err; 2131 port_priv->learn_ena = false; 2132 2133 /* Add the VLAN 1 as PVID when not under a bridge. 
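This mirrors the default VLAN setup applied in dpaa2_switch_port_init() for standalone operation.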
We need this since 2134 * the dpaa2 switch interfaces are not capable of operating VLAN-unaware 2135 */ 2136 return dpaa2_switch_port_add_vlan(port_priv, DEFAULT_VLAN_ID, 2137 BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID); 2138 } 2139 2140 static int dpaa2_switch_prevent_bridging_with_8021q_upper(struct net_device *netdev) 2141 { 2142 struct net_device *upper_dev; 2143 struct list_head *iter; 2144 2145 /* RCU read lock not necessary because we have write-side protection 2146 * (rtnl_mutex), however a non-rcu iterator does not exist. 2147 */ 2148 netdev_for_each_upper_dev_rcu(netdev, upper_dev, iter) 2149 if (is_vlan_dev(upper_dev)) 2150 return -EOPNOTSUPP; 2151 2152 return 0; 2153 } 2154 2155 static int 2156 dpaa2_switch_prechangeupper_sanity_checks(struct net_device *netdev, 2157 struct net_device *upper_dev, 2158 struct netlink_ext_ack *extack) 2159 { 2160 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 2161 struct ethsw_port_priv *other_port_priv; 2162 struct net_device *other_dev; 2163 struct list_head *iter; 2164 int err; 2165 2166 if (!br_vlan_enabled(upper_dev)) { 2167 NL_SET_ERR_MSG_MOD(extack, "Cannot join a VLAN-unaware bridge"); 2168 return -EOPNOTSUPP; 2169 } 2170 2171 err = dpaa2_switch_prevent_bridging_with_8021q_upper(netdev); 2172 if (err) { 2173 NL_SET_ERR_MSG_MOD(extack, 2174 "Cannot join a bridge while VLAN uppers are present"); 2175 return err; 2176 } 2177 2178 netdev_for_each_lower_dev(upper_dev, other_dev, iter) { 2179 if (!dpaa2_switch_port_dev_check(other_dev)) 2180 continue; 2181 2182 other_port_priv = netdev_priv(other_dev); 2183 if (other_port_priv->ethsw_data != port_priv->ethsw_data) { 2184 NL_SET_ERR_MSG_MOD(extack, 2185 "Interface from a different DPSW is in the bridge already"); 2186 return -EINVAL; 2187 } 2188 } 2189 2190 return 0; 2191 } 2192 2193 static int dpaa2_switch_port_prechangeupper(struct net_device *netdev, 2194 struct netdev_notifier_changeupper_info *info) 2195 { 2196 struct netlink_ext_ack *extack; 2197 struct net_device *upper_dev; 2198 int err; 2199 2200 if (!dpaa2_switch_port_dev_check(netdev)) 2201 return 0; 2202 2203 extack = netdev_notifier_info_to_extack(&info->info); 2204 upper_dev = info->upper_dev; 2205 if (netif_is_bridge_master(upper_dev)) { 2206 err = dpaa2_switch_prechangeupper_sanity_checks(netdev, 2207 upper_dev, 2208 extack); 2209 if (err) 2210 return err; 2211 2212 if (!info->linking) 2213 dpaa2_switch_port_pre_bridge_leave(netdev); 2214 } 2215 2216 return 0; 2217 } 2218 2219 static int dpaa2_switch_port_changeupper(struct net_device *netdev, 2220 struct netdev_notifier_changeupper_info *info) 2221 { 2222 struct netlink_ext_ack *extack; 2223 struct net_device *upper_dev; 2224 2225 if (!dpaa2_switch_port_dev_check(netdev)) 2226 return 0; 2227 2228 extack = netdev_notifier_info_to_extack(&info->info); 2229 2230 upper_dev = info->upper_dev; 2231 if (netif_is_bridge_master(upper_dev)) { 2232 if (info->linking) 2233 return dpaa2_switch_port_bridge_join(netdev, 2234 upper_dev, 2235 extack); 2236 else 2237 return dpaa2_switch_port_bridge_leave(netdev); 2238 } 2239 2240 return 0; 2241 } 2242 2243 static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb, 2244 unsigned long event, void *ptr) 2245 { 2246 struct net_device *netdev = netdev_notifier_info_to_dev(ptr); 2247 int err = 0; 2248 2249 switch (event) { 2250 case NETDEV_PRECHANGEUPPER: 2251 err = dpaa2_switch_port_prechangeupper(netdev, ptr); 2252 if (err) 2253 return notifier_from_errno(err); 2254 2255 break; 2256 case NETDEV_CHANGEUPPER: 2257 err =
dpaa2_switch_port_changeupper(netdev, ptr); 2258 if (err) 2259 return notifier_from_errno(err); 2260 2261 break; 2262 } 2263 2264 return NOTIFY_DONE; 2265 } 2266 2267 struct ethsw_switchdev_event_work { 2268 struct work_struct work; 2269 struct switchdev_notifier_fdb_info fdb_info; 2270 struct net_device *dev; 2271 unsigned long event; 2272 }; 2273 2274 static void dpaa2_switch_event_work(struct work_struct *work) 2275 { 2276 struct ethsw_switchdev_event_work *switchdev_work = 2277 container_of(work, struct ethsw_switchdev_event_work, work); 2278 struct net_device *dev = switchdev_work->dev; 2279 struct switchdev_notifier_fdb_info *fdb_info; 2280 int err; 2281 2282 rtnl_lock(); 2283 fdb_info = &switchdev_work->fdb_info; 2284 2285 switch (switchdev_work->event) { 2286 case SWITCHDEV_FDB_ADD_TO_DEVICE: 2287 if (!fdb_info->added_by_user || fdb_info->is_local) 2288 break; 2289 if (is_unicast_ether_addr(fdb_info->addr)) 2290 err = dpaa2_switch_port_fdb_add_uc(netdev_priv(dev), 2291 fdb_info->addr); 2292 else 2293 err = dpaa2_switch_port_fdb_add_mc(netdev_priv(dev), 2294 fdb_info->addr); 2295 if (err) 2296 break; 2297 fdb_info->offloaded = true; 2298 call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev, 2299 &fdb_info->info, NULL); 2300 break; 2301 case SWITCHDEV_FDB_DEL_TO_DEVICE: 2302 if (!fdb_info->added_by_user || fdb_info->is_local) 2303 break; 2304 if (is_unicast_ether_addr(fdb_info->addr)) 2305 dpaa2_switch_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr); 2306 else 2307 dpaa2_switch_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr); 2308 break; 2309 } 2310 2311 rtnl_unlock(); 2312 kfree(switchdev_work->fdb_info.addr); 2313 kfree(switchdev_work); 2314 dev_put(dev); 2315 } 2316 2317 /* Called under rcu_read_lock() */ 2318 static int dpaa2_switch_port_event(struct notifier_block *nb, 2319 unsigned long event, void *ptr) 2320 { 2321 struct net_device *dev = switchdev_notifier_info_to_dev(ptr); 2322 struct ethsw_port_priv *port_priv = netdev_priv(dev); 2323 struct ethsw_switchdev_event_work *switchdev_work; 2324 struct switchdev_notifier_fdb_info *fdb_info = ptr; 2325 struct ethsw_core *ethsw = port_priv->ethsw_data; 2326 2327 if (event == SWITCHDEV_PORT_ATTR_SET) 2328 return dpaa2_switch_port_attr_set_event(dev, ptr); 2329 2330 if (!dpaa2_switch_port_dev_check(dev)) 2331 return NOTIFY_DONE; 2332 2333 switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); 2334 if (!switchdev_work) 2335 return NOTIFY_BAD; 2336 2337 INIT_WORK(&switchdev_work->work, dpaa2_switch_event_work); 2338 switchdev_work->dev = dev; 2339 switchdev_work->event = event; 2340 2341 switch (event) { 2342 case SWITCHDEV_FDB_ADD_TO_DEVICE: 2343 case SWITCHDEV_FDB_DEL_TO_DEVICE: 2344 memcpy(&switchdev_work->fdb_info, ptr, 2345 sizeof(switchdev_work->fdb_info)); 2346 switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); 2347 if (!switchdev_work->fdb_info.addr) 2348 goto err_addr_alloc; 2349 2350 ether_addr_copy((u8 *)switchdev_work->fdb_info.addr, 2351 fdb_info->addr); 2352 2353 /* Take a reference on the device to avoid being freed. 
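The reference is dropped with dev_put() in dpaa2_switch_event_work() once the deferred FDB update has been handled.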
*/ 2354 dev_hold(dev); 2355 break; 2356 default: 2357 kfree(switchdev_work); 2358 return NOTIFY_DONE; 2359 } 2360 2361 queue_work(ethsw->workqueue, &switchdev_work->work); 2362 2363 return NOTIFY_DONE; 2364 2365 err_addr_alloc: 2366 kfree(switchdev_work); 2367 return NOTIFY_BAD; 2368 } 2369 2370 static int dpaa2_switch_port_obj_event(unsigned long event, 2371 struct net_device *netdev, 2372 struct switchdev_notifier_port_obj_info *port_obj_info) 2373 { 2374 int err = -EOPNOTSUPP; 2375 2376 if (!dpaa2_switch_port_dev_check(netdev)) 2377 return NOTIFY_DONE; 2378 2379 switch (event) { 2380 case SWITCHDEV_PORT_OBJ_ADD: 2381 err = dpaa2_switch_port_obj_add(netdev, port_obj_info->obj); 2382 break; 2383 case SWITCHDEV_PORT_OBJ_DEL: 2384 err = dpaa2_switch_port_obj_del(netdev, port_obj_info->obj); 2385 break; 2386 } 2387 2388 port_obj_info->handled = true; 2389 return notifier_from_errno(err); 2390 } 2391 2392 static int dpaa2_switch_port_blocking_event(struct notifier_block *nb, 2393 unsigned long event, void *ptr) 2394 { 2395 struct net_device *dev = switchdev_notifier_info_to_dev(ptr); 2396 2397 switch (event) { 2398 case SWITCHDEV_PORT_OBJ_ADD: 2399 case SWITCHDEV_PORT_OBJ_DEL: 2400 return dpaa2_switch_port_obj_event(event, dev, ptr); 2401 case SWITCHDEV_PORT_ATTR_SET: 2402 return dpaa2_switch_port_attr_set_event(dev, ptr); 2403 } 2404 2405 return NOTIFY_DONE; 2406 } 2407 2408 /* Build a linear skb based on a single-buffer frame descriptor */ 2409 static struct sk_buff *dpaa2_switch_build_linear_skb(struct ethsw_core *ethsw, 2410 const struct dpaa2_fd *fd) 2411 { 2412 u16 fd_offset = dpaa2_fd_get_offset(fd); 2413 dma_addr_t addr = dpaa2_fd_get_addr(fd); 2414 u32 fd_length = dpaa2_fd_get_len(fd); 2415 struct device *dev = ethsw->dev; 2416 struct sk_buff *skb = NULL; 2417 void *fd_vaddr; 2418 2419 fd_vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, addr); 2420 dma_unmap_page(dev, addr, DPAA2_SWITCH_RX_BUF_SIZE, 2421 DMA_FROM_DEVICE); 2422 2423 skb = build_skb(fd_vaddr, DPAA2_SWITCH_RX_BUF_SIZE + 2424 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); 2425 if (unlikely(!skb)) { 2426 dev_err(dev, "build_skb() failed\n"); 2427 return NULL; 2428 } 2429 2430 skb_reserve(skb, fd_offset); 2431 skb_put(skb, fd_length); 2432 2433 ethsw->buf_count--; 2434 2435 return skb; 2436 } 2437 2438 static void dpaa2_switch_tx_conf(struct dpaa2_switch_fq *fq, 2439 const struct dpaa2_fd *fd) 2440 { 2441 dpaa2_switch_free_fd(fq->ethsw, fd); 2442 } 2443 2444 static void dpaa2_switch_rx(struct dpaa2_switch_fq *fq, 2445 const struct dpaa2_fd *fd) 2446 { 2447 struct ethsw_core *ethsw = fq->ethsw; 2448 struct ethsw_port_priv *port_priv; 2449 struct net_device *netdev; 2450 struct vlan_ethhdr *hdr; 2451 struct sk_buff *skb; 2452 u16 vlan_tci, vid; 2453 int if_id, err; 2454 2455 /* get switch ingress interface ID */ 2456 if_id = upper_32_bits(dpaa2_fd_get_flc(fd)) & 0x0000FFFF; 2457 2458 if (if_id >= ethsw->sw_attr.num_ifs) { 2459 dev_err(ethsw->dev, "Frame received from unknown interface!\n"); 2460 goto err_free_fd; 2461 } 2462 port_priv = ethsw->ports[if_id]; 2463 netdev = port_priv->netdev; 2464 2465 /* build the SKB based on the FD received */ 2466 if (dpaa2_fd_get_format(fd) != dpaa2_fd_single) { 2467 if (net_ratelimit()) { 2468 netdev_err(netdev, "Received invalid frame format\n"); 2469 goto err_free_fd; 2470 } 2471 } 2472 2473 skb = dpaa2_switch_build_linear_skb(ethsw, fd); 2474 if (unlikely(!skb)) 2475 goto err_free_fd; 2476 2477 skb_reset_mac_header(skb); 2478 2479 /* Remove the VLAN header if the packet that we just 
received has a vid 2480 * equal to the port PVIDs. Since the dpaa2-switch can operate only in 2481 * VLAN-aware mode and no alterations are made on the packet when it's 2482 * redirected/mirrored to the control interface, we are sure that there 2483 * will always be a VLAN header present. 2484 */ 2485 hdr = vlan_eth_hdr(skb); 2486 vid = ntohs(hdr->h_vlan_TCI) & VLAN_VID_MASK; 2487 if (vid == port_priv->pvid) { 2488 err = __skb_vlan_pop(skb, &vlan_tci); 2489 if (err) { 2490 dev_info(ethsw->dev, "__skb_vlan_pop() returned %d", err); 2491 goto err_free_fd; 2492 } 2493 } 2494 2495 skb->dev = netdev; 2496 skb->protocol = eth_type_trans(skb, skb->dev); 2497 2498 /* Setup the offload_fwd_mark only if the port is under a bridge */ 2499 skb->offload_fwd_mark = !!(port_priv->fdb->bridge_dev); 2500 2501 netif_receive_skb(skb); 2502 2503 return; 2504 2505 err_free_fd: 2506 dpaa2_switch_free_fd(ethsw, fd); 2507 } 2508 2509 static void dpaa2_switch_detect_features(struct ethsw_core *ethsw) 2510 { 2511 ethsw->features = 0; 2512 2513 if (ethsw->major > 8 || (ethsw->major == 8 && ethsw->minor >= 6)) 2514 ethsw->features |= ETHSW_FEATURE_MAC_ADDR; 2515 } 2516 2517 static int dpaa2_switch_setup_fqs(struct ethsw_core *ethsw) 2518 { 2519 struct dpsw_ctrl_if_attr ctrl_if_attr; 2520 struct device *dev = ethsw->dev; 2521 int i = 0; 2522 int err; 2523 2524 err = dpsw_ctrl_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, 2525 &ctrl_if_attr); 2526 if (err) { 2527 dev_err(dev, "dpsw_ctrl_if_get_attributes() = %d\n", err); 2528 return err; 2529 } 2530 2531 ethsw->fq[i].fqid = ctrl_if_attr.rx_fqid; 2532 ethsw->fq[i].ethsw = ethsw; 2533 ethsw->fq[i++].type = DPSW_QUEUE_RX; 2534 2535 ethsw->fq[i].fqid = ctrl_if_attr.tx_err_conf_fqid; 2536 ethsw->fq[i].ethsw = ethsw; 2537 ethsw->fq[i++].type = DPSW_QUEUE_TX_ERR_CONF; 2538 2539 return 0; 2540 } 2541 2542 /* Free buffers acquired from the buffer pool or which were meant to 2543 * be released in the pool 2544 */ 2545 static void dpaa2_switch_free_bufs(struct ethsw_core *ethsw, u64 *buf_array, int count) 2546 { 2547 struct device *dev = ethsw->dev; 2548 void *vaddr; 2549 int i; 2550 2551 for (i = 0; i < count; i++) { 2552 vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, buf_array[i]); 2553 dma_unmap_page(dev, buf_array[i], DPAA2_SWITCH_RX_BUF_SIZE, 2554 DMA_FROM_DEVICE); 2555 free_pages((unsigned long)vaddr, 0); 2556 } 2557 } 2558 2559 /* Perform a single release command to add buffers 2560 * to the specified buffer pool 2561 */ 2562 static int dpaa2_switch_add_bufs(struct ethsw_core *ethsw, u16 bpid) 2563 { 2564 struct device *dev = ethsw->dev; 2565 u64 buf_array[BUFS_PER_CMD]; 2566 struct page *page; 2567 int retries = 0; 2568 dma_addr_t addr; 2569 int err; 2570 int i; 2571 2572 for (i = 0; i < BUFS_PER_CMD; i++) { 2573 /* Allocate one page for each Rx buffer. WRIOP sees 2574 * the entire page except for a tailroom reserved for 2575 * skb shared info 2576 */ 2577 page = dev_alloc_pages(0); 2578 if (!page) { 2579 dev_err(dev, "buffer allocation failed\n"); 2580 goto err_alloc; 2581 } 2582 2583 addr = dma_map_page(dev, page, 0, DPAA2_SWITCH_RX_BUF_SIZE, 2584 DMA_FROM_DEVICE); 2585 if (dma_mapping_error(dev, addr)) { 2586 dev_err(dev, "dma_map_single() failed\n"); 2587 goto err_map; 2588 } 2589 buf_array[i] = addr; 2590 } 2591 2592 release_bufs: 2593 /* In case the portal is busy, retry until successful or 2594 * max retries hit. 
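dpaa2_io_service_release() keeps returning -EBUSY for as long as the software portal cannot accept the release command.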
2595 */ 2596 while ((err = dpaa2_io_service_release(NULL, bpid, 2597 buf_array, i)) == -EBUSY) { 2598 if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES) 2599 break; 2600 2601 cpu_relax(); 2602 } 2603 2604 /* If release command failed, clean up and bail out. */ 2605 if (err) { 2606 dpaa2_switch_free_bufs(ethsw, buf_array, i); 2607 return 0; 2608 } 2609 2610 return i; 2611 2612 err_map: 2613 __free_pages(page, 0); 2614 err_alloc: 2615 /* If we managed to allocate at least some buffers, 2616 * release them to hardware 2617 */ 2618 if (i) 2619 goto release_bufs; 2620 2621 return 0; 2622 } 2623 2624 static int dpaa2_switch_refill_bp(struct ethsw_core *ethsw) 2625 { 2626 int *count = ðsw->buf_count; 2627 int new_count; 2628 int err = 0; 2629 2630 if (unlikely(*count < DPAA2_ETHSW_REFILL_THRESH)) { 2631 do { 2632 new_count = dpaa2_switch_add_bufs(ethsw, ethsw->bpid); 2633 if (unlikely(!new_count)) { 2634 /* Out of memory; abort for now, we'll 2635 * try later on 2636 */ 2637 break; 2638 } 2639 *count += new_count; 2640 } while (*count < DPAA2_ETHSW_NUM_BUFS); 2641 2642 if (unlikely(*count < DPAA2_ETHSW_NUM_BUFS)) 2643 err = -ENOMEM; 2644 } 2645 2646 return err; 2647 } 2648 2649 static int dpaa2_switch_seed_bp(struct ethsw_core *ethsw) 2650 { 2651 int *count, ret, i; 2652 2653 for (i = 0; i < DPAA2_ETHSW_NUM_BUFS; i += BUFS_PER_CMD) { 2654 ret = dpaa2_switch_add_bufs(ethsw, ethsw->bpid); 2655 count = ðsw->buf_count; 2656 *count += ret; 2657 2658 if (unlikely(ret < BUFS_PER_CMD)) 2659 return -ENOMEM; 2660 } 2661 2662 return 0; 2663 } 2664 2665 static void dpaa2_switch_drain_bp(struct ethsw_core *ethsw) 2666 { 2667 u64 buf_array[BUFS_PER_CMD]; 2668 int ret; 2669 2670 do { 2671 ret = dpaa2_io_service_acquire(NULL, ethsw->bpid, 2672 buf_array, BUFS_PER_CMD); 2673 if (ret < 0) { 2674 dev_err(ethsw->dev, 2675 "dpaa2_io_service_acquire() = %d\n", ret); 2676 return; 2677 } 2678 dpaa2_switch_free_bufs(ethsw, buf_array, ret); 2679 2680 } while (ret); 2681 } 2682 2683 static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw) 2684 { 2685 struct dpsw_ctrl_if_pools_cfg dpsw_ctrl_if_pools_cfg = { 0 }; 2686 struct device *dev = ethsw->dev; 2687 struct fsl_mc_device *dpbp_dev; 2688 struct dpbp_attr dpbp_attrs; 2689 int err; 2690 2691 err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP, 2692 &dpbp_dev); 2693 if (err) { 2694 if (err == -ENXIO) 2695 err = -EPROBE_DEFER; 2696 else 2697 dev_err(dev, "DPBP device allocation failed\n"); 2698 return err; 2699 } 2700 ethsw->dpbp_dev = dpbp_dev; 2701 2702 err = dpbp_open(ethsw->mc_io, 0, dpbp_dev->obj_desc.id, 2703 &dpbp_dev->mc_handle); 2704 if (err) { 2705 dev_err(dev, "dpbp_open() failed\n"); 2706 goto err_open; 2707 } 2708 2709 err = dpbp_reset(ethsw->mc_io, 0, dpbp_dev->mc_handle); 2710 if (err) { 2711 dev_err(dev, "dpbp_reset() failed\n"); 2712 goto err_reset; 2713 } 2714 2715 err = dpbp_enable(ethsw->mc_io, 0, dpbp_dev->mc_handle); 2716 if (err) { 2717 dev_err(dev, "dpbp_enable() failed\n"); 2718 goto err_enable; 2719 } 2720 2721 err = dpbp_get_attributes(ethsw->mc_io, 0, dpbp_dev->mc_handle, 2722 &dpbp_attrs); 2723 if (err) { 2724 dev_err(dev, "dpbp_get_attributes() failed\n"); 2725 goto err_get_attr; 2726 } 2727 2728 dpsw_ctrl_if_pools_cfg.num_dpbp = 1; 2729 dpsw_ctrl_if_pools_cfg.pools[0].dpbp_id = dpbp_attrs.id; 2730 dpsw_ctrl_if_pools_cfg.pools[0].buffer_size = DPAA2_SWITCH_RX_BUF_SIZE; 2731 dpsw_ctrl_if_pools_cfg.pools[0].backup_pool = 0; 2732 2733 err = dpsw_ctrl_if_set_pools(ethsw->mc_io, 0, ethsw->dpsw_handle, 2734 &dpsw_ctrl_if_pools_cfg); 
2735 if (err) { 2736 dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n"); 2737 goto err_get_attr; 2738 } 2739 ethsw->bpid = dpbp_attrs.id; 2740 2741 return 0; 2742 2743 err_get_attr: 2744 dpbp_disable(ethsw->mc_io, 0, dpbp_dev->mc_handle); 2745 err_enable: 2746 err_reset: 2747 dpbp_close(ethsw->mc_io, 0, dpbp_dev->mc_handle); 2748 err_open: 2749 fsl_mc_object_free(dpbp_dev); 2750 return err; 2751 } 2752 2753 static void dpaa2_switch_free_dpbp(struct ethsw_core *ethsw) 2754 { 2755 dpbp_disable(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle); 2756 dpbp_close(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle); 2757 fsl_mc_object_free(ethsw->dpbp_dev); 2758 } 2759 2760 static int dpaa2_switch_alloc_rings(struct ethsw_core *ethsw) 2761 { 2762 int i; 2763 2764 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) { 2765 ethsw->fq[i].store = 2766 dpaa2_io_store_create(DPAA2_SWITCH_STORE_SIZE, 2767 ethsw->dev); 2768 if (!ethsw->fq[i].store) { 2769 dev_err(ethsw->dev, "dpaa2_io_store_create failed\n"); 2770 while (--i >= 0) 2771 dpaa2_io_store_destroy(ethsw->fq[i].store); 2772 return -ENOMEM; 2773 } 2774 } 2775 2776 return 0; 2777 } 2778 2779 static void dpaa2_switch_destroy_rings(struct ethsw_core *ethsw) 2780 { 2781 int i; 2782 2783 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) 2784 dpaa2_io_store_destroy(ethsw->fq[i].store); 2785 } 2786 2787 static int dpaa2_switch_pull_fq(struct dpaa2_switch_fq *fq) 2788 { 2789 int err, retries = 0; 2790 2791 /* Try to pull from the FQ while the portal is busy and we didn't hit 2792 * the maximum number fo retries 2793 */ 2794 do { 2795 err = dpaa2_io_service_pull_fq(NULL, fq->fqid, fq->store); 2796 cpu_relax(); 2797 } while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES); 2798 2799 if (unlikely(err)) 2800 dev_err(fq->ethsw->dev, "dpaa2_io_service_pull err %d", err); 2801 2802 return err; 2803 } 2804 2805 /* Consume all frames pull-dequeued into the store */ 2806 static int dpaa2_switch_store_consume(struct dpaa2_switch_fq *fq) 2807 { 2808 struct ethsw_core *ethsw = fq->ethsw; 2809 int cleaned = 0, is_last; 2810 struct dpaa2_dq *dq; 2811 int retries = 0; 2812 2813 do { 2814 /* Get the next available FD from the store */ 2815 dq = dpaa2_io_store_next(fq->store, &is_last); 2816 if (unlikely(!dq)) { 2817 if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES) { 2818 dev_err_once(ethsw->dev, 2819 "No valid dequeue response\n"); 2820 return -ETIMEDOUT; 2821 } 2822 continue; 2823 } 2824 2825 if (fq->type == DPSW_QUEUE_RX) 2826 dpaa2_switch_rx(fq, dpaa2_dq_fd(dq)); 2827 else 2828 dpaa2_switch_tx_conf(fq, dpaa2_dq_fd(dq)); 2829 cleaned++; 2830 2831 } while (!is_last); 2832 2833 return cleaned; 2834 } 2835 2836 /* NAPI poll routine */ 2837 static int dpaa2_switch_poll(struct napi_struct *napi, int budget) 2838 { 2839 int err, cleaned = 0, store_cleaned, work_done; 2840 struct dpaa2_switch_fq *fq; 2841 int retries = 0; 2842 2843 fq = container_of(napi, struct dpaa2_switch_fq, napi); 2844 2845 do { 2846 err = dpaa2_switch_pull_fq(fq); 2847 if (unlikely(err)) 2848 break; 2849 2850 /* Refill pool if appropriate */ 2851 dpaa2_switch_refill_bp(fq->ethsw); 2852 2853 store_cleaned = dpaa2_switch_store_consume(fq); 2854 cleaned += store_cleaned; 2855 2856 if (cleaned >= budget) { 2857 work_done = budget; 2858 goto out; 2859 } 2860 2861 } while (store_cleaned); 2862 2863 /* We didn't consume the entire budget, so finish napi and re-enable 2864 * data availability notifications 2865 */ 2866 napi_complete_done(napi, cleaned); 2867 do { 2868 err = dpaa2_io_service_rearm(NULL, &fq->nctx); 2869 
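/* The software portal may be busy; back off and retry the rearm, bounded by DPAA2_SWITCH_SWP_BUSY_RETRIES */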
cpu_relax(); 2870 } while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES); 2871 2872 work_done = max(cleaned, 1); 2873 out: 2874 2875 return work_done; 2876 } 2877 2878 static void dpaa2_switch_fqdan_cb(struct dpaa2_io_notification_ctx *nctx) 2879 { 2880 struct dpaa2_switch_fq *fq; 2881 2882 fq = container_of(nctx, struct dpaa2_switch_fq, nctx); 2883 2884 napi_schedule(&fq->napi); 2885 } 2886 2887 static int dpaa2_switch_setup_dpio(struct ethsw_core *ethsw) 2888 { 2889 struct dpsw_ctrl_if_queue_cfg queue_cfg; 2890 struct dpaa2_io_notification_ctx *nctx; 2891 int err, i, j; 2892 2893 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) { 2894 nctx = ðsw->fq[i].nctx; 2895 2896 /* Register a new software context for the FQID. 2897 * By using NULL as the first parameter, we specify that we do 2898 * not care on which cpu are interrupts received for this queue 2899 */ 2900 nctx->is_cdan = 0; 2901 nctx->id = ethsw->fq[i].fqid; 2902 nctx->desired_cpu = DPAA2_IO_ANY_CPU; 2903 nctx->cb = dpaa2_switch_fqdan_cb; 2904 err = dpaa2_io_service_register(NULL, nctx, ethsw->dev); 2905 if (err) { 2906 err = -EPROBE_DEFER; 2907 goto err_register; 2908 } 2909 2910 queue_cfg.options = DPSW_CTRL_IF_QUEUE_OPT_DEST | 2911 DPSW_CTRL_IF_QUEUE_OPT_USER_CTX; 2912 queue_cfg.dest_cfg.dest_type = DPSW_CTRL_IF_DEST_DPIO; 2913 queue_cfg.dest_cfg.dest_id = nctx->dpio_id; 2914 queue_cfg.dest_cfg.priority = 0; 2915 queue_cfg.user_ctx = nctx->qman64; 2916 2917 err = dpsw_ctrl_if_set_queue(ethsw->mc_io, 0, 2918 ethsw->dpsw_handle, 2919 ethsw->fq[i].type, 2920 &queue_cfg); 2921 if (err) 2922 goto err_set_queue; 2923 } 2924 2925 return 0; 2926 2927 err_set_queue: 2928 dpaa2_io_service_deregister(NULL, nctx, ethsw->dev); 2929 err_register: 2930 for (j = 0; j < i; j++) 2931 dpaa2_io_service_deregister(NULL, ðsw->fq[j].nctx, 2932 ethsw->dev); 2933 2934 return err; 2935 } 2936 2937 static void dpaa2_switch_free_dpio(struct ethsw_core *ethsw) 2938 { 2939 int i; 2940 2941 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) 2942 dpaa2_io_service_deregister(NULL, ðsw->fq[i].nctx, 2943 ethsw->dev); 2944 } 2945 2946 static int dpaa2_switch_ctrl_if_setup(struct ethsw_core *ethsw) 2947 { 2948 int err; 2949 2950 /* setup FQs for Rx and Tx Conf */ 2951 err = dpaa2_switch_setup_fqs(ethsw); 2952 if (err) 2953 return err; 2954 2955 /* setup the buffer pool needed on the Rx path */ 2956 err = dpaa2_switch_setup_dpbp(ethsw); 2957 if (err) 2958 return err; 2959 2960 err = dpaa2_switch_alloc_rings(ethsw); 2961 if (err) 2962 goto err_free_dpbp; 2963 2964 err = dpaa2_switch_setup_dpio(ethsw); 2965 if (err) 2966 goto err_destroy_rings; 2967 2968 err = dpaa2_switch_seed_bp(ethsw); 2969 if (err) 2970 goto err_deregister_dpio; 2971 2972 err = dpsw_ctrl_if_enable(ethsw->mc_io, 0, ethsw->dpsw_handle); 2973 if (err) { 2974 dev_err(ethsw->dev, "dpsw_ctrl_if_enable err %d\n", err); 2975 goto err_drain_dpbp; 2976 } 2977 2978 return 0; 2979 2980 err_drain_dpbp: 2981 dpaa2_switch_drain_bp(ethsw); 2982 err_deregister_dpio: 2983 dpaa2_switch_free_dpio(ethsw); 2984 err_destroy_rings: 2985 dpaa2_switch_destroy_rings(ethsw); 2986 err_free_dpbp: 2987 dpaa2_switch_free_dpbp(ethsw); 2988 2989 return err; 2990 } 2991 2992 static void dpaa2_switch_remove_port(struct ethsw_core *ethsw, 2993 u16 port_idx) 2994 { 2995 struct ethsw_port_priv *port_priv = ethsw->ports[port_idx]; 2996 2997 dpaa2_switch_port_disconnect_mac(port_priv); 2998 free_netdev(port_priv->netdev); 2999 ethsw->ports[port_idx] = NULL; 3000 } 3001 3002 static int dpaa2_switch_init(struct fsl_mc_device *sw_dev) 
3003 { 3004 struct device *dev = &sw_dev->dev; 3005 struct ethsw_core *ethsw = dev_get_drvdata(dev); 3006 struct dpsw_vlan_if_cfg vcfg = {0}; 3007 struct dpsw_tci_cfg tci_cfg = {0}; 3008 struct dpsw_stp_cfg stp_cfg; 3009 int err; 3010 u16 i; 3011 3012 ethsw->dev_id = sw_dev->obj_desc.id; 3013 3014 err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, ðsw->dpsw_handle); 3015 if (err) { 3016 dev_err(dev, "dpsw_open err %d\n", err); 3017 return err; 3018 } 3019 3020 err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, 3021 ðsw->sw_attr); 3022 if (err) { 3023 dev_err(dev, "dpsw_get_attributes err %d\n", err); 3024 goto err_close; 3025 } 3026 3027 err = dpsw_get_api_version(ethsw->mc_io, 0, 3028 ðsw->major, 3029 ðsw->minor); 3030 if (err) { 3031 dev_err(dev, "dpsw_get_api_version err %d\n", err); 3032 goto err_close; 3033 } 3034 3035 /* Minimum supported DPSW version check */ 3036 if (ethsw->major < DPSW_MIN_VER_MAJOR || 3037 (ethsw->major == DPSW_MIN_VER_MAJOR && 3038 ethsw->minor < DPSW_MIN_VER_MINOR)) { 3039 dev_err(dev, "DPSW version %d:%d not supported. Use firmware 10.28.0 or greater.\n", 3040 ethsw->major, ethsw->minor); 3041 err = -EOPNOTSUPP; 3042 goto err_close; 3043 } 3044 3045 if (!dpaa2_switch_supports_cpu_traffic(ethsw)) { 3046 err = -EOPNOTSUPP; 3047 goto err_close; 3048 } 3049 3050 dpaa2_switch_detect_features(ethsw); 3051 3052 err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle); 3053 if (err) { 3054 dev_err(dev, "dpsw_reset err %d\n", err); 3055 goto err_close; 3056 } 3057 3058 stp_cfg.vlan_id = DEFAULT_VLAN_ID; 3059 stp_cfg.state = DPSW_STP_STATE_FORWARDING; 3060 3061 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { 3062 err = dpsw_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle, i); 3063 if (err) { 3064 dev_err(dev, "dpsw_if_disable err %d\n", err); 3065 goto err_close; 3066 } 3067 3068 err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i, 3069 &stp_cfg); 3070 if (err) { 3071 dev_err(dev, "dpsw_if_set_stp err %d for port %d\n", 3072 err, i); 3073 goto err_close; 3074 } 3075 3076 /* Switch starts with all ports configured to VLAN 1. 
Need to 3077 * remove this setting to allow configuration at bridge join 3078 */ 3079 vcfg.num_ifs = 1; 3080 vcfg.if_id[0] = i; 3081 err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle, 3082 DEFAULT_VLAN_ID, &vcfg); 3083 if (err) { 3084 dev_err(dev, "dpsw_vlan_remove_if_untagged err %d\n", 3085 err); 3086 goto err_close; 3087 } 3088 3089 tci_cfg.vlan_id = 4095; 3090 err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, i, &tci_cfg); 3091 if (err) { 3092 dev_err(dev, "dpsw_if_set_tci err %d\n", err); 3093 goto err_close; 3094 } 3095 3096 err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle, 3097 DEFAULT_VLAN_ID, &vcfg); 3098 if (err) { 3099 dev_err(dev, "dpsw_vlan_remove_if err %d\n", err); 3100 goto err_close; 3101 } 3102 } 3103 3104 err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, DEFAULT_VLAN_ID); 3105 if (err) { 3106 dev_err(dev, "dpsw_vlan_remove err %d\n", err); 3107 goto err_close; 3108 } 3109 3110 ethsw->workqueue = alloc_ordered_workqueue("%s_%d_ordered", 3111 WQ_MEM_RECLAIM, "ethsw", 3112 ethsw->sw_attr.id); 3113 if (!ethsw->workqueue) { 3114 err = -ENOMEM; 3115 goto err_close; 3116 } 3117 3118 err = dpsw_fdb_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, 0); 3119 if (err) 3120 goto err_destroy_ordered_workqueue; 3121 3122 err = dpaa2_switch_ctrl_if_setup(ethsw); 3123 if (err) 3124 goto err_destroy_ordered_workqueue; 3125 3126 return 0; 3127 3128 err_destroy_ordered_workqueue: 3129 destroy_workqueue(ethsw->workqueue); 3130 3131 err_close: 3132 dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle); 3133 return err; 3134 } 3135 3136 /* Add an ACL to redirect frames with specific destination MAC address to 3137 * control interface 3138 */ 3139 static int dpaa2_switch_port_trap_mac_addr(struct ethsw_port_priv *port_priv, 3140 const char *mac) 3141 { 3142 struct dpaa2_switch_acl_entry acl_entry = {0}; 3143 3144 /* Match on the destination MAC address */ 3145 ether_addr_copy(acl_entry.key.match.l2_dest_mac, mac); 3146 eth_broadcast_addr(acl_entry.key.mask.l2_dest_mac); 3147 3148 /* Trap to CPU */ 3149 acl_entry.cfg.precedence = 0; 3150 acl_entry.cfg.result.action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF; 3151 3152 return dpaa2_switch_acl_entry_add(port_priv->filter_block, &acl_entry); 3153 } 3154 3155 static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port) 3156 { 3157 const char stpa[ETH_ALEN] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00}; 3158 struct switchdev_obj_port_vlan vlan = { 3159 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, 3160 .vid = DEFAULT_VLAN_ID, 3161 .flags = BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID, 3162 }; 3163 struct net_device *netdev = port_priv->netdev; 3164 struct ethsw_core *ethsw = port_priv->ethsw_data; 3165 struct dpaa2_switch_filter_block *filter_block; 3166 struct dpsw_fdb_cfg fdb_cfg = {0}; 3167 struct dpsw_if_attr dpsw_if_attr; 3168 struct dpaa2_switch_fdb *fdb; 3169 struct dpsw_acl_cfg acl_cfg; 3170 u16 fdb_id, acl_tbl_id; 3171 int err; 3172 3173 /* Get the Tx queue for this specific port */ 3174 err = dpsw_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, 3175 port_priv->idx, &dpsw_if_attr); 3176 if (err) { 3177 netdev_err(netdev, "dpsw_if_get_attributes err %d\n", err); 3178 return err; 3179 } 3180 port_priv->tx_qdid = dpsw_if_attr.qdid; 3181 3182 /* Create a FDB table for this particular switch port */ 3183 fdb_cfg.num_fdb_entries = ethsw->sw_attr.max_fdb_entries / ethsw->sw_attr.num_ifs; 3184 err = dpsw_fdb_add(ethsw->mc_io, 0, ethsw->dpsw_handle, 3185 &fdb_id, &fdb_cfg); 3186 if (err) { 3187 
netdev_err(netdev, "dpsw_fdb_add err %d\n", err); 3188 return err; 3189 } 3190 3191 /* Find an unused dpaa2_switch_fdb structure and use it */ 3192 fdb = dpaa2_switch_fdb_get_unused(ethsw); 3193 fdb->fdb_id = fdb_id; 3194 fdb->in_use = true; 3195 fdb->bridge_dev = NULL; 3196 port_priv->fdb = fdb; 3197 3198 /* We need to add VLAN 1 as the PVID on this port until it is under a 3199 * bridge since the DPAA2 switch is not able to handle the traffic in a 3200 * VLAN unaware fashion 3201 */ 3202 err = dpaa2_switch_port_vlans_add(netdev, &vlan); 3203 if (err) 3204 return err; 3205 3206 /* Setup the egress flooding domains (broadcast, unknown unicast */ 3207 err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id); 3208 if (err) 3209 return err; 3210 3211 /* Create an ACL table to be used by this switch port */ 3212 acl_cfg.max_entries = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES; 3213 err = dpsw_acl_add(ethsw->mc_io, 0, ethsw->dpsw_handle, 3214 &acl_tbl_id, &acl_cfg); 3215 if (err) { 3216 netdev_err(netdev, "dpsw_acl_add err %d\n", err); 3217 return err; 3218 } 3219 3220 filter_block = dpaa2_switch_filter_block_get_unused(ethsw); 3221 filter_block->ethsw = ethsw; 3222 filter_block->acl_id = acl_tbl_id; 3223 filter_block->in_use = true; 3224 filter_block->num_acl_rules = 0; 3225 INIT_LIST_HEAD(&filter_block->acl_entries); 3226 INIT_LIST_HEAD(&filter_block->mirror_entries); 3227 3228 err = dpaa2_switch_port_acl_tbl_bind(port_priv, filter_block); 3229 if (err) 3230 return err; 3231 3232 err = dpaa2_switch_port_trap_mac_addr(port_priv, stpa); 3233 if (err) 3234 return err; 3235 3236 return err; 3237 } 3238 3239 static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw) 3240 { 3241 dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); 3242 dpaa2_switch_free_dpio(ethsw); 3243 dpaa2_switch_destroy_rings(ethsw); 3244 dpaa2_switch_drain_bp(ethsw); 3245 dpaa2_switch_free_dpbp(ethsw); 3246 } 3247 3248 static void dpaa2_switch_teardown(struct fsl_mc_device *sw_dev) 3249 { 3250 struct device *dev = &sw_dev->dev; 3251 struct ethsw_core *ethsw = dev_get_drvdata(dev); 3252 int err; 3253 3254 dpaa2_switch_ctrl_if_teardown(ethsw); 3255 3256 destroy_workqueue(ethsw->workqueue); 3257 3258 err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle); 3259 if (err) 3260 dev_warn(dev, "dpsw_close err %d\n", err); 3261 } 3262 3263 static void dpaa2_switch_remove(struct fsl_mc_device *sw_dev) 3264 { 3265 struct ethsw_port_priv *port_priv; 3266 struct ethsw_core *ethsw; 3267 struct device *dev; 3268 int i; 3269 3270 dev = &sw_dev->dev; 3271 ethsw = dev_get_drvdata(dev); 3272 3273 dpaa2_switch_teardown_irqs(sw_dev); 3274 3275 dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); 3276 3277 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { 3278 port_priv = ethsw->ports[i]; 3279 unregister_netdev(port_priv->netdev); 3280 dpaa2_switch_remove_port(ethsw, i); 3281 } 3282 3283 kfree(ethsw->fdbs); 3284 kfree(ethsw->filter_blocks); 3285 kfree(ethsw->ports); 3286 3287 dpaa2_switch_teardown(sw_dev); 3288 3289 fsl_mc_portal_free(ethsw->mc_io); 3290 3291 kfree(ethsw); 3292 3293 dev_set_drvdata(dev, NULL); 3294 } 3295 3296 static int dpaa2_switch_probe_port(struct ethsw_core *ethsw, 3297 u16 port_idx) 3298 { 3299 struct ethsw_port_priv *port_priv; 3300 struct device *dev = ethsw->dev; 3301 struct net_device *port_netdev; 3302 int err; 3303 3304 port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv)); 3305 if (!port_netdev) { 3306 dev_err(dev, "alloc_etherdev error\n"); 3307 return -ENOMEM; 3308 } 3309 3310 port_priv = 
netdev_priv(port_netdev); 3311 port_priv->netdev = port_netdev; 3312 port_priv->ethsw_data = ethsw; 3313 3314 mutex_init(&port_priv->mac_lock); 3315 3316 port_priv->idx = port_idx; 3317 port_priv->stp_state = BR_STATE_FORWARDING; 3318 3319 SET_NETDEV_DEV(port_netdev, dev); 3320 port_netdev->netdev_ops = &dpaa2_switch_port_ops; 3321 port_netdev->ethtool_ops = &dpaa2_switch_port_ethtool_ops; 3322 3323 port_netdev->needed_headroom = DPAA2_SWITCH_NEEDED_HEADROOM; 3324 3325 port_priv->bcast_flood = true; 3326 port_priv->ucast_flood = true; 3327 3328 /* Set MTU limits */ 3329 port_netdev->min_mtu = ETH_MIN_MTU; 3330 port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH; 3331 3332 /* Populate the private port structure so that later calls to 3333 * dpaa2_switch_port_init() can use it. 3334 */ 3335 ethsw->ports[port_idx] = port_priv; 3336 3337 /* The DPAA2 switch's ingress path depends on the VLAN table, 3338 * thus we are not able to disable VLAN filtering. 3339 */ 3340 port_netdev->features = NETIF_F_HW_VLAN_CTAG_FILTER | 3341 NETIF_F_HW_VLAN_STAG_FILTER | 3342 NETIF_F_HW_TC; 3343 port_netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 3344 3345 err = dpaa2_switch_port_init(port_priv, port_idx); 3346 if (err) 3347 goto err_port_probe; 3348 3349 err = dpaa2_switch_port_set_mac_addr(port_priv); 3350 if (err) 3351 goto err_port_probe; 3352 3353 err = dpaa2_switch_port_set_learning(port_priv, false); 3354 if (err) 3355 goto err_port_probe; 3356 port_priv->learn_ena = false; 3357 3358 err = dpaa2_switch_port_connect_mac(port_priv); 3359 if (err) 3360 goto err_port_probe; 3361 3362 return 0; 3363 3364 err_port_probe: 3365 free_netdev(port_netdev); 3366 ethsw->ports[port_idx] = NULL; 3367 3368 return err; 3369 } 3370 3371 static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev) 3372 { 3373 struct device *dev = &sw_dev->dev; 3374 struct ethsw_core *ethsw; 3375 int i, err; 3376 3377 /* Allocate switch core*/ 3378 ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL); 3379 3380 if (!ethsw) 3381 return -ENOMEM; 3382 3383 ethsw->dev = dev; 3384 ethsw->iommu_domain = iommu_get_domain_for_dev(dev); 3385 dev_set_drvdata(dev, ethsw); 3386 3387 err = fsl_mc_portal_allocate(sw_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, 3388 ðsw->mc_io); 3389 if (err) { 3390 if (err == -ENXIO) 3391 err = -EPROBE_DEFER; 3392 else 3393 dev_err(dev, "fsl_mc_portal_allocate err %d\n", err); 3394 goto err_free_drvdata; 3395 } 3396 3397 err = dpaa2_switch_init(sw_dev); 3398 if (err) 3399 goto err_free_cmdport; 3400 3401 ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports), 3402 GFP_KERNEL); 3403 if (!(ethsw->ports)) { 3404 err = -ENOMEM; 3405 goto err_teardown; 3406 } 3407 3408 ethsw->fdbs = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->fdbs), 3409 GFP_KERNEL); 3410 if (!ethsw->fdbs) { 3411 err = -ENOMEM; 3412 goto err_free_ports; 3413 } 3414 3415 ethsw->filter_blocks = kcalloc(ethsw->sw_attr.num_ifs, 3416 sizeof(*ethsw->filter_blocks), 3417 GFP_KERNEL); 3418 if (!ethsw->filter_blocks) { 3419 err = -ENOMEM; 3420 goto err_free_fdbs; 3421 } 3422 3423 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { 3424 err = dpaa2_switch_probe_port(ethsw, i); 3425 if (err) 3426 goto err_free_netdev; 3427 } 3428 3429 /* Add a NAPI instance for each of the Rx queues. The first port's 3430 * net_device will be associated with the instances since we do not have 3431 * different queues for each switch ports. 
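The NAPI instances only provide the polling context; each received frame is still delivered to its ingress port's net_device in dpaa2_switch_rx().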
3432 */ 3433 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) 3434 netif_napi_add(ethsw->ports[0]->netdev, ðsw->fq[i].napi, 3435 dpaa2_switch_poll); 3436 3437 /* Setup IRQs */ 3438 err = dpaa2_switch_setup_irqs(sw_dev); 3439 if (err) 3440 goto err_stop; 3441 3442 /* By convention, if the mirror port is equal to the number of switch 3443 * interfaces, then mirroring of any kind is disabled. 3444 */ 3445 ethsw->mirror_port = ethsw->sw_attr.num_ifs; 3446 3447 /* Register the netdev only when the entire setup is done and the 3448 * switch port interfaces are ready to receive traffic 3449 */ 3450 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { 3451 err = register_netdev(ethsw->ports[i]->netdev); 3452 if (err < 0) { 3453 dev_err(dev, "register_netdev error %d\n", err); 3454 goto err_unregister_ports; 3455 } 3456 } 3457 3458 return 0; 3459 3460 err_unregister_ports: 3461 for (i--; i >= 0; i--) 3462 unregister_netdev(ethsw->ports[i]->netdev); 3463 dpaa2_switch_teardown_irqs(sw_dev); 3464 err_stop: 3465 dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); 3466 err_free_netdev: 3467 for (i--; i >= 0; i--) 3468 dpaa2_switch_remove_port(ethsw, i); 3469 kfree(ethsw->filter_blocks); 3470 err_free_fdbs: 3471 kfree(ethsw->fdbs); 3472 err_free_ports: 3473 kfree(ethsw->ports); 3474 3475 err_teardown: 3476 dpaa2_switch_teardown(sw_dev); 3477 3478 err_free_cmdport: 3479 fsl_mc_portal_free(ethsw->mc_io); 3480 3481 err_free_drvdata: 3482 kfree(ethsw); 3483 dev_set_drvdata(dev, NULL); 3484 3485 return err; 3486 } 3487 3488 static const struct fsl_mc_device_id dpaa2_switch_match_id_table[] = { 3489 { 3490 .vendor = FSL_MC_VENDOR_FREESCALE, 3491 .obj_type = "dpsw", 3492 }, 3493 { .vendor = 0x0 } 3494 }; 3495 MODULE_DEVICE_TABLE(fslmc, dpaa2_switch_match_id_table); 3496 3497 static struct fsl_mc_driver dpaa2_switch_drv = { 3498 .driver = { 3499 .name = KBUILD_MODNAME, 3500 }, 3501 .probe = dpaa2_switch_probe, 3502 .remove = dpaa2_switch_remove, 3503 .match_id_table = dpaa2_switch_match_id_table 3504 }; 3505 3506 static struct notifier_block dpaa2_switch_port_nb __read_mostly = { 3507 .notifier_call = dpaa2_switch_port_netdevice_event, 3508 }; 3509 3510 static struct notifier_block dpaa2_switch_port_switchdev_nb = { 3511 .notifier_call = dpaa2_switch_port_event, 3512 }; 3513 3514 static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb = { 3515 .notifier_call = dpaa2_switch_port_blocking_event, 3516 }; 3517 3518 static int dpaa2_switch_register_notifiers(void) 3519 { 3520 int err; 3521 3522 err = register_netdevice_notifier(&dpaa2_switch_port_nb); 3523 if (err) { 3524 pr_err("dpaa2-switch: failed to register net_device notifier (%d)\n", err); 3525 return err; 3526 } 3527 3528 err = register_switchdev_notifier(&dpaa2_switch_port_switchdev_nb); 3529 if (err) { 3530 pr_err("dpaa2-switch: failed to register switchdev notifier (%d)\n", err); 3531 goto err_switchdev_nb; 3532 } 3533 3534 err = register_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb); 3535 if (err) { 3536 pr_err("dpaa2-switch: failed to register switchdev blocking notifier (%d)\n", err); 3537 goto err_switchdev_blocking_nb; 3538 } 3539 3540 return 0; 3541 3542 err_switchdev_blocking_nb: 3543 unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb); 3544 err_switchdev_nb: 3545 unregister_netdevice_notifier(&dpaa2_switch_port_nb); 3546 3547 return err; 3548 } 3549 3550 static void dpaa2_switch_unregister_notifiers(void) 3551 { 3552 int err; 3553 3554 err = 
unregister_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb); 3555 if (err) 3556 pr_err("dpaa2-switch: failed to unregister switchdev blocking notifier (%d)\n", 3557 err); 3558 3559 err = unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb); 3560 if (err) 3561 pr_err("dpaa2-switch: failed to unregister switchdev notifier (%d)\n", err); 3562 3563 err = unregister_netdevice_notifier(&dpaa2_switch_port_nb); 3564 if (err) 3565 pr_err("dpaa2-switch: failed to unregister net_device notifier (%d)\n", err); 3566 } 3567 3568 static int __init dpaa2_switch_driver_init(void) 3569 { 3570 int err; 3571 3572 err = fsl_mc_driver_register(&dpaa2_switch_drv); 3573 if (err) 3574 return err; 3575 3576 err = dpaa2_switch_register_notifiers(); 3577 if (err) { 3578 fsl_mc_driver_unregister(&dpaa2_switch_drv); 3579 return err; 3580 } 3581 3582 return 0; 3583 } 3584 3585 static void __exit dpaa2_switch_driver_exit(void) 3586 { 3587 dpaa2_switch_unregister_notifiers(); 3588 fsl_mc_driver_unregister(&dpaa2_switch_drv); 3589 } 3590 3591 module_init(dpaa2_switch_driver_init); 3592 module_exit(dpaa2_switch_driver_exit); 3593 3594 MODULE_LICENSE("GPL v2"); 3595 MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver"); 3596