// SPDX-License-Identifier: GPL-2.0

/* Texas Instruments ICSSG Ethernet Driver
 *
 * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
 *
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/etherdevice.h>
#include <linux/genalloc.h>
#include <linux/if_hsr.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/property.h>
#include <linux/remoteproc/pruss.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>
#include <net/switchdev.h>

#include "icssg_prueth.h"
#include "icssg_mii_rt.h"
#include "icssg_switchdev.h"
#include "../k3-cppi-desc-pool.h"

#define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG Ethernet driver"

#define DEFAULT_VID		1
#define DEFAULT_PORT_MASK	1
#define DEFAULT_UNTAG_MASK	1

#define NETIF_PRUETH_HSR_OFFLOAD_FEATURES	(NETIF_F_HW_HSR_FWD | \
						 NETIF_F_HW_HSR_DUP | \
						 NETIF_F_HW_HSR_TAG_INS | \
						 NETIF_F_HW_HSR_TAG_RM)

/* CTRLMMR_ICSSG_RGMII_CTRL register bits */
#define ICSSG_CTRL_RGMII_ID_MODE		BIT(24)

static int emac_get_tx_ts(struct prueth_emac *emac,
			  struct emac_tx_ts_response *rsp)
{
	struct prueth *prueth = emac->prueth;
	int slice = prueth_emac_slice(emac);
	int addr;

	addr = icssg_queue_pop(prueth, slice == 0 ?
			       ICSSG_TS_POP_SLICE0 : ICSSG_TS_POP_SLICE1);
	if (addr < 0)
		return addr;

	memcpy_fromio(rsp, prueth->shram.va + addr, sizeof(*rsp));
	/* return buffer back to the pool */
	icssg_queue_push(prueth, slice == 0 ?
			 ICSSG_TS_PUSH_SLICE0 : ICSSG_TS_PUSH_SLICE1, addr);

	return 0;
}

static void tx_ts_work(struct prueth_emac *emac)
{
	struct skb_shared_hwtstamps ssh;
	struct emac_tx_ts_response tsr;
	struct sk_buff *skb;
	int ret = 0;
	u32 hi_sw;
	u64 ns;

	/* There may be more than one pending request */
	while (1) {
		ret = emac_get_tx_ts(emac, &tsr);
		if (ret) /* nothing more */
			break;

		if (tsr.cookie >= PRUETH_MAX_TX_TS_REQUESTS ||
		    !emac->tx_ts_skb[tsr.cookie]) {
			netdev_err(emac->ndev, "Invalid TX TS cookie 0x%x\n",
				   tsr.cookie);
			break;
		}

		skb = emac->tx_ts_skb[tsr.cookie];
		emac->tx_ts_skb[tsr.cookie] = NULL;	/* free slot */
		if (!skb) {
			netdev_err(emac->ndev, "Driver Bug! got NULL skb\n");
			break;
		}

		hi_sw = readl(emac->prueth->shram.va +
			      TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
		ns = icssg_ts_to_ns(hi_sw, tsr.hi_ts, tsr.lo_ts,
				    IEP_DEFAULT_CYCLE_TIME_NS);

		memset(&ssh, 0, sizeof(ssh));
		ssh.hwtstamp = ns_to_ktime(ns);

		skb_tstamp_tx(skb, &ssh);
		dev_consume_skb_any(skb);

		if (atomic_dec_and_test(&emac->tx_ts_pending))	/* no more? */
			break;
	}
}

static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id)
{
	struct prueth_emac *emac = dev_id;

	/* currently only TX timestamp is being returned */
	tx_ts_work(emac);

	return IRQ_HANDLED;
}

static int prueth_start(struct rproc *rproc, const char *fw_name)
{
	int ret;

	ret = rproc_set_firmware(rproc, fw_name);
	if (ret)
		return ret;
	return rproc_boot(rproc);
}

static void prueth_shutdown(struct rproc *rproc)
{
	rproc_shutdown(rproc);
}

static int prueth_emac_start(struct prueth *prueth)
{
	struct icssg_firmwares *firmwares;
	struct device *dev = prueth->dev;
	int ret, slice;

	if (prueth->is_switch_mode)
		firmwares = prueth->icssg_switch_firmwares;
	else if (prueth->is_hsr_offload_mode && HSR_V1 == prueth->hsr_prp_version)
		firmwares = prueth->icssg_hsr_firmwares;
	else if (prueth->is_hsr_offload_mode && PRP_V1 == prueth->hsr_prp_version)
		firmwares = prueth->icssg_prp_firmwares;
	else
		firmwares = prueth->icssg_emac_firmwares;

	for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
		ret = prueth_start(prueth->pru[slice], firmwares[slice].pru);
		if (ret) {
			dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
			goto unwind_slices;
		}

		ret = prueth_start(prueth->rtu[slice], firmwares[slice].rtu);
		if (ret) {
			dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
			rproc_shutdown(prueth->pru[slice]);
			goto unwind_slices;
		}

		ret = prueth_start(prueth->txpru[slice], firmwares[slice].txpru);
		if (ret) {
			dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
			rproc_shutdown(prueth->rtu[slice]);
			rproc_shutdown(prueth->pru[slice]);
			goto unwind_slices;
		}
	}

	return 0;

unwind_slices:
	while (--slice >= 0) {
		prueth_shutdown(prueth->txpru[slice]);
		prueth_shutdown(prueth->rtu[slice]);
		prueth_shutdown(prueth->pru[slice]);
	}

	return ret;
}

static void prueth_emac_stop(struct prueth *prueth)
{
	int slice;

	for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
		prueth_shutdown(prueth->txpru[slice]);
		prueth_shutdown(prueth->rtu[slice]);
		prueth_shutdown(prueth->pru[slice]);
	}
}

static int prueth_emac_common_start(struct prueth *prueth)
{
	struct prueth_emac *emac;
	int ret = 0;
	int slice;

	if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
		return -EINVAL;

	/* clear SMEM and MSMC settings for all slices */
	memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
	memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);

	icssg_class_default(prueth->miig_rt, ICSS_SLICE0, 0, false);
	icssg_class_default(prueth->miig_rt, ICSS_SLICE1, 0, false);

	if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
		icssg_init_fw_offload_mode(prueth);
	else
		icssg_init_emac_mode(prueth);

	for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
		emac = prueth->emac[slice];
		if (!emac)
			continue;
		ret = icssg_config(prueth, emac, slice);
		if (ret)
			goto disable_class;
	}

	ret = prueth_emac_start(prueth);
	if (ret)
		goto disable_class;

	emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
	       prueth->emac[ICSS_SLICE1];
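	/* Both MAC ports use the same IEP instance for timestamping, so
	 * register the clock operations through whichever port is present.
	 */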
	ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
			    emac, IEP_DEFAULT_CYCLE_TIME_NS);
	if (ret) {
		dev_err(prueth->dev, "Failed to initialize IEP module\n");
		goto stop_pruss;
	}

	return 0;

stop_pruss:
	prueth_emac_stop(prueth);

disable_class:
	icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
	icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);

	return ret;
}

static int prueth_emac_common_stop(struct prueth *prueth)
{
	struct prueth_emac *emac;

	if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
		return -EINVAL;

	icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
	icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);

	prueth_emac_stop(prueth);

	emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
	       prueth->emac[ICSS_SLICE1];
	icss_iep_exit(emac->iep);

	return 0;
}

/* called back by PHY layer if there is a change in link state of hw port */
static void emac_adjust_link(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	struct prueth *prueth = emac->prueth;
	bool new_state = false;
	unsigned long flags;

	if (phydev->link) {
		/* check the mode of operation - full/half duplex */
		if (phydev->duplex != emac->duplex) {
			new_state = true;
			emac->duplex = phydev->duplex;
		}
		if (phydev->speed != emac->speed) {
			new_state = true;
			emac->speed = phydev->speed;
		}
		if (!emac->link) {
			new_state = true;
			emac->link = 1;
		}
	} else if (emac->link) {
		new_state = true;
		emac->link = 0;

		/* f/w should support 100 & 1000 */
		emac->speed = SPEED_1000;

		/* half duplex may not be supported by f/w */
		emac->duplex = DUPLEX_FULL;
	}

	if (new_state) {
		phy_print_status(phydev);

		/* update RGMII and MII configuration based on PHY negotiated
		 * values
		 */
		if (emac->link) {
			if (emac->duplex == DUPLEX_HALF)
				icssg_config_half_duplex(emac);
			/* Set the RGMII cfg for gig en and full duplex */
			icssg_update_rgmii_cfg(prueth->miig_rt, emac);

			/* update the Tx IPG based on 100M/1G speed */
			spin_lock_irqsave(&emac->lock, flags);
			icssg_config_ipg(emac);
			spin_unlock_irqrestore(&emac->lock, flags);
			icssg_config_set_speed(emac);
			icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);

		} else {
			icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
		}
	}

	if (emac->link) {
		/* reactivate the transmit queue */
		netif_tx_wake_all_queues(ndev);
	} else {
		netif_tx_stop_all_queues(ndev);
		prueth_cleanup_tx_ts(emac);
	}
}

static enum hrtimer_restart emac_rx_timer_callback(struct hrtimer *timer)
{
	struct prueth_emac *emac =
			container_of(timer, struct prueth_emac, rx_hrtimer);
	int rx_flow = PRUETH_RX_FLOW_DATA;

	enable_irq(emac->rx_chns.irq[rx_flow]);
	return HRTIMER_NORESTART;
}

static int emac_phy_connect(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	struct net_device *ndev = emac->ndev;
	/* connect PHY */
	ndev->phydev = of_phy_connect(emac->ndev, emac->phy_node,
				      &emac_adjust_link, 0,
				      emac->phy_if);
	if (!ndev->phydev) {
		dev_err(prueth->dev, "couldn't connect to phy %s\n",
			emac->phy_node->full_name);
		return -ENODEV;
	}

	if (!emac->half_duplex) {
		dev_dbg(prueth->dev, "half duplex mode is not supported\n");
		phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
		phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	}

	/* remove unsupported modes */
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);

	if (emac->phy_if == PHY_INTERFACE_MODE_MII)
		phy_set_max_speed(ndev->phydev, SPEED_100);

	return 0;
}

static u64 prueth_iep_gettime(void *clockops_data, struct ptp_system_timestamp *sts)
{
	u32 hi_rollover_count, hi_rollover_count_r;
	struct prueth_emac *emac = clockops_data;
	struct prueth *prueth = emac->prueth;
	void __iomem *fw_hi_r_count_addr;
	void __iomem *fw_count_hi_addr;
	u32 iepcount_hi, iepcount_hi_r;
	unsigned long flags;
	u32 iepcount_lo;
	u64 ts = 0;

	fw_count_hi_addr = prueth->shram.va + TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET;
	fw_hi_r_count_addr = prueth->shram.va + TIMESYNC_FW_WC_HI_ROLLOVER_COUNT_OFFSET;

	/* Re-read the HI and rollover counts until they are stable so the LO
	 * count is not combined with values from across a rollover.
	 */
	local_irq_save(flags);
	do {
		iepcount_hi = icss_iep_get_count_hi(emac->iep);
		iepcount_hi += readl(fw_count_hi_addr);
		hi_rollover_count = readl(fw_hi_r_count_addr);
		ptp_read_system_prets(sts);
		iepcount_lo = icss_iep_get_count_low(emac->iep);
		ptp_read_system_postts(sts);

		iepcount_hi_r = icss_iep_get_count_hi(emac->iep);
		iepcount_hi_r += readl(fw_count_hi_addr);
		hi_rollover_count_r = readl(fw_hi_r_count_addr);
	} while ((iepcount_hi_r != iepcount_hi) ||
		 (hi_rollover_count != hi_rollover_count_r));
	local_irq_restore(flags);

	ts = ((u64)hi_rollover_count) << 23 | iepcount_hi;
	ts = ts * (u64)IEP_DEFAULT_CYCLE_TIME_NS + iepcount_lo;

	return ts;
}

static void prueth_iep_settime(void *clockops_data, u64 ns)
{
	struct icssg_setclock_desc __iomem *sc_descp;
	struct prueth_emac *emac = clockops_data;
	struct icssg_setclock_desc sc_desc;
	u64 cyclecount;
	u32 cycletime;
	int timeout;

	sc_descp = emac->prueth->shram.va + TIMESYNC_FW_WC_SETCLOCK_DESC_OFFSET;

	cycletime = IEP_DEFAULT_CYCLE_TIME_NS;
	cyclecount = ns / cycletime;

	memset(&sc_desc, 0, sizeof(sc_desc));
	sc_desc.margin = cycletime - 1000;
	sc_desc.cyclecounter0_set = cyclecount & GENMASK(31, 0);
	sc_desc.cyclecounter1_set = (cyclecount & GENMASK(63, 32)) >> 32;
	sc_desc.iepcount_set = ns % cycletime;
	/* Count from 0 to (cycle time) - emac->iep->def_inc */
	sc_desc.CMP0_current = cycletime - emac->iep->def_inc;

	memcpy_toio(sc_descp, &sc_desc, sizeof(sc_desc));

	writeb(1, &sc_descp->request);

	timeout = 5;	/* fw should take 2-3 ms */
	while (timeout--) {
		if (readb(&sc_descp->acknowledgment))
			return;

		usleep_range(500, 1000);
	}

	dev_err(emac->prueth->dev, "settime timeout\n");
}

static int prueth_perout_enable(void *clockops_data,
				struct ptp_perout_request *req, int on,
				u64 *cmp)
{
	struct prueth_emac *emac = clockops_data;
	u32 reduction_factor = 0, offset = 0;
	struct timespec64 ts;
	u64 current_cycle;
	u64 start_offset;
	u64 ns_period;

	if (!on)
		return 0;

	/* Any firmware specific stuff for PPS/PEROUT handling */
	ts.tv_sec = req->period.sec;
	ts.tv_nsec = req->period.nsec;
	ns_period = timespec64_to_ns(&ts);

	/* f/w doesn't support period less than cycle time */
	if (ns_period < IEP_DEFAULT_CYCLE_TIME_NS)
		return -ENXIO;

	reduction_factor = ns_period / IEP_DEFAULT_CYCLE_TIME_NS;
	offset = ns_period % IEP_DEFAULT_CYCLE_TIME_NS;

	/* f/w requires at least 1uS within a cycle so CMP
	 * can trigger after SYNC is enabled
	 */
	if (offset < 5 * NSEC_PER_USEC)
		offset = 5 * NSEC_PER_USEC;

	/* if offset is close to cycle time then we will miss
	 * the CMP event for last tick when IEP rolls over.
	 * In normal mode, IEP tick is 4ns.
	 * In slow compensation it could be 0ns or 8ns at
	 * every slow compensation cycle.
	 */
	if (offset > IEP_DEFAULT_CYCLE_TIME_NS - 8)
		offset = IEP_DEFAULT_CYCLE_TIME_NS - 8;

	/* we're in shadow mode so need to set upper 32-bits */
	*cmp = (u64)offset << 32;

	writel(reduction_factor, emac->prueth->shram.va +
	       TIMESYNC_FW_WC_SYNCOUT_REDUCTION_FACTOR_OFFSET);

	current_cycle = icssg_read_time(emac->prueth->shram.va +
					TIMESYNC_FW_WC_CYCLECOUNT_OFFSET);

	/* Rounding of current_cycle count to next second */
	start_offset = roundup(current_cycle, MSEC_PER_SEC);

	hi_lo_writeq(start_offset, emac->prueth->shram.va +
		     TIMESYNC_FW_WC_SYNCOUT_START_TIME_CYCLECOUNT_OFFSET);

	return 0;
}

const struct icss_iep_clockops prueth_iep_clockops = {
	.settime = prueth_iep_settime,
	.gettime = prueth_iep_gettime,
	.perout_enable = prueth_perout_enable,
};

static int prueth_create_xdp_rxqs(struct prueth_emac *emac)
{
	struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
	struct page_pool *pool = emac->rx_chns.pg_pool;
	int ret;

	ret = xdp_rxq_info_reg(rxq, emac->ndev, 0, emac->napi_rx.napi_id);
	if (ret)
		return ret;

	ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
	if (ret)
		xdp_rxq_info_unreg(rxq);

	return ret;
}

static void prueth_destroy_xdp_rxqs(struct prueth_emac *emac)
{
	struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;

	if (!xdp_rxq_info_is_reg(rxq))
		return;

	xdp_rxq_info_unreg(rxq);
}

static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr)
{
	struct net_device *real_dev;
	struct prueth_emac *emac;
	int port_mask;
	u8 vlan_id;

	vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC;
	real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
	emac = netdev_priv(real_dev);

	port_mask = BIT(emac->port_id) | icssg_fdb_lookup(emac, addr, vlan_id);
	icssg_fdb_add_del(emac, addr, vlan_id, port_mask, true);
	icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, true);

	return 0;
}

static int icssg_prueth_del_mcast(struct net_device *ndev, const u8 *addr)
{
	struct net_device *real_dev;
	struct prueth_emac *emac;
	int other_port_mask;
	int port_mask;
	u8 vlan_id;

	vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC;
	real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
	emac = netdev_priv(real_dev);

	port_mask = BIT(emac->port_id);
	other_port_mask = port_mask ^ icssg_fdb_lookup(emac, addr, vlan_id);

	icssg_fdb_add_del(emac, addr, vlan_id, port_mask, false);
	icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, false);

	if (other_port_mask) {
		icssg_fdb_add_del(emac, addr, vlan_id, other_port_mask, true);
		icssg_vtbl_modify(emac, vlan_id, other_port_mask,
				  other_port_mask, true);
	}

	return 0;
}
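
/* In HSR/PRP offload mode, a multicast address is installed in the FDB with
 * the host port and both slave ports as members (and the BLOCK flag set);
 * on addition the port is also made a member of the VLAN table entry.
 */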
static void icssg_prueth_hsr_fdb_add_del(struct prueth_emac *emac,
					 const u8 *addr, u8 vid, bool add)
{
	icssg_fdb_add_del(emac, addr, vid,
			  ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
			  ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
			  ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
			  ICSSG_FDB_ENTRY_BLOCK, add);

	if (add)
		icssg_vtbl_modify(emac, vid, BIT(emac->port_id),
				  BIT(emac->port_id), add);
}

static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
{
	struct net_device *real_dev;
	struct prueth_emac *emac;
	u8 vlan_id, i;

	vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_HSR;
	real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;

	if (is_hsr_master(real_dev)) {
		for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
			emac = netdev_priv(hsr_get_port_ndev(real_dev, i));
			if (!emac)
				return -EINVAL;
			icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
						     true);
		}
	} else {
		emac = netdev_priv(real_dev);
		icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, true);
	}

	return 0;
}

static int icssg_prueth_hsr_del_mcast(struct net_device *ndev, const u8 *addr)
{
	struct net_device *real_dev;
	struct prueth_emac *emac;
	u8 vlan_id, i;

	vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_HSR;
	real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;

	if (is_hsr_master(real_dev)) {
		for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
			emac = netdev_priv(hsr_get_port_ndev(real_dev, i));
			if (!emac)
				return -EINVAL;
			icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
						     false);
		}
	} else {
		emac = netdev_priv(real_dev);
		icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, false);
	}

	return 0;
}

static int icssg_update_vlan_mcast(struct net_device *vdev, int vid,
				   void *args)
{
	struct prueth_emac *emac = args;

	if (!vdev || !vid)
		return 0;

	netif_addr_lock_bh(vdev);
	__hw_addr_sync_multiple(&emac->vlan_mcast_list[vid], &vdev->mc,
				vdev->addr_len);
	netif_addr_unlock_bh(vdev);

	if (emac->prueth->is_hsr_offload_mode)
		__hw_addr_sync_dev(&emac->vlan_mcast_list[vid], vdev,
				   icssg_prueth_hsr_add_mcast,
				   icssg_prueth_hsr_del_mcast);
	else
		__hw_addr_sync_dev(&emac->vlan_mcast_list[vid], vdev,
				   icssg_prueth_add_mcast,
				   icssg_prueth_del_mcast);

	return 0;
}

/**
 * emac_ndo_open - EMAC device open
 * @ndev: network adapter device
 *
 * Called when system wants to start the interface.
 *
 * Return: 0 for a successful open, or appropriate error code
 */
static int emac_ndo_open(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	int ret, i, num_data_chn = emac->tx_ch_num;
	struct icssg_flow_cfg __iomem *flow_cfg;
	struct prueth *prueth = emac->prueth;
	int slice = prueth_emac_slice(emac);
	struct device *dev = prueth->dev;
	int max_rx_flows;
	int rx_flow;

	/* set h/w MAC as user might have re-configured */
	ether_addr_copy(emac->mac_addr, ndev->dev_addr);

	icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
	icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);

	/* Notify the stack of the actual queue counts. */
	ret = netif_set_real_num_tx_queues(ndev, num_data_chn);
	if (ret) {
		dev_err(dev, "cannot set real number of tx queues\n");
		return ret;
	}

	init_completion(&emac->cmd_complete);
	ret = prueth_init_tx_chns(emac);
	if (ret) {
		dev_err(dev, "failed to init tx channel: %d\n", ret);
		return ret;
	}

	max_rx_flows = PRUETH_MAX_RX_FLOWS;
	ret = prueth_init_rx_chns(emac, &emac->rx_chns, "rx",
				  max_rx_flows, PRUETH_MAX_RX_DESC);
	if (ret) {
		dev_err(dev, "failed to init rx channel: %d\n", ret);
		goto cleanup_tx;
	}

	ret = prueth_ndev_add_tx_napi(emac);
	if (ret)
		goto cleanup_rx;

	/* we use only the highest priority flow for now i.e. @irq[3] */
	rx_flow = PRUETH_RX_FLOW_DATA;
	ret = request_irq(emac->rx_chns.irq[rx_flow], prueth_rx_irq,
			  IRQF_TRIGGER_HIGH, dev_name(dev), emac);
	if (ret) {
		dev_err(dev, "unable to request RX IRQ\n");
		goto cleanup_napi;
	}

	if (!prueth->emacs_initialized) {
		ret = prueth_emac_common_start(prueth);
		if (ret)
			goto free_rx_irq;
	}

	flow_cfg = emac->dram.va + ICSSG_CONFIG_OFFSET + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET;
	writew(emac->rx_flow_id_base, &flow_cfg->rx_base_flow);
	ret = emac_fdb_flow_id_updated(emac);

	if (ret) {
		netdev_err(ndev, "Failed to update Rx Flow ID %d", ret);
		goto stop;
	}

	icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);

	ret = request_threaded_irq(emac->tx_ts_irq, NULL, prueth_tx_ts_irq,
				   IRQF_ONESHOT, dev_name(dev), emac);
	if (ret)
		goto stop;

	/* Prepare RX */
	ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
	if (ret)
		goto free_tx_ts_irq;

	ret = prueth_create_xdp_rxqs(emac);
	if (ret)
		goto reset_rx_chn;

	ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
	if (ret)
		goto destroy_xdp_rxqs;

	for (i = 0; i < emac->tx_ch_num; i++) {
		ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
		if (ret)
			goto reset_tx_chan;
	}

	/* Enable NAPI in Tx and Rx direction */
	for (i = 0; i < emac->tx_ch_num; i++)
		napi_enable(&emac->tx_chns[i].napi_tx);
	napi_enable(&emac->napi_rx);

	/* start PHY */
	phy_start(ndev->phydev);

	prueth->emacs_initialized++;

	queue_work(system_long_wq, &emac->stats_work.work);

	return 0;

reset_tx_chan:
	/* Since the interface is not yet up, there wouldn't be
	 * any SKB for completion. So pass free_skb as false.
	 */
	prueth_reset_tx_chan(emac, i, false);
destroy_xdp_rxqs:
	prueth_destroy_xdp_rxqs(emac);
reset_rx_chn:
	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
free_tx_ts_irq:
	free_irq(emac->tx_ts_irq, emac);
stop:
	if (!prueth->emacs_initialized)
		prueth_emac_common_stop(prueth);
free_rx_irq:
	free_irq(emac->rx_chns.irq[rx_flow], emac);
cleanup_napi:
	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
cleanup_rx:
	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
cleanup_tx:
	prueth_cleanup_tx_chns(emac);

	return ret;
}

/**
 * emac_ndo_stop - EMAC device stop
 * @ndev: network adapter device
 *
 * Called when system wants to stop or down the interface.
 *
 * Return: Always 0 (Success)
 */
static int emac_ndo_stop(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int rx_flow = PRUETH_RX_FLOW_DATA;
	int max_rx_flows;
	int ret, i;

	/* inform the upper layers. */
	netif_tx_stop_all_queues(ndev);

	/* block packets from wire */
	if (ndev->phydev)
		phy_stop(ndev->phydev);

	if (emac->prueth->is_hsr_offload_mode)
		__dev_mc_unsync(ndev, icssg_prueth_hsr_del_mcast);
	else
		__dev_mc_unsync(ndev, icssg_prueth_del_mcast);

	atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
	/* ensure new tdown_cnt value is visible */
	smp_mb__after_atomic();
	/* tear down and disable UDMA channels */
	reinit_completion(&emac->tdown_complete);
	for (i = 0; i < emac->tx_ch_num; i++)
		k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);

	ret = wait_for_completion_timeout(&emac->tdown_complete,
					  msecs_to_jiffies(1000));
	if (!ret)
		netdev_err(ndev, "tx teardown timeout\n");

	prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
	for (i = 0; i < emac->tx_ch_num; i++) {
		napi_disable(&emac->tx_chns[i].napi_tx);
		hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer);
	}

	max_rx_flows = PRUETH_MAX_RX_FLOWS;
	k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);

	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
	prueth_destroy_xdp_rxqs(emac);
	napi_disable(&emac->napi_rx);
	hrtimer_cancel(&emac->rx_hrtimer);

	cancel_work_sync(&emac->rx_mode_work);

	/* Destroying the queued work in ndo_stop() */
	cancel_delayed_work_sync(&emac->stats_work);

	/* stop PRUs */
	if (prueth->emacs_initialized == 1)
		prueth_emac_common_stop(prueth);

	free_irq(emac->tx_ts_irq, emac);

	free_irq(emac->rx_chns.irq[rx_flow], emac);
	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);

	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
	prueth_cleanup_tx_chns(emac);

	prueth->emacs_initialized--;

	return 0;
}

static void emac_ndo_set_rx_mode_work(struct work_struct *work)
{
	struct prueth_emac *emac = container_of(work, struct prueth_emac, rx_mode_work);
	struct net_device *ndev = emac->ndev;
	bool promisc, allmulti;

	if (!netif_running(ndev))
		return;

	promisc = ndev->flags & IFF_PROMISC;
	allmulti = ndev->flags & IFF_ALLMULTI;
	icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_DISABLE);
	icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_DISABLE);

	if (promisc) {
		icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_ENABLE);
		icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
		return;
	}

	if (allmulti) {
		icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
		return;
	}

	if (emac->prueth->is_hsr_offload_mode) {
		__dev_mc_sync(ndev, icssg_prueth_hsr_add_mcast,
			      icssg_prueth_hsr_del_mcast);
		if (rtnl_trylock()) {
			vlan_for_each(emac->prueth->hsr_dev,
				      icssg_update_vlan_mcast, emac);
			rtnl_unlock();
		}
	} else {
		__dev_mc_sync(ndev, icssg_prueth_add_mcast,
			      icssg_prueth_del_mcast);
		if (rtnl_trylock()) {
			vlan_for_each(ndev, icssg_update_vlan_mcast, emac);
			rtnl_unlock();
		}
	}
}

/**
 * emac_ndo_set_rx_mode - EMAC set receive mode function
 * @ndev: The EMAC network adapter
 *
 * Called when system wants to set the receive mode of the device.
 *
 */
static void emac_ndo_set_rx_mode(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);

	queue_work(emac->cmd_wq, &emac->rx_mode_work);
}

static netdev_features_t emac_ndo_fix_features(struct net_device *ndev,
					       netdev_features_t features)
{
	/* HSR tag insertion offload and HSR dup offload are tightly coupled in
	 * the firmware implementation. Both of these features need to be
	 * enabled / disabled together.
	 */
	if (!(ndev->features & (NETIF_F_HW_HSR_DUP | NETIF_F_HW_HSR_TAG_INS)))
		if ((features & NETIF_F_HW_HSR_DUP) ||
		    (features & NETIF_F_HW_HSR_TAG_INS))
			features |= NETIF_F_HW_HSR_DUP |
				    NETIF_F_HW_HSR_TAG_INS;

	if ((ndev->features & NETIF_F_HW_HSR_DUP) ||
	    (ndev->features & NETIF_F_HW_HSR_TAG_INS))
		if (!(features & NETIF_F_HW_HSR_DUP) ||
		    !(features & NETIF_F_HW_HSR_TAG_INS))
			features &= ~(NETIF_F_HW_HSR_DUP |
				      NETIF_F_HW_HSR_TAG_INS);

	return features;
}

static int emac_ndo_vlan_rx_add_vid(struct net_device *ndev,
				    __be16 proto, u16 vid)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int port_mask = BIT(emac->port_id);
	int untag_mask = 0;

	if (prueth->is_hsr_offload_mode)
		port_mask |= BIT(PRUETH_PORT_HOST);

	__hw_addr_init(&emac->vlan_mcast_list[vid]);
	netdev_dbg(emac->ndev, "VID add vid:%u port_mask:%X untag_mask %X\n",
		   vid, port_mask, untag_mask);

	icssg_vtbl_modify(emac, vid, port_mask, untag_mask, true);
	icssg_set_pvid(emac->prueth, vid, emac->port_id);

	return 0;
}

static int emac_ndo_vlan_rx_del_vid(struct net_device *ndev,
				    __be16 proto, u16 vid)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int port_mask = BIT(emac->port_id);
	int untag_mask = 0;

	if (prueth->is_hsr_offload_mode)
		port_mask = BIT(PRUETH_PORT_HOST);

	netdev_dbg(emac->ndev, "VID del vid:%u port_mask:%X untag_mask %X\n",
		   vid, port_mask, untag_mask);
	icssg_vtbl_modify(emac, vid, port_mask, untag_mask, false);

	return 0;
}

/**
 * emac_xdp_xmit - Implements ndo_xdp_xmit
 * @dev: netdev
 * @n: number of frames
 * @frames: array of XDP buffer pointers
 * @flags: XDP extra info
 *
 * Return: number of frames successfully sent. Failed frames
 * will be freed by the XDP core.
 *
 * For error cases, a negative errno code is returned and no frames
 * are transmitted (caller must handle freeing frames).
 **/
static int emac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
			 u32 flags)
{
	struct prueth_emac *emac = netdev_priv(dev);
	struct net_device *ndev = emac->ndev;
	struct netdev_queue *netif_txq;
	int cpu = smp_processor_id();
	struct xdp_frame *xdpf;
	unsigned int q_idx;
	int nxmit = 0;
	u32 err;
	int i;

	q_idx = cpu % emac->tx_ch_num;
	netif_txq = netdev_get_tx_queue(ndev, q_idx);

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	__netif_tx_lock(netif_txq, cpu);
	for (i = 0; i < n; i++) {
		xdpf = frames[i];
		err = emac_xmit_xdp_frame(emac, xdpf, NULL, q_idx);
		if (err != ICSSG_XDP_TX) {
			ndev->stats.tx_dropped++;
			break;
		}
		nxmit++;
	}
	__netif_tx_unlock(netif_txq);

	return nxmit;
}

/**
 * emac_xdp_setup - add/remove an XDP program
 * @emac: emac device
 * @bpf: XDP program
 *
 * Return: Always 0 (Success)
 **/
static int emac_xdp_setup(struct prueth_emac *emac, struct netdev_bpf *bpf)
{
	struct bpf_prog *prog = bpf->prog;

	if (!emac->xdpi.prog && !prog)
		return 0;

	WRITE_ONCE(emac->xdp_prog, prog);

	xdp_attachment_setup(&emac->xdpi, bpf);

	return 0;
}

/**
 * emac_ndo_bpf - implements ndo_bpf for icssg_prueth
 * @ndev: network adapter device
 * @bpf: XDP program
 *
 * Return: 0 on success, error code on failure.
 **/
static int emac_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
{
	struct prueth_emac *emac = netdev_priv(ndev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return emac_xdp_setup(emac, bpf);
	default:
		return -EINVAL;
	}
}

static const struct net_device_ops emac_netdev_ops = {
	.ndo_open = emac_ndo_open,
	.ndo_stop = emac_ndo_stop,
	.ndo_start_xmit = icssg_ndo_start_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_tx_timeout = icssg_ndo_tx_timeout,
	.ndo_set_rx_mode = emac_ndo_set_rx_mode,
	.ndo_eth_ioctl = icssg_ndo_ioctl,
	.ndo_get_stats64 = icssg_ndo_get_stats64,
	.ndo_get_phys_port_name = icssg_ndo_get_phys_port_name,
	.ndo_fix_features = emac_ndo_fix_features,
	.ndo_vlan_rx_add_vid = emac_ndo_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = emac_ndo_vlan_rx_del_vid,
	.ndo_bpf = emac_ndo_bpf,
	.ndo_xdp_xmit = emac_xdp_xmit,
};

static int prueth_netdev_init(struct prueth *prueth,
			      struct device_node *eth_node)
{
	int ret, num_tx_chn = PRUETH_MAX_TX_QUEUES;
	struct prueth_emac *emac;
	struct net_device *ndev;
	enum prueth_port port;
	const char *irq_name;
	enum prueth_mac mac;

	port = prueth_node_port(eth_node);
	if (port == PRUETH_PORT_INVALID)
		return -EINVAL;

	mac = prueth_node_mac(eth_node);
	if (mac == PRUETH_MAC_INVALID)
		return -EINVAL;

	ndev = alloc_etherdev_mq(sizeof(*emac), num_tx_chn);
	if (!ndev)
		return -ENOMEM;

	emac = netdev_priv(ndev);
	emac->prueth = prueth;
	emac->ndev = ndev;
	emac->port_id = port;
	emac->xdp_prog = NULL;
	emac->ndev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
	emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
	if (!emac->cmd_wq) {
		ret = -ENOMEM;
		goto free_ndev;
	}
	INIT_WORK(&emac->rx_mode_work, emac_ndo_set_rx_mode_work);

	INIT_DELAYED_WORK(&emac->stats_work, icssg_stats_work_handler);

	ret = pruss_request_mem_region(prueth->pruss,
				       port == PRUETH_PORT_MII0 ?
				       PRUSS_MEM_DRAM0 : PRUSS_MEM_DRAM1,
				       &emac->dram);
	if (ret) {
		dev_err(prueth->dev, "unable to get DRAM: %d\n", ret);
		ret = -ENOMEM;
		goto free_wq;
	}

	emac->tx_ch_num = 1;

	irq_name = "tx_ts0";
	if (emac->port_id == PRUETH_PORT_MII1)
		irq_name = "tx_ts1";
	emac->tx_ts_irq = platform_get_irq_byname_optional(prueth->pdev, irq_name);
	if (emac->tx_ts_irq < 0) {
		ret = dev_err_probe(prueth->dev, emac->tx_ts_irq, "could not get tx_ts_irq\n");
		goto free;
	}

	SET_NETDEV_DEV(ndev, prueth->dev);
	spin_lock_init(&emac->lock);
	mutex_init(&emac->cmd_lock);

	emac->phy_node = of_parse_phandle(eth_node, "phy-handle", 0);
	if (!emac->phy_node && !of_phy_is_fixed_link(eth_node)) {
		dev_err(prueth->dev, "couldn't find phy-handle\n");
		ret = -ENODEV;
		goto free;
	} else if (of_phy_is_fixed_link(eth_node)) {
		ret = of_phy_register_fixed_link(eth_node);
		if (ret) {
			ret = dev_err_probe(prueth->dev, ret,
					    "failed to register fixed-link phy\n");
			goto free;
		}

		emac->phy_node = eth_node;
	}

	ret = of_get_phy_mode(eth_node, &emac->phy_if);
	if (ret) {
		dev_err(prueth->dev, "could not get phy-mode property\n");
		goto free;
	}

	if (emac->phy_if != PHY_INTERFACE_MODE_MII &&
	    !phy_interface_mode_is_rgmii(emac->phy_if)) {
		dev_err(prueth->dev, "PHY mode unsupported %s\n", phy_modes(emac->phy_if));
		ret = -EINVAL;
		goto free;
	}

	/* AM65 SR2.0 has TX internal delay always enabled by hardware, and it
	 * is not possible to disable it. The switch statement below describes
	 * how different PHY modes are handled based on this hardware
	 * restriction.
	 */
	switch (emac->phy_if) {
	case PHY_INTERFACE_MODE_RGMII_ID:
		emac->phy_if = PHY_INTERFACE_MODE_RGMII_RXID;
		break;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		emac->phy_if = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		dev_err(prueth->dev, "RGMII mode without TX delay is not supported");
		ret = -EINVAL;
		goto free;
	default:
		break;
	}

	/* get mac address from DT and set private and netdev addr */
	ret = of_get_ethdev_address(eth_node, ndev);
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		eth_hw_addr_random(ndev);
		dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n",
			 port, ndev->dev_addr);
	}
	ether_addr_copy(emac->mac_addr, ndev->dev_addr);

	ndev->dev.of_node = eth_node;
	ndev->min_mtu = PRUETH_MIN_PKT_SIZE;
	ndev->max_mtu = PRUETH_MAX_MTU;
	ndev->netdev_ops = &emac_netdev_ops;
	ndev->ethtool_ops = &icssg_ethtool_ops;
	ndev->hw_features = NETIF_F_SG;
	ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
	ndev->hw_features |= NETIF_PRUETH_HSR_OFFLOAD_FEATURES;
	xdp_set_features_flag(ndev,
			      NETDEV_XDP_ACT_BASIC |
			      NETDEV_XDP_ACT_REDIRECT |
			      NETDEV_XDP_ACT_NDO_XMIT);

	netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
	hrtimer_setup(&emac->rx_hrtimer, &emac_rx_timer_callback, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL_PINNED);
	prueth->emac[mac] = emac;

	return 0;

free:
	pruss_release_mem_region(prueth->pruss, &emac->dram);
free_wq:
	destroy_workqueue(emac->cmd_wq);
free_ndev:
	emac->ndev = NULL;
	prueth->emac[mac] = NULL;
	free_netdev(ndev);

	return ret;
}

bool prueth_dev_check(const struct net_device *ndev)
{
	if (ndev->netdev_ops == &emac_netdev_ops && netif_running(ndev)) {
		struct prueth_emac *emac = netdev_priv(ndev);

		return emac->prueth->is_switch_mode;
	}

	return false;
}

static void prueth_offload_fwd_mark_update(struct prueth *prueth)
{
	int set_val = 0;
	int i;

	if (prueth->br_members == (BIT(PRUETH_PORT_MII0) | BIT(PRUETH_PORT_MII1)))
		set_val = 1;

	dev_dbg(prueth->dev, "set offload_fwd_mark %d\n", set_val);

	for (i = PRUETH_MAC0; i < PRUETH_NUM_MACS; i++) {
		struct prueth_emac *emac = prueth->emac[i];

		if (!emac || !emac->ndev)
			continue;

		emac->offload_fwd_mark = set_val;
	}
}

static int prueth_emac_restart(struct prueth *prueth)
{
	struct prueth_emac *emac0 = prueth->emac[PRUETH_MAC0];
	struct prueth_emac *emac1 = prueth->emac[PRUETH_MAC1];
	int ret;

	/* Detach the net_device for both PRUeth ports */
	if (netif_running(emac0->ndev))
		netif_device_detach(emac0->ndev);
	if (netif_running(emac1->ndev))
		netif_device_detach(emac1->ndev);

	/* Disable both PRUeth ports */
	ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
	ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);
	if (ret)
		return ret;

	/* Stop both PRU cores for both PRUeth ports */
	ret = prueth_emac_common_stop(prueth);
	if (ret) {
		dev_err(prueth->dev, "Failed to stop the firmwares");
		return ret;
	}

	/* Start both PRU cores for both PRUeth ports */
	ret = prueth_emac_common_start(prueth);
	if (ret) {
		dev_err(prueth->dev, "Failed to start the firmwares");
		return ret;
	}

	/* Enable forwarding for both PRUeth ports */
	ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD);
	ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD);

	/* Attach net_device for both PRUeth ports */
	netif_device_attach(emac0->ndev);
	netif_device_attach(emac1->ndev);

	return ret;
}

static void icssg_change_mode(struct prueth *prueth)
{
	struct prueth_emac *emac;
	int mac, ret;

	ret = prueth_emac_restart(prueth);
	if (ret) {
		dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
		return;
	}

	for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
		emac = prueth->emac[mac];
		if (prueth->is_hsr_offload_mode) {
			if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM)
				icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE);
			else
				icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE);
		}

		if (netif_running(emac->ndev)) {
			icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan,
					  ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
					  ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
					  ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
					  ICSSG_FDB_ENTRY_BLOCK,
					  true);
			icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID,
					  BIT(emac->port_id) | DEFAULT_PORT_MASK,
					  BIT(emac->port_id) | DEFAULT_UNTAG_MASK,
					  true);
			if (prueth->is_hsr_offload_mode)
				icssg_vtbl_modify(emac, DEFAULT_VID,
						  DEFAULT_PORT_MASK,
						  DEFAULT_UNTAG_MASK, true);
			icssg_set_pvid(prueth, emac->port_vlan, emac->port_id);
			if (prueth->is_switch_mode)
				icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);
		}
	}
}

static int prueth_netdevice_port_link(struct net_device *ndev,
				      struct net_device *br_ndev,
				      struct netlink_ext_ack *extack)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int err;

	if (!prueth->br_members) {
		prueth->hw_bridge_dev = br_ndev;
	} else {
		/* This is adding the port to a second bridge, which is
		 * unsupported
		 */
		if (prueth->hw_bridge_dev != br_ndev)
			return -EOPNOTSUPP;
	}

	err = switchdev_bridge_port_offload(br_ndev, ndev, emac,
					    &prueth->prueth_switchdev_nb,
					    &prueth->prueth_switchdev_bl_nb,
					    false, extack);
	if (err)
		return err;

	prueth->br_members |= BIT(emac->port_id);

	if (!prueth->is_switch_mode) {
		if (prueth->br_members & BIT(PRUETH_PORT_MII0) &&
		    prueth->br_members & BIT(PRUETH_PORT_MII1)) {
			prueth->is_switch_mode = true;
			prueth->default_vlan = PRUETH_DFLT_VLAN_SW;
			emac->port_vlan = prueth->default_vlan;
			icssg_change_mode(prueth);
		}
	}

	prueth_offload_fwd_mark_update(prueth);

	return NOTIFY_DONE;
}

static void prueth_netdevice_port_unlink(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int ret;

	prueth->br_members &= ~BIT(emac->port_id);

	if (prueth->is_switch_mode) {
		prueth->is_switch_mode = false;
		emac->port_vlan = 0;
		ret = prueth_emac_restart(prueth);
		if (ret) {
			dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
			return;
		}
	}

	prueth_offload_fwd_mark_update(prueth);

	if (!prueth->br_members)
		prueth->hw_bridge_dev = NULL;
}
static int prueth_hsr_port_link(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	struct prueth_emac *emac0;
	struct prueth_emac *emac1;

	emac0 = prueth->emac[PRUETH_MAC0];
	emac1 = prueth->emac[PRUETH_MAC1];

	if (prueth->is_switch_mode)
		return -EOPNOTSUPP;

	prueth->hsr_members |= BIT(emac->port_id);
	if (!prueth->is_hsr_offload_mode) {
		if (prueth->hsr_members & BIT(PRUETH_PORT_MII0) &&
		    prueth->hsr_members & BIT(PRUETH_PORT_MII1)) {
			if (!(emac0->ndev->features &
			      NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
			    !(emac1->ndev->features &
			      NETIF_PRUETH_HSR_OFFLOAD_FEATURES))
				return -EOPNOTSUPP;
			prueth->is_hsr_offload_mode = true;
			prueth->default_vlan = PRUETH_DFLT_VLAN_HSR;
			emac0->port_vlan = prueth->default_vlan;
			emac1->port_vlan = prueth->default_vlan;
			icssg_change_mode(prueth);
			netdev_dbg(ndev, "Enabling HSR offload mode\n");
		}
	}

	return 0;
}

static void prueth_hsr_port_unlink(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	struct prueth_emac *emac0;
	struct prueth_emac *emac1;
	int ret;

	emac0 = prueth->emac[PRUETH_MAC0];
	emac1 = prueth->emac[PRUETH_MAC1];

	prueth->hsr_members &= ~BIT(emac->port_id);
	if (prueth->is_hsr_offload_mode) {
		prueth->is_hsr_offload_mode = false;
		emac0->port_vlan = 0;
		emac1->port_vlan = 0;
		prueth->hsr_dev = NULL;
		ret = prueth_emac_restart(prueth);
		if (ret) {
			dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
			return;
		}
		netdev_dbg(ndev, "Disabling HSR Offload mode\n");
	}
}

/* netdev notifier */
static int prueth_netdevice_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info;
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	enum hsr_version hsr_ndev_version;
	int ret = NOTIFY_DONE;

	if (ndev->netdev_ops != &emac_netdev_ops)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		info = ptr;

		if ((ndev->features & NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
		    is_hsr_master(info->upper_dev)) {
			hsr_get_version(info->upper_dev, &hsr_ndev_version);
			if (hsr_ndev_version != HSR_V1 && hsr_ndev_version != PRP_V1)
				return -EOPNOTSUPP;
			prueth->hsr_prp_version = hsr_ndev_version;

			if (info->linking) {
				if (!prueth->hsr_dev) {
					prueth->hsr_dev = info->upper_dev;
					icssg_class_set_host_mac_addr(prueth->miig_rt,
								      prueth->hsr_dev->dev_addr);
				} else {
					if (prueth->hsr_dev != info->upper_dev) {
						netdev_dbg(ndev, "Both interfaces must be linked to same upper device\n");
						return -EOPNOTSUPP;
					}
				}
				prueth_hsr_port_link(ndev);
			} else {
				prueth_hsr_port_unlink(ndev);
			}
		}

		if (netif_is_bridge_master(info->upper_dev)) {
			if (info->linking)
				ret = prueth_netdevice_port_link(ndev, info->upper_dev, extack);
			else
				prueth_netdevice_port_unlink(ndev);
		}
		break;
	default:
		return NOTIFY_DONE;
	}

	return notifier_from_errno(ret);
}

static int prueth_register_notifiers(struct prueth *prueth)
{
	int ret = 0;

	prueth->prueth_netdevice_nb.notifier_call = &prueth_netdevice_event;
	ret = register_netdevice_notifier(&prueth->prueth_netdevice_nb);
	if (ret) {
		dev_err(prueth->dev, "can't register netdevice notifier\n");
		return ret;
	}

	ret = prueth_switchdev_register_notifiers(prueth);
	if (ret)
		unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);

	return ret;
}

static void prueth_unregister_notifiers(struct prueth *prueth)
{
	prueth_switchdev_unregister_notifiers(prueth);
	unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
}

static void icssg_read_firmware_names(struct device_node *np,
				      struct icssg_firmwares *fw)
{
	int i;

	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		of_property_read_string_index(np, "firmware-name", i * 3 + 0,
					      &fw[i].pru);
		of_property_read_string_index(np, "firmware-name", i * 3 + 1,
					      &fw[i].rtu);
		of_property_read_string_index(np, "firmware-name", i * 3 + 2,
					      &fw[i].txpru);
	}
}

/* icssg_firmware_name_replace - Replace a substring in firmware name
 * @dev: device pointer for memory allocation
 * @src: source firmware name string
 * @from: substring to replace
 * @to: replacement substring
 *
 * Return: a newly allocated string with the replacement, or the original
 * string if replacement is not possible.
 */
static const char *icssg_firmware_name_replace(struct device *dev,
					       const char *src,
					       const char *from,
					       const char *to)
{
	size_t prefix, from_len, to_len, total;
	const char *p = strstr(src, from);
	char *buf;

	if (!p)
		return src;	/* fallback: no replacement, use original */

	prefix = p - src;
	from_len = strlen(from);
	to_len = strlen(to);
	total = strlen(src) - from_len + to_len + 1;

	buf = devm_kzalloc(dev, total, GFP_KERNEL);
	if (!buf)
		return src;	/* fallback: allocation failed, use original */

	strscpy(buf, src, prefix + 1);
	strscpy(buf + prefix, to, to_len + 1);
	strscpy(buf + prefix + to_len, p + from_len, total - prefix - to_len);

	return buf;
}

/**
 * icssg_mode_firmware_names - Generate firmware names for a specific mode
 * @dev: device pointer for logging and context
 * @src: source array of firmware name structures
 * @dst: destination array to store updated firmware name structures
 * @from: substring in firmware names to be replaced
 * @to: substring to replace @from in firmware names
 *
 * Iterates over all MACs and replaces occurrences of the @from substring
 * with @to in the firmware names (pru, rtu, txpru) for each MAC. The
 * updated firmware names are stored in the @dst array.
 */
static void icssg_mode_firmware_names(struct device *dev,
				      struct icssg_firmwares *src,
				      struct icssg_firmwares *dst,
				      const char *from, const char *to)
{
	int i;

	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		dst[i].pru = icssg_firmware_name_replace(dev, src[i].pru,
							 from, to);
		dst[i].rtu = icssg_firmware_name_replace(dev, src[i].rtu,
							 from, to);
		dst[i].txpru = icssg_firmware_name_replace(dev, src[i].txpru,
							   from, to);
	}
}

static int prueth_probe(struct platform_device *pdev)
{
	struct device_node *eth_node, *eth_ports_node;
	struct device_node *eth0_node = NULL;
	struct device_node *eth1_node = NULL;
	struct genpool_data_align gp_data = {
		.align = SZ_64K,
	};
	struct device *dev = &pdev->dev;
	struct device_node *np;
	struct prueth *prueth;
	struct pruss *pruss;
	u32 msmc_ram_size;
	int i, ret;

	np = dev->of_node;

	BUILD_BUG_ON_MSG((sizeof(struct prueth_swdata) > PRUETH_NAV_SW_DATA_SIZE),
			 "insufficient SW_DATA size");

	prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
	if (!prueth)
		return -ENOMEM;

	dev_set_drvdata(dev, prueth);
	prueth->pdev = pdev;
	prueth->pdata = *(const struct prueth_pdata *)device_get_match_data(dev);

	prueth->dev = dev;
	eth_ports_node = of_get_child_by_name(np, "ethernet-ports");
	if (!eth_ports_node)
		return -ENOENT;

	for_each_child_of_node(eth_ports_node, eth_node) {
		u32 reg;

		if (strcmp(eth_node->name, "port"))
			continue;
		ret = of_property_read_u32(eth_node, "reg", &reg);
		if (ret < 0) {
			dev_err(dev, "%pOF error reading port_id %d\n",
				eth_node, ret);
		}

		of_node_get(eth_node);

		if (reg == 0) {
			eth0_node = eth_node;
			if (!of_device_is_available(eth0_node)) {
				of_node_put(eth0_node);
				eth0_node = NULL;
			}
		} else if (reg == 1) {
			eth1_node = eth_node;
			if (!of_device_is_available(eth1_node)) {
				of_node_put(eth1_node);
				eth1_node = NULL;
			}
		} else {
			dev_err(dev, "port reg should be 0 or 1\n");
		}
	}

	of_node_put(eth_ports_node);

	/* At least one node must be present and available else we fail */
	if (!eth0_node && !eth1_node) {
		dev_err(dev, "neither port0 nor port1 node available\n");
		return -ENODEV;
	}

	if (eth0_node == eth1_node) {
		dev_err(dev, "port0 and port1 can't have same reg\n");
		of_node_put(eth0_node);
		return -ENODEV;
	}

	prueth->eth_node[PRUETH_MAC0] = eth0_node;
	prueth->eth_node[PRUETH_MAC1] = eth1_node;

	prueth->miig_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-g-rt");
	if (IS_ERR(prueth->miig_rt)) {
		dev_err(dev, "couldn't get ti,mii-g-rt syscon regmap\n");
		return -ENODEV;
	}

	prueth->mii_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-rt");
	if (IS_ERR(prueth->mii_rt)) {
		dev_err(dev, "couldn't get ti,mii-rt syscon regmap\n");
		return -ENODEV;
	}

	prueth->pa_stats = syscon_regmap_lookup_by_phandle(np, "ti,pa-stats");
	if (IS_ERR(prueth->pa_stats)) {
		dev_err(dev, "couldn't get ti,pa-stats syscon regmap\n");
		prueth->pa_stats = NULL;
	}

	if (eth0_node || eth1_node) {
		ret = prueth_get_cores(prueth, ICSS_SLICE0, false);
		if (ret)
			goto put_cores;
		ret = prueth_get_cores(prueth, ICSS_SLICE1, false);
		if (ret)
			goto put_cores;
	}
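
	/* Both ICSSG slices belong to one PRUSS instance; look it up through
	 * whichever PRU core was acquired above.
	 */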
	pruss = pruss_get(eth0_node ?
			  prueth->pru[ICSS_SLICE0] : prueth->pru[ICSS_SLICE1]);
	if (IS_ERR(pruss)) {
		ret = PTR_ERR(pruss);
		dev_err(dev, "unable to get pruss handle\n");
		goto put_cores;
	}

	prueth->pruss = pruss;

	ret = pruss_request_mem_region(pruss, PRUSS_MEM_SHRD_RAM2,
				       &prueth->shram);
	if (ret) {
		dev_err(dev, "unable to get PRUSS SHRD RAM2: %d\n", ret);
		goto put_pruss;
	}

	prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
	if (!prueth->sram_pool) {
		dev_err(dev, "unable to get SRAM pool\n");
		ret = -ENODEV;

		goto put_mem;
	}

	prueth->is_switchmode_supported = prueth->pdata.switch_mode;
	if (prueth->pdata.banked_ms_ram) {
		/* Reserve 2 MSMC RAM banks for buffers to avoid arbitration */
		msmc_ram_size = (2 * MSMC_RAM_BANK_SIZE);
	} else {
		msmc_ram_size = PRUETH_EMAC_TOTAL_BUF_SIZE;
		if (prueth->is_switchmode_supported)
			msmc_ram_size = PRUETH_SW_TOTAL_BUF_SIZE;
	}

	/* NOTE: FW bug needs buffer base to be 64KB aligned */
	prueth->msmcram.va =
		(void __iomem *)gen_pool_alloc_algo(prueth->sram_pool,
						    msmc_ram_size,
						    gen_pool_first_fit_align,
						    &gp_data);

	if (!prueth->msmcram.va) {
		ret = -ENOMEM;
		dev_err(dev, "unable to allocate MSMC resource\n");
		goto put_mem;
	}
	prueth->msmcram.pa = gen_pool_virt_to_phys(prueth->sram_pool,
						   (unsigned long)prueth->msmcram.va);
	prueth->msmcram.size = msmc_ram_size;
	memset_io(prueth->msmcram.va, 0, msmc_ram_size);
	dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa,
		prueth->msmcram.va, prueth->msmcram.size);

	prueth->iep0 = icss_iep_get_idx(np, 0);
	if (IS_ERR(prueth->iep0)) {
		ret = dev_err_probe(dev, PTR_ERR(prueth->iep0), "iep0 get failed\n");
		prueth->iep0 = NULL;
		goto free_pool;
	}

	prueth->iep1 = icss_iep_get_idx(np, 1);
	if (IS_ERR(prueth->iep1)) {
		ret = dev_err_probe(dev, PTR_ERR(prueth->iep1), "iep1 get failed\n");
		goto put_iep0;
	}

	if (prueth->pdata.quirk_10m_link_issue) {
		/* Enable IEP1 for FW in 64bit mode as W/A for 10M FD link detect issue under TX
		 * traffic.
		 */
		icss_iep_init_fw(prueth->iep1);
	}

	/* Read EMAC firmware names from device tree */
	icssg_read_firmware_names(np, prueth->icssg_emac_firmwares);

	/* Generate other mode firmware names based on EMAC firmware names */
	icssg_mode_firmware_names(dev, prueth->icssg_emac_firmwares,
				  prueth->icssg_switch_firmwares, "eth", "sw");
	icssg_mode_firmware_names(dev, prueth->icssg_emac_firmwares,
				  prueth->icssg_hsr_firmwares, "eth", "hsr");
	icssg_mode_firmware_names(dev, prueth->icssg_emac_firmwares,
				  prueth->icssg_prp_firmwares, "eth", "prp");

	spin_lock_init(&prueth->vtbl_lock);
	spin_lock_init(&prueth->stats_lock);
	/* setup netdev interfaces */
	if (eth0_node) {
		ret = prueth_netdev_init(prueth, eth0_node);
		if (ret) {
			dev_err_probe(dev, ret, "netdev init %s failed\n",
				      eth0_node->name);
			goto exit_iep;
		}

		prueth->emac[PRUETH_MAC0]->half_duplex =
			of_property_read_bool(eth0_node, "ti,half-duplex-capable");

		prueth->emac[PRUETH_MAC0]->iep = prueth->iep0;
	}

	if (eth1_node) {
		ret = prueth_netdev_init(prueth, eth1_node);
		if (ret) {
			dev_err_probe(dev, ret, "netdev init %s failed\n",
				      eth1_node->name);
			goto netdev_exit;
		}

		prueth->emac[PRUETH_MAC1]->half_duplex =
			of_property_read_bool(eth1_node, "ti,half-duplex-capable");

		prueth->emac[PRUETH_MAC1]->iep = prueth->iep0;
	}

	/* register the network devices */
	if (eth0_node) {
		ret = register_netdev(prueth->emac[PRUETH_MAC0]->ndev);
		if (ret) {
			dev_err(dev, "can't register netdev for port MII0");
			goto netdev_exit;
		}

		prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev;

		ret = emac_phy_connect(prueth->emac[PRUETH_MAC0]);
		if (ret) {
			dev_err(dev,
				"can't connect to MII0 PHY, error %d", ret);
			goto netdev_unregister;
		}
		phy_attached_info(prueth->emac[PRUETH_MAC0]->ndev->phydev);
	}

	if (eth1_node) {
		ret = register_netdev(prueth->emac[PRUETH_MAC1]->ndev);
		if (ret) {
			dev_err(dev, "can't register netdev for port MII1");
			goto netdev_unregister;
		}

		prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev;
		ret = emac_phy_connect(prueth->emac[PRUETH_MAC1]);
		if (ret) {
			dev_err(dev,
				"can't connect to MII1 PHY, error %d", ret);
			goto netdev_unregister;
		}
		phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev);
	}

	if (prueth->is_switchmode_supported) {
		ret = prueth_register_notifiers(prueth);
		if (ret)
			goto netdev_unregister;

		sprintf(prueth->switch_id, "%s", dev_name(dev));
	}

	dev_info(dev, "TI PRU ethernet driver initialized: %s EMAC mode\n",
		 (!eth0_node || !eth1_node) ? "single" : "dual");

	if (eth1_node)
		of_node_put(eth1_node);
	if (eth0_node)
		of_node_put(eth0_node);
	return 0;

netdev_unregister:
	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		if (!prueth->registered_netdevs[i])
			continue;
		if (prueth->emac[i]->ndev->phydev) {
			phy_disconnect(prueth->emac[i]->ndev->phydev);
			prueth->emac[i]->ndev->phydev = NULL;
		}
		unregister_netdev(prueth->registered_netdevs[i]);
	}

netdev_exit:
	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		eth_node = prueth->eth_node[i];
		if (!eth_node)
			continue;

		prueth_netdev_exit(prueth, eth_node);
	}

exit_iep:
	if (prueth->pdata.quirk_10m_link_issue)
		icss_iep_exit_fw(prueth->iep1);
	icss_iep_put(prueth->iep1);

put_iep0:
	icss_iep_put(prueth->iep0);
	prueth->iep0 = NULL;
	prueth->iep1 = NULL;

free_pool:
	gen_pool_free(prueth->sram_pool,
		      (unsigned long)prueth->msmcram.va,
		      prueth->msmcram.size);

put_mem:
	pruss_release_mem_region(prueth->pruss, &prueth->shram);

put_pruss:
	pruss_put(prueth->pruss);

put_cores:
	if (eth0_node || eth1_node) {
		prueth_put_cores(prueth, ICSS_SLICE0);
		of_node_put(eth0_node);

		prueth_put_cores(prueth, ICSS_SLICE1);
		of_node_put(eth1_node);
	}

	return ret;
}

static void prueth_remove(struct platform_device *pdev)
{
	struct prueth *prueth = platform_get_drvdata(pdev);
	struct device_node *eth_node;
	int i;

	prueth_unregister_notifiers(prueth);

	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		if (!prueth->registered_netdevs[i])
			continue;
		phy_stop(prueth->emac[i]->ndev->phydev);
		phy_disconnect(prueth->emac[i]->ndev->phydev);
		prueth->emac[i]->ndev->phydev = NULL;
		unregister_netdev(prueth->registered_netdevs[i]);
	}

	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		eth_node = prueth->eth_node[i];
		if (!eth_node)
			continue;

		prueth_netdev_exit(prueth, eth_node);
	}

	if (prueth->pdata.quirk_10m_link_issue)
		icss_iep_exit_fw(prueth->iep1);

	icss_iep_put(prueth->iep1);
	icss_iep_put(prueth->iep0);

	gen_pool_free(prueth->sram_pool,
		      (unsigned long)prueth->msmcram.va,
		      prueth->msmcram.size);

	pruss_release_mem_region(prueth->pruss, &prueth->shram);

	pruss_put(prueth->pruss);

	if (prueth->eth_node[PRUETH_MAC1])
		prueth_put_cores(prueth, ICSS_SLICE1);

	if (prueth->eth_node[PRUETH_MAC0])
		prueth_put_cores(prueth, ICSS_SLICE0);
}

static const struct prueth_pdata am654_icssg_pdata = {
	.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
	.quirk_10m_link_issue = 1,
	.switch_mode = 1,
	.banked_ms_ram = 0,
};

static const struct prueth_pdata am64x_icssg_pdata = {
	.fdqring_mode = K3_RINGACC_RING_MODE_RING,
	.quirk_10m_link_issue = 1,
	.switch_mode = 1,
	.banked_ms_ram = 1,
};

static const struct of_device_id prueth_dt_match[] = {
	{ .compatible = "ti,am654-icssg-prueth", .data = &am654_icssg_pdata },
	{ .compatible = "ti,am642-icssg-prueth", .data = &am64x_icssg_pdata },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, prueth_dt_match);

static struct platform_driver prueth_driver = {
	.probe = prueth_probe,
	.remove = prueth_remove,
	.driver = {
		.name = "icssg-prueth",
		.of_match_table = prueth_dt_match,
		.pm = &prueth_dev_pm_ops,
	},
};
module_platform_driver(prueth_driver);

MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
MODULE_DESCRIPTION("PRUSS ICSSG Ethernet Driver");
MODULE_LICENSE("GPL");