// SPDX-License-Identifier: GPL-2.0

/* Texas Instruments ICSSG Ethernet Driver
 *
 * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
 *
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/etherdevice.h>
#include <linux/genalloc.h>
#include <linux/if_hsr.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/property.h>
#include <linux/remoteproc/pruss.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>
#include <net/switchdev.h>

#include "icssg_prueth.h"
#include "icssg_mii_rt.h"
#include "icssg_switchdev.h"
#include "../k3-cppi-desc-pool.h"

#define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG Ethernet driver"

#define DEFAULT_VID		1
#define DEFAULT_PORT_MASK	1
#define DEFAULT_UNTAG_MASK	1

#define NETIF_PRUETH_HSR_OFFLOAD_FEATURES	NETIF_F_HW_HSR_FWD

/* CTRLMMR_ICSSG_RGMII_CTRL register bits */
#define ICSSG_CTRL_RGMII_ID_MODE		BIT(24)

static int emac_get_tx_ts(struct prueth_emac *emac,
			  struct emac_tx_ts_response *rsp)
{
	struct prueth *prueth = emac->prueth;
	int slice = prueth_emac_slice(emac);
	int addr;

	addr = icssg_queue_pop(prueth, slice == 0 ?
			       ICSSG_TS_POP_SLICE0 : ICSSG_TS_POP_SLICE1);
	if (addr < 0)
		return addr;

	memcpy_fromio(rsp, prueth->shram.va + addr, sizeof(*rsp));
	/* return buffer back to the pool */
	icssg_queue_push(prueth, slice == 0 ?
			 ICSSG_TS_PUSH_SLICE0 : ICSSG_TS_PUSH_SLICE1, addr);

	return 0;
}

static void tx_ts_work(struct prueth_emac *emac)
{
	struct skb_shared_hwtstamps ssh;
	struct emac_tx_ts_response tsr;
	struct sk_buff *skb;
	int ret = 0;
	u32 hi_sw;
	u64 ns;

	/* There may be more than one pending request */
	while (1) {
		ret = emac_get_tx_ts(emac, &tsr);
		if (ret) /* nothing more */
			break;

		if (tsr.cookie >= PRUETH_MAX_TX_TS_REQUESTS ||
		    !emac->tx_ts_skb[tsr.cookie]) {
			netdev_err(emac->ndev, "Invalid TX TS cookie 0x%x\n",
				   tsr.cookie);
			break;
		}

		skb = emac->tx_ts_skb[tsr.cookie];
		emac->tx_ts_skb[tsr.cookie] = NULL;	/* free slot */
		if (!skb) {
			netdev_err(emac->ndev, "Driver Bug! got NULL skb\n");
			break;
		}

		hi_sw = readl(emac->prueth->shram.va +
			      TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
		ns = icssg_ts_to_ns(hi_sw, tsr.hi_ts, tsr.lo_ts,
				    IEP_DEFAULT_CYCLE_TIME_NS);

		memset(&ssh, 0, sizeof(ssh));
		ssh.hwtstamp = ns_to_ktime(ns);

		skb_tstamp_tx(skb, &ssh);
		dev_consume_skb_any(skb);

		if (atomic_dec_and_test(&emac->tx_ts_pending))	/* no more? */
			break;
	}
}

static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id)
{
	struct prueth_emac *emac = dev_id;

	/* currently only TX timestamp is being returned */
	tx_ts_work(emac);

	return IRQ_HANDLED;
}

static struct icssg_firmwares icssg_hsr_firmwares[] = {
	{
		.pru = "ti-pruss/am65x-sr2-pru0-pruhsr-fw.elf",
		.rtu = "ti-pruss/am65x-sr2-rtu0-pruhsr-fw.elf",
		.txpru = "ti-pruss/am65x-sr2-txpru0-pruhsr-fw.elf",
	},
	{
		.pru = "ti-pruss/am65x-sr2-pru1-pruhsr-fw.elf",
		.rtu = "ti-pruss/am65x-sr2-rtu1-pruhsr-fw.elf",
		.txpru = "ti-pruss/am65x-sr2-txpru1-pruhsr-fw.elf",
	}
};

static struct icssg_firmwares icssg_switch_firmwares[] = {
	{
		.pru = "ti-pruss/am65x-sr2-pru0-prusw-fw.elf",
		.rtu = "ti-pruss/am65x-sr2-rtu0-prusw-fw.elf",
		.txpru = "ti-pruss/am65x-sr2-txpru0-prusw-fw.elf",
	},
	{
		.pru = "ti-pruss/am65x-sr2-pru1-prusw-fw.elf",
		.rtu = "ti-pruss/am65x-sr2-rtu1-prusw-fw.elf",
		.txpru = "ti-pruss/am65x-sr2-txpru1-prusw-fw.elf",
	}
};

static struct icssg_firmwares icssg_emac_firmwares[] = {
	{
		.pru = "ti-pruss/am65x-sr2-pru0-prueth-fw.elf",
		.rtu = "ti-pruss/am65x-sr2-rtu0-prueth-fw.elf",
		.txpru = "ti-pruss/am65x-sr2-txpru0-prueth-fw.elf",
	},
	{
		.pru = "ti-pruss/am65x-sr2-pru1-prueth-fw.elf",
		.rtu = "ti-pruss/am65x-sr2-rtu1-prueth-fw.elf",
		.txpru = "ti-pruss/am65x-sr2-txpru1-prueth-fw.elf",
	}
};

static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
{
	struct icssg_firmwares *firmwares;
	struct device *dev = prueth->dev;
	int slice, ret;

	if (prueth->is_switch_mode)
		firmwares = icssg_switch_firmwares;
	else if (prueth->is_hsr_offload_mode)
		firmwares = icssg_hsr_firmwares;
	else
		firmwares = icssg_emac_firmwares;

	slice = prueth_emac_slice(emac);
	if (slice < 0) {
		netdev_err(emac->ndev, "invalid port\n");
		return -EINVAL;
	}

	ret = icssg_config(prueth, emac, slice);
	if (ret)
		return ret;

	ret = rproc_set_firmware(prueth->pru[slice], firmwares[slice].pru);
	ret = rproc_boot(prueth->pru[slice]);
	if (ret) {
		dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
		return -EINVAL;
	}

	ret = rproc_set_firmware(prueth->rtu[slice], firmwares[slice].rtu);
	ret = rproc_boot(prueth->rtu[slice]);
	if (ret) {
		dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
		goto halt_pru;
	}

	ret = rproc_set_firmware(prueth->txpru[slice], firmwares[slice].txpru);
	ret = rproc_boot(prueth->txpru[slice]);
	if (ret) {
		dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
		goto halt_rtu;
	}

	emac->fw_running = 1;
	return 0;

halt_rtu:
	rproc_shutdown(prueth->rtu[slice]);

halt_pru:
	rproc_shutdown(prueth->pru[slice]);

	return ret;
}

/* called back by PHY layer if there is a change in link state of hw port */
static void emac_adjust_link(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	struct prueth *prueth = emac->prueth;
	bool new_state = false;
	unsigned long flags;

	if (phydev->link) {
		/* check the mode of operation - full/half duplex */
		if (phydev->duplex != emac->duplex) {
			new_state = true;
			emac->duplex = phydev->duplex;
		}
		if (phydev->speed != emac->speed) {
			new_state = true;
			emac->speed = phydev->speed;
		}
		if (!emac->link) {
			new_state = true;
			emac->link = 1;
		}
	} else if (emac->link) {
		new_state = true;
		emac->link = 0;

		/* f/w should support 100 & 1000 */
		emac->speed = SPEED_1000;

		/* half duplex may not be supported by f/w */
		emac->duplex = DUPLEX_FULL;
	}

	if (new_state) {
		phy_print_status(phydev);

		/* update RGMII and MII configuration based on PHY negotiated
		 * values
		 */
		if (emac->link) {
			if (emac->duplex == DUPLEX_HALF)
				icssg_config_half_duplex(emac);
			/* Set the RGMII cfg for gig en and full duplex */
			icssg_update_rgmii_cfg(prueth->miig_rt, emac);

			/* update the Tx IPG based on 100M/1G speed */
			spin_lock_irqsave(&emac->lock, flags);
			icssg_config_ipg(emac);
			spin_unlock_irqrestore(&emac->lock, flags);
			icssg_config_set_speed(emac);
			icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);

		} else {
			icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
		}
	}

	if (emac->link) {
		/* reactivate the transmit queue */
		netif_tx_wake_all_queues(ndev);
	} else {
		netif_tx_stop_all_queues(ndev);
		prueth_cleanup_tx_ts(emac);
	}
}

static enum hrtimer_restart emac_rx_timer_callback(struct hrtimer *timer)
{
	struct prueth_emac *emac =
			container_of(timer, struct prueth_emac, rx_hrtimer);
	int rx_flow = PRUETH_RX_FLOW_DATA;

	enable_irq(emac->rx_chns.irq[rx_flow]);
	return HRTIMER_NORESTART;
}

static int emac_phy_connect(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	struct net_device *ndev = emac->ndev;
	/* connect PHY */
	ndev->phydev = of_phy_connect(emac->ndev, emac->phy_node,
				      &emac_adjust_link, 0,
				      emac->phy_if);
	if (!ndev->phydev) {
		dev_err(prueth->dev, "couldn't connect to phy %s\n",
			emac->phy_node->full_name);
		return -ENODEV;
	}

	if (!emac->half_duplex) {
		dev_dbg(prueth->dev, "half duplex mode is not supported\n");
		phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
		phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	}

	/* remove unsupported modes */
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);

	if (emac->phy_if == PHY_INTERFACE_MODE_MII)
		phy_set_max_speed(ndev->phydev, SPEED_100);

	return 0;
}

static u64 prueth_iep_gettime(void *clockops_data, struct ptp_system_timestamp *sts)
{
	u32 hi_rollover_count, hi_rollover_count_r;
	struct prueth_emac *emac = clockops_data;
	struct prueth *prueth = emac->prueth;
	void __iomem *fw_hi_r_count_addr;
	void __iomem *fw_count_hi_addr;
	u32 iepcount_hi, iepcount_hi_r;
	unsigned long flags;
	u32 iepcount_lo;
	u64 ts = 0;

	fw_count_hi_addr = prueth->shram.va + TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET;
	fw_hi_r_count_addr = prueth->shram.va + TIMESYNC_FW_WC_HI_ROLLOVER_COUNT_OFFSET;

	local_irq_save(flags);
	do {
		iepcount_hi = icss_iep_get_count_hi(emac->iep);
		iepcount_hi += readl(fw_count_hi_addr);
		hi_rollover_count = readl(fw_hi_r_count_addr);
		ptp_read_system_prets(sts);
		iepcount_lo = icss_iep_get_count_low(emac->iep);
		ptp_read_system_postts(sts);

		iepcount_hi_r = icss_iep_get_count_hi(emac->iep);
		iepcount_hi_r += readl(fw_count_hi_addr);
		hi_rollover_count_r = readl(fw_hi_r_count_addr);
	} while ((iepcount_hi_r != iepcount_hi) ||
		 (hi_rollover_count != hi_rollover_count_r));
	local_irq_restore(flags);

	ts = ((u64)hi_rollover_count) << 23 | iepcount_hi;
	ts = ts * (u64)IEP_DEFAULT_CYCLE_TIME_NS + iepcount_lo;

	return ts;
}

static void prueth_iep_settime(void *clockops_data, u64 ns)
{
	struct icssg_setclock_desc __iomem *sc_descp;
	struct prueth_emac *emac = clockops_data;
	struct icssg_setclock_desc sc_desc;
	u64 cyclecount;
	u32 cycletime;
	int timeout;

	if (!emac->fw_running)
		return;

	sc_descp = emac->prueth->shram.va + TIMESYNC_FW_WC_SETCLOCK_DESC_OFFSET;

	cycletime = IEP_DEFAULT_CYCLE_TIME_NS;
	cyclecount = ns / cycletime;

	memset(&sc_desc, 0, sizeof(sc_desc));
	sc_desc.margin = cycletime - 1000;
	sc_desc.cyclecounter0_set = cyclecount & GENMASK(31, 0);
	sc_desc.cyclecounter1_set = (cyclecount & GENMASK(63, 32)) >> 32;
	sc_desc.iepcount_set = ns % cycletime;
	/* Count from 0 to (cycle time) - emac->iep->def_inc */
	sc_desc.CMP0_current = cycletime - emac->iep->def_inc;

	memcpy_toio(sc_descp, &sc_desc, sizeof(sc_desc));

	writeb(1, &sc_descp->request);

	timeout = 5;	/* fw should take 2-3 ms */
	while (timeout--) {
		if (readb(&sc_descp->acknowledgment))
			return;

		usleep_range(500, 1000);
	}

	dev_err(emac->prueth->dev, "settime timeout\n");
}

static int prueth_perout_enable(void *clockops_data,
				struct ptp_perout_request *req, int on,
				u64 *cmp)
{
	struct prueth_emac *emac = clockops_data;
	u32 reduction_factor = 0, offset = 0;
	struct timespec64 ts;
	u64 ns_period;

	if (!on)
		return 0;

	/* Any firmware specific stuff for PPS/PEROUT handling */
	ts.tv_sec = req->period.sec;
	ts.tv_nsec = req->period.nsec;
	ns_period = timespec64_to_ns(&ts);

	/* f/w doesn't support period less than cycle time */
	if (ns_period < IEP_DEFAULT_CYCLE_TIME_NS)
		return -ENXIO;

	reduction_factor = ns_period / IEP_DEFAULT_CYCLE_TIME_NS;
	offset = ns_period % IEP_DEFAULT_CYCLE_TIME_NS;

	/* f/w requires at least 1uS within a cycle so CMP
	 * can trigger after SYNC is enabled
	 */
	if (offset < 5 * NSEC_PER_USEC)
		offset = 5 * NSEC_PER_USEC;

	/* if offset is close to cycle time then we will miss
	 * the CMP event for last tick when IEP rolls over.
	 * In normal mode, IEP tick is 4ns.
	 * In slow compensation it could be 0ns or 8ns at
	 * every slow compensation cycle.
	 */
	if (offset > IEP_DEFAULT_CYCLE_TIME_NS - 8)
		offset = IEP_DEFAULT_CYCLE_TIME_NS - 8;

	/* we're in shadow mode so need to set upper 32-bits */
	*cmp = (u64)offset << 32;

	writel(reduction_factor, emac->prueth->shram.va +
	       TIMESYNC_FW_WC_SYNCOUT_REDUCTION_FACTOR_OFFSET);

	writel(0, emac->prueth->shram.va +
	       TIMESYNC_FW_WC_SYNCOUT_START_TIME_CYCLECOUNT_OFFSET);

	return 0;
}

const struct icss_iep_clockops prueth_iep_clockops = {
	.settime = prueth_iep_settime,
	.gettime = prueth_iep_gettime,
	.perout_enable = prueth_perout_enable,
};

static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	int port_mask = BIT(emac->port_id);

	port_mask |= icssg_fdb_lookup(emac, addr, 0);
	icssg_fdb_add_del(emac, addr, 0, port_mask, true);
	icssg_vtbl_modify(emac, 0, port_mask, port_mask, true);

	return 0;
}

static int icssg_prueth_del_mcast(struct net_device *ndev, const u8 *addr)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	int port_mask = BIT(emac->port_id);
	int other_port_mask;

	other_port_mask = port_mask ^ icssg_fdb_lookup(emac, addr, 0);

	icssg_fdb_add_del(emac, addr, 0, port_mask, false);
	icssg_vtbl_modify(emac, 0, port_mask, port_mask, false);

	if (other_port_mask) {
		icssg_fdb_add_del(emac, addr, 0, other_port_mask, true);
		icssg_vtbl_modify(emac, 0, other_port_mask, other_port_mask, true);
	}

	return 0;
}

/**
 * emac_ndo_open - EMAC device open
 * @ndev: network adapter device
 *
 * Called when system wants to start the interface.
 *
 * Return: 0 for a successful open, or appropriate error code
 */
static int emac_ndo_open(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	int ret, i, num_data_chn = emac->tx_ch_num;
	struct prueth *prueth = emac->prueth;
	int slice = prueth_emac_slice(emac);
	struct device *dev = prueth->dev;
	int max_rx_flows;
	int rx_flow;

	/* clear SMEM and MSMC settings for all slices */
	if (!prueth->emacs_initialized) {
		memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
		memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
	}

	/* set h/w MAC as user might have re-configured */
	ether_addr_copy(emac->mac_addr, ndev->dev_addr);

	icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
	icssg_class_default(prueth->miig_rt, slice, 0, false);
	icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);

	/* Notify the stack of the actual queue counts. */
	ret = netif_set_real_num_tx_queues(ndev, num_data_chn);
	if (ret) {
		dev_err(dev, "cannot set real number of tx queues\n");
		return ret;
	}

	init_completion(&emac->cmd_complete);
	ret = prueth_init_tx_chns(emac);
	if (ret) {
		dev_err(dev, "failed to init tx channel: %d\n", ret);
		return ret;
	}

	max_rx_flows = PRUETH_MAX_RX_FLOWS;
	ret = prueth_init_rx_chns(emac, &emac->rx_chns, "rx",
				  max_rx_flows, PRUETH_MAX_RX_DESC);
	if (ret) {
		dev_err(dev, "failed to init rx channel: %d\n", ret);
		goto cleanup_tx;
	}

	ret = prueth_ndev_add_tx_napi(emac);
	if (ret)
		goto cleanup_rx;

	/* we use only the highest priority flow for now i.e. @irq[3] */
	rx_flow = PRUETH_RX_FLOW_DATA;
	ret = request_irq(emac->rx_chns.irq[rx_flow], prueth_rx_irq,
			  IRQF_TRIGGER_HIGH, dev_name(dev), emac);
	if (ret) {
		dev_err(dev, "unable to request RX IRQ\n");
		goto cleanup_napi;
	}

	/* reset and start PRU firmware */
	ret = prueth_emac_start(prueth, emac);
	if (ret)
		goto free_rx_irq;

	icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);

	if (!prueth->emacs_initialized) {
		ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
				    emac, IEP_DEFAULT_CYCLE_TIME_NS);
	}

	ret = request_threaded_irq(emac->tx_ts_irq, NULL, prueth_tx_ts_irq,
				   IRQF_ONESHOT, dev_name(dev), emac);
	if (ret)
		goto stop;

	/* Prepare RX */
	ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
	if (ret)
		goto free_tx_ts_irq;

	ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
	if (ret)
		goto reset_rx_chn;

	for (i = 0; i < emac->tx_ch_num; i++) {
		ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
		if (ret)
			goto reset_tx_chan;
	}

	/* Enable NAPI in Tx and Rx direction */
	for (i = 0; i < emac->tx_ch_num; i++)
		napi_enable(&emac->tx_chns[i].napi_tx);
	napi_enable(&emac->napi_rx);

	/* start PHY */
	phy_start(ndev->phydev);

	prueth->emacs_initialized++;

	queue_work(system_long_wq, &emac->stats_work.work);

	return 0;

reset_tx_chan:
	/* Since interface is not yet up, there wouldn't be
	 * any SKB for completion. So set free_skb to false.
	 */
	prueth_reset_tx_chan(emac, i, false);
reset_rx_chn:
	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
free_tx_ts_irq:
	free_irq(emac->tx_ts_irq, emac);
stop:
	prueth_emac_stop(emac);
free_rx_irq:
	free_irq(emac->rx_chns.irq[rx_flow], emac);
cleanup_napi:
	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
cleanup_rx:
	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
cleanup_tx:
	prueth_cleanup_tx_chns(emac);

	return ret;
}

/**
 * emac_ndo_stop - EMAC device stop
 * @ndev: network adapter device
 *
 * Called when system wants to stop or bring down the interface.
 *
 * Return: Always 0 (Success)
 */
static int emac_ndo_stop(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int rx_flow = PRUETH_RX_FLOW_DATA;
	int max_rx_flows;
	int ret, i;

	/* inform the upper layers. */
	netif_tx_stop_all_queues(ndev);

	/* block packets from wire */
	if (ndev->phydev)
		phy_stop(ndev->phydev);

	icssg_class_disable(prueth->miig_rt, prueth_emac_slice(emac));

	__dev_mc_unsync(ndev, icssg_prueth_del_mcast);

	atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
	/* ensure new tdown_cnt value is visible */
	smp_mb__after_atomic();
	/* tear down and disable UDMA channels */
	reinit_completion(&emac->tdown_complete);
	for (i = 0; i < emac->tx_ch_num; i++)
		k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);

	ret = wait_for_completion_timeout(&emac->tdown_complete,
					  msecs_to_jiffies(1000));
	if (!ret)
		netdev_err(ndev, "tx teardown timeout\n");

	prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
	for (i = 0; i < emac->tx_ch_num; i++) {
		napi_disable(&emac->tx_chns[i].napi_tx);
		hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer);
	}

	max_rx_flows = PRUETH_MAX_RX_FLOWS;
	k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);

	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);

	napi_disable(&emac->napi_rx);
	hrtimer_cancel(&emac->rx_hrtimer);

	cancel_work_sync(&emac->rx_mode_work);

	/* Destroying the queued work in ndo_stop() */
	cancel_delayed_work_sync(&emac->stats_work);

	if (prueth->emacs_initialized == 1)
		icss_iep_exit(emac->iep);

	/* stop PRUs */
	prueth_emac_stop(emac);

	free_irq(emac->tx_ts_irq, emac);

	free_irq(emac->rx_chns.irq[rx_flow], emac);
	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);

	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
	prueth_cleanup_tx_chns(emac);

	prueth->emacs_initialized--;

	return 0;
}

static void emac_ndo_set_rx_mode_work(struct work_struct *work)
{
	struct prueth_emac *emac = container_of(work, struct prueth_emac, rx_mode_work);
	struct net_device *ndev = emac->ndev;
	bool promisc, allmulti;

	if (!netif_running(ndev))
		return;

	promisc = ndev->flags & IFF_PROMISC;
	allmulti = ndev->flags & IFF_ALLMULTI;
	icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_DISABLE);
	icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_DISABLE);

	if (promisc) {
		icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_ENABLE);
		icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
		return;
	}

	if (allmulti) {
		icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
		return;
	}

	__dev_mc_sync(ndev, icssg_prueth_add_mcast, icssg_prueth_del_mcast);
}

/**
 * emac_ndo_set_rx_mode - EMAC set receive mode function
 * @ndev: The EMAC network adapter
 *
 * Called when system wants to set the receive mode of the device.
 *
 */
static void emac_ndo_set_rx_mode(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);

	queue_work(emac->cmd_wq, &emac->rx_mode_work);
}

static const struct net_device_ops emac_netdev_ops = {
	.ndo_open = emac_ndo_open,
	.ndo_stop = emac_ndo_stop,
	.ndo_start_xmit = icssg_ndo_start_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_tx_timeout = icssg_ndo_tx_timeout,
	.ndo_set_rx_mode = emac_ndo_set_rx_mode,
	.ndo_eth_ioctl = icssg_ndo_ioctl,
	.ndo_get_stats64 = icssg_ndo_get_stats64,
	.ndo_get_phys_port_name = icssg_ndo_get_phys_port_name,
};

static int prueth_netdev_init(struct prueth *prueth,
			      struct device_node *eth_node)
{
	int ret, num_tx_chn = PRUETH_MAX_TX_QUEUES;
	struct prueth_emac *emac;
	struct net_device *ndev;
	enum prueth_port port;
	const char *irq_name;
	enum prueth_mac mac;

	port = prueth_node_port(eth_node);
	if (port == PRUETH_PORT_INVALID)
		return -EINVAL;

	mac = prueth_node_mac(eth_node);
	if (mac == PRUETH_MAC_INVALID)
		return -EINVAL;

	ndev = alloc_etherdev_mq(sizeof(*emac), num_tx_chn);
	if (!ndev)
		return -ENOMEM;

	emac = netdev_priv(ndev);
	emac->prueth = prueth;
	emac->ndev = ndev;
	emac->port_id = port;
	emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
	if (!emac->cmd_wq) {
		ret = -ENOMEM;
		goto free_ndev;
	}
	INIT_WORK(&emac->rx_mode_work, emac_ndo_set_rx_mode_work);

	INIT_DELAYED_WORK(&emac->stats_work, icssg_stats_work_handler);

	ret = pruss_request_mem_region(prueth->pruss,
				       port == PRUETH_PORT_MII0 ?
				       PRUSS_MEM_DRAM0 : PRUSS_MEM_DRAM1,
				       &emac->dram);
	if (ret) {
		dev_err(prueth->dev, "unable to get DRAM: %d\n", ret);
		ret = -ENOMEM;
		goto free_wq;
	}

	emac->tx_ch_num = 1;

	irq_name = "tx_ts0";
	if (emac->port_id == PRUETH_PORT_MII1)
		irq_name = "tx_ts1";
	emac->tx_ts_irq = platform_get_irq_byname_optional(prueth->pdev, irq_name);
	if (emac->tx_ts_irq < 0) {
		ret = dev_err_probe(prueth->dev, emac->tx_ts_irq, "could not get tx_ts_irq\n");
		goto free;
	}

	SET_NETDEV_DEV(ndev, prueth->dev);
	spin_lock_init(&emac->lock);
	mutex_init(&emac->cmd_lock);

	emac->phy_node = of_parse_phandle(eth_node, "phy-handle", 0);
	if (!emac->phy_node && !of_phy_is_fixed_link(eth_node)) {
		dev_err(prueth->dev, "couldn't find phy-handle\n");
		ret = -ENODEV;
		goto free;
	} else if (of_phy_is_fixed_link(eth_node)) {
		ret = of_phy_register_fixed_link(eth_node);
		if (ret) {
			ret = dev_err_probe(prueth->dev, ret,
					    "failed to register fixed-link phy\n");
			goto free;
		}

		emac->phy_node = eth_node;
	}

	ret = of_get_phy_mode(eth_node, &emac->phy_if);
	if (ret) {
		dev_err(prueth->dev, "could not get phy-mode property\n");
		goto free;
	}

	if (emac->phy_if != PHY_INTERFACE_MODE_MII &&
	    !phy_interface_mode_is_rgmii(emac->phy_if)) {
		dev_err(prueth->dev, "PHY mode unsupported %s\n", phy_modes(emac->phy_if));
		ret = -EINVAL;
		goto free;
	}

	/* AM65 SR2.0 has TX Internal delay always enabled by hardware
	 * and it is not possible to disable TX Internal delay. The below
	 * switch case block describes how we handle different phy modes
	 * based on hardware restriction.
	 */
	switch (emac->phy_if) {
	case PHY_INTERFACE_MODE_RGMII_ID:
		emac->phy_if = PHY_INTERFACE_MODE_RGMII_RXID;
		break;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		emac->phy_if = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		dev_err(prueth->dev, "RGMII mode without TX delay is not supported");
		ret = -EINVAL;
		goto free;
	default:
		break;
	}

	/* get mac address from DT and set private and netdev addr */
	ret = of_get_ethdev_address(eth_node, ndev);
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		eth_hw_addr_random(ndev);
		dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n",
			 port, ndev->dev_addr);
	}
	ether_addr_copy(emac->mac_addr, ndev->dev_addr);

	ndev->dev.of_node = eth_node;
	ndev->min_mtu = PRUETH_MIN_PKT_SIZE;
	ndev->max_mtu = PRUETH_MAX_MTU;
	ndev->netdev_ops = &emac_netdev_ops;
	ndev->ethtool_ops = &icssg_ethtool_ops;
	ndev->hw_features = NETIF_F_SG;
	ndev->features = ndev->hw_features;
	ndev->hw_features |= NETIF_PRUETH_HSR_OFFLOAD_FEATURES;

	netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
	hrtimer_init(&emac->rx_hrtimer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_PINNED);
	emac->rx_hrtimer.function = &emac_rx_timer_callback;
	prueth->emac[mac] = emac;

	return 0;

free:
	pruss_release_mem_region(prueth->pruss, &emac->dram);
free_wq:
	destroy_workqueue(emac->cmd_wq);
free_ndev:
	emac->ndev = NULL;
	prueth->emac[mac] = NULL;
	free_netdev(ndev);

	return ret;
}

bool prueth_dev_check(const struct net_device *ndev)
{
	if (ndev->netdev_ops == &emac_netdev_ops && netif_running(ndev)) {
		struct prueth_emac *emac = netdev_priv(ndev);

		return emac->prueth->is_switch_mode;
	}

	return false;
}

static void prueth_offload_fwd_mark_update(struct prueth *prueth)
{
	int set_val = 0;
	int i;

	if (prueth->br_members == (BIT(PRUETH_PORT_MII0) | BIT(PRUETH_PORT_MII1)))
		set_val = 1;

	dev_dbg(prueth->dev, "set offload_fwd_mark %d\n", set_val);

	for (i = PRUETH_MAC0; i < PRUETH_NUM_MACS; i++) {
		struct prueth_emac *emac = prueth->emac[i];

		if (!emac || !emac->ndev)
			continue;

		emac->offload_fwd_mark = set_val;
	}
}

static void prueth_emac_restart(struct prueth *prueth)
{
	struct prueth_emac *emac0 = prueth->emac[PRUETH_MAC0];
	struct prueth_emac *emac1 = prueth->emac[PRUETH_MAC1];

	/* Detach the net_device for both PRUeth ports */
	if (netif_running(emac0->ndev))
		netif_device_detach(emac0->ndev);
	if (netif_running(emac1->ndev))
		netif_device_detach(emac1->ndev);

	/* Disable both PRUeth ports */
	icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
	icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);

	/* Stop both pru cores for both PRUeth ports */
	prueth_emac_stop(emac0);
	prueth->emacs_initialized--;
	prueth_emac_stop(emac1);
	prueth->emacs_initialized--;

	/* Start both pru cores for both PRUeth ports */
	prueth_emac_start(prueth, emac0);
	prueth->emacs_initialized++;
	prueth_emac_start(prueth, emac1);
	prueth->emacs_initialized++;

	/* Enable forwarding for both PRUeth ports */
	icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD);
	icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD);

	/* Attach the net_device for both PRUeth ports */
	netif_device_attach(emac0->ndev);
	netif_device_attach(emac1->ndev);
}

static void icssg_change_mode(struct prueth *prueth)
{
	struct prueth_emac *emac;
	int mac;

	prueth_emac_restart(prueth);

	for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
		emac = prueth->emac[mac];
		if (netif_running(emac->ndev)) {
			icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan,
					  ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
					  ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
					  ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
					  ICSSG_FDB_ENTRY_BLOCK,
					  true);
			icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID,
					  BIT(emac->port_id) | DEFAULT_PORT_MASK,
					  BIT(emac->port_id) | DEFAULT_UNTAG_MASK,
					  true);
			if (prueth->is_hsr_offload_mode)
				icssg_vtbl_modify(emac, DEFAULT_VID,
						  DEFAULT_PORT_MASK,
						  DEFAULT_UNTAG_MASK, true);
			icssg_set_pvid(prueth, emac->port_vlan, emac->port_id);
			if (prueth->is_switch_mode)
				icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);
		}
	}
}

static int prueth_netdevice_port_link(struct net_device *ndev,
				      struct net_device *br_ndev,
				      struct netlink_ext_ack *extack)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int err;

	if (!prueth->br_members) {
		prueth->hw_bridge_dev = br_ndev;
	} else {
		/* This is adding the port to a second bridge, which is
		 * unsupported
		 */
		if (prueth->hw_bridge_dev != br_ndev)
			return -EOPNOTSUPP;
	}

	err = switchdev_bridge_port_offload(br_ndev, ndev, emac,
					    &prueth->prueth_switchdev_nb,
					    &prueth->prueth_switchdev_bl_nb,
					    false, extack);
	if (err)
		return err;

	prueth->br_members |= BIT(emac->port_id);

	if (!prueth->is_switch_mode) {
		if (prueth->br_members & BIT(PRUETH_PORT_MII0) &&
		    prueth->br_members & BIT(PRUETH_PORT_MII1)) {
			prueth->is_switch_mode = true;
			prueth->default_vlan = 1;
			emac->port_vlan = prueth->default_vlan;
			icssg_change_mode(prueth);
		}
	}

	prueth_offload_fwd_mark_update(prueth);

	return NOTIFY_DONE;
}

static void prueth_netdevice_port_unlink(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;

	prueth->br_members &= ~BIT(emac->port_id);

	if (prueth->is_switch_mode) {
		prueth->is_switch_mode = false;
		emac->port_vlan = 0;
		prueth_emac_restart(prueth);
	}

	prueth_offload_fwd_mark_update(prueth);

	if (!prueth->br_members)
		prueth->hw_bridge_dev = NULL;
}

static int prueth_hsr_port_link(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	struct prueth_emac *emac0;
	struct prueth_emac *emac1;

	emac0 = prueth->emac[PRUETH_MAC0];
	emac1 = prueth->emac[PRUETH_MAC1];

	if (prueth->is_switch_mode)
		return -EOPNOTSUPP;

	prueth->hsr_members |= BIT(emac->port_id);
	if (!prueth->is_hsr_offload_mode) {
		if (prueth->hsr_members & BIT(PRUETH_PORT_MII0) &&
		    prueth->hsr_members & BIT(PRUETH_PORT_MII1)) {
			if (!(emac0->ndev->features &
			      NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
			    !(emac1->ndev->features &
			      NETIF_PRUETH_HSR_OFFLOAD_FEATURES))
				return -EOPNOTSUPP;
			prueth->is_hsr_offload_mode = true;
			prueth->default_vlan = 1;
			emac0->port_vlan = prueth->default_vlan;
			emac1->port_vlan = prueth->default_vlan;
			icssg_change_mode(prueth);
			netdev_dbg(ndev, "Enabling HSR offload mode\n");
		}
	}

	return 0;
}

static void prueth_hsr_port_unlink(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	struct prueth_emac *emac0;
	struct prueth_emac *emac1;

	emac0 = prueth->emac[PRUETH_MAC0];
	emac1 = prueth->emac[PRUETH_MAC1];

	prueth->hsr_members &= ~BIT(emac->port_id);
	if (prueth->is_hsr_offload_mode) {
		prueth->is_hsr_offload_mode = false;
		emac0->port_vlan = 0;
		emac1->port_vlan = 0;
		prueth->hsr_dev = NULL;
		prueth_emac_restart(prueth);
		netdev_dbg(ndev, "Disabling HSR Offload mode\n");
	}
}

/* netdev notifier */
static int prueth_netdevice_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info;
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int ret = NOTIFY_DONE;

	if (ndev->netdev_ops != &emac_netdev_ops)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		info = ptr;

		if ((ndev->features & NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
		    is_hsr_master(info->upper_dev)) {
			if (info->linking) {
				if (!prueth->hsr_dev) {
					prueth->hsr_dev = info->upper_dev;
					icssg_class_set_host_mac_addr(prueth->miig_rt,
								      prueth->hsr_dev->dev_addr);
				} else {
					if (prueth->hsr_dev != info->upper_dev) {
						netdev_dbg(ndev, "Both interfaces must be linked to same upper device\n");
						return -EOPNOTSUPP;
					}
				}
				prueth_hsr_port_link(ndev);
			} else {
				prueth_hsr_port_unlink(ndev);
			}
		}

		if (netif_is_bridge_master(info->upper_dev)) {
			if (info->linking)
				ret = prueth_netdevice_port_link(ndev, info->upper_dev, extack);
			else
				prueth_netdevice_port_unlink(ndev);
		}
		break;
	default:
		return NOTIFY_DONE;
	}

	return notifier_from_errno(ret);
}

static int prueth_register_notifiers(struct prueth *prueth)
{
	int ret = 0;

	prueth->prueth_netdevice_nb.notifier_call = &prueth_netdevice_event;
	ret = register_netdevice_notifier(&prueth->prueth_netdevice_nb);
	if (ret) {
		dev_err(prueth->dev, "can't register netdevice notifier\n");
		return ret;
	}

	ret = prueth_switchdev_register_notifiers(prueth);
	if (ret)
		unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);

	return ret;
}

static void prueth_unregister_notifiers(struct prueth *prueth)
{
	prueth_switchdev_unregister_notifiers(prueth);
	unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
}

static int prueth_probe(struct platform_device *pdev)
{
	struct device_node *eth_node, *eth_ports_node;
	struct device_node *eth0_node = NULL;
	struct device_node *eth1_node = NULL;
	struct genpool_data_align gp_data = {
		.align = SZ_64K,
	};
	struct device *dev = &pdev->dev;
	struct device_node *np;
	struct prueth *prueth;
	struct pruss *pruss;
	u32 msmc_ram_size;
	int i, ret;

	np = dev->of_node;

	prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
	if (!prueth)
		return -ENOMEM;

	dev_set_drvdata(dev, prueth);
	prueth->pdev = pdev;
	prueth->pdata = *(const struct prueth_pdata *)device_get_match_data(dev);

	prueth->dev = dev;
	eth_ports_node = of_get_child_by_name(np, "ethernet-ports");
	if (!eth_ports_node)
		return -ENOENT;

	for_each_child_of_node(eth_ports_node, eth_node) {
		u32 reg;

		if (strcmp(eth_node->name, "port"))
			continue;
		ret = of_property_read_u32(eth_node, "reg", &reg);
		if (ret < 0) {
			dev_err(dev, "%pOF error reading port_id %d\n",
				eth_node, ret);
		}

		of_node_get(eth_node);

		if (reg == 0) {
			eth0_node = eth_node;
			if (!of_device_is_available(eth0_node)) {
				of_node_put(eth0_node);
				eth0_node = NULL;
			}
		} else if (reg == 1) {
			eth1_node = eth_node;
			if (!of_device_is_available(eth1_node)) {
				of_node_put(eth1_node);
				eth1_node = NULL;
			}
		} else {
			dev_err(dev, "port reg should be 0 or 1\n");
		}
	}

	of_node_put(eth_ports_node);

	/* At least one node must be present and available else we fail */
	if (!eth0_node && !eth1_node) {
		dev_err(dev, "neither port0 nor port1 node available\n");
		return -ENODEV;
	}

	if (eth0_node == eth1_node) {
		dev_err(dev, "port0 and port1 can't have same reg\n");
		of_node_put(eth0_node);
		return -ENODEV;
	}

	prueth->eth_node[PRUETH_MAC0] = eth0_node;
	prueth->eth_node[PRUETH_MAC1] = eth1_node;

	prueth->miig_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-g-rt");
	if (IS_ERR(prueth->miig_rt)) {
		dev_err(dev, "couldn't get ti,mii-g-rt syscon regmap\n");
		return -ENODEV;
	}

	prueth->mii_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-rt");
	if (IS_ERR(prueth->mii_rt)) {
		dev_err(dev, "couldn't get ti,mii-rt syscon regmap\n");
		return -ENODEV;
	}

	prueth->pa_stats = syscon_regmap_lookup_by_phandle(np, "ti,pa-stats");
	if (IS_ERR(prueth->pa_stats)) {
		dev_err(dev, "couldn't get ti,pa-stats syscon regmap\n");
		prueth->pa_stats = NULL;
	}

	if (eth0_node) {
		ret = prueth_get_cores(prueth, ICSS_SLICE0, false);
		if (ret)
			goto put_cores;
	}

	if (eth1_node) {
		ret = prueth_get_cores(prueth, ICSS_SLICE1, false);
		if (ret)
			goto put_cores;
	}

	pruss = pruss_get(eth0_node ?
			  prueth->pru[ICSS_SLICE0] : prueth->pru[ICSS_SLICE1]);
	if (IS_ERR(pruss)) {
		ret = PTR_ERR(pruss);
		dev_err(dev, "unable to get pruss handle\n");
		goto put_cores;
	}

	prueth->pruss = pruss;

	ret = pruss_request_mem_region(pruss, PRUSS_MEM_SHRD_RAM2,
				       &prueth->shram);
	if (ret) {
		dev_err(dev, "unable to get PRUSS SHRD RAM2: %d\n", ret);
		goto put_pruss;
	}

	prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
	if (!prueth->sram_pool) {
		dev_err(dev, "unable to get SRAM pool\n");
		ret = -ENODEV;

		goto put_mem;
	}

	msmc_ram_size = MSMC_RAM_SIZE;
	prueth->is_switchmode_supported = prueth->pdata.switch_mode;
	if (prueth->is_switchmode_supported)
		msmc_ram_size = MSMC_RAM_SIZE_SWITCH_MODE;

	/* NOTE: FW bug needs buffer base to be 64KB aligned */
	prueth->msmcram.va =
		(void __iomem *)gen_pool_alloc_algo(prueth->sram_pool,
						    msmc_ram_size,
						    gen_pool_first_fit_align,
						    &gp_data);

	if (!prueth->msmcram.va) {
		ret = -ENOMEM;
		dev_err(dev, "unable to allocate MSMC resource\n");
		goto put_mem;
	}
	prueth->msmcram.pa = gen_pool_virt_to_phys(prueth->sram_pool,
						   (unsigned long)prueth->msmcram.va);
	prueth->msmcram.size = msmc_ram_size;
	memset_io(prueth->msmcram.va, 0, msmc_ram_size);
	dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa,
		prueth->msmcram.va, prueth->msmcram.size);

	prueth->iep0 = icss_iep_get_idx(np, 0);
	if (IS_ERR(prueth->iep0)) {
		ret = dev_err_probe(dev, PTR_ERR(prueth->iep0), "iep0 get failed\n");
		prueth->iep0 = NULL;
		goto free_pool;
	}

	prueth->iep1 = icss_iep_get_idx(np, 1);
	if (IS_ERR(prueth->iep1)) {
		ret = dev_err_probe(dev, PTR_ERR(prueth->iep1), "iep1 get failed\n");
		goto put_iep0;
	}

	if (prueth->pdata.quirk_10m_link_issue) {
		/* Enable IEP1 for FW in 64bit mode as W/A for 10M FD link detect issue under TX
		 * traffic.
		 */
		icss_iep_init_fw(prueth->iep1);
	}

	/* setup netdev interfaces */
	if (eth0_node) {
		ret = prueth_netdev_init(prueth, eth0_node);
		if (ret) {
			dev_err_probe(dev, ret, "netdev init %s failed\n",
				      eth0_node->name);
			goto exit_iep;
		}

		prueth->emac[PRUETH_MAC0]->half_duplex =
			of_property_read_bool(eth0_node, "ti,half-duplex-capable");

		prueth->emac[PRUETH_MAC0]->iep = prueth->iep0;
	}

	if (eth1_node) {
		ret = prueth_netdev_init(prueth, eth1_node);
		if (ret) {
			dev_err_probe(dev, ret, "netdev init %s failed\n",
				      eth1_node->name);
			goto netdev_exit;
		}

		prueth->emac[PRUETH_MAC1]->half_duplex =
			of_property_read_bool(eth1_node, "ti,half-duplex-capable");

		prueth->emac[PRUETH_MAC1]->iep = prueth->iep0;
	}

	/* register the network devices */
	if (eth0_node) {
		ret = register_netdev(prueth->emac[PRUETH_MAC0]->ndev);
		if (ret) {
			dev_err(dev, "can't register netdev for port MII0");
			goto netdev_exit;
		}

		prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev;

		ret = emac_phy_connect(prueth->emac[PRUETH_MAC0]);
		if (ret) {
			dev_err(dev,
				"can't connect to MII0 PHY, error %d", ret);
			goto netdev_unregister;
		}
		phy_attached_info(prueth->emac[PRUETH_MAC0]->ndev->phydev);
	}

	if (eth1_node) {
		ret = register_netdev(prueth->emac[PRUETH_MAC1]->ndev);
		if (ret) {
			dev_err(dev, "can't register netdev for port MII1");
			goto netdev_unregister;
		}

		prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev;
		ret = emac_phy_connect(prueth->emac[PRUETH_MAC1]);
		if (ret) {
			dev_err(dev,
				"can't connect to MII1 PHY, error %d", ret);
			goto netdev_unregister;
		}
		phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev);
	}

	if (prueth->is_switchmode_supported) {
		ret = prueth_register_notifiers(prueth);
		if (ret)
			goto netdev_unregister;

		sprintf(prueth->switch_id, "%s", dev_name(dev));
	}

	dev_info(dev, "TI PRU ethernet driver initialized: %s EMAC mode\n",
		 (!eth0_node || !eth1_node) ? "single" : "dual");
"single" : "dual"); 1448 1449 if (eth1_node) 1450 of_node_put(eth1_node); 1451 if (eth0_node) 1452 of_node_put(eth0_node); 1453 return 0; 1454 1455 netdev_unregister: 1456 for (i = 0; i < PRUETH_NUM_MACS; i++) { 1457 if (!prueth->registered_netdevs[i]) 1458 continue; 1459 if (prueth->emac[i]->ndev->phydev) { 1460 phy_disconnect(prueth->emac[i]->ndev->phydev); 1461 prueth->emac[i]->ndev->phydev = NULL; 1462 } 1463 unregister_netdev(prueth->registered_netdevs[i]); 1464 } 1465 1466 netdev_exit: 1467 for (i = 0; i < PRUETH_NUM_MACS; i++) { 1468 eth_node = prueth->eth_node[i]; 1469 if (!eth_node) 1470 continue; 1471 1472 prueth_netdev_exit(prueth, eth_node); 1473 } 1474 1475 exit_iep: 1476 if (prueth->pdata.quirk_10m_link_issue) 1477 icss_iep_exit_fw(prueth->iep1); 1478 icss_iep_put(prueth->iep1); 1479 1480 put_iep0: 1481 icss_iep_put(prueth->iep0); 1482 prueth->iep0 = NULL; 1483 prueth->iep1 = NULL; 1484 1485 free_pool: 1486 gen_pool_free(prueth->sram_pool, 1487 (unsigned long)prueth->msmcram.va, msmc_ram_size); 1488 1489 put_mem: 1490 pruss_release_mem_region(prueth->pruss, &prueth->shram); 1491 1492 put_pruss: 1493 pruss_put(prueth->pruss); 1494 1495 put_cores: 1496 if (eth1_node) { 1497 prueth_put_cores(prueth, ICSS_SLICE1); 1498 of_node_put(eth1_node); 1499 } 1500 1501 if (eth0_node) { 1502 prueth_put_cores(prueth, ICSS_SLICE0); 1503 of_node_put(eth0_node); 1504 } 1505 1506 return ret; 1507 } 1508 1509 static void prueth_remove(struct platform_device *pdev) 1510 { 1511 struct prueth *prueth = platform_get_drvdata(pdev); 1512 struct device_node *eth_node; 1513 int i; 1514 1515 prueth_unregister_notifiers(prueth); 1516 1517 for (i = 0; i < PRUETH_NUM_MACS; i++) { 1518 if (!prueth->registered_netdevs[i]) 1519 continue; 1520 phy_stop(prueth->emac[i]->ndev->phydev); 1521 phy_disconnect(prueth->emac[i]->ndev->phydev); 1522 prueth->emac[i]->ndev->phydev = NULL; 1523 unregister_netdev(prueth->registered_netdevs[i]); 1524 } 1525 1526 for (i = 0; i < PRUETH_NUM_MACS; i++) { 1527 eth_node = prueth->eth_node[i]; 1528 if (!eth_node) 1529 continue; 1530 1531 prueth_netdev_exit(prueth, eth_node); 1532 } 1533 1534 if (prueth->pdata.quirk_10m_link_issue) 1535 icss_iep_exit_fw(prueth->iep1); 1536 1537 icss_iep_put(prueth->iep1); 1538 icss_iep_put(prueth->iep0); 1539 1540 gen_pool_free(prueth->sram_pool, 1541 (unsigned long)prueth->msmcram.va, 1542 MSMC_RAM_SIZE); 1543 1544 pruss_release_mem_region(prueth->pruss, &prueth->shram); 1545 1546 pruss_put(prueth->pruss); 1547 1548 if (prueth->eth_node[PRUETH_MAC1]) 1549 prueth_put_cores(prueth, ICSS_SLICE1); 1550 1551 if (prueth->eth_node[PRUETH_MAC0]) 1552 prueth_put_cores(prueth, ICSS_SLICE0); 1553 } 1554 1555 static const struct prueth_pdata am654_icssg_pdata = { 1556 .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE, 1557 .quirk_10m_link_issue = 1, 1558 .switch_mode = 1, 1559 }; 1560 1561 static const struct prueth_pdata am64x_icssg_pdata = { 1562 .fdqring_mode = K3_RINGACC_RING_MODE_RING, 1563 .quirk_10m_link_issue = 1, 1564 .switch_mode = 1, 1565 }; 1566 1567 static const struct of_device_id prueth_dt_match[] = { 1568 { .compatible = "ti,am654-icssg-prueth", .data = &am654_icssg_pdata }, 1569 { .compatible = "ti,am642-icssg-prueth", .data = &am64x_icssg_pdata }, 1570 { /* sentinel */ } 1571 }; 1572 MODULE_DEVICE_TABLE(of, prueth_dt_match); 1573 1574 static struct platform_driver prueth_driver = { 1575 .probe = prueth_probe, 1576 .remove_new = prueth_remove, 1577 .driver = { 1578 .name = "icssg-prueth", 1579 .of_match_table = prueth_dt_match, 1580 .pm = 
&prueth_dev_pm_ops, 1581 }, 1582 }; 1583 module_platform_driver(prueth_driver); 1584 1585 MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>"); 1586 MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>"); 1587 MODULE_DESCRIPTION("PRUSS ICSSG Ethernet Driver"); 1588 MODULE_LICENSE("GPL"); 1589