// SPDX-License-Identifier: GPL-2.0

/* Texas Instruments ICSSG Ethernet Driver
 *
 * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
 *
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/etherdevice.h>
#include <linux/genalloc.h>
#include <linux/if_hsr.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/property.h>
#include <linux/remoteproc/pruss.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>
#include <net/switchdev.h>

#include "icssg_prueth.h"
#include "icssg_mii_rt.h"
#include "icssg_switchdev.h"
#include "../k3-cppi-desc-pool.h"

#define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG Ethernet driver"

#define DEFAULT_VID		1
#define DEFAULT_PORT_MASK	1
#define DEFAULT_UNTAG_MASK	1

#define NETIF_PRUETH_HSR_OFFLOAD_FEATURES	(NETIF_F_HW_HSR_FWD | \
						 NETIF_F_HW_HSR_DUP | \
						 NETIF_F_HW_HSR_TAG_INS | \
						 NETIF_F_HW_HSR_TAG_RM)

/* CTRLMMR_ICSSG_RGMII_CTRL register bits */
#define ICSSG_CTRL_RGMII_ID_MODE		BIT(24)

static int emac_get_tx_ts(struct prueth_emac *emac,
			  struct emac_tx_ts_response *rsp)
{
	struct prueth *prueth = emac->prueth;
	int slice = prueth_emac_slice(emac);
	int addr;

	addr = icssg_queue_pop(prueth, slice == 0 ?
			       ICSSG_TS_POP_SLICE0 : ICSSG_TS_POP_SLICE1);
	if (addr < 0)
		return addr;

	memcpy_fromio(rsp, prueth->shram.va + addr, sizeof(*rsp));
	/* return buffer back to the pool */
	icssg_queue_push(prueth, slice == 0 ?
			 ICSSG_TS_PUSH_SLICE0 : ICSSG_TS_PUSH_SLICE1, addr);

	return 0;
}

static void tx_ts_work(struct prueth_emac *emac)
{
	struct skb_shared_hwtstamps ssh;
	struct emac_tx_ts_response tsr;
	struct sk_buff *skb;
	int ret = 0;
	u32 hi_sw;
	u64 ns;

	/* There may be more than one pending request */
	while (1) {
		ret = emac_get_tx_ts(emac, &tsr);
		if (ret) /* nothing more */
			break;

		if (tsr.cookie >= PRUETH_MAX_TX_TS_REQUESTS ||
		    !emac->tx_ts_skb[tsr.cookie]) {
			netdev_err(emac->ndev, "Invalid TX TS cookie 0x%x\n",
				   tsr.cookie);
			break;
		}

		skb = emac->tx_ts_skb[tsr.cookie];
		emac->tx_ts_skb[tsr.cookie] = NULL;	/* free slot */
		if (!skb) {
			netdev_err(emac->ndev, "Driver Bug! got NULL skb\n");
			break;
		}

		hi_sw = readl(emac->prueth->shram.va +
			      TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
		ns = icssg_ts_to_ns(hi_sw, tsr.hi_ts, tsr.lo_ts,
				    IEP_DEFAULT_CYCLE_TIME_NS);

		memset(&ssh, 0, sizeof(ssh));
		ssh.hwtstamp = ns_to_ktime(ns);

		skb_tstamp_tx(skb, &ssh);
		dev_consume_skb_any(skb);

		if (atomic_dec_and_test(&emac->tx_ts_pending))	/* no more? */
			break;
	}
}

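/* Threaded handler for the per-port TX timestamp interrupt: drain all
 * pending timestamp responses from the firmware queue.
 */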
static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id)
{
	struct prueth_emac *emac = dev_id;

	/* currently only TX timestamp is being returned */
	tx_ts_work(emac);

	return IRQ_HANDLED;
}

static struct icssg_firmwares icssg_hsr_firmwares[] = {
	{
		.pru = "ti-pruss/am65x-sr2-pru0-pruhsr-fw.elf",
		.rtu = "ti-pruss/am65x-sr2-rtu0-pruhsr-fw.elf",
		.txpru = "ti-pruss/am65x-sr2-txpru0-pruhsr-fw.elf",
	},
	{
		.pru = "ti-pruss/am65x-sr2-pru1-pruhsr-fw.elf",
		.rtu = "ti-pruss/am65x-sr2-rtu1-pruhsr-fw.elf",
		.txpru = "ti-pruss/am65x-sr2-txpru1-pruhsr-fw.elf",
	}
};

static struct icssg_firmwares icssg_switch_firmwares[] = {
	{
		.pru = "ti-pruss/am65x-sr2-pru0-prusw-fw.elf",
		.rtu = "ti-pruss/am65x-sr2-rtu0-prusw-fw.elf",
		.txpru = "ti-pruss/am65x-sr2-txpru0-prusw-fw.elf",
	},
	{
		.pru = "ti-pruss/am65x-sr2-pru1-prusw-fw.elf",
		.rtu = "ti-pruss/am65x-sr2-rtu1-prusw-fw.elf",
		.txpru = "ti-pruss/am65x-sr2-txpru1-prusw-fw.elf",
	}
};

static struct icssg_firmwares icssg_emac_firmwares[] = {
	{
		.pru = "ti-pruss/am65x-sr2-pru0-prueth-fw.elf",
		.rtu = "ti-pruss/am65x-sr2-rtu0-prueth-fw.elf",
		.txpru = "ti-pruss/am65x-sr2-txpru0-prueth-fw.elf",
	},
	{
		.pru = "ti-pruss/am65x-sr2-pru1-prueth-fw.elf",
		.rtu = "ti-pruss/am65x-sr2-rtu1-prueth-fw.elf",
		.txpru = "ti-pruss/am65x-sr2-txpru1-prueth-fw.elf",
	}
};

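/* Select the firmware set that matches the current mode (EMAC, switch or
 * HSR offload), then boot the PRU, RTU and TX_PRU cores of this port's slice.
 */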
static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
{
	struct icssg_firmwares *firmwares;
	struct device *dev = prueth->dev;
	int slice, ret;

	if (prueth->is_switch_mode)
		firmwares = icssg_switch_firmwares;
	else if (prueth->is_hsr_offload_mode)
		firmwares = icssg_hsr_firmwares;
	else
		firmwares = icssg_emac_firmwares;

	slice = prueth_emac_slice(emac);
	if (slice < 0) {
		netdev_err(emac->ndev, "invalid port\n");
		return -EINVAL;
	}

	ret = icssg_config(prueth, emac, slice);
	if (ret)
		return ret;

	ret = rproc_set_firmware(prueth->pru[slice], firmwares[slice].pru);
	ret = rproc_boot(prueth->pru[slice]);
	if (ret) {
		dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
		return -EINVAL;
	}

	ret = rproc_set_firmware(prueth->rtu[slice], firmwares[slice].rtu);
	ret = rproc_boot(prueth->rtu[slice]);
	if (ret) {
		dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
		goto halt_pru;
	}

	ret = rproc_set_firmware(prueth->txpru[slice], firmwares[slice].txpru);
	ret = rproc_boot(prueth->txpru[slice]);
	if (ret) {
		dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
		goto halt_rtu;
	}

	emac->fw_running = 1;
	return 0;

halt_rtu:
	rproc_shutdown(prueth->rtu[slice]);

halt_pru:
	rproc_shutdown(prueth->pru[slice]);

	return ret;
}

/* called back by the PHY layer if there is a change in the link state of the
 * HW port
 */
static void emac_adjust_link(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	struct prueth *prueth = emac->prueth;
	bool new_state = false;
	unsigned long flags;

	if (phydev->link) {
		/* check the mode of operation - full/half duplex */
		if (phydev->duplex != emac->duplex) {
			new_state = true;
			emac->duplex = phydev->duplex;
		}
		if (phydev->speed != emac->speed) {
			new_state = true;
			emac->speed = phydev->speed;
		}
		if (!emac->link) {
			new_state = true;
			emac->link = 1;
		}
	} else if (emac->link) {
		new_state = true;
		emac->link = 0;

		/* f/w should support 100 & 1000 */
		emac->speed = SPEED_1000;

		/* half duplex may not be supported by f/w */
		emac->duplex = DUPLEX_FULL;
	}

	if (new_state) {
		phy_print_status(phydev);

		/* update RGMII and MII configuration based on PHY negotiated
		 * values
		 */
		if (emac->link) {
			if (emac->duplex == DUPLEX_HALF)
				icssg_config_half_duplex(emac);
			/* Set the RGMII cfg for gig en and full duplex */
			icssg_update_rgmii_cfg(prueth->miig_rt, emac);

			/* update the Tx IPG based on 100M/1G speed */
			spin_lock_irqsave(&emac->lock, flags);
			icssg_config_ipg(emac);
			spin_unlock_irqrestore(&emac->lock, flags);
			icssg_config_set_speed(emac);
			icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);

		} else {
			icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
		}
	}

	if (emac->link) {
		/* reactivate the transmit queue */
		netif_tx_wake_all_queues(ndev);
	} else {
		netif_tx_stop_all_queues(ndev);
		prueth_cleanup_tx_ts(emac);
	}
}

static enum hrtimer_restart emac_rx_timer_callback(struct hrtimer *timer)
{
	struct prueth_emac *emac =
			container_of(timer, struct prueth_emac, rx_hrtimer);
	int rx_flow = PRUETH_RX_FLOW_DATA;

	enable_irq(emac->rx_chns.irq[rx_flow]);
	return HRTIMER_NORESTART;
}

static int emac_phy_connect(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	struct net_device *ndev = emac->ndev;
	/* connect PHY */
	ndev->phydev = of_phy_connect(emac->ndev, emac->phy_node,
				      &emac_adjust_link, 0,
				      emac->phy_if);
	if (!ndev->phydev) {
		dev_err(prueth->dev, "couldn't connect to phy %s\n",
			emac->phy_node->full_name);
		return -ENODEV;
	}

	if (!emac->half_duplex) {
		dev_dbg(prueth->dev, "half duplex mode is not supported\n");
		phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
		phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	}

	/* remove unsupported modes */
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);

	if (emac->phy_if == PHY_INTERFACE_MODE_MII)
		phy_set_max_speed(ndev->phydev, SPEED_100);

	return 0;
}

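/* Assemble a 64-bit PTP time from the IEP hardware counter and the firmware
 * maintained HI count and rollover words in shared RAM.  The reads are
 * retried until a consistent snapshot (no rollover in between) is observed.
 */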
static u64 prueth_iep_gettime(void *clockops_data, struct ptp_system_timestamp *sts)
{
	u32 hi_rollover_count, hi_rollover_count_r;
	struct prueth_emac *emac = clockops_data;
	struct prueth *prueth = emac->prueth;
	void __iomem *fw_hi_r_count_addr;
	void __iomem *fw_count_hi_addr;
	u32 iepcount_hi, iepcount_hi_r;
	unsigned long flags;
	u32 iepcount_lo;
	u64 ts = 0;

	fw_count_hi_addr = prueth->shram.va + TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET;
	fw_hi_r_count_addr = prueth->shram.va + TIMESYNC_FW_WC_HI_ROLLOVER_COUNT_OFFSET;

	local_irq_save(flags);
	do {
		iepcount_hi = icss_iep_get_count_hi(emac->iep);
		iepcount_hi += readl(fw_count_hi_addr);
		hi_rollover_count = readl(fw_hi_r_count_addr);
		ptp_read_system_prets(sts);
		iepcount_lo = icss_iep_get_count_low(emac->iep);
		ptp_read_system_postts(sts);

		iepcount_hi_r = icss_iep_get_count_hi(emac->iep);
		iepcount_hi_r += readl(fw_count_hi_addr);
		hi_rollover_count_r = readl(fw_hi_r_count_addr);
	} while ((iepcount_hi_r != iepcount_hi) ||
		 (hi_rollover_count != hi_rollover_count_r));
	local_irq_restore(flags);

	ts = ((u64)hi_rollover_count) << 23 | iepcount_hi;
	ts = ts * (u64)IEP_DEFAULT_CYCLE_TIME_NS + iepcount_lo;

	return ts;
}

static void prueth_iep_settime(void *clockops_data, u64 ns)
{
	struct icssg_setclock_desc __iomem *sc_descp;
	struct prueth_emac *emac = clockops_data;
	struct icssg_setclock_desc sc_desc;
	u64 cyclecount;
	u32 cycletime;
	int timeout;

	if (!emac->fw_running)
		return;

	sc_descp = emac->prueth->shram.va + TIMESYNC_FW_WC_SETCLOCK_DESC_OFFSET;

	cycletime = IEP_DEFAULT_CYCLE_TIME_NS;
	cyclecount = ns / cycletime;

	memset(&sc_desc, 0, sizeof(sc_desc));
	sc_desc.margin = cycletime - 1000;
	sc_desc.cyclecounter0_set = cyclecount & GENMASK(31, 0);
	sc_desc.cyclecounter1_set = (cyclecount & GENMASK(63, 32)) >> 32;
	sc_desc.iepcount_set = ns % cycletime;
	/* Count from 0 to (cycle time) - emac->iep->def_inc */
	sc_desc.CMP0_current = cycletime - emac->iep->def_inc;

	memcpy_toio(sc_descp, &sc_desc, sizeof(sc_desc));

	writeb(1, &sc_descp->request);

	timeout = 5;	/* fw should take 2-3 ms */
	while (timeout--) {
		if (readb(&sc_descp->acknowledgment))
			return;

		usleep_range(500, 1000);
	}

	dev_err(emac->prueth->dev, "settime timeout\n");
}

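/* Program the firmware SYNC_OUT generator for PPS/periodic output.  The
 * requested period is expressed as a reduction factor of the IEP cycle time
 * plus a CMP offset within the cycle (returned via @cmp).
 */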
static int prueth_perout_enable(void *clockops_data,
				struct ptp_perout_request *req, int on,
				u64 *cmp)
{
	struct prueth_emac *emac = clockops_data;
	u32 reduction_factor = 0, offset = 0;
	struct timespec64 ts;
	u64 ns_period;

	if (!on)
		return 0;

	/* Any firmware specific stuff for PPS/PEROUT handling */
	ts.tv_sec = req->period.sec;
	ts.tv_nsec = req->period.nsec;
	ns_period = timespec64_to_ns(&ts);

	/* f/w doesn't support period less than cycle time */
	if (ns_period < IEP_DEFAULT_CYCLE_TIME_NS)
		return -ENXIO;

	reduction_factor = ns_period / IEP_DEFAULT_CYCLE_TIME_NS;
	offset = ns_period % IEP_DEFAULT_CYCLE_TIME_NS;

	/* f/w requires at least 1uS within a cycle so CMP
	 * can trigger after SYNC is enabled
	 */
	if (offset < 5 * NSEC_PER_USEC)
		offset = 5 * NSEC_PER_USEC;

	/* if offset is close to cycle time then we will miss
	 * the CMP event for last tick when IEP rolls over.
	 * In normal mode, IEP tick is 4ns.
	 * In slow compensation it could be 0ns or 8ns at
	 * every slow compensation cycle.
	 */
	if (offset > IEP_DEFAULT_CYCLE_TIME_NS - 8)
		offset = IEP_DEFAULT_CYCLE_TIME_NS - 8;

	/* we're in shadow mode so need to set upper 32-bits */
	*cmp = (u64)offset << 32;

	writel(reduction_factor, emac->prueth->shram.va +
	       TIMESYNC_FW_WC_SYNCOUT_REDUCTION_FACTOR_OFFSET);

	writel(0, emac->prueth->shram.va +
	       TIMESYNC_FW_WC_SYNCOUT_START_TIME_CYCLECOUNT_OFFSET);

	return 0;
}

const struct icss_iep_clockops prueth_iep_clockops = {
	.settime = prueth_iep_settime,
	.gettime = prueth_iep_gettime,
	.perout_enable = prueth_perout_enable,
};

static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	int port_mask = BIT(emac->port_id);

	port_mask |= icssg_fdb_lookup(emac, addr, 0);
	icssg_fdb_add_del(emac, addr, 0, port_mask, true);
	icssg_vtbl_modify(emac, 0, port_mask, port_mask, true);

	return 0;
}

static int icssg_prueth_del_mcast(struct net_device *ndev, const u8 *addr)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	int port_mask = BIT(emac->port_id);
	int other_port_mask;

	other_port_mask = port_mask ^ icssg_fdb_lookup(emac, addr, 0);

	icssg_fdb_add_del(emac, addr, 0, port_mask, false);
	icssg_vtbl_modify(emac, 0, port_mask, port_mask, false);

	if (other_port_mask) {
		icssg_fdb_add_del(emac, addr, 0, other_port_mask, true);
		icssg_vtbl_modify(emac, 0, other_port_mask, other_port_mask, true);
	}

	return 0;
}

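/* HSR offload variants of the multicast sync helpers: FDB entries are
 * installed on the default VLAN with host (P0) and both port (P1/P2)
 * membership.
 */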
static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;

	icssg_fdb_add_del(emac, addr, prueth->default_vlan,
			  ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
			  ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
			  ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
			  ICSSG_FDB_ENTRY_BLOCK, true);

	icssg_vtbl_modify(emac, emac->port_vlan, BIT(emac->port_id),
			  BIT(emac->port_id), true);
	return 0;
}

static int icssg_prueth_hsr_del_mcast(struct net_device *ndev, const u8 *addr)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;

	icssg_fdb_add_del(emac, addr, prueth->default_vlan,
			  ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
			  ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
			  ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
			  ICSSG_FDB_ENTRY_BLOCK, false);

	return 0;
}

/**
 * emac_ndo_open - EMAC device open
 * @ndev: network adapter device
 *
 * Called when system wants to start the interface.
 *
 * Return: 0 for a successful open, or appropriate error code
 */
static int emac_ndo_open(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	int ret, i, num_data_chn = emac->tx_ch_num;
	struct prueth *prueth = emac->prueth;
	int slice = prueth_emac_slice(emac);
	struct device *dev = prueth->dev;
	int max_rx_flows;
	int rx_flow;

	/* clear SMEM and MSMC settings for all slices */
	if (!prueth->emacs_initialized) {
		memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
		memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
	}

	/* set h/w MAC as user might have re-configured */
	ether_addr_copy(emac->mac_addr, ndev->dev_addr);

	icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
	icssg_class_default(prueth->miig_rt, slice, 0, false);
	icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);

	/* Notify the stack of the actual queue counts. */
	ret = netif_set_real_num_tx_queues(ndev, num_data_chn);
	if (ret) {
		dev_err(dev, "cannot set real number of tx queues\n");
		return ret;
	}

	init_completion(&emac->cmd_complete);
	ret = prueth_init_tx_chns(emac);
	if (ret) {
		dev_err(dev, "failed to init tx channel: %d\n", ret);
		return ret;
	}

	max_rx_flows = PRUETH_MAX_RX_FLOWS;
	ret = prueth_init_rx_chns(emac, &emac->rx_chns, "rx",
				  max_rx_flows, PRUETH_MAX_RX_DESC);
	if (ret) {
		dev_err(dev, "failed to init rx channel: %d\n", ret);
		goto cleanup_tx;
	}

	ret = prueth_ndev_add_tx_napi(emac);
	if (ret)
		goto cleanup_rx;

	/* we use only the highest priority flow for now i.e. @irq[3] */
	rx_flow = PRUETH_RX_FLOW_DATA;
	ret = request_irq(emac->rx_chns.irq[rx_flow], prueth_rx_irq,
			  IRQF_TRIGGER_HIGH, dev_name(dev), emac);
	if (ret) {
		dev_err(dev, "unable to request RX IRQ\n");
		goto cleanup_napi;
	}

	/* reset and start PRU firmware */
	ret = prueth_emac_start(prueth, emac);
	if (ret)
		goto free_rx_irq;

	icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);

	if (!prueth->emacs_initialized) {
		ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
				    emac, IEP_DEFAULT_CYCLE_TIME_NS);
	}

	ret = request_threaded_irq(emac->tx_ts_irq, NULL, prueth_tx_ts_irq,
				   IRQF_ONESHOT, dev_name(dev), emac);
	if (ret)
		goto stop;

	/* Prepare RX */
	ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
	if (ret)
		goto free_tx_ts_irq;

	ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
	if (ret)
		goto reset_rx_chn;

	for (i = 0; i < emac->tx_ch_num; i++) {
		ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
		if (ret)
			goto reset_tx_chan;
	}

	/* Enable NAPI in Tx and Rx direction */
	for (i = 0; i < emac->tx_ch_num; i++)
		napi_enable(&emac->tx_chns[i].napi_tx);
	napi_enable(&emac->napi_rx);

	/* start PHY */
	phy_start(ndev->phydev);

	prueth->emacs_initialized++;

	queue_work(system_long_wq, &emac->stats_work.work);

	return 0;

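	/* error unwind: tear down in reverse order of the setup above */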
reset_tx_chan:
	/* Since the interface is not yet up, there wouldn't be
	 * any SKB for completion. So set free_skb to false.
	 */
	prueth_reset_tx_chan(emac, i, false);
reset_rx_chn:
	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
free_tx_ts_irq:
	free_irq(emac->tx_ts_irq, emac);
stop:
	prueth_emac_stop(emac);
free_rx_irq:
	free_irq(emac->rx_chns.irq[rx_flow], emac);
cleanup_napi:
	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
cleanup_rx:
	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
cleanup_tx:
	prueth_cleanup_tx_chns(emac);

	return ret;
}

/**
 * emac_ndo_stop - EMAC device stop
 * @ndev: network adapter device
 *
 * Called when system wants to stop or bring down the interface.
 *
 * Return: Always 0 (Success)
 */
static int emac_ndo_stop(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int rx_flow = PRUETH_RX_FLOW_DATA;
	int max_rx_flows;
	int ret, i;

	/* inform the upper layers. */
	netif_tx_stop_all_queues(ndev);

	/* block packets from wire */
	if (ndev->phydev)
		phy_stop(ndev->phydev);

	icssg_class_disable(prueth->miig_rt, prueth_emac_slice(emac));

	if (emac->prueth->is_hsr_offload_mode)
		__dev_mc_unsync(ndev, icssg_prueth_hsr_del_mcast);
	else
		__dev_mc_unsync(ndev, icssg_prueth_del_mcast);

	atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
	/* ensure new tdown_cnt value is visible */
	smp_mb__after_atomic();
	/* tear down and disable UDMA channels */
	reinit_completion(&emac->tdown_complete);
	for (i = 0; i < emac->tx_ch_num; i++)
		k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);

	ret = wait_for_completion_timeout(&emac->tdown_complete,
					  msecs_to_jiffies(1000));
	if (!ret)
		netdev_err(ndev, "tx teardown timeout\n");

	prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
	for (i = 0; i < emac->tx_ch_num; i++) {
		napi_disable(&emac->tx_chns[i].napi_tx);
		hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer);
	}

	max_rx_flows = PRUETH_MAX_RX_FLOWS;
	k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);

	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);

	napi_disable(&emac->napi_rx);
	hrtimer_cancel(&emac->rx_hrtimer);

	cancel_work_sync(&emac->rx_mode_work);

	/* Destroying the queued work in ndo_stop() */
	cancel_delayed_work_sync(&emac->stats_work);

	if (prueth->emacs_initialized == 1)
		icss_iep_exit(emac->iep);

	/* stop PRUs */
	prueth_emac_stop(emac);

	free_irq(emac->tx_ts_irq, emac);

	free_irq(emac->rx_chns.irq[rx_flow], emac);
	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);

	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
	prueth_cleanup_tx_chns(emac);

	prueth->emacs_initialized--;

	return 0;
}

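/* Deferred worker for ndo_set_rx_mode: update the firmware flooding state
 * for promiscuous/allmulti and sync the multicast address list.
 */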
static void emac_ndo_set_rx_mode_work(struct work_struct *work)
{
	struct prueth_emac *emac = container_of(work, struct prueth_emac, rx_mode_work);
	struct net_device *ndev = emac->ndev;
	bool promisc, allmulti;

	if (!netif_running(ndev))
		return;

	promisc = ndev->flags & IFF_PROMISC;
	allmulti = ndev->flags & IFF_ALLMULTI;
	icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_DISABLE);
	icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_DISABLE);

	if (promisc) {
		icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_ENABLE);
		icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
		return;
	}

	if (allmulti) {
		icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
		return;
	}

	if (emac->prueth->is_hsr_offload_mode)
		__dev_mc_sync(ndev, icssg_prueth_hsr_add_mcast,
			      icssg_prueth_hsr_del_mcast);
	else
		__dev_mc_sync(ndev, icssg_prueth_add_mcast,
			      icssg_prueth_del_mcast);
}

/**
 * emac_ndo_set_rx_mode - EMAC set receive mode function
 * @ndev: The EMAC network adapter
 *
 * Called when system wants to set the receive mode of the device.
 *
 */
static void emac_ndo_set_rx_mode(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);

	queue_work(emac->cmd_wq, &emac->rx_mode_work);
}

static netdev_features_t emac_ndo_fix_features(struct net_device *ndev,
					       netdev_features_t features)
{
	/* hsr tag insertion offload and hsr dup offload are tightly coupled in
	 * firmware implementation. Both these features need to be enabled /
	 * disabled together.
	 */
	if (!(ndev->features & (NETIF_F_HW_HSR_DUP | NETIF_F_HW_HSR_TAG_INS)))
		if ((features & NETIF_F_HW_HSR_DUP) ||
		    (features & NETIF_F_HW_HSR_TAG_INS))
			features |= NETIF_F_HW_HSR_DUP |
				    NETIF_F_HW_HSR_TAG_INS;

	if ((ndev->features & NETIF_F_HW_HSR_DUP) ||
	    (ndev->features & NETIF_F_HW_HSR_TAG_INS))
		if (!(features & NETIF_F_HW_HSR_DUP) ||
		    !(features & NETIF_F_HW_HSR_TAG_INS))
			features &= ~(NETIF_F_HW_HSR_DUP |
				      NETIF_F_HW_HSR_TAG_INS);

	return features;
}

static int emac_ndo_vlan_rx_add_vid(struct net_device *ndev,
				    __be16 proto, u16 vid)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int untag_mask = 0;
	int port_mask;

	if (prueth->is_hsr_offload_mode) {
		port_mask = BIT(PRUETH_PORT_HOST) | BIT(emac->port_id);
		untag_mask = 0;

		netdev_dbg(emac->ndev, "VID add vid:%u port_mask:%X untag_mask %X\n",
			   vid, port_mask, untag_mask);

		icssg_vtbl_modify(emac, vid, port_mask, untag_mask, true);
		icssg_set_pvid(emac->prueth, vid, emac->port_id);
	}
	return 0;
}

static int emac_ndo_vlan_rx_del_vid(struct net_device *ndev,
				    __be16 proto, u16 vid)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int untag_mask = 0;
	int port_mask;

	if (prueth->is_hsr_offload_mode) {
		port_mask = BIT(PRUETH_PORT_HOST);
		untag_mask = 0;

		netdev_dbg(emac->ndev, "VID del vid:%u port_mask:%X untag_mask %X\n",
			   vid, port_mask, untag_mask);

		icssg_vtbl_modify(emac, vid, port_mask, untag_mask, false);
	}
	return 0;
}

static const struct net_device_ops emac_netdev_ops = {
	.ndo_open = emac_ndo_open,
	.ndo_stop = emac_ndo_stop,
	.ndo_start_xmit = icssg_ndo_start_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_tx_timeout = icssg_ndo_tx_timeout,
	.ndo_set_rx_mode = emac_ndo_set_rx_mode,
	.ndo_eth_ioctl = icssg_ndo_ioctl,
	.ndo_get_stats64 = icssg_ndo_get_stats64,
	.ndo_get_phys_port_name = icssg_ndo_get_phys_port_name,
	.ndo_fix_features = emac_ndo_fix_features,
	.ndo_vlan_rx_add_vid = emac_ndo_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = emac_ndo_vlan_rx_del_vid,
};

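/* Allocate and set up the net_device and prueth_emac state for one ICSSG
 * port described by @eth_node: DRAM region, TX timestamp IRQ, PHY handle,
 * MAC address and the RX NAPI/hrtimer instances.
 */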
static int prueth_netdev_init(struct prueth *prueth,
			      struct device_node *eth_node)
{
	int ret, num_tx_chn = PRUETH_MAX_TX_QUEUES;
	struct prueth_emac *emac;
	struct net_device *ndev;
	enum prueth_port port;
	const char *irq_name;
	enum prueth_mac mac;

	port = prueth_node_port(eth_node);
	if (port == PRUETH_PORT_INVALID)
		return -EINVAL;

	mac = prueth_node_mac(eth_node);
	if (mac == PRUETH_MAC_INVALID)
		return -EINVAL;

	ndev = alloc_etherdev_mq(sizeof(*emac), num_tx_chn);
	if (!ndev)
		return -ENOMEM;

	emac = netdev_priv(ndev);
	emac->prueth = prueth;
	emac->ndev = ndev;
	emac->port_id = port;
	emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
	if (!emac->cmd_wq) {
		ret = -ENOMEM;
		goto free_ndev;
	}
	INIT_WORK(&emac->rx_mode_work, emac_ndo_set_rx_mode_work);

	INIT_DELAYED_WORK(&emac->stats_work, icssg_stats_work_handler);

	ret = pruss_request_mem_region(prueth->pruss,
				       port == PRUETH_PORT_MII0 ?
				       PRUSS_MEM_DRAM0 : PRUSS_MEM_DRAM1,
				       &emac->dram);
	if (ret) {
		dev_err(prueth->dev, "unable to get DRAM: %d\n", ret);
		ret = -ENOMEM;
		goto free_wq;
	}

	emac->tx_ch_num = 1;

	irq_name = "tx_ts0";
	if (emac->port_id == PRUETH_PORT_MII1)
		irq_name = "tx_ts1";
	emac->tx_ts_irq = platform_get_irq_byname_optional(prueth->pdev, irq_name);
	if (emac->tx_ts_irq < 0) {
		ret = dev_err_probe(prueth->dev, emac->tx_ts_irq, "could not get tx_ts_irq\n");
		goto free;
	}

	SET_NETDEV_DEV(ndev, prueth->dev);
	spin_lock_init(&emac->lock);
	mutex_init(&emac->cmd_lock);

	emac->phy_node = of_parse_phandle(eth_node, "phy-handle", 0);
	if (!emac->phy_node && !of_phy_is_fixed_link(eth_node)) {
		dev_err(prueth->dev, "couldn't find phy-handle\n");
		ret = -ENODEV;
		goto free;
	} else if (of_phy_is_fixed_link(eth_node)) {
		ret = of_phy_register_fixed_link(eth_node);
		if (ret) {
			ret = dev_err_probe(prueth->dev, ret,
					    "failed to register fixed-link phy\n");
			goto free;
		}

		emac->phy_node = eth_node;
	}

	ret = of_get_phy_mode(eth_node, &emac->phy_if);
	if (ret) {
		dev_err(prueth->dev, "could not get phy-mode property\n");
		goto free;
	}

	if (emac->phy_if != PHY_INTERFACE_MODE_MII &&
	    !phy_interface_mode_is_rgmii(emac->phy_if)) {
		dev_err(prueth->dev, "PHY mode unsupported %s\n", phy_modes(emac->phy_if));
		ret = -EINVAL;
		goto free;
	}

	/* AM65 SR2.0 has TX Internal delay always enabled by hardware
	 * and it is not possible to disable TX Internal delay. The below
	 * switch case block describes how we handle different phy modes
	 * based on hardware restriction.
	 */
	switch (emac->phy_if) {
	case PHY_INTERFACE_MODE_RGMII_ID:
		emac->phy_if = PHY_INTERFACE_MODE_RGMII_RXID;
		break;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		emac->phy_if = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		dev_err(prueth->dev, "RGMII mode without TX delay is not supported");
		ret = -EINVAL;
		goto free;
	default:
		break;
	}

	/* get mac address from DT and set private and netdev addr */
	ret = of_get_ethdev_address(eth_node, ndev);
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		eth_hw_addr_random(ndev);
		dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n",
			 port, ndev->dev_addr);
	}
	ether_addr_copy(emac->mac_addr, ndev->dev_addr);

	ndev->dev.of_node = eth_node;
	ndev->min_mtu = PRUETH_MIN_PKT_SIZE;
	ndev->max_mtu = PRUETH_MAX_MTU;
	ndev->netdev_ops = &emac_netdev_ops;
	ndev->ethtool_ops = &icssg_ethtool_ops;
	ndev->hw_features = NETIF_F_SG;
	ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
	ndev->hw_features |= NETIF_PRUETH_HSR_OFFLOAD_FEATURES;

	netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
	hrtimer_init(&emac->rx_hrtimer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_PINNED);
	emac->rx_hrtimer.function = &emac_rx_timer_callback;
	prueth->emac[mac] = emac;

	return 0;

free:
	pruss_release_mem_region(prueth->pruss, &emac->dram);
free_wq:
	destroy_workqueue(emac->cmd_wq);
free_ndev:
	emac->ndev = NULL;
	prueth->emac[mac] = NULL;
	free_netdev(ndev);

	return ret;
}

bool prueth_dev_check(const struct net_device *ndev)
{
	if (ndev->netdev_ops == &emac_netdev_ops && netif_running(ndev)) {
		struct prueth_emac *emac = netdev_priv(ndev);

		return emac->prueth->is_switch_mode;
	}

	return false;
}

static void prueth_offload_fwd_mark_update(struct prueth *prueth)
{
	int set_val = 0;
	int i;

	if (prueth->br_members == (BIT(PRUETH_PORT_MII0) | BIT(PRUETH_PORT_MII1)))
		set_val = 1;

	dev_dbg(prueth->dev, "set offload_fwd_mark %d\n", set_val);

	for (i = PRUETH_MAC0; i < PRUETH_NUM_MACS; i++) {
		struct prueth_emac *emac = prueth->emac[i];

		if (!emac || !emac->ndev)
			continue;

		emac->offload_fwd_mark = set_val;
	}
}

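/* Stop and restart the firmware on both slices so that a mode change
 * (EMAC <-> switch/HSR offload) can take effect.  Both net_devices are
 * detached while the cores are rebooted and re-attached afterwards.
 */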
static void prueth_emac_restart(struct prueth *prueth)
{
	struct prueth_emac *emac0 = prueth->emac[PRUETH_MAC0];
	struct prueth_emac *emac1 = prueth->emac[PRUETH_MAC1];

	/* Detach the net_device for both PRUeth ports */
	if (netif_running(emac0->ndev))
		netif_device_detach(emac0->ndev);
	if (netif_running(emac1->ndev))
		netif_device_detach(emac1->ndev);

	/* Disable both PRUeth ports */
	icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
	icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);

	/* Stop the pru cores for both PRUeth ports */
	prueth_emac_stop(emac0);
	prueth->emacs_initialized--;
	prueth_emac_stop(emac1);
	prueth->emacs_initialized--;

	/* Start the pru cores for both PRUeth ports */
	prueth_emac_start(prueth, emac0);
	prueth->emacs_initialized++;
	prueth_emac_start(prueth, emac1);
	prueth->emacs_initialized++;

	/* Enable forwarding for both PRUeth ports */
	icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD);
	icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD);

	/* Attach the net_device for both PRUeth ports */
	netif_device_attach(emac0->ndev);
	netif_device_attach(emac1->ndev);
}

static void icssg_change_mode(struct prueth *prueth)
{
	struct prueth_emac *emac;
	int mac;

	prueth_emac_restart(prueth);

	for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
		emac = prueth->emac[mac];
		if (prueth->is_hsr_offload_mode) {
			if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM)
				icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE);
			else
				icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE);
		}

		if (netif_running(emac->ndev)) {
			icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan,
					  ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
					  ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
					  ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
					  ICSSG_FDB_ENTRY_BLOCK,
					  true);
			icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID,
					  BIT(emac->port_id) | DEFAULT_PORT_MASK,
					  BIT(emac->port_id) | DEFAULT_UNTAG_MASK,
					  true);
			if (prueth->is_hsr_offload_mode)
				icssg_vtbl_modify(emac, DEFAULT_VID,
						  DEFAULT_PORT_MASK,
						  DEFAULT_UNTAG_MASK, true);
			icssg_set_pvid(prueth, emac->port_vlan, emac->port_id);
			if (prueth->is_switch_mode)
				icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);
		}
	}
}

static int prueth_netdevice_port_link(struct net_device *ndev,
				      struct net_device *br_ndev,
				      struct netlink_ext_ack *extack)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int err;

	if (!prueth->br_members) {
		prueth->hw_bridge_dev = br_ndev;
	} else {
		/* This is adding the port to a second bridge, which is
		 * unsupported
		 */
		if (prueth->hw_bridge_dev != br_ndev)
			return -EOPNOTSUPP;
	}

	err = switchdev_bridge_port_offload(br_ndev, ndev, emac,
					    &prueth->prueth_switchdev_nb,
					    &prueth->prueth_switchdev_bl_nb,
					    false, extack);
	if (err)
		return err;

	prueth->br_members |= BIT(emac->port_id);

	if (!prueth->is_switch_mode) {
		if (prueth->br_members & BIT(PRUETH_PORT_MII0) &&
		    prueth->br_members & BIT(PRUETH_PORT_MII1)) {
			prueth->is_switch_mode = true;
			prueth->default_vlan = 1;
			emac->port_vlan = prueth->default_vlan;
			icssg_change_mode(prueth);
		}
	}

	prueth_offload_fwd_mark_update(prueth);

	return NOTIFY_DONE;
}

static void prueth_netdevice_port_unlink(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;

	prueth->br_members &= ~BIT(emac->port_id);

	if (prueth->is_switch_mode) {
		prueth->is_switch_mode = false;
		emac->port_vlan = 0;
		prueth_emac_restart(prueth);
	}

	prueth_offload_fwd_mark_update(prueth);

	if (!prueth->br_members)
		prueth->hw_bridge_dev = NULL;
}

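/* Called when a port is added to an HSR device: once both ICSSG ports are
 * members of the same HSR device, reconfigure the firmware for HSR offload
 * mode (provided the HSR offload features are advertised).
 */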
static int prueth_hsr_port_link(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	struct prueth_emac *emac0;
	struct prueth_emac *emac1;

	emac0 = prueth->emac[PRUETH_MAC0];
	emac1 = prueth->emac[PRUETH_MAC1];

	if (prueth->is_switch_mode)
		return -EOPNOTSUPP;

	prueth->hsr_members |= BIT(emac->port_id);
	if (!prueth->is_hsr_offload_mode) {
		if (prueth->hsr_members & BIT(PRUETH_PORT_MII0) &&
		    prueth->hsr_members & BIT(PRUETH_PORT_MII1)) {
			if (!(emac0->ndev->features &
			      NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
			    !(emac1->ndev->features &
			      NETIF_PRUETH_HSR_OFFLOAD_FEATURES))
				return -EOPNOTSUPP;
			prueth->is_hsr_offload_mode = true;
			prueth->default_vlan = 1;
			emac0->port_vlan = prueth->default_vlan;
			emac1->port_vlan = prueth->default_vlan;
			icssg_change_mode(prueth);
			netdev_dbg(ndev, "Enabling HSR offload mode\n");
		}
	}

	return 0;
}

static void prueth_hsr_port_unlink(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	struct prueth_emac *emac0;
	struct prueth_emac *emac1;

	emac0 = prueth->emac[PRUETH_MAC0];
	emac1 = prueth->emac[PRUETH_MAC1];

	prueth->hsr_members &= ~BIT(emac->port_id);
	if (prueth->is_hsr_offload_mode) {
		prueth->is_hsr_offload_mode = false;
		emac0->port_vlan = 0;
		emac1->port_vlan = 0;
		prueth->hsr_dev = NULL;
		prueth_emac_restart(prueth);
		netdev_dbg(ndev, "Disabling HSR Offload mode\n");
	}
}

/* netdev notifier */
static int prueth_netdevice_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info;
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int ret = NOTIFY_DONE;

	if (ndev->netdev_ops != &emac_netdev_ops)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		info = ptr;

		if ((ndev->features & NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
		    is_hsr_master(info->upper_dev)) {
			if (info->linking) {
				if (!prueth->hsr_dev) {
					prueth->hsr_dev = info->upper_dev;
					icssg_class_set_host_mac_addr(prueth->miig_rt,
								      prueth->hsr_dev->dev_addr);
				} else {
					if (prueth->hsr_dev != info->upper_dev) {
						netdev_dbg(ndev, "Both interfaces must be linked to the same upper device\n");
						return -EOPNOTSUPP;
					}
				}
				prueth_hsr_port_link(ndev);
			} else {
				prueth_hsr_port_unlink(ndev);
			}
		}

		if (netif_is_bridge_master(info->upper_dev)) {
			if (info->linking)
				ret = prueth_netdevice_port_link(ndev, info->upper_dev, extack);
			else
				prueth_netdevice_port_unlink(ndev);
		}
		break;
	default:
		return NOTIFY_DONE;
	}

	return notifier_from_errno(ret);
}

static int prueth_register_notifiers(struct prueth *prueth)
{
	int ret = 0;

	prueth->prueth_netdevice_nb.notifier_call = &prueth_netdevice_event;
	ret = register_netdevice_notifier(&prueth->prueth_netdevice_nb);
	if (ret) {
		dev_err(prueth->dev, "can't register netdevice notifier\n");
		return ret;
	}

	ret = prueth_switchdev_register_notifiers(prueth);
	if (ret)
		unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);

	return ret;
}

static void prueth_unregister_notifiers(struct prueth *prueth)
{
	prueth_switchdev_unregister_notifiers(prueth);
	unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
}

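/* Probe: parse the ethernet-ports children, claim the PRU/RTU/TX_PRU cores,
 * shared RAM, MSMC SRAM and IEPs, then create and register a net_device for
 * each available port.
 */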
static int prueth_probe(struct platform_device *pdev)
{
	struct device_node *eth_node, *eth_ports_node;
	struct device_node *eth0_node = NULL;
	struct device_node *eth1_node = NULL;
	struct genpool_data_align gp_data = {
		.align = SZ_64K,
	};
	struct device *dev = &pdev->dev;
	struct device_node *np;
	struct prueth *prueth;
	struct pruss *pruss;
	u32 msmc_ram_size;
	int i, ret;

	np = dev->of_node;

	prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
	if (!prueth)
		return -ENOMEM;

	dev_set_drvdata(dev, prueth);
	prueth->pdev = pdev;
	prueth->pdata = *(const struct prueth_pdata *)device_get_match_data(dev);

	prueth->dev = dev;
	eth_ports_node = of_get_child_by_name(np, "ethernet-ports");
	if (!eth_ports_node)
		return -ENOENT;

	for_each_child_of_node(eth_ports_node, eth_node) {
		u32 reg;

		if (strcmp(eth_node->name, "port"))
			continue;
		ret = of_property_read_u32(eth_node, "reg", &reg);
		if (ret < 0) {
			dev_err(dev, "%pOF error reading port_id %d\n",
				eth_node, ret);
		}

		of_node_get(eth_node);

		if (reg == 0) {
			eth0_node = eth_node;
			if (!of_device_is_available(eth0_node)) {
				of_node_put(eth0_node);
				eth0_node = NULL;
			}
		} else if (reg == 1) {
			eth1_node = eth_node;
			if (!of_device_is_available(eth1_node)) {
				of_node_put(eth1_node);
				eth1_node = NULL;
			}
		} else {
			dev_err(dev, "port reg should be 0 or 1\n");
		}
	}

	of_node_put(eth_ports_node);

	/* At least one node must be present and available else we fail */
	if (!eth0_node && !eth1_node) {
		dev_err(dev, "neither port0 nor port1 node available\n");
		return -ENODEV;
	}

	if (eth0_node == eth1_node) {
		dev_err(dev, "port0 and port1 can't have same reg\n");
		of_node_put(eth0_node);
		return -ENODEV;
	}

	prueth->eth_node[PRUETH_MAC0] = eth0_node;
	prueth->eth_node[PRUETH_MAC1] = eth1_node;

	prueth->miig_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-g-rt");
	if (IS_ERR(prueth->miig_rt)) {
		dev_err(dev, "couldn't get ti,mii-g-rt syscon regmap\n");
		return -ENODEV;
	}

	prueth->mii_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-rt");
	if (IS_ERR(prueth->mii_rt)) {
		dev_err(dev, "couldn't get ti,mii-rt syscon regmap\n");
		return -ENODEV;
	}

	prueth->pa_stats = syscon_regmap_lookup_by_phandle(np, "ti,pa-stats");
	if (IS_ERR(prueth->pa_stats)) {
		dev_err(dev, "couldn't get ti,pa-stats syscon regmap\n");
		prueth->pa_stats = NULL;
	}

	if (eth0_node) {
		ret = prueth_get_cores(prueth, ICSS_SLICE0, false);
		if (ret)
			goto put_cores;
	}

	if (eth1_node) {
		ret = prueth_get_cores(prueth, ICSS_SLICE1, false);
		if (ret)
			goto put_cores;
	}

	pruss = pruss_get(eth0_node ?
			  prueth->pru[ICSS_SLICE0] : prueth->pru[ICSS_SLICE1]);
	if (IS_ERR(pruss)) {
		ret = PTR_ERR(pruss);
		dev_err(dev, "unable to get pruss handle\n");
		goto put_cores;
	}

	prueth->pruss = pruss;

	ret = pruss_request_mem_region(pruss, PRUSS_MEM_SHRD_RAM2,
				       &prueth->shram);
	if (ret) {
		dev_err(dev, "unable to get PRUSS SHRD RAM2: %d\n", ret);
		goto put_pruss;
	}

	prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
	if (!prueth->sram_pool) {
		dev_err(dev, "unable to get SRAM pool\n");
		ret = -ENODEV;

		goto put_mem;
	}

	msmc_ram_size = MSMC_RAM_SIZE;
	prueth->is_switchmode_supported = prueth->pdata.switch_mode;
	if (prueth->is_switchmode_supported)
		msmc_ram_size = MSMC_RAM_SIZE_SWITCH_MODE;

	/* NOTE: FW bug needs buffer base to be 64KB aligned */
	prueth->msmcram.va =
		(void __iomem *)gen_pool_alloc_algo(prueth->sram_pool,
						    msmc_ram_size,
						    gen_pool_first_fit_align,
						    &gp_data);

	if (!prueth->msmcram.va) {
		ret = -ENOMEM;
		dev_err(dev, "unable to allocate MSMC resource\n");
		goto put_mem;
	}
	prueth->msmcram.pa = gen_pool_virt_to_phys(prueth->sram_pool,
						   (unsigned long)prueth->msmcram.va);
	prueth->msmcram.size = msmc_ram_size;
	memset_io(prueth->msmcram.va, 0, msmc_ram_size);
	dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa,
		prueth->msmcram.va, prueth->msmcram.size);

	prueth->iep0 = icss_iep_get_idx(np, 0);
	if (IS_ERR(prueth->iep0)) {
		ret = dev_err_probe(dev, PTR_ERR(prueth->iep0), "iep0 get failed\n");
		prueth->iep0 = NULL;
		goto free_pool;
	}

	prueth->iep1 = icss_iep_get_idx(np, 1);
	if (IS_ERR(prueth->iep1)) {
		ret = dev_err_probe(dev, PTR_ERR(prueth->iep1), "iep1 get failed\n");
		goto put_iep0;
	}

	if (prueth->pdata.quirk_10m_link_issue) {
		/* Enable IEP1 for FW in 64bit mode as W/A for 10M FD link detect issue under TX
		 * traffic.
		 */
		icss_iep_init_fw(prueth->iep1);
	}

	spin_lock_init(&prueth->vtbl_lock);
	/* setup netdev interfaces */
	if (eth0_node) {
		ret = prueth_netdev_init(prueth, eth0_node);
		if (ret) {
			dev_err_probe(dev, ret, "netdev init %s failed\n",
				      eth0_node->name);
			goto exit_iep;
		}

		prueth->emac[PRUETH_MAC0]->half_duplex =
			of_property_read_bool(eth0_node, "ti,half-duplex-capable");

		prueth->emac[PRUETH_MAC0]->iep = prueth->iep0;
	}

	if (eth1_node) {
		ret = prueth_netdev_init(prueth, eth1_node);
		if (ret) {
			dev_err_probe(dev, ret, "netdev init %s failed\n",
				      eth1_node->name);
			goto netdev_exit;
		}

		prueth->emac[PRUETH_MAC1]->half_duplex =
			of_property_read_bool(eth1_node, "ti,half-duplex-capable");

		prueth->emac[PRUETH_MAC1]->iep = prueth->iep0;
	}

	/* register the network devices */
	if (eth0_node) {
		ret = register_netdev(prueth->emac[PRUETH_MAC0]->ndev);
		if (ret) {
			dev_err(dev, "can't register netdev for port MII0");
			goto netdev_exit;
		}

		prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev;

		ret = emac_phy_connect(prueth->emac[PRUETH_MAC0]);
		if (ret) {
			dev_err(dev,
				"can't connect to MII0 PHY, error %d", ret);
			goto netdev_unregister;
		}
		phy_attached_info(prueth->emac[PRUETH_MAC0]->ndev->phydev);
	}

	if (eth1_node) {
		ret = register_netdev(prueth->emac[PRUETH_MAC1]->ndev);
		if (ret) {
			dev_err(dev, "can't register netdev for port MII1");
			goto netdev_unregister;
		}

		prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev;
		ret = emac_phy_connect(prueth->emac[PRUETH_MAC1]);
		if (ret) {
			dev_err(dev,
				"can't connect to MII1 PHY, error %d", ret);
			goto netdev_unregister;
		}
		phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev);
	}

	if (prueth->is_switchmode_supported) {
		ret = prueth_register_notifiers(prueth);
		if (ret)
			goto netdev_unregister;

		sprintf(prueth->switch_id, "%s", dev_name(dev));
	}

	dev_info(dev, "TI PRU ethernet driver initialized: %s EMAC mode\n",
		 (!eth0_node || !eth1_node) ? "single" : "dual");

	if (eth1_node)
		of_node_put(eth1_node);
	if (eth0_node)
		of_node_put(eth0_node);
	return 0;

netdev_unregister:
	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		if (!prueth->registered_netdevs[i])
			continue;
		if (prueth->emac[i]->ndev->phydev) {
			phy_disconnect(prueth->emac[i]->ndev->phydev);
			prueth->emac[i]->ndev->phydev = NULL;
		}
		unregister_netdev(prueth->registered_netdevs[i]);
	}

netdev_exit:
	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		eth_node = prueth->eth_node[i];
		if (!eth_node)
			continue;

		prueth_netdev_exit(prueth, eth_node);
	}

exit_iep:
	if (prueth->pdata.quirk_10m_link_issue)
		icss_iep_exit_fw(prueth->iep1);
	icss_iep_put(prueth->iep1);

put_iep0:
	icss_iep_put(prueth->iep0);
	prueth->iep0 = NULL;
	prueth->iep1 = NULL;

free_pool:
	gen_pool_free(prueth->sram_pool,
		      (unsigned long)prueth->msmcram.va, msmc_ram_size);

put_mem:
	pruss_release_mem_region(prueth->pruss, &prueth->shram);

put_pruss:
	pruss_put(prueth->pruss);

put_cores:
	if (eth1_node) {
		prueth_put_cores(prueth, ICSS_SLICE1);
		of_node_put(eth1_node);
	}

	if (eth0_node) {
		prueth_put_cores(prueth, ICSS_SLICE0);
		of_node_put(eth0_node);
	}

	return ret;
}

/* Remove: unregister notifiers and net_devices, disconnect PHYs, and release
 * the IEPs, MSMC SRAM, shared RAM and PRU cores in reverse probe order.
 */
static void prueth_remove(struct platform_device *pdev)
{
	struct prueth *prueth = platform_get_drvdata(pdev);
	struct device_node *eth_node;
	int i;

	prueth_unregister_notifiers(prueth);

	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		if (!prueth->registered_netdevs[i])
			continue;
		phy_stop(prueth->emac[i]->ndev->phydev);
		phy_disconnect(prueth->emac[i]->ndev->phydev);
		prueth->emac[i]->ndev->phydev = NULL;
		unregister_netdev(prueth->registered_netdevs[i]);
	}

	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		eth_node = prueth->eth_node[i];
		if (!eth_node)
			continue;

		prueth_netdev_exit(prueth, eth_node);
	}

	if (prueth->pdata.quirk_10m_link_issue)
		icss_iep_exit_fw(prueth->iep1);

	icss_iep_put(prueth->iep1);
	icss_iep_put(prueth->iep0);

	gen_pool_free(prueth->sram_pool,
		      (unsigned long)prueth->msmcram.va,
		      MSMC_RAM_SIZE);

	pruss_release_mem_region(prueth->pruss, &prueth->shram);

	pruss_put(prueth->pruss);

	if (prueth->eth_node[PRUETH_MAC1])
		prueth_put_cores(prueth, ICSS_SLICE1);

	if (prueth->eth_node[PRUETH_MAC0])
		prueth_put_cores(prueth, ICSS_SLICE0);
}

static const struct prueth_pdata am654_icssg_pdata = {
	.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
	.quirk_10m_link_issue = 1,
	.switch_mode = 1,
};

static const struct prueth_pdata am64x_icssg_pdata = {
	.fdqring_mode = K3_RINGACC_RING_MODE_RING,
	.quirk_10m_link_issue = 1,
	.switch_mode = 1,
};

static const struct of_device_id prueth_dt_match[] = {
	{ .compatible = "ti,am654-icssg-prueth", .data = &am654_icssg_pdata },
	{ .compatible = "ti,am642-icssg-prueth", .data = &am64x_icssg_pdata },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, prueth_dt_match);

static struct platform_driver prueth_driver = {
	.probe = prueth_probe,
	.remove = prueth_remove,
	.driver = {
		.name = "icssg-prueth",
		.of_match_table = prueth_dt_match,
		.pm = &prueth_dev_pm_ops,
	},
};
module_platform_driver(prueth_driver);

MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
MODULE_DESCRIPTION("PRUSS ICSSG Ethernet Driver");
MODULE_LICENSE("GPL");