// SPDX-License-Identifier: GPL-2.0

/* Texas Instruments ICSSG Ethernet Driver
 *
 * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
 *
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/etherdevice.h>
#include <linux/genalloc.h>
#include <linux/if_hsr.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/property.h>
#include <linux/remoteproc/pruss.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>
#include <net/switchdev.h>

#include "icssg_prueth.h"
#include "icssg_mii_rt.h"
#include "icssg_switchdev.h"
#include "../k3-cppi-desc-pool.h"

#define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG Ethernet driver"

#define DEFAULT_VID		1
#define DEFAULT_PORT_MASK	1
#define DEFAULT_UNTAG_MASK	1

#define NETIF_PRUETH_HSR_OFFLOAD_FEATURES	(NETIF_F_HW_HSR_FWD | \
						 NETIF_F_HW_HSR_DUP | \
						 NETIF_F_HW_HSR_TAG_INS | \
						 NETIF_F_HW_HSR_TAG_RM)

/* CTRLMMR_ICSSG_RGMII_CTRL register bits */
#define ICSSG_CTRL_RGMII_ID_MODE		BIT(24)

static int emac_get_tx_ts(struct prueth_emac *emac,
			  struct emac_tx_ts_response *rsp)
{
	struct prueth *prueth = emac->prueth;
	int slice = prueth_emac_slice(emac);
	int addr;

	addr = icssg_queue_pop(prueth, slice == 0 ?
			       ICSSG_TS_POP_SLICE0 : ICSSG_TS_POP_SLICE1);
	if (addr < 0)
		return addr;

	memcpy_fromio(rsp, prueth->shram.va + addr, sizeof(*rsp));
	/* return buffer back to the pool */
	icssg_queue_push(prueth, slice == 0 ?
			 ICSSG_TS_PUSH_SLICE0 : ICSSG_TS_PUSH_SLICE1, addr);

	return 0;
}

static void tx_ts_work(struct prueth_emac *emac)
{
	struct skb_shared_hwtstamps ssh;
	struct emac_tx_ts_response tsr;
	struct sk_buff *skb;
	int ret = 0;
	u32 hi_sw;
	u64 ns;

	/* There may be more than one pending request */
	while (1) {
		ret = emac_get_tx_ts(emac, &tsr);
		if (ret) /* nothing more */
			break;

		if (tsr.cookie >= PRUETH_MAX_TX_TS_REQUESTS ||
		    !emac->tx_ts_skb[tsr.cookie]) {
			netdev_err(emac->ndev, "Invalid TX TS cookie 0x%x\n",
				   tsr.cookie);
			break;
		}

		skb = emac->tx_ts_skb[tsr.cookie];
		emac->tx_ts_skb[tsr.cookie] = NULL;	/* free slot */
		if (!skb) {
			netdev_err(emac->ndev, "Driver Bug! got NULL skb\n");
			break;
		}

		hi_sw = readl(emac->prueth->shram.va +
			      TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
		ns = icssg_ts_to_ns(hi_sw, tsr.hi_ts, tsr.lo_ts,
				    IEP_DEFAULT_CYCLE_TIME_NS);

		memset(&ssh, 0, sizeof(ssh));
		ssh.hwtstamp = ns_to_ktime(ns);

		skb_tstamp_tx(skb, &ssh);
		dev_consume_skb_any(skb);

		if (atomic_dec_and_test(&emac->tx_ts_pending))	/* no more? */
			break;
	}
}

static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id)
{
	struct prueth_emac *emac = dev_id;

	/* currently only TX timestamp is being returned */
	tx_ts_work(emac);

	return IRQ_HANDLED;
}

static struct icssg_firmwares icssg_hsr_firmwares[] = {
	{
		.pru = "ti-pruss/am65x-sr2-pru0-pruhsr-fw.elf",
		.rtu = "ti-pruss/am65x-sr2-rtu0-pruhsr-fw.elf",
		.txpru = "ti-pruss/am65x-sr2-txpru0-pruhsr-fw.elf",
	},
	{
		.pru = "ti-pruss/am65x-sr2-pru1-pruhsr-fw.elf",
		.rtu = "ti-pruss/am65x-sr2-rtu1-pruhsr-fw.elf",
		.txpru = "ti-pruss/am65x-sr2-txpru1-pruhsr-fw.elf",
	}
};

static struct icssg_firmwares icssg_switch_firmwares[] = {
	{
		.pru = "ti-pruss/am65x-sr2-pru0-prusw-fw.elf",
		.rtu = "ti-pruss/am65x-sr2-rtu0-prusw-fw.elf",
		.txpru = "ti-pruss/am65x-sr2-txpru0-prusw-fw.elf",
	},
	{
		.pru = "ti-pruss/am65x-sr2-pru1-prusw-fw.elf",
		.rtu = "ti-pruss/am65x-sr2-rtu1-prusw-fw.elf",
		.txpru = "ti-pruss/am65x-sr2-txpru1-prusw-fw.elf",
	}
};

static struct icssg_firmwares icssg_emac_firmwares[] = {
	{
		.pru = "ti-pruss/am65x-sr2-pru0-prueth-fw.elf",
		.rtu = "ti-pruss/am65x-sr2-rtu0-prueth-fw.elf",
		.txpru = "ti-pruss/am65x-sr2-txpru0-prueth-fw.elf",
	},
	{
		.pru = "ti-pruss/am65x-sr2-pru1-prueth-fw.elf",
		.rtu = "ti-pruss/am65x-sr2-rtu1-prueth-fw.elf",
		.txpru = "ti-pruss/am65x-sr2-txpru1-prueth-fw.elf",
	}
};

static int prueth_start(struct rproc *rproc, const char *fw_name)
{
	int ret;

	ret = rproc_set_firmware(rproc, fw_name);
	if (ret)
		return ret;
	return rproc_boot(rproc);
}

static void prueth_shutdown(struct rproc *rproc)
{
	rproc_shutdown(rproc);
}

static int prueth_emac_start(struct prueth *prueth)
{
	struct icssg_firmwares *firmwares;
	struct device *dev = prueth->dev;
	int ret, slice;

	if (prueth->is_switch_mode)
		firmwares = icssg_switch_firmwares;
	else if (prueth->is_hsr_offload_mode)
		firmwares = icssg_hsr_firmwares;
	else
		firmwares = icssg_emac_firmwares;

	for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
		ret = prueth_start(prueth->pru[slice], firmwares[slice].pru);
		if (ret) {
			dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
			goto unwind_slices;
		}

		ret = prueth_start(prueth->rtu[slice], firmwares[slice].rtu);
		if (ret) {
			dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
			rproc_shutdown(prueth->pru[slice]);
			goto unwind_slices;
		}

		ret = prueth_start(prueth->txpru[slice], firmwares[slice].txpru);
		if (ret) {
			dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
			rproc_shutdown(prueth->rtu[slice]);
			rproc_shutdown(prueth->pru[slice]);
			goto unwind_slices;
		}
	}

	return 0;

unwind_slices:
	while (--slice >= 0) {
		prueth_shutdown(prueth->txpru[slice]);
		prueth_shutdown(prueth->rtu[slice]);
		prueth_shutdown(prueth->pru[slice]);
	}

	return ret;
}

static void prueth_emac_stop(struct prueth *prueth)
{
	int slice;

	for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
		prueth_shutdown(prueth->txpru[slice]);
		prueth_shutdown(prueth->rtu[slice]);
		prueth_shutdown(prueth->pru[slice]);
	}
}

static int prueth_emac_common_start(struct prueth *prueth)
{
	struct prueth_emac *emac;
	int ret = 0;
	int slice;

	if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
		return -EINVAL;

	/* clear SMEM and MSMC settings for all slices */
	memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
	memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);

	icssg_class_default(prueth->miig_rt, ICSS_SLICE0, 0, false);
	icssg_class_default(prueth->miig_rt, ICSS_SLICE1, 0, false);

	if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
		icssg_init_fw_offload_mode(prueth);
	else
		icssg_init_emac_mode(prueth);

	for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
		emac = prueth->emac[slice];
		if (!emac)
			continue;
		ret = icssg_config(prueth, emac, slice);
		if (ret)
			goto disable_class;
	}

	ret = prueth_emac_start(prueth);
	if (ret)
		goto disable_class;

	emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
	       prueth->emac[ICSS_SLICE1];
	ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
			    emac, IEP_DEFAULT_CYCLE_TIME_NS);
	if (ret) {
		dev_err(prueth->dev, "Failed to initialize IEP module\n");
		goto stop_pruss;
	}

	return 0;

stop_pruss:
	prueth_emac_stop(prueth);

disable_class:
	icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
	icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);

	return ret;
}

static int prueth_emac_common_stop(struct prueth *prueth)
{
	struct prueth_emac *emac;

	if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
		return -EINVAL;

	icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
	icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);

	prueth_emac_stop(prueth);

	emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
	       prueth->emac[ICSS_SLICE1];
	icss_iep_exit(emac->iep);

	return 0;
}

/* called back by PHY layer if there is a change in link state of HW port */
static void emac_adjust_link(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	struct prueth *prueth = emac->prueth;
	bool new_state = false;
	unsigned long flags;

	if (phydev->link) {
		/* check the mode of operation - full/half duplex */
		if (phydev->duplex != emac->duplex) {
			new_state = true;
			emac->duplex = phydev->duplex;
		}
		if (phydev->speed != emac->speed) {
			new_state = true;
			emac->speed = phydev->speed;
		}
		if (!emac->link) {
			new_state = true;
			emac->link = 1;
		}
	} else if (emac->link) {
		new_state = true;
		emac->link = 0;

		/* f/w should support 100 & 1000 */
		emac->speed = SPEED_1000;

		/* half duplex may not be supported by f/w */
		emac->duplex = DUPLEX_FULL;
	}

	if (new_state) {
		phy_print_status(phydev);

		/* update RGMII and MII configuration based on PHY negotiated
		 * values
		 */
		if (emac->link) {
			if (emac->duplex == DUPLEX_HALF)
				icssg_config_half_duplex(emac);
			/* Set the RGMII cfg for gig en and full duplex */
			icssg_update_rgmii_cfg(prueth->miig_rt, emac);

			/* update the Tx IPG based on 100M/1G speed */
			spin_lock_irqsave(&emac->lock, flags);
			icssg_config_ipg(emac);
			spin_unlock_irqrestore(&emac->lock, flags);
			icssg_config_set_speed(emac);
			icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);

		} else {
			icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
		}
	}

	if (emac->link) {
		/* reactivate the transmit queue */
		netif_tx_wake_all_queues(ndev);
	} else {
		netif_tx_stop_all_queues(ndev);
		prueth_cleanup_tx_ts(emac);
	}
}

static enum hrtimer_restart emac_rx_timer_callback(struct hrtimer *timer)
{
	struct prueth_emac *emac =
			container_of(timer, struct prueth_emac, rx_hrtimer);
	int rx_flow = PRUETH_RX_FLOW_DATA;

	enable_irq(emac->rx_chns.irq[rx_flow]);
	return HRTIMER_NORESTART;
}

static int emac_phy_connect(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	struct net_device *ndev = emac->ndev;
	/* connect PHY */
	ndev->phydev = of_phy_connect(emac->ndev, emac->phy_node,
				      &emac_adjust_link, 0,
				      emac->phy_if);
	if (!ndev->phydev) {
		dev_err(prueth->dev, "couldn't connect to phy %s\n",
			emac->phy_node->full_name);
		return -ENODEV;
	}

	if (!emac->half_duplex) {
		dev_dbg(prueth->dev, "half duplex mode is not supported\n");
		phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
		phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	}

	/* remove unsupported modes */
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);

	if (emac->phy_if == PHY_INTERFACE_MODE_MII)
		phy_set_max_speed(ndev->phydev, SPEED_100);

	return 0;
}

static u64 prueth_iep_gettime(void *clockops_data, struct ptp_system_timestamp *sts)
{
	u32 hi_rollover_count, hi_rollover_count_r;
	struct prueth_emac *emac = clockops_data;
	struct prueth *prueth = emac->prueth;
	void __iomem *fw_hi_r_count_addr;
	void __iomem *fw_count_hi_addr;
	u32 iepcount_hi, iepcount_hi_r;
	unsigned long flags;
	u32 iepcount_lo;
	u64 ts = 0;

	fw_count_hi_addr = prueth->shram.va + TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET;
	fw_hi_r_count_addr = prueth->shram.va + TIMESYNC_FW_WC_HI_ROLLOVER_COUNT_OFFSET;

	local_irq_save(flags);
	do {
		iepcount_hi = icss_iep_get_count_hi(emac->iep);
		iepcount_hi += readl(fw_count_hi_addr);
		hi_rollover_count = readl(fw_hi_r_count_addr);
		ptp_read_system_prets(sts);
		iepcount_lo = icss_iep_get_count_low(emac->iep);
		ptp_read_system_postts(sts);

		iepcount_hi_r = icss_iep_get_count_hi(emac->iep);
		iepcount_hi_r += readl(fw_count_hi_addr);
		hi_rollover_count_r = readl(fw_hi_r_count_addr);
	} while ((iepcount_hi_r != iepcount_hi) ||
		 (hi_rollover_count != hi_rollover_count_r));
	local_irq_restore(flags);

	ts = ((u64)hi_rollover_count) << 23 | iepcount_hi;
	ts = ts * (u64)IEP_DEFAULT_CYCLE_TIME_NS + iepcount_lo;

	return ts;
}

static void prueth_iep_settime(void *clockops_data, u64 ns)
{
	struct icssg_setclock_desc __iomem *sc_descp;
	struct prueth_emac *emac = clockops_data;
	struct icssg_setclock_desc sc_desc;
	u64 cyclecount;
	u32 cycletime;
	int timeout;

	sc_descp = emac->prueth->shram.va + TIMESYNC_FW_WC_SETCLOCK_DESC_OFFSET;

	cycletime = IEP_DEFAULT_CYCLE_TIME_NS;
	cyclecount = ns / cycletime;

	memset(&sc_desc, 0, sizeof(sc_desc));
	sc_desc.margin = cycletime - 1000;
	sc_desc.cyclecounter0_set = cyclecount & GENMASK(31, 0);
	sc_desc.cyclecounter1_set = (cyclecount & GENMASK(63, 32)) >> 32;
	sc_desc.iepcount_set = ns % cycletime;
	/* Count from 0 to (cycle time) - emac->iep->def_inc */
	sc_desc.CMP0_current = cycletime - emac->iep->def_inc;

	memcpy_toio(sc_descp, &sc_desc, sizeof(sc_desc));

	writeb(1, &sc_descp->request);

	timeout = 5;	/* fw should take 2-3 ms */
	while (timeout--) {
		if (readb(&sc_descp->acknowledgment))
			return;

		usleep_range(500, 1000);
	}

	dev_err(emac->prueth->dev, "settime timeout\n");
}

static int prueth_perout_enable(void *clockops_data,
				struct ptp_perout_request *req, int on,
				u64 *cmp)
{
	struct prueth_emac *emac = clockops_data;
	u32 reduction_factor = 0, offset = 0;
	struct timespec64 ts;
	u64 current_cycle;
	u64 start_offset;
	u64 ns_period;

	if (!on)
		return 0;

	/* Any firmware specific stuff for PPS/PEROUT handling */
	ts.tv_sec = req->period.sec;
	ts.tv_nsec = req->period.nsec;
	ns_period = timespec64_to_ns(&ts);

	/* f/w doesn't support period less than cycle time */
	if (ns_period < IEP_DEFAULT_CYCLE_TIME_NS)
		return -ENXIO;

	reduction_factor = ns_period / IEP_DEFAULT_CYCLE_TIME_NS;
	offset = ns_period % IEP_DEFAULT_CYCLE_TIME_NS;

	/* f/w requires at least 1uS within a cycle so CMP
	 * can trigger after SYNC is enabled
	 */
	if (offset < 5 * NSEC_PER_USEC)
		offset = 5 * NSEC_PER_USEC;

	/* if offset is close to cycle time then we will miss
	 * the CMP event for last tick when IEP rolls over.
	 * In normal mode, IEP tick is 4ns.
	 * In slow compensation it could be 0ns or 8ns at
	 * every slow compensation cycle.
	 */
	if (offset > IEP_DEFAULT_CYCLE_TIME_NS - 8)
		offset = IEP_DEFAULT_CYCLE_TIME_NS - 8;

	/* we're in shadow mode so need to set upper 32-bits */
	*cmp = (u64)offset << 32;

	writel(reduction_factor, emac->prueth->shram.va +
	       TIMESYNC_FW_WC_SYNCOUT_REDUCTION_FACTOR_OFFSET);

	current_cycle = icssg_read_time(emac->prueth->shram.va +
					TIMESYNC_FW_WC_CYCLECOUNT_OFFSET);

	/* Rounding of current_cycle count to next second */
	start_offset = roundup(current_cycle, MSEC_PER_SEC);

	hi_lo_writeq(start_offset, emac->prueth->shram.va +
		     TIMESYNC_FW_WC_SYNCOUT_START_TIME_CYCLECOUNT_OFFSET);

	return 0;
}

const struct icss_iep_clockops prueth_iep_clockops = {
	.settime = prueth_iep_settime,
	.gettime = prueth_iep_gettime,
	.perout_enable = prueth_perout_enable,
};

static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr)
{
	struct net_device *real_dev;
	struct prueth_emac *emac;
	int port_mask;
	u8 vlan_id;

	vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC;
	real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
	emac = netdev_priv(real_dev);

	port_mask = BIT(emac->port_id) | icssg_fdb_lookup(emac, addr, vlan_id);
	icssg_fdb_add_del(emac, addr, vlan_id, port_mask, true);
	icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, true);

	return 0;
}

static int icssg_prueth_del_mcast(struct net_device *ndev, const u8 *addr)
{
	struct net_device *real_dev;
	struct prueth_emac *emac;
	int other_port_mask;
	int port_mask;
	u8 vlan_id;

	vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC;
	real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
	emac = netdev_priv(real_dev);

	port_mask = BIT(emac->port_id);
	other_port_mask = port_mask ^ icssg_fdb_lookup(emac, addr, vlan_id);

	icssg_fdb_add_del(emac, addr, vlan_id, port_mask, false);
	icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, false);

	if (other_port_mask) {
		icssg_fdb_add_del(emac, addr, vlan_id, other_port_mask, true);
		icssg_vtbl_modify(emac, vlan_id, other_port_mask,
				  other_port_mask, true);
	}

	return 0;
}

static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;

	icssg_fdb_add_del(emac, addr, prueth->default_vlan,
			  ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
			  ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
			  ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
			  ICSSG_FDB_ENTRY_BLOCK, true);

	icssg_vtbl_modify(emac, emac->port_vlan, BIT(emac->port_id),
			  BIT(emac->port_id), true);
	return 0;
}

static int icssg_prueth_hsr_del_mcast(struct net_device *ndev, const u8 *addr)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;

	icssg_fdb_add_del(emac, addr, prueth->default_vlan,
			  ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
			  ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
			  ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
			  ICSSG_FDB_ENTRY_BLOCK, false);

	return 0;
}

static int icssg_update_vlan_mcast(struct net_device *vdev, int vid,
				   void *args)
{
	struct prueth_emac *emac = args;

	if (!vdev || !vid)
		return 0;

	netif_addr_lock_bh(vdev);
	__hw_addr_sync_multiple(&emac->vlan_mcast_list[vid], &vdev->mc,
				vdev->addr_len);
	netif_addr_unlock_bh(vdev);

	__hw_addr_sync_dev(&emac->vlan_mcast_list[vid], vdev,
			   icssg_prueth_add_mcast, icssg_prueth_del_mcast);

	return 0;
}

/**
 * emac_ndo_open - EMAC device open
 * @ndev: network adapter device
 *
 * Called when system wants to start the interface.
 *
 * Return: 0 for a successful open, or appropriate error code
 */
static int emac_ndo_open(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	int ret, i, num_data_chn = emac->tx_ch_num;
	struct icssg_flow_cfg __iomem *flow_cfg;
	struct prueth *prueth = emac->prueth;
	int slice = prueth_emac_slice(emac);
	struct device *dev = prueth->dev;
	int max_rx_flows;
	int rx_flow;

	/* set h/w MAC as user might have re-configured */
	ether_addr_copy(emac->mac_addr, ndev->dev_addr);

	icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
	icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);

	/* Notify the stack of the actual queue counts. */
	ret = netif_set_real_num_tx_queues(ndev, num_data_chn);
	if (ret) {
		dev_err(dev, "cannot set real number of tx queues\n");
		return ret;
	}

	init_completion(&emac->cmd_complete);
	ret = prueth_init_tx_chns(emac);
	if (ret) {
		dev_err(dev, "failed to init tx channel: %d\n", ret);
		return ret;
	}

	max_rx_flows = PRUETH_MAX_RX_FLOWS;
	ret = prueth_init_rx_chns(emac, &emac->rx_chns, "rx",
				  max_rx_flows, PRUETH_MAX_RX_DESC);
	if (ret) {
		dev_err(dev, "failed to init rx channel: %d\n", ret);
		goto cleanup_tx;
	}

	ret = prueth_ndev_add_tx_napi(emac);
	if (ret)
		goto cleanup_rx;

	/* we use only the highest priority flow for now i.e. @irq[3] */
	rx_flow = PRUETH_RX_FLOW_DATA;
	ret = request_irq(emac->rx_chns.irq[rx_flow], prueth_rx_irq,
			  IRQF_TRIGGER_HIGH, dev_name(dev), emac);
	if (ret) {
		dev_err(dev, "unable to request RX IRQ\n");
		goto cleanup_napi;
	}

	if (!prueth->emacs_initialized) {
		ret = prueth_emac_common_start(prueth);
		if (ret)
			goto free_rx_irq;
	}

	flow_cfg = emac->dram.va + ICSSG_CONFIG_OFFSET + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET;
	writew(emac->rx_flow_id_base, &flow_cfg->rx_base_flow);
	ret = emac_fdb_flow_id_updated(emac);

	if (ret) {
		netdev_err(ndev, "Failed to update Rx Flow ID %d", ret);
		goto stop;
	}

	icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);

	ret = request_threaded_irq(emac->tx_ts_irq, NULL, prueth_tx_ts_irq,
				   IRQF_ONESHOT, dev_name(dev), emac);
	if (ret)
		goto stop;

	/* Prepare RX */
	ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
	if (ret)
		goto free_tx_ts_irq;

	ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
	if (ret)
		goto reset_rx_chn;

	for (i = 0; i < emac->tx_ch_num; i++) {
		ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
		if (ret)
			goto reset_tx_chan;
	}

	/* Enable NAPI in Tx and Rx direction */
	for (i = 0; i < emac->tx_ch_num; i++)
		napi_enable(&emac->tx_chns[i].napi_tx);
	napi_enable(&emac->napi_rx);

	/* start PHY */
	phy_start(ndev->phydev);

	prueth->emacs_initialized++;

	queue_work(system_long_wq, &emac->stats_work.work);

	return 0;

reset_tx_chan:
	/* Since interface is not yet up, there wouldn't be
	 * any SKB for completion. So set false to free_skb
	 */
	prueth_reset_tx_chan(emac, i, false);
reset_rx_chn:
	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
free_tx_ts_irq:
	free_irq(emac->tx_ts_irq, emac);
stop:
	if (!prueth->emacs_initialized)
		prueth_emac_common_stop(prueth);
free_rx_irq:
	free_irq(emac->rx_chns.irq[rx_flow], emac);
cleanup_napi:
	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
cleanup_rx:
	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
cleanup_tx:
	prueth_cleanup_tx_chns(emac);

	return ret;
}

/**
 * emac_ndo_stop - EMAC device stop
 * @ndev: network adapter device
 *
 * Called when system wants to stop or down the interface.
 *
 * Return: Always 0 (Success)
 */
static int emac_ndo_stop(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int rx_flow = PRUETH_RX_FLOW_DATA;
	int max_rx_flows;
	int ret, i;

	/* inform the upper layers. */
	netif_tx_stop_all_queues(ndev);

	/* block packets from wire */
	if (ndev->phydev)
		phy_stop(ndev->phydev);

	if (emac->prueth->is_hsr_offload_mode)
		__dev_mc_unsync(ndev, icssg_prueth_hsr_del_mcast);
	else
		__dev_mc_unsync(ndev, icssg_prueth_del_mcast);

	atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
	/* ensure new tdown_cnt value is visible */
	smp_mb__after_atomic();
	/* tear down and disable UDMA channels */
	reinit_completion(&emac->tdown_complete);
	for (i = 0; i < emac->tx_ch_num; i++)
		k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);

	ret = wait_for_completion_timeout(&emac->tdown_complete,
					  msecs_to_jiffies(1000));
	if (!ret)
		netdev_err(ndev, "tx teardown timeout\n");

	prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
	for (i = 0; i < emac->tx_ch_num; i++) {
		napi_disable(&emac->tx_chns[i].napi_tx);
		hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer);
	}

	max_rx_flows = PRUETH_MAX_RX_FLOWS;
	k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);

	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);

	napi_disable(&emac->napi_rx);
	hrtimer_cancel(&emac->rx_hrtimer);

	cancel_work_sync(&emac->rx_mode_work);

	/* Destroying the queued work in ndo_stop() */
	cancel_delayed_work_sync(&emac->stats_work);

	/* stop PRUs */
	if (prueth->emacs_initialized == 1)
		prueth_emac_common_stop(prueth);

	free_irq(emac->tx_ts_irq, emac);

	free_irq(emac->rx_chns.irq[rx_flow], emac);
	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);

	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
	prueth_cleanup_tx_chns(emac);

	prueth->emacs_initialized--;

	return 0;
}

static void emac_ndo_set_rx_mode_work(struct work_struct *work)
{
	struct prueth_emac *emac = container_of(work, struct prueth_emac, rx_mode_work);
	struct net_device *ndev = emac->ndev;
	bool promisc, allmulti;

	if (!netif_running(ndev))
		return;

	promisc = ndev->flags & IFF_PROMISC;
	allmulti = ndev->flags & IFF_ALLMULTI;
	icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_DISABLE);
	icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_DISABLE);

	if (promisc) {
		icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_ENABLE);
		icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
		return;
	}

	if (allmulti) {
		icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
		return;
	}

	if (emac->prueth->is_hsr_offload_mode) {
		__dev_mc_sync(ndev, icssg_prueth_hsr_add_mcast,
			      icssg_prueth_hsr_del_mcast);
	} else {
		__dev_mc_sync(ndev, icssg_prueth_add_mcast,
			      icssg_prueth_del_mcast);
		if (rtnl_trylock()) {
			vlan_for_each(ndev, icssg_update_vlan_mcast, emac);
			rtnl_unlock();
		}
	}
}

/**
 * emac_ndo_set_rx_mode - EMAC set receive mode function
 * @ndev: The EMAC network adapter
 *
 * Called when system wants to set the receive mode of the device.
 *
 */
static void emac_ndo_set_rx_mode(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);

	queue_work(emac->cmd_wq, &emac->rx_mode_work);
}

static netdev_features_t emac_ndo_fix_features(struct net_device *ndev,
					       netdev_features_t features)
{
	/* hsr tag insertion offload and hsr dup offload are tightly coupled in
	 * firmware implementation. Both these features need to be enabled /
	 * disabled together.
	 */
	if (!(ndev->features & (NETIF_F_HW_HSR_DUP | NETIF_F_HW_HSR_TAG_INS)))
		if ((features & NETIF_F_HW_HSR_DUP) ||
		    (features & NETIF_F_HW_HSR_TAG_INS))
			features |= NETIF_F_HW_HSR_DUP |
				    NETIF_F_HW_HSR_TAG_INS;

	if ((ndev->features & NETIF_F_HW_HSR_DUP) ||
	    (ndev->features & NETIF_F_HW_HSR_TAG_INS))
		if (!(features & NETIF_F_HW_HSR_DUP) ||
		    !(features & NETIF_F_HW_HSR_TAG_INS))
			features &= ~(NETIF_F_HW_HSR_DUP |
				      NETIF_F_HW_HSR_TAG_INS);

	return features;
}

static int emac_ndo_vlan_rx_add_vid(struct net_device *ndev,
				    __be16 proto, u16 vid)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int port_mask = BIT(emac->port_id);
	int untag_mask = 0;

	if (prueth->is_hsr_offload_mode)
		port_mask |= BIT(PRUETH_PORT_HOST);

	__hw_addr_init(&emac->vlan_mcast_list[vid]);
	netdev_dbg(emac->ndev, "VID add vid:%u port_mask:%X untag_mask %X\n",
		   vid, port_mask, untag_mask);

	icssg_vtbl_modify(emac, vid, port_mask, untag_mask, true);
	icssg_set_pvid(emac->prueth, vid, emac->port_id);

	return 0;
}

static int emac_ndo_vlan_rx_del_vid(struct net_device *ndev,
				    __be16 proto, u16 vid)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int port_mask = BIT(emac->port_id);
	int untag_mask = 0;

	if (prueth->is_hsr_offload_mode)
		port_mask = BIT(PRUETH_PORT_HOST);

	netdev_dbg(emac->ndev, "VID del vid:%u port_mask:%X untag_mask %X\n",
		   vid, port_mask, untag_mask);
	icssg_vtbl_modify(emac, vid, port_mask, untag_mask, false);

	return 0;
}

static const struct net_device_ops emac_netdev_ops = {
	.ndo_open = emac_ndo_open,
	.ndo_stop = emac_ndo_stop,
	.ndo_start_xmit = icssg_ndo_start_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_tx_timeout = icssg_ndo_tx_timeout,
	.ndo_set_rx_mode = emac_ndo_set_rx_mode,
	.ndo_eth_ioctl = icssg_ndo_ioctl,
	.ndo_get_stats64 = icssg_ndo_get_stats64,
	.ndo_get_phys_port_name = icssg_ndo_get_phys_port_name,
	.ndo_fix_features = emac_ndo_fix_features,
	.ndo_vlan_rx_add_vid = emac_ndo_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = emac_ndo_vlan_rx_del_vid,
};

static int prueth_netdev_init(struct prueth *prueth,
			      struct device_node *eth_node)
{
	int ret, num_tx_chn = PRUETH_MAX_TX_QUEUES;
	struct prueth_emac *emac;
	struct net_device *ndev;
	enum prueth_port port;
	const char *irq_name;
	enum prueth_mac mac;

	port = prueth_node_port(eth_node);
	if (port == PRUETH_PORT_INVALID)
		return -EINVAL;

	mac = prueth_node_mac(eth_node);
	if (mac == PRUETH_MAC_INVALID)
		return -EINVAL;

	ndev = alloc_etherdev_mq(sizeof(*emac), num_tx_chn);
	if (!ndev)
		return -ENOMEM;

	emac = netdev_priv(ndev);
	emac->prueth = prueth;
	emac->ndev = ndev;
	emac->port_id = port;
	emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
	if (!emac->cmd_wq) {
		ret = -ENOMEM;
		goto free_ndev;
	}
	INIT_WORK(&emac->rx_mode_work, emac_ndo_set_rx_mode_work);

	INIT_DELAYED_WORK(&emac->stats_work, icssg_stats_work_handler);

	ret = pruss_request_mem_region(prueth->pruss,
				       port == PRUETH_PORT_MII0 ?
				       PRUSS_MEM_DRAM0 : PRUSS_MEM_DRAM1,
				       &emac->dram);
	if (ret) {
		dev_err(prueth->dev, "unable to get DRAM: %d\n", ret);
		ret = -ENOMEM;
		goto free_wq;
	}

	emac->tx_ch_num = 1;

	irq_name = "tx_ts0";
	if (emac->port_id == PRUETH_PORT_MII1)
		irq_name = "tx_ts1";
	emac->tx_ts_irq = platform_get_irq_byname_optional(prueth->pdev, irq_name);
	if (emac->tx_ts_irq < 0) {
		ret = dev_err_probe(prueth->dev, emac->tx_ts_irq, "could not get tx_ts_irq\n");
		goto free;
	}

	SET_NETDEV_DEV(ndev, prueth->dev);
	spin_lock_init(&emac->lock);
	mutex_init(&emac->cmd_lock);

	emac->phy_node = of_parse_phandle(eth_node, "phy-handle", 0);
	if (!emac->phy_node && !of_phy_is_fixed_link(eth_node)) {
		dev_err(prueth->dev, "couldn't find phy-handle\n");
		ret = -ENODEV;
		goto free;
	} else if (of_phy_is_fixed_link(eth_node)) {
		ret = of_phy_register_fixed_link(eth_node);
		if (ret) {
			ret = dev_err_probe(prueth->dev, ret,
					    "failed to register fixed-link phy\n");
			goto free;
		}

		emac->phy_node = eth_node;
	}

	ret = of_get_phy_mode(eth_node, &emac->phy_if);
	if (ret) {
		dev_err(prueth->dev, "could not get phy-mode property\n");
		goto free;
	}

	if (emac->phy_if != PHY_INTERFACE_MODE_MII &&
	    !phy_interface_mode_is_rgmii(emac->phy_if)) {
		dev_err(prueth->dev, "PHY mode unsupported %s\n", phy_modes(emac->phy_if));
		ret = -EINVAL;
		goto free;
	}

	/* AM65 SR2.0 has TX Internal delay always enabled by hardware
	 * and it is not possible to disable TX Internal delay. The below
	 * switch case block describes how we handle different phy modes
	 * based on hardware restriction.
	 */
	switch (emac->phy_if) {
	case PHY_INTERFACE_MODE_RGMII_ID:
		emac->phy_if = PHY_INTERFACE_MODE_RGMII_RXID;
		break;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		emac->phy_if = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		dev_err(prueth->dev, "RGMII mode without TX delay is not supported");
		ret = -EINVAL;
		goto free;
	default:
		break;
	}

	/* get mac address from DT and set private and netdev addr */
	ret = of_get_ethdev_address(eth_node, ndev);
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		eth_hw_addr_random(ndev);
		dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n",
			 port, ndev->dev_addr);
	}
	ether_addr_copy(emac->mac_addr, ndev->dev_addr);

	ndev->dev.of_node = eth_node;
	ndev->min_mtu = PRUETH_MIN_PKT_SIZE;
	ndev->max_mtu = PRUETH_MAX_MTU;
	ndev->netdev_ops = &emac_netdev_ops;
	ndev->ethtool_ops = &icssg_ethtool_ops;
	ndev->hw_features = NETIF_F_SG;
	ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
	ndev->hw_features |= NETIF_PRUETH_HSR_OFFLOAD_FEATURES;

	netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
	hrtimer_init(&emac->rx_hrtimer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_PINNED);
	emac->rx_hrtimer.function = &emac_rx_timer_callback;
	prueth->emac[mac] = emac;

	return 0;

free:
	pruss_release_mem_region(prueth->pruss, &emac->dram);
free_wq:
	destroy_workqueue(emac->cmd_wq);
free_ndev:
	emac->ndev = NULL;
	prueth->emac[mac] = NULL;
	free_netdev(ndev);

	return ret;
}

bool prueth_dev_check(const struct net_device *ndev)
{
	if (ndev->netdev_ops == &emac_netdev_ops && netif_running(ndev)) {
		struct prueth_emac *emac = netdev_priv(ndev);

		return emac->prueth->is_switch_mode;
	}

	return false;
}

static void prueth_offload_fwd_mark_update(struct prueth *prueth)
{
	int set_val = 0;
	int i;

	if (prueth->br_members == (BIT(PRUETH_PORT_MII0) | BIT(PRUETH_PORT_MII1)))
		set_val = 1;

	dev_dbg(prueth->dev, "set offload_fwd_mark %d\n", set_val);

	for (i = PRUETH_MAC0; i < PRUETH_NUM_MACS; i++) {
		struct prueth_emac *emac = prueth->emac[i];

		if (!emac || !emac->ndev)
			continue;

		emac->offload_fwd_mark = set_val;
	}
}

static int prueth_emac_restart(struct prueth *prueth)
{
	struct prueth_emac *emac0 = prueth->emac[PRUETH_MAC0];
	struct prueth_emac *emac1 = prueth->emac[PRUETH_MAC1];
	int ret;

	/* Detach the net_device for both PRUeth ports */
	if (netif_running(emac0->ndev))
		netif_device_detach(emac0->ndev);
	if (netif_running(emac1->ndev))
		netif_device_detach(emac1->ndev);

	/* Disable both PRUeth ports */
	ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
	ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);
	if (ret)
		return ret;

	/* Stop both pru cores for both PRUeth ports */
	ret = prueth_emac_common_stop(prueth);
	if (ret) {
		dev_err(prueth->dev, "Failed to stop the firmwares");
		return ret;
	}

	/* Start both pru cores for both PRUeth ports */
	ret = prueth_emac_common_start(prueth);
	if (ret) {
		dev_err(prueth->dev, "Failed to start the firmwares");
		return ret;
	}

	/* Enable forwarding for both PRUeth ports */
	ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD);
	ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD);

	/* Attach net_device for both PRUeth ports */
	netif_device_attach(emac0->ndev);
	netif_device_attach(emac1->ndev);

	return ret;
}

static void icssg_change_mode(struct prueth *prueth)
{
	struct prueth_emac *emac;
	int mac, ret;

	ret = prueth_emac_restart(prueth);
	if (ret) {
		dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
		return;
	}

	for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
		emac = prueth->emac[mac];
		if (prueth->is_hsr_offload_mode) {
			if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM)
				icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE);
			else
				icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE);
		}

		if (netif_running(emac->ndev)) {
			icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan,
					  ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
					  ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
					  ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
					  ICSSG_FDB_ENTRY_BLOCK,
					  true);
			icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID,
					  BIT(emac->port_id) | DEFAULT_PORT_MASK,
					  BIT(emac->port_id) | DEFAULT_UNTAG_MASK,
					  true);
			if (prueth->is_hsr_offload_mode)
				icssg_vtbl_modify(emac, DEFAULT_VID,
						  DEFAULT_PORT_MASK,
						  DEFAULT_UNTAG_MASK, true);
			icssg_set_pvid(prueth, emac->port_vlan, emac->port_id);
			if (prueth->is_switch_mode)
				icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);
		}
	}
}

static int prueth_netdevice_port_link(struct net_device *ndev,
				      struct net_device *br_ndev,
				      struct netlink_ext_ack *extack)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int err;

	if (!prueth->br_members) {
		prueth->hw_bridge_dev = br_ndev;
	} else {
		/* This is adding the port to a second bridge, this is
		 * unsupported
		 */
		if (prueth->hw_bridge_dev != br_ndev)
			return -EOPNOTSUPP;
	}

	err = switchdev_bridge_port_offload(br_ndev, ndev, emac,
					    &prueth->prueth_switchdev_nb,
					    &prueth->prueth_switchdev_bl_nb,
					    false, extack);
	if (err)
		return err;

	prueth->br_members |= BIT(emac->port_id);

	if (!prueth->is_switch_mode) {
		if (prueth->br_members & BIT(PRUETH_PORT_MII0) &&
		    prueth->br_members & BIT(PRUETH_PORT_MII1)) {
			prueth->is_switch_mode = true;
			prueth->default_vlan = 1;
			emac->port_vlan = prueth->default_vlan;
			icssg_change_mode(prueth);
		}
	}

	prueth_offload_fwd_mark_update(prueth);

	return NOTIFY_DONE;
}

static void prueth_netdevice_port_unlink(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int ret;

	prueth->br_members &= ~BIT(emac->port_id);

	if (prueth->is_switch_mode) {
		prueth->is_switch_mode = false;
		emac->port_vlan = 0;
		ret = prueth_emac_restart(prueth);
		if (ret) {
			dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
			return;
		}
	}

	prueth_offload_fwd_mark_update(prueth);

	if (!prueth->br_members)
		prueth->hw_bridge_dev = NULL;
}

static int prueth_hsr_port_link(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	struct prueth_emac *emac0;
	struct prueth_emac *emac1;

	emac0 = prueth->emac[PRUETH_MAC0];
	emac1 = prueth->emac[PRUETH_MAC1];

	if (prueth->is_switch_mode)
		return -EOPNOTSUPP;

	prueth->hsr_members |= BIT(emac->port_id);
	if (!prueth->is_hsr_offload_mode) {
		if (prueth->hsr_members & BIT(PRUETH_PORT_MII0) &&
		    prueth->hsr_members & BIT(PRUETH_PORT_MII1)) {
			if (!(emac0->ndev->features &
			      NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
			    !(emac1->ndev->features &
			      NETIF_PRUETH_HSR_OFFLOAD_FEATURES))
				return -EOPNOTSUPP;
			prueth->is_hsr_offload_mode = true;
			prueth->default_vlan = 1;
			emac0->port_vlan = prueth->default_vlan;
			emac1->port_vlan = prueth->default_vlan;
			icssg_change_mode(prueth);
			netdev_dbg(ndev, "Enabling HSR offload mode\n");
		}
	}

	return 0;
}

static void prueth_hsr_port_unlink(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	struct prueth_emac *emac0;
	struct prueth_emac *emac1;
	int ret;

	emac0 = prueth->emac[PRUETH_MAC0];
	emac1 = prueth->emac[PRUETH_MAC1];

	prueth->hsr_members &= ~BIT(emac->port_id);
	if (prueth->is_hsr_offload_mode) {
		prueth->is_hsr_offload_mode = false;
		emac0->port_vlan = 0;
		emac1->port_vlan = 0;
		prueth->hsr_dev = NULL;
		ret = prueth_emac_restart(prueth);
		if (ret) {
			dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
			return;
		}
		netdev_dbg(ndev, "Disabling HSR Offload mode\n");
	}
}

/* netdev notifier */
static int prueth_netdevice_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info;
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int ret = NOTIFY_DONE;

	if (ndev->netdev_ops != &emac_netdev_ops)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		info = ptr;

		if ((ndev->features & NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
		    is_hsr_master(info->upper_dev)) {
			if (info->linking) {
				if (!prueth->hsr_dev) {
					prueth->hsr_dev = info->upper_dev;
					icssg_class_set_host_mac_addr(prueth->miig_rt,
								      prueth->hsr_dev->dev_addr);
				} else {
					if (prueth->hsr_dev != info->upper_dev) {
						netdev_dbg(ndev, "Both interfaces must be linked to same upper device\n");
						return -EOPNOTSUPP;
					}
				}
				prueth_hsr_port_link(ndev);
			} else {
				prueth_hsr_port_unlink(ndev);
			}
		}

		if (netif_is_bridge_master(info->upper_dev)) {
			if (info->linking)
				ret = prueth_netdevice_port_link(ndev, info->upper_dev, extack);
			else
				prueth_netdevice_port_unlink(ndev);
		}
		break;
	default:
		return NOTIFY_DONE;
	}

	return notifier_from_errno(ret);
}

static int prueth_register_notifiers(struct prueth *prueth)
{
	int ret = 0;

	prueth->prueth_netdevice_nb.notifier_call = &prueth_netdevice_event;
	ret = register_netdevice_notifier(&prueth->prueth_netdevice_nb);
	if (ret) {
		dev_err(prueth->dev, "can't register netdevice notifier\n");
		return ret;
	}

	ret = prueth_switchdev_register_notifiers(prueth);
	if (ret)
		unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);

	return ret;
}

static void prueth_unregister_notifiers(struct prueth *prueth)
{
	prueth_switchdev_unregister_notifiers(prueth);
	unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
}

static int prueth_probe(struct platform_device *pdev)
{
	struct device_node *eth_node, *eth_ports_node;
	struct device_node *eth0_node = NULL;
	struct device_node *eth1_node = NULL;
	struct genpool_data_align gp_data = {
		.align = SZ_64K,
	};
	struct device *dev = &pdev->dev;
	struct device_node *np;
	struct prueth *prueth;
	struct pruss *pruss;
	u32 msmc_ram_size;
	int i, ret;

	np = dev->of_node;

	prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
	if (!prueth)
		return -ENOMEM;

	dev_set_drvdata(dev, prueth);
	prueth->pdev = pdev;
	prueth->pdata = *(const struct prueth_pdata *)device_get_match_data(dev);

	prueth->dev = dev;
	eth_ports_node = of_get_child_by_name(np, "ethernet-ports");
	if (!eth_ports_node)
		return -ENOENT;

	for_each_child_of_node(eth_ports_node, eth_node) {
		u32 reg;

		if (strcmp(eth_node->name, "port"))
			continue;
		ret = of_property_read_u32(eth_node, "reg", &reg);
		if (ret < 0) {
			dev_err(dev, "%pOF error reading port_id %d\n",
				eth_node, ret);
		}

		of_node_get(eth_node);

		if (reg == 0) {
			eth0_node = eth_node;
			if (!of_device_is_available(eth0_node)) {
				of_node_put(eth0_node);
				eth0_node = NULL;
			}
		} else if (reg == 1) {
			eth1_node = eth_node;
			if (!of_device_is_available(eth1_node)) {
				of_node_put(eth1_node);
				eth1_node = NULL;
			}
		} else {
			dev_err(dev, "port reg should be 0 or 1\n");
		}
	}

	of_node_put(eth_ports_node);

	/* At least one node must be present and available else we fail */
	if (!eth0_node && !eth1_node) {
		dev_err(dev, "neither port0 nor port1 node available\n");
		return -ENODEV;
	}

	if (eth0_node == eth1_node) {
		dev_err(dev, "port0 and port1 can't have same reg\n");
		of_node_put(eth0_node);
		return -ENODEV;
	}

	prueth->eth_node[PRUETH_MAC0] = eth0_node;
	prueth->eth_node[PRUETH_MAC1] = eth1_node;

	prueth->miig_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-g-rt");
	if (IS_ERR(prueth->miig_rt)) {
		dev_err(dev, "couldn't get ti,mii-g-rt syscon regmap\n");
		return -ENODEV;
	}

	prueth->mii_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-rt");
	if (IS_ERR(prueth->mii_rt)) {
		dev_err(dev, "couldn't get ti,mii-rt syscon regmap\n");
		return -ENODEV;
	}

	prueth->pa_stats = syscon_regmap_lookup_by_phandle(np, "ti,pa-stats");
	if (IS_ERR(prueth->pa_stats)) {
		dev_err(dev, "couldn't get ti,pa-stats syscon regmap\n");
		prueth->pa_stats = NULL;
	}

	if (eth0_node || eth1_node) {
		ret = prueth_get_cores(prueth, ICSS_SLICE0, false);
		if (ret)
			goto put_cores;
		ret = prueth_get_cores(prueth, ICSS_SLICE1, false);
		if (ret)
			goto put_cores;
	}

	pruss = pruss_get(eth0_node ?
			  prueth->pru[ICSS_SLICE0] : prueth->pru[ICSS_SLICE1]);
	if (IS_ERR(pruss)) {
		ret = PTR_ERR(pruss);
		dev_err(dev, "unable to get pruss handle\n");
		goto put_cores;
	}

	prueth->pruss = pruss;

	ret = pruss_request_mem_region(pruss, PRUSS_MEM_SHRD_RAM2,
				       &prueth->shram);
	if (ret) {
		dev_err(dev, "unable to get PRUSS SHRD RAM2: %d\n", ret);
		goto put_pruss;
	}

	prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
	if (!prueth->sram_pool) {
		dev_err(dev, "unable to get SRAM pool\n");
		ret = -ENODEV;

		goto put_mem;
	}

	msmc_ram_size = MSMC_RAM_SIZE;
	prueth->is_switchmode_supported = prueth->pdata.switch_mode;
	if (prueth->is_switchmode_supported)
		msmc_ram_size = MSMC_RAM_SIZE_SWITCH_MODE;

	/* NOTE: FW bug needs buffer base to be 64KB aligned */
	prueth->msmcram.va =
		(void __iomem *)gen_pool_alloc_algo(prueth->sram_pool,
						    msmc_ram_size,
						    gen_pool_first_fit_align,
						    &gp_data);

	if (!prueth->msmcram.va) {
		ret = -ENOMEM;
		dev_err(dev, "unable to allocate MSMC resource\n");
		goto put_mem;
	}
	prueth->msmcram.pa = gen_pool_virt_to_phys(prueth->sram_pool,
						   (unsigned long)prueth->msmcram.va);
	prueth->msmcram.size = msmc_ram_size;
	memset_io(prueth->msmcram.va, 0, msmc_ram_size);
	dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa,
		prueth->msmcram.va, prueth->msmcram.size);

	prueth->iep0 = icss_iep_get_idx(np, 0);
	if (IS_ERR(prueth->iep0)) {
		ret = dev_err_probe(dev, PTR_ERR(prueth->iep0), "iep0 get failed\n");
		prueth->iep0 = NULL;
		goto free_pool;
	}

	prueth->iep1 = icss_iep_get_idx(np, 1);
	if (IS_ERR(prueth->iep1)) {
		ret = dev_err_probe(dev, PTR_ERR(prueth->iep1), "iep1 get failed\n");
		goto put_iep0;
	}

	if (prueth->pdata.quirk_10m_link_issue) {
		/* Enable IEP1 for FW in 64bit mode as W/A for 10M FD link detect issue under TX
		 * traffic.
		 */
		icss_iep_init_fw(prueth->iep1);
	}

	spin_lock_init(&prueth->vtbl_lock);
	/* setup netdev interfaces */
	if (eth0_node) {
		ret = prueth_netdev_init(prueth, eth0_node);
		if (ret) {
			dev_err_probe(dev, ret, "netdev init %s failed\n",
				      eth0_node->name);
			goto exit_iep;
		}

		prueth->emac[PRUETH_MAC0]->half_duplex =
			of_property_read_bool(eth0_node, "ti,half-duplex-capable");

		prueth->emac[PRUETH_MAC0]->iep = prueth->iep0;
	}

	if (eth1_node) {
		ret = prueth_netdev_init(prueth, eth1_node);
		if (ret) {
			dev_err_probe(dev, ret, "netdev init %s failed\n",
				      eth1_node->name);
			goto netdev_exit;
		}

		prueth->emac[PRUETH_MAC1]->half_duplex =
			of_property_read_bool(eth1_node, "ti,half-duplex-capable");

		prueth->emac[PRUETH_MAC1]->iep = prueth->iep0;
	}

	/* register the network devices */
	if (eth0_node) {
		ret = register_netdev(prueth->emac[PRUETH_MAC0]->ndev);
		if (ret) {
			dev_err(dev, "can't register netdev for port MII0");
			goto netdev_exit;
		}

		prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev;

		ret = emac_phy_connect(prueth->emac[PRUETH_MAC0]);
		if (ret) {
			dev_err(dev,
				"can't connect to MII0 PHY, error -%d", ret);
			goto netdev_unregister;
		}
		phy_attached_info(prueth->emac[PRUETH_MAC0]->ndev->phydev);
	}

	if (eth1_node) {
		ret = register_netdev(prueth->emac[PRUETH_MAC1]->ndev);
		if (ret) {
			dev_err(dev, "can't register netdev for port MII1");
			goto netdev_unregister;
		}

		prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev;
		ret = emac_phy_connect(prueth->emac[PRUETH_MAC1]);
		if (ret) {
			dev_err(dev,
				"can't connect to MII1 PHY, error %d", ret);
			goto netdev_unregister;
		}
		phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev);
	}

	if (prueth->is_switchmode_supported) {
		ret = prueth_register_notifiers(prueth);
		if (ret)
			goto netdev_unregister;

		sprintf(prueth->switch_id, "%s", dev_name(dev));
	}

	dev_info(dev, "TI PRU ethernet driver initialized: %s EMAC mode\n",
		 (!eth0_node || !eth1_node) ? "single" : "dual");
"single" : "dual"); 1712 1713 if (eth1_node) 1714 of_node_put(eth1_node); 1715 if (eth0_node) 1716 of_node_put(eth0_node); 1717 return 0; 1718 1719 netdev_unregister: 1720 for (i = 0; i < PRUETH_NUM_MACS; i++) { 1721 if (!prueth->registered_netdevs[i]) 1722 continue; 1723 if (prueth->emac[i]->ndev->phydev) { 1724 phy_disconnect(prueth->emac[i]->ndev->phydev); 1725 prueth->emac[i]->ndev->phydev = NULL; 1726 } 1727 unregister_netdev(prueth->registered_netdevs[i]); 1728 } 1729 1730 netdev_exit: 1731 for (i = 0; i < PRUETH_NUM_MACS; i++) { 1732 eth_node = prueth->eth_node[i]; 1733 if (!eth_node) 1734 continue; 1735 1736 prueth_netdev_exit(prueth, eth_node); 1737 } 1738 1739 exit_iep: 1740 if (prueth->pdata.quirk_10m_link_issue) 1741 icss_iep_exit_fw(prueth->iep1); 1742 icss_iep_put(prueth->iep1); 1743 1744 put_iep0: 1745 icss_iep_put(prueth->iep0); 1746 prueth->iep0 = NULL; 1747 prueth->iep1 = NULL; 1748 1749 free_pool: 1750 gen_pool_free(prueth->sram_pool, 1751 (unsigned long)prueth->msmcram.va, msmc_ram_size); 1752 1753 put_mem: 1754 pruss_release_mem_region(prueth->pruss, &prueth->shram); 1755 1756 put_pruss: 1757 pruss_put(prueth->pruss); 1758 1759 put_cores: 1760 if (eth0_node || eth1_node) { 1761 prueth_put_cores(prueth, ICSS_SLICE0); 1762 of_node_put(eth0_node); 1763 1764 prueth_put_cores(prueth, ICSS_SLICE1); 1765 of_node_put(eth1_node); 1766 } 1767 1768 return ret; 1769 } 1770 1771 static void prueth_remove(struct platform_device *pdev) 1772 { 1773 struct prueth *prueth = platform_get_drvdata(pdev); 1774 struct device_node *eth_node; 1775 int i; 1776 1777 prueth_unregister_notifiers(prueth); 1778 1779 for (i = 0; i < PRUETH_NUM_MACS; i++) { 1780 if (!prueth->registered_netdevs[i]) 1781 continue; 1782 phy_stop(prueth->emac[i]->ndev->phydev); 1783 phy_disconnect(prueth->emac[i]->ndev->phydev); 1784 prueth->emac[i]->ndev->phydev = NULL; 1785 unregister_netdev(prueth->registered_netdevs[i]); 1786 } 1787 1788 for (i = 0; i < PRUETH_NUM_MACS; i++) { 1789 eth_node = prueth->eth_node[i]; 1790 if (!eth_node) 1791 continue; 1792 1793 prueth_netdev_exit(prueth, eth_node); 1794 } 1795 1796 if (prueth->pdata.quirk_10m_link_issue) 1797 icss_iep_exit_fw(prueth->iep1); 1798 1799 icss_iep_put(prueth->iep1); 1800 icss_iep_put(prueth->iep0); 1801 1802 gen_pool_free(prueth->sram_pool, 1803 (unsigned long)prueth->msmcram.va, 1804 MSMC_RAM_SIZE); 1805 1806 pruss_release_mem_region(prueth->pruss, &prueth->shram); 1807 1808 pruss_put(prueth->pruss); 1809 1810 if (prueth->eth_node[PRUETH_MAC1]) 1811 prueth_put_cores(prueth, ICSS_SLICE1); 1812 1813 if (prueth->eth_node[PRUETH_MAC0]) 1814 prueth_put_cores(prueth, ICSS_SLICE0); 1815 } 1816 1817 static const struct prueth_pdata am654_icssg_pdata = { 1818 .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE, 1819 .quirk_10m_link_issue = 1, 1820 .switch_mode = 1, 1821 }; 1822 1823 static const struct prueth_pdata am64x_icssg_pdata = { 1824 .fdqring_mode = K3_RINGACC_RING_MODE_RING, 1825 .quirk_10m_link_issue = 1, 1826 .switch_mode = 1, 1827 }; 1828 1829 static const struct of_device_id prueth_dt_match[] = { 1830 { .compatible = "ti,am654-icssg-prueth", .data = &am654_icssg_pdata }, 1831 { .compatible = "ti,am642-icssg-prueth", .data = &am64x_icssg_pdata }, 1832 { /* sentinel */ } 1833 }; 1834 MODULE_DEVICE_TABLE(of, prueth_dt_match); 1835 1836 static struct platform_driver prueth_driver = { 1837 .probe = prueth_probe, 1838 .remove = prueth_remove, 1839 .driver = { 1840 .name = "icssg-prueth", 1841 .of_match_table = prueth_dt_match, 1842 .pm = &prueth_dev_pm_ops, 1843 }, 
};
module_platform_driver(prueth_driver);

MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
MODULE_DESCRIPTION("PRUSS ICSSG Ethernet Driver");
MODULE_LICENSE("GPL");