// SPDX-License-Identifier: GPL-2.0

/* Texas Instruments ICSSG SR1.0 Ethernet Driver
 *
 * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
 * Copyright (c) Siemens AG, 2024
 *
 */

#include <linux/etherdevice.h>
#include <linux/genalloc.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/phy.h>
#include <linux/remoteproc/pruss.h>
#include <linux/pruss_driver.h>

#include "icssg_prueth.h"
#include "icssg_mii_rt.h"
#include "../k3-cppi-desc-pool.h"

#define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG SR1.0 Ethernet driver"

/* SR1: Set buffer sizes for the pools. There are 8 internal queues
 * implemented in firmware, but only 4 tx channels/threads in the Egress
 * direction to firmware. A high-priority queue is needed for management
 * messages since they shouldn't be blocked even during high-traffic
 * situations. So use Q0-Q2 as data queues and Q3 as the management queue
 * in the max case. However, for ease of configuration, use the max
 * data queue + 1 for management messages if we are not using the max
 * case.
 *
 * Allocate 4 MTU buffers per data queue. Firmware requires
 * pool sizes to be set for internal queues. Set the upper 5 queue
 * pool sizes to the minimum of 128 bytes since there are only 3 tx
 * data channels and the management queue requires only a minimum buffer,
 * i.e. the lower queues are used by the driver and the highest-priority
 * one of those is used for management messages.
 */

static int emac_egress_buf_pool_size[] = {
	PRUETH_EMAC_BUF_POOL_SIZE_SR1, PRUETH_EMAC_BUF_POOL_SIZE_SR1,
	PRUETH_EMAC_BUF_POOL_SIZE_SR1, PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1,
	PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1, PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1,
	PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1, PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1
};
53
static void icssg_config_sr1(struct prueth *prueth, struct prueth_emac *emac,
			     int slice)
56 {
57 struct icssg_sr1_config config;
58 void __iomem *va;
59 int i, index;
60
61 memset(&config, 0, sizeof(config));
62 config.addr_lo = cpu_to_le32(lower_32_bits(prueth->msmcram.pa));
63 config.addr_hi = cpu_to_le32(upper_32_bits(prueth->msmcram.pa));
64 config.rx_flow_id = cpu_to_le32(emac->rx_flow_id_base); /* flow id for host port */
65 config.rx_mgr_flow_id = cpu_to_le32(emac->rx_mgm_flow_id_base); /* for mgm ch */
66 config.rand_seed = cpu_to_le32(get_random_u32());
67
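	/* program the per-queue egress buffer pool sizes expected by the firmware */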
68 for (i = PRUETH_EMAC_BUF_POOL_START_SR1; i < PRUETH_NUM_BUF_POOLS_SR1; i++) {
69 index = i - PRUETH_EMAC_BUF_POOL_START_SR1;
70 config.tx_buf_sz[i] = cpu_to_le32(emac_egress_buf_pool_size[index]);
71 }
72
73 va = prueth->shram.va + slice * ICSSG_CONFIG_OFFSET_SLICE1;
74 memcpy_toio(va, &config, sizeof(config));
75
76 emac->speed = SPEED_1000;
77 emac->duplex = DUPLEX_FULL;
78 }
79
static int emac_send_command_sr1(struct prueth_emac *emac, u32 cmd)
81 {
82 struct cppi5_host_desc_t *first_desc;
83 u32 pkt_len = sizeof(emac->cmd_data);
84 __le32 *data = emac->cmd_data;
85 dma_addr_t desc_dma, buf_dma;
86 struct prueth_tx_chn *tx_chn;
87 struct prueth_swdata *swdata;
88 int ret = 0;
89 u32 *epib;
90
91 netdev_dbg(emac->ndev, "Sending cmd %x\n", cmd);
92
93 /* only one command at a time allowed to firmware */
94 mutex_lock(&emac->cmd_lock);
95 data[0] = cpu_to_le32(cmd);
96
97 /* highest priority channel for management messages */
98 tx_chn = &emac->tx_chns[emac->tx_ch_num - 1];
99
100 /* Map the linear buffer */
101 buf_dma = dma_map_single(tx_chn->dma_dev, data, pkt_len, DMA_TO_DEVICE);
102 if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
103 netdev_err(emac->ndev, "cmd %x: failed to map cmd buffer\n", cmd);
104 ret = -EINVAL;
105 goto err_unlock;
106 }
107
108 first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
109 if (!first_desc) {
110 netdev_err(emac->ndev, "cmd %x: failed to allocate descriptor\n", cmd);
111 dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len, DMA_TO_DEVICE);
112 ret = -ENOMEM;
113 goto err_unlock;
114 }
115
116 cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
117 PRUETH_NAV_PS_DATA_SIZE);
118 cppi5_hdesc_set_pkttype(first_desc, PRUETH_PKT_TYPE_CMD);
119 epib = first_desc->epib;
120 epib[0] = 0;
121 epib[1] = 0;
122
123 cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
124 swdata = cppi5_hdesc_get_swdata(first_desc);
125 swdata->type = PRUETH_SWDATA_CMD;
126 swdata->data.cmd = le32_to_cpu(data[0]);
127
128 cppi5_hdesc_set_pktlen(first_desc, pkt_len);
129 desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
130
131 /* send command */
132 reinit_completion(&emac->cmd_complete);
133 ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
134 if (ret) {
135 netdev_err(emac->ndev, "cmd %x: push failed: %d\n", cmd, ret);
136 goto free_desc;
137 }
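	/* wait for the firmware to acknowledge the command; the completion is
	 * signalled from the management-response IRQ handler
	 */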
138 ret = wait_for_completion_timeout(&emac->cmd_complete, msecs_to_jiffies(100));
139 if (!ret)
140 netdev_err(emac->ndev, "cmd %x: completion timeout\n", cmd);
141
142 mutex_unlock(&emac->cmd_lock);
143
144 return ret;
145 free_desc:
146 prueth_xmit_free(tx_chn, first_desc);
147 err_unlock:
148 mutex_unlock(&emac->cmd_lock);
149
150 return ret;
151 }
152
static void icssg_config_set_speed_sr1(struct prueth_emac *emac)
154 {
155 u32 cmd = ICSSG_PSTATE_SPEED_DUPLEX_CMD_SR1, val;
156 struct prueth *prueth = emac->prueth;
157 int slice = prueth_emac_slice(emac);
158
159 val = icssg_rgmii_get_speed(prueth->miig_rt, slice);
160 /* firmware expects speed settings in bit 2-1 */
161 val <<= 1;
162 cmd |= val;
163
164 val = icssg_rgmii_get_fullduplex(prueth->miig_rt, slice);
165 /* firmware expects full duplex settings in bit 3 */
166 val <<= 3;
167 cmd |= val;
168
169 emac_send_command_sr1(emac, cmd);
170 }
171
/* called back by the PHY layer if there is a change in the link state of the HW port */
static void emac_adjust_link_sr1(struct net_device *ndev)
174 {
175 struct prueth_emac *emac = netdev_priv(ndev);
176 struct phy_device *phydev = ndev->phydev;
177 struct prueth *prueth = emac->prueth;
178 bool new_state = false;
179 unsigned long flags;
180
181 if (phydev->link) {
182 /* check the mode of operation - full/half duplex */
183 if (phydev->duplex != emac->duplex) {
184 new_state = true;
185 emac->duplex = phydev->duplex;
186 }
187 if (phydev->speed != emac->speed) {
188 new_state = true;
189 emac->speed = phydev->speed;
190 }
191 if (!emac->link) {
192 new_state = true;
193 emac->link = 1;
194 }
195 } else if (emac->link) {
196 new_state = true;
197 emac->link = 0;
198
199 /* f/w should support 100 & 1000 */
200 emac->speed = SPEED_1000;
201
202 /* half duplex may not be supported by f/w */
203 emac->duplex = DUPLEX_FULL;
204 }
205
206 if (new_state) {
207 phy_print_status(phydev);
208
209 /* update RGMII and MII configuration based on PHY negotiated
210 * values
211 */
212 if (emac->link) {
213 /* Set the RGMII cfg for gig en and full duplex */
214 icssg_update_rgmii_cfg(prueth->miig_rt, emac);
215
216 /* update the Tx IPG based on 100M/1G speed */
217 spin_lock_irqsave(&emac->lock, flags);
218 icssg_config_ipg(emac);
219 spin_unlock_irqrestore(&emac->lock, flags);
220 icssg_config_set_speed_sr1(emac);
221 }
222 }
223
224 if (emac->link) {
225 /* reactivate the transmit queue */
226 netif_tx_wake_all_queues(ndev);
227 } else {
228 netif_tx_stop_all_queues(ndev);
229 prueth_cleanup_tx_ts(emac);
230 }
231 }
232
static int emac_phy_connect(struct prueth_emac *emac)
234 {
235 struct prueth *prueth = emac->prueth;
236 struct net_device *ndev = emac->ndev;
237 /* connect PHY */
238 ndev->phydev = of_phy_connect(emac->ndev, emac->phy_node,
239 &emac_adjust_link_sr1, 0,
240 emac->phy_if);
241 if (!ndev->phydev) {
242 dev_err(prueth->dev, "couldn't connect to phy %s\n",
243 emac->phy_node->full_name);
244 return -ENODEV;
245 }
246
247 if (!emac->half_duplex) {
248 dev_dbg(prueth->dev, "half duplex mode is not supported\n");
249 phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
250 }
251
/* Remove 100M half-duplex due to RGMII misreporting the connection
 * as full duplex
 */
254 phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
255
256 /* remove unsupported modes */
257 phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
258 phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
259 phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
260
261 if (emac->phy_if == PHY_INTERFACE_MODE_MII)
262 phy_set_max_speed(ndev->phydev, SPEED_100);
263
264 return 0;
265 }
266
/* get one packet from the requested flow_id
 *
 * Returns a page pointer if a packet is found, else NULL.
 * The caller must recycle or free the returned page.
 */
static struct page *prueth_process_rx_mgm(struct prueth_emac *emac,
					  u32 flow_id)
274 {
275 struct prueth_rx_chn *rx_chn = &emac->rx_mgm_chn;
276 struct net_device *ndev = emac->ndev;
277 struct cppi5_host_desc_t *desc_rx;
278 struct page *page, *new_page;
279 struct prueth_swdata *swdata;
280 dma_addr_t desc_dma, buf_dma;
281 u32 buf_dma_len;
282 int ret;
283
284 ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
285 if (ret) {
286 if (ret != -ENODATA)
287 netdev_err(ndev, "rx mgm pop: failed: %d\n", ret);
288 return NULL;
289 }
290
291 if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown */
292 return NULL;
293
294 desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
295
/* Work around a firmware bug that reports an incorrect PSDATA size */
297 if (cppi5_hdesc_get_psdata_size(desc_rx) != PRUETH_NAV_PS_DATA_SIZE) {
298 cppi5_hdesc_update_psdata_size(desc_rx,
299 PRUETH_NAV_PS_DATA_SIZE);
300 }
301
302 swdata = cppi5_hdesc_get_swdata(desc_rx);
303 page = swdata->data.page;
304 cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
305
306 dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
307 k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
308
309 new_page = page_pool_dev_alloc_pages(rx_chn->pg_pool);
/* if allocation fails we drop the packet but push the
 * descriptor back to the ring with the old page to prevent a stall
 */
313 if (!new_page) {
314 netdev_err(ndev,
315 "page alloc failed, dropped mgm pkt from flow %d\n",
316 flow_id);
317 new_page = page;
318 page = NULL; /* return NULL */
319 }
320
321 /* queue another DMA */
322 ret = prueth_dma_rx_push_mapped(emac, &emac->rx_chns, new_page,
323 PRUETH_MAX_PKT_SIZE);
324 if (WARN_ON(ret < 0))
325 page_pool_recycle_direct(rx_chn->pg_pool, new_page);
326
327 return page;
328 }
329
static void prueth_tx_ts_sr1(struct prueth_emac *emac,
			     struct emac_tx_ts_response_sr1 *tsr)
332 {
333 struct skb_shared_hwtstamps ssh;
334 u32 hi_ts, lo_ts, cookie;
335 struct sk_buff *skb;
336 u64 ns;
337
338 hi_ts = le32_to_cpu(tsr->hi_ts);
339 lo_ts = le32_to_cpu(tsr->lo_ts);
340
341 ns = (u64)hi_ts << 32 | lo_ts;
342
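	/* the cookie in the firmware's response identifies which pending
	 * TX skb this timestamp belongs to
	 */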
343 cookie = le32_to_cpu(tsr->cookie);
344 if (cookie >= PRUETH_MAX_TX_TS_REQUESTS) {
345 netdev_dbg(emac->ndev, "Invalid TX TS cookie 0x%x\n",
346 cookie);
347 return;
348 }
349
350 skb = emac->tx_ts_skb[cookie];
351 emac->tx_ts_skb[cookie] = NULL; /* free slot */
352
353 memset(&ssh, 0, sizeof(ssh));
354 ssh.hwtstamp = ns_to_ktime(ns);
355
356 skb_tstamp_tx(skb, &ssh);
357 dev_consume_skb_any(skb);
358 }
359
static irqreturn_t prueth_rx_mgm_ts_thread_sr1(int irq, void *dev_id)
361 {
362 struct prueth_emac *emac = dev_id;
363 struct page *page;
364
365 page = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1);
366 if (!page)
367 return IRQ_NONE;
368
369 prueth_tx_ts_sr1(emac, (void *)page_address(page));
370 page_pool_recycle_direct(page->pp, page);
371
372 return IRQ_HANDLED;
373 }
374
static irqreturn_t prueth_rx_mgm_rsp_thread(int irq, void *dev_id)
376 {
377 struct prueth_emac *emac = dev_id;
378 struct page *page;
379 u32 rsp;
380
381 page = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_RESPONSE_SR1);
382 if (!page)
383 return IRQ_NONE;
384
385 /* Process command response */
386 rsp = le32_to_cpu(*(__le32 *)page_address(page)) & 0xffff0000;
387 if (rsp == ICSSG_SHUTDOWN_CMD_SR1) {
388 netdev_dbg(emac->ndev, "f/w Shutdown cmd resp %x\n", rsp);
389 complete(&emac->cmd_complete);
390 } else if (rsp == ICSSG_PSTATE_SPEED_DUPLEX_CMD_SR1) {
391 netdev_dbg(emac->ndev, "f/w Speed/Duplex cmd rsp %x\n", rsp);
392 complete(&emac->cmd_complete);
393 }
394
395 page_pool_recycle_direct(page->pp, page);
396
397 return IRQ_HANDLED;
398 }
399
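/* firmware image names for the PRU and RTU cores of each ICSSG slice */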
400 static struct icssg_firmwares icssg_sr1_emac_firmwares[] = {
401 {
402 .pru = "ti-pruss/am65x-pru0-prueth-fw.elf",
403 .rtu = "ti-pruss/am65x-rtu0-prueth-fw.elf",
404 },
405 {
406 .pru = "ti-pruss/am65x-pru1-prueth-fw.elf",
407 .rtu = "ti-pruss/am65x-rtu1-prueth-fw.elf",
408 }
409 };
410
static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
412 {
413 struct icssg_firmwares *firmwares;
414 struct device *dev = prueth->dev;
415 int slice, ret;
416
417 firmwares = icssg_sr1_emac_firmwares;
418
419 slice = prueth_emac_slice(emac);
420 if (slice < 0) {
421 netdev_err(emac->ndev, "invalid port\n");
422 return -EINVAL;
423 }
424
425 icssg_config_sr1(prueth, emac, slice);
426
427 ret = rproc_set_firmware(prueth->pru[slice], firmwares[slice].pru);
428 ret = rproc_boot(prueth->pru[slice]);
429 if (ret) {
430 dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
431 return -EINVAL;
432 }
433
434 ret = rproc_set_firmware(prueth->rtu[slice], firmwares[slice].rtu);
435 ret = rproc_boot(prueth->rtu[slice]);
436 if (ret) {
437 dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
438 goto halt_pru;
439 }
440
441 return 0;
442
443 halt_pru:
444 rproc_shutdown(prueth->pru[slice]);
445
446 return ret;
447 }
448
static void prueth_emac_stop(struct prueth_emac *emac)
450 {
451 struct prueth *prueth = emac->prueth;
452 int slice;
453
454 switch (emac->port_id) {
455 case PRUETH_PORT_MII0:
456 slice = ICSS_SLICE0;
457 break;
458 case PRUETH_PORT_MII1:
459 slice = ICSS_SLICE1;
460 break;
461 default:
462 netdev_err(emac->ndev, "invalid port\n");
463 return;
464 }
465
466 if (!emac->is_sr1)
467 rproc_shutdown(prueth->txpru[slice]);
468 rproc_shutdown(prueth->rtu[slice]);
469 rproc_shutdown(prueth->pru[slice]);
470 }
471
472 /**
473 * emac_ndo_open - EMAC device open
474 * @ndev: network adapter device
475 *
476 * Called when system wants to start the interface.
477 *
478 * Return: 0 for a successful open, or appropriate error code
479 */
static int emac_ndo_open(struct net_device *ndev)
481 {
482 struct prueth_emac *emac = netdev_priv(ndev);
483 int num_data_chn = emac->tx_ch_num - 1;
484 struct prueth *prueth = emac->prueth;
485 int slice = prueth_emac_slice(emac);
486 struct device *dev = prueth->dev;
487 int max_rx_flows, rx_flow;
488 int ret, i;
489
490 /* clear SMEM and MSMC settings for all slices */
491 if (!prueth->emacs_initialized) {
492 memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
493 memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
494 }
495
496 /* set h/w MAC as user might have re-configured */
497 ether_addr_copy(emac->mac_addr, ndev->dev_addr);
498
499 icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
500
501 icssg_class_default(prueth->miig_rt, slice, 0, true);
502
503 /* Notify the stack of the actual queue counts. */
504 ret = netif_set_real_num_tx_queues(ndev, num_data_chn);
505 if (ret) {
506 dev_err(dev, "cannot set real number of tx queues\n");
507 return ret;
508 }
509
510 init_completion(&emac->cmd_complete);
511 ret = prueth_init_tx_chns(emac);
512 if (ret) {
513 dev_err(dev, "failed to init tx channel: %d\n", ret);
514 return ret;
515 }
516
517 max_rx_flows = PRUETH_MAX_RX_FLOWS_SR1;
518 ret = prueth_init_rx_chns(emac, &emac->rx_chns, "rx",
519 max_rx_flows, PRUETH_MAX_RX_DESC);
520 if (ret) {
521 dev_err(dev, "failed to init rx channel: %d\n", ret);
522 goto cleanup_tx;
523 }
524
525 ret = prueth_init_rx_chns(emac, &emac->rx_mgm_chn, "rxmgm",
526 PRUETH_MAX_RX_MGM_FLOWS_SR1,
527 PRUETH_MAX_RX_MGM_DESC_SR1);
528 if (ret) {
529 dev_err(dev, "failed to init rx mgmt channel: %d\n",
530 ret);
531 goto cleanup_rx;
532 }
533
534 ret = prueth_ndev_add_tx_napi(emac);
535 if (ret)
536 goto cleanup_rx_mgm;
537
538 /* we use only the highest priority flow for now i.e. @irq[3] */
539 rx_flow = PRUETH_RX_FLOW_DATA_SR1;
540 ret = request_irq(emac->rx_chns.irq[rx_flow], prueth_rx_irq,
541 IRQF_TRIGGER_HIGH, dev_name(dev), emac);
542 if (ret) {
543 dev_err(dev, "unable to request RX IRQ\n");
544 goto cleanup_napi;
545 }
546
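	/* management response and timestamp flows are serviced by threaded IRQ handlers */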
547 ret = request_threaded_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_RESPONSE_SR1],
548 NULL, prueth_rx_mgm_rsp_thread,
549 IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
550 dev_name(dev), emac);
551 if (ret) {
552 dev_err(dev, "unable to request RX Management RSP IRQ\n");
553 goto free_rx_irq;
554 }
555
556 ret = request_threaded_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1],
557 NULL, prueth_rx_mgm_ts_thread_sr1,
558 IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
559 dev_name(dev), emac);
560 if (ret) {
561 dev_err(dev, "unable to request RX Management TS IRQ\n");
562 goto free_rx_mgm_rsp_irq;
563 }
564
565 /* reset and start PRU firmware */
566 ret = prueth_emac_start(prueth, emac);
567 if (ret)
568 goto free_rx_mgmt_ts_irq;
569
570 icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
571
572 /* Prepare RX */
573 ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
574 if (ret)
575 goto stop;
576
577 ret = prueth_prepare_rx_chan(emac, &emac->rx_mgm_chn, 64);
578 if (ret)
579 goto reset_rx_chn;
580
581 ret = k3_udma_glue_enable_rx_chn(emac->rx_mgm_chn.rx_chn);
582 if (ret)
583 goto reset_rx_chn;
584
585 ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
586 if (ret)
587 goto reset_rx_mgm_chn;
588
589 for (i = 0; i < emac->tx_ch_num; i++) {
590 ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
591 if (ret)
592 goto reset_tx_chan;
593 }
594
595 /* Enable NAPI in Tx and Rx direction */
596 for (i = 0; i < emac->tx_ch_num; i++)
597 napi_enable(&emac->tx_chns[i].napi_tx);
598 napi_enable(&emac->napi_rx);
599
600 /* start PHY */
601 phy_start(ndev->phydev);
602
603 prueth->emacs_initialized++;
604
605 queue_work(system_long_wq, &emac->stats_work.work);
606
607 return 0;
608
609 reset_tx_chan:
/* Since the interface is not yet up, there wouldn't be
 * any SKB to complete. So pass free_skb as false.
 */
613 prueth_reset_tx_chan(emac, i, false);
614 reset_rx_mgm_chn:
615 prueth_reset_rx_chan(&emac->rx_mgm_chn,
616 PRUETH_MAX_RX_MGM_FLOWS_SR1, true);
617 reset_rx_chn:
618 prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
619 stop:
620 prueth_emac_stop(emac);
621 free_rx_mgmt_ts_irq:
622 free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1],
623 emac);
624 free_rx_mgm_rsp_irq:
625 free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_RESPONSE_SR1],
626 emac);
627 free_rx_irq:
628 free_irq(emac->rx_chns.irq[rx_flow], emac);
629 cleanup_napi:
630 prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
631 cleanup_rx_mgm:
632 prueth_cleanup_rx_chns(emac, &emac->rx_mgm_chn,
633 PRUETH_MAX_RX_MGM_FLOWS_SR1);
634 cleanup_rx:
635 prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
636 cleanup_tx:
637 prueth_cleanup_tx_chns(emac);
638
639 return ret;
640 }
641
642 /**
643 * emac_ndo_stop - EMAC device stop
644 * @ndev: network adapter device
645 *
646 * Called when system wants to stop or down the interface.
647 *
648 * Return: Always 0 (Success)
649 */
static int emac_ndo_stop(struct net_device *ndev)
651 {
652 struct prueth_emac *emac = netdev_priv(ndev);
653 int rx_flow = PRUETH_RX_FLOW_DATA_SR1;
654 struct prueth *prueth = emac->prueth;
655 int max_rx_flows;
656 int ret, i;
657
658 /* inform the upper layers. */
659 netif_tx_stop_all_queues(ndev);
660
661 /* block packets from wire */
662 if (ndev->phydev)
663 phy_stop(ndev->phydev);
664
665 icssg_class_disable(prueth->miig_rt, prueth_emac_slice(emac));
666
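	/* ask the firmware to shut down the port before tearing down the DMA channels */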
667 emac_send_command_sr1(emac, ICSSG_SHUTDOWN_CMD_SR1);
668
669 atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
670 /* ensure new tdown_cnt value is visible */
671 smp_mb__after_atomic();
672 /* tear down and disable UDMA channels */
673 reinit_completion(&emac->tdown_complete);
674 for (i = 0; i < emac->tx_ch_num; i++)
675 k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);
676
677 ret = wait_for_completion_timeout(&emac->tdown_complete,
678 msecs_to_jiffies(1000));
679 if (!ret)
680 netdev_err(ndev, "tx teardown timeout\n");
681
682 prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
683 for (i = 0; i < emac->tx_ch_num; i++)
684 napi_disable(&emac->tx_chns[i].napi_tx);
685
686 max_rx_flows = PRUETH_MAX_RX_FLOWS_SR1;
687 k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
688
689 prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
690 /* Teardown RX MGM channel */
691 k3_udma_glue_tdown_rx_chn(emac->rx_mgm_chn.rx_chn, true);
692 prueth_reset_rx_chan(&emac->rx_mgm_chn,
693 PRUETH_MAX_RX_MGM_FLOWS_SR1, true);
694
695 napi_disable(&emac->napi_rx);
696
/* Cancel the stats work that was queued in ndo_open() */
698 cancel_delayed_work_sync(&emac->stats_work);
699
700 /* stop PRUs */
701 prueth_emac_stop(emac);
702
703 free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1], emac);
704 free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_RESPONSE_SR1], emac);
705 free_irq(emac->rx_chns.irq[rx_flow], emac);
706 prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
707 prueth_cleanup_tx_chns(emac);
708
709 prueth_cleanup_rx_chns(emac, &emac->rx_mgm_chn, PRUETH_MAX_RX_MGM_FLOWS_SR1);
710 prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
711
712 prueth->emacs_initialized--;
713
714 return 0;
715 }
716
static void emac_ndo_set_rx_mode_sr1(struct net_device *ndev)
718 {
719 struct prueth_emac *emac = netdev_priv(ndev);
720 bool allmulti = ndev->flags & IFF_ALLMULTI;
721 bool promisc = ndev->flags & IFF_PROMISC;
722 struct prueth *prueth = emac->prueth;
723 int slice = prueth_emac_slice(emac);
724
725 if (promisc) {
726 icssg_class_promiscuous_sr1(prueth->miig_rt, slice);
727 return;
728 }
729
730 if (allmulti) {
731 icssg_class_default(prueth->miig_rt, slice, 1, true);
732 return;
733 }
734
735 icssg_class_default(prueth->miig_rt, slice, 0, true);
736 if (!netdev_mc_empty(ndev)) {
737 /* program multicast address list into Classifier */
738 icssg_class_add_mcast_sr1(prueth->miig_rt, slice, ndev);
739 }
740 }
741
742 static const struct net_device_ops emac_netdev_ops = {
743 .ndo_open = emac_ndo_open,
744 .ndo_stop = emac_ndo_stop,
745 .ndo_start_xmit = icssg_ndo_start_xmit,
746 .ndo_set_mac_address = eth_mac_addr,
747 .ndo_validate_addr = eth_validate_addr,
748 .ndo_tx_timeout = icssg_ndo_tx_timeout,
749 .ndo_set_rx_mode = emac_ndo_set_rx_mode_sr1,
750 .ndo_eth_ioctl = icssg_ndo_ioctl,
751 .ndo_get_stats64 = icssg_ndo_get_stats64,
752 .ndo_get_phys_port_name = icssg_ndo_get_phys_port_name,
753 };
754
static int prueth_netdev_init(struct prueth *prueth,
			      struct device_node *eth_node)
757 {
758 struct prueth_emac *emac;
759 struct net_device *ndev;
760 enum prueth_port port;
761 enum prueth_mac mac;
762 /* Only enable one TX channel due to timeouts when
763 * using multiple channels */
764 int num_tx_chn = 1;
765 int ret;
766
767 port = prueth_node_port(eth_node);
768 if (port == PRUETH_PORT_INVALID)
769 return -EINVAL;
770
771 mac = prueth_node_mac(eth_node);
772 if (mac == PRUETH_MAC_INVALID)
773 return -EINVAL;
774
775 ndev = alloc_etherdev_mq(sizeof(*emac), num_tx_chn);
776 if (!ndev)
777 return -ENOMEM;
778
779 emac = netdev_priv(ndev);
780 emac->is_sr1 = 1;
781 emac->prueth = prueth;
782 emac->ndev = ndev;
783 emac->port_id = port;
784 emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
785 if (!emac->cmd_wq) {
786 ret = -ENOMEM;
787 goto free_ndev;
788 }
789
790 INIT_DELAYED_WORK(&emac->stats_work, icssg_stats_work_handler);
791
792 ret = pruss_request_mem_region(prueth->pruss,
793 port == PRUETH_PORT_MII0 ?
794 PRUSS_MEM_DRAM0 : PRUSS_MEM_DRAM1,
795 &emac->dram);
796 if (ret) {
797 dev_err(prueth->dev, "unable to get DRAM: %d\n", ret);
798 ret = -ENOMEM;
799 goto free_wq;
800 }
801
802 /* SR1.0 uses a dedicated high priority channel
803 * to send commands to the firmware
804 */
805 emac->tx_ch_num = 2;
806
807 SET_NETDEV_DEV(ndev, prueth->dev);
808 spin_lock_init(&emac->lock);
809 mutex_init(&emac->cmd_lock);
810
811 emac->phy_node = of_parse_phandle(eth_node, "phy-handle", 0);
812 if (!emac->phy_node && !of_phy_is_fixed_link(eth_node)) {
813 dev_err(prueth->dev, "couldn't find phy-handle\n");
814 ret = -ENODEV;
815 goto free;
816 } else if (of_phy_is_fixed_link(eth_node)) {
817 ret = of_phy_register_fixed_link(eth_node);
818 if (ret) {
819 ret = dev_err_probe(prueth->dev, ret,
820 "failed to register fixed-link phy\n");
821 goto free;
822 }
823
824 emac->phy_node = eth_node;
825 }
826
827 ret = of_get_phy_mode(eth_node, &emac->phy_if);
828 if (ret) {
829 dev_err(prueth->dev, "could not get phy-mode property\n");
830 goto free;
831 }
832
833 if (emac->phy_if != PHY_INTERFACE_MODE_MII &&
834 !phy_interface_mode_is_rgmii(emac->phy_if)) {
835 dev_err(prueth->dev, "PHY mode unsupported %s\n", phy_modes(emac->phy_if));
836 ret = -EINVAL;
837 goto free;
838 }
839
840 /* AM65 SR2.0 has TX Internal delay always enabled by hardware
841 * and it is not possible to disable TX Internal delay. The below
842 * switch case block describes how we handle different phy modes
843 * based on hardware restriction.
844 */
845 switch (emac->phy_if) {
846 case PHY_INTERFACE_MODE_RGMII_ID:
847 emac->phy_if = PHY_INTERFACE_MODE_RGMII_RXID;
848 break;
849 case PHY_INTERFACE_MODE_RGMII_TXID:
850 emac->phy_if = PHY_INTERFACE_MODE_RGMII;
851 break;
852 case PHY_INTERFACE_MODE_RGMII:
853 case PHY_INTERFACE_MODE_RGMII_RXID:
854 dev_err(prueth->dev, "RGMII mode without TX delay is not supported");
855 ret = -EINVAL;
856 goto free;
857 default:
858 break;
859 }
860
861 /* get mac address from DT and set private and netdev addr */
862 ret = of_get_ethdev_address(eth_node, ndev);
863 if (!is_valid_ether_addr(ndev->dev_addr)) {
864 eth_hw_addr_random(ndev);
865 dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n",
866 port, ndev->dev_addr);
867 }
868 ether_addr_copy(emac->mac_addr, ndev->dev_addr);
869
870 ndev->dev.of_node = eth_node;
871 ndev->min_mtu = PRUETH_MIN_PKT_SIZE;
872 ndev->max_mtu = PRUETH_MAX_MTU;
873 ndev->netdev_ops = &emac_netdev_ops;
874 ndev->ethtool_ops = &icssg_ethtool_ops;
875 ndev->hw_features = NETIF_F_SG;
876 ndev->features = ndev->hw_features;
877
878 netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
879 prueth->emac[mac] = emac;
880
881 return 0;
882
883 free:
884 pruss_release_mem_region(prueth->pruss, &emac->dram);
885 free_wq:
886 destroy_workqueue(emac->cmd_wq);
887 free_ndev:
888 emac->ndev = NULL;
889 prueth->emac[mac] = NULL;
890 free_netdev(ndev);
891
892 return ret;
893 }
894
static int prueth_probe(struct platform_device *pdev)
896 {
897 struct device_node *eth_node, *eth_ports_node;
898 struct device_node *eth0_node = NULL;
899 struct device_node *eth1_node = NULL;
900 struct device *dev = &pdev->dev;
901 struct device_node *np;
902 struct prueth *prueth;
903 struct pruss *pruss;
904 u32 msmc_ram_size;
905 int i, ret;
906
907 np = dev->of_node;
908
909 prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
910 if (!prueth)
911 return -ENOMEM;
912
913 dev_set_drvdata(dev, prueth);
914 prueth->pdev = pdev;
915 prueth->pdata = *(const struct prueth_pdata *)device_get_match_data(dev);
916
917 prueth->dev = dev;
918 eth_ports_node = of_get_child_by_name(np, "ethernet-ports");
919 if (!eth_ports_node)
920 return -ENOENT;
921
922 for_each_child_of_node(eth_ports_node, eth_node) {
923 u32 reg;
924
925 if (strcmp(eth_node->name, "port"))
926 continue;
		ret = of_property_read_u32(eth_node, "reg", &reg);
928 if (ret < 0) {
929 dev_err(dev, "%pOF error reading port_id %d\n",
930 eth_node, ret);
931 }
932
933 of_node_get(eth_node);
934
935 if (reg == 0) {
936 eth0_node = eth_node;
937 if (!of_device_is_available(eth0_node)) {
938 of_node_put(eth0_node);
939 eth0_node = NULL;
940 }
941 } else if (reg == 1) {
942 eth1_node = eth_node;
943 if (!of_device_is_available(eth1_node)) {
944 of_node_put(eth1_node);
945 eth1_node = NULL;
946 }
947 } else {
948 dev_err(dev, "port reg should be 0 or 1\n");
949 }
950 }
951
952 of_node_put(eth_ports_node);
953
954 /* At least one node must be present and available else we fail */
955 if (!eth0_node && !eth1_node) {
956 dev_err(dev, "neither port0 nor port1 node available\n");
957 return -ENODEV;
958 }
959
960 if (eth0_node == eth1_node) {
961 dev_err(dev, "port0 and port1 can't have same reg\n");
962 of_node_put(eth0_node);
963 return -ENODEV;
964 }
965
966 prueth->eth_node[PRUETH_MAC0] = eth0_node;
967 prueth->eth_node[PRUETH_MAC1] = eth1_node;
968
969 prueth->miig_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-g-rt");
970 if (IS_ERR(prueth->miig_rt)) {
971 dev_err(dev, "couldn't get ti,mii-g-rt syscon regmap\n");
972 return -ENODEV;
973 }
974
975 prueth->mii_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-rt");
976 if (IS_ERR(prueth->mii_rt)) {
977 dev_err(dev, "couldn't get ti,mii-rt syscon regmap\n");
978 return -ENODEV;
979 }
980
981 if (eth0_node) {
982 ret = prueth_get_cores(prueth, ICSS_SLICE0, true);
983 if (ret)
984 goto put_cores;
985 }
986
987 if (eth1_node) {
988 ret = prueth_get_cores(prueth, ICSS_SLICE1, true);
989 if (ret)
990 goto put_cores;
991 }
992
993 pruss = pruss_get(eth0_node ?
994 prueth->pru[ICSS_SLICE0] : prueth->pru[ICSS_SLICE1]);
995 if (IS_ERR(pruss)) {
996 ret = PTR_ERR(pruss);
997 dev_err(dev, "unable to get pruss handle\n");
998 goto put_cores;
999 }
1000
1001 prueth->pruss = pruss;
1002
1003 ret = pruss_request_mem_region(pruss, PRUSS_MEM_SHRD_RAM2,
1004 &prueth->shram);
1005 if (ret) {
1006 dev_err(dev, "unable to get PRUSS SHRD RAM2: %d\n", ret);
1007 goto put_pruss;
1008 }
1009
1010 prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
1011 if (!prueth->sram_pool) {
1012 dev_err(dev, "unable to get SRAM pool\n");
1013 ret = -ENODEV;
1014
1015 goto put_mem;
1016 }
1017
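	/* carve out the MSMC SRAM region used by the firmware for its buffer pools */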
1018 msmc_ram_size = MSMC_RAM_SIZE_SR1;
1019
1020 prueth->msmcram.va = (void __iomem *)gen_pool_alloc(prueth->sram_pool,
1021 msmc_ram_size);
1022
1023 if (!prueth->msmcram.va) {
1024 ret = -ENOMEM;
1025 dev_err(dev, "unable to allocate MSMC resource\n");
1026 goto put_mem;
1027 }
1028 prueth->msmcram.pa = gen_pool_virt_to_phys(prueth->sram_pool,
1029 (unsigned long)prueth->msmcram.va);
1030 prueth->msmcram.size = msmc_ram_size;
1031 memset_io(prueth->msmcram.va, 0, msmc_ram_size);
1032
1033 prueth->iep0 = icss_iep_get_idx(np, 0);
1034 if (IS_ERR(prueth->iep0)) {
1035 ret = dev_err_probe(dev, PTR_ERR(prueth->iep0),
1036 "iep0 get failed\n");
1037 goto free_pool;
1038 }
1039
1040 prueth->iep1 = icss_iep_get_idx(np, 1);
1041 if (IS_ERR(prueth->iep1)) {
1042 ret = dev_err_probe(dev, PTR_ERR(prueth->iep1),
1043 "iep1 get failed\n");
1044 goto put_iep0;
1045 }
1046
1047 ret = icss_iep_init(prueth->iep0, NULL, NULL, 0);
1048 if (ret) {
1049 dev_err_probe(dev, ret, "failed to init iep0\n");
1050 goto put_iep;
1051 }
1052
1053 ret = icss_iep_init(prueth->iep1, NULL, NULL, 0);
1054 if (ret) {
1055 dev_err_probe(dev, ret, "failed to init iep1\n");
1056 goto exit_iep0;
1057 }
1058
1059 if (eth0_node) {
1060 ret = prueth_netdev_init(prueth, eth0_node);
1061 if (ret) {
1062 dev_err_probe(dev, ret, "netdev init %s failed\n",
1063 eth0_node->name);
1064 goto exit_iep;
1065 }
1066
1067 prueth->emac[PRUETH_MAC0]->half_duplex =
1068 of_property_read_bool(eth0_node, "ti,half-duplex-capable");
1069
1070 prueth->emac[PRUETH_MAC0]->iep = prueth->iep0;
1071 }
1072
1073 if (eth1_node) {
1074 ret = prueth_netdev_init(prueth, eth1_node);
1075 if (ret) {
1076 dev_err_probe(dev, ret, "netdev init %s failed\n",
1077 eth1_node->name);
1078 goto netdev_exit;
1079 }
1080
1081 prueth->emac[PRUETH_MAC1]->half_duplex =
1082 of_property_read_bool(eth1_node, "ti,half-duplex-capable");
1083
1084 prueth->emac[PRUETH_MAC1]->iep = prueth->iep1;
1085 }
1086
1087 /* register the network devices */
1088 if (eth0_node) {
1089 ret = register_netdev(prueth->emac[PRUETH_MAC0]->ndev);
1090 if (ret) {
1091 dev_err(dev, "can't register netdev for port MII0\n");
1092 goto netdev_exit;
1093 }
1094
1095 prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev;
1096 emac_phy_connect(prueth->emac[PRUETH_MAC0]);
1097 phy_attached_info(prueth->emac[PRUETH_MAC0]->ndev->phydev);
1098 }
1099
1100 if (eth1_node) {
1101 ret = register_netdev(prueth->emac[PRUETH_MAC1]->ndev);
1102 if (ret) {
1103 dev_err(dev, "can't register netdev for port MII1\n");
1104 goto netdev_unregister;
1105 }
1106
1107 prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev;
1108 emac_phy_connect(prueth->emac[PRUETH_MAC1]);
1109 phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev);
1110 }
1111
1112 dev_info(dev, "TI PRU SR1.0 ethernet driver initialized: %s EMAC mode\n",
1113 (!eth0_node || !eth1_node) ? "single" : "dual");
1114
1115 if (eth1_node)
1116 of_node_put(eth1_node);
1117 if (eth0_node)
1118 of_node_put(eth0_node);
1119
1120 return 0;
1121
1122 netdev_unregister:
1123 for (i = 0; i < PRUETH_NUM_MACS; i++) {
1124 if (!prueth->registered_netdevs[i])
1125 continue;
1126
1127 if (prueth->emac[i]->ndev->phydev) {
1128 phy_disconnect(prueth->emac[i]->ndev->phydev);
1129 prueth->emac[i]->ndev->phydev = NULL;
1130 }
1131 unregister_netdev(prueth->registered_netdevs[i]);
1132 }
1133
1134 netdev_exit:
1135 for (i = 0; i < PRUETH_NUM_MACS; i++) {
1136 eth_node = prueth->eth_node[i];
1137 if (!eth_node)
1138 continue;
1139
1140 prueth_netdev_exit(prueth, eth_node);
1141 }
1142
1143 exit_iep:
1144 icss_iep_exit(prueth->iep1);
1145 exit_iep0:
1146 icss_iep_exit(prueth->iep0);
1147
1148 put_iep:
1149 icss_iep_put(prueth->iep1);
1150
1151 put_iep0:
1152 icss_iep_put(prueth->iep0);
1153 prueth->iep0 = NULL;
1154 prueth->iep1 = NULL;
1155
1156 free_pool:
1157 gen_pool_free(prueth->sram_pool,
1158 (unsigned long)prueth->msmcram.va, msmc_ram_size);
1159
1160 put_mem:
1161 pruss_release_mem_region(prueth->pruss, &prueth->shram);
1162
1163 put_pruss:
1164 pruss_put(prueth->pruss);
1165
1166 put_cores:
1167 if (eth1_node) {
1168 prueth_put_cores(prueth, ICSS_SLICE1);
1169 of_node_put(eth1_node);
1170 }
1171
1172 if (eth0_node) {
1173 prueth_put_cores(prueth, ICSS_SLICE0);
1174 of_node_put(eth0_node);
1175 }
1176
1177 return ret;
1178 }
1179
static void prueth_remove(struct platform_device *pdev)
1181 {
1182 struct prueth *prueth = platform_get_drvdata(pdev);
1183 struct device_node *eth_node;
1184 int i;
1185
1186 for (i = 0; i < PRUETH_NUM_MACS; i++) {
1187 if (!prueth->registered_netdevs[i])
1188 continue;
1189 phy_stop(prueth->emac[i]->ndev->phydev);
1190 phy_disconnect(prueth->emac[i]->ndev->phydev);
1191 prueth->emac[i]->ndev->phydev = NULL;
1192 unregister_netdev(prueth->registered_netdevs[i]);
1193 }
1194
1195 for (i = 0; i < PRUETH_NUM_MACS; i++) {
1196 eth_node = prueth->eth_node[i];
1197 if (!eth_node)
1198 continue;
1199
1200 prueth_netdev_exit(prueth, eth_node);
1201 }
1202
1203 icss_iep_exit(prueth->iep1);
1204 icss_iep_exit(prueth->iep0);
1205
1206 icss_iep_put(prueth->iep1);
1207 icss_iep_put(prueth->iep0);
1208
1209 gen_pool_free(prueth->sram_pool,
1210 (unsigned long)prueth->msmcram.va,
1211 MSMC_RAM_SIZE_SR1);
1212
1213 pruss_release_mem_region(prueth->pruss, &prueth->shram);
1214
1215 pruss_put(prueth->pruss);
1216
1217 if (prueth->eth_node[PRUETH_MAC1])
1218 prueth_put_cores(prueth, ICSS_SLICE1);
1219
1220 if (prueth->eth_node[PRUETH_MAC0])
1221 prueth_put_cores(prueth, ICSS_SLICE0);
1222 }
1223
1224 static const struct prueth_pdata am654_sr1_icssg_pdata = {
1225 .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
1226 };
1227
1228 static const struct of_device_id prueth_dt_match[] = {
1229 { .compatible = "ti,am654-sr1-icssg-prueth", .data = &am654_sr1_icssg_pdata },
1230 { /* sentinel */ }
1231 };
1232 MODULE_DEVICE_TABLE(of, prueth_dt_match);
1233
1234 static struct platform_driver prueth_driver = {
1235 .probe = prueth_probe,
1236 .remove = prueth_remove,
1237 .driver = {
1238 .name = "icssg-prueth-sr1",
1239 .of_match_table = prueth_dt_match,
1240 .pm = &prueth_dev_pm_ops,
1241 },
1242 };
1243 module_platform_driver(prueth_driver);
1244
1245 MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
1246 MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
1247 MODULE_AUTHOR("Diogo Ivo <diogo.ivo@siemens.com>");
1248 MODULE_DESCRIPTION(PRUETH_MODULE_DESCRIPTION);
1249 MODULE_LICENSE("GPL");
1250