xref: /linux/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c (revision 9410645520e9b820069761f3450ef6661418e279)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 /* Texas Instruments ICSSG SR1.0 Ethernet Driver
4  *
5  * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
6  * Copyright (c) Siemens AG, 2024
7  *
8  */
9 
10 #include <linux/etherdevice.h>
11 #include <linux/genalloc.h>
12 #include <linux/kernel.h>
13 #include <linux/mfd/syscon.h>
14 #include <linux/module.h>
15 #include <linux/of.h>
16 #include <linux/of_mdio.h>
17 #include <linux/of_net.h>
18 #include <linux/platform_device.h>
19 #include <linux/property.h>
20 #include <linux/phy.h>
21 #include <linux/remoteproc/pruss.h>
22 #include <linux/pruss_driver.h>
23 
24 #include "icssg_prueth.h"
25 #include "icssg_mii_rt.h"
26 #include "../k3-cppi-desc-pool.h"
27 
28 #define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG SR1.0 Ethernet driver"
29 
30 /* SR1: Set buffer sizes for the pools. There are 8 internal queues
31  * implemented in firmware, but only 4 tx channels/threads in the egress
32  * direction to firmware. A high priority queue is needed for management
33  * messages since they shouldn't be blocked even during high traffic
34  * situations. So use Q0-Q2 as data queues and Q3 as the management queue
35  * in the max case. However, for ease of configuration, use the max
36  * data queue + 1 for management messages if we are not using the max
37  * case.
38  *
39  * Allocate 4 MTU buffers per data queue. Firmware requires
40  * pool sizes to be set for internal queues. Set the upper 5 queue
41  * pool sizes to the min size of 128 bytes since there are only 3 tx
42  * data channels and the management queue requires only a minimum buffer,
43  * i.e. the lower queues are used by the driver and the highest priority
44  * queue of those is used for management messages.
45  */
46 
47 static int emac_egress_buf_pool_size[] = {
48 	PRUETH_EMAC_BUF_POOL_SIZE_SR1, PRUETH_EMAC_BUF_POOL_SIZE_SR1,
49 	PRUETH_EMAC_BUF_POOL_SIZE_SR1, PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1,
50 	PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1, PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1,
51 	PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1, PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1
52 };
53 
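/* Populate the SR1.0 firmware configuration (MSMC buffer base address,
 * RX flow IDs for the data and management channels, a random seed and the
 * per-queue TX buffer pool sizes) and write it to the slice's region in
 * shared RAM. Link state defaults to 1G full duplex until the PHY reports
 * otherwise.
 */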
54 static void icssg_config_sr1(struct prueth *prueth, struct prueth_emac *emac,
55 			     int slice)
56 {
57 	struct icssg_sr1_config config;
58 	void __iomem *va;
59 	int i, index;
60 
61 	memset(&config, 0, sizeof(config));
62 	config.addr_lo = cpu_to_le32(lower_32_bits(prueth->msmcram.pa));
63 	config.addr_hi = cpu_to_le32(upper_32_bits(prueth->msmcram.pa));
64 	config.rx_flow_id = cpu_to_le32(emac->rx_flow_id_base); /* flow id for host port */
65 	config.rx_mgr_flow_id = cpu_to_le32(emac->rx_mgm_flow_id_base); /* for mgm ch */
66 	config.rand_seed = cpu_to_le32(get_random_u32());
67 
68 	for (i = PRUETH_EMAC_BUF_POOL_START_SR1; i < PRUETH_NUM_BUF_POOLS_SR1; i++) {
69 		index =  i - PRUETH_EMAC_BUF_POOL_START_SR1;
70 		config.tx_buf_sz[i] = cpu_to_le32(emac_egress_buf_pool_size[index]);
71 	}
72 
73 	va = prueth->shram.va + slice * ICSSG_CONFIG_OFFSET_SLICE1;
74 	memcpy_toio(va, &config, sizeof(config));
75 
76 	emac->speed = SPEED_1000;
77 	emac->duplex = DUPLEX_FULL;
78 }
79 
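/* Send a single command word to the firmware over the highest priority TX
 * channel. Commands are serialized by cmd_lock; completion is signalled by
 * the management response IRQ thread and waited for with a 100 ms timeout.
 */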
80 static int emac_send_command_sr1(struct prueth_emac *emac, u32 cmd)
81 {
82 	struct cppi5_host_desc_t *first_desc;
83 	u32 pkt_len = sizeof(emac->cmd_data);
84 	__le32 *data = emac->cmd_data;
85 	dma_addr_t desc_dma, buf_dma;
86 	struct prueth_tx_chn *tx_chn;
87 	void **swdata;
88 	int ret = 0;
89 	u32 *epib;
90 
91 	netdev_dbg(emac->ndev, "Sending cmd %x\n", cmd);
92 
93 	/* only one command at a time allowed to firmware */
94 	mutex_lock(&emac->cmd_lock);
95 	data[0] = cpu_to_le32(cmd);
96 
97 	/* highest priority channel for management messages */
98 	tx_chn = &emac->tx_chns[emac->tx_ch_num - 1];
99 
100 	/* Map the linear buffer */
101 	buf_dma = dma_map_single(tx_chn->dma_dev, data, pkt_len, DMA_TO_DEVICE);
102 	if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
103 		netdev_err(emac->ndev, "cmd %x: failed to map cmd buffer\n", cmd);
104 		ret = -EINVAL;
105 		goto err_unlock;
106 	}
107 
108 	first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
109 	if (!first_desc) {
110 		netdev_err(emac->ndev, "cmd %x: failed to allocate descriptor\n", cmd);
111 		dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len, DMA_TO_DEVICE);
112 		ret = -ENOMEM;
113 		goto err_unlock;
114 	}
115 
116 	cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
117 			 PRUETH_NAV_PS_DATA_SIZE);
118 	cppi5_hdesc_set_pkttype(first_desc, PRUETH_PKT_TYPE_CMD);
119 	epib = first_desc->epib;
120 	epib[0] = 0;
121 	epib[1] = 0;
122 
123 	cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
124 	swdata = cppi5_hdesc_get_swdata(first_desc);
125 	*swdata = data;
126 
127 	cppi5_hdesc_set_pktlen(first_desc, pkt_len);
128 	desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
129 
130 	/* send command */
131 	reinit_completion(&emac->cmd_complete);
132 	ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
133 	if (ret) {
134 		netdev_err(emac->ndev, "cmd %x: push failed: %d\n", cmd, ret);
135 		goto free_desc;
136 	}
137 	ret = wait_for_completion_timeout(&emac->cmd_complete, msecs_to_jiffies(100));
138 	if (!ret)
139 		netdev_err(emac->ndev, "cmd %x: completion timeout\n", cmd);
140 
141 	mutex_unlock(&emac->cmd_lock);
142 
143 	return ret;
144 free_desc:
145 	prueth_xmit_free(tx_chn, first_desc);
146 err_unlock:
147 	mutex_unlock(&emac->cmd_lock);
148 
149 	return ret;
150 }
151 
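/* Read the negotiated speed and duplex back from the RGMII config and
 * forward them to the firmware as part of the SPEED_DUPLEX command.
 */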
152 static void icssg_config_set_speed_sr1(struct prueth_emac *emac)
153 {
154 	u32 cmd = ICSSG_PSTATE_SPEED_DUPLEX_CMD_SR1, val;
155 	struct prueth *prueth = emac->prueth;
156 	int slice = prueth_emac_slice(emac);
157 
158 	val = icssg_rgmii_get_speed(prueth->miig_rt, slice);
159 	/* firmware expects the speed setting in bits 2-1 */
160 	val <<= 1;
161 	cmd |= val;
162 
163 	val = icssg_rgmii_get_fullduplex(prueth->miig_rt, slice);
164 	/* firmware expects the full duplex setting in bit 3 */
165 	val <<= 3;
166 	cmd |= val;
167 
168 	emac_send_command_sr1(emac, cmd);
169 }
170 
171 /* called back by the PHY layer when the link state of the HW port changes */
172 static void emac_adjust_link_sr1(struct net_device *ndev)
173 {
174 	struct prueth_emac *emac = netdev_priv(ndev);
175 	struct phy_device *phydev = ndev->phydev;
176 	struct prueth *prueth = emac->prueth;
177 	bool new_state = false;
178 	unsigned long flags;
179 
180 	if (phydev->link) {
181 		/* check the mode of operation - full/half duplex */
182 		if (phydev->duplex != emac->duplex) {
183 			new_state = true;
184 			emac->duplex = phydev->duplex;
185 		}
186 		if (phydev->speed != emac->speed) {
187 			new_state = true;
188 			emac->speed = phydev->speed;
189 		}
190 		if (!emac->link) {
191 			new_state = true;
192 			emac->link = 1;
193 		}
194 	} else if (emac->link) {
195 		new_state = true;
196 		emac->link = 0;
197 
198 		/* f/w should support 100 & 1000 */
199 		emac->speed = SPEED_1000;
200 
201 		/* half duplex may not be supported by f/w */
202 		emac->duplex = DUPLEX_FULL;
203 	}
204 
205 	if (new_state) {
206 		phy_print_status(phydev);
207 
208 		/* update RGMII and MII configuration based on PHY negotiated
209 		 * values
210 		 */
211 		if (emac->link) {
212 			/* Set the RGMII cfg for gig en and full duplex */
213 			icssg_update_rgmii_cfg(prueth->miig_rt, emac);
214 
215 			/* update the Tx IPG based on 100M/1G speed */
216 			spin_lock_irqsave(&emac->lock, flags);
217 			icssg_config_ipg(emac);
218 			spin_unlock_irqrestore(&emac->lock, flags);
219 			icssg_config_set_speed_sr1(emac);
220 		}
221 	}
222 
223 	if (emac->link) {
224 		/* reactivate the transmit queue */
225 		netif_tx_wake_all_queues(ndev);
226 	} else {
227 		netif_tx_stop_all_queues(ndev);
228 		prueth_cleanup_tx_ts(emac);
229 	}
230 }
231 
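/* Connect the PHY described in DT and drop advertised link modes that the
 * firmware or RGMII setup cannot handle (pause frames and most half-duplex
 * modes); MII links are limited to 100 Mbit/s.
 */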
232 static int emac_phy_connect(struct prueth_emac *emac)
233 {
234 	struct prueth *prueth = emac->prueth;
235 	struct net_device *ndev = emac->ndev;
236 	/* connect PHY */
237 	ndev->phydev = of_phy_connect(emac->ndev, emac->phy_node,
238 				      &emac_adjust_link_sr1, 0,
239 				      emac->phy_if);
240 	if (!ndev->phydev) {
241 		dev_err(prueth->dev, "couldn't connect to phy %s\n",
242 			emac->phy_node->full_name);
243 		return -ENODEV;
244 	}
245 
246 	if (!emac->half_duplex) {
247 		dev_dbg(prueth->dev, "half duplex mode is not supported\n");
248 		phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
249 	}
250 
251 	/* Remove 100 Mbit/s half-duplex since RGMII misreports the connection
252 	 * as full duplex */
253 	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
254 
255 	/* remove unsupported modes */
256 	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
257 	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
258 	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
259 
260 	if (emac->phy_if == PHY_INTERFACE_MODE_MII)
261 		phy_set_max_speed(ndev->phydev, SPEED_100);
262 
263 	return 0;
264 }
265 
266 /* get one packet from requested flow_id
267  *
268  * Return: skb pointer if a packet was found, NULL otherwise.
269  * The caller must free the returned skb.
270  */
271 static struct sk_buff *prueth_process_rx_mgm(struct prueth_emac *emac,
272 					     u32 flow_id)
273 {
274 	struct prueth_rx_chn *rx_chn = &emac->rx_mgm_chn;
275 	struct net_device *ndev = emac->ndev;
276 	struct cppi5_host_desc_t *desc_rx;
277 	struct sk_buff *skb, *new_skb;
278 	dma_addr_t desc_dma, buf_dma;
279 	u32 buf_dma_len, pkt_len;
280 	void **swdata;
281 	int ret;
282 
283 	ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
284 	if (ret) {
285 		if (ret != -ENODATA)
286 			netdev_err(ndev, "rx mgm pop: failed: %d\n", ret);
287 		return NULL;
288 	}
289 
290 	if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown */
291 		return NULL;
292 
293 	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
294 
295 	/* Work around an FW bug that sets an incorrect PSDATA size */
296 	if (cppi5_hdesc_get_psdata_size(desc_rx) != PRUETH_NAV_PS_DATA_SIZE) {
297 		cppi5_hdesc_update_psdata_size(desc_rx,
298 					       PRUETH_NAV_PS_DATA_SIZE);
299 	}
300 
301 	swdata = cppi5_hdesc_get_swdata(desc_rx);
302 	skb = *swdata;
303 	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
304 	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
305 
306 	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
307 	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
308 
309 	new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE);
310 	/* if allocation fails we drop the packet but push the
311 	 * descriptor back to the ring with old skb to prevent a stall
312 	 */
313 	if (!new_skb) {
314 		netdev_err(ndev,
315 			   "skb alloc failed, dropped mgm pkt from flow %d\n",
316 			   flow_id);
317 		new_skb = skb;
318 		skb = NULL;	/* return NULL */
319 	} else {
320 		/* return the filled skb */
321 		skb_put(skb, pkt_len);
322 	}
323 
324 	/* queue another DMA */
325 	ret = prueth_dma_rx_push(emac, new_skb, &emac->rx_mgm_chn);
326 	if (WARN_ON(ret < 0))
327 		dev_kfree_skb_any(new_skb);
328 
329 	return skb;
330 }
331 
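/* Handle a TX timestamp response from firmware: look up the skb stashed
 * under the response cookie, attach the hardware timestamp and release it.
 */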
332 static void prueth_tx_ts_sr1(struct prueth_emac *emac,
333 			     struct emac_tx_ts_response_sr1 *tsr)
334 {
335 	struct skb_shared_hwtstamps ssh;
336 	u32 hi_ts, lo_ts, cookie;
337 	struct sk_buff *skb;
338 	u64 ns;
339 
340 	hi_ts = le32_to_cpu(tsr->hi_ts);
341 	lo_ts = le32_to_cpu(tsr->lo_ts);
342 
343 	ns = (u64)hi_ts << 32 | lo_ts;
344 
345 	cookie = le32_to_cpu(tsr->cookie);
346 	if (cookie >= PRUETH_MAX_TX_TS_REQUESTS) {
347 		netdev_dbg(emac->ndev, "Invalid TX TS cookie 0x%x\n",
348 			   cookie);
349 		return;
350 	}
351 
352 	skb = emac->tx_ts_skb[cookie];
353 	emac->tx_ts_skb[cookie] = NULL;	/* free slot */
354 
355 	memset(&ssh, 0, sizeof(ssh));
356 	ssh.hwtstamp = ns_to_ktime(ns);
357 
358 	skb_tstamp_tx(skb, &ssh);
359 	dev_consume_skb_any(skb);
360 }
361 
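/* Threaded IRQ handler for the management timestamp flow: pop the response
 * packet and deliver the TX timestamp it carries.
 */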
362 static irqreturn_t prueth_rx_mgm_ts_thread_sr1(int irq, void *dev_id)
363 {
364 	struct prueth_emac *emac = dev_id;
365 	struct sk_buff *skb;
366 
367 	skb = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1);
368 	if (!skb)
369 		return IRQ_NONE;
370 
371 	prueth_tx_ts_sr1(emac, (void *)skb->data);
372 	dev_kfree_skb_any(skb);
373 
374 	return IRQ_HANDLED;
375 }
376 
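/* Threaded IRQ handler for the management response flow: complete the
 * pending command for shutdown and speed/duplex responses.
 */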
377 static irqreturn_t prueth_rx_mgm_rsp_thread(int irq, void *dev_id)
378 {
379 	struct prueth_emac *emac = dev_id;
380 	struct sk_buff *skb;
381 	u32 rsp;
382 
383 	skb = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_RESPONSE_SR1);
384 	if (!skb)
385 		return IRQ_NONE;
386 
387 	/* Process command response */
388 	rsp = le32_to_cpu(*(__le32 *)skb->data) & 0xffff0000;
389 	if (rsp == ICSSG_SHUTDOWN_CMD_SR1) {
390 		netdev_dbg(emac->ndev, "f/w Shutdown cmd resp %x\n", rsp);
391 		complete(&emac->cmd_complete);
392 	} else if (rsp == ICSSG_PSTATE_SPEED_DUPLEX_CMD_SR1) {
393 		netdev_dbg(emac->ndev, "f/w Speed/Duplex cmd rsp %x\n", rsp);
394 		complete(&emac->cmd_complete);
395 	}
396 
397 	dev_kfree_skb_any(skb);
398 
399 	return IRQ_HANDLED;
400 }
401 
402 static struct icssg_firmwares icssg_sr1_emac_firmwares[] = {
403 	{
404 		.pru = "ti-pruss/am65x-pru0-prueth-fw.elf",
405 		.rtu = "ti-pruss/am65x-rtu0-prueth-fw.elf",
406 	},
407 	{
408 		.pru = "ti-pruss/am65x-pru1-prueth-fw.elf",
409 		.rtu = "ti-pruss/am65x-rtu1-prueth-fw.elf",
410 	}
411 };
412 
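/* Write the slice configuration, then load and boot the PRU and RTU cores
 * with the SR1.0 EMAC firmware images. If the RTU fails to boot, the PRU
 * is shut down again.
 */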
413 static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
414 {
415 	struct icssg_firmwares *firmwares;
416 	struct device *dev = prueth->dev;
417 	int slice, ret;
418 
419 	firmwares = icssg_sr1_emac_firmwares;
420 
421 	slice = prueth_emac_slice(emac);
422 	if (slice < 0) {
423 		netdev_err(emac->ndev, "invalid port\n");
424 		return -EINVAL;
425 	}
426 
427 	icssg_config_sr1(prueth, emac, slice);
428 
429 	ret = rproc_set_firmware(prueth->pru[slice], firmwares[slice].pru);
430 	ret = rproc_boot(prueth->pru[slice]);
431 	if (ret) {
432 		dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
433 		return -EINVAL;
434 	}
435 
436 	ret = rproc_set_firmware(prueth->rtu[slice], firmwares[slice].rtu);
437 	ret = rproc_boot(prueth->rtu[slice]);
438 	if (ret) {
439 		dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
440 		goto halt_pru;
441 	}
442 
443 	emac->fw_running = 1;
444 	return 0;
445 
446 halt_pru:
447 	rproc_shutdown(prueth->pru[slice]);
448 
449 	return ret;
450 }
451 
452 /**
453  * emac_ndo_open - EMAC device open
454  * @ndev: network adapter device
455  *
456  * Called when system wants to start the interface.
457  *
458  * Return: 0 for a successful open, or appropriate error code
459  */
460 static int emac_ndo_open(struct net_device *ndev)
461 {
462 	struct prueth_emac *emac = netdev_priv(ndev);
463 	int num_data_chn = emac->tx_ch_num - 1;
464 	struct prueth *prueth = emac->prueth;
465 	int slice = prueth_emac_slice(emac);
466 	struct device *dev = prueth->dev;
467 	int max_rx_flows, rx_flow;
468 	int ret, i;
469 
470 	/* clear SMEM and MSMC settings for all slices */
471 	if (!prueth->emacs_initialized) {
472 		memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
473 		memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
474 	}
475 
476 	/* set h/w MAC as user might have re-configured */
477 	ether_addr_copy(emac->mac_addr, ndev->dev_addr);
478 
479 	icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
480 
481 	icssg_class_default(prueth->miig_rt, slice, 0, true);
482 
483 	/* Notify the stack of the actual queue counts. */
484 	ret = netif_set_real_num_tx_queues(ndev, num_data_chn);
485 	if (ret) {
486 		dev_err(dev, "cannot set real number of tx queues\n");
487 		return ret;
488 	}
489 
490 	init_completion(&emac->cmd_complete);
491 	ret = prueth_init_tx_chns(emac);
492 	if (ret) {
493 		dev_err(dev, "failed to init tx channel: %d\n", ret);
494 		return ret;
495 	}
496 
497 	max_rx_flows = PRUETH_MAX_RX_FLOWS_SR1;
498 	ret = prueth_init_rx_chns(emac, &emac->rx_chns, "rx",
499 				  max_rx_flows, PRUETH_MAX_RX_DESC);
500 	if (ret) {
501 		dev_err(dev, "failed to init rx channel: %d\n", ret);
502 		goto cleanup_tx;
503 	}
504 
505 	ret = prueth_init_rx_chns(emac, &emac->rx_mgm_chn, "rxmgm",
506 				  PRUETH_MAX_RX_MGM_FLOWS_SR1,
507 				  PRUETH_MAX_RX_MGM_DESC_SR1);
508 	if (ret) {
509 		dev_err(dev, "failed to init rx mgmt channel: %d\n",
510 			ret);
511 		goto cleanup_rx;
512 	}
513 
514 	ret = prueth_ndev_add_tx_napi(emac);
515 	if (ret)
516 		goto cleanup_rx_mgm;
517 
518 	/* we use only the highest priority flow for now i.e. @irq[3] */
519 	rx_flow = PRUETH_RX_FLOW_DATA_SR1;
520 	ret = request_irq(emac->rx_chns.irq[rx_flow], prueth_rx_irq,
521 			  IRQF_TRIGGER_HIGH, dev_name(dev), emac);
522 	if (ret) {
523 		dev_err(dev, "unable to request RX IRQ\n");
524 		goto cleanup_napi;
525 	}
526 
527 	ret = request_threaded_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_RESPONSE_SR1],
528 				   NULL, prueth_rx_mgm_rsp_thread,
529 				   IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
530 				   dev_name(dev), emac);
531 	if (ret) {
532 		dev_err(dev, "unable to request RX Management RSP IRQ\n");
533 		goto free_rx_irq;
534 	}
535 
536 	ret = request_threaded_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1],
537 				   NULL, prueth_rx_mgm_ts_thread_sr1,
538 				   IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
539 				   dev_name(dev), emac);
540 	if (ret) {
541 		dev_err(dev, "unable to request RX Management TS IRQ\n");
542 		goto free_rx_mgm_rsp_irq;
543 	}
544 
545 	/* reset and start PRU firmware */
546 	ret = prueth_emac_start(prueth, emac);
547 	if (ret)
548 		goto free_rx_mgmt_ts_irq;
549 
550 	icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
551 
552 	/* Prepare RX */
553 	ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
554 	if (ret)
555 		goto stop;
556 
557 	ret = prueth_prepare_rx_chan(emac, &emac->rx_mgm_chn, 64);
558 	if (ret)
559 		goto reset_rx_chn;
560 
561 	ret = k3_udma_glue_enable_rx_chn(emac->rx_mgm_chn.rx_chn);
562 	if (ret)
563 		goto reset_rx_chn;
564 
565 	ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
566 	if (ret)
567 		goto reset_rx_mgm_chn;
568 
569 	for (i = 0; i < emac->tx_ch_num; i++) {
570 		ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
571 		if (ret)
572 			goto reset_tx_chan;
573 	}
574 
575 	/* Enable NAPI in Tx and Rx direction */
576 	for (i = 0; i < emac->tx_ch_num; i++)
577 		napi_enable(&emac->tx_chns[i].napi_tx);
578 	napi_enable(&emac->napi_rx);
579 
580 	/* start PHY */
581 	phy_start(ndev->phydev);
582 
583 	prueth->emacs_initialized++;
584 
585 	queue_work(system_long_wq, &emac->stats_work.work);
586 
587 	return 0;
588 
589 reset_tx_chan:
590 	/* Since the interface is not yet up, there wouldn't be
591 	 * any SKB for completion. So pass false for free_skb
592 	 */
593 	prueth_reset_tx_chan(emac, i, false);
594 reset_rx_mgm_chn:
595 	prueth_reset_rx_chan(&emac->rx_mgm_chn,
596 			     PRUETH_MAX_RX_MGM_FLOWS_SR1, true);
597 reset_rx_chn:
598 	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
599 stop:
600 	prueth_emac_stop(emac);
601 free_rx_mgmt_ts_irq:
602 	free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1],
603 		 emac);
604 free_rx_mgm_rsp_irq:
605 	free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_RESPONSE_SR1],
606 		 emac);
607 free_rx_irq:
608 	free_irq(emac->rx_chns.irq[rx_flow], emac);
609 cleanup_napi:
610 	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
611 cleanup_rx_mgm:
612 	prueth_cleanup_rx_chns(emac, &emac->rx_mgm_chn,
613 			       PRUETH_MAX_RX_MGM_FLOWS_SR1);
614 cleanup_rx:
615 	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
616 cleanup_tx:
617 	prueth_cleanup_tx_chns(emac);
618 
619 	return ret;
620 }
621 
622 /**
623  * emac_ndo_stop - EMAC device stop
624  * @ndev: network adapter device
625  *
626  * Called when system wants to stop or down the interface.
627  *
628  * Return: Always 0 (Success)
629  */
630 static int emac_ndo_stop(struct net_device *ndev)
631 {
632 	struct prueth_emac *emac = netdev_priv(ndev);
633 	int rx_flow = PRUETH_RX_FLOW_DATA_SR1;
634 	struct prueth *prueth = emac->prueth;
635 	int max_rx_flows;
636 	int ret, i;
637 
638 	/* inform the upper layers. */
639 	netif_tx_stop_all_queues(ndev);
640 
641 	/* block packets from wire */
642 	if (ndev->phydev)
643 		phy_stop(ndev->phydev);
644 
645 	icssg_class_disable(prueth->miig_rt, prueth_emac_slice(emac));
646 
647 	emac_send_command_sr1(emac, ICSSG_SHUTDOWN_CMD_SR1);
648 
649 	atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
650 	/* ensure new tdown_cnt value is visible */
651 	smp_mb__after_atomic();
652 	/* tear down and disable UDMA channels */
653 	reinit_completion(&emac->tdown_complete);
654 	for (i = 0; i < emac->tx_ch_num; i++)
655 		k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);
656 
657 	ret = wait_for_completion_timeout(&emac->tdown_complete,
658 					  msecs_to_jiffies(1000));
659 	if (!ret)
660 		netdev_err(ndev, "tx teardown timeout\n");
661 
662 	prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
663 	for (i = 0; i < emac->tx_ch_num; i++)
664 		napi_disable(&emac->tx_chns[i].napi_tx);
665 
666 	max_rx_flows = PRUETH_MAX_RX_FLOWS_SR1;
667 	k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
668 
669 	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
670 	/* Teardown RX MGM channel */
671 	k3_udma_glue_tdown_rx_chn(emac->rx_mgm_chn.rx_chn, true);
672 	prueth_reset_rx_chan(&emac->rx_mgm_chn,
673 			     PRUETH_MAX_RX_MGM_FLOWS_SR1, true);
674 
675 	napi_disable(&emac->napi_rx);
676 
677 	/* Destroying the queued work in ndo_stop() */
678 	/* Cancel the delayed stats work */
679 
680 	/* stop PRUs */
681 	prueth_emac_stop(emac);
682 
683 	free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1], emac);
684 	free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_RESPONSE_SR1], emac);
685 	free_irq(emac->rx_chns.irq[rx_flow], emac);
686 	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
687 	prueth_cleanup_tx_chns(emac);
688 
689 	prueth_cleanup_rx_chns(emac, &emac->rx_mgm_chn, PRUETH_MAX_RX_MGM_FLOWS_SR1);
690 	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
691 
692 	prueth->emacs_initialized--;
693 
694 	return 0;
695 }
696 
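/* Program the classifier according to the interface flags: promiscuous,
 * all-multicast, or the default filter plus the multicast address list.
 */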
697 static void emac_ndo_set_rx_mode_sr1(struct net_device *ndev)
698 {
699 	struct prueth_emac *emac = netdev_priv(ndev);
700 	bool allmulti = ndev->flags & IFF_ALLMULTI;
701 	bool promisc = ndev->flags & IFF_PROMISC;
702 	struct prueth *prueth = emac->prueth;
703 	int slice = prueth_emac_slice(emac);
704 
705 	if (promisc) {
706 		icssg_class_promiscuous_sr1(prueth->miig_rt, slice);
707 		return;
708 	}
709 
710 	if (allmulti) {
711 		icssg_class_default(prueth->miig_rt, slice, 1, true);
712 		return;
713 	}
714 
715 	icssg_class_default(prueth->miig_rt, slice, 0, true);
716 	if (!netdev_mc_empty(ndev)) {
717 		/* program multicast address list into Classifier */
718 		icssg_class_add_mcast_sr1(prueth->miig_rt, slice, ndev);
719 	}
720 }
721 
722 static const struct net_device_ops emac_netdev_ops = {
723 	.ndo_open = emac_ndo_open,
724 	.ndo_stop = emac_ndo_stop,
725 	.ndo_start_xmit = icssg_ndo_start_xmit,
726 	.ndo_set_mac_address = eth_mac_addr,
727 	.ndo_validate_addr = eth_validate_addr,
728 	.ndo_tx_timeout = icssg_ndo_tx_timeout,
729 	.ndo_set_rx_mode = emac_ndo_set_rx_mode_sr1,
730 	.ndo_eth_ioctl = icssg_ndo_ioctl,
731 	.ndo_get_stats64 = icssg_ndo_get_stats64,
732 	.ndo_get_phys_port_name = icssg_ndo_get_phys_port_name,
733 };
734 
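/* Allocate and set up the net_device for one port: claim the slice DRAM,
 * create the command workqueue, parse phy-handle/fixed-link, phy-mode and
 * the MAC address from DT, and hook up the netdev/ethtool ops and RX NAPI.
 * The device itself is registered later in probe.
 */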
735 static int prueth_netdev_init(struct prueth *prueth,
736 			      struct device_node *eth_node)
737 {
738 	struct prueth_emac *emac;
739 	struct net_device *ndev;
740 	enum prueth_port port;
741 	enum prueth_mac mac;
742 	/* Only enable one TX channel due to timeouts when
743 	 * using multiple channels */
744 	int num_tx_chn = 1;
745 	int ret;
746 
747 	port = prueth_node_port(eth_node);
748 	if (port == PRUETH_PORT_INVALID)
749 		return -EINVAL;
750 
751 	mac = prueth_node_mac(eth_node);
752 	if (mac == PRUETH_MAC_INVALID)
753 		return -EINVAL;
754 
755 	ndev = alloc_etherdev_mq(sizeof(*emac), num_tx_chn);
756 	if (!ndev)
757 		return -ENOMEM;
758 
759 	emac = netdev_priv(ndev);
760 	emac->is_sr1 = 1;
761 	emac->prueth = prueth;
762 	emac->ndev = ndev;
763 	emac->port_id = port;
764 	emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
765 	if (!emac->cmd_wq) {
766 		ret = -ENOMEM;
767 		goto free_ndev;
768 	}
769 
770 	INIT_DELAYED_WORK(&emac->stats_work, icssg_stats_work_handler);
771 
772 	ret = pruss_request_mem_region(prueth->pruss,
773 				       port == PRUETH_PORT_MII0 ?
774 				       PRUSS_MEM_DRAM0 : PRUSS_MEM_DRAM1,
775 				       &emac->dram);
776 	if (ret) {
777 		dev_err(prueth->dev, "unable to get DRAM: %d\n", ret);
778 		ret = -ENOMEM;
779 		goto free_wq;
780 	}
781 
782 	/* SR1.0 uses a dedicated high priority channel
783 	 * to send commands to the firmware
784 	 */
785 	emac->tx_ch_num = 2;
786 
787 	SET_NETDEV_DEV(ndev, prueth->dev);
788 	spin_lock_init(&emac->lock);
789 	mutex_init(&emac->cmd_lock);
790 
791 	emac->phy_node = of_parse_phandle(eth_node, "phy-handle", 0);
792 	if (!emac->phy_node && !of_phy_is_fixed_link(eth_node)) {
793 		dev_err(prueth->dev, "couldn't find phy-handle\n");
794 		ret = -ENODEV;
795 		goto free;
796 	} else if (of_phy_is_fixed_link(eth_node)) {
797 		ret = of_phy_register_fixed_link(eth_node);
798 		if (ret) {
799 			ret = dev_err_probe(prueth->dev, ret,
800 					    "failed to register fixed-link phy\n");
801 			goto free;
802 		}
803 
804 		emac->phy_node = eth_node;
805 	}
806 
807 	ret = of_get_phy_mode(eth_node, &emac->phy_if);
808 	if (ret) {
809 		dev_err(prueth->dev, "could not get phy-mode property\n");
810 		goto free;
811 	}
812 
813 	if (emac->phy_if != PHY_INTERFACE_MODE_MII &&
814 	    !phy_interface_mode_is_rgmii(emac->phy_if)) {
815 		dev_err(prueth->dev, "PHY mode unsupported %s\n", phy_modes(emac->phy_if));
816 		ret = -EINVAL;
817 		goto free;
818 	}
819 
820 	/* AM65 SR2.0 has TX Internal delay always enabled by hardware
821 	 * and it is not possible to disable it. The switch statement below
822 	 * describes how the different PHY modes are handled given this
823 	 * hardware restriction.
824 	 */
825 	switch (emac->phy_if) {
826 	case PHY_INTERFACE_MODE_RGMII_ID:
827 		emac->phy_if = PHY_INTERFACE_MODE_RGMII_RXID;
828 		break;
829 	case PHY_INTERFACE_MODE_RGMII_TXID:
830 		emac->phy_if = PHY_INTERFACE_MODE_RGMII;
831 		break;
832 	case PHY_INTERFACE_MODE_RGMII:
833 	case PHY_INTERFACE_MODE_RGMII_RXID:
834 		dev_err(prueth->dev, "RGMII mode without TX delay is not supported");
835 		ret = -EINVAL;
836 		goto free;
837 	default:
838 		break;
839 	}
840 
841 	/* get mac address from DT and set private and netdev addr */
842 	ret = of_get_ethdev_address(eth_node, ndev);
843 	if (!is_valid_ether_addr(ndev->dev_addr)) {
844 		eth_hw_addr_random(ndev);
845 		dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n",
846 			 port, ndev->dev_addr);
847 	}
848 	ether_addr_copy(emac->mac_addr, ndev->dev_addr);
849 
850 	ndev->dev.of_node = eth_node;
851 	ndev->min_mtu = PRUETH_MIN_PKT_SIZE;
852 	ndev->max_mtu = PRUETH_MAX_MTU;
853 	ndev->netdev_ops = &emac_netdev_ops;
854 	ndev->ethtool_ops = &icssg_ethtool_ops;
855 	ndev->hw_features = NETIF_F_SG;
856 	ndev->features = ndev->hw_features;
857 
858 	netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
859 	prueth->emac[mac] = emac;
860 
861 	return 0;
862 
863 free:
864 	pruss_release_mem_region(prueth->pruss, &emac->dram);
865 free_wq:
866 	destroy_workqueue(emac->cmd_wq);
867 free_ndev:
868 	emac->ndev = NULL;
869 	prueth->emac[mac] = NULL;
870 	free_netdev(ndev);
871 
872 	return ret;
873 }
874 
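/* Probe: parse the ethernet-ports children, acquire the PRU/RTU cores,
 * PRUSS shared RAM, MSMC SRAM and both IEP instances, then create and
 * register a net_device for each available port.
 */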
875 static int prueth_probe(struct platform_device *pdev)
876 {
877 	struct device_node *eth_node, *eth_ports_node;
878 	struct device_node  *eth0_node = NULL;
879 	struct device_node  *eth1_node = NULL;
880 	struct device *dev = &pdev->dev;
881 	struct device_node *np;
882 	struct prueth *prueth;
883 	struct pruss *pruss;
884 	u32 msmc_ram_size;
885 	int i, ret;
886 
887 	np = dev->of_node;
888 
889 	prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
890 	if (!prueth)
891 		return -ENOMEM;
892 
893 	dev_set_drvdata(dev, prueth);
894 	prueth->pdev = pdev;
895 	prueth->pdata = *(const struct prueth_pdata *)device_get_match_data(dev);
896 
897 	prueth->dev = dev;
898 	eth_ports_node = of_get_child_by_name(np, "ethernet-ports");
899 	if (!eth_ports_node)
900 		return -ENOENT;
901 
902 	for_each_child_of_node(eth_ports_node, eth_node) {
903 		u32 reg;
904 
905 		if (strcmp(eth_node->name, "port"))
906 			continue;
907 		ret = of_property_read_u32(eth_node, "reg", &reg);
908 		if (ret < 0) {
909 			dev_err(dev, "%pOF error reading port_id %d\n",
910 				eth_node, ret);
911 		}
912 
913 		of_node_get(eth_node);
914 
915 		if (reg == 0) {
916 			eth0_node = eth_node;
917 			if (!of_device_is_available(eth0_node)) {
918 				of_node_put(eth0_node);
919 				eth0_node = NULL;
920 			}
921 		} else if (reg == 1) {
922 			eth1_node = eth_node;
923 			if (!of_device_is_available(eth1_node)) {
924 				of_node_put(eth1_node);
925 				eth1_node = NULL;
926 			}
927 		} else {
928 			dev_err(dev, "port reg should be 0 or 1\n");
929 		}
930 	}
931 
932 	of_node_put(eth_ports_node);
933 
934 	/* At least one node must be present and available else we fail */
935 	if (!eth0_node && !eth1_node) {
936 		dev_err(dev, "neither port0 nor port1 node available\n");
937 		return -ENODEV;
938 	}
939 
940 	if (eth0_node == eth1_node) {
941 		dev_err(dev, "port0 and port1 can't have same reg\n");
942 		of_node_put(eth0_node);
943 		return -ENODEV;
944 	}
945 
946 	prueth->eth_node[PRUETH_MAC0] = eth0_node;
947 	prueth->eth_node[PRUETH_MAC1] = eth1_node;
948 
949 	prueth->miig_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-g-rt");
950 	if (IS_ERR(prueth->miig_rt)) {
951 		dev_err(dev, "couldn't get ti,mii-g-rt syscon regmap\n");
952 		return -ENODEV;
953 	}
954 
955 	prueth->mii_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-rt");
956 	if (IS_ERR(prueth->mii_rt)) {
957 		dev_err(dev, "couldn't get ti,mii-rt syscon regmap\n");
958 		return -ENODEV;
959 	}
960 
961 	if (eth0_node) {
962 		ret = prueth_get_cores(prueth, ICSS_SLICE0, true);
963 		if (ret)
964 			goto put_cores;
965 	}
966 
967 	if (eth1_node) {
968 		ret = prueth_get_cores(prueth, ICSS_SLICE1, true);
969 		if (ret)
970 			goto put_cores;
971 	}
972 
973 	pruss = pruss_get(eth0_node ?
974 			  prueth->pru[ICSS_SLICE0] : prueth->pru[ICSS_SLICE1]);
975 	if (IS_ERR(pruss)) {
976 		ret = PTR_ERR(pruss);
977 		dev_err(dev, "unable to get pruss handle\n");
978 		goto put_cores;
979 	}
980 
981 	prueth->pruss = pruss;
982 
983 	ret = pruss_request_mem_region(pruss, PRUSS_MEM_SHRD_RAM2,
984 				       &prueth->shram);
985 	if (ret) {
986 		dev_err(dev, "unable to get PRUSS SHRD RAM2: %d\n", ret);
987 		goto put_pruss;
988 	}
989 
990 	prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
991 	if (!prueth->sram_pool) {
992 		dev_err(dev, "unable to get SRAM pool\n");
993 		ret = -ENODEV;
994 
995 		goto put_mem;
996 	}
997 
998 	msmc_ram_size = MSMC_RAM_SIZE_SR1;
999 
1000 	prueth->msmcram.va = (void __iomem *)gen_pool_alloc(prueth->sram_pool,
1001 							    msmc_ram_size);
1002 
1003 	if (!prueth->msmcram.va) {
1004 		ret = -ENOMEM;
1005 		dev_err(dev, "unable to allocate MSMC resource\n");
1006 		goto put_mem;
1007 	}
1008 	prueth->msmcram.pa = gen_pool_virt_to_phys(prueth->sram_pool,
1009 						   (unsigned long)prueth->msmcram.va);
1010 	prueth->msmcram.size = msmc_ram_size;
1011 	memset_io(prueth->msmcram.va, 0, msmc_ram_size);
1012 	dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa,
1013 		prueth->msmcram.va, prueth->msmcram.size);
1014 
1015 	prueth->iep0 = icss_iep_get_idx(np, 0);
1016 	if (IS_ERR(prueth->iep0)) {
1017 		ret = dev_err_probe(dev, PTR_ERR(prueth->iep0),
1018 				    "iep0 get failed\n");
1019 		goto free_pool;
1020 	}
1021 
1022 	prueth->iep1 = icss_iep_get_idx(np, 1);
1023 	if (IS_ERR(prueth->iep1)) {
1024 		ret = dev_err_probe(dev, PTR_ERR(prueth->iep1),
1025 				    "iep1 get failed\n");
1026 		goto put_iep0;
1027 	}
1028 
1029 	ret = icss_iep_init(prueth->iep0, NULL, NULL, 0);
1030 	if (ret) {
1031 		dev_err_probe(dev, ret, "failed to init iep0\n");
1032 		goto put_iep;
1033 	}
1034 
1035 	ret = icss_iep_init(prueth->iep1, NULL, NULL, 0);
1036 	if (ret) {
1037 		dev_err_probe(dev, ret, "failed to init iep1\n");
1038 		goto exit_iep0;
1039 	}
1040 
1041 	if (eth0_node) {
1042 		ret = prueth_netdev_init(prueth, eth0_node);
1043 		if (ret) {
1044 			dev_err_probe(dev, ret, "netdev init %s failed\n",
1045 				      eth0_node->name);
1046 			goto exit_iep;
1047 		}
1048 
1049 		prueth->emac[PRUETH_MAC0]->half_duplex =
1050 			of_property_read_bool(eth0_node, "ti,half-duplex-capable");
1051 
1052 		prueth->emac[PRUETH_MAC0]->iep = prueth->iep0;
1053 	}
1054 
1055 	if (eth1_node) {
1056 		ret = prueth_netdev_init(prueth, eth1_node);
1057 		if (ret) {
1058 			dev_err_probe(dev, ret, "netdev init %s failed\n",
1059 				      eth1_node->name);
1060 			goto netdev_exit;
1061 		}
1062 
1063 		prueth->emac[PRUETH_MAC1]->half_duplex =
1064 			of_property_read_bool(eth1_node, "ti,half-duplex-capable");
1065 
1066 		prueth->emac[PRUETH_MAC1]->iep = prueth->iep1;
1067 	}
1068 
1069 	/* register the network devices */
1070 	if (eth0_node) {
1071 		ret = register_netdev(prueth->emac[PRUETH_MAC0]->ndev);
1072 		if (ret) {
1073 			dev_err(dev, "can't register netdev for port MII0\n");
1074 			goto netdev_exit;
1075 		}
1076 
1077 		prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev;
1078 		emac_phy_connect(prueth->emac[PRUETH_MAC0]);
1079 		phy_attached_info(prueth->emac[PRUETH_MAC0]->ndev->phydev);
1080 	}
1081 
1082 	if (eth1_node) {
1083 		ret = register_netdev(prueth->emac[PRUETH_MAC1]->ndev);
1084 		if (ret) {
1085 			dev_err(dev, "can't register netdev for port MII1\n");
1086 			goto netdev_unregister;
1087 		}
1088 
1089 		prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev;
1090 		emac_phy_connect(prueth->emac[PRUETH_MAC1]);
1091 		phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev);
1092 	}
1093 
1094 	dev_info(dev, "TI PRU SR1.0 ethernet driver initialized: %s EMAC mode\n",
1095 		 (!eth0_node || !eth1_node) ? "single" : "dual");
1096 
1097 	if (eth1_node)
1098 		of_node_put(eth1_node);
1099 	if (eth0_node)
1100 		of_node_put(eth0_node);
1101 
1102 	return 0;
1103 
1104 netdev_unregister:
1105 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
1106 		if (!prueth->registered_netdevs[i])
1107 			continue;
1108 
1109 		if (prueth->emac[i]->ndev->phydev) {
1110 			phy_disconnect(prueth->emac[i]->ndev->phydev);
1111 			prueth->emac[i]->ndev->phydev = NULL;
1112 		}
1113 		unregister_netdev(prueth->registered_netdevs[i]);
1114 	}
1115 
1116 netdev_exit:
1117 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
1118 		eth_node = prueth->eth_node[i];
1119 		if (!eth_node)
1120 			continue;
1121 
1122 		prueth_netdev_exit(prueth, eth_node);
1123 	}
1124 
1125 exit_iep:
1126 	icss_iep_exit(prueth->iep1);
1127 exit_iep0:
1128 	icss_iep_exit(prueth->iep0);
1129 
1130 put_iep:
1131 	icss_iep_put(prueth->iep1);
1132 
1133 put_iep0:
1134 	icss_iep_put(prueth->iep0);
1135 	prueth->iep0 = NULL;
1136 	prueth->iep1 = NULL;
1137 
1138 free_pool:
1139 	gen_pool_free(prueth->sram_pool,
1140 		      (unsigned long)prueth->msmcram.va, msmc_ram_size);
1141 
1142 put_mem:
1143 	pruss_release_mem_region(prueth->pruss, &prueth->shram);
1144 
1145 put_pruss:
1146 	pruss_put(prueth->pruss);
1147 
1148 put_cores:
1149 	if (eth1_node) {
1150 		prueth_put_cores(prueth, ICSS_SLICE1);
1151 		of_node_put(eth1_node);
1152 	}
1153 
1154 	if (eth0_node) {
1155 		prueth_put_cores(prueth, ICSS_SLICE0);
1156 		of_node_put(eth0_node);
1157 	}
1158 
1159 	return ret;
1160 }
1161 
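/* Undo probe in reverse order: unregister the net devices and disconnect
 * their PHYs, tear down per-port state, the IEPs, MSMC SRAM and shared RAM,
 * and release the PRUSS handle and PRU/RTU cores.
 */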
1162 static void prueth_remove(struct platform_device *pdev)
1163 {
1164 	struct prueth *prueth = platform_get_drvdata(pdev);
1165 	struct device_node *eth_node;
1166 	int i;
1167 
1168 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
1169 		if (!prueth->registered_netdevs[i])
1170 			continue;
1171 		phy_stop(prueth->emac[i]->ndev->phydev);
1172 		phy_disconnect(prueth->emac[i]->ndev->phydev);
1173 		prueth->emac[i]->ndev->phydev = NULL;
1174 		unregister_netdev(prueth->registered_netdevs[i]);
1175 	}
1176 
1177 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
1178 		eth_node = prueth->eth_node[i];
1179 		if (!eth_node)
1180 			continue;
1181 
1182 		prueth_netdev_exit(prueth, eth_node);
1183 	}
1184 
1185 	icss_iep_exit(prueth->iep1);
1186 	icss_iep_exit(prueth->iep0);
1187 
1188 	icss_iep_put(prueth->iep1);
1189 	icss_iep_put(prueth->iep0);
1190 
1191 	gen_pool_free(prueth->sram_pool,
1192 		      (unsigned long)prueth->msmcram.va,
1193 		      MSMC_RAM_SIZE_SR1);
1194 
1195 	pruss_release_mem_region(prueth->pruss, &prueth->shram);
1196 
1197 	pruss_put(prueth->pruss);
1198 
1199 	if (prueth->eth_node[PRUETH_MAC1])
1200 		prueth_put_cores(prueth, ICSS_SLICE1);
1201 
1202 	if (prueth->eth_node[PRUETH_MAC0])
1203 		prueth_put_cores(prueth, ICSS_SLICE0);
1204 }
1205 
1206 static const struct prueth_pdata am654_sr1_icssg_pdata = {
1207 	.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
1208 };
1209 
1210 static const struct of_device_id prueth_dt_match[] = {
1211 	{ .compatible = "ti,am654-sr1-icssg-prueth", .data = &am654_sr1_icssg_pdata },
1212 	{ /* sentinel */ }
1213 };
1214 MODULE_DEVICE_TABLE(of, prueth_dt_match);
1215 
1216 static struct platform_driver prueth_driver = {
1217 	.probe = prueth_probe,
1218 	.remove_new = prueth_remove,
1219 	.driver = {
1220 		.name = "icssg-prueth-sr1",
1221 		.of_match_table = prueth_dt_match,
1222 		.pm = &prueth_dev_pm_ops,
1223 	},
1224 };
1225 module_platform_driver(prueth_driver);
1226 
1227 MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
1228 MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
1229 MODULE_AUTHOR("Diogo Ivo <diogo.ivo@siemens.com>");
1230 MODULE_DESCRIPTION(PRUETH_MODULE_DESCRIPTION);
1231 MODULE_LICENSE("GPL");
1232