xref: /linux/drivers/net/ethernet/ti/icssg/icssg_prueth.c (revision a202f24b08587021a39eade5aa5444d5714689fb)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 /* Texas Instruments ICSSG Ethernet Driver
4  *
5  * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
6  *
7  */
8 
9 #include <linux/bitops.h>
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/dma/ti-cppi5.h>
14 #include <linux/etherdevice.h>
15 #include <linux/genalloc.h>
16 #include <linux/if_hsr.h>
17 #include <linux/if_vlan.h>
18 #include <linux/interrupt.h>
19 #include <linux/io-64-nonatomic-hi-lo.h>
20 #include <linux/kernel.h>
21 #include <linux/mfd/syscon.h>
22 #include <linux/module.h>
23 #include <linux/of.h>
24 #include <linux/of_mdio.h>
25 #include <linux/of_net.h>
26 #include <linux/platform_device.h>
27 #include <linux/phy.h>
28 #include <linux/property.h>
29 #include <linux/remoteproc/pruss.h>
30 #include <linux/regmap.h>
31 #include <linux/remoteproc.h>
32 #include <net/switchdev.h>
33 
34 #include "icssg_prueth.h"
35 #include "icssg_mii_rt.h"
36 #include "icssg_switchdev.h"
37 #include "../k3-cppi-desc-pool.h"
38 
39 #define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG Ethernet driver"
40 
41 #define DEFAULT_VID		1
42 #define DEFAULT_PORT_MASK	1
43 #define DEFAULT_UNTAG_MASK	1
44 
45 #define NETIF_PRUETH_HSR_OFFLOAD_FEATURES	(NETIF_F_HW_HSR_FWD | \
46 						 NETIF_F_HW_HSR_DUP | \
47 						 NETIF_F_HW_HSR_TAG_INS | \
48 						 NETIF_F_HW_HSR_TAG_RM)
49 
50 /* CTRLMMR_ICSSG_RGMII_CTRL register bits */
51 #define ICSSG_CTRL_RGMII_ID_MODE                BIT(24)
52 
/* Pop one TX timestamp response from the firmware queue of this slice.
 *
 * @emac: port to query
 * @rsp: filled with the response read from shared memory on success
 *
 * Return: 0 on success, or the negative value returned by
 * icssg_queue_pop() when no response is pending.
 */
static int emac_get_tx_ts(struct prueth_emac *emac,
			  struct emac_tx_ts_response *rsp)
{
	struct prueth *prueth = emac->prueth;
	int slice = prueth_emac_slice(emac);
	int addr;

	/* addr is the shared-RAM offset of the response buffer */
	addr = icssg_queue_pop(prueth, slice == 0 ?
			       ICSSG_TS_POP_SLICE0 : ICSSG_TS_POP_SLICE1);
	if (addr < 0)
		return addr;

	memcpy_fromio(rsp, prueth->shram.va + addr, sizeof(*rsp));
	/* return the buffer back to the pool */
	icssg_queue_push(prueth, slice == 0 ?
			 ICSSG_TS_PUSH_SLICE0 : ICSSG_TS_PUSH_SLICE1, addr);

	return 0;
}
72 
/* Drain all pending TX timestamp responses from the firmware and deliver
 * them to the stack via skb_tstamp_tx().
 *
 * Each response carries a "cookie" indexing the skb that was parked in
 * emac->tx_ts_skb[] when the timestamp was requested; the slot is cleared
 * here and the skb consumed.  Runs from the TX TS threaded IRQ handler.
 */
static void tx_ts_work(struct prueth_emac *emac)
{
	struct skb_shared_hwtstamps ssh;
	struct emac_tx_ts_response tsr;
	struct sk_buff *skb;
	int ret = 0;
	u32 hi_sw;
	u64 ns;

	/* There may be more than one pending requests */
	while (1) {
		ret = emac_get_tx_ts(emac, &tsr);
		if (ret) /* nothing more */
			break;

		/* cookie must index a slot that still holds an skb */
		if (tsr.cookie >= PRUETH_MAX_TX_TS_REQUESTS ||
		    !emac->tx_ts_skb[tsr.cookie]) {
			netdev_err(emac->ndev, "Invalid TX TS cookie 0x%x\n",
				   tsr.cookie);
			break;
		}

		skb = emac->tx_ts_skb[tsr.cookie];
		emac->tx_ts_skb[tsr.cookie] = NULL;	/* free slot */
		if (!skb) {
			netdev_err(emac->ndev, "Driver Bug! got NULL skb\n");
			break;
		}

		/* combine the firmware's hi/lo timestamp words with the
		 * software-maintained upper word to build the 64-bit ns value
		 */
		hi_sw = readl(emac->prueth->shram.va +
			      TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
		ns = icssg_ts_to_ns(hi_sw, tsr.hi_ts, tsr.lo_ts,
				    IEP_DEFAULT_CYCLE_TIME_NS);

		memset(&ssh, 0, sizeof(ssh));
		ssh.hwtstamp = ns_to_ktime(ns);

		skb_tstamp_tx(skb, &ssh);
		dev_consume_skb_any(skb);

		if (atomic_dec_and_test(&emac->tx_ts_pending))	/* no more? */
			break;
	}
}
117 
118 static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id)
119 {
120 	struct prueth_emac *emac = dev_id;
121 
122 	/* currently only TX timestamp is being returned */
123 	tx_ts_work(emac);
124 
125 	return IRQ_HANDLED;
126 }
127 
/* Load @fw_name into @rproc and boot it.
 *
 * Return: 0 on success, negative error code from either step.
 */
static int prueth_start(struct rproc *rproc, const char *fw_name)
{
	int ret = rproc_set_firmware(rproc, fw_name);

	return ret ? ret : rproc_boot(rproc);
}
137 
/* Thin wrapper so boot (prueth_start) and shutdown paths read symmetrically */
static void prueth_shutdown(struct rproc *rproc)
{
	rproc_shutdown(rproc);
}
142 
/* Boot the PRU, RTU and TX_PRU cores of both slices with the firmware set
 * matching the current mode (switch, HSR offload, or dual-EMAC).
 *
 * On failure every core that was already booted is shut down again, so
 * the caller sees an all-or-nothing result.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int prueth_emac_start(struct prueth *prueth)
{
	struct icssg_firmwares *firmwares;
	struct device *dev = prueth->dev;
	int ret, slice;

	if (prueth->is_switch_mode)
		firmwares = prueth->icssg_switch_firmwares;
	else if (prueth->is_hsr_offload_mode)
		firmwares = prueth->icssg_hsr_firmwares;
	else
		firmwares = prueth->icssg_emac_firmwares;

	for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
		ret = prueth_start(prueth->pru[slice], firmwares[slice].pru);
		if (ret) {
			dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
			goto unwind_slices;
		}

		ret = prueth_start(prueth->rtu[slice], firmwares[slice].rtu);
		if (ret) {
			dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
			rproc_shutdown(prueth->pru[slice]);
			goto unwind_slices;
		}

		ret = prueth_start(prueth->txpru[slice], firmwares[slice].txpru);
		if (ret) {
			dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
			rproc_shutdown(prueth->rtu[slice]);
			rproc_shutdown(prueth->pru[slice]);
			goto unwind_slices;
		}
	}

	return 0;

unwind_slices:
	/* the failing slice cleaned up after itself above; undo only the
	 * earlier, fully-booted slices
	 */
	while (--slice >= 0) {
		prueth_shutdown(prueth->txpru[slice]);
		prueth_shutdown(prueth->rtu[slice]);
		prueth_shutdown(prueth->pru[slice]);
	}

	return ret;
}
190 
191 static void prueth_emac_stop(struct prueth *prueth)
192 {
193 	int slice;
194 
195 	for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
196 		prueth_shutdown(prueth->txpru[slice]);
197 		prueth_shutdown(prueth->rtu[slice]);
198 		prueth_shutdown(prueth->pru[slice]);
199 	}
200 }
201 
/* One-time bring-up of the resources shared by both ports: shared/MSMC
 * memories, classifier defaults, per-slice firmware configuration, the
 * firmware cores themselves and the IEP (PTP) module.
 *
 * Called from emac_ndo_open() only when no port is up yet
 * (prueth->emacs_initialized == 0).
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int prueth_emac_common_start(struct prueth *prueth)
{
	struct prueth_emac *emac;
	int ret = 0;
	int slice;

	if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
		return -EINVAL;

	/* clear SMEM and MSMC settings for all slices */
	memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
	memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);

	icssg_class_default(prueth->miig_rt, ICSS_SLICE0, 0, false);
	icssg_class_default(prueth->miig_rt, ICSS_SLICE1, 0, false);

	if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
		icssg_init_fw_offload_mode(prueth);
	else
		icssg_init_emac_mode(prueth);

	/* write per-slice firmware configuration for every registered port */
	for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
		emac = prueth->emac[slice];
		if (!emac)
			continue;
		ret = icssg_config(prueth, emac, slice);
		if (ret)
			goto disable_class;
	}

	ret = prueth_emac_start(prueth);
	if (ret)
		goto disable_class;

	/* IEP is shared; initialize it through whichever port exists */
	emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
	       prueth->emac[ICSS_SLICE1];
	ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
			    emac, IEP_DEFAULT_CYCLE_TIME_NS);
	if (ret) {
		dev_err(prueth->dev, "Failed to initialize IEP module\n");
		goto stop_pruss;
	}

	return 0;

stop_pruss:
	prueth_emac_stop(prueth);

disable_class:
	icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
	icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);

	return ret;
}
256 
/* Counterpart of prueth_emac_common_start(): disable the classifiers,
 * stop the firmware cores and tear down the shared IEP module.
 *
 * Return: 0 on success, -EINVAL if no port is registered at all.
 */
static int prueth_emac_common_stop(struct prueth *prueth)
{
	struct prueth_emac *emac;

	if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
		return -EINVAL;

	icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
	icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);

	prueth_emac_stop(prueth);

	/* IEP is shared; exit it through whichever port exists */
	emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
	       prueth->emac[ICSS_SLICE1];
	icss_iep_exit(emac->iep);

	return 0;
}
275 
/* called back by PHY layer if there is change in link state of hw port */
static void emac_adjust_link(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	struct prueth *prueth = emac->prueth;
	bool new_state = false;
	unsigned long flags;

	if (phydev->link) {
		/* check the mode of operation - full/half duplex */
		if (phydev->duplex != emac->duplex) {
			new_state = true;
			emac->duplex = phydev->duplex;
		}
		if (phydev->speed != emac->speed) {
			new_state = true;
			emac->speed = phydev->speed;
		}
		if (!emac->link) {
			new_state = true;
			emac->link = 1;
		}
	} else if (emac->link) {
		new_state = true;
		emac->link = 0;

		/* f/w should support 100 & 1000 */
		emac->speed = SPEED_1000;

		/* half duplex may not be supported by f/w */
		emac->duplex = DUPLEX_FULL;
	}

	if (new_state) {
		phy_print_status(phydev);

		/* update RGMII and MII configuration based on PHY negotiated
		 * values
		 */
		if (emac->link) {
			if (emac->duplex == DUPLEX_HALF)
				icssg_config_half_duplex(emac);
			/* Set the RGMII cfg for gig en and full duplex */
			icssg_update_rgmii_cfg(prueth->miig_rt, emac);

			/* update the Tx IPG based on 100M/1G speed */
			spin_lock_irqsave(&emac->lock, flags);
			icssg_config_ipg(emac);
			spin_unlock_irqrestore(&emac->lock, flags);
			icssg_config_set_speed(emac);
			icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);

		} else {
			icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
		}
	}

	/* keep the stack's view of the queues in sync with the link */
	if (emac->link) {
		/* reactivate the transmit queue */
		netif_tx_wake_all_queues(ndev);
	} else {
		netif_tx_stop_all_queues(ndev);
		/* drop any TX timestamp requests still in flight */
		prueth_cleanup_tx_ts(emac);
	}
}
342 
343 static enum hrtimer_restart emac_rx_timer_callback(struct hrtimer *timer)
344 {
345 	struct prueth_emac *emac =
346 			container_of(timer, struct prueth_emac, rx_hrtimer);
347 	int rx_flow = PRUETH_RX_FLOW_DATA;
348 
349 	enable_irq(emac->rx_chns.irq[rx_flow]);
350 	return HRTIMER_NORESTART;
351 }
352 
/* Connect the port to its PHY (from the DT phy_node) and trim the
 * advertised link modes to what the firmware supports.
 *
 * Return: 0 on success, -ENODEV if the PHY cannot be connected.
 */
static int emac_phy_connect(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	struct net_device *ndev = emac->ndev;
	/* connect PHY */
	ndev->phydev = of_phy_connect(emac->ndev, emac->phy_node,
				      &emac_adjust_link, 0,
				      emac->phy_if);
	if (!ndev->phydev) {
		dev_err(prueth->dev, "couldn't connect to phy %s\n",
			emac->phy_node->full_name);
		return -ENODEV;
	}

	if (!emac->half_duplex) {
		dev_dbg(prueth->dev, "half duplex mode is not supported\n");
		phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
		phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	}

	/* remove unsupported modes */
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);

	/* MII cannot do gigabit */
	if (emac->phy_if == PHY_INTERFACE_MODE_MII)
		phy_set_max_speed(ndev->phydev, SPEED_100);

	return 0;
}
383 
/* PHC gettime callback: assemble a 64-bit nanosecond timestamp from the
 * IEP hardware counter plus two firmware-maintained words in shared RAM
 * (the software high count and its rollover count).
 *
 * The high words are sampled before and after reading the low count and
 * the read is retried until both samples agree (seqlock-style), so a
 * concurrent firmware update of the high words cannot tear the result.
 * IRQs are disabled locally to keep the read window short.
 */
static u64 prueth_iep_gettime(void *clockops_data, struct ptp_system_timestamp *sts)
{
	u32 hi_rollover_count, hi_rollover_count_r;
	struct prueth_emac *emac = clockops_data;
	struct prueth *prueth = emac->prueth;
	void __iomem *fw_hi_r_count_addr;
	void __iomem *fw_count_hi_addr;
	u32 iepcount_hi, iepcount_hi_r;
	unsigned long flags;
	u32 iepcount_lo;
	u64 ts = 0;

	fw_count_hi_addr = prueth->shram.va + TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET;
	fw_hi_r_count_addr = prueth->shram.va + TIMESYNC_FW_WC_HI_ROLLOVER_COUNT_OFFSET;

	local_irq_save(flags);
	do {
		iepcount_hi = icss_iep_get_count_hi(emac->iep);
		iepcount_hi += readl(fw_count_hi_addr);
		hi_rollover_count = readl(fw_hi_r_count_addr);
		/* bracket the low-count read for PTP system timestamping */
		ptp_read_system_prets(sts);
		iepcount_lo = icss_iep_get_count_low(emac->iep);
		ptp_read_system_postts(sts);

		iepcount_hi_r = icss_iep_get_count_hi(emac->iep);
		iepcount_hi_r += readl(fw_count_hi_addr);
		hi_rollover_count_r = readl(fw_hi_r_count_addr);
	} while ((iepcount_hi_r != iepcount_hi) ||
		 (hi_rollover_count != hi_rollover_count_r));
	local_irq_restore(flags);

	/* rollover count extends the 23-bit combined high count; the result
	 * is a cycle count, scaled to ns by the IEP cycle time plus the
	 * intra-cycle low count
	 */
	ts = ((u64)hi_rollover_count) << 23 | iepcount_hi;
	ts = ts * (u64)IEP_DEFAULT_CYCLE_TIME_NS + iepcount_lo;

	return ts;
}
420 
/* PHC settime callback: hand the firmware a "set clock" descriptor in
 * shared RAM (cycle count + intra-cycle count for @ns) and poll for its
 * acknowledgment.
 *
 * The firmware applies the new time at a safe point in its cycle; it is
 * expected to ack within 2-3 ms, so we poll up to ~5 times with a
 * 0.5-1 ms sleep and log an error on timeout.
 */
static void prueth_iep_settime(void *clockops_data, u64 ns)
{
	struct icssg_setclock_desc __iomem *sc_descp;
	struct prueth_emac *emac = clockops_data;
	struct icssg_setclock_desc sc_desc;
	u64 cyclecount;
	u32 cycletime;
	int timeout;

	sc_descp = emac->prueth->shram.va + TIMESYNC_FW_WC_SETCLOCK_DESC_OFFSET;

	cycletime = IEP_DEFAULT_CYCLE_TIME_NS;
	cyclecount = ns / cycletime;

	memset(&sc_desc, 0, sizeof(sc_desc));
	sc_desc.margin = cycletime - 1000;
	sc_desc.cyclecounter0_set = cyclecount & GENMASK(31, 0);
	sc_desc.cyclecounter1_set = (cyclecount & GENMASK(63, 32)) >> 32;
	sc_desc.iepcount_set = ns % cycletime;
	/* Count from 0 to (cycle time) - emac->iep->def_inc */
	sc_desc.CMP0_current = cycletime - emac->iep->def_inc;

	memcpy_toio(sc_descp, &sc_desc, sizeof(sc_desc));

	/* write the request flag last, after the descriptor is in place */
	writeb(1, &sc_descp->request);

	timeout = 5;	/* fw should take 2-3 ms */
	while (timeout--) {
		if (readb(&sc_descp->acknowledgment))
			return;

		usleep_range(500, 1000);
	}

	dev_err(emac->prueth->dev, "settime timeout\n");
}
457 
/* PHC periodic-output (PPS/PEROUT) callback: translate the requested
 * period into a cycle-count reduction factor plus an intra-cycle CMP
 * offset, program them into firmware shared RAM, and return the CMP
 * value (shifted for shadow mode) through @cmp.
 *
 * Disabling (@on == 0) is a no-op here.
 *
 * Return: 0 on success, -ENXIO if the period is shorter than the IEP
 * cycle time.
 */
static int prueth_perout_enable(void *clockops_data,
				struct ptp_perout_request *req, int on,
				u64 *cmp)
{
	struct prueth_emac *emac = clockops_data;
	u32 reduction_factor = 0, offset = 0;
	struct timespec64 ts;
	u64 current_cycle;
	u64 start_offset;
	u64 ns_period;

	if (!on)
		return 0;

	/* Any firmware specific stuff for PPS/PEROUT handling */
	ts.tv_sec = req->period.sec;
	ts.tv_nsec = req->period.nsec;
	ns_period = timespec64_to_ns(&ts);

	/* f/w doesn't support period less than cycle time */
	if (ns_period < IEP_DEFAULT_CYCLE_TIME_NS)
		return -ENXIO;

	reduction_factor = ns_period / IEP_DEFAULT_CYCLE_TIME_NS;
	offset = ns_period % IEP_DEFAULT_CYCLE_TIME_NS;

	/* f/w requires some margin within a cycle so CMP can trigger
	 * after SYNC is enabled; enforce a 5 uS minimum offset
	 */
	if (offset < 5 * NSEC_PER_USEC)
		offset = 5 * NSEC_PER_USEC;

	/* if offset is close to cycle time then we will miss
	 * the CMP event for last tick when IEP rolls over.
	 * In normal mode, IEP tick is 4ns.
	 * In slow compensation it could be 0ns or 8ns at
	 * every slow compensation cycle.
	 */
	if (offset > IEP_DEFAULT_CYCLE_TIME_NS - 8)
		offset = IEP_DEFAULT_CYCLE_TIME_NS - 8;

	/* we're in shadow mode so need to set upper 32-bits */
	*cmp = (u64)offset << 32;

	writel(reduction_factor, emac->prueth->shram.va +
		TIMESYNC_FW_WC_SYNCOUT_REDUCTION_FACTOR_OFFSET);

	current_cycle = icssg_read_time(emac->prueth->shram.va +
					TIMESYNC_FW_WC_CYCLECOUNT_OFFSET);

	/* Rounding of current_cycle count to next second */
	start_offset = roundup(current_cycle, MSEC_PER_SEC);

	hi_lo_writeq(start_offset, emac->prueth->shram.va +
		     TIMESYNC_FW_WC_SYNCOUT_START_TIME_CYCLECOUNT_OFFSET);

	return 0;
}
516 
/* Clock operations handed to the shared ICSS IEP driver: PHC get/set
 * time and periodic-output programming, implemented on top of the ICSSG
 * firmware's shared-RAM interface.
 */
const struct icss_iep_clockops prueth_iep_clockops = {
	.settime = prueth_iep_settime,
	.gettime = prueth_iep_gettime,
	.perout_enable = prueth_perout_enable,
};
522 
523 static int prueth_create_xdp_rxqs(struct prueth_emac *emac)
524 {
525 	struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
526 	struct page_pool *pool = emac->rx_chns.pg_pool;
527 	int ret;
528 
529 	ret = xdp_rxq_info_reg(rxq, emac->ndev, 0, emac->napi_rx.napi_id);
530 	if (ret)
531 		return ret;
532 
533 	ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
534 	if (ret)
535 		xdp_rxq_info_unreg(rxq);
536 
537 	return ret;
538 }
539 
540 static void prueth_destroy_xdp_rxqs(struct prueth_emac *emac)
541 {
542 	struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
543 
544 	if (!xdp_rxq_info_is_reg(rxq))
545 		return;
546 
547 	xdp_rxq_info_unreg(rxq);
548 }
549 
550 static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr)
551 {
552 	struct net_device *real_dev;
553 	struct prueth_emac *emac;
554 	int port_mask;
555 	u8 vlan_id;
556 
557 	vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC;
558 	real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
559 	emac = netdev_priv(real_dev);
560 
561 	port_mask = BIT(emac->port_id) | icssg_fdb_lookup(emac, addr, vlan_id);
562 	icssg_fdb_add_del(emac, addr, vlan_id, port_mask, true);
563 	icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, true);
564 
565 	return 0;
566 }
567 
568 static int icssg_prueth_del_mcast(struct net_device *ndev, const u8 *addr)
569 {
570 	struct net_device *real_dev;
571 	struct prueth_emac *emac;
572 	int other_port_mask;
573 	int port_mask;
574 	u8 vlan_id;
575 
576 	vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC;
577 	real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
578 	emac = netdev_priv(real_dev);
579 
580 	port_mask = BIT(emac->port_id);
581 	other_port_mask = port_mask ^ icssg_fdb_lookup(emac, addr, vlan_id);
582 
583 	icssg_fdb_add_del(emac, addr, vlan_id, port_mask, false);
584 	icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, false);
585 
586 	if (other_port_mask) {
587 		icssg_fdb_add_del(emac, addr, vlan_id, other_port_mask, true);
588 		icssg_vtbl_modify(emac, vlan_id, other_port_mask,
589 				  other_port_mask, true);
590 	}
591 
592 	return 0;
593 }
594 
/* Add or remove an HSR multicast FDB entry spanning all three ports
 * (host + both slave ports) with the BLOCK flag set; on add, also make
 * this port a member of @vid in the VLAN table.
 *
 * NOTE(review): the VLAN table is only touched on add, not on delete —
 * presumably membership removal is handled elsewhere; verify.
 */
static void icssg_prueth_hsr_fdb_add_del(struct prueth_emac *emac,
					 const u8 *addr, u8 vid, bool add)
{
	icssg_fdb_add_del(emac, addr, vid,
			  ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
			  ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
			  ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
			  ICSSG_FDB_ENTRY_BLOCK, add);

	if (add)
		icssg_vtbl_modify(emac, vid, BIT(emac->port_id),
				  BIT(emac->port_id), add);
}
608 
609 static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
610 {
611 	struct net_device *real_dev;
612 	struct prueth_emac *emac;
613 	u8 vlan_id, i;
614 
615 	vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_HSR;
616 	real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
617 
618 	if (is_hsr_master(real_dev)) {
619 		for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
620 			emac = netdev_priv(hsr_get_port_ndev(real_dev, i));
621 			if (!emac)
622 				return -EINVAL;
623 			icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
624 						     true);
625 		}
626 	} else {
627 		emac = netdev_priv(real_dev);
628 		icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, true);
629 	}
630 
631 	return 0;
632 }
633 
634 static int icssg_prueth_hsr_del_mcast(struct net_device *ndev, const u8 *addr)
635 {
636 	struct net_device *real_dev;
637 	struct prueth_emac *emac;
638 	u8 vlan_id, i;
639 
640 	vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_HSR;
641 	real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
642 
643 	if (is_hsr_master(real_dev)) {
644 		for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
645 			emac = netdev_priv(hsr_get_port_ndev(real_dev, i));
646 			if (!emac)
647 				return -EINVAL;
648 			icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
649 						     false);
650 		}
651 	} else {
652 		emac = netdev_priv(real_dev);
653 		icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, false);
654 	}
655 
656 	return 0;
657 }
658 
/* vlan_for_each() callback: sync the multicast list of VLAN device @vdev
 * (VID @vid) into the hardware FDB/VLAN tables of the port passed via
 * @args (a struct prueth_emac *).
 *
 * Return: always 0 (VID 0 / missing device are silently skipped).
 */
static int icssg_update_vlan_mcast(struct net_device *vdev, int vid,
				   void *args)
{
	struct prueth_emac *emac = args;

	if (!vdev || !vid)
		return 0;

	/* snapshot the VLAN device's mc list under its addr lock */
	netif_addr_lock_bh(vdev);
	__hw_addr_sync_multiple(&emac->vlan_mcast_list[vid], &vdev->mc,
				vdev->addr_len);
	netif_addr_unlock_bh(vdev);

	/* push adds/removes to hardware with the mode-appropriate helpers */
	if (emac->prueth->is_hsr_offload_mode)
		__hw_addr_sync_dev(&emac->vlan_mcast_list[vid], vdev,
				   icssg_prueth_hsr_add_mcast,
				   icssg_prueth_hsr_del_mcast);
	else
		__hw_addr_sync_dev(&emac->vlan_mcast_list[vid], vdev,
				   icssg_prueth_add_mcast,
				   icssg_prueth_del_mcast);

	return 0;
}
683 
/**
 * emac_ndo_open - EMAC device open
 * @ndev: network adapter device
 *
 * Called when system wants to start the interface.
 *
 * Brings up the DMA channels, IRQs, NAPI contexts and (for the first
 * port to open) the shared firmware/IEP resources, then starts the PHY.
 * Errors unwind in strict reverse order of setup.
 *
 * Return: 0 for a successful open, or appropriate error code
 */
static int emac_ndo_open(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	int ret, i, num_data_chn = emac->tx_ch_num;
	struct icssg_flow_cfg __iomem *flow_cfg;
	struct prueth *prueth = emac->prueth;
	int slice = prueth_emac_slice(emac);
	struct device *dev = prueth->dev;
	int max_rx_flows;
	int rx_flow;

	/* set h/w MAC as user might have re-configured */
	ether_addr_copy(emac->mac_addr, ndev->dev_addr);

	icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
	icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);

	/* Notify the stack of the actual queue counts. */
	ret = netif_set_real_num_tx_queues(ndev, num_data_chn);
	if (ret) {
		dev_err(dev, "cannot set real number of tx queues\n");
		return ret;
	}

	init_completion(&emac->cmd_complete);
	ret = prueth_init_tx_chns(emac);
	if (ret) {
		dev_err(dev, "failed to init tx channel: %d\n", ret);
		return ret;
	}

	max_rx_flows = PRUETH_MAX_RX_FLOWS;
	ret = prueth_init_rx_chns(emac, &emac->rx_chns, "rx",
				  max_rx_flows, PRUETH_MAX_RX_DESC);
	if (ret) {
		dev_err(dev, "failed to init rx channel: %d\n", ret);
		goto cleanup_tx;
	}

	ret = prueth_ndev_add_tx_napi(emac);
	if (ret)
		goto cleanup_rx;

	/* we use only the highest priority flow for now i.e. @irq[3] */
	rx_flow = PRUETH_RX_FLOW_DATA;
	ret = request_irq(emac->rx_chns.irq[rx_flow], prueth_rx_irq,
			  IRQF_TRIGGER_HIGH, dev_name(dev), emac);
	if (ret) {
		dev_err(dev, "unable to request RX IRQ\n");
		goto cleanup_napi;
	}

	/* first port to open boots the firmware and shared resources */
	if (!prueth->emacs_initialized) {
		ret = prueth_emac_common_start(prueth);
		if (ret)
			goto free_rx_irq;
	}

	/* tell the firmware which RX flow id base this port uses */
	flow_cfg = emac->dram.va + ICSSG_CONFIG_OFFSET + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET;
	writew(emac->rx_flow_id_base, &flow_cfg->rx_base_flow);
	ret = emac_fdb_flow_id_updated(emac);

	if (ret) {
		netdev_err(ndev, "Failed to update Rx Flow ID %d", ret);
		goto stop;
	}

	icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);

	ret = request_threaded_irq(emac->tx_ts_irq, NULL, prueth_tx_ts_irq,
				   IRQF_ONESHOT, dev_name(dev), emac);
	if (ret)
		goto stop;

	/* Prepare RX */
	ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
	if (ret)
		goto free_tx_ts_irq;

	ret = prueth_create_xdp_rxqs(emac);
	if (ret)
		goto reset_rx_chn;

	ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
	if (ret)
		goto destroy_xdp_rxqs;

	for (i = 0; i < emac->tx_ch_num; i++) {
		ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
		if (ret)
			goto reset_tx_chan;
	}

	/* Enable NAPI in Tx and Rx direction */
	for (i = 0; i < emac->tx_ch_num; i++)
		napi_enable(&emac->tx_chns[i].napi_tx);
	napi_enable(&emac->napi_rx);

	/* start PHY */
	phy_start(ndev->phydev);

	prueth->emacs_initialized++;

	queue_work(system_long_wq, &emac->stats_work.work);

	return 0;

reset_tx_chan:
	/* Since interface is not yet up, there wouldn't be
	 * any SKB for completion. So set false to free_skb
	 */
	prueth_reset_tx_chan(emac, i, false);
destroy_xdp_rxqs:
	prueth_destroy_xdp_rxqs(emac);
reset_rx_chn:
	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
free_tx_ts_irq:
	free_irq(emac->tx_ts_irq, emac);
stop:
	/* only undo the common start if this open attempted it */
	if (!prueth->emacs_initialized)
		prueth_emac_common_stop(prueth);
free_rx_irq:
	free_irq(emac->rx_chns.irq[rx_flow], emac);
cleanup_napi:
	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
cleanup_rx:
	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
cleanup_tx:
	prueth_cleanup_tx_chns(emac);

	return ret;
}
824 
/**
 * emac_ndo_stop - EMAC device stop
 * @ndev: network adapter device
 *
 * Called when system wants to stop or down the interface.
 *
 * Tears down in reverse order of emac_ndo_open(): PHY, multicast
 * entries, DMA channels (with completion-based TX teardown), NAPI, XDP
 * queues, deferred work, shared firmware (when the last port closes),
 * IRQs, and channel resources.
 *
 * Return: Always 0 (Success)
 */
static int emac_ndo_stop(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int rx_flow = PRUETH_RX_FLOW_DATA;
	int max_rx_flows;
	int ret, i;

	/* inform the upper layers. */
	netif_tx_stop_all_queues(ndev);

	/* block packets from wire */
	if (ndev->phydev)
		phy_stop(ndev->phydev);

	/* remove this port's multicast entries from hardware */
	if (emac->prueth->is_hsr_offload_mode)
		__dev_mc_unsync(ndev, icssg_prueth_hsr_del_mcast);
	else
		__dev_mc_unsync(ndev, icssg_prueth_del_mcast);

	atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
	/* ensure new tdown_cnt value is visible */
	smp_mb__after_atomic();
	/* tear down and disable UDMA channels */
	reinit_completion(&emac->tdown_complete);
	for (i = 0; i < emac->tx_ch_num; i++)
		k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);

	ret = wait_for_completion_timeout(&emac->tdown_complete,
					  msecs_to_jiffies(1000));
	if (!ret)
		netdev_err(ndev, "tx teardown timeout\n");

	prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
	for (i = 0; i < emac->tx_ch_num; i++) {
		napi_disable(&emac->tx_chns[i].napi_tx);
		hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer);
	}

	max_rx_flows = PRUETH_MAX_RX_FLOWS;
	k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);

	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
	prueth_destroy_xdp_rxqs(emac);
	napi_disable(&emac->napi_rx);
	hrtimer_cancel(&emac->rx_hrtimer);

	cancel_work_sync(&emac->rx_mode_work);

	/* Destroying the queued work in ndo_stop() */
	cancel_delayed_work_sync(&emac->stats_work);

	/* stop PRUs when the last open port closes */
	if (prueth->emacs_initialized == 1)
		prueth_emac_common_stop(prueth);

	free_irq(emac->tx_ts_irq, emac);

	free_irq(emac->rx_chns.irq[rx_flow], emac);
	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);

	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
	prueth_cleanup_tx_chns(emac);

	prueth->emacs_initialized--;

	return 0;
}
901 
/* Deferred worker behind emac_ndo_set_rx_mode(): program promiscuous /
 * allmulti flooding state and sync the multicast address lists (including
 * per-VLAN lists) into the hardware tables.
 */
static void emac_ndo_set_rx_mode_work(struct work_struct *work)
{
	struct prueth_emac *emac = container_of(work, struct prueth_emac, rx_mode_work);
	struct net_device *ndev = emac->ndev;
	bool promisc, allmulti;

	if (!netif_running(ndev))
		return;

	promisc = ndev->flags & IFF_PROMISC;
	allmulti = ndev->flags & IFF_ALLMULTI;
	/* start from flooding disabled, then re-enable as needed below */
	icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_DISABLE);
	icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_DISABLE);

	if (promisc) {
		icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_ENABLE);
		icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
		return;
	}

	if (allmulti) {
		icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
		return;
	}

	/* sync mc lists; rtnl_trylock avoids deadlocking against a holder
	 * that may be flushing this very work
	 */
	if (emac->prueth->is_hsr_offload_mode) {
		__dev_mc_sync(ndev, icssg_prueth_hsr_add_mcast,
			      icssg_prueth_hsr_del_mcast);
		if (rtnl_trylock()) {
			vlan_for_each(emac->prueth->hsr_dev,
				      icssg_update_vlan_mcast, emac);
			rtnl_unlock();
		}
	} else {
		__dev_mc_sync(ndev, icssg_prueth_add_mcast,
			      icssg_prueth_del_mcast);
		if (rtnl_trylock()) {
			vlan_for_each(ndev, icssg_update_vlan_mcast, emac);
			rtnl_unlock();
		}
	}
}
944 
/**
 * emac_ndo_set_rx_mode - EMAC set receive mode function
 * @ndev: The EMAC network adapter
 *
 * Called when system wants to set the receive mode of the device.
 *
 * The actual hardware programming is deferred to
 * emac_ndo_set_rx_mode_work() on the port's command workqueue, since
 * this callback runs in atomic context.
 */
static void emac_ndo_set_rx_mode(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);

	queue_work(emac->cmd_wq, &emac->rx_mode_work);
}
958 
959 static netdev_features_t emac_ndo_fix_features(struct net_device *ndev,
960 					       netdev_features_t features)
961 {
962 	/* hsr tag insertion offload and hsr dup offload are tightly coupled in
963 	 * firmware implementation. Both these features need to be enabled /
964 	 * disabled together.
965 	 */
966 	if (!(ndev->features & (NETIF_F_HW_HSR_DUP | NETIF_F_HW_HSR_TAG_INS)))
967 		if ((features & NETIF_F_HW_HSR_DUP) ||
968 		    (features & NETIF_F_HW_HSR_TAG_INS))
969 			features |= NETIF_F_HW_HSR_DUP |
970 				    NETIF_F_HW_HSR_TAG_INS;
971 
972 	if ((ndev->features & NETIF_F_HW_HSR_DUP) ||
973 	    (ndev->features & NETIF_F_HW_HSR_TAG_INS))
974 		if (!(features & NETIF_F_HW_HSR_DUP) ||
975 		    !(features & NETIF_F_HW_HSR_TAG_INS))
976 			features &= ~(NETIF_F_HW_HSR_DUP |
977 				      NETIF_F_HW_HSR_TAG_INS);
978 
979 	return features;
980 }
981 
/* ndo_vlan_rx_add_vid: make this port (and, in HSR offload mode, the
 * host port too) a tagged member of @vid, set it as PVID, and init the
 * per-VID multicast bookkeeping list.
 *
 * Return: always 0.
 */
static int emac_ndo_vlan_rx_add_vid(struct net_device *ndev,
				    __be16 proto, u16 vid)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int port_mask = BIT(emac->port_id);
	int untag_mask = 0;	/* always tagged membership */

	if (prueth->is_hsr_offload_mode)
		port_mask |= BIT(PRUETH_PORT_HOST);

	__hw_addr_init(&emac->vlan_mcast_list[vid]);
	netdev_dbg(emac->ndev, "VID add vid:%u port_mask:%X untag_mask %X\n",
		   vid, port_mask, untag_mask);

	icssg_vtbl_modify(emac, vid, port_mask, untag_mask, true);
	icssg_set_pvid(emac->prueth, vid, emac->port_id);

	return 0;
}
1002 
/* ndo_vlan_rx_del_vid: remove VLAN table membership for @vid.
 *
 * NOTE(review): in HSR offload mode the mask is *replaced* with only the
 * host port (`=`), whereas add_vid ORs the host port in (`|=`), so the
 * slave port's membership is not removed here — confirm this asymmetry
 * is intentional.
 *
 * Return: always 0.
 */
static int emac_ndo_vlan_rx_del_vid(struct net_device *ndev,
				    __be16 proto, u16 vid)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int port_mask = BIT(emac->port_id);
	int untag_mask = 0;

	if (prueth->is_hsr_offload_mode)
		port_mask = BIT(PRUETH_PORT_HOST);

	netdev_dbg(emac->ndev, "VID del vid:%u port_mask:%X untag_mask  %X\n",
		   vid, port_mask, untag_mask);
	icssg_vtbl_modify(emac, vid, port_mask, untag_mask, false);

	return 0;
}
1020 
/**
 * emac_xdp_xmit - Implements ndo_xdp_xmit
 * @dev: netdev
 * @n: number of frames
 * @frames: array of XDP buffer pointers
 * @flags: XDP extra info
 *
 * Return: number of frames successfully sent. Failed frames
 * will be free'ed by XDP core.
 *
 * For error cases, a negative errno code is returned and no-frames
 * are transmitted (caller must handle freeing frames).
 **/
static int emac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
			 u32 flags)
{
	struct prueth_emac *emac = netdev_priv(dev);
	struct net_device *ndev = emac->ndev;
	struct netdev_queue *netif_txq;
	int cpu = smp_processor_id();
	struct xdp_frame *xdpf;
	unsigned int q_idx;
	int nxmit = 0;
	u32 err;
	int i;

	/* spread XDP transmit load across TX channels by CPU */
	q_idx = cpu % emac->tx_ch_num;
	netif_txq = netdev_get_tx_queue(ndev, q_idx);

	/* reject unknown flag bits per the ndo_xdp_xmit contract */
	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	__netif_tx_lock(netif_txq, cpu);
	for (i = 0; i < n; i++) {
		xdpf = frames[i];
		err = emac_xmit_xdp_frame(emac, xdpf, NULL, q_idx);
		if (err != ICSSG_XDP_TX) {
			/* stop at the first failure; XDP core frees the rest */
			ndev->stats.tx_dropped++;
			break;
		}
		nxmit++;
	}
	__netif_tx_unlock(netif_txq);

	return nxmit;
}
1067 
1068 /**
1069  * emac_xdp_setup - add/remove an XDP program
1070  * @emac: emac device
1071  * @bpf: XDP program
1072  *
1073  * Return: Always 0 (Success)
1074  **/
1075 static int emac_xdp_setup(struct prueth_emac *emac, struct netdev_bpf *bpf)
1076 {
1077 	struct bpf_prog *prog = bpf->prog;
1078 
1079 	if (!emac->xdpi.prog && !prog)
1080 		return 0;
1081 
1082 	WRITE_ONCE(emac->xdp_prog, prog);
1083 
1084 	xdp_attachment_setup(&emac->xdpi, bpf);
1085 
1086 	return 0;
1087 }
1088 
1089 /**
1090  * emac_ndo_bpf - implements ndo_bpf for icssg_prueth
1091  * @ndev: network adapter device
1092  * @bpf: XDP program
1093  *
1094  * Return: 0 on success, error code on failure.
1095  **/
1096 static int emac_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
1097 {
1098 	struct prueth_emac *emac = netdev_priv(ndev);
1099 
1100 	switch (bpf->command) {
1101 	case XDP_SETUP_PROG:
1102 		return emac_xdp_setup(emac, bpf);
1103 	default:
1104 		return -EINVAL;
1105 	}
1106 }
1107 
/* net_device callbacks shared by both ICSSG ports (dual-EMAC, switch and
 * HSR offload modes).
 */
static const struct net_device_ops emac_netdev_ops = {
	.ndo_open = emac_ndo_open,
	.ndo_stop = emac_ndo_stop,
	.ndo_start_xmit = icssg_ndo_start_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_tx_timeout = icssg_ndo_tx_timeout,
	.ndo_set_rx_mode = emac_ndo_set_rx_mode,
	.ndo_eth_ioctl = icssg_ndo_ioctl,
	.ndo_get_stats64 = icssg_ndo_get_stats64,
	.ndo_get_phys_port_name = icssg_ndo_get_phys_port_name,
	.ndo_fix_features = emac_ndo_fix_features,
	.ndo_vlan_rx_add_vid = emac_ndo_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = emac_ndo_vlan_rx_del_vid,
	.ndo_bpf = emac_ndo_bpf,
	.ndo_xdp_xmit = emac_xdp_xmit,
};
1125 
/* Allocate and initialize one netdev for the port described by @eth_node:
 * slice DRAM, command workqueue, TX-timestamp IRQ, PHY handle/mode, MAC
 * address, netdev ops/features and NAPI. The netdev is created but not yet
 * registered; on success it is stored in prueth->emac[mac].
 *
 * Return: 0 on success, negative errno on failure (all acquired resources
 * are released via the goto chain at the bottom).
 */
static int prueth_netdev_init(struct prueth *prueth,
			      struct device_node *eth_node)
{
	int ret, num_tx_chn = PRUETH_MAX_TX_QUEUES;
	struct prueth_emac *emac;
	struct net_device *ndev;
	enum prueth_port port;
	const char *irq_name;
	enum prueth_mac mac;

	port = prueth_node_port(eth_node);
	if (port == PRUETH_PORT_INVALID)
		return -EINVAL;

	mac = prueth_node_mac(eth_node);
	if (mac == PRUETH_MAC_INVALID)
		return -EINVAL;

	ndev = alloc_etherdev_mq(sizeof(*emac), num_tx_chn);
	if (!ndev)
		return -ENOMEM;

	emac = netdev_priv(ndev);
	emac->prueth = prueth;
	emac->ndev = ndev;
	emac->port_id = port;
	emac->xdp_prog = NULL;
	emac->ndev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
	/* Single-threaded workqueue serializes firmware command submission */
	emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
	if (!emac->cmd_wq) {
		ret = -ENOMEM;
		goto free_ndev;
	}
	INIT_WORK(&emac->rx_mode_work, emac_ndo_set_rx_mode_work);

	INIT_DELAYED_WORK(&emac->stats_work, icssg_stats_work_handler);

	/* Each slice owns its own DRAM bank: MII0 -> DRAM0, MII1 -> DRAM1 */
	ret = pruss_request_mem_region(prueth->pruss,
				       port == PRUETH_PORT_MII0 ?
				       PRUSS_MEM_DRAM0 : PRUSS_MEM_DRAM1,
				       &emac->dram);
	if (ret) {
		dev_err(prueth->dev, "unable to get DRAM: %d\n", ret);
		ret = -ENOMEM;
		goto free_wq;
	}

	emac->tx_ch_num = 1;

	irq_name = "tx_ts0";
	if (emac->port_id == PRUETH_PORT_MII1)
		irq_name = "tx_ts1";
	emac->tx_ts_irq = platform_get_irq_byname_optional(prueth->pdev, irq_name);
	if (emac->tx_ts_irq < 0) {
		ret = dev_err_probe(prueth->dev, emac->tx_ts_irq, "could not get tx_ts_irq\n");
		goto free;
	}

	SET_NETDEV_DEV(ndev, prueth->dev);
	spin_lock_init(&emac->lock);
	mutex_init(&emac->cmd_lock);

	/* Either a phy-handle or a fixed-link description is required */
	emac->phy_node = of_parse_phandle(eth_node, "phy-handle", 0);
	if (!emac->phy_node && !of_phy_is_fixed_link(eth_node)) {
		dev_err(prueth->dev, "couldn't find phy-handle\n");
		ret = -ENODEV;
		goto free;
	} else if (of_phy_is_fixed_link(eth_node)) {
		ret = of_phy_register_fixed_link(eth_node);
		if (ret) {
			ret = dev_err_probe(prueth->dev, ret,
					    "failed to register fixed-link phy\n");
			goto free;
		}

		/* NOTE(review): later error paths never call
		 * of_phy_deregister_fixed_link() - confirm whether the
		 * fixed-link registration leaks when a later step fails.
		 */
		emac->phy_node = eth_node;
	}

	ret = of_get_phy_mode(eth_node, &emac->phy_if);
	if (ret) {
		dev_err(prueth->dev, "could not get phy-mode property\n");
		goto free;
	}

	if (emac->phy_if != PHY_INTERFACE_MODE_MII &&
	    !phy_interface_mode_is_rgmii(emac->phy_if)) {
		dev_err(prueth->dev, "PHY mode unsupported %s\n", phy_modes(emac->phy_if));
		ret = -EINVAL;
		goto free;
	}

	/* AM65 SR2.0 has TX Internal delay always enabled by hardware
	 * and it is not possible to disable TX Internal delay. The below
	 * switch case block describes how we handle different phy modes
	 * based on hardware restriction.
	 */
	switch (emac->phy_if) {
	case PHY_INTERFACE_MODE_RGMII_ID:
		emac->phy_if = PHY_INTERFACE_MODE_RGMII_RXID;
		break;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		emac->phy_if = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		dev_err(prueth->dev, "RGMII mode without TX delay is not supported");
		ret = -EINVAL;
		goto free;
	default:
		break;
	}

	/* get mac address from DT and set private and netdev addr */
	ret = of_get_ethdev_address(eth_node, ndev);
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		/* DT address missing or invalid: fall back to a random one */
		eth_hw_addr_random(ndev);
		dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n",
			 port, ndev->dev_addr);
	}
	ether_addr_copy(emac->mac_addr, ndev->dev_addr);

	ndev->dev.of_node = eth_node;
	ndev->min_mtu = PRUETH_MIN_PKT_SIZE;
	ndev->max_mtu = PRUETH_MAX_MTU;
	ndev->netdev_ops = &emac_netdev_ops;
	ndev->ethtool_ops = &icssg_ethtool_ops;
	ndev->hw_features = NETIF_F_SG;
	ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
	ndev->hw_features |= NETIF_PRUETH_HSR_OFFLOAD_FEATURES;
	xdp_set_features_flag(ndev,
			      NETDEV_XDP_ACT_BASIC |
			      NETDEV_XDP_ACT_REDIRECT |
			      NETDEV_XDP_ACT_NDO_XMIT);

	netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
	/* hrtimer used for RX interrupt pacing (coalescing) */
	hrtimer_setup(&emac->rx_hrtimer, &emac_rx_timer_callback, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL_PINNED);
	prueth->emac[mac] = emac;

	return 0;

free:
	pruss_release_mem_region(prueth->pruss, &emac->dram);
free_wq:
	destroy_workqueue(emac->cmd_wq);
free_ndev:
	emac->ndev = NULL;
	prueth->emac[mac] = NULL;
	free_netdev(ndev);

	return ret;
}
1278 
1279 bool prueth_dev_check(const struct net_device *ndev)
1280 {
1281 	if (ndev->netdev_ops == &emac_netdev_ops && netif_running(ndev)) {
1282 		struct prueth_emac *emac = netdev_priv(ndev);
1283 
1284 		return emac->prueth->is_switch_mode;
1285 	}
1286 
1287 	return false;
1288 }
1289 
1290 static void prueth_offload_fwd_mark_update(struct prueth *prueth)
1291 {
1292 	int set_val = 0;
1293 	int i;
1294 
1295 	if (prueth->br_members == (BIT(PRUETH_PORT_MII0) | BIT(PRUETH_PORT_MII1)))
1296 		set_val = 1;
1297 
1298 	dev_dbg(prueth->dev, "set offload_fwd_mark %d\n", set_val);
1299 
1300 	for (i = PRUETH_MAC0; i < PRUETH_NUM_MACS; i++) {
1301 		struct prueth_emac *emac = prueth->emac[i];
1302 
1303 		if (!emac || !emac->ndev)
1304 			continue;
1305 
1306 		emac->offload_fwd_mark = set_val;
1307 	}
1308 }
1309 
/* Restart both PRU firmware cores, e.g. after a mode change (EMAC <->
 * switch/HSR). Detaches both netdevs, disables and stops the cores,
 * restarts them, re-enables forwarding and reattaches the netdevs.
 *
 * Return: 0 on success, negative/non-zero error value otherwise.
 */
static int prueth_emac_restart(struct prueth *prueth)
{
	struct prueth_emac *emac0 = prueth->emac[PRUETH_MAC0];
	struct prueth_emac *emac1 = prueth->emac[PRUETH_MAC1];
	int ret;

	/* Detach the net_device for both PRUeth ports */
	if (netif_running(emac0->ndev))
		netif_device_detach(emac0->ndev);
	if (netif_running(emac1->ndev))
		netif_device_detach(emac1->ndev);

	/* Disable both PRUeth ports */
	ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
	ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);
	if (ret)
		return ret;

	/* Stop both pru cores for both PRUeth ports */
	ret = prueth_emac_common_stop(prueth);
	if (ret) {
		dev_err(prueth->dev, "Failed to stop the firmwares");
		return ret;
	}

	/* Start both pru cores for both PRUeth ports */
	ret = prueth_emac_common_start(prueth);
	if (ret) {
		dev_err(prueth->dev, "Failed to start the firmwares");
		return ret;
	}

	/* Enable forwarding for both PRUeth ports */
	ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD);
	ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD);

	/* Attach net_device for both PRUeth ports */
	netif_device_attach(emac0->ndev);
	netif_device_attach(emac1->ndev);

	return ret;
}
1352 
/* Reload the firmware for the newly selected mode (switch or HSR offload)
 * and reprogram per-port state that the restart wiped: HSR RX offload,
 * STP/blocked FDB entry, default VLAN table entries and PVID.
 */
static void icssg_change_mode(struct prueth *prueth)
{
	struct prueth_emac *emac;
	int mac, ret;

	ret = prueth_emac_restart(prueth);
	if (ret) {
		dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
		return;
	}

	for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
		emac = prueth->emac[mac];
		if (prueth->is_hsr_offload_mode) {
			/* RX tag removal tracks the netdev feature bit */
			if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM)
				icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE);
			else
				icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE);
		}

		if (netif_running(emac->ndev)) {
			/* Re-add the STP multicast address as a blocked
			 * entry on all port memberships.
			 */
			icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan,
					  ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
					  ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
					  ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
					  ICSSG_FDB_ENTRY_BLOCK,
					  true);
			icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID,
					  BIT(emac->port_id) | DEFAULT_PORT_MASK,
					  BIT(emac->port_id) | DEFAULT_UNTAG_MASK,
					  true);
			if (prueth->is_hsr_offload_mode)
				icssg_vtbl_modify(emac, DEFAULT_VID,
						  DEFAULT_PORT_MASK,
						  DEFAULT_UNTAG_MASK, true);
			icssg_set_pvid(prueth, emac->port_vlan, emac->port_id);
			if (prueth->is_switch_mode)
				icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);
		}
	}
}
1394 
/* Handle @ndev joining bridge @br_ndev. Both ports must join the same
 * bridge; once both are members the ICSSG is switched from dual-EMAC to
 * switch mode (firmware restart via icssg_change_mode()).
 *
 * Return: NOTIFY_DONE on success, negative errno on failure (fed into
 * notifier_from_errno() by the caller).
 */
static int prueth_netdevice_port_link(struct net_device *ndev,
				      struct net_device *br_ndev,
				      struct netlink_ext_ack *extack)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int err;

	if (!prueth->br_members) {
		prueth->hw_bridge_dev = br_ndev;
	} else {
		/* This is adding the port to a second bridge, this is
		 * unsupported
		 */
		if (prueth->hw_bridge_dev != br_ndev)
			return -EOPNOTSUPP;
	}

	err = switchdev_bridge_port_offload(br_ndev, ndev, emac,
					    &prueth->prueth_switchdev_nb,
					    &prueth->prueth_switchdev_bl_nb,
					    false, extack);
	if (err)
		return err;

	prueth->br_members |= BIT(emac->port_id);

	/* Switch mode kicks in only when both ports are bridged */
	if (!prueth->is_switch_mode) {
		if (prueth->br_members & BIT(PRUETH_PORT_MII0) &&
		    prueth->br_members & BIT(PRUETH_PORT_MII1)) {
			prueth->is_switch_mode = true;
			prueth->default_vlan = PRUETH_DFLT_VLAN_SW;
			emac->port_vlan = prueth->default_vlan;
			icssg_change_mode(prueth);
		}
	}

	prueth_offload_fwd_mark_update(prueth);

	return NOTIFY_DONE;
}
1436 
1437 static void prueth_netdevice_port_unlink(struct net_device *ndev)
1438 {
1439 	struct prueth_emac *emac = netdev_priv(ndev);
1440 	struct prueth *prueth = emac->prueth;
1441 	int ret;
1442 
1443 	prueth->br_members &= ~BIT(emac->port_id);
1444 
1445 	if (prueth->is_switch_mode) {
1446 		prueth->is_switch_mode = false;
1447 		emac->port_vlan = 0;
1448 		ret = prueth_emac_restart(prueth);
1449 		if (ret) {
1450 			dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
1451 			return;
1452 		}
1453 	}
1454 
1455 	prueth_offload_fwd_mark_update(prueth);
1456 
1457 	if (!prueth->br_members)
1458 		prueth->hw_bridge_dev = NULL;
1459 }
1460 
/* Handle @ndev joining an HSR master. HSR offload is mutually exclusive
 * with switch mode and is enabled only once both ports are HSR members and
 * at least one still advertises the HSR offload features.
 *
 * Return: 0 on success, -EOPNOTSUPP if offload cannot be enabled.
 */
static int prueth_hsr_port_link(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	struct prueth_emac *emac0;
	struct prueth_emac *emac1;

	emac0 = prueth->emac[PRUETH_MAC0];
	emac1 = prueth->emac[PRUETH_MAC1];

	if (prueth->is_switch_mode)
		return -EOPNOTSUPP;

	prueth->hsr_members |= BIT(emac->port_id);
	if (!prueth->is_hsr_offload_mode) {
		if (prueth->hsr_members & BIT(PRUETH_PORT_MII0) &&
		    prueth->hsr_members & BIT(PRUETH_PORT_MII1)) {
			/* Refuse offload when the HSR feature bits have
			 * been cleared on both ports.
			 */
			if (!(emac0->ndev->features &
			      NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
			    !(emac1->ndev->features &
			      NETIF_PRUETH_HSR_OFFLOAD_FEATURES))
				return -EOPNOTSUPP;
			prueth->is_hsr_offload_mode = true;
			prueth->default_vlan = PRUETH_DFLT_VLAN_HSR;
			emac0->port_vlan = prueth->default_vlan;
			emac1->port_vlan = prueth->default_vlan;
			/* Restart firmware in HSR mode */
			icssg_change_mode(prueth);
			netdev_dbg(ndev, "Enabling HSR offload mode\n");
		}
	}

	return 0;
}
1494 
1495 static void prueth_hsr_port_unlink(struct net_device *ndev)
1496 {
1497 	struct prueth_emac *emac = netdev_priv(ndev);
1498 	struct prueth *prueth = emac->prueth;
1499 	struct prueth_emac *emac0;
1500 	struct prueth_emac *emac1;
1501 	int ret;
1502 
1503 	emac0 = prueth->emac[PRUETH_MAC0];
1504 	emac1 = prueth->emac[PRUETH_MAC1];
1505 
1506 	prueth->hsr_members &= ~BIT(emac->port_id);
1507 	if (prueth->is_hsr_offload_mode) {
1508 		prueth->is_hsr_offload_mode = false;
1509 		emac0->port_vlan = 0;
1510 		emac1->port_vlan = 0;
1511 		prueth->hsr_dev = NULL;
1512 		ret = prueth_emac_restart(prueth);
1513 		if (ret) {
1514 			dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
1515 			return;
1516 		}
1517 		netdev_dbg(ndev, "Disabling HSR Offload mode\n");
1518 	}
1519 }
1520 
/* netdev notifier: reacts to CHANGEUPPER events on our own interfaces to
 * enter/leave HSR offload and bridge/switch modes.
 */
static int prueth_netdevice_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info;
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int ret = NOTIFY_DONE;

	/* Ignore events for interfaces that are not ours */
	if (ndev->netdev_ops != &emac_netdev_ops)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		info = ptr;

		if ((ndev->features & NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
		    is_hsr_master(info->upper_dev)) {
			if (info->linking) {
				if (!prueth->hsr_dev) {
					/* First port to join: remember the
					 * HSR device and program its MAC as
					 * the host address.
					 */
					prueth->hsr_dev = info->upper_dev;
					icssg_class_set_host_mac_addr(prueth->miig_rt,
								      prueth->hsr_dev->dev_addr);
				} else {
					if (prueth->hsr_dev != info->upper_dev) {
						netdev_dbg(ndev, "Both interfaces must be linked to same upper device\n");
						/* NOTE(review): returns a raw
						 * -EOPNOTSUPP from a notifier
						 * instead of
						 * notifier_from_errno() -
						 * confirm intended.
						 */
						return -EOPNOTSUPP;
					}
				}
				prueth_hsr_port_link(ndev);
			} else {
				prueth_hsr_port_unlink(ndev);
			}
		}

		if (netif_is_bridge_master(info->upper_dev)) {
			if (info->linking)
				ret = prueth_netdevice_port_link(ndev, info->upper_dev, extack);
			else
				prueth_netdevice_port_unlink(ndev);
		}
		break;
	default:
		return NOTIFY_DONE;
	}

	return notifier_from_errno(ret);
}
1571 
1572 static int prueth_register_notifiers(struct prueth *prueth)
1573 {
1574 	int ret = 0;
1575 
1576 	prueth->prueth_netdevice_nb.notifier_call = &prueth_netdevice_event;
1577 	ret = register_netdevice_notifier(&prueth->prueth_netdevice_nb);
1578 	if (ret) {
1579 		dev_err(prueth->dev, "can't register netdevice notifier\n");
1580 		return ret;
1581 	}
1582 
1583 	ret = prueth_switchdev_register_notifiers(prueth);
1584 	if (ret)
1585 		unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
1586 
1587 	return ret;
1588 }
1589 
/* Undo prueth_register_notifiers() in reverse order */
static void prueth_unregister_notifiers(struct prueth *prueth)
{
	prueth_switchdev_unregister_notifiers(prueth);
	unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
}
1595 
1596 static void icssg_read_firmware_names(struct device_node *np,
1597 				      struct icssg_firmwares *fw)
1598 {
1599 	int i;
1600 
1601 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
1602 		of_property_read_string_index(np, "firmware-name", i * 3 + 0,
1603 					      &fw[i].pru);
1604 		of_property_read_string_index(np, "firmware-name", i * 3 + 1,
1605 					      &fw[i].rtu);
1606 		of_property_read_string_index(np, "firmware-name", i * 3 + 2,
1607 					      &fw[i].txpru);
1608 	}
1609 }
1610 
1611 /* icssg_firmware_name_replace - Replace a substring in firmware name
1612  * @dev: device pointer for memory allocation
1613  * @src: source firmware name string
1614  * @from: substring to replace
1615  * @to: replacement substring
1616  *
1617  * Return: a newly allocated string with the replacement, or the original
1618  * string if replacement is not possible.
1619  */
1620 static const char *icssg_firmware_name_replace(struct device *dev,
1621 					       const char *src,
1622 					       const char *from,
1623 					       const char *to)
1624 {
1625 	size_t prefix, from_len, to_len, total;
1626 	const char *p = strstr(src, from);
1627 	char *buf;
1628 
1629 	if (!p)
1630 		return src; /* fallback: no replacement, use original */
1631 
1632 	prefix = p - src;
1633 	from_len = strlen(from);
1634 	to_len = strlen(to);
1635 	total = strlen(src) - from_len + to_len + 1;
1636 
1637 	buf = devm_kzalloc(dev, total, GFP_KERNEL);
1638 	if (!buf)
1639 		return src; /* fallback: allocation failed, use original */
1640 
1641 	strscpy(buf, src, prefix + 1);
1642 	strscpy(buf + prefix, to, to_len + 1);
1643 	strscpy(buf + prefix + to_len, p + from_len, total - prefix - to_len);
1644 
1645 	return buf;
1646 }
1647 
1648 /**
1649  * icssg_mode_firmware_names - Generate firmware names for a specific mode
1650  * @dev: device pointer for logging and context
1651  * @src: source array of firmware name structures
1652  * @dst: destination array to store updated firmware name structures
1653  * @from: substring in firmware names to be replaced
1654  * @to: substring to replace @from in firmware names
1655  *
1656  * Iterates over all MACs and replaces occurrences of the @from substring
1657  * with @to in the firmware names (pru, rtu, txpru) for each MAC. The
1658  * updated firmware names are stored in the @dst array.
1659  */
1660 static void icssg_mode_firmware_names(struct device *dev,
1661 				      struct icssg_firmwares *src,
1662 				      struct icssg_firmwares *dst,
1663 				      const char *from, const char *to)
1664 {
1665 	int i;
1666 
1667 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
1668 		dst[i].pru = icssg_firmware_name_replace(dev, src[i].pru,
1669 							 from, to);
1670 		dst[i].rtu = icssg_firmware_name_replace(dev, src[i].rtu,
1671 							 from, to);
1672 		dst[i].txpru = icssg_firmware_name_replace(dev, src[i].txpru,
1673 							   from, to);
1674 	}
1675 }
1676 
1677 static int prueth_probe(struct platform_device *pdev)
1678 {
1679 	struct device_node *eth_node, *eth_ports_node;
1680 	struct device_node  *eth0_node = NULL;
1681 	struct device_node  *eth1_node = NULL;
1682 	struct genpool_data_align gp_data = {
1683 		.align = SZ_64K,
1684 	};
1685 	struct device *dev = &pdev->dev;
1686 	struct device_node *np;
1687 	struct prueth *prueth;
1688 	struct pruss *pruss;
1689 	u32 msmc_ram_size;
1690 	int i, ret;
1691 
1692 	np = dev->of_node;
1693 
1694 	BUILD_BUG_ON_MSG((sizeof(struct prueth_swdata) > PRUETH_NAV_SW_DATA_SIZE),
1695 			 "insufficient SW_DATA size");
1696 
1697 	prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
1698 	if (!prueth)
1699 		return -ENOMEM;
1700 
1701 	dev_set_drvdata(dev, prueth);
1702 	prueth->pdev = pdev;
1703 	prueth->pdata = *(const struct prueth_pdata *)device_get_match_data(dev);
1704 
1705 	prueth->dev = dev;
1706 	eth_ports_node = of_get_child_by_name(np, "ethernet-ports");
1707 	if (!eth_ports_node)
1708 		return -ENOENT;
1709 
1710 	for_each_child_of_node(eth_ports_node, eth_node) {
1711 		u32 reg;
1712 
1713 		if (strcmp(eth_node->name, "port"))
1714 			continue;
1715 		ret = of_property_read_u32(eth_node, "reg", &reg);
1716 		if (ret < 0) {
1717 			dev_err(dev, "%pOF error reading port_id %d\n",
1718 				eth_node, ret);
1719 		}
1720 
1721 		of_node_get(eth_node);
1722 
1723 		if (reg == 0) {
1724 			eth0_node = eth_node;
1725 			if (!of_device_is_available(eth0_node)) {
1726 				of_node_put(eth0_node);
1727 				eth0_node = NULL;
1728 			}
1729 		} else if (reg == 1) {
1730 			eth1_node = eth_node;
1731 			if (!of_device_is_available(eth1_node)) {
1732 				of_node_put(eth1_node);
1733 				eth1_node = NULL;
1734 			}
1735 		} else {
1736 			dev_err(dev, "port reg should be 0 or 1\n");
1737 		}
1738 	}
1739 
1740 	of_node_put(eth_ports_node);
1741 
1742 	/* At least one node must be present and available else we fail */
1743 	if (!eth0_node && !eth1_node) {
1744 		dev_err(dev, "neither port0 nor port1 node available\n");
1745 		return -ENODEV;
1746 	}
1747 
1748 	if (eth0_node == eth1_node) {
1749 		dev_err(dev, "port0 and port1 can't have same reg\n");
1750 		of_node_put(eth0_node);
1751 		return -ENODEV;
1752 	}
1753 
1754 	prueth->eth_node[PRUETH_MAC0] = eth0_node;
1755 	prueth->eth_node[PRUETH_MAC1] = eth1_node;
1756 
1757 	prueth->miig_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-g-rt");
1758 	if (IS_ERR(prueth->miig_rt)) {
1759 		dev_err(dev, "couldn't get ti,mii-g-rt syscon regmap\n");
1760 		return -ENODEV;
1761 	}
1762 
1763 	prueth->mii_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-rt");
1764 	if (IS_ERR(prueth->mii_rt)) {
1765 		dev_err(dev, "couldn't get ti,mii-rt syscon regmap\n");
1766 		return -ENODEV;
1767 	}
1768 
1769 	prueth->pa_stats = syscon_regmap_lookup_by_phandle(np, "ti,pa-stats");
1770 	if (IS_ERR(prueth->pa_stats)) {
1771 		dev_err(dev, "couldn't get ti,pa-stats syscon regmap\n");
1772 		prueth->pa_stats = NULL;
1773 	}
1774 
1775 	if (eth0_node || eth1_node) {
1776 		ret = prueth_get_cores(prueth, ICSS_SLICE0, false);
1777 		if (ret)
1778 			goto put_cores;
1779 		ret = prueth_get_cores(prueth, ICSS_SLICE1, false);
1780 		if (ret)
1781 			goto put_cores;
1782 	}
1783 
1784 	pruss = pruss_get(eth0_node ?
1785 			  prueth->pru[ICSS_SLICE0] : prueth->pru[ICSS_SLICE1]);
1786 	if (IS_ERR(pruss)) {
1787 		ret = PTR_ERR(pruss);
1788 		dev_err(dev, "unable to get pruss handle\n");
1789 		goto put_cores;
1790 	}
1791 
1792 	prueth->pruss = pruss;
1793 
1794 	ret = pruss_request_mem_region(pruss, PRUSS_MEM_SHRD_RAM2,
1795 				       &prueth->shram);
1796 	if (ret) {
1797 		dev_err(dev, "unable to get PRUSS SHRD RAM2: %d\n", ret);
1798 		goto put_pruss;
1799 	}
1800 
1801 	prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
1802 	if (!prueth->sram_pool) {
1803 		dev_err(dev, "unable to get SRAM pool\n");
1804 		ret = -ENODEV;
1805 
1806 		goto put_mem;
1807 	}
1808 
1809 	msmc_ram_size = MSMC_RAM_SIZE;
1810 	prueth->is_switchmode_supported = prueth->pdata.switch_mode;
1811 	if (prueth->is_switchmode_supported)
1812 		msmc_ram_size = MSMC_RAM_SIZE_SWITCH_MODE;
1813 
1814 	/* NOTE: FW bug needs buffer base to be 64KB aligned */
1815 	prueth->msmcram.va =
1816 		(void __iomem *)gen_pool_alloc_algo(prueth->sram_pool,
1817 						    msmc_ram_size,
1818 						    gen_pool_first_fit_align,
1819 						    &gp_data);
1820 
1821 	if (!prueth->msmcram.va) {
1822 		ret = -ENOMEM;
1823 		dev_err(dev, "unable to allocate MSMC resource\n");
1824 		goto put_mem;
1825 	}
1826 	prueth->msmcram.pa = gen_pool_virt_to_phys(prueth->sram_pool,
1827 						   (unsigned long)prueth->msmcram.va);
1828 	prueth->msmcram.size = msmc_ram_size;
1829 	memset_io(prueth->msmcram.va, 0, msmc_ram_size);
1830 	dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa,
1831 		prueth->msmcram.va, prueth->msmcram.size);
1832 
1833 	prueth->iep0 = icss_iep_get_idx(np, 0);
1834 	if (IS_ERR(prueth->iep0)) {
1835 		ret = dev_err_probe(dev, PTR_ERR(prueth->iep0), "iep0 get failed\n");
1836 		prueth->iep0 = NULL;
1837 		goto free_pool;
1838 	}
1839 
1840 	prueth->iep1 = icss_iep_get_idx(np, 1);
1841 	if (IS_ERR(prueth->iep1)) {
1842 		ret = dev_err_probe(dev, PTR_ERR(prueth->iep1), "iep1 get failed\n");
1843 		goto put_iep0;
1844 	}
1845 
1846 	if (prueth->pdata.quirk_10m_link_issue) {
1847 		/* Enable IEP1 for FW in 64bit mode as W/A for 10M FD link detect issue under TX
1848 		 * traffic.
1849 		 */
1850 		icss_iep_init_fw(prueth->iep1);
1851 	}
1852 
1853 	/* Read EMAC firmware names from device tree */
1854 	icssg_read_firmware_names(np, prueth->icssg_emac_firmwares);
1855 
1856 	/* Generate other mode firmware names based on EMAC firmware names */
1857 	icssg_mode_firmware_names(dev, prueth->icssg_emac_firmwares,
1858 				  prueth->icssg_switch_firmwares, "eth", "sw");
1859 	icssg_mode_firmware_names(dev, prueth->icssg_emac_firmwares,
1860 				  prueth->icssg_hsr_firmwares, "eth", "hsr");
1861 
1862 	spin_lock_init(&prueth->vtbl_lock);
1863 	spin_lock_init(&prueth->stats_lock);
1864 	/* setup netdev interfaces */
1865 	if (eth0_node) {
1866 		ret = prueth_netdev_init(prueth, eth0_node);
1867 		if (ret) {
1868 			dev_err_probe(dev, ret, "netdev init %s failed\n",
1869 				      eth0_node->name);
1870 			goto exit_iep;
1871 		}
1872 
1873 		prueth->emac[PRUETH_MAC0]->half_duplex =
1874 			of_property_read_bool(eth0_node, "ti,half-duplex-capable");
1875 
1876 		prueth->emac[PRUETH_MAC0]->iep = prueth->iep0;
1877 	}
1878 
1879 	if (eth1_node) {
1880 		ret = prueth_netdev_init(prueth, eth1_node);
1881 		if (ret) {
1882 			dev_err_probe(dev, ret, "netdev init %s failed\n",
1883 				      eth1_node->name);
1884 			goto netdev_exit;
1885 		}
1886 
1887 		prueth->emac[PRUETH_MAC1]->half_duplex =
1888 			of_property_read_bool(eth1_node, "ti,half-duplex-capable");
1889 
1890 		prueth->emac[PRUETH_MAC1]->iep = prueth->iep0;
1891 	}
1892 
1893 	/* register the network devices */
1894 	if (eth0_node) {
1895 		ret = register_netdev(prueth->emac[PRUETH_MAC0]->ndev);
1896 		if (ret) {
1897 			dev_err(dev, "can't register netdev for port MII0");
1898 			goto netdev_exit;
1899 		}
1900 
1901 		prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev;
1902 
1903 		ret = emac_phy_connect(prueth->emac[PRUETH_MAC0]);
1904 		if (ret) {
1905 			dev_err(dev,
1906 				"can't connect to MII0 PHY, error -%d", ret);
1907 			goto netdev_unregister;
1908 		}
1909 		phy_attached_info(prueth->emac[PRUETH_MAC0]->ndev->phydev);
1910 	}
1911 
1912 	if (eth1_node) {
1913 		ret = register_netdev(prueth->emac[PRUETH_MAC1]->ndev);
1914 		if (ret) {
1915 			dev_err(dev, "can't register netdev for port MII1");
1916 			goto netdev_unregister;
1917 		}
1918 
1919 		prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev;
1920 		ret = emac_phy_connect(prueth->emac[PRUETH_MAC1]);
1921 		if (ret) {
1922 			dev_err(dev,
1923 				"can't connect to MII1 PHY, error %d", ret);
1924 			goto netdev_unregister;
1925 		}
1926 		phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev);
1927 	}
1928 
1929 	if (prueth->is_switchmode_supported) {
1930 		ret = prueth_register_notifiers(prueth);
1931 		if (ret)
1932 			goto netdev_unregister;
1933 
1934 		sprintf(prueth->switch_id, "%s", dev_name(dev));
1935 	}
1936 
1937 	dev_info(dev, "TI PRU ethernet driver initialized: %s EMAC mode\n",
1938 		 (!eth0_node || !eth1_node) ? "single" : "dual");
1939 
1940 	if (eth1_node)
1941 		of_node_put(eth1_node);
1942 	if (eth0_node)
1943 		of_node_put(eth0_node);
1944 	return 0;
1945 
1946 netdev_unregister:
1947 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
1948 		if (!prueth->registered_netdevs[i])
1949 			continue;
1950 		if (prueth->emac[i]->ndev->phydev) {
1951 			phy_disconnect(prueth->emac[i]->ndev->phydev);
1952 			prueth->emac[i]->ndev->phydev = NULL;
1953 		}
1954 		unregister_netdev(prueth->registered_netdevs[i]);
1955 	}
1956 
1957 netdev_exit:
1958 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
1959 		eth_node = prueth->eth_node[i];
1960 		if (!eth_node)
1961 			continue;
1962 
1963 		prueth_netdev_exit(prueth, eth_node);
1964 	}
1965 
1966 exit_iep:
1967 	if (prueth->pdata.quirk_10m_link_issue)
1968 		icss_iep_exit_fw(prueth->iep1);
1969 	icss_iep_put(prueth->iep1);
1970 
1971 put_iep0:
1972 	icss_iep_put(prueth->iep0);
1973 	prueth->iep0 = NULL;
1974 	prueth->iep1 = NULL;
1975 
1976 free_pool:
1977 	gen_pool_free(prueth->sram_pool,
1978 		      (unsigned long)prueth->msmcram.va, msmc_ram_size);
1979 
1980 put_mem:
1981 	pruss_release_mem_region(prueth->pruss, &prueth->shram);
1982 
1983 put_pruss:
1984 	pruss_put(prueth->pruss);
1985 
1986 put_cores:
1987 	if (eth0_node || eth1_node) {
1988 		prueth_put_cores(prueth, ICSS_SLICE0);
1989 		of_node_put(eth0_node);
1990 
1991 		prueth_put_cores(prueth, ICSS_SLICE1);
1992 		of_node_put(eth1_node);
1993 	}
1994 
1995 	return ret;
1996 }
1997 
1998 static void prueth_remove(struct platform_device *pdev)
1999 {
2000 	struct prueth *prueth = platform_get_drvdata(pdev);
2001 	struct device_node *eth_node;
2002 	int i;
2003 
2004 	prueth_unregister_notifiers(prueth);
2005 
2006 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
2007 		if (!prueth->registered_netdevs[i])
2008 			continue;
2009 		phy_stop(prueth->emac[i]->ndev->phydev);
2010 		phy_disconnect(prueth->emac[i]->ndev->phydev);
2011 		prueth->emac[i]->ndev->phydev = NULL;
2012 		unregister_netdev(prueth->registered_netdevs[i]);
2013 	}
2014 
2015 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
2016 		eth_node = prueth->eth_node[i];
2017 		if (!eth_node)
2018 			continue;
2019 
2020 		prueth_netdev_exit(prueth, eth_node);
2021 	}
2022 
2023 	if (prueth->pdata.quirk_10m_link_issue)
2024 		icss_iep_exit_fw(prueth->iep1);
2025 
2026 	icss_iep_put(prueth->iep1);
2027 	icss_iep_put(prueth->iep0);
2028 
2029 	gen_pool_free(prueth->sram_pool,
2030 		      (unsigned long)prueth->msmcram.va,
2031 		      MSMC_RAM_SIZE);
2032 
2033 	pruss_release_mem_region(prueth->pruss, &prueth->shram);
2034 
2035 	pruss_put(prueth->pruss);
2036 
2037 	if (prueth->eth_node[PRUETH_MAC1])
2038 		prueth_put_cores(prueth, ICSS_SLICE1);
2039 
2040 	if (prueth->eth_node[PRUETH_MAC0])
2041 		prueth_put_cores(prueth, ICSS_SLICE0);
2042 }
2043 
/* AM654 SoC: message-mode free descriptor queue rings */
static const struct prueth_pdata am654_icssg_pdata = {
	.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
	.quirk_10m_link_issue = 1,
	.switch_mode = 1,
};
2049 
/* AM64x SoC: exposed-ring-mode free descriptor queue rings */
static const struct prueth_pdata am64x_icssg_pdata = {
	.fdqring_mode = K3_RINGACC_RING_MODE_RING,
	.quirk_10m_link_issue = 1,
	.switch_mode = 1,
};
2055 
/* Device tree match table selecting the per-SoC platform data */
static const struct of_device_id prueth_dt_match[] = {
	{ .compatible = "ti,am654-icssg-prueth", .data = &am654_icssg_pdata },
	{ .compatible = "ti,am642-icssg-prueth", .data = &am64x_icssg_pdata },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, prueth_dt_match);
2062 
/* Platform driver glue and module metadata */
static struct platform_driver prueth_driver = {
	.probe = prueth_probe,
	.remove = prueth_remove,
	.driver = {
		.name = "icssg-prueth",
		.of_match_table = prueth_dt_match,
		.pm = &prueth_dev_pm_ops,
	},
};
module_platform_driver(prueth_driver);

MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
MODULE_DESCRIPTION("PRUSS ICSSG Ethernet Driver");
MODULE_LICENSE("GPL");
2078