xref: /linux/drivers/net/ethernet/ti/icssg/icssg_prueth.c (revision 0d2ab5f922e75d10162e7199826e14df9cfae5cc)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 /* Texas Instruments ICSSG Ethernet Driver
4  *
5  * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
6  *
7  */
8 
9 #include <linux/bitops.h>
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/dma/ti-cppi5.h>
14 #include <linux/etherdevice.h>
15 #include <linux/genalloc.h>
16 #include <linux/if_hsr.h>
17 #include <linux/if_vlan.h>
18 #include <linux/interrupt.h>
19 #include <linux/io-64-nonatomic-hi-lo.h>
20 #include <linux/kernel.h>
21 #include <linux/mfd/syscon.h>
22 #include <linux/module.h>
23 #include <linux/of.h>
24 #include <linux/of_mdio.h>
25 #include <linux/of_net.h>
26 #include <linux/platform_device.h>
27 #include <linux/phy.h>
28 #include <linux/property.h>
29 #include <linux/remoteproc/pruss.h>
30 #include <linux/regmap.h>
31 #include <linux/remoteproc.h>
32 #include <net/switchdev.h>
33 
34 #include "icssg_prueth.h"
35 #include "icssg_mii_rt.h"
36 #include "icssg_switchdev.h"
37 #include "../k3-cppi-desc-pool.h"
38 
39 #define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG Ethernet driver"
40 
41 #define DEFAULT_VID		1
42 #define DEFAULT_PORT_MASK	1
43 #define DEFAULT_UNTAG_MASK	1
44 
45 #define NETIF_PRUETH_HSR_OFFLOAD_FEATURES	(NETIF_F_HW_HSR_FWD | \
46 						 NETIF_F_HW_HSR_DUP | \
47 						 NETIF_F_HW_HSR_TAG_INS | \
48 						 NETIF_F_HW_HSR_TAG_RM)
49 
50 /* CTRLMMR_ICSSG_RGMII_CTRL register bits */
51 #define ICSSG_CTRL_RGMII_ID_MODE                BIT(24)
52 
53 static void emac_adjust_link(struct net_device *ndev);
54 
55 static int emac_get_tx_ts(struct prueth_emac *emac,
56 			  struct emac_tx_ts_response *rsp)
57 {
58 	struct prueth *prueth = emac->prueth;
59 	int slice = prueth_emac_slice(emac);
60 	int addr;
61 
62 	addr = icssg_queue_pop(prueth, slice == 0 ?
63 			       ICSSG_TS_POP_SLICE0 : ICSSG_TS_POP_SLICE1);
64 	if (addr < 0)
65 		return addr;
66 
67 	memcpy_fromio(rsp, prueth->shram.va + addr, sizeof(*rsp));
68 	/* return the buffer back to the pool */
69 	icssg_queue_push(prueth, slice == 0 ?
70 			 ICSSG_TS_PUSH_SLICE0 : ICSSG_TS_PUSH_SLICE1, addr);
71 
72 	return 0;
73 }
74 
75 static void tx_ts_work(struct prueth_emac *emac)
76 {
77 	struct skb_shared_hwtstamps ssh;
78 	struct emac_tx_ts_response tsr;
79 	struct sk_buff *skb;
80 	int ret = 0;
81 	u32 hi_sw;
82 	u64 ns;
83 
84 	/* There may be more than one pending request */
85 	while (1) {
86 		ret = emac_get_tx_ts(emac, &tsr);
87 		if (ret) /* nothing more */
88 			break;
89 
90 		if (tsr.cookie >= PRUETH_MAX_TX_TS_REQUESTS ||
91 		    !emac->tx_ts_skb[tsr.cookie]) {
92 			netdev_err(emac->ndev, "Invalid TX TS cookie 0x%x\n",
93 				   tsr.cookie);
94 			break;
95 		}
96 
97 		skb = emac->tx_ts_skb[tsr.cookie];
98 		emac->tx_ts_skb[tsr.cookie] = NULL;	/* free slot */
99 		if (!skb) {
100 			netdev_err(emac->ndev, "Driver Bug! got NULL skb\n");
101 			break;
102 		}
103 
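		/* The firmware maintains the upper working-clock word in shared
		 * RAM; combine it with the hi/lo timestamp from the response to
		 * reconstruct the nanosecond timestamp.
		 */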
104 		hi_sw = readl(emac->prueth->shram.va +
105 			      TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
106 		ns = icssg_ts_to_ns(hi_sw, tsr.hi_ts, tsr.lo_ts,
107 				    IEP_DEFAULT_CYCLE_TIME_NS);
108 
109 		memset(&ssh, 0, sizeof(ssh));
110 		ssh.hwtstamp = ns_to_ktime(ns);
111 
112 		skb_tstamp_tx(skb, &ssh);
113 		dev_consume_skb_any(skb);
114 
115 		if (atomic_dec_and_test(&emac->tx_ts_pending))	/* no more? */
116 			break;
117 	}
118 }
119 
120 static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id)
121 {
122 	struct prueth_emac *emac = dev_id;
123 
124 	/* currently only TX timestamps are returned */
125 	tx_ts_work(emac);
126 
127 	return IRQ_HANDLED;
128 }
129 
130 static int prueth_start(struct rproc *rproc, const char *fw_name)
131 {
132 	int ret;
133 
134 	ret = rproc_set_firmware(rproc, fw_name);
135 	if (ret)
136 		return ret;
137 	return rproc_boot(rproc);
138 }
139 
140 static void prueth_shutdown(struct rproc *rproc)
141 {
142 	rproc_shutdown(rproc);
143 }
144 
145 static int prueth_emac_start(struct prueth *prueth)
146 {
147 	struct icssg_firmwares *firmwares;
148 	struct device *dev = prueth->dev;
149 	int ret, slice;
150 
151 	if (prueth->is_switch_mode)
152 		firmwares = prueth->icssg_switch_firmwares;
153 	else if (prueth->is_hsr_offload_mode && HSR_V1 == prueth->hsr_prp_version)
154 		firmwares = prueth->icssg_hsr_firmwares;
155 	else if (prueth->is_hsr_offload_mode && PRP_V1 == prueth->hsr_prp_version)
156 		firmwares = prueth->icssg_prp_firmwares;
157 	else
158 		firmwares = prueth->icssg_emac_firmwares;
159 
160 	for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
161 		ret = prueth_start(prueth->pru[slice], firmwares[slice].pru);
162 		if (ret) {
163 			dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
164 			goto unwind_slices;
165 		}
166 
167 		ret = prueth_start(prueth->rtu[slice], firmwares[slice].rtu);
168 		if (ret) {
169 			dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
170 			rproc_shutdown(prueth->pru[slice]);
171 			goto unwind_slices;
172 		}
173 
174 		ret = prueth_start(prueth->txpru[slice], firmwares[slice].txpru);
175 		if (ret) {
176 			dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
177 			rproc_shutdown(prueth->rtu[slice]);
178 			rproc_shutdown(prueth->pru[slice]);
179 			goto unwind_slices;
180 		}
181 	}
182 
183 	return 0;
184 
185 unwind_slices:
186 	while (--slice >= 0) {
187 		prueth_shutdown(prueth->txpru[slice]);
188 		prueth_shutdown(prueth->rtu[slice]);
189 		prueth_shutdown(prueth->pru[slice]);
190 	}
191 
192 	return ret;
193 }
194 
195 static void prueth_emac_stop(struct prueth *prueth)
196 {
197 	int slice;
198 
199 	for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
200 		prueth_shutdown(prueth->txpru[slice]);
201 		prueth_shutdown(prueth->rtu[slice]);
202 		prueth_shutdown(prueth->pru[slice]);
203 	}
204 }
205 
206 static void icssg_enable_fw_offload(struct prueth *prueth)
207 {
208 	struct prueth_emac *emac;
209 	int mac;
210 
211 	for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
212 		emac = prueth->emac[mac];
213 		if (prueth->is_hsr_offload_mode) {
214 			if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM)
215 				icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE);
216 			else
217 				icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE);
218 		}
219 
220 		if (prueth->is_switch_mode || prueth->is_hsr_offload_mode) {
221 			if (netif_running(emac->ndev)) {
222 				icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan,
223 						  ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
224 						  ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
225 						  ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
226 						  ICSSG_FDB_ENTRY_BLOCK,
227 						  true);
228 				icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID,
229 						  BIT(emac->port_id) | DEFAULT_PORT_MASK,
230 						  BIT(emac->port_id) | DEFAULT_UNTAG_MASK,
231 						  true);
232 				if (prueth->is_hsr_offload_mode)
233 					icssg_vtbl_modify(emac, DEFAULT_VID,
234 							  DEFAULT_PORT_MASK,
235 							  DEFAULT_UNTAG_MASK, true);
236 				icssg_set_pvid(prueth, emac->port_vlan, emac->port_id);
237 				if (prueth->is_switch_mode)
238 					icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);
239 			}
240 		}
241 	}
242 }
243 
244 static int prueth_emac_common_start(struct prueth *prueth)
245 {
246 	struct prueth_emac *emac;
247 	int ret = 0;
248 	int slice;
249 
250 	if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
251 		return -EINVAL;
252 
253 	/* clear SMEM and MSMC settings for all slices */
254 	memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
255 	memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
256 
257 	icssg_class_default(prueth->miig_rt, ICSS_SLICE0, 0, false);
258 	icssg_class_default(prueth->miig_rt, ICSS_SLICE1, 0, false);
259 
260 	if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
261 		icssg_init_fw_offload_mode(prueth);
262 	else
263 		icssg_init_emac_mode(prueth);
264 
265 	for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
266 		emac = prueth->emac[slice];
267 		if (!emac)
268 			continue;
269 		ret = icssg_config(prueth, emac, slice);
270 		if (ret)
271 			goto disable_class;
272 
273 		mutex_lock(&emac->ndev->phydev->lock);
274 		emac_adjust_link(emac->ndev);
275 		mutex_unlock(&emac->ndev->phydev->lock);
276 	}
277 
278 	ret = prueth_emac_start(prueth);
279 	if (ret)
280 		goto disable_class;
281 
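	/* The IEP used for timesync is shared by both ports, so initialize it
	 * once via whichever MAC is present.
	 */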
282 	emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
283 	       prueth->emac[ICSS_SLICE1];
284 	ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
285 			    emac, IEP_DEFAULT_CYCLE_TIME_NS);
286 	if (ret) {
287 		dev_err(prueth->dev, "Failed to initialize IEP module\n");
288 		goto stop_pruss;
289 	}
290 
291 	return 0;
292 
293 stop_pruss:
294 	prueth_emac_stop(prueth);
295 
296 disable_class:
297 	icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
298 	icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);
299 
300 	return ret;
301 }
302 
303 static int prueth_emac_common_stop(struct prueth *prueth)
304 {
305 	struct prueth_emac *emac;
306 
307 	if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
308 		return -EINVAL;
309 
310 	icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
311 	icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);
312 
313 	prueth_emac_stop(prueth);
314 
315 	emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
316 	       prueth->emac[ICSS_SLICE1];
317 	icss_iep_exit(emac->iep);
318 
319 	return 0;
320 }
321 
322 /* called back by the PHY layer when there is a change in the link state of the hw port */
323 static void emac_adjust_link(struct net_device *ndev)
324 {
325 	struct prueth_emac *emac = netdev_priv(ndev);
326 	struct phy_device *phydev = ndev->phydev;
327 	struct prueth *prueth = emac->prueth;
328 	bool new_state = false;
329 	unsigned long flags;
330 
331 	if (phydev->link) {
332 		/* check the mode of operation - full/half duplex */
333 		if (phydev->duplex != emac->duplex) {
334 			new_state = true;
335 			emac->duplex = phydev->duplex;
336 		}
337 		if (phydev->speed != emac->speed) {
338 			new_state = true;
339 			emac->speed = phydev->speed;
340 		}
341 		if (!emac->link) {
342 			new_state = true;
343 			emac->link = 1;
344 		}
345 	} else if (emac->link) {
346 		new_state = true;
347 		emac->link = 0;
348 
349 		/* f/w should support 100 & 1000 */
350 		emac->speed = SPEED_1000;
351 
352 		/* half duplex may not be supported by f/w */
353 		emac->duplex = DUPLEX_FULL;
354 	}
355 
356 	if (new_state) {
357 		phy_print_status(phydev);
358 
359 		/* update RGMII and MII configuration based on PHY negotiated
360 		 * values
361 		 */
362 		if (emac->link) {
363 			if (emac->duplex == DUPLEX_HALF)
364 				icssg_config_half_duplex(emac);
365 			/* Set the RGMII cfg for gigabit enable and full duplex */
366 			icssg_update_rgmii_cfg(prueth->miig_rt, emac);
367 
368 			/* update the Tx IPG based on 100M/1G speed */
369 			spin_lock_irqsave(&emac->lock, flags);
370 			icssg_config_ipg(emac);
371 			spin_unlock_irqrestore(&emac->lock, flags);
372 			icssg_config_set_speed(emac);
373 			icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
374 
375 		} else {
376 			icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
377 		}
378 	}
379 
380 	if (emac->link) {
381 		/* reactivate the transmit queue */
382 		netif_tx_wake_all_queues(ndev);
383 	} else {
384 		netif_tx_stop_all_queues(ndev);
385 		prueth_cleanup_tx_ts(emac);
386 	}
387 }
388 
389 static enum hrtimer_restart emac_rx_timer_callback(struct hrtimer *timer)
390 {
391 	struct prueth_emac *emac =
392 			container_of(timer, struct prueth_emac, rx_hrtimer);
393 	int rx_flow = PRUETH_RX_FLOW_DATA;
394 
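	/* RX interrupt pacing: the RX IRQ is presumably left disabled by the
	 * NAPI poll path; re-enable it once the pacing timer expires.
	 */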
395 	enable_irq(emac->rx_chns.irq[rx_flow]);
396 	return HRTIMER_NORESTART;
397 }
398 
399 static int emac_phy_connect(struct prueth_emac *emac)
400 {
401 	struct prueth *prueth = emac->prueth;
402 	struct net_device *ndev = emac->ndev;
403 	/* connect PHY */
404 	ndev->phydev = of_phy_connect(emac->ndev, emac->phy_node,
405 				      &emac_adjust_link, 0,
406 				      emac->phy_if);
407 	if (!ndev->phydev) {
408 		dev_err(prueth->dev, "couldn't connect to phy %s\n",
409 			emac->phy_node->full_name);
410 		return -ENODEV;
411 	}
412 
413 	if (!emac->half_duplex) {
414 		dev_dbg(prueth->dev, "half duplex mode is not supported\n");
415 		phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
416 		phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
417 	}
418 
419 	/* remove unsupported modes */
420 	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
421 	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
422 	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
423 
424 	if (emac->phy_if == PHY_INTERFACE_MODE_MII)
425 		phy_set_max_speed(ndev->phydev, SPEED_100);
426 
427 	return 0;
428 }
429 
430 static u64 prueth_iep_gettime(void *clockops_data, struct ptp_system_timestamp *sts)
431 {
432 	u32 hi_rollover_count, hi_rollover_count_r;
433 	struct prueth_emac *emac = clockops_data;
434 	struct prueth *prueth = emac->prueth;
435 	void __iomem *fw_hi_r_count_addr;
436 	void __iomem *fw_count_hi_addr;
437 	u32 iepcount_hi, iepcount_hi_r;
438 	unsigned long flags;
439 	u32 iepcount_lo;
440 	u64 ts = 0;
441 
442 	fw_count_hi_addr = prueth->shram.va + TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET;
443 	fw_hi_r_count_addr = prueth->shram.va + TIMESYNC_FW_WC_HI_ROLLOVER_COUNT_OFFSET;
444 
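	/* The timestamp cannot be read atomically: sample the hi count and
	 * rollover count before and after reading the low count, with
	 * interrupts disabled, and retry until both pairs of reads agree so
	 * the three values form a consistent snapshot.
	 */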
445 	local_irq_save(flags);
446 	do {
447 		iepcount_hi = icss_iep_get_count_hi(emac->iep);
448 		iepcount_hi += readl(fw_count_hi_addr);
449 		hi_rollover_count = readl(fw_hi_r_count_addr);
450 		ptp_read_system_prets(sts);
451 		iepcount_lo = icss_iep_get_count_low(emac->iep);
452 		ptp_read_system_postts(sts);
453 
454 		iepcount_hi_r = icss_iep_get_count_hi(emac->iep);
455 		iepcount_hi_r += readl(fw_count_hi_addr);
456 		hi_rollover_count_r = readl(fw_hi_r_count_addr);
457 	} while ((iepcount_hi_r != iepcount_hi) ||
458 		 (hi_rollover_count != hi_rollover_count_r));
459 	local_irq_restore(flags);
460 
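	/* Total cycles = (rollover count << 23) | hi count; each cycle is
	 * IEP_DEFAULT_CYCLE_TIME_NS long and the low count supplies the
	 * nanoseconds elapsed within the current cycle.
	 */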
461 	ts = ((u64)hi_rollover_count) << 23 | iepcount_hi;
462 	ts = ts * (u64)IEP_DEFAULT_CYCLE_TIME_NS + iepcount_lo;
463 
464 	return ts;
465 }
466 
467 static void prueth_iep_settime(void *clockops_data, u64 ns)
468 {
469 	struct icssg_setclock_desc __iomem *sc_descp;
470 	struct prueth_emac *emac = clockops_data;
471 	struct icssg_setclock_desc sc_desc;
472 	u64 cyclecount;
473 	u32 cycletime;
474 	int timeout;
475 
476 	sc_descp = emac->prueth->shram.va + TIMESYNC_FW_WC_SETCLOCK_DESC_OFFSET;
477 
478 	cycletime = IEP_DEFAULT_CYCLE_TIME_NS;
479 	cyclecount = ns / cycletime;
480 
481 	memset(&sc_desc, 0, sizeof(sc_desc));
482 	sc_desc.margin = cycletime - 1000;
483 	sc_desc.cyclecounter0_set = cyclecount & GENMASK(31, 0);
484 	sc_desc.cyclecounter1_set = (cyclecount & GENMASK(63, 32)) >> 32;
485 	sc_desc.iepcount_set = ns % cycletime;
486 	/* Count from 0 to (cycle time) - emac->iep->def_inc */
487 	sc_desc.CMP0_current = cycletime - emac->iep->def_inc;
488 
489 	memcpy_toio(sc_descp, &sc_desc, sizeof(sc_desc));
490 
491 	writeb(1, &sc_descp->request);
492 
493 	timeout = 5;	/* fw should take 2-3 ms */
494 	while (timeout--) {
495 		if (readb(&sc_descp->acknowledgment))
496 			return;
497 
498 		usleep_range(500, 1000);
499 	}
500 
501 	dev_err(emac->prueth->dev, "settime timeout\n");
502 }
503 
504 static int prueth_perout_enable(void *clockops_data,
505 				struct ptp_perout_request *req, int on,
506 				u64 *cmp)
507 {
508 	struct prueth_emac *emac = clockops_data;
509 	u32 reduction_factor = 0, offset = 0;
510 	struct timespec64 ts;
511 	u64 current_cycle;
512 	u64 start_offset;
513 	u64 ns_period;
514 
515 	if (!on)
516 		return 0;
517 
518 	/* Any firmware specific stuff for PPS/PEROUT handling */
519 	ts.tv_sec = req->period.sec;
520 	ts.tv_nsec = req->period.nsec;
521 	ns_period = timespec64_to_ns(&ts);
522 
523 	/* f/w doesn't support period less than cycle time */
524 	if (ns_period < IEP_DEFAULT_CYCLE_TIME_NS)
525 		return -ENXIO;
526 
527 	reduction_factor = ns_period / IEP_DEFAULT_CYCLE_TIME_NS;
528 	offset = ns_period % IEP_DEFAULT_CYCLE_TIME_NS;
529 
530 	/* f/w requires at least 1uS within a cycle so CMP
531 	 * can trigger after SYNC is enabled
532 	 */
533 	if (offset < 5 * NSEC_PER_USEC)
534 		offset = 5 * NSEC_PER_USEC;
535 
536 	/* if offset is close to cycle time then we will miss
537 	 * the CMP event for last tick when IEP rolls over.
538 	 * In normal mode, IEP tick is 4ns.
539 	 * In slow compensation it could be 0ns or 8ns at
540 	 * every slow compensation cycle.
541 	 */
542 	if (offset > IEP_DEFAULT_CYCLE_TIME_NS - 8)
543 		offset = IEP_DEFAULT_CYCLE_TIME_NS - 8;
544 
545 	/* we're in shadow mode so need to set upper 32-bits */
546 	*cmp = (u64)offset << 32;
547 
548 	writel(reduction_factor, emac->prueth->shram.va +
549 		TIMESYNC_FW_WC_SYNCOUT_REDUCTION_FACTOR_OFFSET);
550 
551 	current_cycle = icssg_read_time(emac->prueth->shram.va +
552 					TIMESYNC_FW_WC_CYCLECOUNT_OFFSET);
553 
554 	/* Round the current_cycle count up to the next second boundary */
555 	start_offset = roundup(current_cycle, MSEC_PER_SEC);
556 
557 	hi_lo_writeq(start_offset, emac->prueth->shram.va +
558 		     TIMESYNC_FW_WC_SYNCOUT_START_TIME_CYCLECOUNT_OFFSET);
559 
560 	return 0;
561 }
562 
563 const struct icss_iep_clockops prueth_iep_clockops = {
564 	.settime = prueth_iep_settime,
565 	.gettime = prueth_iep_gettime,
566 	.perout_enable = prueth_perout_enable,
567 };
568 
569 static int prueth_create_xdp_rxqs(struct prueth_emac *emac)
570 {
571 	struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
572 	struct page_pool *pool = emac->rx_chns.pg_pool;
573 	int ret;
574 
575 	ret = xdp_rxq_info_reg(rxq, emac->ndev, 0, emac->napi_rx.napi_id);
576 	if (ret)
577 		return ret;
578 
579 	ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
580 	if (ret)
581 		xdp_rxq_info_unreg(rxq);
582 
583 	return ret;
584 }
585 
586 static void prueth_destroy_xdp_rxqs(struct prueth_emac *emac)
587 {
588 	struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
589 
590 	if (!xdp_rxq_info_is_reg(rxq))
591 		return;
592 
593 	xdp_rxq_info_unreg(rxq);
594 }
595 
596 static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr)
597 {
598 	struct net_device *real_dev;
599 	struct prueth_emac *emac;
600 	int port_mask;
601 	u8 vlan_id;
602 
603 	vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC;
604 	real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
605 	emac = netdev_priv(real_dev);
606 
607 	port_mask = BIT(emac->port_id) | icssg_fdb_lookup(emac, addr, vlan_id);
608 	icssg_fdb_add_del(emac, addr, vlan_id, port_mask, true);
609 	icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, true);
610 
611 	return 0;
612 }
613 
614 static int icssg_prueth_del_mcast(struct net_device *ndev, const u8 *addr)
615 {
616 	struct net_device *real_dev;
617 	struct prueth_emac *emac;
618 	int other_port_mask;
619 	int port_mask;
620 	u8 vlan_id;
621 
622 	vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC;
623 	real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
624 	emac = netdev_priv(real_dev);
625 
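	/* Work out which other ports still reference this address; after
	 * removing this port's membership, re-add the entry below with the
	 * remaining port mask so the address stays programmed for them.
	 */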
626 	port_mask = BIT(emac->port_id);
627 	other_port_mask = port_mask ^ icssg_fdb_lookup(emac, addr, vlan_id);
628 
629 	icssg_fdb_add_del(emac, addr, vlan_id, port_mask, false);
630 	icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, false);
631 
632 	if (other_port_mask) {
633 		icssg_fdb_add_del(emac, addr, vlan_id, other_port_mask, true);
634 		icssg_vtbl_modify(emac, vlan_id, other_port_mask,
635 				  other_port_mask, true);
636 	}
637 
638 	return 0;
639 }
640 
641 static void icssg_prueth_hsr_fdb_add_del(struct prueth_emac *emac,
642 					 const u8 *addr, u8 vid, bool add)
643 {
644 	icssg_fdb_add_del(emac, addr, vid,
645 			  ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
646 			  ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
647 			  ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
648 			  ICSSG_FDB_ENTRY_BLOCK, add);
649 
650 	if (add)
651 		icssg_vtbl_modify(emac, vid, BIT(emac->port_id),
652 				  BIT(emac->port_id), add);
653 }
654 
655 static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
656 {
657 	struct net_device *real_dev;
658 	struct prueth_emac *emac;
659 	u8 vlan_id, i;
660 
661 	vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_HSR;
662 	real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
663 
664 	if (is_hsr_master(real_dev)) {
665 		for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
666 			emac = netdev_priv(hsr_get_port_ndev(real_dev, i));
667 			if (!emac)
668 				return -EINVAL;
669 			icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
670 						     true);
671 		}
672 	} else {
673 		emac = netdev_priv(real_dev);
674 		icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, true);
675 	}
676 
677 	return 0;
678 }
679 
680 static int icssg_prueth_hsr_del_mcast(struct net_device *ndev, const u8 *addr)
681 {
682 	struct net_device *real_dev;
683 	struct prueth_emac *emac;
684 	u8 vlan_id, i;
685 
686 	vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_HSR;
687 	real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
688 
689 	if (is_hsr_master(real_dev)) {
690 		for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
691 			emac = netdev_priv(hsr_get_port_ndev(real_dev, i));
692 			if (!emac)
693 				return -EINVAL;
694 			icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
695 						     false);
696 		}
697 	} else {
698 		emac = netdev_priv(real_dev);
699 		icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, false);
700 	}
701 
702 	return 0;
703 }
704 
705 static int icssg_update_vlan_mcast(struct net_device *vdev, int vid,
706 				   void *args)
707 {
708 	struct prueth_emac *emac = args;
709 
710 	if (!vdev || !vid)
711 		return 0;
712 
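	/* Sync the VLAN device's multicast list into our per-VID shadow list,
	 * then push the resulting adds/deletes to the hardware FDB through the
	 * mode-specific helpers.
	 */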
713 	netif_addr_lock_bh(vdev);
714 	__hw_addr_sync_multiple(&emac->vlan_mcast_list[vid], &vdev->mc,
715 				vdev->addr_len);
716 	netif_addr_unlock_bh(vdev);
717 
718 	if (emac->prueth->is_hsr_offload_mode)
719 		__hw_addr_sync_dev(&emac->vlan_mcast_list[vid], vdev,
720 				   icssg_prueth_hsr_add_mcast,
721 				   icssg_prueth_hsr_del_mcast);
722 	else
723 		__hw_addr_sync_dev(&emac->vlan_mcast_list[vid], vdev,
724 				   icssg_prueth_add_mcast,
725 				   icssg_prueth_del_mcast);
726 
727 	return 0;
728 }
729 
730 /**
731  * emac_ndo_open - EMAC device open
732  * @ndev: network adapter device
733  *
734  * Called when the system wants to start the interface.
735  *
736  * Return: 0 for a successful open, or appropriate error code
737  */
738 static int emac_ndo_open(struct net_device *ndev)
739 {
740 	struct prueth_emac *emac = netdev_priv(ndev);
741 	int ret, i, num_data_chn = emac->tx_ch_num;
742 	struct icssg_flow_cfg __iomem *flow_cfg;
743 	struct prueth *prueth = emac->prueth;
744 	int slice = prueth_emac_slice(emac);
745 	struct device *dev = prueth->dev;
746 	int max_rx_flows;
747 	int rx_flow;
748 
749 	/* set h/w MAC as user might have re-configured */
750 	ether_addr_copy(emac->mac_addr, ndev->dev_addr);
751 
752 	icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
753 	icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
754 
755 	/* Notify the stack of the actual queue counts. */
756 	ret = netif_set_real_num_tx_queues(ndev, num_data_chn);
757 	if (ret) {
758 		dev_err(dev, "cannot set real number of tx queues\n");
759 		return ret;
760 	}
761 
762 	init_completion(&emac->cmd_complete);
763 	ret = prueth_init_tx_chns(emac);
764 	if (ret) {
765 		dev_err(dev, "failed to init tx channel: %d\n", ret);
766 		return ret;
767 	}
768 
769 	max_rx_flows = PRUETH_MAX_RX_FLOWS;
770 	ret = prueth_init_rx_chns(emac, &emac->rx_chns, "rx",
771 				  max_rx_flows, PRUETH_MAX_RX_DESC);
772 	if (ret) {
773 		dev_err(dev, "failed to init rx channel: %d\n", ret);
774 		goto cleanup_tx;
775 	}
776 
777 	ret = prueth_ndev_add_tx_napi(emac);
778 	if (ret)
779 		goto cleanup_rx;
780 
781 	/* we use only the highest priority flow for now i.e. @irq[3] */
782 	rx_flow = PRUETH_RX_FLOW_DATA;
783 	ret = request_irq(emac->rx_chns.irq[rx_flow], prueth_rx_irq,
784 			  IRQF_TRIGGER_HIGH, dev_name(dev), emac);
785 	if (ret) {
786 		dev_err(dev, "unable to request RX IRQ\n");
787 		goto cleanup_napi;
788 	}
789 
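	/* The PRU firmware, classifier setup and IEP are shared between the
	 * two ports; bring them up only when the first port is opened.
	 */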
790 	if (!prueth->emacs_initialized) {
791 		ret = prueth_emac_common_start(prueth);
792 		if (ret)
793 			goto free_rx_irq;
794 		icssg_enable_fw_offload(prueth);
795 	}
796 
797 	flow_cfg = emac->dram.va + ICSSG_CONFIG_OFFSET + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET;
798 	writew(emac->rx_flow_id_base, &flow_cfg->rx_base_flow);
799 	ret = emac_fdb_flow_id_updated(emac);
800 
801 	if (ret) {
802 		netdev_err(ndev, "Failed to update Rx Flow ID %d", ret);
803 		goto stop;
804 	}
805 
806 	icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
807 
808 	ret = request_threaded_irq(emac->tx_ts_irq, NULL, prueth_tx_ts_irq,
809 				   IRQF_ONESHOT, dev_name(dev), emac);
810 	if (ret)
811 		goto stop;
812 
813 	/* Prepare RX */
814 	ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
815 	if (ret)
816 		goto free_tx_ts_irq;
817 
818 	ret = prueth_create_xdp_rxqs(emac);
819 	if (ret)
820 		goto reset_rx_chn;
821 
822 	ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
823 	if (ret)
824 		goto destroy_xdp_rxqs;
825 
826 	for (i = 0; i < emac->tx_ch_num; i++) {
827 		ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
828 		if (ret)
829 			goto reset_tx_chan;
830 	}
831 
832 	/* Enable NAPI in Tx and Rx direction */
833 	for (i = 0; i < emac->tx_ch_num; i++)
834 		napi_enable(&emac->tx_chns[i].napi_tx);
835 	napi_enable(&emac->napi_rx);
836 
837 	/* start PHY */
838 	phy_start(ndev->phydev);
839 
840 	prueth->emacs_initialized++;
841 
842 	queue_work(system_long_wq, &emac->stats_work.work);
843 
844 	return 0;
845 
846 reset_tx_chan:
847 	/* Since the interface is not yet up, there wouldn't be
848 	 * any SKB to complete. So pass false for free_skb
849 	 */
850 	prueth_reset_tx_chan(emac, i, false);
851 destroy_xdp_rxqs:
852 	prueth_destroy_xdp_rxqs(emac);
853 reset_rx_chn:
854 	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
855 free_tx_ts_irq:
856 	free_irq(emac->tx_ts_irq, emac);
857 stop:
858 	if (!prueth->emacs_initialized)
859 		prueth_emac_common_stop(prueth);
860 free_rx_irq:
861 	free_irq(emac->rx_chns.irq[rx_flow], emac);
862 cleanup_napi:
863 	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
864 cleanup_rx:
865 	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
866 cleanup_tx:
867 	prueth_cleanup_tx_chns(emac);
868 
869 	return ret;
870 }
871 
872 /**
873  * emac_ndo_stop - EMAC device stop
874  * @ndev: network adapter device
875  *
876  * Called when the system wants to stop or bring down the interface.
877  *
878  * Return: Always 0 (Success)
879  */
880 static int emac_ndo_stop(struct net_device *ndev)
881 {
882 	struct prueth_emac *emac = netdev_priv(ndev);
883 	struct prueth *prueth = emac->prueth;
884 	int rx_flow = PRUETH_RX_FLOW_DATA;
885 	int max_rx_flows;
886 	int ret, i;
887 
888 	/* inform the upper layers. */
889 	netif_tx_stop_all_queues(ndev);
890 
891 	/* block packets from wire */
892 	if (ndev->phydev)
893 		phy_stop(ndev->phydev);
894 
895 	if (emac->prueth->is_hsr_offload_mode)
896 		__dev_mc_unsync(ndev, icssg_prueth_hsr_del_mcast);
897 	else
898 		__dev_mc_unsync(ndev, icssg_prueth_del_mcast);
899 
900 	atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
901 	/* ensure new tdown_cnt value is visible */
902 	smp_mb__after_atomic();
903 	/* tear down and disable UDMA channels */
904 	reinit_completion(&emac->tdown_complete);
905 	for (i = 0; i < emac->tx_ch_num; i++)
906 		k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);
907 
908 	ret = wait_for_completion_timeout(&emac->tdown_complete,
909 					  msecs_to_jiffies(1000));
910 	if (!ret)
911 		netdev_err(ndev, "tx teardown timeout\n");
912 
913 	prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
914 	for (i = 0; i < emac->tx_ch_num; i++) {
915 		napi_disable(&emac->tx_chns[i].napi_tx);
916 		hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer);
917 	}
918 
919 	max_rx_flows = PRUETH_MAX_RX_FLOWS;
920 	k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
921 
922 	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
923 	prueth_destroy_xdp_rxqs(emac);
924 	napi_disable(&emac->napi_rx);
925 	hrtimer_cancel(&emac->rx_hrtimer);
926 
927 	cancel_work_sync(&emac->rx_mode_work);
928 
929 	/* Cancel the queued stats work in ndo_stop() */
930 	cancel_delayed_work_sync(&emac->stats_work);
931 
932 	/* stop PRUs */
933 	if (prueth->emacs_initialized == 1)
934 		prueth_emac_common_stop(prueth);
935 
936 	free_irq(emac->tx_ts_irq, emac);
937 
938 	free_irq(emac->rx_chns.irq[rx_flow], emac);
939 	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
940 
941 	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
942 	prueth_cleanup_tx_chns(emac);
943 
944 	prueth->emacs_initialized--;
945 
946 	return 0;
947 }
948 
949 static void emac_ndo_set_rx_mode_work(struct work_struct *work)
950 {
951 	struct prueth_emac *emac = container_of(work, struct prueth_emac, rx_mode_work);
952 	struct net_device *ndev = emac->ndev;
953 	bool promisc, allmulti;
954 
955 	if (!netif_running(ndev))
956 		return;
957 
958 	promisc = ndev->flags & IFF_PROMISC;
959 	allmulti = ndev->flags & IFF_ALLMULTI;
960 	icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_DISABLE);
961 	icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_DISABLE);
962 
963 	if (promisc) {
964 		icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_ENABLE);
965 		icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
966 		return;
967 	}
968 
969 	if (allmulti) {
970 		icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
971 		return;
972 	}
973 
974 	if (emac->prueth->is_hsr_offload_mode) {
975 		__dev_mc_sync(ndev, icssg_prueth_hsr_add_mcast,
976 			      icssg_prueth_hsr_del_mcast);
977 		if (rtnl_trylock()) {
978 			vlan_for_each(emac->prueth->hsr_dev,
979 				      icssg_update_vlan_mcast, emac);
980 			rtnl_unlock();
981 		}
982 	} else {
983 		__dev_mc_sync(ndev, icssg_prueth_add_mcast,
984 			      icssg_prueth_del_mcast);
985 		if (rtnl_trylock()) {
986 			vlan_for_each(ndev, icssg_update_vlan_mcast, emac);
987 			rtnl_unlock();
988 		}
989 	}
990 }
991 
992 /**
993  * emac_ndo_set_rx_mode - EMAC set receive mode function
994  * @ndev: The EMAC network adapter
995  *
996  * Called when the system wants to set the receive mode of the device.
997  *
998  */
999 static void emac_ndo_set_rx_mode(struct net_device *ndev)
1000 {
1001 	struct prueth_emac *emac = netdev_priv(ndev);
1002 
1003 	queue_work(emac->cmd_wq, &emac->rx_mode_work);
1004 }
1005 
1006 static netdev_features_t emac_ndo_fix_features(struct net_device *ndev,
1007 					       netdev_features_t features)
1008 {
1009 	/* HSR tag insertion offload and HSR duplication offload are tightly coupled
1010 	 * in the firmware implementation. Both of these features must be enabled /
1011 	 * disabled together.
1012 	 */
1013 	if (!(ndev->features & (NETIF_F_HW_HSR_DUP | NETIF_F_HW_HSR_TAG_INS)))
1014 		if ((features & NETIF_F_HW_HSR_DUP) ||
1015 		    (features & NETIF_F_HW_HSR_TAG_INS))
1016 			features |= NETIF_F_HW_HSR_DUP |
1017 				    NETIF_F_HW_HSR_TAG_INS;
1018 
1019 	if ((ndev->features & NETIF_F_HW_HSR_DUP) ||
1020 	    (ndev->features & NETIF_F_HW_HSR_TAG_INS))
1021 		if (!(features & NETIF_F_HW_HSR_DUP) ||
1022 		    !(features & NETIF_F_HW_HSR_TAG_INS))
1023 			features &= ~(NETIF_F_HW_HSR_DUP |
1024 				      NETIF_F_HW_HSR_TAG_INS);
1025 
1026 	return features;
1027 }
1028 
1029 static int emac_ndo_vlan_rx_add_vid(struct net_device *ndev,
1030 				    __be16 proto, u16 vid)
1031 {
1032 	struct prueth_emac *emac = netdev_priv(ndev);
1033 	struct prueth *prueth = emac->prueth;
1034 	int port_mask = BIT(emac->port_id);
1035 	int untag_mask = 0;
1036 
1037 	if (prueth->is_hsr_offload_mode)
1038 		port_mask |= BIT(PRUETH_PORT_HOST);
1039 
1040 	__hw_addr_init(&emac->vlan_mcast_list[vid]);
1041 	netdev_dbg(emac->ndev, "VID add vid:%u port_mask:%X untag_mask %X\n",
1042 		   vid, port_mask, untag_mask);
1043 
1044 	icssg_vtbl_modify(emac, vid, port_mask, untag_mask, true);
1045 	icssg_set_pvid(emac->prueth, vid, emac->port_id);
1046 
1047 	return 0;
1048 }
1049 
1050 static int emac_ndo_vlan_rx_del_vid(struct net_device *ndev,
1051 				    __be16 proto, u16 vid)
1052 {
1053 	struct prueth_emac *emac = netdev_priv(ndev);
1054 	struct prueth *prueth = emac->prueth;
1055 	int port_mask = BIT(emac->port_id);
1056 	int untag_mask = 0;
1057 
1058 	if (prueth->is_hsr_offload_mode)
1059 		port_mask = BIT(PRUETH_PORT_HOST);
1060 
1061 	netdev_dbg(emac->ndev, "VID del vid:%u port_mask:%X untag_mask  %X\n",
1062 		   vid, port_mask, untag_mask);
1063 	icssg_vtbl_modify(emac, vid, port_mask, untag_mask, false);
1064 
1065 	return 0;
1066 }
1067 
1068 /**
1069  * emac_xdp_xmit - Implements ndo_xdp_xmit
1070  * @dev: netdev
1071  * @n: number of frames
1072  * @frames: array of XDP buffer pointers
1073  * @flags: XDP extra info
1074  *
1075  * Return: number of frames successfully sent. Failed frames
1076  * will be freed by the XDP core.
1077  *
1078  * For error cases, a negative errno code is returned and no frames
1079  * are transmitted (the caller must handle freeing the frames).
1080  **/
1081 static int emac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
1082 			 u32 flags)
1083 {
1084 	struct prueth_emac *emac = netdev_priv(dev);
1085 	struct net_device *ndev = emac->ndev;
1086 	struct netdev_queue *netif_txq;
1087 	int cpu = smp_processor_id();
1088 	struct xdp_frame *xdpf;
1089 	unsigned int q_idx;
1090 	int nxmit = 0;
1091 	u32 err;
1092 	int i;
1093 
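	/* Spread XDP transmit across the TX channels by deriving the queue
	 * from the current CPU, and serialize on that queue's TX lock.
	 */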
1094 	q_idx = cpu % emac->tx_ch_num;
1095 	netif_txq = netdev_get_tx_queue(ndev, q_idx);
1096 
1097 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1098 		return -EINVAL;
1099 
1100 	__netif_tx_lock(netif_txq, cpu);
1101 	for (i = 0; i < n; i++) {
1102 		xdpf = frames[i];
1103 		err = emac_xmit_xdp_frame(emac, xdpf, NULL, q_idx);
1104 		if (err != ICSSG_XDP_TX) {
1105 			ndev->stats.tx_dropped++;
1106 			break;
1107 		}
1108 		nxmit++;
1109 	}
1110 	__netif_tx_unlock(netif_txq);
1111 
1112 	return nxmit;
1113 }
1114 
1115 /**
1116  * emac_xdp_setup - add/remove an XDP program
1117  * @emac: emac device
1118  * @bpf: XDP program
1119  *
1120  * Return: Always 0 (Success)
1121  **/
1122 static int emac_xdp_setup(struct prueth_emac *emac, struct netdev_bpf *bpf)
1123 {
1124 	struct bpf_prog *prog = bpf->prog;
1125 
1126 	if (!emac->xdpi.prog && !prog)
1127 		return 0;
1128 
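	/* Publish the new program with WRITE_ONCE() since the RX datapath is
	 * presumed to read xdp_prog locklessly.
	 */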
1129 	WRITE_ONCE(emac->xdp_prog, prog);
1130 
1131 	xdp_attachment_setup(&emac->xdpi, bpf);
1132 
1133 	return 0;
1134 }
1135 
1136 /**
1137  * emac_ndo_bpf - implements ndo_bpf for icssg_prueth
1138  * @ndev: network adapter device
1139  * @bpf: XDP program
1140  *
1141  * Return: 0 on success, error code on failure.
1142  **/
1143 static int emac_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
1144 {
1145 	struct prueth_emac *emac = netdev_priv(ndev);
1146 
1147 	switch (bpf->command) {
1148 	case XDP_SETUP_PROG:
1149 		return emac_xdp_setup(emac, bpf);
1150 	default:
1151 		return -EINVAL;
1152 	}
1153 }
1154 
1155 static const struct net_device_ops emac_netdev_ops = {
1156 	.ndo_open = emac_ndo_open,
1157 	.ndo_stop = emac_ndo_stop,
1158 	.ndo_start_xmit = icssg_ndo_start_xmit,
1159 	.ndo_set_mac_address = eth_mac_addr,
1160 	.ndo_validate_addr = eth_validate_addr,
1161 	.ndo_tx_timeout = icssg_ndo_tx_timeout,
1162 	.ndo_set_rx_mode = emac_ndo_set_rx_mode,
1163 	.ndo_eth_ioctl = icssg_ndo_ioctl,
1164 	.ndo_get_stats64 = icssg_ndo_get_stats64,
1165 	.ndo_get_phys_port_name = icssg_ndo_get_phys_port_name,
1166 	.ndo_fix_features = emac_ndo_fix_features,
1167 	.ndo_vlan_rx_add_vid = emac_ndo_vlan_rx_add_vid,
1168 	.ndo_vlan_rx_kill_vid = emac_ndo_vlan_rx_del_vid,
1169 	.ndo_bpf = emac_ndo_bpf,
1170 	.ndo_xdp_xmit = emac_xdp_xmit,
1171 };
1172 
1173 static int prueth_netdev_init(struct prueth *prueth,
1174 			      struct device_node *eth_node)
1175 {
1176 	int ret, num_tx_chn = PRUETH_MAX_TX_QUEUES;
1177 	struct prueth_emac *emac;
1178 	struct net_device *ndev;
1179 	enum prueth_port port;
1180 	const char *irq_name;
1181 	enum prueth_mac mac;
1182 
1183 	port = prueth_node_port(eth_node);
1184 	if (port == PRUETH_PORT_INVALID)
1185 		return -EINVAL;
1186 
1187 	mac = prueth_node_mac(eth_node);
1188 	if (mac == PRUETH_MAC_INVALID)
1189 		return -EINVAL;
1190 
1191 	ndev = alloc_etherdev_mq(sizeof(*emac), num_tx_chn);
1192 	if (!ndev)
1193 		return -ENOMEM;
1194 
1195 	emac = netdev_priv(ndev);
1196 	emac->prueth = prueth;
1197 	emac->ndev = ndev;
1198 	emac->port_id = port;
1199 	emac->xdp_prog = NULL;
1200 	emac->ndev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
1201 	emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
1202 	if (!emac->cmd_wq) {
1203 		ret = -ENOMEM;
1204 		goto free_ndev;
1205 	}
1206 	INIT_WORK(&emac->rx_mode_work, emac_ndo_set_rx_mode_work);
1207 
1208 	INIT_DELAYED_WORK(&emac->stats_work, icssg_stats_work_handler);
1209 
1210 	ret = pruss_request_mem_region(prueth->pruss,
1211 				       port == PRUETH_PORT_MII0 ?
1212 				       PRUSS_MEM_DRAM0 : PRUSS_MEM_DRAM1,
1213 				       &emac->dram);
1214 	if (ret) {
1215 		dev_err(prueth->dev, "unable to get DRAM: %d\n", ret);
1216 		ret = -ENOMEM;
1217 		goto free_wq;
1218 	}
1219 
1220 	emac->tx_ch_num = 1;
1221 
1222 	irq_name = "tx_ts0";
1223 	if (emac->port_id == PRUETH_PORT_MII1)
1224 		irq_name = "tx_ts1";
1225 	emac->tx_ts_irq = platform_get_irq_byname_optional(prueth->pdev, irq_name);
1226 	if (emac->tx_ts_irq < 0) {
1227 		ret = dev_err_probe(prueth->dev, emac->tx_ts_irq, "could not get tx_ts_irq\n");
1228 		goto free;
1229 	}
1230 
1231 	SET_NETDEV_DEV(ndev, prueth->dev);
1232 	spin_lock_init(&emac->lock);
1233 	mutex_init(&emac->cmd_lock);
1234 
1235 	emac->phy_node = of_parse_phandle(eth_node, "phy-handle", 0);
1236 	if (!emac->phy_node && !of_phy_is_fixed_link(eth_node)) {
1237 		dev_err(prueth->dev, "couldn't find phy-handle\n");
1238 		ret = -ENODEV;
1239 		goto free;
1240 	} else if (of_phy_is_fixed_link(eth_node)) {
1241 		ret = of_phy_register_fixed_link(eth_node);
1242 		if (ret) {
1243 			ret = dev_err_probe(prueth->dev, ret,
1244 					    "failed to register fixed-link phy\n");
1245 			goto free;
1246 		}
1247 
1248 		emac->phy_node = eth_node;
1249 	}
1250 
1251 	ret = of_get_phy_mode(eth_node, &emac->phy_if);
1252 	if (ret) {
1253 		dev_err(prueth->dev, "could not get phy-mode property\n");
1254 		goto free;
1255 	}
1256 
1257 	if (emac->phy_if != PHY_INTERFACE_MODE_MII &&
1258 	    !phy_interface_mode_is_rgmii(emac->phy_if)) {
1259 		dev_err(prueth->dev, "PHY mode unsupported %s\n", phy_modes(emac->phy_if));
1260 		ret = -EINVAL;
1261 		goto free;
1262 	}
1263 
1264 	/* AM65 SR2.0 has TX Internal delay always enabled by hardware
1265 	 * and it is not possible to disable it. The switch block below
1266 	 * describes how we handle the different PHY modes based on this
1267 	 * hardware restriction.
1268 	 */
1269 	switch (emac->phy_if) {
1270 	case PHY_INTERFACE_MODE_RGMII_ID:
1271 		emac->phy_if = PHY_INTERFACE_MODE_RGMII_RXID;
1272 		break;
1273 	case PHY_INTERFACE_MODE_RGMII_TXID:
1274 		emac->phy_if = PHY_INTERFACE_MODE_RGMII;
1275 		break;
1276 	case PHY_INTERFACE_MODE_RGMII:
1277 	case PHY_INTERFACE_MODE_RGMII_RXID:
1278 		dev_err(prueth->dev, "RGMII mode without TX delay is not supported");
1279 		ret = -EINVAL;
1280 		goto free;
1281 	default:
1282 		break;
1283 	}
1284 
1285 	/* get mac address from DT and set private and netdev addr */
1286 	ret = of_get_ethdev_address(eth_node, ndev);
1287 	if (!is_valid_ether_addr(ndev->dev_addr)) {
1288 		eth_hw_addr_random(ndev);
1289 		dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n",
1290 			 port, ndev->dev_addr);
1291 	}
1292 	ether_addr_copy(emac->mac_addr, ndev->dev_addr);
1293 
1294 	ndev->dev.of_node = eth_node;
1295 	ndev->min_mtu = PRUETH_MIN_PKT_SIZE;
1296 	ndev->max_mtu = PRUETH_MAX_MTU;
1297 	ndev->netdev_ops = &emac_netdev_ops;
1298 	ndev->ethtool_ops = &icssg_ethtool_ops;
1299 	ndev->hw_features = NETIF_F_SG;
1300 	ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
1301 	ndev->hw_features |= NETIF_PRUETH_HSR_OFFLOAD_FEATURES;
1302 	xdp_set_features_flag(ndev,
1303 			      NETDEV_XDP_ACT_BASIC |
1304 			      NETDEV_XDP_ACT_REDIRECT |
1305 			      NETDEV_XDP_ACT_NDO_XMIT);
1306 
1307 	netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
1308 	hrtimer_setup(&emac->rx_hrtimer, &emac_rx_timer_callback, CLOCK_MONOTONIC,
1309 		      HRTIMER_MODE_REL_PINNED);
1310 	prueth->emac[mac] = emac;
1311 
1312 	return 0;
1313 
1314 free:
1315 	pruss_release_mem_region(prueth->pruss, &emac->dram);
1316 free_wq:
1317 	destroy_workqueue(emac->cmd_wq);
1318 free_ndev:
1319 	emac->ndev = NULL;
1320 	prueth->emac[mac] = NULL;
1321 	free_netdev(ndev);
1322 
1323 	return ret;
1324 }
1325 
1326 bool prueth_dev_check(const struct net_device *ndev)
1327 {
1328 	if (ndev->netdev_ops == &emac_netdev_ops && netif_running(ndev)) {
1329 		struct prueth_emac *emac = netdev_priv(ndev);
1330 
1331 		return emac->prueth->is_switch_mode;
1332 	}
1333 
1334 	return false;
1335 }
1336 
1337 static void prueth_offload_fwd_mark_update(struct prueth *prueth)
1338 {
1339 	int set_val = 0;
1340 	int i;
1341 
1342 	if (prueth->br_members == (BIT(PRUETH_PORT_MII0) | BIT(PRUETH_PORT_MII1)))
1343 		set_val = 1;
1344 
1345 	dev_dbg(prueth->dev, "set offload_fwd_mark %d\n", set_val);
1346 
1347 	for (i = PRUETH_MAC0; i < PRUETH_NUM_MACS; i++) {
1348 		struct prueth_emac *emac = prueth->emac[i];
1349 
1350 		if (!emac || !emac->ndev)
1351 			continue;
1352 
1353 		emac->offload_fwd_mark = set_val;
1354 	}
1355 }
1356 
1357 static int prueth_emac_restart(struct prueth *prueth)
1358 {
1359 	struct prueth_emac *emac0 = prueth->emac[PRUETH_MAC0];
1360 	struct prueth_emac *emac1 = prueth->emac[PRUETH_MAC1];
1361 	int ret;
1362 
1363 	/* Detach the net_device for both PRUeth ports */
1364 	if (netif_running(emac0->ndev))
1365 		netif_device_detach(emac0->ndev);
1366 	if (netif_running(emac1->ndev))
1367 		netif_device_detach(emac1->ndev);
1368 
1369 	/* Disable both PRUeth ports */
1370 	ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
1371 	ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);
1372 	if (ret)
1373 		return ret;
1374 
1375 	/* Stop the PRU cores of both PRUeth ports */
1376 	ret = prueth_emac_common_stop(prueth);
1377 	if (ret) {
1378 		dev_err(prueth->dev, "Failed to stop the firmwares");
1379 		return ret;
1380 	}
1381 
1382 	/* Start the PRU cores of both PRUeth ports */
1383 	ret = prueth_emac_common_start(prueth);
1384 	if (ret) {
1385 		dev_err(prueth->dev, "Failed to start the firmwares");
1386 		return ret;
1387 	}
1388 
1389 	/* Enable forwarding for both PRUeth ports */
1390 	ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD);
1391 	ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD);
1392 
1393 	/* Attach the net_device for both PRUeth ports */
1394 	netif_device_attach(emac0->ndev);
1395 	netif_device_attach(emac1->ndev);
1396 
1397 	return ret;
1398 }
1399 
1400 static void icssg_change_mode(struct prueth *prueth)
1401 {
1402 	int ret;
1403 
1404 	ret = prueth_emac_restart(prueth);
1405 	if (ret) {
1406 		dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
1407 		return;
1408 	}
1409 
1410 	icssg_enable_fw_offload(prueth);
1411 }
1412 
1413 static int prueth_netdevice_port_link(struct net_device *ndev,
1414 				      struct net_device *br_ndev,
1415 				      struct netlink_ext_ack *extack)
1416 {
1417 	struct prueth_emac *emac = netdev_priv(ndev);
1418 	struct prueth *prueth = emac->prueth;
1419 	int err;
1420 
1421 	if (!prueth->br_members) {
1422 		prueth->hw_bridge_dev = br_ndev;
1423 	} else {
1424 		/* This is adding the port to a second bridge, this is
1425 		/* This is adding the port to a second bridge, which is
1426 		 * unsupported.
1427 		if (prueth->hw_bridge_dev != br_ndev)
1428 			return -EOPNOTSUPP;
1429 	}
1430 
1431 	err = switchdev_bridge_port_offload(br_ndev, ndev, emac,
1432 					    &prueth->prueth_switchdev_nb,
1433 					    &prueth->prueth_switchdev_bl_nb,
1434 					    false, extack);
1435 	if (err)
1436 		return err;
1437 
1438 	prueth->br_members |= BIT(emac->port_id);
1439 
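	/* Switch mode is entered only once both MII ports are members of the
	 * same bridge; the firmware is then restarted in switch configuration.
	 */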
1440 	if (!prueth->is_switch_mode) {
1441 		if (prueth->br_members & BIT(PRUETH_PORT_MII0) &&
1442 		    prueth->br_members & BIT(PRUETH_PORT_MII1)) {
1443 			prueth->is_switch_mode = true;
1444 			prueth->default_vlan = PRUETH_DFLT_VLAN_SW;
1445 			emac->port_vlan = prueth->default_vlan;
1446 			icssg_change_mode(prueth);
1447 		}
1448 	}
1449 
1450 	prueth_offload_fwd_mark_update(prueth);
1451 
1452 	return NOTIFY_DONE;
1453 }
1454 
1455 static void prueth_netdevice_port_unlink(struct net_device *ndev)
1456 {
1457 	struct prueth_emac *emac = netdev_priv(ndev);
1458 	struct prueth *prueth = emac->prueth;
1459 	int ret;
1460 
1461 	prueth->br_members &= ~BIT(emac->port_id);
1462 
1463 	if (prueth->is_switch_mode) {
1464 		prueth->is_switch_mode = false;
1465 		emac->port_vlan = 0;
1466 		ret = prueth_emac_restart(prueth);
1467 		if (ret) {
1468 			dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
1469 			return;
1470 		}
1471 	}
1472 
1473 	prueth_offload_fwd_mark_update(prueth);
1474 
1475 	if (!prueth->br_members)
1476 		prueth->hw_bridge_dev = NULL;
1477 }
1478 
1479 static int prueth_hsr_port_link(struct net_device *ndev)
1480 {
1481 	struct prueth_emac *emac = netdev_priv(ndev);
1482 	struct prueth *prueth = emac->prueth;
1483 	struct prueth_emac *emac0;
1484 	struct prueth_emac *emac1;
1485 
1486 	emac0 = prueth->emac[PRUETH_MAC0];
1487 	emac1 = prueth->emac[PRUETH_MAC1];
1488 
1489 	if (prueth->is_switch_mode)
1490 		return -EOPNOTSUPP;
1491 
1492 	prueth->hsr_members |= BIT(emac->port_id);
1493 	if (!prueth->is_hsr_offload_mode) {
1494 		if (prueth->hsr_members & BIT(PRUETH_PORT_MII0) &&
1495 		    prueth->hsr_members & BIT(PRUETH_PORT_MII1)) {
1496 			if (!(emac0->ndev->features &
1497 			      NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
1498 			    !(emac1->ndev->features &
1499 			      NETIF_PRUETH_HSR_OFFLOAD_FEATURES))
1500 				return -EOPNOTSUPP;
1501 			prueth->is_hsr_offload_mode = true;
1502 			prueth->default_vlan = PRUETH_DFLT_VLAN_HSR;
1503 			emac0->port_vlan = prueth->default_vlan;
1504 			emac1->port_vlan = prueth->default_vlan;
1505 			icssg_change_mode(prueth);
1506 			netdev_dbg(ndev, "Enabling HSR offload mode\n");
1507 		}
1508 	}
1509 
1510 	return 0;
1511 }
1512 
1513 static void prueth_hsr_port_unlink(struct net_device *ndev)
1514 {
1515 	struct prueth_emac *emac = netdev_priv(ndev);
1516 	struct prueth *prueth = emac->prueth;
1517 	struct prueth_emac *emac0;
1518 	struct prueth_emac *emac1;
1519 	int ret;
1520 
1521 	emac0 = prueth->emac[PRUETH_MAC0];
1522 	emac1 = prueth->emac[PRUETH_MAC1];
1523 
1524 	prueth->hsr_members &= ~BIT(emac->port_id);
1525 	if (prueth->is_hsr_offload_mode) {
1526 		prueth->is_hsr_offload_mode = false;
1527 		emac0->port_vlan = 0;
1528 		emac1->port_vlan = 0;
1529 		prueth->hsr_dev = NULL;
1530 		ret = prueth_emac_restart(prueth);
1531 		if (ret) {
1532 			dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
1533 			return;
1534 		}
1535 		netdev_dbg(ndev, "Disabling HSR Offload mode\n");
1536 	}
1537 }
1538 
1539 /* netdev notifier */
1540 static int prueth_netdevice_event(struct notifier_block *unused,
1541 				  unsigned long event, void *ptr)
1542 {
1543 	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
1544 	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
1545 	struct netdev_notifier_changeupper_info *info;
1546 	struct prueth_emac *emac = netdev_priv(ndev);
1547 	struct prueth *prueth = emac->prueth;
1548 	enum hsr_version hsr_ndev_version;
1549 	int ret = NOTIFY_DONE;
1550 
1551 	if (ndev->netdev_ops != &emac_netdev_ops)
1552 		return NOTIFY_DONE;
1553 
1554 	switch (event) {
1555 	case NETDEV_CHANGEUPPER:
1556 		info = ptr;
1557 
1558 		if ((ndev->features & NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
1559 		    is_hsr_master(info->upper_dev)) {
1560 			hsr_get_version(info->upper_dev, &hsr_ndev_version);
1561 			if (hsr_ndev_version != HSR_V1 && hsr_ndev_version != PRP_V1)
1562 				return -EOPNOTSUPP;
1563 			prueth->hsr_prp_version = hsr_ndev_version;
1564 
1565 			if (info->linking) {
1566 				if (!prueth->hsr_dev) {
1567 					prueth->hsr_dev = info->upper_dev;
1568 					icssg_class_set_host_mac_addr(prueth->miig_rt,
1569 								      prueth->hsr_dev->dev_addr);
1570 				} else {
1571 					if (prueth->hsr_dev != info->upper_dev) {
1572 						netdev_dbg(ndev, "Both interfaces must be linked to same upper device\n");
1573 						return -EOPNOTSUPP;
1574 					}
1575 				}
1576 				prueth_hsr_port_link(ndev);
1577 			} else {
1578 				prueth_hsr_port_unlink(ndev);
1579 			}
1580 		}
1581 
1582 		if (netif_is_bridge_master(info->upper_dev)) {
1583 			if (info->linking)
1584 				ret = prueth_netdevice_port_link(ndev, info->upper_dev, extack);
1585 			else
1586 				prueth_netdevice_port_unlink(ndev);
1587 		}
1588 		break;
1589 	default:
1590 		return NOTIFY_DONE;
1591 	}
1592 
1593 	return notifier_from_errno(ret);
1594 }
1595 
1596 static int prueth_register_notifiers(struct prueth *prueth)
1597 {
1598 	int ret = 0;
1599 
1600 	prueth->prueth_netdevice_nb.notifier_call = &prueth_netdevice_event;
1601 	ret = register_netdevice_notifier(&prueth->prueth_netdevice_nb);
1602 	if (ret) {
1603 		dev_err(prueth->dev, "can't register netdevice notifier\n");
1604 		return ret;
1605 	}
1606 
1607 	ret = prueth_switchdev_register_notifiers(prueth);
1608 	if (ret)
1609 		unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
1610 
1611 	return ret;
1612 }
1613 
1614 static void prueth_unregister_notifiers(struct prueth *prueth)
1615 {
1616 	prueth_switchdev_unregister_notifiers(prueth);
1617 	unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
1618 }
1619 
1620 static void icssg_read_firmware_names(struct device_node *np,
1621 				      struct icssg_firmwares *fw)
1622 {
1623 	int i;
1624 
1625 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
1626 		of_property_read_string_index(np, "firmware-name", i * 3 + 0,
1627 					      &fw[i].pru);
1628 		of_property_read_string_index(np, "firmware-name", i * 3 + 1,
1629 					      &fw[i].rtu);
1630 		of_property_read_string_index(np, "firmware-name", i * 3 + 2,
1631 					      &fw[i].txpru);
1632 	}
1633 }
1634 
1635 /* icssg_firmware_name_replace - Replace a substring in firmware name
1636  * @dev: device pointer for memory allocation
1637  * @src: source firmware name string
1638  * @from: substring to replace
1639  * @to: replacement substring
1640  *
1641  * Return: a newly allocated string with the replacement, or the original
1642  * string if replacement is not possible.
1643  */
1644 static const char *icssg_firmware_name_replace(struct device *dev,
1645 					       const char *src,
1646 					       const char *from,
1647 					       const char *to)
1648 {
1649 	size_t prefix, from_len, to_len, total;
1650 	const char *p = strstr(src, from);
1651 	char *buf;
1652 
1653 	if (!p)
1654 		return src; /* fallback: no replacement, use original */
1655 
1656 	prefix = p - src;
1657 	from_len = strlen(from);
1658 	to_len = strlen(to);
1659 	total = strlen(src) - from_len + to_len + 1;
1660 
1661 	buf = devm_kzalloc(dev, total, GFP_KERNEL);
1662 	if (!buf)
1663 		return src; /* fallback: allocation failed, use original */
1664 
1665 	strscpy(buf, src, prefix + 1);
1666 	strscpy(buf + prefix, to, to_len + 1);
1667 	strscpy(buf + prefix + to_len, p + from_len, total - prefix - to_len);
1668 
1669 	return buf;
1670 }
1671 
1672 /**
1673  * icssg_mode_firmware_names - Generate firmware names for a specific mode
1674  * @dev: device pointer for logging and context
1675  * @src: source array of firmware name structures
1676  * @dst: destination array to store updated firmware name structures
1677  * @from: substring in firmware names to be replaced
1678  * @to: substring to replace @from in firmware names
1679  *
1680  * Iterates over all MACs and replaces occurrences of the @from substring
1681  * with @to in the firmware names (pru, rtu, txpru) for each MAC. The
1682  * updated firmware names are stored in the @dst array.
1683  */
1684 static void icssg_mode_firmware_names(struct device *dev,
1685 				      struct icssg_firmwares *src,
1686 				      struct icssg_firmwares *dst,
1687 				      const char *from, const char *to)
1688 {
1689 	int i;
1690 
1691 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
1692 		dst[i].pru = icssg_firmware_name_replace(dev, src[i].pru,
1693 							 from, to);
1694 		dst[i].rtu = icssg_firmware_name_replace(dev, src[i].rtu,
1695 							 from, to);
1696 		dst[i].txpru = icssg_firmware_name_replace(dev, src[i].txpru,
1697 							   from, to);
1698 	}
1699 }
1700 
1701 static int prueth_probe(struct platform_device *pdev)
1702 {
1703 	struct device_node *eth_node, *eth_ports_node;
1704 	struct device_node  *eth0_node = NULL;
1705 	struct device_node  *eth1_node = NULL;
1706 	struct genpool_data_align gp_data = {
1707 		.align = SZ_64K,
1708 	};
1709 	struct device *dev = &pdev->dev;
1710 	struct device_node *np;
1711 	struct prueth *prueth;
1712 	struct pruss *pruss;
1713 	u32 msmc_ram_size;
1714 	int i, ret;
1715 
1716 	np = dev->of_node;
1717 
1718 	BUILD_BUG_ON_MSG((sizeof(struct prueth_swdata) > PRUETH_NAV_SW_DATA_SIZE),
1719 			 "insufficient SW_DATA size");
1720 
1721 	prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
1722 	if (!prueth)
1723 		return -ENOMEM;
1724 
1725 	dev_set_drvdata(dev, prueth);
1726 	prueth->pdev = pdev;
1727 	prueth->pdata = *(const struct prueth_pdata *)device_get_match_data(dev);
1728 
1729 	prueth->dev = dev;
1730 	eth_ports_node = of_get_child_by_name(np, "ethernet-ports");
1731 	if (!eth_ports_node)
1732 		return -ENOENT;
1733 
1734 	for_each_child_of_node(eth_ports_node, eth_node) {
1735 		u32 reg;
1736 
1737 		if (strcmp(eth_node->name, "port"))
1738 			continue;
1739 		ret = of_property_read_u32(eth_node, "reg", &reg);
1740 		if (ret < 0) {
1741 			dev_err(dev, "%pOF error reading port_id %d\n",
1742 				eth_node, ret);
1743 		}
1744 
1745 		of_node_get(eth_node);
1746 
1747 		if (reg == 0) {
1748 			eth0_node = eth_node;
1749 			if (!of_device_is_available(eth0_node)) {
1750 				of_node_put(eth0_node);
1751 				eth0_node = NULL;
1752 			}
1753 		} else if (reg == 1) {
1754 			eth1_node = eth_node;
1755 			if (!of_device_is_available(eth1_node)) {
1756 				of_node_put(eth1_node);
1757 				eth1_node = NULL;
1758 			}
1759 		} else {
1760 			dev_err(dev, "port reg should be 0 or 1\n");
1761 		}
1762 	}
1763 
1764 	of_node_put(eth_ports_node);
1765 
1766 	/* At least one node must be present and available, else we fail */
1767 	if (!eth0_node && !eth1_node) {
1768 		dev_err(dev, "neither port0 nor port1 node available\n");
1769 		return -ENODEV;
1770 	}
1771 
1772 	if (eth0_node == eth1_node) {
1773 		dev_err(dev, "port0 and port1 can't have same reg\n");
1774 		of_node_put(eth0_node);
1775 		return -ENODEV;
1776 	}
1777 
1778 	prueth->eth_node[PRUETH_MAC0] = eth0_node;
1779 	prueth->eth_node[PRUETH_MAC1] = eth1_node;
1780 
1781 	prueth->miig_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-g-rt");
1782 	if (IS_ERR(prueth->miig_rt)) {
1783 		dev_err(dev, "couldn't get ti,mii-g-rt syscon regmap\n");
1784 		return -ENODEV;
1785 	}
1786 
1787 	prueth->mii_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-rt");
1788 	if (IS_ERR(prueth->mii_rt)) {
1789 		dev_err(dev, "couldn't get ti,mii-rt syscon regmap\n");
1790 		return -ENODEV;
1791 	}
1792 
1793 	prueth->pa_stats = syscon_regmap_lookup_by_phandle(np, "ti,pa-stats");
1794 	if (IS_ERR(prueth->pa_stats)) {
1795 		dev_err(dev, "couldn't get ti,pa-stats syscon regmap\n");
1796 		prueth->pa_stats = NULL;
1797 	}
1798 
1799 	if (eth0_node || eth1_node) {
1800 		ret = prueth_get_cores(prueth, ICSS_SLICE0, false);
1801 		if (ret)
1802 			goto put_cores;
1803 		ret = prueth_get_cores(prueth, ICSS_SLICE1, false);
1804 		if (ret)
1805 			goto put_cores;
1806 	}
1807 
1808 	pruss = pruss_get(eth0_node ?
1809 			  prueth->pru[ICSS_SLICE0] : prueth->pru[ICSS_SLICE1]);
1810 	if (IS_ERR(pruss)) {
1811 		ret = PTR_ERR(pruss);
1812 		dev_err(dev, "unable to get pruss handle\n");
1813 		goto put_cores;
1814 	}
1815 
1816 	prueth->pruss = pruss;
1817 
1818 	ret = pruss_request_mem_region(pruss, PRUSS_MEM_SHRD_RAM2,
1819 				       &prueth->shram);
1820 	if (ret) {
1821 		dev_err(dev, "unable to get PRUSS SHRD RAM2: %d\n", ret);
1822 		goto put_pruss;
1823 	}
1824 
1825 	prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
1826 	if (!prueth->sram_pool) {
1827 		dev_err(dev, "unable to get SRAM pool\n");
1828 		ret = -ENODEV;
1829 
1830 		goto put_mem;
1831 	}
1832 
1833 	prueth->is_switchmode_supported = prueth->pdata.switch_mode;
1834 	if (prueth->pdata.banked_ms_ram) {
1835 		/* Reserve 2 MSMC RAM banks for buffers to avoid arbitration */
1836 		msmc_ram_size = (2 * MSMC_RAM_BANK_SIZE);
1837 	} else {
1838 		msmc_ram_size = PRUETH_EMAC_TOTAL_BUF_SIZE;
1839 		if (prueth->is_switchmode_supported)
1840 			msmc_ram_size = PRUETH_SW_TOTAL_BUF_SIZE;
1841 	}
1842 
	/* NOTE: a firmware bug requires the buffer base to be 64 KB aligned */
1844 	prueth->msmcram.va =
1845 		(void __iomem *)gen_pool_alloc_algo(prueth->sram_pool,
1846 						    msmc_ram_size,
1847 						    gen_pool_first_fit_align,
1848 						    &gp_data);
1849 
1850 	if (!prueth->msmcram.va) {
1851 		ret = -ENOMEM;
1852 		dev_err(dev, "unable to allocate MSMC resource\n");
1853 		goto put_mem;
1854 	}
1855 	prueth->msmcram.pa = gen_pool_virt_to_phys(prueth->sram_pool,
1856 						   (unsigned long)prueth->msmcram.va);
1857 	prueth->msmcram.size = msmc_ram_size;
1858 	memset_io(prueth->msmcram.va, 0, msmc_ram_size);
1859 	dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa,
1860 		prueth->msmcram.va, prueth->msmcram.size);
1861 
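	/* Get both IEP (Industrial Ethernet Peripheral) instances; IEP0 is
	 * assigned to both MAC ports further below.
	 */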
1862 	prueth->iep0 = icss_iep_get_idx(np, 0);
1863 	if (IS_ERR(prueth->iep0)) {
1864 		ret = dev_err_probe(dev, PTR_ERR(prueth->iep0), "iep0 get failed\n");
1865 		prueth->iep0 = NULL;
1866 		goto free_pool;
1867 	}
1868 
1869 	prueth->iep1 = icss_iep_get_idx(np, 1);
1870 	if (IS_ERR(prueth->iep1)) {
1871 		ret = dev_err_probe(dev, PTR_ERR(prueth->iep1), "iep1 get failed\n");
1872 		goto put_iep0;
1873 	}
1874 
1875 	if (prueth->pdata.quirk_10m_link_issue) {
		/* Enable IEP1 for the firmware in 64-bit mode as a workaround
		 * for the 10M full-duplex link-detection issue under TX
		 * traffic.
		 */
1879 		icss_iep_init_fw(prueth->iep1);
1880 	}
1881 
1882 	/* Read EMAC firmware names from device tree */
1883 	icssg_read_firmware_names(np, prueth->icssg_emac_firmwares);
1884 
1885 	/* Generate other mode firmware names based on EMAC firmware names */
1886 	icssg_mode_firmware_names(dev, prueth->icssg_emac_firmwares,
1887 				  prueth->icssg_switch_firmwares, "eth", "sw");
1888 	icssg_mode_firmware_names(dev, prueth->icssg_emac_firmwares,
1889 				  prueth->icssg_hsr_firmwares, "eth", "hsr");
1890 	icssg_mode_firmware_names(dev, prueth->icssg_emac_firmwares,
1891 				  prueth->icssg_prp_firmwares, "eth", "prp");
1892 
1893 	spin_lock_init(&prueth->vtbl_lock);
1894 	spin_lock_init(&prueth->stats_lock);
1895 	/* setup netdev interfaces */
1896 	if (eth0_node) {
1897 		ret = prueth_netdev_init(prueth, eth0_node);
1898 		if (ret) {
1899 			dev_err_probe(dev, ret, "netdev init %s failed\n",
1900 				      eth0_node->name);
1901 			goto exit_iep;
1902 		}
1903 
1904 		prueth->emac[PRUETH_MAC0]->half_duplex =
1905 			of_property_read_bool(eth0_node, "ti,half-duplex-capable");
1906 
1907 		prueth->emac[PRUETH_MAC0]->iep = prueth->iep0;
1908 	}
1909 
1910 	if (eth1_node) {
1911 		ret = prueth_netdev_init(prueth, eth1_node);
1912 		if (ret) {
1913 			dev_err_probe(dev, ret, "netdev init %s failed\n",
1914 				      eth1_node->name);
1915 			goto netdev_exit;
1916 		}
1917 
1918 		prueth->emac[PRUETH_MAC1]->half_duplex =
1919 			of_property_read_bool(eth1_node, "ti,half-duplex-capable");
1920 
1921 		prueth->emac[PRUETH_MAC1]->iep = prueth->iep0;
1922 	}
1923 
1924 	/* register the network devices */
1925 	if (eth0_node) {
1926 		ret = register_netdev(prueth->emac[PRUETH_MAC0]->ndev);
1927 		if (ret) {
			dev_err(dev, "can't register netdev for port MII0\n");
1929 			goto netdev_exit;
1930 		}
1931 
1932 		prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev;
1933 
1934 		ret = emac_phy_connect(prueth->emac[PRUETH_MAC0]);
1935 		if (ret) {
			dev_err(dev,
				"can't connect to MII0 PHY, error %d\n", ret);
1938 			goto netdev_unregister;
1939 		}
1940 		phy_attached_info(prueth->emac[PRUETH_MAC0]->ndev->phydev);
1941 	}
1942 
1943 	if (eth1_node) {
1944 		ret = register_netdev(prueth->emac[PRUETH_MAC1]->ndev);
1945 		if (ret) {
			dev_err(dev, "can't register netdev for port MII1\n");
1947 			goto netdev_unregister;
1948 		}
1949 
1950 		prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev;
1951 		ret = emac_phy_connect(prueth->emac[PRUETH_MAC1]);
1952 		if (ret) {
			dev_err(dev,
				"can't connect to MII1 PHY, error %d\n", ret);
1955 			goto netdev_unregister;
1956 		}
1957 		phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev);
1958 	}
1959 
1960 	if (prueth->is_switchmode_supported) {
1961 		ret = prueth_register_notifiers(prueth);
1962 		if (ret)
1963 			goto netdev_unregister;
1964 
		snprintf(prueth->switch_id, sizeof(prueth->switch_id), "%s",
			 dev_name(dev));
1966 	}
1967 
1968 	dev_info(dev, "TI PRU ethernet driver initialized: %s EMAC mode\n",
1969 		 (!eth0_node || !eth1_node) ? "single" : "dual");
1970 
	of_node_put(eth1_node);
	of_node_put(eth0_node);
1975 	return 0;
1976 
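/* Error unwind: release resources in the reverse order of acquisition */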
1977 netdev_unregister:
1978 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
1979 		if (!prueth->registered_netdevs[i])
1980 			continue;
1981 		if (prueth->emac[i]->ndev->phydev) {
1982 			phy_disconnect(prueth->emac[i]->ndev->phydev);
1983 			prueth->emac[i]->ndev->phydev = NULL;
1984 		}
1985 		unregister_netdev(prueth->registered_netdevs[i]);
1986 	}
1987 
1988 netdev_exit:
1989 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
1990 		eth_node = prueth->eth_node[i];
1991 		if (!eth_node)
1992 			continue;
1993 
1994 		prueth_netdev_exit(prueth, eth_node);
1995 	}
1996 
1997 exit_iep:
1998 	if (prueth->pdata.quirk_10m_link_issue)
1999 		icss_iep_exit_fw(prueth->iep1);
2000 	icss_iep_put(prueth->iep1);
2001 
2002 put_iep0:
2003 	icss_iep_put(prueth->iep0);
2004 	prueth->iep0 = NULL;
2005 	prueth->iep1 = NULL;
2006 
2007 free_pool:
2008 	gen_pool_free(prueth->sram_pool,
2009 		      (unsigned long)prueth->msmcram.va,
2010 		      prueth->msmcram.size);
2011 
2012 put_mem:
2013 	pruss_release_mem_region(prueth->pruss, &prueth->shram);
2014 
2015 put_pruss:
2016 	pruss_put(prueth->pruss);
2017 
2018 put_cores:
2019 	if (eth0_node || eth1_node) {
2020 		prueth_put_cores(prueth, ICSS_SLICE0);
2021 		of_node_put(eth0_node);
2022 
2023 		prueth_put_cores(prueth, ICSS_SLICE1);
2024 		of_node_put(eth1_node);
2025 	}
2026 
2027 	return ret;
2028 }
2029 
2030 static void prueth_remove(struct platform_device *pdev)
2031 {
2032 	struct prueth *prueth = platform_get_drvdata(pdev);
2033 	struct device_node *eth_node;
2034 	int i;
2035 
2036 	prueth_unregister_notifiers(prueth);
2037 
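	/* Stop and disconnect the PHYs, then unregister the netdevs */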
2038 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
2039 		if (!prueth->registered_netdevs[i])
2040 			continue;
2041 		phy_stop(prueth->emac[i]->ndev->phydev);
2042 		phy_disconnect(prueth->emac[i]->ndev->phydev);
2043 		prueth->emac[i]->ndev->phydev = NULL;
2044 		unregister_netdev(prueth->registered_netdevs[i]);
2045 	}
2046 
2047 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
2048 		eth_node = prueth->eth_node[i];
2049 		if (!eth_node)
2050 			continue;
2051 
2052 		prueth_netdev_exit(prueth, eth_node);
2053 	}
2054 
2055 	if (prueth->pdata.quirk_10m_link_issue)
2056 		icss_iep_exit_fw(prueth->iep1);
2057 
2058 	icss_iep_put(prueth->iep1);
2059 	icss_iep_put(prueth->iep0);
2060 
2061 	gen_pool_free(prueth->sram_pool,
2062 		(unsigned long)prueth->msmcram.va,
2063 		prueth->msmcram.size);
2064 
2065 	pruss_release_mem_region(prueth->pruss, &prueth->shram);
2066 
2067 	pruss_put(prueth->pruss);
2068 
2069 	if (prueth->eth_node[PRUETH_MAC1])
2070 		prueth_put_cores(prueth, ICSS_SLICE1);
2071 
2072 	if (prueth->eth_node[PRUETH_MAC0])
2073 		prueth_put_cores(prueth, ICSS_SLICE0);
2074 }
2075 
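/* Per-SoC platform data, selected via the OF match table below */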
2076 static const struct prueth_pdata am654_icssg_pdata = {
2077 	.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
2078 	.quirk_10m_link_issue = 1,
2079 	.switch_mode = 1,
2080 	.banked_ms_ram = 0,
2081 };
2082 
2083 static const struct prueth_pdata am64x_icssg_pdata = {
2084 	.fdqring_mode = K3_RINGACC_RING_MODE_RING,
2085 	.quirk_10m_link_issue = 1,
2086 	.switch_mode = 1,
2087 	.banked_ms_ram = 1,
2088 };
2089 
2090 static const struct of_device_id prueth_dt_match[] = {
2091 	{ .compatible = "ti,am654-icssg-prueth", .data = &am654_icssg_pdata },
2092 	{ .compatible = "ti,am642-icssg-prueth", .data = &am64x_icssg_pdata },
2093 	{ /* sentinel */ }
2094 };
2095 MODULE_DEVICE_TABLE(of, prueth_dt_match);
2096 
2097 static struct platform_driver prueth_driver = {
2098 	.probe = prueth_probe,
2099 	.remove = prueth_remove,
2100 	.driver = {
2101 		.name = "icssg-prueth",
2102 		.of_match_table = prueth_dt_match,
2103 		.pm = &prueth_dev_pm_ops,
2104 	},
2105 };
2106 module_platform_driver(prueth_driver);
2107 
2108 MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
2109 MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
MODULE_DESCRIPTION(PRUETH_MODULE_DESCRIPTION);
2111 MODULE_LICENSE("GPL");
2112