xref: /linux/drivers/net/ethernet/ti/icssg/icssg_prueth.c (revision 8f7aa3d3c7323f4ca2768a9e74ebbe359c4f8f88)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 /* Texas Instruments ICSSG Ethernet Driver
4  *
5  * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
6  *
7  */
8 
9 #include <linux/bitops.h>
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/dma/ti-cppi5.h>
14 #include <linux/etherdevice.h>
15 #include <linux/genalloc.h>
16 #include <linux/if_hsr.h>
17 #include <linux/if_vlan.h>
18 #include <linux/interrupt.h>
19 #include <linux/io-64-nonatomic-hi-lo.h>
20 #include <linux/kernel.h>
21 #include <linux/mfd/syscon.h>
22 #include <linux/module.h>
23 #include <linux/of.h>
24 #include <linux/of_mdio.h>
25 #include <linux/of_net.h>
26 #include <linux/platform_device.h>
27 #include <linux/phy.h>
28 #include <linux/property.h>
29 #include <linux/remoteproc/pruss.h>
30 #include <linux/regmap.h>
31 #include <linux/remoteproc.h>
32 #include <net/switchdev.h>
33 
34 #include "icssg_prueth.h"
35 #include "icssg_mii_rt.h"
36 #include "icssg_switchdev.h"
37 #include "../k3-cppi-desc-pool.h"
38 
39 #define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG Ethernet driver"
40 
41 #define DEFAULT_VID		1
42 #define DEFAULT_PORT_MASK	1
43 #define DEFAULT_UNTAG_MASK	1
44 
45 #define NETIF_PRUETH_HSR_OFFLOAD_FEATURES	(NETIF_F_HW_HSR_FWD | \
46 						 NETIF_F_HW_HSR_DUP | \
47 						 NETIF_F_HW_HSR_TAG_INS | \
48 						 NETIF_F_HW_HSR_TAG_RM)
49 
50 #define PRUETH_RX_DMA_ATTR			(DMA_ATTR_SKIP_CPU_SYNC |\
51 						 DMA_ATTR_WEAK_ORDERING)
52 
53 /* CTRLMMR_ICSSG_RGMII_CTRL register bits */
54 #define ICSSG_CTRL_RGMII_ID_MODE                BIT(24)
55 
56 static void emac_adjust_link(struct net_device *ndev);
57 
58 static int emac_get_tx_ts(struct prueth_emac *emac,
59 			  struct emac_tx_ts_response *rsp)
60 {
61 	struct prueth *prueth = emac->prueth;
62 	int slice = prueth_emac_slice(emac);
63 	int addr;
64 
65 	addr = icssg_queue_pop(prueth, slice == 0 ?
66 			       ICSSG_TS_POP_SLICE0 : ICSSG_TS_POP_SLICE1);
67 	if (addr < 0)
68 		return addr;
69 
70 	memcpy_fromio(rsp, prueth->shram.va + addr, sizeof(*rsp));
71 	/* return buffer back for to pool */
72 	icssg_queue_push(prueth, slice == 0 ?
73 			 ICSSG_TS_PUSH_SLICE0 : ICSSG_TS_PUSH_SLICE1, addr);
74 
75 	return 0;
76 }
77 
78 static void tx_ts_work(struct prueth_emac *emac)
79 {
80 	struct skb_shared_hwtstamps ssh;
81 	struct emac_tx_ts_response tsr;
82 	struct sk_buff *skb;
83 	int ret = 0;
84 	u32 hi_sw;
85 	u64 ns;
86 
87 	/* There may be more than one pending requests */
88 	while (1) {
89 		ret = emac_get_tx_ts(emac, &tsr);
90 		if (ret) /* nothing more */
91 			break;
92 
93 		if (tsr.cookie >= PRUETH_MAX_TX_TS_REQUESTS ||
94 		    !emac->tx_ts_skb[tsr.cookie]) {
95 			netdev_err(emac->ndev, "Invalid TX TS cookie 0x%x\n",
96 				   tsr.cookie);
97 			break;
98 		}
99 
100 		skb = emac->tx_ts_skb[tsr.cookie];
101 		emac->tx_ts_skb[tsr.cookie] = NULL;	/* free slot */
102 		if (!skb) {
103 			netdev_err(emac->ndev, "Driver Bug! got NULL skb\n");
104 			break;
105 		}
106 
107 		hi_sw = readl(emac->prueth->shram.va +
108 			      TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
109 		ns = icssg_ts_to_ns(hi_sw, tsr.hi_ts, tsr.lo_ts,
110 				    IEP_DEFAULT_CYCLE_TIME_NS);
111 
112 		memset(&ssh, 0, sizeof(ssh));
113 		ssh.hwtstamp = ns_to_ktime(ns);
114 
115 		skb_tstamp_tx(skb, &ssh);
116 		dev_consume_skb_any(skb);
117 
118 		if (atomic_dec_and_test(&emac->tx_ts_pending))	/* no more? */
119 			break;
120 	}
121 }
122 
123 static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id)
124 {
125 	struct prueth_emac *emac = dev_id;
126 
127 	/* currently only TX timestamp is being returned */
128 	tx_ts_work(emac);
129 
130 	return IRQ_HANDLED;
131 }
132 
/* Load firmware @fw_name into @rproc and boot the core.
 *
 * Return: 0 on success, negative error code from rproc_set_firmware()
 * or rproc_boot() otherwise.
 */
static int prueth_start(struct rproc *rproc, const char *fw_name)
{
	int ret = rproc_set_firmware(rproc, fw_name);

	return ret ? ret : rproc_boot(rproc);
}
142 
/* Thin wrapper around rproc_shutdown() so the stop path mirrors
 * prueth_start().
 */
static void prueth_shutdown(struct rproc *rproc)
{
	rproc_shutdown(rproc);
}
147 
/* Boot all PRU cores (PRU, RTU, TX_PRU) on both slices with the firmware
 * set matching the configured mode (switch / HSR / PRP / dual-EMAC).
 *
 * Cores are booted per slice in the order PRU -> RTU -> TX_PRU; on any
 * failure everything booted so far is shut down again in reverse order.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int prueth_emac_start(struct prueth *prueth)
{
	struct icssg_firmwares *firmwares;
	struct device *dev = prueth->dev;
	int ret, slice;

	/* select the firmware set for the current offload mode */
	if (prueth->is_switch_mode)
		firmwares = prueth->icssg_switch_firmwares;
	else if (prueth->is_hsr_offload_mode && HSR_V1 == prueth->hsr_prp_version)
		firmwares = prueth->icssg_hsr_firmwares;
	else if (prueth->is_hsr_offload_mode && PRP_V1 == prueth->hsr_prp_version)
		firmwares = prueth->icssg_prp_firmwares;
	else
		firmwares = prueth->icssg_emac_firmwares;

	for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
		ret = prueth_start(prueth->pru[slice], firmwares[slice].pru);
		if (ret) {
			dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
			goto unwind_slices;
		}

		ret = prueth_start(prueth->rtu[slice], firmwares[slice].rtu);
		if (ret) {
			dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
			/* undo the partially-booted current slice first */
			rproc_shutdown(prueth->pru[slice]);
			goto unwind_slices;
		}

		ret = prueth_start(prueth->txpru[slice], firmwares[slice].txpru);
		if (ret) {
			dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
			rproc_shutdown(prueth->rtu[slice]);
			rproc_shutdown(prueth->pru[slice]);
			goto unwind_slices;
		}
	}

	return 0;

unwind_slices:
	/* shut down fully-booted earlier slices in reverse boot order */
	while (--slice >= 0) {
		prueth_shutdown(prueth->txpru[slice]);
		prueth_shutdown(prueth->rtu[slice]);
		prueth_shutdown(prueth->pru[slice]);
	}

	return ret;
}
197 
198 static void prueth_emac_stop(struct prueth *prueth)
199 {
200 	int slice;
201 
202 	for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
203 		prueth_shutdown(prueth->txpru[slice]);
204 		prueth_shutdown(prueth->rtu[slice]);
205 		prueth_shutdown(prueth->pru[slice]);
206 	}
207 }
208 
/* Program firmware offload state that must be (re)applied after the PRU
 * cores have booted: HSR RX tag-removal offload, a static FDB entry for
 * the STP multicast address, default VLAN table entries and the port
 * default VLAN ID. Only ports whose net_device is running are touched.
 */
static void icssg_enable_fw_offload(struct prueth *prueth)
{
	struct prueth_emac *emac;
	int mac;

	for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
		emac = prueth->emac[mac];
		if (prueth->is_hsr_offload_mode) {
			/* mirror the user-toggleable NETIF_F_HW_HSR_TAG_RM
			 * feature bit into the firmware port state
			 */
			if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM)
				icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE);
			else
				icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE);
		}

		if (prueth->is_switch_mode || prueth->is_hsr_offload_mode) {
			if (netif_running(emac->ndev)) {
				/* static FDB entry for the STP multicast
				 * address, member of all ports and blocked
				 */
				icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan,
						  ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
						  ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
						  ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
						  ICSSG_FDB_ENTRY_BLOCK,
						  true);
				icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID,
						  BIT(emac->port_id) | DEFAULT_PORT_MASK,
						  BIT(emac->port_id) | DEFAULT_UNTAG_MASK,
						  true);
				if (prueth->is_hsr_offload_mode)
					icssg_vtbl_modify(emac, DEFAULT_VID,
							  DEFAULT_PORT_MASK,
							  DEFAULT_UNTAG_MASK, true);
				icssg_set_pvid(prueth, emac->port_vlan, emac->port_id);
				if (prueth->is_switch_mode)
					icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);
			}
		}
	}
}
246 
/* One-time bring-up shared by both ports, executed when the first port
 * is opened: clear the shared memories, set classifier defaults, apply
 * mode-specific init, configure each registered port, boot the PRU
 * cores and initialize the IEP (PTP) module.
 *
 * Return: 0 on success, -EINVAL if no port is registered, or a negative
 * error code from a failed init step.
 */
static int prueth_emac_common_start(struct prueth *prueth)
{
	struct prueth_emac *emac;
	int ret = 0;
	int slice;

	if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
		return -EINVAL;

	/* clear SMEM and MSMC settings for all slices */
	memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
	memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);

	icssg_class_default(prueth->miig_rt, ICSS_SLICE0, 0, false);
	icssg_class_default(prueth->miig_rt, ICSS_SLICE1, 0, false);

	if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
		icssg_init_fw_offload_mode(prueth);
	else
		icssg_init_emac_mode(prueth);

	for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
		emac = prueth->emac[slice];
		if (!emac)
			continue;
		ret = icssg_config(prueth, emac, slice);
		if (ret)
			goto disable_class;

		/* sync current link parameters into hardware under the
		 * phydev lock, as the PHY layer does for its callbacks
		 */
		mutex_lock(&emac->ndev->phydev->lock);
		emac_adjust_link(emac->ndev);
		mutex_unlock(&emac->ndev->phydev->lock);
	}

	ret = prueth_emac_start(prueth);
	if (ret)
		goto disable_class;

	/* the IEP is shared; init it via whichever port exists */
	emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
	       prueth->emac[ICSS_SLICE1];
	ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
			    emac, IEP_DEFAULT_CYCLE_TIME_NS);
	if (ret) {
		dev_err(prueth->dev, "Failed to initialize IEP module\n");
		goto stop_pruss;
	}

	return 0;

stop_pruss:
	prueth_emac_stop(prueth);

disable_class:
	icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
	icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);

	return ret;
}
305 
/* Counterpart of prueth_emac_common_start(), executed when the last port
 * is closed: disable the classifiers, stop the PRU cores and tear down
 * the shared IEP module.
 *
 * Return: 0 on success, -EINVAL if no port is registered.
 */
static int prueth_emac_common_stop(struct prueth *prueth)
{
	struct prueth_emac *emac;

	if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
		return -EINVAL;

	icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
	icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);

	prueth_emac_stop(prueth);

	/* the IEP is shared; exit it via whichever port exists */
	emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
	       prueth->emac[ICSS_SLICE1];
	icss_iep_exit(emac->iep);

	return 0;
}
324 
/* Called back by the PHY layer if there is a change in link state of the
 * hardware port. Caches the negotiated speed/duplex/link state, pushes
 * any change into the MII/RGMII configuration and firmware port state,
 * and starts or stops the TX queues accordingly.
 */
static void emac_adjust_link(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	struct prueth *prueth = emac->prueth;
	bool new_state = false;
	unsigned long flags;

	if (phydev->link) {
		/* check the mode of operation - full/half duplex */
		if (phydev->duplex != emac->duplex) {
			new_state = true;
			emac->duplex = phydev->duplex;
		}
		if (phydev->speed != emac->speed) {
			new_state = true;
			emac->speed = phydev->speed;
		}
		if (!emac->link) {
			new_state = true;
			emac->link = 1;
		}
	} else if (emac->link) {
		new_state = true;
		emac->link = 0;

		/* f/w should support 100 & 1000 */
		emac->speed = SPEED_1000;

		/* half duplex may not be supported by f/w */
		emac->duplex = DUPLEX_FULL;
	}

	if (new_state) {
		phy_print_status(phydev);

		/* update RGMII and MII configuration based on PHY negotiated
		 * values
		 */
		if (emac->link) {
			if (emac->duplex == DUPLEX_HALF)
				icssg_config_half_duplex(emac);
			/* Set the RGMII cfg for gig en and full duplex */
			icssg_update_rgmii_cfg(prueth->miig_rt, emac);

			/* update the Tx IPG based on 100M/1G speed */
			spin_lock_irqsave(&emac->lock, flags);
			icssg_config_ipg(emac);
			spin_unlock_irqrestore(&emac->lock, flags);
			icssg_config_set_speed(emac);
			icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);

		} else {
			icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
		}
	}

	if (emac->link) {
		/* reactivate the transmit queue */
		netif_tx_wake_all_queues(ndev);
	} else {
		netif_tx_stop_all_queues(ndev);
		/* drop any TX timestamp requests still in flight */
		prueth_cleanup_tx_ts(emac);
	}
}
391 
392 static enum hrtimer_restart emac_rx_timer_callback(struct hrtimer *timer)
393 {
394 	struct prueth_emac *emac =
395 			container_of(timer, struct prueth_emac, rx_hrtimer);
396 	int rx_flow = PRUETH_RX_FLOW_DATA;
397 
398 	if (emac->rx_chns.irq_disabled) {
399 		/* re-enable the RX IRQ */
400 		emac->rx_chns.irq_disabled = false;
401 		enable_irq(emac->rx_chns.irq[rx_flow]);
402 	}
403 	return HRTIMER_NORESTART;
404 }
405 
406 static int emac_phy_connect(struct prueth_emac *emac)
407 {
408 	struct prueth *prueth = emac->prueth;
409 	struct net_device *ndev = emac->ndev;
410 	/* connect PHY */
411 	ndev->phydev = of_phy_connect(emac->ndev, emac->phy_node,
412 				      &emac_adjust_link, 0,
413 				      emac->phy_if);
414 	if (!ndev->phydev) {
415 		dev_err(prueth->dev, "couldn't connect to phy %s\n",
416 			emac->phy_node->full_name);
417 		return -ENODEV;
418 	}
419 
420 	if (!emac->half_duplex) {
421 		dev_dbg(prueth->dev, "half duplex mode is not supported\n");
422 		phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
423 		phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
424 	}
425 
426 	/* remove unsupported modes */
427 	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
428 	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
429 	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
430 
431 	if (emac->phy_if == PHY_INTERFACE_MODE_MII)
432 		phy_set_max_speed(ndev->phydev, SPEED_100);
433 
434 	return 0;
435 }
436 
/* Read the 64-bit wallclock time composed from the hardware IEP counter
 * and two firmware-maintained counters in shared RAM (hi count and hi
 * rollover count).
 *
 * The hi counters are sampled before and after the low count and the
 * read is retried until both samples agree, so a rollover occurring
 * mid-read cannot produce a torn timestamp. IRQs are disabled to keep
 * the sampling window short.
 */
static u64 prueth_iep_gettime(void *clockops_data, struct ptp_system_timestamp *sts)
{
	u32 hi_rollover_count, hi_rollover_count_r;
	struct prueth_emac *emac = clockops_data;
	struct prueth *prueth = emac->prueth;
	void __iomem *fw_hi_r_count_addr;
	void __iomem *fw_count_hi_addr;
	u32 iepcount_hi, iepcount_hi_r;
	unsigned long flags;
	u32 iepcount_lo;
	u64 ts = 0;

	fw_count_hi_addr = prueth->shram.va + TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET;
	fw_hi_r_count_addr = prueth->shram.va + TIMESYNC_FW_WC_HI_ROLLOVER_COUNT_OFFSET;

	local_irq_save(flags);
	do {
		iepcount_hi = icss_iep_get_count_hi(emac->iep);
		iepcount_hi += readl(fw_count_hi_addr);
		hi_rollover_count = readl(fw_hi_r_count_addr);
		/* bracket the low-count read for PTP system timestamping */
		ptp_read_system_prets(sts);
		iepcount_lo = icss_iep_get_count_low(emac->iep);
		ptp_read_system_postts(sts);

		iepcount_hi_r = icss_iep_get_count_hi(emac->iep);
		iepcount_hi_r += readl(fw_count_hi_addr);
		hi_rollover_count_r = readl(fw_hi_r_count_addr);
	} while ((iepcount_hi_r != iepcount_hi) ||
		 (hi_rollover_count != hi_rollover_count_r));
	local_irq_restore(flags);

	/* cycle count = rollover count in the upper bits (23-bit hi count
	 * per rollover), scaled by the cycle time, plus intra-cycle ns
	 */
	ts = ((u64)hi_rollover_count) << 23 | iepcount_hi;
	ts = ts * (u64)IEP_DEFAULT_CYCLE_TIME_NS + iepcount_lo;

	return ts;
}
473 
/* Set the firmware-maintained wallclock time to @ns.
 *
 * Builds a setclock descriptor (cycle counter split into two 32-bit
 * halves, intra-cycle IEP count and CMP0 reload value) in shared RAM,
 * raises the request flag, and polls up to five times for the firmware
 * acknowledgment (expected within 2-3 ms). Logs an error on timeout.
 */
static void prueth_iep_settime(void *clockops_data, u64 ns)
{
	struct icssg_setclock_desc __iomem *sc_descp;
	struct prueth_emac *emac = clockops_data;
	struct icssg_setclock_desc sc_desc;
	u64 cyclecount;
	u32 cycletime;
	int timeout;

	sc_descp = emac->prueth->shram.va + TIMESYNC_FW_WC_SETCLOCK_DESC_OFFSET;

	cycletime = IEP_DEFAULT_CYCLE_TIME_NS;
	cyclecount = ns / cycletime;

	memset(&sc_desc, 0, sizeof(sc_desc));
	sc_desc.margin = cycletime - 1000;
	sc_desc.cyclecounter0_set = cyclecount & GENMASK(31, 0);
	sc_desc.cyclecounter1_set = (cyclecount & GENMASK(63, 32)) >> 32;
	sc_desc.iepcount_set = ns % cycletime;
	/* Count from 0 to (cycle time) - emac->iep->def_inc */
	sc_desc.CMP0_current = cycletime - emac->iep->def_inc;

	memcpy_toio(sc_descp, &sc_desc, sizeof(sc_desc));

	/* writing the request byte hands the descriptor to firmware */
	writeb(1, &sc_descp->request);

	timeout = 5;	/* fw should take 2-3 ms */
	while (timeout--) {
		if (readb(&sc_descp->acknowledgment))
			return;

		usleep_range(500, 1000);
	}

	dev_err(emac->prueth->dev, "settime timeout\n");
}
510 
/* Enable a PTP periodic output (PEROUT/PPS) in firmware.
 *
 * The requested period is split into a whole-cycle reduction factor and
 * an intra-cycle CMP offset; the offset is clamped away from both cycle
 * boundaries, and the sync-out start time is programmed rounded up from
 * the current firmware cycle count. @cmp receives the 64-bit shadow-mode
 * compare value for the IEP driver to program.
 *
 * Return: 0 on success, -ENXIO if the period is below one IEP cycle.
 */
static int prueth_perout_enable(void *clockops_data,
				struct ptp_perout_request *req, int on,
				u64 *cmp)
{
	struct prueth_emac *emac = clockops_data;
	u32 reduction_factor = 0, offset = 0;
	struct timespec64 ts;
	u64 current_cycle;
	u64 start_offset;
	u64 ns_period;

	if (!on)
		return 0;

	/* Any firmware specific stuff for PPS/PEROUT handling */
	ts.tv_sec = req->period.sec;
	ts.tv_nsec = req->period.nsec;
	ns_period = timespec64_to_ns(&ts);

	/* f/w doesn't support period less than cycle time */
	if (ns_period < IEP_DEFAULT_CYCLE_TIME_NS)
		return -ENXIO;

	reduction_factor = ns_period / IEP_DEFAULT_CYCLE_TIME_NS;
	offset = ns_period % IEP_DEFAULT_CYCLE_TIME_NS;

	/* f/w requires at least 5 us within a cycle so CMP
	 * can trigger after SYNC is enabled
	 */
	if (offset < 5 * NSEC_PER_USEC)
		offset = 5 * NSEC_PER_USEC;

	/* if offset is close to cycle time then we will miss
	 * the CMP event for last tick when IEP rolls over.
	 * In normal mode, IEP tick is 4ns.
	 * In slow compensation it could be 0ns or 8ns at
	 * every slow compensation cycle.
	 */
	if (offset > IEP_DEFAULT_CYCLE_TIME_NS - 8)
		offset = IEP_DEFAULT_CYCLE_TIME_NS - 8;

	/* we're in shadow mode so need to set upper 32-bits */
	*cmp = (u64)offset << 32;

	writel(reduction_factor, emac->prueth->shram.va +
		TIMESYNC_FW_WC_SYNCOUT_REDUCTION_FACTOR_OFFSET);

	current_cycle = icssg_read_time(emac->prueth->shram.va +
					TIMESYNC_FW_WC_CYCLECOUNT_OFFSET);

	/* Rounding of current_cycle count to next second */
	start_offset = roundup(current_cycle, MSEC_PER_SEC);

	hi_lo_writeq(start_offset, emac->prueth->shram.va +
		     TIMESYNC_FW_WC_SYNCOUT_START_TIME_CYCLECOUNT_OFFSET);

	return 0;
}
569 
/* PTP clock operations registered with the shared ICSS IEP driver via
 * icss_iep_init() in prueth_emac_common_start().
 */
const struct icss_iep_clockops prueth_iep_clockops = {
	.settime = prueth_iep_settime,
	.gettime = prueth_iep_gettime,
	.perout_enable = prueth_perout_enable,
};
575 
576 static void prueth_destroy_xdp_rxqs(struct prueth_emac *emac)
577 {
578 	struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
579 
580 	if (xdp_rxq_info_is_reg(rxq))
581 		xdp_rxq_info_unreg(rxq);
582 }
583 
584 static int prueth_create_xdp_rxqs(struct prueth_emac *emac)
585 {
586 	struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
587 	struct page_pool *pool = emac->rx_chns.pg_pool;
588 	struct prueth_rx_chn *rx_chn = &emac->rx_chns;
589 	int ret;
590 
591 	ret = xdp_rxq_info_reg(rxq, emac->ndev, 0, emac->napi_rx.napi_id);
592 	if (ret)
593 		return ret;
594 
595 	if (rx_chn->xsk_pool) {
596 		ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_XSK_BUFF_POOL, NULL);
597 		if (ret)
598 			goto xdp_unreg;
599 		xsk_pool_set_rxq_info(rx_chn->xsk_pool, rxq);
600 	} else {
601 		ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
602 		if (ret)
603 			goto xdp_unreg;
604 	}
605 
606 	return 0;
607 
608 xdp_unreg:
609 	prueth_destroy_xdp_rxqs(emac);
610 	return ret;
611 }
612 
613 static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr)
614 {
615 	struct net_device *real_dev;
616 	struct prueth_emac *emac;
617 	int port_mask;
618 	u8 vlan_id;
619 
620 	vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC;
621 	real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
622 	emac = netdev_priv(real_dev);
623 
624 	port_mask = BIT(emac->port_id) | icssg_fdb_lookup(emac, addr, vlan_id);
625 	icssg_fdb_add_del(emac, addr, vlan_id, port_mask, true);
626 	icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, true);
627 
628 	return 0;
629 }
630 
631 static int icssg_prueth_del_mcast(struct net_device *ndev, const u8 *addr)
632 {
633 	struct net_device *real_dev;
634 	struct prueth_emac *emac;
635 	int other_port_mask;
636 	int port_mask;
637 	u8 vlan_id;
638 
639 	vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC;
640 	real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
641 	emac = netdev_priv(real_dev);
642 
643 	port_mask = BIT(emac->port_id);
644 	other_port_mask = port_mask ^ icssg_fdb_lookup(emac, addr, vlan_id);
645 
646 	icssg_fdb_add_del(emac, addr, vlan_id, port_mask, false);
647 	icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, false);
648 
649 	if (other_port_mask) {
650 		icssg_fdb_add_del(emac, addr, vlan_id, other_port_mask, true);
651 		icssg_vtbl_modify(emac, vlan_id, other_port_mask,
652 				  other_port_mask, true);
653 	}
654 
655 	return 0;
656 }
657 
658 static void icssg_prueth_hsr_fdb_add_del(struct prueth_emac *emac,
659 					 const u8 *addr, u8 vid, bool add)
660 {
661 	icssg_fdb_add_del(emac, addr, vid,
662 			  ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
663 			  ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
664 			  ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
665 			  ICSSG_FDB_ENTRY_BLOCK, add);
666 
667 	if (add)
668 		icssg_vtbl_modify(emac, vid, BIT(emac->port_id),
669 				  BIT(emac->port_id), add);
670 }
671 
672 static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
673 {
674 	struct net_device *real_dev, *port_dev;
675 	struct prueth_emac *emac;
676 	u8 vlan_id, i;
677 
678 	vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_HSR;
679 	real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
680 
681 	if (is_hsr_master(real_dev)) {
682 		for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
683 			port_dev = hsr_get_port_ndev(real_dev, i);
684 			emac = netdev_priv(port_dev);
685 			if (!emac) {
686 				dev_put(port_dev);
687 				return -EINVAL;
688 			}
689 			icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
690 						     true);
691 			dev_put(port_dev);
692 		}
693 	} else {
694 		emac = netdev_priv(real_dev);
695 		icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, true);
696 	}
697 
698 	return 0;
699 }
700 
701 static int icssg_prueth_hsr_del_mcast(struct net_device *ndev, const u8 *addr)
702 {
703 	struct net_device *real_dev, *port_dev;
704 	struct prueth_emac *emac;
705 	u8 vlan_id, i;
706 
707 	vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_HSR;
708 	real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
709 
710 	if (is_hsr_master(real_dev)) {
711 		for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
712 			port_dev = hsr_get_port_ndev(real_dev, i);
713 			emac = netdev_priv(port_dev);
714 			if (!emac) {
715 				dev_put(port_dev);
716 				return -EINVAL;
717 			}
718 			icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
719 						     false);
720 			dev_put(port_dev);
721 		}
722 	} else {
723 		emac = netdev_priv(real_dev);
724 		icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, false);
725 	}
726 
727 	return 0;
728 }
729 
730 static int icssg_update_vlan_mcast(struct net_device *vdev, int vid,
731 				   void *args)
732 {
733 	struct prueth_emac *emac = args;
734 
735 	if (!vdev || !vid)
736 		return 0;
737 
738 	netif_addr_lock_bh(vdev);
739 	__hw_addr_sync_multiple(&emac->vlan_mcast_list[vid], &vdev->mc,
740 				vdev->addr_len);
741 	netif_addr_unlock_bh(vdev);
742 
743 	if (emac->prueth->is_hsr_offload_mode)
744 		__hw_addr_sync_dev(&emac->vlan_mcast_list[vid], vdev,
745 				   icssg_prueth_hsr_add_mcast,
746 				   icssg_prueth_hsr_del_mcast);
747 	else
748 		__hw_addr_sync_dev(&emac->vlan_mcast_list[vid], vdev,
749 				   icssg_prueth_add_mcast,
750 				   icssg_prueth_del_mcast);
751 
752 	return 0;
753 }
754 
755 static void prueth_set_xsk_pool(struct prueth_emac *emac, u16 queue_id)
756 {
757 	struct prueth_tx_chn *tx_chn = &emac->tx_chns[queue_id];
758 	struct prueth_rx_chn *rx_chn = &emac->rx_chns;
759 
760 	if (emac->xsk_qid != queue_id) {
761 		rx_chn->xsk_pool = NULL;
762 		tx_chn->xsk_pool = NULL;
763 	} else {
764 		rx_chn->xsk_pool = xsk_get_pool_from_qid(emac->ndev, queue_id);
765 		tx_chn->xsk_pool = xsk_get_pool_from_qid(emac->ndev, queue_id);
766 	}
767 }
768 
/* Tear down all TX DMA channels: request teardown on every channel,
 * wait up to 1 s for tdown_complete to be signalled, then quiesce each
 * channel's NAPI/pacing timer and reset/disable it.
 */
static void prueth_destroy_txq(struct prueth_emac *emac)
{
	int ret, i;

	atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
	/* ensure new tdown_cnt value is visible */
	smp_mb__after_atomic();
	/* tear down and disable UDMA channels */
	reinit_completion(&emac->tdown_complete);
	for (i = 0; i < emac->tx_ch_num; i++)
		k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);

	ret = wait_for_completion_timeout(&emac->tdown_complete,
					  msecs_to_jiffies(1000));
	if (!ret)
		netdev_err(emac->ndev, "tx teardown timeout\n");

	/* proceed with reset even on timeout; channels must be disabled */
	for (i = 0; i < emac->tx_ch_num; i++) {
		napi_disable(&emac->tx_chns[i].napi_tx);
		hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer);
		k3_udma_glue_reset_tx_chn(emac->tx_chns[i].tx_chn,
					  &emac->tx_chns[i],
					  prueth_tx_cleanup);
		k3_udma_glue_disable_tx_chn(emac->tx_chns[i].tx_chn);
	}
}
795 
796 static void prueth_destroy_rxq(struct prueth_emac *emac)
797 {
798 	int i, ret;
799 
800 	/* tear down and disable UDMA channels */
801 	reinit_completion(&emac->tdown_complete);
802 	k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
803 
804 	/* When RX DMA Channel Teardown is initiated, it will result in an
805 	 * interrupt and a Teardown Completion Marker (TDCM) is queued into
806 	 * the RX Completion queue. Acknowledging the interrupt involves
807 	 * popping the TDCM descriptor from the RX Completion queue via the
808 	 * RX NAPI Handler. To avoid timing out when waiting for the TDCM to
809 	 * be popped, schedule the RX NAPI handler to run immediately.
810 	 */
811 	if (!napi_if_scheduled_mark_missed(&emac->napi_rx)) {
812 		if (napi_schedule_prep(&emac->napi_rx))
813 			__napi_schedule(&emac->napi_rx);
814 	}
815 
816 	ret = wait_for_completion_timeout(&emac->tdown_complete,
817 					  msecs_to_jiffies(1000));
818 	if (!ret)
819 		netdev_err(emac->ndev, "rx teardown timeout\n");
820 
821 	for (i = 0; i < PRUETH_MAX_RX_FLOWS; i++) {
822 		napi_disable(&emac->napi_rx);
823 		hrtimer_cancel(&emac->rx_hrtimer);
824 		k3_udma_glue_reset_rx_chn(emac->rx_chns.rx_chn, i,
825 					  &emac->rx_chns,
826 					  prueth_rx_cleanup);
827 	}
828 
829 	prueth_destroy_xdp_rxqs(emac);
830 	k3_udma_glue_disable_rx_chn(emac->rx_chns.rx_chn);
831 }
832 
/* Enable all TX DMA channels and their NAPI contexts.
 *
 * Return: 0 on success; on failure, channels enabled so far are reset
 * and the error code is returned.
 */
static int prueth_create_txq(struct prueth_emac *emac)
{
	int ret, i;

	for (i = 0; i < emac->tx_ch_num; i++) {
		ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
		if (ret)
			goto reset_tx_chan;
		napi_enable(&emac->tx_chns[i].napi_tx);
	}
	return 0;

reset_tx_chan:
	/* Since the interface is not yet up, there won't be any SKBs to
	 * complete, so pass free_skb = false
	 */
	prueth_reset_tx_chan(emac, i, false);
	return ret;
}
852 
/* Prepare, enable and NAPI-activate the RX DMA channel, including XDP
 * RX queue registration.
 *
 * Return: 0 on success; on failure after preparation, the RX channel is
 * reset and the error code is returned.
 */
static int prueth_create_rxq(struct prueth_emac *emac)
{
	int ret;

	ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
	if (ret)
		return ret;

	ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
	if (ret)
		goto reset_rx_chn;

	ret = prueth_create_xdp_rxqs(emac);
	if (ret)
		goto reset_rx_chn;

	napi_enable(&emac->napi_rx);
	return 0;

reset_rx_chn:
	/* interface is not up yet, so nothing to free from descriptors */
	prueth_reset_rx_chan(&emac->rx_chns, PRUETH_MAX_RX_FLOWS, false);
	return ret;
}
876 
877 /**
878  * emac_ndo_open - EMAC device open
879  * @ndev: network adapter device
880  *
881  * Called when system wants to start the interface.
882  *
883  * Return: 0 for a successful open, or appropriate error code
884  */
885 static int emac_ndo_open(struct net_device *ndev)
886 {
887 	struct prueth_emac *emac = netdev_priv(ndev);
888 	int ret, num_data_chn = emac->tx_ch_num;
889 	struct icssg_flow_cfg __iomem *flow_cfg;
890 	struct prueth *prueth = emac->prueth;
891 	int slice = prueth_emac_slice(emac);
892 	struct device *dev = prueth->dev;
893 	int max_rx_flows;
894 	int rx_flow;
895 
896 	/* set h/w MAC as user might have re-configured */
897 	ether_addr_copy(emac->mac_addr, ndev->dev_addr);
898 
899 	icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
900 	icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
901 
902 	/* Notify the stack of the actual queue counts. */
903 	ret = netif_set_real_num_tx_queues(ndev, num_data_chn);
904 	if (ret) {
905 		dev_err(dev, "cannot set real number of tx queues\n");
906 		return ret;
907 	}
908 
909 	emac->xsk_qid = -EINVAL;
910 	init_completion(&emac->cmd_complete);
911 	ret = prueth_init_tx_chns(emac);
912 	if (ret) {
913 		dev_err(dev, "failed to init tx channel: %d\n", ret);
914 		return ret;
915 	}
916 
917 	max_rx_flows = PRUETH_MAX_RX_FLOWS;
918 	ret = prueth_init_rx_chns(emac, &emac->rx_chns, "rx",
919 				  max_rx_flows, PRUETH_MAX_RX_DESC);
920 	if (ret) {
921 		dev_err(dev, "failed to init rx channel: %d\n", ret);
922 		goto cleanup_tx;
923 	}
924 
925 	ret = prueth_ndev_add_tx_napi(emac);
926 	if (ret)
927 		goto cleanup_rx;
928 
929 	/* we use only the highest priority flow for now i.e. @irq[3] */
930 	rx_flow = PRUETH_RX_FLOW_DATA;
931 	ret = request_irq(emac->rx_chns.irq[rx_flow], prueth_rx_irq,
932 			  IRQF_TRIGGER_HIGH, dev_name(dev), emac);
933 	if (ret) {
934 		dev_err(dev, "unable to request RX IRQ\n");
935 		goto cleanup_napi;
936 	}
937 
938 	if (!prueth->emacs_initialized) {
939 		ret = prueth_emac_common_start(prueth);
940 		if (ret)
941 			goto free_rx_irq;
942 		icssg_enable_fw_offload(prueth);
943 	}
944 
945 	flow_cfg = emac->dram.va + ICSSG_CONFIG_OFFSET + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET;
946 	writew(emac->rx_flow_id_base, &flow_cfg->rx_base_flow);
947 	ret = emac_fdb_flow_id_updated(emac);
948 
949 	if (ret) {
950 		netdev_err(ndev, "Failed to update Rx Flow ID %d", ret);
951 		goto stop;
952 	}
953 
954 	icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
955 
956 	ret = request_threaded_irq(emac->tx_ts_irq, NULL, prueth_tx_ts_irq,
957 				   IRQF_ONESHOT, dev_name(dev), emac);
958 	if (ret)
959 		goto stop;
960 
961 	/* Prepare RX */
962 	ret = prueth_create_rxq(emac);
963 	if (ret)
964 		goto free_tx_ts_irq;
965 
966 	ret = prueth_create_txq(emac);
967 	if (ret)
968 		goto destroy_rxq;
969 
970 	/* start PHY */
971 	phy_start(ndev->phydev);
972 
973 	prueth->emacs_initialized++;
974 
975 	queue_work(system_long_wq, &emac->stats_work.work);
976 
977 	return 0;
978 
979 destroy_rxq:
980 	prueth_destroy_rxq(emac);
981 free_tx_ts_irq:
982 	free_irq(emac->tx_ts_irq, emac);
983 stop:
984 	if (!prueth->emacs_initialized)
985 		prueth_emac_common_stop(prueth);
986 free_rx_irq:
987 	free_irq(emac->rx_chns.irq[rx_flow], emac);
988 cleanup_napi:
989 	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
990 cleanup_rx:
991 	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
992 cleanup_tx:
993 	prueth_cleanup_tx_chns(emac);
994 
995 	return ret;
996 }
997 
998 /**
999  * emac_ndo_stop - EMAC device stop
1000  * @ndev: network adapter device
1001  *
1002  * Called when system wants to stop or down the interface.
1003  *
1004  * Return: Always 0 (Success)
1005  */
1006 static int emac_ndo_stop(struct net_device *ndev)
1007 {
1008 	struct prueth_emac *emac = netdev_priv(ndev);
1009 	struct prueth *prueth = emac->prueth;
1010 
1011 	/* inform the upper layers. */
1012 	netif_tx_stop_all_queues(ndev);
1013 
1014 	/* block packets from wire */
1015 	if (ndev->phydev)
1016 		phy_stop(ndev->phydev);
1017 
1018 	if (emac->prueth->is_hsr_offload_mode)
1019 		__dev_mc_unsync(ndev, icssg_prueth_hsr_del_mcast);
1020 	else
1021 		__dev_mc_unsync(ndev, icssg_prueth_del_mcast);
1022 
1023 	prueth_destroy_txq(emac);
1024 	prueth_destroy_rxq(emac);
1025 
1026 	cancel_work_sync(&emac->rx_mode_work);
1027 
1028 	/* Destroying the queued work in ndo_stop() */
1029 	cancel_delayed_work_sync(&emac->stats_work);
1030 
1031 	/* stop PRUs */
1032 	if (prueth->emacs_initialized == 1)
1033 		prueth_emac_common_stop(prueth);
1034 
1035 	free_irq(emac->tx_ts_irq, emac);
1036 
1037 	free_irq(emac->rx_chns.irq[PRUETH_RX_FLOW_DATA], emac);
1038 	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
1039 
1040 	prueth_cleanup_rx_chns(emac, &emac->rx_chns, PRUETH_MAX_RX_FLOWS);
1041 	prueth_cleanup_tx_chns(emac);
1042 
1043 	prueth->emacs_initialized--;
1044 
1045 	return 0;
1046 }
1047 
/* Workqueue handler behind emac_ndo_set_rx_mode(): program the firmware
 * flooding state for promiscuous/allmulti, or sync the device and
 * per-VLAN multicast lists into the hardware FDB otherwise.
 */
static void emac_ndo_set_rx_mode_work(struct work_struct *work)
{
	struct prueth_emac *emac = container_of(work, struct prueth_emac, rx_mode_work);
	struct net_device *ndev = emac->ndev;
	bool promisc, allmulti;

	if (!netif_running(ndev))
		return;

	promisc = ndev->flags & IFF_PROMISC;
	allmulti = ndev->flags & IFF_ALLMULTI;
	/* start from flooding disabled, then enable as needed below */
	icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_DISABLE);
	icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_DISABLE);

	if (promisc) {
		icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_ENABLE);
		icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
		return;
	}

	if (allmulti) {
		icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
		return;
	}

	/* NOTE(review): rtnl_trylock() means the per-VLAN multicast sync
	 * is silently skipped if RTNL is contended; it will only happen
	 * on a later rx_mode pass — confirm this best-effort is intended.
	 */
	if (emac->prueth->is_hsr_offload_mode) {
		__dev_mc_sync(ndev, icssg_prueth_hsr_add_mcast,
			      icssg_prueth_hsr_del_mcast);
		if (rtnl_trylock()) {
			vlan_for_each(emac->prueth->hsr_dev,
				      icssg_update_vlan_mcast, emac);
			rtnl_unlock();
		}
	} else {
		__dev_mc_sync(ndev, icssg_prueth_add_mcast,
			      icssg_prueth_del_mcast);
		if (rtnl_trylock()) {
			vlan_for_each(ndev, icssg_update_vlan_mcast, emac);
			rtnl_unlock();
		}
	}
}
1090 
1091 /**
1092  * emac_ndo_set_rx_mode - EMAC set receive mode function
1093  * @ndev: The EMAC network adapter
1094  *
1095  * Called when system wants to set the receive mode of the device.
1096  *
1097  */
1098 static void emac_ndo_set_rx_mode(struct net_device *ndev)
1099 {
1100 	struct prueth_emac *emac = netdev_priv(ndev);
1101 
1102 	queue_work(emac->cmd_wq, &emac->rx_mode_work);
1103 }
1104 
1105 static netdev_features_t emac_ndo_fix_features(struct net_device *ndev,
1106 					       netdev_features_t features)
1107 {
1108 	/* hsr tag insertion offload and hsr dup offload are tightly coupled in
1109 	 * firmware implementation. Both these features need to be enabled /
1110 	 * disabled together.
1111 	 */
1112 	if (!(ndev->features & (NETIF_F_HW_HSR_DUP | NETIF_F_HW_HSR_TAG_INS)))
1113 		if ((features & NETIF_F_HW_HSR_DUP) ||
1114 		    (features & NETIF_F_HW_HSR_TAG_INS))
1115 			features |= NETIF_F_HW_HSR_DUP |
1116 				    NETIF_F_HW_HSR_TAG_INS;
1117 
1118 	if ((ndev->features & NETIF_F_HW_HSR_DUP) ||
1119 	    (ndev->features & NETIF_F_HW_HSR_TAG_INS))
1120 		if (!(features & NETIF_F_HW_HSR_DUP) ||
1121 		    !(features & NETIF_F_HW_HSR_TAG_INS))
1122 			features &= ~(NETIF_F_HW_HSR_DUP |
1123 				      NETIF_F_HW_HSR_TAG_INS);
1124 
1125 	return features;
1126 }
1127 
/* Add this port (and, in HSR mode, the host port) to @vid in the hardware
 * VLAN table and update the port's default VLAN.
 */
static int emac_ndo_vlan_rx_add_vid(struct net_device *ndev,
				    __be16 proto, u16 vid)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int port_mask = BIT(emac->port_id);
	int untag_mask = 0;

	/* in HSR offload mode the host port joins the VLAN as well so the
	 * CPU keeps receiving this VLAN's traffic
	 */
	if (prueth->is_hsr_offload_mode)
		port_mask |= BIT(PRUETH_PORT_HOST);

	/* reset the per-VID multicast sync list for this VLAN */
	__hw_addr_init(&emac->vlan_mcast_list[vid]);
	netdev_dbg(emac->ndev, "VID add vid:%u port_mask:%X untag_mask %X\n",
		   vid, port_mask, untag_mask);

	icssg_vtbl_modify(emac, vid, port_mask, untag_mask, true);
	/* NOTE(review): this makes the most recently added VID the port's
	 * default VLAN - confirm this is the intended PVID policy.
	 */
	icssg_set_pvid(emac->prueth, vid, emac->port_id);

	return 0;
}
1148 
/* Remove membership for @vid from the hardware VLAN table. */
static int emac_ndo_vlan_rx_del_vid(struct net_device *ndev,
				    __be16 proto, u16 vid)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int port_mask = BIT(emac->port_id);
	int untag_mask = 0;

	/* NOTE(review): unlike the add path, which ORs in the host port
	 * (port_mask |= BIT(PRUETH_PORT_HOST)), this assignment overwrites
	 * port_mask and drops BIT(emac->port_id) in HSR mode - confirm the
	 * physical port is meant to stay a VLAN member on deletion.
	 */
	if (prueth->is_hsr_offload_mode)
		port_mask = BIT(PRUETH_PORT_HOST);

	netdev_dbg(emac->ndev, "VID del vid:%u port_mask:%X untag_mask  %X\n",
		   vid, port_mask, untag_mask);
	icssg_vtbl_modify(emac, vid, port_mask, untag_mask, false);

	return 0;
}
1166 
1167 /**
1168  * emac_xdp_xmit - Implements ndo_xdp_xmit
1169  * @dev: netdev
1170  * @n: number of frames
1171  * @frames: array of XDP buffer pointers
1172  * @flags: XDP extra info
1173  *
1174  * Return: number of frames successfully sent. Failed frames
1175  * will be free'ed by XDP core.
1176  *
1177  * For error cases, a negative errno code is returned and no-frames
1178  * are transmitted (caller must handle freeing frames).
1179  **/
1180 static int emac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
1181 			 u32 flags)
1182 {
1183 	struct prueth_emac *emac = netdev_priv(dev);
1184 	struct net_device *ndev = emac->ndev;
1185 	struct netdev_queue *netif_txq;
1186 	int cpu = smp_processor_id();
1187 	struct xdp_frame *xdpf;
1188 	unsigned int q_idx;
1189 	int nxmit = 0;
1190 	u32 err;
1191 	int i;
1192 
1193 	q_idx = cpu % emac->tx_ch_num;
1194 	netif_txq = netdev_get_tx_queue(ndev, q_idx);
1195 
1196 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1197 		return -EINVAL;
1198 
1199 	__netif_tx_lock(netif_txq, cpu);
1200 	for (i = 0; i < n; i++) {
1201 		xdpf = frames[i];
1202 		err = emac_xmit_xdp_frame(emac, xdpf, q_idx,
1203 					  PRUETH_TX_BUFF_TYPE_XDP_NDO);
1204 		if (err != ICSSG_XDP_TX) {
1205 			ndev->stats.tx_dropped++;
1206 			break;
1207 		}
1208 		nxmit++;
1209 	}
1210 	__netif_tx_unlock(netif_txq);
1211 
1212 	return nxmit;
1213 }
1214 
1215 /**
1216  * emac_xdp_setup - add/remove an XDP program
1217  * @emac: emac device
1218  * @bpf: XDP program
1219  *
1220  * Return: Always 0 (Success)
1221  **/
1222 static int emac_xdp_setup(struct prueth_emac *emac, struct netdev_bpf *bpf)
1223 {
1224 	struct bpf_prog *prog = bpf->prog;
1225 
1226 	if (!emac->xdpi.prog && !prog)
1227 		return 0;
1228 
1229 	WRITE_ONCE(emac->xdp_prog, prog);
1230 
1231 	xdp_attachment_setup(&emac->xdpi, bpf);
1232 
1233 	return 0;
1234 }
1235 
/* Attach an AF_XDP zero-copy buffer pool to @queue_id. If the interface is
 * up, the RX queue is torn down and rebuilt around the pool.
 */
static int prueth_xsk_pool_enable(struct prueth_emac *emac,
				  struct xsk_buff_pool *pool, u16 queue_id)
{
	struct prueth_rx_chn *rx_chn = &emac->rx_chns;
	u32 frame_size;
	int ret;

	/* the queue id doubles as RX flow index and TX channel index */
	if (queue_id >= PRUETH_MAX_RX_FLOWS ||
	    queue_id >= emac->tx_ch_num) {
		netdev_err(emac->ndev, "Invalid XSK queue ID %d\n", queue_id);
		return -EINVAL;
	}

	/* each pool chunk must be able to hold a max-sized frame */
	frame_size = xsk_pool_get_rx_frame_size(pool);
	if (frame_size < PRUETH_MAX_PKT_SIZE)
		return -EOPNOTSUPP;

	ret = xsk_pool_dma_map(pool, rx_chn->dma_dev, PRUETH_RX_DMA_ATTR);
	if (ret) {
		netdev_err(emac->ndev, "Failed to map XSK pool: %d\n", ret);
		return ret;
	}

	/* NOTE(review): error returns below leave the pool DMA-mapped (and
	 * possibly the port disabled with its rxq destroyed) - confirm
	 * whether unwind via xsk_pool_dma_unmap() is required here.
	 */
	if (netif_running(emac->ndev)) {
		/* stop packets from wire for graceful teardown */
		ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
		if (ret)
			return ret;
		prueth_destroy_rxq(emac);
	}

	/* record the zero-copy queue; the rxq rebuild below picks it up */
	emac->xsk_qid = queue_id;
	prueth_set_xsk_pool(emac, queue_id);

	if (netif_running(emac->ndev)) {
		ret = prueth_create_rxq(emac);
		if (ret) {
			netdev_err(emac->ndev, "Failed to create RX queue: %d\n", ret);
			return ret;
		}
		ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
		if (ret) {
			prueth_destroy_rxq(emac);
			return ret;
		}
		/* kick NAPI so the new fill queue is drained immediately */
		ret = prueth_xsk_wakeup(emac->ndev, queue_id, XDP_WAKEUP_RX);
		if (ret)
			return ret;
	}

	return 0;
}
1288 
/* Detach the AF_XDP buffer pool from @queue_id and, if the interface is up,
 * rebuild the RX queue on regular (non zero-copy) buffers.
 */
static int prueth_xsk_pool_disable(struct prueth_emac *emac, u16 queue_id)
{
	struct xsk_buff_pool *pool;
	int ret;

	/* the queue id doubles as RX flow index and TX channel index */
	if (queue_id >= PRUETH_MAX_RX_FLOWS ||
	    queue_id >= emac->tx_ch_num) {
		netdev_err(emac->ndev, "Invalid XSK queue ID %d\n", queue_id);
		return -EINVAL;
	}

	if (emac->xsk_qid != queue_id) {
		netdev_err(emac->ndev, "XSK queue ID %d not registered\n", queue_id);
		return -EINVAL;
	}

	pool = xsk_get_pool_from_qid(emac->ndev, queue_id);
	if (!pool) {
		netdev_err(emac->ndev, "No XSK pool registered for queue %d\n", queue_id);
		return -EINVAL;
	}

	if (netif_running(emac->ndev)) {
		/* stop packets from wire for graceful teardown */
		ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
		if (ret)
			return ret;
		prueth_destroy_rxq(emac);
	}

	xsk_pool_dma_unmap(pool, PRUETH_RX_DMA_ATTR);
	/* invalidate the recorded queue before re-deriving the pool state */
	emac->xsk_qid = -EINVAL;
	prueth_set_xsk_pool(emac, queue_id);

	if (netif_running(emac->ndev)) {
		ret = prueth_create_rxq(emac);
		if (ret) {
			netdev_err(emac->ndev, "Failed to create RX queue: %d\n", ret);
			return ret;
		}
		ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
		if (ret) {
			prueth_destroy_rxq(emac);
			return ret;
		}
	}

	return 0;
}
1338 
1339 /**
1340  * emac_ndo_bpf - implements ndo_bpf for icssg_prueth
1341  * @ndev: network adapter device
1342  * @bpf: XDP program
1343  *
1344  * Return: 0 on success, error code on failure.
1345  **/
1346 static int emac_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
1347 {
1348 	struct prueth_emac *emac = netdev_priv(ndev);
1349 
1350 	switch (bpf->command) {
1351 	case XDP_SETUP_PROG:
1352 		return emac_xdp_setup(emac, bpf);
1353 	case XDP_SETUP_XSK_POOL:
1354 		return bpf->xsk.pool ?
1355 			prueth_xsk_pool_enable(emac, bpf->xsk.pool, bpf->xsk.queue_id) :
1356 			prueth_xsk_pool_disable(emac, bpf->xsk.queue_id);
1357 	default:
1358 		return -EINVAL;
1359 	}
1360 }
1361 
/* ndo_xsk_wakeup implementation: nudge the TX and/or RX NAPI contexts for
 * the zero-copy queue so queued AF_XDP descriptors get processed.
 */
int prueth_xsk_wakeup(struct net_device *ndev, u32 qid, u32 flags)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth_tx_chn *tx_chn = &emac->tx_chns[qid];
	struct prueth_rx_chn *rx_chn = &emac->rx_chns;

	if (emac->xsk_qid != qid) {
		netdev_err(ndev, "XSK queue %d not registered\n", qid);
		return -EINVAL;
	}

	if (qid >= PRUETH_MAX_RX_FLOWS || qid >= emac->tx_ch_num) {
		netdev_err(ndev, "Invalid XSK queue ID %d\n", qid);
		return -EINVAL;
	}

	if (!tx_chn->xsk_pool) {
		netdev_err(ndev, "XSK pool not registered for queue %d\n", qid);
		return -EINVAL;
	}

	if (!rx_chn->xsk_pool) {
		netdev_err(ndev, "XSK pool not registered for RX queue %d\n", qid);
		return -EINVAL;
	}

	/* If NAPI is already running, mark it missed so it re-polls;
	 * otherwise schedule it.
	 */
	if (flags & XDP_WAKEUP_TX) {
		if (!napi_if_scheduled_mark_missed(&tx_chn->napi_tx)) {
			if (likely(napi_schedule_prep(&tx_chn->napi_tx)))
				__napi_schedule(&tx_chn->napi_tx);
		}
	}

	if (flags & XDP_WAKEUP_RX) {
		if (!napi_if_scheduled_mark_missed(&emac->napi_rx)) {
			if (likely(napi_schedule_prep(&emac->napi_rx)))
				__napi_schedule(&emac->napi_rx);
		}
	}

	return 0;
}
1404 
/* netdev operations installed on every ICSSG port in prueth_netdev_init() */
static const struct net_device_ops emac_netdev_ops = {
	.ndo_open = emac_ndo_open,
	.ndo_stop = emac_ndo_stop,
	.ndo_start_xmit = icssg_ndo_start_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_tx_timeout = icssg_ndo_tx_timeout,
	.ndo_set_rx_mode = emac_ndo_set_rx_mode,
	.ndo_eth_ioctl = phy_do_ioctl,
	.ndo_get_stats64 = icssg_ndo_get_stats64,
	.ndo_get_phys_port_name = icssg_ndo_get_phys_port_name,
	.ndo_fix_features = emac_ndo_fix_features,
	.ndo_vlan_rx_add_vid = emac_ndo_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = emac_ndo_vlan_rx_del_vid,
	.ndo_bpf = emac_ndo_bpf,
	.ndo_xdp_xmit = emac_xdp_xmit,
	.ndo_hwtstamp_get = icssg_ndo_get_ts_config,
	.ndo_hwtstamp_set = icssg_ndo_set_ts_config,
	.ndo_xsk_wakeup = prueth_xsk_wakeup,
};
1425 
/* prueth_netdev_init - allocate and set up the net_device for one port
 * @prueth: ICSSG driver instance
 * @eth_node: DT "port" child node describing the port
 *
 * Allocates the netdev, claims the slice's DRAM region and TX-TS IRQ,
 * resolves PHY configuration from DT, and registers netdev ops/features.
 * On success the new emac is stored in prueth->emac[mac].
 *
 * Return: 0 on success, negative errno on failure (resources unwound via
 * the goto chain at the bottom).
 */
static int prueth_netdev_init(struct prueth *prueth,
			      struct device_node *eth_node)
{
	int ret, num_tx_chn = PRUETH_MAX_TX_QUEUES;
	struct prueth_emac *emac;
	struct net_device *ndev;
	enum prueth_port port;
	const char *irq_name;
	enum prueth_mac mac;

	port = prueth_node_port(eth_node);
	if (port == PRUETH_PORT_INVALID)
		return -EINVAL;

	mac = prueth_node_mac(eth_node);
	if (mac == PRUETH_MAC_INVALID)
		return -EINVAL;

	ndev = alloc_etherdev_mq(sizeof(*emac), num_tx_chn);
	if (!ndev)
		return -ENOMEM;

	emac = netdev_priv(ndev);
	emac->prueth = prueth;
	emac->ndev = ndev;
	emac->port_id = port;
	emac->xdp_prog = NULL;
	emac->ndev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
	/* single-threaded queue serializes firmware command work items */
	emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
	if (!emac->cmd_wq) {
		ret = -ENOMEM;
		goto free_ndev;
	}
	INIT_WORK(&emac->rx_mode_work, emac_ndo_set_rx_mode_work);

	INIT_DELAYED_WORK(&emac->stats_work, icssg_stats_work_handler);

	/* each port owns the data RAM of its own slice */
	ret = pruss_request_mem_region(prueth->pruss,
				       port == PRUETH_PORT_MII0 ?
				       PRUSS_MEM_DRAM0 : PRUSS_MEM_DRAM1,
				       &emac->dram);
	if (ret) {
		dev_err(prueth->dev, "unable to get DRAM: %d\n", ret);
		ret = -ENOMEM;
		goto free_wq;
	}

	emac->tx_ch_num = 1;

	/* per-port TX timestamp interrupt */
	irq_name = "tx_ts0";
	if (emac->port_id == PRUETH_PORT_MII1)
		irq_name = "tx_ts1";
	emac->tx_ts_irq = platform_get_irq_byname_optional(prueth->pdev, irq_name);
	if (emac->tx_ts_irq < 0) {
		ret = dev_err_probe(prueth->dev, emac->tx_ts_irq, "could not get tx_ts_irq\n");
		goto free;
	}

	SET_NETDEV_DEV(ndev, prueth->dev);
	spin_lock_init(&emac->lock);
	mutex_init(&emac->cmd_lock);

	/* a phy-handle is mandatory unless the port is a fixed link */
	emac->phy_node = of_parse_phandle(eth_node, "phy-handle", 0);
	if (!emac->phy_node && !of_phy_is_fixed_link(eth_node)) {
		dev_err(prueth->dev, "couldn't find phy-handle\n");
		ret = -ENODEV;
		goto free;
	} else if (of_phy_is_fixed_link(eth_node)) {
		ret = of_phy_register_fixed_link(eth_node);
		if (ret) {
			dev_err_probe(prueth->dev, ret, "failed to register fixed-link phy\n");
			goto free;
		}

		emac->phy_node = eth_node;
	}

	ret = of_get_phy_mode(eth_node, &emac->phy_if);
	if (ret) {
		dev_err(prueth->dev, "could not get phy-mode property\n");
		goto free;
	}

	if (emac->phy_if != PHY_INTERFACE_MODE_MII &&
	    !phy_interface_mode_is_rgmii(emac->phy_if)) {
		dev_err(prueth->dev, "PHY mode unsupported %s\n", phy_modes(emac->phy_if));
		ret = -EINVAL;
		goto free;
	}

	/* AM65 SR2.0 has TX Internal delay always enabled by hardware
	 * and it is not possible to disable TX Internal delay. The below
	 * switch case block describes how we handle different phy modes
	 * based on hardware restriction.
	 */
	switch (emac->phy_if) {
	case PHY_INTERFACE_MODE_RGMII_ID:
		/* TX delay is supplied by hardware, only RX remains */
		emac->phy_if = PHY_INTERFACE_MODE_RGMII_RXID;
		break;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		emac->phy_if = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		/* these would require disabling the TX delay, which the
		 * hardware cannot do
		 */
		dev_err(prueth->dev, "RGMII mode without TX delay is not supported");
		ret = -EINVAL;
		goto free;
	default:
		break;
	}

	/* get mac address from DT and set private and netdev addr */
	ret = of_get_ethdev_address(eth_node, ndev);
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		eth_hw_addr_random(ndev);
		dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n",
			 port, ndev->dev_addr);
	}
	ether_addr_copy(emac->mac_addr, ndev->dev_addr);

	ndev->dev.of_node = eth_node;
	ndev->min_mtu = PRUETH_MIN_PKT_SIZE;
	ndev->max_mtu = PRUETH_MAX_MTU;
	ndev->netdev_ops = &emac_netdev_ops;
	ndev->ethtool_ops = &icssg_ethtool_ops;
	ndev->hw_features = NETIF_F_SG;
	ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
	ndev->hw_features |= NETIF_PRUETH_HSR_OFFLOAD_FEATURES;
	xdp_set_features_flag(ndev,
			      NETDEV_XDP_ACT_BASIC |
			      NETDEV_XDP_ACT_REDIRECT |
			      NETDEV_XDP_ACT_NDO_XMIT |
			      NETDEV_XDP_ACT_XSK_ZEROCOPY);

	netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
	/* timer used to pace RX interrupt coalescing */
	hrtimer_setup(&emac->rx_hrtimer, &emac_rx_timer_callback, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL_PINNED);
	prueth->emac[mac] = emac;

	return 0;

free:
	pruss_release_mem_region(prueth->pruss, &emac->dram);
free_wq:
	destroy_workqueue(emac->cmd_wq);
free_ndev:
	emac->ndev = NULL;
	prueth->emac[mac] = NULL;
	free_netdev(ndev);

	return ret;
}
1578 
1579 bool prueth_dev_check(const struct net_device *ndev)
1580 {
1581 	if (ndev->netdev_ops == &emac_netdev_ops && netif_running(ndev)) {
1582 		struct prueth_emac *emac = netdev_priv(ndev);
1583 
1584 		return emac->prueth->is_switch_mode;
1585 	}
1586 
1587 	return false;
1588 }
1589 
1590 static void prueth_offload_fwd_mark_update(struct prueth *prueth)
1591 {
1592 	int set_val = 0;
1593 	int i;
1594 
1595 	if (prueth->br_members == (BIT(PRUETH_PORT_MII0) | BIT(PRUETH_PORT_MII1)))
1596 		set_val = 1;
1597 
1598 	dev_dbg(prueth->dev, "set offload_fwd_mark %d\n", set_val);
1599 
1600 	for (i = PRUETH_MAC0; i < PRUETH_NUM_MACS; i++) {
1601 		struct prueth_emac *emac = prueth->emac[i];
1602 
1603 		if (!emac || !emac->ndev)
1604 			continue;
1605 
1606 		emac->offload_fwd_mark = set_val;
1607 	}
1608 }
1609 
/* Restart the shared PRU firmware for both ports (used on mode changes:
 * EMAC <-> switch/HSR). Both netdevs are detached while the cores are down.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int prueth_emac_restart(struct prueth *prueth)
{
	struct prueth_emac *emac0 = prueth->emac[PRUETH_MAC0];
	struct prueth_emac *emac1 = prueth->emac[PRUETH_MAC1];
	int ret;

	/* Detach the net_device for both PRUeth ports */
	if (netif_running(emac0->ndev))
		netif_device_detach(emac0->ndev);
	if (netif_running(emac1->ndev))
		netif_device_detach(emac1->ndev);

	/* Disable both PRUeth ports */
	ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
	ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);
	if (ret)
		return ret;

	/* Stop both pru cores for both PRUeth ports */
	ret = prueth_emac_common_stop(prueth);
	if (ret) {
		dev_err(prueth->dev, "Failed to stop the firmwares");
		return ret;
	}

	/* Start both pru cores for both PRUeth ports */
	ret = prueth_emac_common_start(prueth);
	if (ret) {
		dev_err(prueth->dev, "Failed to start the firmwares");
		return ret;
	}

	/* Enable forwarding for both PRUeth ports */
	ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD);
	ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD);

	/* Attach net_device for both PRUeth ports */
	netif_device_attach(emac0->ndev);
	netif_device_attach(emac1->ndev);

	return ret;
}
1652 
1653 static void icssg_change_mode(struct prueth *prueth)
1654 {
1655 	int ret;
1656 
1657 	ret = prueth_emac_restart(prueth);
1658 	if (ret) {
1659 		dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
1660 		return;
1661 	}
1662 
1663 	icssg_enable_fw_offload(prueth);
1664 }
1665 
/* Handle a port joining a bridge: offload the bridge port via switchdev
 * and switch the firmware into switch mode once both ports are members of
 * the same bridge.
 *
 * Return: NOTIFY_DONE on success, negative errno on failure.
 */
static int prueth_netdevice_port_link(struct net_device *ndev,
				      struct net_device *br_ndev,
				      struct netlink_ext_ack *extack)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int err;

	if (!prueth->br_members) {
		/* first member defines the one hardware bridge we offload */
		prueth->hw_bridge_dev = br_ndev;
	} else {
		/* This is adding the port to a second bridge, this is
		 * unsupported
		 */
		if (prueth->hw_bridge_dev != br_ndev)
			return -EOPNOTSUPP;
	}

	err = switchdev_bridge_port_offload(br_ndev, ndev, emac,
					    &prueth->prueth_switchdev_nb,
					    &prueth->prueth_switchdev_bl_nb,
					    false, extack);
	if (err)
		return err;

	prueth->br_members |= BIT(emac->port_id);

	/* switch mode needs both MII ports in the bridge */
	if (!prueth->is_switch_mode) {
		if (prueth->br_members & BIT(PRUETH_PORT_MII0) &&
		    prueth->br_members & BIT(PRUETH_PORT_MII1)) {
			prueth->is_switch_mode = true;
			prueth->default_vlan = PRUETH_DFLT_VLAN_SW;
			emac->port_vlan = prueth->default_vlan;
			icssg_change_mode(prueth);
		}
	}

	prueth_offload_fwd_mark_update(prueth);

	return NOTIFY_DONE;
}
1707 
/* Handle a port leaving the bridge: drop back to EMAC mode (which requires
 * a firmware restart) and clear bridge state when the last member leaves.
 */
static void prueth_netdevice_port_unlink(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int ret;

	prueth->br_members &= ~BIT(emac->port_id);

	if (prueth->is_switch_mode) {
		prueth->is_switch_mode = false;
		emac->port_vlan = 0;
		ret = prueth_emac_restart(prueth);
		if (ret) {
			/* firmware is in an undefined state; bail out
			 * without touching the remaining bridge state
			 */
			dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
			return;
		}
	}

	prueth_offload_fwd_mark_update(prueth);

	if (!prueth->br_members)
		prueth->hw_bridge_dev = NULL;
}
1731 
/* Handle a port joining an HSR device: once both MII ports are members and
 * both advertise the HSR offload features, switch the firmware into HSR
 * offload mode.
 *
 * Return: 0 on success, -EOPNOTSUPP if offload cannot be enabled.
 */
static int prueth_hsr_port_link(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	struct prueth_emac *emac0;
	struct prueth_emac *emac1;

	emac0 = prueth->emac[PRUETH_MAC0];
	emac1 = prueth->emac[PRUETH_MAC1];

	/* switch mode and HSR offload are mutually exclusive */
	if (prueth->is_switch_mode)
		return -EOPNOTSUPP;

	prueth->hsr_members |= BIT(emac->port_id);
	if (!prueth->is_hsr_offload_mode) {
		if (prueth->hsr_members & BIT(PRUETH_PORT_MII0) &&
		    prueth->hsr_members & BIT(PRUETH_PORT_MII1)) {
			if (!(emac0->ndev->features &
			      NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
			    !(emac1->ndev->features &
			      NETIF_PRUETH_HSR_OFFLOAD_FEATURES))
				return -EOPNOTSUPP;
			prueth->is_hsr_offload_mode = true;
			prueth->default_vlan = PRUETH_DFLT_VLAN_HSR;
			emac0->port_vlan = prueth->default_vlan;
			emac1->port_vlan = prueth->default_vlan;
			/* restart firmware with the HSR images */
			icssg_change_mode(prueth);
			netdev_dbg(ndev, "Enabling HSR offload mode\n");
		}
	}

	return 0;
}
1765 
/* Handle a port leaving the HSR device: tear down HSR offload mode and
 * restart the firmware in plain EMAC mode.
 */
static void prueth_hsr_port_unlink(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	struct prueth_emac *emac0;
	struct prueth_emac *emac1;
	int ret;

	emac0 = prueth->emac[PRUETH_MAC0];
	emac1 = prueth->emac[PRUETH_MAC1];

	prueth->hsr_members &= ~BIT(emac->port_id);
	if (prueth->is_hsr_offload_mode) {
		prueth->is_hsr_offload_mode = false;
		emac0->port_vlan = 0;
		emac1->port_vlan = 0;
		prueth->hsr_dev = NULL;
		ret = prueth_emac_restart(prueth);
		if (ret) {
			/* firmware is in an undefined state; nothing more
			 * can be unwound here
			 */
			dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
			return;
		}
		netdev_dbg(ndev, "Disabling HSR Offload mode\n");
	}
}
1791 
/* netdev notifier: reacts to our ports being linked to / unlinked from an
 * HSR master or a bridge master, toggling the matching offload mode.
 */
static int prueth_netdevice_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info;
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	enum hsr_version hsr_ndev_version;
	int ret = NOTIFY_DONE;

	/* only interested in events on our own ports */
	if (ndev->netdev_ops != &emac_netdev_ops)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		info = ptr;

		if ((ndev->features & NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
		    is_hsr_master(info->upper_dev)) {
			/* firmware only supports HSRv1 and PRPv1.
			 * NOTE(review): raw -EOPNOTSUPP is returned here
			 * instead of notifier_from_errno() - confirm the
			 * intended notifier-chain return convention.
			 */
			hsr_get_version(info->upper_dev, &hsr_ndev_version);
			if (hsr_ndev_version != HSR_V1 && hsr_ndev_version != PRP_V1)
				return -EOPNOTSUPP;
			prueth->hsr_prp_version = hsr_ndev_version;

			if (info->linking) {
				if (!prueth->hsr_dev) {
					/* first port: remember the HSR device
					 * and claim its MAC for the host port
					 */
					prueth->hsr_dev = info->upper_dev;
					icssg_class_set_host_mac_addr(prueth->miig_rt,
								      prueth->hsr_dev->dev_addr);
				} else {
					if (prueth->hsr_dev != info->upper_dev) {
						netdev_dbg(ndev, "Both interfaces must be linked to same upper device\n");
						return -EOPNOTSUPP;
					}
				}
				prueth_hsr_port_link(ndev);
			} else {
				prueth_hsr_port_unlink(ndev);
			}
		}

		if (netif_is_bridge_master(info->upper_dev)) {
			if (info->linking)
				ret = prueth_netdevice_port_link(ndev, info->upper_dev, extack);
			else
				prueth_netdevice_port_unlink(ndev);
		}
		break;
	default:
		return NOTIFY_DONE;
	}

	return notifier_from_errno(ret);
}
1848 
1849 static int prueth_register_notifiers(struct prueth *prueth)
1850 {
1851 	int ret = 0;
1852 
1853 	prueth->prueth_netdevice_nb.notifier_call = &prueth_netdevice_event;
1854 	ret = register_netdevice_notifier(&prueth->prueth_netdevice_nb);
1855 	if (ret) {
1856 		dev_err(prueth->dev, "can't register netdevice notifier\n");
1857 		return ret;
1858 	}
1859 
1860 	ret = prueth_switchdev_register_notifiers(prueth);
1861 	if (ret)
1862 		unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
1863 
1864 	return ret;
1865 }
1866 
/* Unregister notifiers in reverse order of prueth_register_notifiers(). */
static void prueth_unregister_notifiers(struct prueth *prueth)
{
	prueth_switchdev_unregister_notifiers(prueth);
	unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
}
1872 
1873 static void icssg_read_firmware_names(struct device_node *np,
1874 				      struct icssg_firmwares *fw)
1875 {
1876 	int i;
1877 
1878 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
1879 		of_property_read_string_index(np, "firmware-name", i * 3 + 0,
1880 					      &fw[i].pru);
1881 		of_property_read_string_index(np, "firmware-name", i * 3 + 1,
1882 					      &fw[i].rtu);
1883 		of_property_read_string_index(np, "firmware-name", i * 3 + 2,
1884 					      &fw[i].txpru);
1885 	}
1886 }
1887 
1888 /* icssg_firmware_name_replace - Replace a substring in firmware name
1889  * @dev: device pointer for memory allocation
1890  * @src: source firmware name string
1891  * @from: substring to replace
1892  * @to: replacement substring
1893  *
1894  * Return: a newly allocated string with the replacement, or the original
1895  * string if replacement is not possible.
1896  */
1897 static const char *icssg_firmware_name_replace(struct device *dev,
1898 					       const char *src,
1899 					       const char *from,
1900 					       const char *to)
1901 {
1902 	size_t prefix, from_len, to_len, total;
1903 	const char *p = strstr(src, from);
1904 	char *buf;
1905 
1906 	if (!p)
1907 		return src; /* fallback: no replacement, use original */
1908 
1909 	prefix = p - src;
1910 	from_len = strlen(from);
1911 	to_len = strlen(to);
1912 	total = strlen(src) - from_len + to_len + 1;
1913 
1914 	buf = devm_kzalloc(dev, total, GFP_KERNEL);
1915 	if (!buf)
1916 		return src; /* fallback: allocation failed, use original */
1917 
1918 	strscpy(buf, src, prefix + 1);
1919 	strscpy(buf + prefix, to, to_len + 1);
1920 	strscpy(buf + prefix + to_len, p + from_len, total - prefix - to_len);
1921 
1922 	return buf;
1923 }
1924 
1925 /**
1926  * icssg_mode_firmware_names - Generate firmware names for a specific mode
1927  * @dev: device pointer for logging and context
1928  * @src: source array of firmware name structures
1929  * @dst: destination array to store updated firmware name structures
1930  * @from: substring in firmware names to be replaced
1931  * @to: substring to replace @from in firmware names
1932  *
1933  * Iterates over all MACs and replaces occurrences of the @from substring
1934  * with @to in the firmware names (pru, rtu, txpru) for each MAC. The
1935  * updated firmware names are stored in the @dst array.
1936  */
1937 static void icssg_mode_firmware_names(struct device *dev,
1938 				      struct icssg_firmwares *src,
1939 				      struct icssg_firmwares *dst,
1940 				      const char *from, const char *to)
1941 {
1942 	int i;
1943 
1944 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
1945 		dst[i].pru = icssg_firmware_name_replace(dev, src[i].pru,
1946 							 from, to);
1947 		dst[i].rtu = icssg_firmware_name_replace(dev, src[i].rtu,
1948 							 from, to);
1949 		dst[i].txpru = icssg_firmware_name_replace(dev, src[i].txpru,
1950 							   from, to);
1951 	}
1952 }
1953 
1954 static int prueth_probe(struct platform_device *pdev)
1955 {
1956 	struct device_node *eth_node, *eth_ports_node;
1957 	struct device_node  *eth0_node = NULL;
1958 	struct device_node  *eth1_node = NULL;
1959 	struct genpool_data_align gp_data = {
1960 		.align = SZ_64K,
1961 	};
1962 	struct device *dev = &pdev->dev;
1963 	struct device_node *np;
1964 	struct prueth *prueth;
1965 	struct pruss *pruss;
1966 	u32 msmc_ram_size;
1967 	int i, ret;
1968 
1969 	np = dev->of_node;
1970 
1971 	BUILD_BUG_ON_MSG((sizeof(struct prueth_swdata) > PRUETH_NAV_SW_DATA_SIZE),
1972 			 "insufficient SW_DATA size");
1973 
1974 	prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
1975 	if (!prueth)
1976 		return -ENOMEM;
1977 
1978 	dev_set_drvdata(dev, prueth);
1979 	prueth->pdev = pdev;
1980 	prueth->pdata = *(const struct prueth_pdata *)device_get_match_data(dev);
1981 
1982 	prueth->dev = dev;
1983 	eth_ports_node = of_get_child_by_name(np, "ethernet-ports");
1984 	if (!eth_ports_node)
1985 		return -ENOENT;
1986 
1987 	for_each_child_of_node(eth_ports_node, eth_node) {
1988 		u32 reg;
1989 
1990 		if (strcmp(eth_node->name, "port"))
1991 			continue;
1992 		ret = of_property_read_u32(eth_node, "reg", &reg);
1993 		if (ret < 0) {
1994 			dev_err(dev, "%pOF error reading port_id %d\n",
1995 				eth_node, ret);
1996 		}
1997 
1998 		of_node_get(eth_node);
1999 
2000 		if (reg == 0) {
2001 			eth0_node = eth_node;
2002 			if (!of_device_is_available(eth0_node)) {
2003 				of_node_put(eth0_node);
2004 				eth0_node = NULL;
2005 			}
2006 		} else if (reg == 1) {
2007 			eth1_node = eth_node;
2008 			if (!of_device_is_available(eth1_node)) {
2009 				of_node_put(eth1_node);
2010 				eth1_node = NULL;
2011 			}
2012 		} else {
2013 			dev_err(dev, "port reg should be 0 or 1\n");
2014 		}
2015 	}
2016 
2017 	of_node_put(eth_ports_node);
2018 
2019 	/* At least one node must be present and available else we fail */
2020 	if (!eth0_node && !eth1_node) {
2021 		dev_err(dev, "neither port0 nor port1 node available\n");
2022 		return -ENODEV;
2023 	}
2024 
2025 	if (eth0_node == eth1_node) {
2026 		dev_err(dev, "port0 and port1 can't have same reg\n");
2027 		of_node_put(eth0_node);
2028 		return -ENODEV;
2029 	}
2030 
2031 	prueth->eth_node[PRUETH_MAC0] = eth0_node;
2032 	prueth->eth_node[PRUETH_MAC1] = eth1_node;
2033 
2034 	prueth->miig_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-g-rt");
2035 	if (IS_ERR(prueth->miig_rt)) {
2036 		dev_err(dev, "couldn't get ti,mii-g-rt syscon regmap\n");
2037 		return -ENODEV;
2038 	}
2039 
2040 	prueth->mii_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-rt");
2041 	if (IS_ERR(prueth->mii_rt)) {
2042 		dev_err(dev, "couldn't get ti,mii-rt syscon regmap\n");
2043 		return -ENODEV;
2044 	}
2045 
2046 	prueth->pa_stats = syscon_regmap_lookup_by_phandle(np, "ti,pa-stats");
2047 	if (IS_ERR(prueth->pa_stats)) {
2048 		dev_err(dev, "couldn't get ti,pa-stats syscon regmap\n");
2049 		prueth->pa_stats = NULL;
2050 	}
2051 
2052 	if (eth0_node || eth1_node) {
2053 		ret = prueth_get_cores(prueth, ICSS_SLICE0, false);
2054 		if (ret)
2055 			goto put_cores;
2056 		ret = prueth_get_cores(prueth, ICSS_SLICE1, false);
2057 		if (ret)
2058 			goto put_cores;
2059 	}
2060 
2061 	pruss = pruss_get(eth0_node ?
2062 			  prueth->pru[ICSS_SLICE0] : prueth->pru[ICSS_SLICE1]);
2063 	if (IS_ERR(pruss)) {
2064 		ret = PTR_ERR(pruss);
2065 		dev_err(dev, "unable to get pruss handle\n");
2066 		goto put_cores;
2067 	}
2068 
2069 	prueth->pruss = pruss;
2070 
2071 	ret = pruss_request_mem_region(pruss, PRUSS_MEM_SHRD_RAM2,
2072 				       &prueth->shram);
2073 	if (ret) {
2074 		dev_err(dev, "unable to get PRUSS SHRD RAM2: %d\n", ret);
2075 		goto put_pruss;
2076 	}
2077 
2078 	prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
2079 	if (!prueth->sram_pool) {
2080 		dev_err(dev, "unable to get SRAM pool\n");
2081 		ret = -ENODEV;
2082 
2083 		goto put_mem;
2084 	}
2085 
2086 	prueth->is_switchmode_supported = prueth->pdata.switch_mode;
2087 	if (prueth->pdata.banked_ms_ram) {
2088 		/* Reserve 2 MSMC RAM banks for buffers to avoid arbitration */
2089 		msmc_ram_size = (2 * MSMC_RAM_BANK_SIZE);
2090 	} else {
2091 		msmc_ram_size = PRUETH_EMAC_TOTAL_BUF_SIZE;
2092 		if (prueth->is_switchmode_supported)
2093 			msmc_ram_size = PRUETH_SW_TOTAL_BUF_SIZE;
2094 	}
2095 
2096 	/* NOTE: FW bug needs buffer base to be 64KB aligned */
2097 	prueth->msmcram.va =
2098 		(void __iomem *)gen_pool_alloc_algo(prueth->sram_pool,
2099 						    msmc_ram_size,
2100 						    gen_pool_first_fit_align,
2101 						    &gp_data);
2102 
2103 	if (!prueth->msmcram.va) {
2104 		ret = -ENOMEM;
2105 		dev_err(dev, "unable to allocate MSMC resource\n");
2106 		goto put_mem;
2107 	}
2108 	prueth->msmcram.pa = gen_pool_virt_to_phys(prueth->sram_pool,
2109 						   (unsigned long)prueth->msmcram.va);
2110 	prueth->msmcram.size = msmc_ram_size;
2111 	memset_io(prueth->msmcram.va, 0, msmc_ram_size);
2112 	dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa,
2113 		prueth->msmcram.va, prueth->msmcram.size);
2114 
2115 	prueth->iep0 = icss_iep_get_idx(np, 0);
2116 	if (IS_ERR(prueth->iep0)) {
2117 		ret = dev_err_probe(dev, PTR_ERR(prueth->iep0), "iep0 get failed\n");
2118 		prueth->iep0 = NULL;
2119 		goto free_pool;
2120 	}
2121 
2122 	prueth->iep1 = icss_iep_get_idx(np, 1);
2123 	if (IS_ERR(prueth->iep1)) {
2124 		ret = dev_err_probe(dev, PTR_ERR(prueth->iep1), "iep1 get failed\n");
2125 		goto put_iep0;
2126 	}
2127 
2128 	if (prueth->pdata.quirk_10m_link_issue) {
2129 		/* Enable IEP1 for FW in 64bit mode as W/A for 10M FD link detect issue under TX
2130 		 * traffic.
2131 		 */
2132 		icss_iep_init_fw(prueth->iep1);
2133 	}
2134 
2135 	/* Read EMAC firmware names from device tree */
2136 	icssg_read_firmware_names(np, prueth->icssg_emac_firmwares);
2137 
2138 	/* Generate other mode firmware names based on EMAC firmware names */
2139 	icssg_mode_firmware_names(dev, prueth->icssg_emac_firmwares,
2140 				  prueth->icssg_switch_firmwares, "eth", "sw");
2141 	icssg_mode_firmware_names(dev, prueth->icssg_emac_firmwares,
2142 				  prueth->icssg_hsr_firmwares, "eth", "hsr");
2143 	icssg_mode_firmware_names(dev, prueth->icssg_emac_firmwares,
2144 				  prueth->icssg_prp_firmwares, "eth", "prp");
2145 
2146 	spin_lock_init(&prueth->vtbl_lock);
2147 	spin_lock_init(&prueth->stats_lock);
2148 	/* setup netdev interfaces */
2149 	if (eth0_node) {
2150 		ret = prueth_netdev_init(prueth, eth0_node);
2151 		if (ret) {
2152 			dev_err_probe(dev, ret, "netdev init %s failed\n",
2153 				      eth0_node->name);
2154 			goto exit_iep;
2155 		}
2156 
2157 		prueth->emac[PRUETH_MAC0]->half_duplex =
2158 			of_property_read_bool(eth0_node, "ti,half-duplex-capable");
2159 
2160 		prueth->emac[PRUETH_MAC0]->iep = prueth->iep0;
2161 	}
2162 
2163 	if (eth1_node) {
2164 		ret = prueth_netdev_init(prueth, eth1_node);
2165 		if (ret) {
2166 			dev_err_probe(dev, ret, "netdev init %s failed\n",
2167 				      eth1_node->name);
2168 			goto netdev_exit;
2169 		}
2170 
2171 		prueth->emac[PRUETH_MAC1]->half_duplex =
2172 			of_property_read_bool(eth1_node, "ti,half-duplex-capable");
2173 
2174 		prueth->emac[PRUETH_MAC1]->iep = prueth->iep0;
2175 	}
2176 
2177 	/* register the network devices */
2178 	if (eth0_node) {
2179 		ret = register_netdev(prueth->emac[PRUETH_MAC0]->ndev);
2180 		if (ret) {
2181 			dev_err(dev, "can't register netdev for port MII0");
2182 			goto netdev_exit;
2183 		}
2184 
2185 		prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev;
2186 
2187 		ret = emac_phy_connect(prueth->emac[PRUETH_MAC0]);
2188 		if (ret) {
2189 			dev_err(dev,
2190 				"can't connect to MII0 PHY, error -%d", ret);
2191 			goto netdev_unregister;
2192 		}
2193 		phy_attached_info(prueth->emac[PRUETH_MAC0]->ndev->phydev);
2194 	}
2195 
2196 	if (eth1_node) {
2197 		ret = register_netdev(prueth->emac[PRUETH_MAC1]->ndev);
2198 		if (ret) {
2199 			dev_err(dev, "can't register netdev for port MII1");
2200 			goto netdev_unregister;
2201 		}
2202 
2203 		prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev;
2204 		ret = emac_phy_connect(prueth->emac[PRUETH_MAC1]);
2205 		if (ret) {
2206 			dev_err(dev,
2207 				"can't connect to MII1 PHY, error %d", ret);
2208 			goto netdev_unregister;
2209 		}
2210 		phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev);
2211 	}
2212 
2213 	if (prueth->is_switchmode_supported) {
2214 		ret = prueth_register_notifiers(prueth);
2215 		if (ret)
2216 			goto netdev_unregister;
2217 
2218 		sprintf(prueth->switch_id, "%s", dev_name(dev));
2219 	}
2220 
2221 	dev_info(dev, "TI PRU ethernet driver initialized: %s EMAC mode\n",
2222 		 (!eth0_node || !eth1_node) ? "single" : "dual");
2223 
2224 	if (eth1_node)
2225 		of_node_put(eth1_node);
2226 	if (eth0_node)
2227 		of_node_put(eth0_node);
2228 	return 0;
2229 
2230 netdev_unregister:
2231 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
2232 		if (!prueth->registered_netdevs[i])
2233 			continue;
2234 		if (prueth->emac[i]->ndev->phydev) {
2235 			phy_disconnect(prueth->emac[i]->ndev->phydev);
2236 			prueth->emac[i]->ndev->phydev = NULL;
2237 		}
2238 		unregister_netdev(prueth->registered_netdevs[i]);
2239 	}
2240 
2241 netdev_exit:
2242 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
2243 		eth_node = prueth->eth_node[i];
2244 		if (!eth_node)
2245 			continue;
2246 
2247 		prueth_netdev_exit(prueth, eth_node);
2248 	}
2249 
2250 exit_iep:
2251 	if (prueth->pdata.quirk_10m_link_issue)
2252 		icss_iep_exit_fw(prueth->iep1);
2253 	icss_iep_put(prueth->iep1);
2254 
2255 put_iep0:
2256 	icss_iep_put(prueth->iep0);
2257 	prueth->iep0 = NULL;
2258 	prueth->iep1 = NULL;
2259 
2260 free_pool:
2261 	gen_pool_free(prueth->sram_pool,
2262 		      (unsigned long)prueth->msmcram.va,
2263 		      prueth->msmcram.size);
2264 
2265 put_mem:
2266 	pruss_release_mem_region(prueth->pruss, &prueth->shram);
2267 
2268 put_pruss:
2269 	pruss_put(prueth->pruss);
2270 
2271 put_cores:
2272 	if (eth0_node || eth1_node) {
2273 		prueth_put_cores(prueth, ICSS_SLICE0);
2274 		of_node_put(eth0_node);
2275 
2276 		prueth_put_cores(prueth, ICSS_SLICE1);
2277 		of_node_put(eth1_node);
2278 	}
2279 
2280 	return ret;
2281 }
2282 
2283 static void prueth_remove(struct platform_device *pdev)
2284 {
2285 	struct prueth *prueth = platform_get_drvdata(pdev);
2286 	struct device_node *eth_node;
2287 	int i;
2288 
2289 	prueth_unregister_notifiers(prueth);
2290 
2291 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
2292 		if (!prueth->registered_netdevs[i])
2293 			continue;
2294 		phy_stop(prueth->emac[i]->ndev->phydev);
2295 		phy_disconnect(prueth->emac[i]->ndev->phydev);
2296 		prueth->emac[i]->ndev->phydev = NULL;
2297 		unregister_netdev(prueth->registered_netdevs[i]);
2298 	}
2299 
2300 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
2301 		eth_node = prueth->eth_node[i];
2302 		if (!eth_node)
2303 			continue;
2304 
2305 		prueth_netdev_exit(prueth, eth_node);
2306 	}
2307 
2308 	if (prueth->pdata.quirk_10m_link_issue)
2309 		icss_iep_exit_fw(prueth->iep1);
2310 
2311 	icss_iep_put(prueth->iep1);
2312 	icss_iep_put(prueth->iep0);
2313 
2314 	gen_pool_free(prueth->sram_pool,
2315 		(unsigned long)prueth->msmcram.va,
2316 		prueth->msmcram.size);
2317 
2318 	pruss_release_mem_region(prueth->pruss, &prueth->shram);
2319 
2320 	pruss_put(prueth->pruss);
2321 
2322 	if (prueth->eth_node[PRUETH_MAC1])
2323 		prueth_put_cores(prueth, ICSS_SLICE1);
2324 
2325 	if (prueth->eth_node[PRUETH_MAC0])
2326 		prueth_put_cores(prueth, ICSS_SLICE0);
2327 }
2328 
/* SoC-specific configuration for the ICSSG instances on AM65x */
static const struct prueth_pdata am654_icssg_pdata = {
	.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,	/* K3 free-descriptor ring mode */
	.quirk_10m_link_issue = 1,	/* needs IEP1 FW W/A for 10M FD link detect */
	.switch_mode = 1,		/* switch-mode firmware is supported */
	.banked_ms_ram = 0,		/* MSMC buffers sized by need, not per bank */
};
2335 
/* SoC-specific configuration for the ICSSG instances on AM64x */
static const struct prueth_pdata am64x_icssg_pdata = {
	.fdqring_mode = K3_RINGACC_RING_MODE_RING,	/* K3 free-descriptor ring mode */
	.quirk_10m_link_issue = 1,	/* needs IEP1 FW W/A for 10M FD link detect */
	.switch_mode = 1,		/* switch-mode firmware is supported */
	.banked_ms_ram = 1,		/* reserve 2 MSMC RAM banks to avoid arbitration */
};
2342 
/* OF match table; .data carries the SoC-specific prueth_pdata */
static const struct of_device_id prueth_dt_match[] = {
	{ .compatible = "ti,am654-icssg-prueth", .data = &am654_icssg_pdata },
	{ .compatible = "ti,am642-icssg-prueth", .data = &am64x_icssg_pdata },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, prueth_dt_match);
2349 
/* Platform driver glue; devices are bound via the OF match table */
static struct platform_driver prueth_driver = {
	.probe = prueth_probe,
	.remove = prueth_remove,
	.driver = {
		.name = "icssg-prueth",
		.of_match_table = prueth_dt_match,
		.pm = &prueth_dev_pm_ops,
	},
};
module_platform_driver(prueth_driver);
2360 
/* Module metadata */
MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
MODULE_DESCRIPTION("PRUSS ICSSG Ethernet Driver");
MODULE_LICENSE("GPL");
2365