xref: /linux/drivers/net/ethernet/ti/icssg/icssg_prueth.c (revision f09fc24dd9a5ec989dfdde7090624924ede6ddc7)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 /* Texas Instruments ICSSG Ethernet Driver
4  *
5  * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
6  *
7  */
8 
9 #include <linux/bitops.h>
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/dma/ti-cppi5.h>
14 #include <linux/etherdevice.h>
15 #include <linux/genalloc.h>
16 #include <linux/if_hsr.h>
17 #include <linux/if_vlan.h>
18 #include <linux/interrupt.h>
19 #include <linux/io-64-nonatomic-hi-lo.h>
20 #include <linux/kernel.h>
21 #include <linux/mfd/syscon.h>
22 #include <linux/module.h>
23 #include <linux/of.h>
24 #include <linux/of_mdio.h>
25 #include <linux/of_net.h>
26 #include <linux/platform_device.h>
27 #include <linux/phy.h>
28 #include <linux/property.h>
29 #include <linux/remoteproc/pruss.h>
30 #include <linux/regmap.h>
31 #include <linux/remoteproc.h>
32 #include <net/switchdev.h>
33 
34 #include "icssg_prueth.h"
35 #include "icssg_mii_rt.h"
36 #include "icssg_switchdev.h"
37 #include "../k3-cppi-desc-pool.h"
38 
39 #define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG Ethernet driver"
40 
41 #define DEFAULT_VID		1
42 #define DEFAULT_PORT_MASK	1
43 #define DEFAULT_UNTAG_MASK	1
44 
45 #define NETIF_PRUETH_HSR_OFFLOAD_FEATURES	(NETIF_F_HW_HSR_FWD | \
46 						 NETIF_F_HW_HSR_DUP | \
47 						 NETIF_F_HW_HSR_TAG_INS | \
48 						 NETIF_F_HW_HSR_TAG_RM)
49 
50 /* CTRLMMR_ICSSG_RGMII_CTRL register bits */
51 #define ICSSG_CTRL_RGMII_ID_MODE                BIT(24)
52 
53 static void emac_adjust_link(struct net_device *ndev);
54 
55 static int emac_get_tx_ts(struct prueth_emac *emac,
56 			  struct emac_tx_ts_response *rsp)
57 {
58 	struct prueth *prueth = emac->prueth;
59 	int slice = prueth_emac_slice(emac);
60 	int addr;
61 
62 	addr = icssg_queue_pop(prueth, slice == 0 ?
63 			       ICSSG_TS_POP_SLICE0 : ICSSG_TS_POP_SLICE1);
64 	if (addr < 0)
65 		return addr;
66 
67 	memcpy_fromio(rsp, prueth->shram.va + addr, sizeof(*rsp));
68 	/* return the buffer back to the pool */
69 	icssg_queue_push(prueth, slice == 0 ?
70 			 ICSSG_TS_PUSH_SLICE0 : ICSSG_TS_PUSH_SLICE1, addr);
71 
72 	return 0;
73 }
74 
75 static void tx_ts_work(struct prueth_emac *emac)
76 {
77 	struct skb_shared_hwtstamps ssh;
78 	struct emac_tx_ts_response tsr;
79 	struct sk_buff *skb;
80 	int ret = 0;
81 	u32 hi_sw;
82 	u64 ns;
83 
84 	/* There may be more than one pending request */
85 	while (1) {
86 		ret = emac_get_tx_ts(emac, &tsr);
87 		if (ret) /* nothing more */
88 			break;
89 
90 		if (tsr.cookie >= PRUETH_MAX_TX_TS_REQUESTS ||
91 		    !emac->tx_ts_skb[tsr.cookie]) {
92 			netdev_err(emac->ndev, "Invalid TX TS cookie 0x%x\n",
93 				   tsr.cookie);
94 			break;
95 		}
96 
97 		skb = emac->tx_ts_skb[tsr.cookie];
98 		emac->tx_ts_skb[tsr.cookie] = NULL;	/* free slot */
99 		if (!skb) {
100 			netdev_err(emac->ndev, "Driver Bug! got NULL skb\n");
101 			break;
102 		}
103 
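		/* combine the firmware-maintained high count with the HW
		 * timestamp words and convert to nanoseconds
		 */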
104 		hi_sw = readl(emac->prueth->shram.va +
105 			      TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
106 		ns = icssg_ts_to_ns(hi_sw, tsr.hi_ts, tsr.lo_ts,
107 				    IEP_DEFAULT_CYCLE_TIME_NS);
108 
109 		memset(&ssh, 0, sizeof(ssh));
110 		ssh.hwtstamp = ns_to_ktime(ns);
111 
112 		skb_tstamp_tx(skb, &ssh);
113 		dev_consume_skb_any(skb);
114 
115 		if (atomic_dec_and_test(&emac->tx_ts_pending))	/* no more? */
116 			break;
117 	}
118 }
119 
120 static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id)
121 {
122 	struct prueth_emac *emac = dev_id;
123 
124 	/* currently only TX timestamp is being returned */
125 	tx_ts_work(emac);
126 
127 	return IRQ_HANDLED;
128 }
129 
130 static int prueth_start(struct rproc *rproc, const char *fw_name)
131 {
132 	int ret;
133 
134 	ret = rproc_set_firmware(rproc, fw_name);
135 	if (ret)
136 		return ret;
137 	return rproc_boot(rproc);
138 }
139 
140 static void prueth_shutdown(struct rproc *rproc)
141 {
142 	rproc_shutdown(rproc);
143 }
144 
145 static int prueth_emac_start(struct prueth *prueth)
146 {
147 	struct icssg_firmwares *firmwares;
148 	struct device *dev = prueth->dev;
149 	int ret, slice;
150 
151 	if (prueth->is_switch_mode)
152 		firmwares = prueth->icssg_switch_firmwares;
153 	else if (prueth->is_hsr_offload_mode && prueth->hsr_prp_version == HSR_V1)
154 		firmwares = prueth->icssg_hsr_firmwares;
155 	else if (prueth->is_hsr_offload_mode && prueth->hsr_prp_version == PRP_V1)
156 		firmwares = prueth->icssg_prp_firmwares;
157 	else
158 		firmwares = prueth->icssg_emac_firmwares;
159 
160 	for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
161 		ret = prueth_start(prueth->pru[slice], firmwares[slice].pru);
162 		if (ret) {
163 			dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
164 			goto unwind_slices;
165 		}
166 
167 		ret = prueth_start(prueth->rtu[slice], firmwares[slice].rtu);
168 		if (ret) {
169 			dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
170 			rproc_shutdown(prueth->pru[slice]);
171 			goto unwind_slices;
172 		}
173 
174 		ret = prueth_start(prueth->txpru[slice], firmwares[slice].txpru);
175 		if (ret) {
176 			dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
177 			rproc_shutdown(prueth->rtu[slice]);
178 			rproc_shutdown(prueth->pru[slice]);
179 			goto unwind_slices;
180 		}
181 	}
182 
183 	return 0;
184 
185 unwind_slices:
186 	while (--slice >= 0) {
187 		prueth_shutdown(prueth->txpru[slice]);
188 		prueth_shutdown(prueth->rtu[slice]);
189 		prueth_shutdown(prueth->pru[slice]);
190 	}
191 
192 	return ret;
193 }
194 
195 static void prueth_emac_stop(struct prueth *prueth)
196 {
197 	int slice;
198 
199 	for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
200 		prueth_shutdown(prueth->txpru[slice]);
201 		prueth_shutdown(prueth->rtu[slice]);
202 		prueth_shutdown(prueth->pru[slice]);
203 	}
204 }
205 
206 static int prueth_emac_common_start(struct prueth *prueth)
207 {
208 	struct prueth_emac *emac;
209 	int ret = 0;
210 	int slice;
211 
212 	if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
213 		return -EINVAL;
214 
215 	/* clear SMEM and MSMC settings for all slices */
216 	memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
217 	memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
218 
219 	icssg_class_default(prueth->miig_rt, ICSS_SLICE0, 0, false);
220 	icssg_class_default(prueth->miig_rt, ICSS_SLICE1, 0, false);
221 
222 	if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
223 		icssg_init_fw_offload_mode(prueth);
224 	else
225 		icssg_init_emac_mode(prueth);
226 
227 	for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
228 		emac = prueth->emac[slice];
229 		if (!emac)
230 			continue;
231 		ret = icssg_config(prueth, emac, slice);
232 		if (ret)
233 			goto disable_class;
234 
235 		mutex_lock(&emac->ndev->phydev->lock);
236 		emac_adjust_link(emac->ndev);
237 		mutex_unlock(&emac->ndev->phydev->lock);
238 	}
239 
240 	ret = prueth_emac_start(prueth);
241 	if (ret)
242 		goto disable_class;
243 
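	/* the IEP is shared by both ports; initialize it through whichever
	 * port is available
	 */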
244 	emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
245 	       prueth->emac[ICSS_SLICE1];
246 	ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
247 			    emac, IEP_DEFAULT_CYCLE_TIME_NS);
248 	if (ret) {
249 		dev_err(prueth->dev, "Failed to initialize IEP module\n");
250 		goto stop_pruss;
251 	}
252 
253 	return 0;
254 
255 stop_pruss:
256 	prueth_emac_stop(prueth);
257 
258 disable_class:
259 	icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
260 	icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);
261 
262 	return ret;
263 }
264 
265 static int prueth_emac_common_stop(struct prueth *prueth)
266 {
267 	struct prueth_emac *emac;
268 
269 	if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
270 		return -EINVAL;
271 
272 	icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
273 	icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);
274 
275 	prueth_emac_stop(prueth);
276 
277 	emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
278 	       prueth->emac[ICSS_SLICE1];
279 	icss_iep_exit(emac->iep);
280 
281 	return 0;
282 }
283 
284 /* called back by the PHY layer if there is a change in the link state of the HW port */
285 static void emac_adjust_link(struct net_device *ndev)
286 {
287 	struct prueth_emac *emac = netdev_priv(ndev);
288 	struct phy_device *phydev = ndev->phydev;
289 	struct prueth *prueth = emac->prueth;
290 	bool new_state = false;
291 	unsigned long flags;
292 
293 	if (phydev->link) {
294 		/* check the mode of operation - full/half duplex */
295 		if (phydev->duplex != emac->duplex) {
296 			new_state = true;
297 			emac->duplex = phydev->duplex;
298 		}
299 		if (phydev->speed != emac->speed) {
300 			new_state = true;
301 			emac->speed = phydev->speed;
302 		}
303 		if (!emac->link) {
304 			new_state = true;
305 			emac->link = 1;
306 		}
307 	} else if (emac->link) {
308 		new_state = true;
309 		emac->link = 0;
310 
311 		/* f/w should support 100 & 1000 */
312 		emac->speed = SPEED_1000;
313 
314 		/* half duplex may not be supported by f/w */
315 		emac->duplex = DUPLEX_FULL;
316 	}
317 
318 	if (new_state) {
319 		phy_print_status(phydev);
320 
321 		/* update RGMII and MII configuration based on PHY negotiated
322 		 * values
323 		 */
324 		if (emac->link) {
325 			if (emac->duplex == DUPLEX_HALF)
326 				icssg_config_half_duplex(emac);
327 			/* Set the RGMII cfg for gig en and full duplex */
328 			icssg_update_rgmii_cfg(prueth->miig_rt, emac);
329 
330 			/* update the Tx IPG based on 100M/1G speed */
331 			spin_lock_irqsave(&emac->lock, flags);
332 			icssg_config_ipg(emac);
333 			spin_unlock_irqrestore(&emac->lock, flags);
334 			icssg_config_set_speed(emac);
335 			icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
336 
337 		} else {
338 			icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
339 		}
340 	}
341 
342 	if (emac->link) {
343 		/* reactivate the transmit queue */
344 		netif_tx_wake_all_queues(ndev);
345 	} else {
346 		netif_tx_stop_all_queues(ndev);
347 		prueth_cleanup_tx_ts(emac);
348 	}
349 }
350 
351 static enum hrtimer_restart emac_rx_timer_callback(struct hrtimer *timer)
352 {
353 	struct prueth_emac *emac =
354 			container_of(timer, struct prueth_emac, rx_hrtimer);
355 	int rx_flow = PRUETH_RX_FLOW_DATA;
356 
357 	enable_irq(emac->rx_chns.irq[rx_flow]);
358 	return HRTIMER_NORESTART;
359 }
360 
361 static int emac_phy_connect(struct prueth_emac *emac)
362 {
363 	struct prueth *prueth = emac->prueth;
364 	struct net_device *ndev = emac->ndev;
365 	/* connect PHY */
366 	ndev->phydev = of_phy_connect(emac->ndev, emac->phy_node,
367 				      &emac_adjust_link, 0,
368 				      emac->phy_if);
369 	if (!ndev->phydev) {
370 		dev_err(prueth->dev, "couldn't connect to phy %s\n",
371 			emac->phy_node->full_name);
372 		return -ENODEV;
373 	}
374 
375 	if (!emac->half_duplex) {
376 		dev_dbg(prueth->dev, "half duplex mode is not supported\n");
377 		phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
378 		phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
379 	}
380 
381 	/* remove unsupported modes */
382 	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
383 	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
384 	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
385 
386 	if (emac->phy_if == PHY_INTERFACE_MODE_MII)
387 		phy_set_max_speed(ndev->phydev, SPEED_100);
388 
389 	return 0;
390 }
391 
392 static u64 prueth_iep_gettime(void *clockops_data, struct ptp_system_timestamp *sts)
393 {
394 	u32 hi_rollover_count, hi_rollover_count_r;
395 	struct prueth_emac *emac = clockops_data;
396 	struct prueth *prueth = emac->prueth;
397 	void __iomem *fw_hi_r_count_addr;
398 	void __iomem *fw_count_hi_addr;
399 	u32 iepcount_hi, iepcount_hi_r;
400 	unsigned long flags;
401 	u32 iepcount_lo;
402 	u64 ts = 0;
403 
404 	fw_count_hi_addr = prueth->shram.va + TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET;
405 	fw_hi_r_count_addr = prueth->shram.va + TIMESYNC_FW_WC_HI_ROLLOVER_COUNT_OFFSET;
406 
407 	local_irq_save(flags);
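	/* re-read until the high counts are stable so that a rollover
	 * between the high and low count reads is not missed
	 */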
408 	do {
409 		iepcount_hi = icss_iep_get_count_hi(emac->iep);
410 		iepcount_hi += readl(fw_count_hi_addr);
411 		hi_rollover_count = readl(fw_hi_r_count_addr);
412 		ptp_read_system_prets(sts);
413 		iepcount_lo = icss_iep_get_count_low(emac->iep);
414 		ptp_read_system_postts(sts);
415 
416 		iepcount_hi_r = icss_iep_get_count_hi(emac->iep);
417 		iepcount_hi_r += readl(fw_count_hi_addr);
418 		hi_rollover_count_r = readl(fw_hi_r_count_addr);
419 	} while ((iepcount_hi_r != iepcount_hi) ||
420 		 (hi_rollover_count != hi_rollover_count_r));
421 	local_irq_restore(flags);
422 
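	/* the firmware rollover count occupies the bits above bit 23 of the
	 * combined high count; scale by the cycle time and add the low count
	 */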
423 	ts = ((u64)hi_rollover_count) << 23 | iepcount_hi;
424 	ts = ts * (u64)IEP_DEFAULT_CYCLE_TIME_NS + iepcount_lo;
425 
426 	return ts;
427 }
428 
429 static void prueth_iep_settime(void *clockops_data, u64 ns)
430 {
431 	struct icssg_setclock_desc __iomem *sc_descp;
432 	struct prueth_emac *emac = clockops_data;
433 	struct icssg_setclock_desc sc_desc;
434 	u64 cyclecount;
435 	u32 cycletime;
436 	int timeout;
437 
438 	sc_descp = emac->prueth->shram.va + TIMESYNC_FW_WC_SETCLOCK_DESC_OFFSET;
439 
440 	cycletime = IEP_DEFAULT_CYCLE_TIME_NS;
441 	cyclecount = ns / cycletime;
442 
443 	memset(&sc_desc, 0, sizeof(sc_desc));
444 	sc_desc.margin = cycletime - 1000;
445 	sc_desc.cyclecounter0_set = cyclecount & GENMASK(31, 0);
446 	sc_desc.cyclecounter1_set = (cyclecount & GENMASK(63, 32)) >> 32;
447 	sc_desc.iepcount_set = ns % cycletime;
448 	/* Count from 0 to (cycle time) - emac->iep->def_inc */
449 	sc_desc.CMP0_current = cycletime - emac->iep->def_inc;
450 
451 	memcpy_toio(sc_descp, &sc_desc, sizeof(sc_desc));
452 
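	/* kick the firmware and wait for it to acknowledge the new clock */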
453 	writeb(1, &sc_descp->request);
454 
455 	timeout = 5;	/* fw should take 2-3 ms */
456 	while (timeout--) {
457 		if (readb(&sc_descp->acknowledgment))
458 			return;
459 
460 		usleep_range(500, 1000);
461 	}
462 
463 	dev_err(emac->prueth->dev, "settime timeout\n");
464 }
465 
466 static int prueth_perout_enable(void *clockops_data,
467 				struct ptp_perout_request *req, int on,
468 				u64 *cmp)
469 {
470 	struct prueth_emac *emac = clockops_data;
471 	u32 reduction_factor = 0, offset = 0;
472 	struct timespec64 ts;
473 	u64 current_cycle;
474 	u64 start_offset;
475 	u64 ns_period;
476 
477 	if (!on)
478 		return 0;
479 
480 	/* Firmware-specific handling for PPS/PEROUT */
481 	ts.tv_sec = req->period.sec;
482 	ts.tv_nsec = req->period.nsec;
483 	ns_period = timespec64_to_ns(&ts);
484 
485 	/* f/w doesn't support period less than cycle time */
486 	if (ns_period < IEP_DEFAULT_CYCLE_TIME_NS)
487 		return -ENXIO;
488 
489 	reduction_factor = ns_period / IEP_DEFAULT_CYCLE_TIME_NS;
490 	offset = ns_period % IEP_DEFAULT_CYCLE_TIME_NS;
491 
492 	/* f/w requires at least 1uS within a cycle so CMP
493 	 * can trigger after SYNC is enabled
494 	 */
495 	if (offset < 5 * NSEC_PER_USEC)
496 		offset = 5 * NSEC_PER_USEC;
497 
498 	/* if the offset is close to the cycle time then we will miss
499 	 * the CMP event for the last tick when the IEP rolls over.
500 	 * In normal mode, the IEP tick is 4ns.
501 	 * In slow compensation it could be 0ns or 8ns at
502 	 * every slow compensation cycle.
503 	 */
504 	if (offset > IEP_DEFAULT_CYCLE_TIME_NS - 8)
505 		offset = IEP_DEFAULT_CYCLE_TIME_NS - 8;
506 
507 	/* we're in shadow mode so we need to set the upper 32 bits */
508 	*cmp = (u64)offset << 32;
509 
510 	writel(reduction_factor, emac->prueth->shram.va +
511 		TIMESYNC_FW_WC_SYNCOUT_REDUCTION_FACTOR_OFFSET);
512 
513 	current_cycle = icssg_read_time(emac->prueth->shram.va +
514 					TIMESYNC_FW_WC_CYCLECOUNT_OFFSET);
515 
516 	/* Round the current_cycle count up to the next second boundary */
517 	start_offset = roundup(current_cycle, MSEC_PER_SEC);
518 
519 	hi_lo_writeq(start_offset, emac->prueth->shram.va +
520 		     TIMESYNC_FW_WC_SYNCOUT_START_TIME_CYCLECOUNT_OFFSET);
521 
522 	return 0;
523 }
524 
525 const struct icss_iep_clockops prueth_iep_clockops = {
526 	.settime = prueth_iep_settime,
527 	.gettime = prueth_iep_gettime,
528 	.perout_enable = prueth_perout_enable,
529 };
530 
531 static int prueth_create_xdp_rxqs(struct prueth_emac *emac)
532 {
533 	struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
534 	struct page_pool *pool = emac->rx_chns.pg_pool;
535 	int ret;
536 
537 	ret = xdp_rxq_info_reg(rxq, emac->ndev, 0, emac->napi_rx.napi_id);
538 	if (ret)
539 		return ret;
540 
541 	ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
542 	if (ret)
543 		xdp_rxq_info_unreg(rxq);
544 
545 	return ret;
546 }
547 
548 static void prueth_destroy_xdp_rxqs(struct prueth_emac *emac)
549 {
550 	struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
551 
552 	if (!xdp_rxq_info_is_reg(rxq))
553 		return;
554 
555 	xdp_rxq_info_unreg(rxq);
556 }
557 
558 static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr)
559 {
560 	struct net_device *real_dev;
561 	struct prueth_emac *emac;
562 	int port_mask;
563 	u8 vlan_id;
564 
565 	vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC;
566 	real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
567 	emac = netdev_priv(real_dev);
568 
569 	port_mask = BIT(emac->port_id) | icssg_fdb_lookup(emac, addr, vlan_id);
570 	icssg_fdb_add_del(emac, addr, vlan_id, port_mask, true);
571 	icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, true);
572 
573 	return 0;
574 }
575 
576 static int icssg_prueth_del_mcast(struct net_device *ndev, const u8 *addr)
577 {
578 	struct net_device *real_dev;
579 	struct prueth_emac *emac;
580 	int other_port_mask;
581 	int port_mask;
582 	u8 vlan_id;
583 
584 	vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC;
585 	real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
586 	emac = netdev_priv(real_dev);
587 
588 	port_mask = BIT(emac->port_id);
589 	other_port_mask = port_mask ^ icssg_fdb_lookup(emac, addr, vlan_id);
590 
591 	icssg_fdb_add_del(emac, addr, vlan_id, port_mask, false);
592 	icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, false);
593 
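	/* if the other port still references this address, restore its FDB
	 * and VLAN table entries
	 */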
594 	if (other_port_mask) {
595 		icssg_fdb_add_del(emac, addr, vlan_id, other_port_mask, true);
596 		icssg_vtbl_modify(emac, vlan_id, other_port_mask,
597 				  other_port_mask, true);
598 	}
599 
600 	return 0;
601 }
602 
603 static void icssg_prueth_hsr_fdb_add_del(struct prueth_emac *emac,
604 					 const u8 *addr, u8 vid, bool add)
605 {
606 	icssg_fdb_add_del(emac, addr, vid,
607 			  ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
608 			  ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
609 			  ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
610 			  ICSSG_FDB_ENTRY_BLOCK, add);
611 
612 	if (add)
613 		icssg_vtbl_modify(emac, vid, BIT(emac->port_id),
614 				  BIT(emac->port_id), add);
615 }
616 
617 static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
618 {
619 	struct net_device *real_dev;
620 	struct prueth_emac *emac;
621 	u8 vlan_id, i;
622 
623 	vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_HSR;
624 	real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
625 
626 	if (is_hsr_master(real_dev)) {
627 		for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
628 			emac = netdev_priv(hsr_get_port_ndev(real_dev, i));
629 			if (!emac)
630 				return -EINVAL;
631 			icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
632 						     true);
633 		}
634 	} else {
635 		emac = netdev_priv(real_dev);
636 		icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, true);
637 	}
638 
639 	return 0;
640 }
641 
642 static int icssg_prueth_hsr_del_mcast(struct net_device *ndev, const u8 *addr)
643 {
644 	struct net_device *real_dev;
645 	struct prueth_emac *emac;
646 	u8 vlan_id, i;
647 
648 	vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_HSR;
649 	real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
650 
651 	if (is_hsr_master(real_dev)) {
652 		for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
653 			emac = netdev_priv(hsr_get_port_ndev(real_dev, i));
654 			if (!emac)
655 				return -EINVAL;
656 			icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
657 						     false);
658 		}
659 	} else {
660 		emac = netdev_priv(real_dev);
661 		icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, false);
662 	}
663 
664 	return 0;
665 }
666 
667 static int icssg_update_vlan_mcast(struct net_device *vdev, int vid,
668 				   void *args)
669 {
670 	struct prueth_emac *emac = args;
671 
672 	if (!vdev || !vid)
673 		return 0;
674 
675 	netif_addr_lock_bh(vdev);
676 	__hw_addr_sync_multiple(&emac->vlan_mcast_list[vid], &vdev->mc,
677 				vdev->addr_len);
678 	netif_addr_unlock_bh(vdev);
679 
680 	if (emac->prueth->is_hsr_offload_mode)
681 		__hw_addr_sync_dev(&emac->vlan_mcast_list[vid], vdev,
682 				   icssg_prueth_hsr_add_mcast,
683 				   icssg_prueth_hsr_del_mcast);
684 	else
685 		__hw_addr_sync_dev(&emac->vlan_mcast_list[vid], vdev,
686 				   icssg_prueth_add_mcast,
687 				   icssg_prueth_del_mcast);
688 
689 	return 0;
690 }
691 
692 /**
693  * emac_ndo_open - EMAC device open
694  * @ndev: network adapter device
695  *
696  * Called when system wants to start the interface.
697  *
698  * Return: 0 for a successful open, or appropriate error code
699  */
700 static int emac_ndo_open(struct net_device *ndev)
701 {
702 	struct prueth_emac *emac = netdev_priv(ndev);
703 	int ret, i, num_data_chn = emac->tx_ch_num;
704 	struct icssg_flow_cfg __iomem *flow_cfg;
705 	struct prueth *prueth = emac->prueth;
706 	int slice = prueth_emac_slice(emac);
707 	struct device *dev = prueth->dev;
708 	int max_rx_flows;
709 	int rx_flow;
710 
711 	/* set the h/w MAC address as the user might have re-configured it */
712 	ether_addr_copy(emac->mac_addr, ndev->dev_addr);
713 
714 	icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
715 	icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
716 
717 	/* Notify the stack of the actual queue counts. */
718 	ret = netif_set_real_num_tx_queues(ndev, num_data_chn);
719 	if (ret) {
720 		dev_err(dev, "cannot set real number of tx queues\n");
721 		return ret;
722 	}
723 
724 	init_completion(&emac->cmd_complete);
725 	ret = prueth_init_tx_chns(emac);
726 	if (ret) {
727 		dev_err(dev, "failed to init tx channel: %d\n", ret);
728 		return ret;
729 	}
730 
731 	max_rx_flows = PRUETH_MAX_RX_FLOWS;
732 	ret = prueth_init_rx_chns(emac, &emac->rx_chns, "rx",
733 				  max_rx_flows, PRUETH_MAX_RX_DESC);
734 	if (ret) {
735 		dev_err(dev, "failed to init rx channel: %d\n", ret);
736 		goto cleanup_tx;
737 	}
738 
739 	ret = prueth_ndev_add_tx_napi(emac);
740 	if (ret)
741 		goto cleanup_rx;
742 
743 	/* we use only the highest priority flow for now i.e. @irq[3] */
744 	rx_flow = PRUETH_RX_FLOW_DATA;
745 	ret = request_irq(emac->rx_chns.irq[rx_flow], prueth_rx_irq,
746 			  IRQF_TRIGGER_HIGH, dev_name(dev), emac);
747 	if (ret) {
748 		dev_err(dev, "unable to request RX IRQ\n");
749 		goto cleanup_napi;
750 	}
751 
752 	if (!prueth->emacs_initialized) {
753 		ret = prueth_emac_common_start(prueth);
754 		if (ret)
755 			goto free_rx_irq;
756 	}
757 
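	/* tell the firmware the base RX flow ID used by this port */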
758 	flow_cfg = emac->dram.va + ICSSG_CONFIG_OFFSET + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET;
759 	writew(emac->rx_flow_id_base, &flow_cfg->rx_base_flow);
760 	ret = emac_fdb_flow_id_updated(emac);
761 
762 	if (ret) {
763 		netdev_err(ndev, "Failed to update Rx Flow ID %d", ret);
764 		goto stop;
765 	}
766 
767 	icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
768 
769 	ret = request_threaded_irq(emac->tx_ts_irq, NULL, prueth_tx_ts_irq,
770 				   IRQF_ONESHOT, dev_name(dev), emac);
771 	if (ret)
772 		goto stop;
773 
774 	/* Prepare RX */
775 	ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
776 	if (ret)
777 		goto free_tx_ts_irq;
778 
779 	ret = prueth_create_xdp_rxqs(emac);
780 	if (ret)
781 		goto reset_rx_chn;
782 
783 	ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
784 	if (ret)
785 		goto destroy_xdp_rxqs;
786 
787 	for (i = 0; i < emac->tx_ch_num; i++) {
788 		ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
789 		if (ret)
790 			goto reset_tx_chan;
791 	}
792 
793 	/* Enable NAPI in Tx and Rx direction */
794 	for (i = 0; i < emac->tx_ch_num; i++)
795 		napi_enable(&emac->tx_chns[i].napi_tx);
796 	napi_enable(&emac->napi_rx);
797 
798 	/* start PHY */
799 	phy_start(ndev->phydev);
800 
801 	prueth->emacs_initialized++;
802 
803 	queue_work(system_long_wq, &emac->stats_work.work);
804 
805 	return 0;
806 
807 reset_tx_chan:
808 	/* Since the interface is not yet up, there wouldn't be
809 	 * any SKB for completion. So pass false for free_skb
810 	 */
811 	prueth_reset_tx_chan(emac, i, false);
812 destroy_xdp_rxqs:
813 	prueth_destroy_xdp_rxqs(emac);
814 reset_rx_chn:
815 	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
816 free_tx_ts_irq:
817 	free_irq(emac->tx_ts_irq, emac);
818 stop:
819 	if (!prueth->emacs_initialized)
820 		prueth_emac_common_stop(prueth);
821 free_rx_irq:
822 	free_irq(emac->rx_chns.irq[rx_flow], emac);
823 cleanup_napi:
824 	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
825 cleanup_rx:
826 	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
827 cleanup_tx:
828 	prueth_cleanup_tx_chns(emac);
829 
830 	return ret;
831 }
832 
833 /**
834  * emac_ndo_stop - EMAC device stop
835  * @ndev: network adapter device
836  *
837  * Called when system wants to stop or down the interface.
838  *
839  * Return: Always 0 (Success)
840  */
841 static int emac_ndo_stop(struct net_device *ndev)
842 {
843 	struct prueth_emac *emac = netdev_priv(ndev);
844 	struct prueth *prueth = emac->prueth;
845 	int rx_flow = PRUETH_RX_FLOW_DATA;
846 	int max_rx_flows;
847 	int ret, i;
848 
849 	/* inform the upper layers. */
850 	netif_tx_stop_all_queues(ndev);
851 
852 	/* block packets from wire */
853 	if (ndev->phydev)
854 		phy_stop(ndev->phydev);
855 
856 	if (emac->prueth->is_hsr_offload_mode)
857 		__dev_mc_unsync(ndev, icssg_prueth_hsr_del_mcast);
858 	else
859 		__dev_mc_unsync(ndev, icssg_prueth_del_mcast);
860 
861 	atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
862 	/* ensure new tdown_cnt value is visible */
863 	smp_mb__after_atomic();
864 	/* tear down and disable UDMA channels */
865 	reinit_completion(&emac->tdown_complete);
866 	for (i = 0; i < emac->tx_ch_num; i++)
867 		k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);
868 
869 	ret = wait_for_completion_timeout(&emac->tdown_complete,
870 					  msecs_to_jiffies(1000));
871 	if (!ret)
872 		netdev_err(ndev, "tx teardown timeout\n");
873 
874 	prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
875 	for (i = 0; i < emac->tx_ch_num; i++) {
876 		napi_disable(&emac->tx_chns[i].napi_tx);
877 		hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer);
878 	}
879 
880 	max_rx_flows = PRUETH_MAX_RX_FLOWS;
881 	k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
882 
883 	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
884 	prueth_destroy_xdp_rxqs(emac);
885 	napi_disable(&emac->napi_rx);
886 	hrtimer_cancel(&emac->rx_hrtimer);
887 
888 	cancel_work_sync(&emac->rx_mode_work);
889 
890 	/* Destroy the queued work in ndo_stop() */
891 	cancel_delayed_work_sync(&emac->stats_work);
892 
893 	/* stop PRUs */
894 	if (prueth->emacs_initialized == 1)
895 		prueth_emac_common_stop(prueth);
896 
897 	free_irq(emac->tx_ts_irq, emac);
898 
899 	free_irq(emac->rx_chns.irq[rx_flow], emac);
900 	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
901 
902 	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
903 	prueth_cleanup_tx_chns(emac);
904 
905 	prueth->emacs_initialized--;
906 
907 	return 0;
908 }
909 
910 static void emac_ndo_set_rx_mode_work(struct work_struct *work)
911 {
912 	struct prueth_emac *emac = container_of(work, struct prueth_emac, rx_mode_work);
913 	struct net_device *ndev = emac->ndev;
914 	bool promisc, allmulti;
915 
916 	if (!netif_running(ndev))
917 		return;
918 
919 	promisc = ndev->flags & IFF_PROMISC;
920 	allmulti = ndev->flags & IFF_ALLMULTI;
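	/* start with UC/MC flooding disabled and re-enable it below based on
	 * the promisc/allmulti flags
	 */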
921 	icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_DISABLE);
922 	icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_DISABLE);
923 
924 	if (promisc) {
925 		icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_ENABLE);
926 		icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
927 		return;
928 	}
929 
930 	if (allmulti) {
931 		icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
932 		return;
933 	}
934 
935 	if (emac->prueth->is_hsr_offload_mode) {
936 		__dev_mc_sync(ndev, icssg_prueth_hsr_add_mcast,
937 			      icssg_prueth_hsr_del_mcast);
938 		if (rtnl_trylock()) {
939 			vlan_for_each(emac->prueth->hsr_dev,
940 				      icssg_update_vlan_mcast, emac);
941 			rtnl_unlock();
942 		}
943 	} else {
944 		__dev_mc_sync(ndev, icssg_prueth_add_mcast,
945 			      icssg_prueth_del_mcast);
946 		if (rtnl_trylock()) {
947 			vlan_for_each(ndev, icssg_update_vlan_mcast, emac);
948 			rtnl_unlock();
949 		}
950 	}
951 }
952 
953 /**
954  * emac_ndo_set_rx_mode - EMAC set receive mode function
955  * @ndev: The EMAC network adapter
956  *
957  * Called when system wants to set the receive mode of the device.
958  *
959  */
960 static void emac_ndo_set_rx_mode(struct net_device *ndev)
961 {
962 	struct prueth_emac *emac = netdev_priv(ndev);
963 
964 	queue_work(emac->cmd_wq, &emac->rx_mode_work);
965 }
966 
967 static netdev_features_t emac_ndo_fix_features(struct net_device *ndev,
968 					       netdev_features_t features)
969 {
970 	/* HSR tag insertion offload and HSR duplication offload are tightly
971 	 * coupled in the firmware implementation. Both features must be
972 	 * enabled or disabled together.
973 	 */
974 	if (!(ndev->features & (NETIF_F_HW_HSR_DUP | NETIF_F_HW_HSR_TAG_INS)))
975 		if ((features & NETIF_F_HW_HSR_DUP) ||
976 		    (features & NETIF_F_HW_HSR_TAG_INS))
977 			features |= NETIF_F_HW_HSR_DUP |
978 				    NETIF_F_HW_HSR_TAG_INS;
979 
980 	if ((ndev->features & NETIF_F_HW_HSR_DUP) ||
981 	    (ndev->features & NETIF_F_HW_HSR_TAG_INS))
982 		if (!(features & NETIF_F_HW_HSR_DUP) ||
983 		    !(features & NETIF_F_HW_HSR_TAG_INS))
984 			features &= ~(NETIF_F_HW_HSR_DUP |
985 				      NETIF_F_HW_HSR_TAG_INS);
986 
987 	return features;
988 }
989 
990 static int emac_ndo_vlan_rx_add_vid(struct net_device *ndev,
991 				    __be16 proto, u16 vid)
992 {
993 	struct prueth_emac *emac = netdev_priv(ndev);
994 	struct prueth *prueth = emac->prueth;
995 	int port_mask = BIT(emac->port_id);
996 	int untag_mask = 0;
997 
998 	if (prueth->is_hsr_offload_mode)
999 		port_mask |= BIT(PRUETH_PORT_HOST);
1000 
1001 	__hw_addr_init(&emac->vlan_mcast_list[vid]);
1002 	netdev_dbg(emac->ndev, "VID add vid:%u port_mask:%X untag_mask %X\n",
1003 		   vid, port_mask, untag_mask);
1004 
1005 	icssg_vtbl_modify(emac, vid, port_mask, untag_mask, true);
1006 	icssg_set_pvid(emac->prueth, vid, emac->port_id);
1007 
1008 	return 0;
1009 }
1010 
1011 static int emac_ndo_vlan_rx_del_vid(struct net_device *ndev,
1012 				    __be16 proto, u16 vid)
1013 {
1014 	struct prueth_emac *emac = netdev_priv(ndev);
1015 	struct prueth *prueth = emac->prueth;
1016 	int port_mask = BIT(emac->port_id);
1017 	int untag_mask = 0;
1018 
1019 	if (prueth->is_hsr_offload_mode)
1020 		port_mask = BIT(PRUETH_PORT_HOST);
1021 
1022 	netdev_dbg(emac->ndev, "VID del vid:%u port_mask:%X untag_mask  %X\n",
1023 		   vid, port_mask, untag_mask);
1024 	icssg_vtbl_modify(emac, vid, port_mask, untag_mask, false);
1025 
1026 	return 0;
1027 }
1028 
1029 /**
1030  * emac_xdp_xmit - Implements ndo_xdp_xmit
1031  * @dev: netdev
1032  * @n: number of frames
1033  * @frames: array of XDP buffer pointers
1034  * @flags: XDP extra info
1035  *
1036  * Return: number of frames successfully sent. Failed frames
1037  * will be freed by the XDP core.
1038  *
1039  * For error cases, a negative errno code is returned and no frames
1040  * are transmitted (the caller must handle freeing the frames).
1041  **/
1042 static int emac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
1043 			 u32 flags)
1044 {
1045 	struct prueth_emac *emac = netdev_priv(dev);
1046 	struct net_device *ndev = emac->ndev;
1047 	struct netdev_queue *netif_txq;
1048 	int cpu = smp_processor_id();
1049 	struct xdp_frame *xdpf;
1050 	unsigned int q_idx;
1051 	int nxmit = 0;
1052 	u32 err;
1053 	int i;
1054 
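	/* spread XDP transmits across the TX channels based on the CPU */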
1055 	q_idx = cpu % emac->tx_ch_num;
1056 	netif_txq = netdev_get_tx_queue(ndev, q_idx);
1057 
1058 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1059 		return -EINVAL;
1060 
1061 	__netif_tx_lock(netif_txq, cpu);
1062 	for (i = 0; i < n; i++) {
1063 		xdpf = frames[i];
1064 		err = emac_xmit_xdp_frame(emac, xdpf, NULL, q_idx);
1065 		if (err != ICSSG_XDP_TX) {
1066 			ndev->stats.tx_dropped++;
1067 			break;
1068 		}
1069 		nxmit++;
1070 	}
1071 	__netif_tx_unlock(netif_txq);
1072 
1073 	return nxmit;
1074 }
1075 
1076 /**
1077  * emac_xdp_setup - add/remove an XDP program
1078  * @emac: emac device
1079  * @bpf: XDP program
1080  *
1081  * Return: Always 0 (Success)
1082  **/
1083 static int emac_xdp_setup(struct prueth_emac *emac, struct netdev_bpf *bpf)
1084 {
1085 	struct bpf_prog *prog = bpf->prog;
1086 
1087 	if (!emac->xdpi.prog && !prog)
1088 		return 0;
1089 
1090 	WRITE_ONCE(emac->xdp_prog, prog);
1091 
1092 	xdp_attachment_setup(&emac->xdpi, bpf);
1093 
1094 	return 0;
1095 }
1096 
1097 /**
1098  * emac_ndo_bpf - implements ndo_bpf for icssg_prueth
1099  * @ndev: network adapter device
1100  * @bpf: XDP program
1101  *
1102  * Return: 0 on success, error code on failure.
1103  **/
1104 static int emac_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
1105 {
1106 	struct prueth_emac *emac = netdev_priv(ndev);
1107 
1108 	switch (bpf->command) {
1109 	case XDP_SETUP_PROG:
1110 		return emac_xdp_setup(emac, bpf);
1111 	default:
1112 		return -EINVAL;
1113 	}
1114 }
1115 
1116 static const struct net_device_ops emac_netdev_ops = {
1117 	.ndo_open = emac_ndo_open,
1118 	.ndo_stop = emac_ndo_stop,
1119 	.ndo_start_xmit = icssg_ndo_start_xmit,
1120 	.ndo_set_mac_address = eth_mac_addr,
1121 	.ndo_validate_addr = eth_validate_addr,
1122 	.ndo_tx_timeout = icssg_ndo_tx_timeout,
1123 	.ndo_set_rx_mode = emac_ndo_set_rx_mode,
1124 	.ndo_eth_ioctl = icssg_ndo_ioctl,
1125 	.ndo_get_stats64 = icssg_ndo_get_stats64,
1126 	.ndo_get_phys_port_name = icssg_ndo_get_phys_port_name,
1127 	.ndo_fix_features = emac_ndo_fix_features,
1128 	.ndo_vlan_rx_add_vid = emac_ndo_vlan_rx_add_vid,
1129 	.ndo_vlan_rx_kill_vid = emac_ndo_vlan_rx_del_vid,
1130 	.ndo_bpf = emac_ndo_bpf,
1131 	.ndo_xdp_xmit = emac_xdp_xmit,
1132 };
1133 
1134 static int prueth_netdev_init(struct prueth *prueth,
1135 			      struct device_node *eth_node)
1136 {
1137 	int ret, num_tx_chn = PRUETH_MAX_TX_QUEUES;
1138 	struct prueth_emac *emac;
1139 	struct net_device *ndev;
1140 	enum prueth_port port;
1141 	const char *irq_name;
1142 	enum prueth_mac mac;
1143 
1144 	port = prueth_node_port(eth_node);
1145 	if (port == PRUETH_PORT_INVALID)
1146 		return -EINVAL;
1147 
1148 	mac = prueth_node_mac(eth_node);
1149 	if (mac == PRUETH_MAC_INVALID)
1150 		return -EINVAL;
1151 
1152 	ndev = alloc_etherdev_mq(sizeof(*emac), num_tx_chn);
1153 	if (!ndev)
1154 		return -ENOMEM;
1155 
1156 	emac = netdev_priv(ndev);
1157 	emac->prueth = prueth;
1158 	emac->ndev = ndev;
1159 	emac->port_id = port;
1160 	emac->xdp_prog = NULL;
1161 	emac->ndev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
1162 	emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
1163 	if (!emac->cmd_wq) {
1164 		ret = -ENOMEM;
1165 		goto free_ndev;
1166 	}
1167 	INIT_WORK(&emac->rx_mode_work, emac_ndo_set_rx_mode_work);
1168 
1169 	INIT_DELAYED_WORK(&emac->stats_work, icssg_stats_work_handler);
1170 
1171 	ret = pruss_request_mem_region(prueth->pruss,
1172 				       port == PRUETH_PORT_MII0 ?
1173 				       PRUSS_MEM_DRAM0 : PRUSS_MEM_DRAM1,
1174 				       &emac->dram);
1175 	if (ret) {
1176 		dev_err(prueth->dev, "unable to get DRAM: %d\n", ret);
1177 		ret = -ENOMEM;
1178 		goto free_wq;
1179 	}
1180 
1181 	emac->tx_ch_num = 1;
1182 
1183 	irq_name = "tx_ts0";
1184 	if (emac->port_id == PRUETH_PORT_MII1)
1185 		irq_name = "tx_ts1";
1186 	emac->tx_ts_irq = platform_get_irq_byname_optional(prueth->pdev, irq_name);
1187 	if (emac->tx_ts_irq < 0) {
1188 		ret = dev_err_probe(prueth->dev, emac->tx_ts_irq, "could not get tx_ts_irq\n");
1189 		goto free;
1190 	}
1191 
1192 	SET_NETDEV_DEV(ndev, prueth->dev);
1193 	spin_lock_init(&emac->lock);
1194 	mutex_init(&emac->cmd_lock);
1195 
1196 	emac->phy_node = of_parse_phandle(eth_node, "phy-handle", 0);
1197 	if (!emac->phy_node && !of_phy_is_fixed_link(eth_node)) {
1198 		dev_err(prueth->dev, "couldn't find phy-handle\n");
1199 		ret = -ENODEV;
1200 		goto free;
1201 	} else if (of_phy_is_fixed_link(eth_node)) {
1202 		ret = of_phy_register_fixed_link(eth_node);
1203 		if (ret) {
1204 			ret = dev_err_probe(prueth->dev, ret,
1205 					    "failed to register fixed-link phy\n");
1206 			goto free;
1207 		}
1208 
1209 		emac->phy_node = eth_node;
1210 	}
1211 
1212 	ret = of_get_phy_mode(eth_node, &emac->phy_if);
1213 	if (ret) {
1214 		dev_err(prueth->dev, "could not get phy-mode property\n");
1215 		goto free;
1216 	}
1217 
1218 	if (emac->phy_if != PHY_INTERFACE_MODE_MII &&
1219 	    !phy_interface_mode_is_rgmii(emac->phy_if)) {
1220 		dev_err(prueth->dev, "PHY mode unsupported %s\n", phy_modes(emac->phy_if));
1221 		ret = -EINVAL;
1222 		goto free;
1223 	}
1224 
1225 	/* AM65 SR2.0 always has the TX internal delay enabled in hardware
1226 	 * and it cannot be disabled. The switch block below describes how
1227 	 * the different PHY modes are handled given this hardware
1228 	 * restriction.
1229 	 */
1230 	switch (emac->phy_if) {
1231 	case PHY_INTERFACE_MODE_RGMII_ID:
1232 		emac->phy_if = PHY_INTERFACE_MODE_RGMII_RXID;
1233 		break;
1234 	case PHY_INTERFACE_MODE_RGMII_TXID:
1235 		emac->phy_if = PHY_INTERFACE_MODE_RGMII;
1236 		break;
1237 	case PHY_INTERFACE_MODE_RGMII:
1238 	case PHY_INTERFACE_MODE_RGMII_RXID:
1239 		dev_err(prueth->dev, "RGMII mode without TX delay is not supported");
1240 		ret = -EINVAL;
1241 		goto free;
1242 	default:
1243 		break;
1244 	}
1245 
1246 	/* get mac address from DT and set private and netdev addr */
1247 	ret = of_get_ethdev_address(eth_node, ndev);
1248 	if (!is_valid_ether_addr(ndev->dev_addr)) {
1249 		eth_hw_addr_random(ndev);
1250 		dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n",
1251 			 port, ndev->dev_addr);
1252 	}
1253 	ether_addr_copy(emac->mac_addr, ndev->dev_addr);
1254 
1255 	ndev->dev.of_node = eth_node;
1256 	ndev->min_mtu = PRUETH_MIN_PKT_SIZE;
1257 	ndev->max_mtu = PRUETH_MAX_MTU;
1258 	ndev->netdev_ops = &emac_netdev_ops;
1259 	ndev->ethtool_ops = &icssg_ethtool_ops;
1260 	ndev->hw_features = NETIF_F_SG;
1261 	ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
1262 	ndev->hw_features |= NETIF_PRUETH_HSR_OFFLOAD_FEATURES;
1263 	xdp_set_features_flag(ndev,
1264 			      NETDEV_XDP_ACT_BASIC |
1265 			      NETDEV_XDP_ACT_REDIRECT |
1266 			      NETDEV_XDP_ACT_NDO_XMIT);
1267 
1268 	netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
1269 	hrtimer_setup(&emac->rx_hrtimer, &emac_rx_timer_callback, CLOCK_MONOTONIC,
1270 		      HRTIMER_MODE_REL_PINNED);
1271 	prueth->emac[mac] = emac;
1272 
1273 	return 0;
1274 
1275 free:
1276 	pruss_release_mem_region(prueth->pruss, &emac->dram);
1277 free_wq:
1278 	destroy_workqueue(emac->cmd_wq);
1279 free_ndev:
1280 	emac->ndev = NULL;
1281 	prueth->emac[mac] = NULL;
1282 	free_netdev(ndev);
1283 
1284 	return ret;
1285 }
1286 
1287 bool prueth_dev_check(const struct net_device *ndev)
1288 {
1289 	if (ndev->netdev_ops == &emac_netdev_ops && netif_running(ndev)) {
1290 		struct prueth_emac *emac = netdev_priv(ndev);
1291 
1292 		return emac->prueth->is_switch_mode;
1293 	}
1294 
1295 	return false;
1296 }
1297 
1298 static void prueth_offload_fwd_mark_update(struct prueth *prueth)
1299 {
1300 	int set_val = 0;
1301 	int i;
1302 
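	/* set offload_fwd_mark only when both ports are members of the same
	 * HW bridge
	 */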
1303 	if (prueth->br_members == (BIT(PRUETH_PORT_MII0) | BIT(PRUETH_PORT_MII1)))
1304 		set_val = 1;
1305 
1306 	dev_dbg(prueth->dev, "set offload_fwd_mark %d\n", set_val);
1307 
1308 	for (i = PRUETH_MAC0; i < PRUETH_NUM_MACS; i++) {
1309 		struct prueth_emac *emac = prueth->emac[i];
1310 
1311 		if (!emac || !emac->ndev)
1312 			continue;
1313 
1314 		emac->offload_fwd_mark = set_val;
1315 	}
1316 }
1317 
1318 static int prueth_emac_restart(struct prueth *prueth)
1319 {
1320 	struct prueth_emac *emac0 = prueth->emac[PRUETH_MAC0];
1321 	struct prueth_emac *emac1 = prueth->emac[PRUETH_MAC1];
1322 	int ret;
1323 
1324 	/* Detach the net_device for both PRUeth ports */
1325 	if (netif_running(emac0->ndev))
1326 		netif_device_detach(emac0->ndev);
1327 	if (netif_running(emac1->ndev))
1328 		netif_device_detach(emac1->ndev);
1329 
1330 	/* Disable both PRUeth ports */
1331 	ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
1332 	ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);
1333 	if (ret)
1334 		return ret;
1335 
1336 	/* Stop the PRU cores for both PRUeth ports */
1337 	ret = prueth_emac_common_stop(prueth);
1338 	if (ret) {
1339 		dev_err(prueth->dev, "Failed to stop the firmwares");
1340 		return ret;
1341 	}
1342 
1343 	/* Start the PRU cores for both PRUeth ports */
1344 	ret = prueth_emac_common_start(prueth);
1345 	if (ret) {
1346 		dev_err(prueth->dev, "Failed to start the firmwares");
1347 		return ret;
1348 	}
1349 
1350 	/* Enable forwarding for both PRUeth ports */
1351 	ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD);
1352 	ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD);
1353 
1354 	/* Attach the net_device for both PRUeth ports */
1355 	netif_device_attach(emac0->ndev);
1356 	netif_device_attach(emac1->ndev);
1357 
1358 	return ret;
1359 }
1360 
1361 static void icssg_change_mode(struct prueth *prueth)
1362 {
1363 	struct prueth_emac *emac;
1364 	int mac, ret;
1365 
1366 	ret = prueth_emac_restart(prueth);
1367 	if (ret) {
1368 		dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
1369 		return;
1370 	}
1371 
1372 	for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
1373 		emac = prueth->emac[mac];
1374 		if (prueth->is_hsr_offload_mode) {
1375 			if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM)
1376 				icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE);
1377 			else
1378 				icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE);
1379 		}
1380 
1381 		if (netif_running(emac->ndev)) {
1382 			icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan,
1383 					  ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
1384 					  ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
1385 					  ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
1386 					  ICSSG_FDB_ENTRY_BLOCK,
1387 					  true);
1388 			icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID,
1389 					  BIT(emac->port_id) | DEFAULT_PORT_MASK,
1390 					  BIT(emac->port_id) | DEFAULT_UNTAG_MASK,
1391 					  true);
1392 			if (prueth->is_hsr_offload_mode)
1393 				icssg_vtbl_modify(emac, DEFAULT_VID,
1394 						  DEFAULT_PORT_MASK,
1395 						  DEFAULT_UNTAG_MASK, true);
1396 			icssg_set_pvid(prueth, emac->port_vlan, emac->port_id);
1397 			if (prueth->is_switch_mode)
1398 				icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);
1399 		}
1400 	}
1401 }
1402 
1403 static int prueth_netdevice_port_link(struct net_device *ndev,
1404 				      struct net_device *br_ndev,
1405 				      struct netlink_ext_ack *extack)
1406 {
1407 	struct prueth_emac *emac = netdev_priv(ndev);
1408 	struct prueth *prueth = emac->prueth;
1409 	int err;
1410 
1411 	if (!prueth->br_members) {
1412 		prueth->hw_bridge_dev = br_ndev;
1413 	} else {
1414 		/* Adding the port to a second bridge is
1415 		 * not supported
1416 		 */
1417 		if (prueth->hw_bridge_dev != br_ndev)
1418 			return -EOPNOTSUPP;
1419 	}
1420 
1421 	err = switchdev_bridge_port_offload(br_ndev, ndev, emac,
1422 					    &prueth->prueth_switchdev_nb,
1423 					    &prueth->prueth_switchdev_bl_nb,
1424 					    false, extack);
1425 	if (err)
1426 		return err;
1427 
1428 	prueth->br_members |= BIT(emac->port_id);
1429 
1430 	if (!prueth->is_switch_mode) {
1431 		if (prueth->br_members & BIT(PRUETH_PORT_MII0) &&
1432 		    prueth->br_members & BIT(PRUETH_PORT_MII1)) {
1433 			prueth->is_switch_mode = true;
1434 			prueth->default_vlan = PRUETH_DFLT_VLAN_SW;
1435 			emac->port_vlan = prueth->default_vlan;
1436 			icssg_change_mode(prueth);
1437 		}
1438 	}
1439 
1440 	prueth_offload_fwd_mark_update(prueth);
1441 
1442 	return NOTIFY_DONE;
1443 }
1444 
1445 static void prueth_netdevice_port_unlink(struct net_device *ndev)
1446 {
1447 	struct prueth_emac *emac = netdev_priv(ndev);
1448 	struct prueth *prueth = emac->prueth;
1449 	int ret;
1450 
1451 	prueth->br_members &= ~BIT(emac->port_id);
1452 
1453 	if (prueth->is_switch_mode) {
1454 		prueth->is_switch_mode = false;
1455 		emac->port_vlan = 0;
1456 		ret = prueth_emac_restart(prueth);
1457 		if (ret) {
1458 			dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
1459 			return;
1460 		}
1461 	}
1462 
1463 	prueth_offload_fwd_mark_update(prueth);
1464 
1465 	if (!prueth->br_members)
1466 		prueth->hw_bridge_dev = NULL;
1467 }
1468 
1469 static int prueth_hsr_port_link(struct net_device *ndev)
1470 {
1471 	struct prueth_emac *emac = netdev_priv(ndev);
1472 	struct prueth *prueth = emac->prueth;
1473 	struct prueth_emac *emac0;
1474 	struct prueth_emac *emac1;
1475 
1476 	emac0 = prueth->emac[PRUETH_MAC0];
1477 	emac1 = prueth->emac[PRUETH_MAC1];
1478 
1479 	if (prueth->is_switch_mode)
1480 		return -EOPNOTSUPP;
1481 
1482 	prueth->hsr_members |= BIT(emac->port_id);
1483 	if (!prueth->is_hsr_offload_mode) {
1484 		if (prueth->hsr_members & BIT(PRUETH_PORT_MII0) &&
1485 		    prueth->hsr_members & BIT(PRUETH_PORT_MII1)) {
1486 			if (!(emac0->ndev->features &
1487 			      NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
1488 			    !(emac1->ndev->features &
1489 			      NETIF_PRUETH_HSR_OFFLOAD_FEATURES))
1490 				return -EOPNOTSUPP;
1491 			prueth->is_hsr_offload_mode = true;
1492 			prueth->default_vlan = PRUETH_DFLT_VLAN_HSR;
1493 			emac0->port_vlan = prueth->default_vlan;
1494 			emac1->port_vlan = prueth->default_vlan;
1495 			icssg_change_mode(prueth);
1496 			netdev_dbg(ndev, "Enabling HSR offload mode\n");
1497 		}
1498 	}
1499 
1500 	return 0;
1501 }
1502 
1503 static void prueth_hsr_port_unlink(struct net_device *ndev)
1504 {
1505 	struct prueth_emac *emac = netdev_priv(ndev);
1506 	struct prueth *prueth = emac->prueth;
1507 	struct prueth_emac *emac0;
1508 	struct prueth_emac *emac1;
1509 	int ret;
1510 
1511 	emac0 = prueth->emac[PRUETH_MAC0];
1512 	emac1 = prueth->emac[PRUETH_MAC1];
1513 
1514 	prueth->hsr_members &= ~BIT(emac->port_id);
1515 	if (prueth->is_hsr_offload_mode) {
1516 		prueth->is_hsr_offload_mode = false;
1517 		emac0->port_vlan = 0;
1518 		emac1->port_vlan = 0;
1519 		prueth->hsr_dev = NULL;
1520 		ret = prueth_emac_restart(prueth);
1521 		if (ret) {
1522 			dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
1523 			return;
1524 		}
1525 		netdev_dbg(ndev, "Disabling HSR Offload mode\n");
1526 	}
1527 }
1528 
1529 /* netdev notifier */
1530 static int prueth_netdevice_event(struct notifier_block *unused,
1531 				  unsigned long event, void *ptr)
1532 {
1533 	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
1534 	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
1535 	struct netdev_notifier_changeupper_info *info;
1536 	struct prueth_emac *emac = netdev_priv(ndev);
1537 	struct prueth *prueth = emac->prueth;
1538 	enum hsr_version hsr_ndev_version;
1539 	int ret = NOTIFY_DONE;
1540 
1541 	if (ndev->netdev_ops != &emac_netdev_ops)
1542 		return NOTIFY_DONE;
1543 
1544 	switch (event) {
1545 	case NETDEV_CHANGEUPPER:
1546 		info = ptr;
1547 
1548 		if ((ndev->features & NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
1549 		    is_hsr_master(info->upper_dev)) {
1550 			hsr_get_version(info->upper_dev, &hsr_ndev_version);
1551 			if (hsr_ndev_version != HSR_V1 && hsr_ndev_version != PRP_V1)
1552 				return -EOPNOTSUPP;
1553 			prueth->hsr_prp_version = hsr_ndev_version;
1554 
1555 			if (info->linking) {
1556 				if (!prueth->hsr_dev) {
1557 					prueth->hsr_dev = info->upper_dev;
1558 					icssg_class_set_host_mac_addr(prueth->miig_rt,
1559 								      prueth->hsr_dev->dev_addr);
1560 				} else {
1561 					if (prueth->hsr_dev != info->upper_dev) {
1562 						netdev_dbg(ndev, "Both interfaces must be linked to same upper device\n");
1563 						return -EOPNOTSUPP;
1564 					}
1565 				}
1566 				prueth_hsr_port_link(ndev);
1567 			} else {
1568 				prueth_hsr_port_unlink(ndev);
1569 			}
1570 		}
1571 
1572 		if (netif_is_bridge_master(info->upper_dev)) {
1573 			if (info->linking)
1574 				ret = prueth_netdevice_port_link(ndev, info->upper_dev, extack);
1575 			else
1576 				prueth_netdevice_port_unlink(ndev);
1577 		}
1578 		break;
1579 	default:
1580 		return NOTIFY_DONE;
1581 	}
1582 
1583 	return notifier_from_errno(ret);
1584 }
1585 
1586 static int prueth_register_notifiers(struct prueth *prueth)
1587 {
1588 	int ret = 0;
1589 
1590 	prueth->prueth_netdevice_nb.notifier_call = &prueth_netdevice_event;
1591 	ret = register_netdevice_notifier(&prueth->prueth_netdevice_nb);
1592 	if (ret) {
1593 		dev_err(prueth->dev, "can't register netdevice notifier\n");
1594 		return ret;
1595 	}
1596 
1597 	ret = prueth_switchdev_register_notifiers(prueth);
1598 	if (ret)
1599 		unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
1600 
1601 	return ret;
1602 }
1603 
1604 static void prueth_unregister_notifiers(struct prueth *prueth)
1605 {
1606 	prueth_switchdev_unregister_notifiers(prueth);
1607 	unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
1608 }
1609 
1610 static void icssg_read_firmware_names(struct device_node *np,
1611 				      struct icssg_firmwares *fw)
1612 {
1613 	int i;
1614 
1615 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
1616 		of_property_read_string_index(np, "firmware-name", i * 3 + 0,
1617 					      &fw[i].pru);
1618 		of_property_read_string_index(np, "firmware-name", i * 3 + 1,
1619 					      &fw[i].rtu);
1620 		of_property_read_string_index(np, "firmware-name", i * 3 + 2,
1621 					      &fw[i].txpru);
1622 	}
1623 }
1624 
1625 /* icssg_firmware_name_replace - Replace a substring in firmware name
1626  * @dev: device pointer for memory allocation
1627  * @src: source firmware name string
1628  * @from: substring to replace
1629  * @to: replacement substring
1630  *
1631  * Return: a newly allocated string with the replacement, or the original
1632  * string if replacement is not possible.
1633  */
1634 static const char *icssg_firmware_name_replace(struct device *dev,
1635 					       const char *src,
1636 					       const char *from,
1637 					       const char *to)
1638 {
1639 	size_t prefix, from_len, to_len, total;
1640 	const char *p = strstr(src, from);
1641 	char *buf;
1642 
1643 	if (!p)
1644 		return src; /* fallback: no replacement, use original */
1645 
1646 	prefix = p - src;
1647 	from_len = strlen(from);
1648 	to_len = strlen(to);
1649 	total = strlen(src) - from_len + to_len + 1;
1650 
1651 	buf = devm_kzalloc(dev, total, GFP_KERNEL);
1652 	if (!buf)
1653 		return src; /* fallback: allocation failed, use original */
1654 
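	/* splice the name: copy the prefix, then the replacement, then the
	 * tail after the matched substring
	 */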
1655 	strscpy(buf, src, prefix + 1);
1656 	strscpy(buf + prefix, to, to_len + 1);
1657 	strscpy(buf + prefix + to_len, p + from_len, total - prefix - to_len);
1658 
1659 	return buf;
1660 }
1661 
1662 /**
1663  * icssg_mode_firmware_names - Generate firmware names for a specific mode
1664  * @dev: device pointer for logging and context
1665  * @src: source array of firmware name structures
1666  * @dst: destination array to store updated firmware name structures
1667  * @from: substring in firmware names to be replaced
1668  * @to: substring to replace @from in firmware names
1669  *
1670  * Iterates over all MACs and replaces occurrences of the @from substring
1671  * with @to in the firmware names (pru, rtu, txpru) for each MAC. The
1672  * updated firmware names are stored in the @dst array.
1673  */
1674 static void icssg_mode_firmware_names(struct device *dev,
1675 				      struct icssg_firmwares *src,
1676 				      struct icssg_firmwares *dst,
1677 				      const char *from, const char *to)
1678 {
1679 	int i;
1680 
1681 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
1682 		dst[i].pru = icssg_firmware_name_replace(dev, src[i].pru,
1683 							 from, to);
1684 		dst[i].rtu = icssg_firmware_name_replace(dev, src[i].rtu,
1685 							 from, to);
1686 		dst[i].txpru = icssg_firmware_name_replace(dev, src[i].txpru,
1687 							   from, to);
1688 	}
1689 }
1690 
1691 static int prueth_probe(struct platform_device *pdev)
1692 {
1693 	struct device_node *eth_node, *eth_ports_node;
1694 	struct device_node  *eth0_node = NULL;
1695 	struct device_node  *eth1_node = NULL;
1696 	struct genpool_data_align gp_data = {
1697 		.align = SZ_64K,
1698 	};
1699 	struct device *dev = &pdev->dev;
1700 	struct device_node *np;
1701 	struct prueth *prueth;
1702 	struct pruss *pruss;
1703 	u32 msmc_ram_size;
1704 	int i, ret;
1705 
1706 	np = dev->of_node;
1707 
1708 	BUILD_BUG_ON_MSG((sizeof(struct prueth_swdata) > PRUETH_NAV_SW_DATA_SIZE),
1709 			 "insufficient SW_DATA size");
1710 
1711 	prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
1712 	if (!prueth)
1713 		return -ENOMEM;
1714 
1715 	dev_set_drvdata(dev, prueth);
1716 	prueth->pdev = pdev;
1717 	prueth->pdata = *(const struct prueth_pdata *)device_get_match_data(dev);
1718 
1719 	prueth->dev = dev;
1720 	eth_ports_node = of_get_child_by_name(np, "ethernet-ports");
1721 	if (!eth_ports_node)
1722 		return -ENOENT;
1723 
1724 	for_each_child_of_node(eth_ports_node, eth_node) {
1725 		u32 reg;
1726 
1727 		if (strcmp(eth_node->name, "port"))
1728 			continue;
1729 		ret = of_property_read_u32(eth_node, "reg", &reg);
1730 		if (ret < 0) {
1731 			dev_err(dev, "%pOF error reading port_id %d\n",
1732 				eth_node, ret);
1733 		}
1734 
1735 		of_node_get(eth_node);
1736 
1737 		if (reg == 0) {
1738 			eth0_node = eth_node;
1739 			if (!of_device_is_available(eth0_node)) {
1740 				of_node_put(eth0_node);
1741 				eth0_node = NULL;
1742 			}
1743 		} else if (reg == 1) {
1744 			eth1_node = eth_node;
1745 			if (!of_device_is_available(eth1_node)) {
1746 				of_node_put(eth1_node);
1747 				eth1_node = NULL;
1748 			}
1749 		} else {
1750 			dev_err(dev, "port reg should be 0 or 1\n");
1751 		}
1752 	}
1753 
1754 	of_node_put(eth_ports_node);
1755 
1756 	/* At least one node must be present and available else we fail */
1757 	if (!eth0_node && !eth1_node) {
1758 		dev_err(dev, "neither port0 nor port1 node available\n");
1759 		return -ENODEV;
1760 	}
1761 
1762 	if (eth0_node == eth1_node) {
1763 		dev_err(dev, "port0 and port1 can't have same reg\n");
1764 		of_node_put(eth0_node);
1765 		return -ENODEV;
1766 	}
1767 
1768 	prueth->eth_node[PRUETH_MAC0] = eth0_node;
1769 	prueth->eth_node[PRUETH_MAC1] = eth1_node;
1770 
1771 	prueth->miig_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-g-rt");
1772 	if (IS_ERR(prueth->miig_rt)) {
1773 		dev_err(dev, "couldn't get ti,mii-g-rt syscon regmap\n");
1774 		return -ENODEV;
1775 	}
1776 
1777 	prueth->mii_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-rt");
1778 	if (IS_ERR(prueth->mii_rt)) {
1779 		dev_err(dev, "couldn't get ti,mii-rt syscon regmap\n");
1780 		return -ENODEV;
1781 	}
1782 
1783 	prueth->pa_stats = syscon_regmap_lookup_by_phandle(np, "ti,pa-stats");
1784 	if (IS_ERR(prueth->pa_stats)) {
1785 		dev_err(dev, "couldn't get ti,pa-stats syscon regmap\n");
1786 		prueth->pa_stats = NULL;
1787 	}
1788 
1789 	if (eth0_node || eth1_node) {
1790 		ret = prueth_get_cores(prueth, ICSS_SLICE0, false);
1791 		if (ret)
1792 			goto put_cores;
1793 		ret = prueth_get_cores(prueth, ICSS_SLICE1, false);
1794 		if (ret)
1795 			goto put_cores;
1796 	}
1797 
1798 	pruss = pruss_get(eth0_node ?
1799 			  prueth->pru[ICSS_SLICE0] : prueth->pru[ICSS_SLICE1]);
1800 	if (IS_ERR(pruss)) {
1801 		ret = PTR_ERR(pruss);
1802 		dev_err(dev, "unable to get pruss handle\n");
1803 		goto put_cores;
1804 	}
1805 
1806 	prueth->pruss = pruss;
1807 
1808 	ret = pruss_request_mem_region(pruss, PRUSS_MEM_SHRD_RAM2,
1809 				       &prueth->shram);
1810 	if (ret) {
1811 		dev_err(dev, "unable to get PRUSS SHRD RAM2: %d\n", ret);
1812 		goto put_pruss;
1813 	}
1814 
1815 	prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
1816 	if (!prueth->sram_pool) {
1817 		dev_err(dev, "unable to get SRAM pool\n");
1818 		ret = -ENODEV;
1819 
1820 		goto put_mem;
1821 	}
1822 
1823 	prueth->is_switchmode_supported = prueth->pdata.switch_mode;
1824 	if (prueth->pdata.banked_ms_ram) {
1825 		/* Reserve 2 MSMC RAM banks for buffers to avoid arbitration */
1826 		msmc_ram_size = (2 * MSMC_RAM_BANK_SIZE);
1827 	} else {
1828 		msmc_ram_size = PRUETH_EMAC_TOTAL_BUF_SIZE;
1829 		if (prueth->is_switchmode_supported)
1830 			msmc_ram_size = PRUETH_SW_TOTAL_BUF_SIZE;
1831 	}
1832 
	/* NOTE: a firmware bug requires the buffer base to be 64 KB aligned */
1834 	prueth->msmcram.va =
1835 		(void __iomem *)gen_pool_alloc_algo(prueth->sram_pool,
1836 						    msmc_ram_size,
1837 						    gen_pool_first_fit_align,
1838 						    &gp_data);
1839 
1840 	if (!prueth->msmcram.va) {
1841 		ret = -ENOMEM;
1842 		dev_err(dev, "unable to allocate MSMC resource\n");
1843 		goto put_mem;
1844 	}
1845 	prueth->msmcram.pa = gen_pool_virt_to_phys(prueth->sram_pool,
1846 						   (unsigned long)prueth->msmcram.va);
1847 	prueth->msmcram.size = msmc_ram_size;
1848 	memset_io(prueth->msmcram.va, 0, msmc_ram_size);
1849 	dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa,
1850 		prueth->msmcram.va, prueth->msmcram.size);
1851 
1852 	prueth->iep0 = icss_iep_get_idx(np, 0);
1853 	if (IS_ERR(prueth->iep0)) {
1854 		ret = dev_err_probe(dev, PTR_ERR(prueth->iep0), "iep0 get failed\n");
1855 		prueth->iep0 = NULL;
1856 		goto free_pool;
1857 	}
1858 
1859 	prueth->iep1 = icss_iep_get_idx(np, 1);
1860 	if (IS_ERR(prueth->iep1)) {
1861 		ret = dev_err_probe(dev, PTR_ERR(prueth->iep1), "iep1 get failed\n");
1862 		goto put_iep0;
1863 	}
1864 
1865 	if (prueth->pdata.quirk_10m_link_issue) {
		/* Enable IEP1 for the firmware in 64-bit mode as a workaround
		 * for the 10M full-duplex link-detect issue under TX traffic.
		 */
1869 		icss_iep_init_fw(prueth->iep1);
1870 	}
1871 
1872 	/* Read EMAC firmware names from device tree */
1873 	icssg_read_firmware_names(np, prueth->icssg_emac_firmwares);
1874 
1875 	/* Generate other mode firmware names based on EMAC firmware names */
1876 	icssg_mode_firmware_names(dev, prueth->icssg_emac_firmwares,
1877 				  prueth->icssg_switch_firmwares, "eth", "sw");
1878 	icssg_mode_firmware_names(dev, prueth->icssg_emac_firmwares,
1879 				  prueth->icssg_hsr_firmwares, "eth", "hsr");
1880 	icssg_mode_firmware_names(dev, prueth->icssg_emac_firmwares,
1881 				  prueth->icssg_prp_firmwares, "eth", "prp");
1882 
1883 	spin_lock_init(&prueth->vtbl_lock);
1884 	spin_lock_init(&prueth->stats_lock);
1885 	/* setup netdev interfaces */
1886 	if (eth0_node) {
1887 		ret = prueth_netdev_init(prueth, eth0_node);
1888 		if (ret) {
1889 			dev_err_probe(dev, ret, "netdev init %s failed\n",
1890 				      eth0_node->name);
1891 			goto exit_iep;
1892 		}
1893 
1894 		prueth->emac[PRUETH_MAC0]->half_duplex =
1895 			of_property_read_bool(eth0_node, "ti,half-duplex-capable");
1896 
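		/* Timestamping uses IEP0 on both MACs; IEP1 only backs the
		 * 10M link firmware workaround above.
		 */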
1897 		prueth->emac[PRUETH_MAC0]->iep = prueth->iep0;
1898 	}
1899 
1900 	if (eth1_node) {
1901 		ret = prueth_netdev_init(prueth, eth1_node);
1902 		if (ret) {
1903 			dev_err_probe(dev, ret, "netdev init %s failed\n",
1904 				      eth1_node->name);
1905 			goto netdev_exit;
1906 		}
1907 
1908 		prueth->emac[PRUETH_MAC1]->half_duplex =
1909 			of_property_read_bool(eth1_node, "ti,half-duplex-capable");
1910 
1911 		prueth->emac[PRUETH_MAC1]->iep = prueth->iep0;
1912 	}
1913 
1914 	/* register the network devices */
1915 	if (eth0_node) {
1916 		ret = register_netdev(prueth->emac[PRUETH_MAC0]->ndev);
1917 		if (ret) {
			dev_err(dev, "can't register netdev for port MII0\n");
1919 			goto netdev_exit;
1920 		}
1921 
1922 		prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev;
1923 
1924 		ret = emac_phy_connect(prueth->emac[PRUETH_MAC0]);
1925 		if (ret) {
			dev_err(dev,
				"can't connect to MII0 PHY, error %d\n", ret);
1928 			goto netdev_unregister;
1929 		}
1930 		phy_attached_info(prueth->emac[PRUETH_MAC0]->ndev->phydev);
1931 	}
1932 
1933 	if (eth1_node) {
1934 		ret = register_netdev(prueth->emac[PRUETH_MAC1]->ndev);
1935 		if (ret) {
			dev_err(dev, "can't register netdev for port MII1\n");
1937 			goto netdev_unregister;
1938 		}
1939 
1940 		prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev;
1941 		ret = emac_phy_connect(prueth->emac[PRUETH_MAC1]);
1942 		if (ret) {
			dev_err(dev,
				"can't connect to MII1 PHY, error %d\n", ret);
1945 			goto netdev_unregister;
1946 		}
1947 		phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev);
1948 	}
1949 
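	/* In switch mode, register the switchdev notifiers and use the device
	 * name as the switch ID.
	 */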
1950 	if (prueth->is_switchmode_supported) {
1951 		ret = prueth_register_notifiers(prueth);
1952 		if (ret)
1953 			goto netdev_unregister;
1954 
1955 		sprintf(prueth->switch_id, "%s", dev_name(dev));
1956 	}
1957 
1958 	dev_info(dev, "TI PRU ethernet driver initialized: %s EMAC mode\n",
1959 		 (!eth0_node || !eth1_node) ? "single" : "dual");
1960 
1961 	if (eth1_node)
1962 		of_node_put(eth1_node);
1963 	if (eth0_node)
1964 		of_node_put(eth0_node);
1965 	return 0;
1966 
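	/* Error unwind: undo the probe steps above in reverse order */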
1967 netdev_unregister:
1968 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
1969 		if (!prueth->registered_netdevs[i])
1970 			continue;
1971 		if (prueth->emac[i]->ndev->phydev) {
1972 			phy_disconnect(prueth->emac[i]->ndev->phydev);
1973 			prueth->emac[i]->ndev->phydev = NULL;
1974 		}
1975 		unregister_netdev(prueth->registered_netdevs[i]);
1976 	}
1977 
1978 netdev_exit:
1979 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
1980 		eth_node = prueth->eth_node[i];
1981 		if (!eth_node)
1982 			continue;
1983 
1984 		prueth_netdev_exit(prueth, eth_node);
1985 	}
1986 
1987 exit_iep:
1988 	if (prueth->pdata.quirk_10m_link_issue)
1989 		icss_iep_exit_fw(prueth->iep1);
1990 	icss_iep_put(prueth->iep1);
1991 
1992 put_iep0:
1993 	icss_iep_put(prueth->iep0);
1994 	prueth->iep0 = NULL;
1995 	prueth->iep1 = NULL;
1996 
1997 free_pool:
1998 	gen_pool_free(prueth->sram_pool,
1999 		      (unsigned long)prueth->msmcram.va,
2000 		      prueth->msmcram.size);
2001 
2002 put_mem:
2003 	pruss_release_mem_region(prueth->pruss, &prueth->shram);
2004 
2005 put_pruss:
2006 	pruss_put(prueth->pruss);
2007 
2008 put_cores:
2009 	if (eth0_node || eth1_node) {
2010 		prueth_put_cores(prueth, ICSS_SLICE0);
2011 		of_node_put(eth0_node);
2012 
2013 		prueth_put_cores(prueth, ICSS_SLICE1);
2014 		of_node_put(eth1_node);
2015 	}
2016 
2017 	return ret;
2018 }
2019 
2020 static void prueth_remove(struct platform_device *pdev)
2021 {
2022 	struct prueth *prueth = platform_get_drvdata(pdev);
2023 	struct device_node *eth_node;
2024 	int i;
2025 
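	/* Tear down in reverse order of probe: switchdev notifiers and netdevs
	 * first, then IEPs, memory regions and the PRU cores.
	 */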
2026 	prueth_unregister_notifiers(prueth);
2027 
2028 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
2029 		if (!prueth->registered_netdevs[i])
2030 			continue;
2031 		phy_stop(prueth->emac[i]->ndev->phydev);
2032 		phy_disconnect(prueth->emac[i]->ndev->phydev);
2033 		prueth->emac[i]->ndev->phydev = NULL;
2034 		unregister_netdev(prueth->registered_netdevs[i]);
2035 	}
2036 
2037 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
2038 		eth_node = prueth->eth_node[i];
2039 		if (!eth_node)
2040 			continue;
2041 
2042 		prueth_netdev_exit(prueth, eth_node);
2043 	}
2044 
2045 	if (prueth->pdata.quirk_10m_link_issue)
2046 		icss_iep_exit_fw(prueth->iep1);
2047 
2048 	icss_iep_put(prueth->iep1);
2049 	icss_iep_put(prueth->iep0);
2050 
	gen_pool_free(prueth->sram_pool,
		      (unsigned long)prueth->msmcram.va,
		      prueth->msmcram.size);
2054 
2055 	pruss_release_mem_region(prueth->pruss, &prueth->shram);
2056 
2057 	pruss_put(prueth->pruss);
2058 
2059 	if (prueth->eth_node[PRUETH_MAC1])
2060 		prueth_put_cores(prueth, ICSS_SLICE1);
2061 
2062 	if (prueth->eth_node[PRUETH_MAC0])
2063 		prueth_put_cores(prueth, ICSS_SLICE0);
2064 }
2065 
2066 static const struct prueth_pdata am654_icssg_pdata = {
2067 	.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
2068 	.quirk_10m_link_issue = 1,
2069 	.switch_mode = 1,
2070 	.banked_ms_ram = 0,
2071 };
2072 
2073 static const struct prueth_pdata am64x_icssg_pdata = {
2074 	.fdqring_mode = K3_RINGACC_RING_MODE_RING,
2075 	.quirk_10m_link_issue = 1,
2076 	.switch_mode = 1,
2077 	.banked_ms_ram = 1,
2078 };
2079 
2080 static const struct of_device_id prueth_dt_match[] = {
2081 	{ .compatible = "ti,am654-icssg-prueth", .data = &am654_icssg_pdata },
2082 	{ .compatible = "ti,am642-icssg-prueth", .data = &am64x_icssg_pdata },
2083 	{ /* sentinel */ }
2084 };
2085 MODULE_DEVICE_TABLE(of, prueth_dt_match);
2086 
2087 static struct platform_driver prueth_driver = {
2088 	.probe = prueth_probe,
2089 	.remove = prueth_remove,
2090 	.driver = {
2091 		.name = "icssg-prueth",
2092 		.of_match_table = prueth_dt_match,
2093 		.pm = &prueth_dev_pm_ops,
2094 	},
2095 };
2096 module_platform_driver(prueth_driver);
2097 
2098 MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
2099 MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
2100 MODULE_DESCRIPTION("PRUSS ICSSG Ethernet Driver");
2101 MODULE_LICENSE("GPL");
2102