xref: /linux/drivers/net/ethernet/ti/icssg/icssg_prueth.c (revision aba74e639f8d76d29b94991615e33319d7371b63)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 /* Texas Instruments ICSSG Ethernet Driver
4  *
5  * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
6  *
7  */
8 
9 #include <linux/bitops.h>
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/dma/ti-cppi5.h>
14 #include <linux/etherdevice.h>
15 #include <linux/genalloc.h>
16 #include <linux/if_hsr.h>
17 #include <linux/if_vlan.h>
18 #include <linux/interrupt.h>
19 #include <linux/io-64-nonatomic-hi-lo.h>
20 #include <linux/kernel.h>
21 #include <linux/mfd/syscon.h>
22 #include <linux/module.h>
23 #include <linux/of.h>
24 #include <linux/of_mdio.h>
25 #include <linux/of_net.h>
26 #include <linux/platform_device.h>
27 #include <linux/phy.h>
28 #include <linux/property.h>
29 #include <linux/remoteproc/pruss.h>
30 #include <linux/regmap.h>
31 #include <linux/remoteproc.h>
32 #include <net/switchdev.h>
33 
34 #include "icssg_prueth.h"
35 #include "icssg_mii_rt.h"
36 #include "icssg_switchdev.h"
37 #include "../k3-cppi-desc-pool.h"
38 
39 #define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG Ethernet driver"
40 
41 #define DEFAULT_VID		1
42 #define DEFAULT_PORT_MASK	1
43 #define DEFAULT_UNTAG_MASK	1
44 
45 #define NETIF_PRUETH_HSR_OFFLOAD_FEATURES	(NETIF_F_HW_HSR_FWD | \
46 						 NETIF_F_HW_HSR_DUP | \
47 						 NETIF_F_HW_HSR_TAG_INS | \
48 						 NETIF_F_HW_HSR_TAG_RM)
49 
50 /* CTRLMMR_ICSSG_RGMII_CTRL register bits */
51 #define ICSSG_CTRL_RGMII_ID_MODE                BIT(24)
52 
53 static int emac_get_tx_ts(struct prueth_emac *emac,
54 			  struct emac_tx_ts_response *rsp)
55 {
56 	struct prueth *prueth = emac->prueth;
57 	int slice = prueth_emac_slice(emac);
58 	int addr;
59 
60 	addr = icssg_queue_pop(prueth, slice == 0 ?
61 			       ICSSG_TS_POP_SLICE0 : ICSSG_TS_POP_SLICE1);
62 	if (addr < 0)
63 		return addr;
64 
65 	memcpy_fromio(rsp, prueth->shram.va + addr, sizeof(*rsp));
66 	/* return the buffer back to the pool */
67 	icssg_queue_push(prueth, slice == 0 ?
68 			 ICSSG_TS_PUSH_SLICE0 : ICSSG_TS_PUSH_SLICE1, addr);
69 
70 	return 0;
71 }
72 
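/* Complete pending TX timestamp requests: each response popped from the
 * firmware carries a cookie that indexes the skb saved in emac->tx_ts_skb[],
 * so the matching skb gets its hardware timestamp via skb_tstamp_tx().
 */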
73 static void tx_ts_work(struct prueth_emac *emac)
74 {
75 	struct skb_shared_hwtstamps ssh;
76 	struct emac_tx_ts_response tsr;
77 	struct sk_buff *skb;
78 	int ret = 0;
79 	u32 hi_sw;
80 	u64 ns;
81 
82 	/* There may be more than one pending request */
83 	while (1) {
84 		ret = emac_get_tx_ts(emac, &tsr);
85 		if (ret) /* nothing more */
86 			break;
87 
88 		if (tsr.cookie >= PRUETH_MAX_TX_TS_REQUESTS ||
89 		    !emac->tx_ts_skb[tsr.cookie]) {
90 			netdev_err(emac->ndev, "Invalid TX TS cookie 0x%x\n",
91 				   tsr.cookie);
92 			break;
93 		}
94 
95 		skb = emac->tx_ts_skb[tsr.cookie];
96 		emac->tx_ts_skb[tsr.cookie] = NULL;	/* free slot */
97 		if (!skb) {
98 			netdev_err(emac->ndev, "Driver Bug! got NULL skb\n");
99 			break;
100 		}
101 
102 		hi_sw = readl(emac->prueth->shram.va +
103 			      TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
104 		ns = icssg_ts_to_ns(hi_sw, tsr.hi_ts, tsr.lo_ts,
105 				    IEP_DEFAULT_CYCLE_TIME_NS);
106 
107 		memset(&ssh, 0, sizeof(ssh));
108 		ssh.hwtstamp = ns_to_ktime(ns);
109 
110 		skb_tstamp_tx(skb, &ssh);
111 		dev_consume_skb_any(skb);
112 
113 		if (atomic_dec_and_test(&emac->tx_ts_pending))	/* no more? */
114 			break;
115 	}
116 }
117 
118 static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id)
119 {
120 	struct prueth_emac *emac = dev_id;
121 
122 	/* currently only TX timestamp is being returned */
123 	tx_ts_work(emac);
124 
125 	return IRQ_HANDLED;
126 }
127 
128 static struct icssg_firmwares icssg_hsr_firmwares[] = {
129 	{
130 		.pru = "ti-pruss/am65x-sr2-pru0-pruhsr-fw.elf",
131 		.rtu = "ti-pruss/am65x-sr2-rtu0-pruhsr-fw.elf",
132 		.txpru = "ti-pruss/am65x-sr2-txpru0-pruhsr-fw.elf",
133 	},
134 	{
135 		.pru = "ti-pruss/am65x-sr2-pru1-pruhsr-fw.elf",
136 		.rtu = "ti-pruss/am65x-sr2-rtu1-pruhsr-fw.elf",
137 		.txpru = "ti-pruss/am65x-sr2-txpru1-pruhsr-fw.elf",
138 	}
139 };
140 
141 static struct icssg_firmwares icssg_switch_firmwares[] = {
142 	{
143 		.pru = "ti-pruss/am65x-sr2-pru0-prusw-fw.elf",
144 		.rtu = "ti-pruss/am65x-sr2-rtu0-prusw-fw.elf",
145 		.txpru = "ti-pruss/am65x-sr2-txpru0-prusw-fw.elf",
146 	},
147 	{
148 		.pru = "ti-pruss/am65x-sr2-pru1-prusw-fw.elf",
149 		.rtu = "ti-pruss/am65x-sr2-rtu1-prusw-fw.elf",
150 		.txpru = "ti-pruss/am65x-sr2-txpru1-prusw-fw.elf",
151 	}
152 };
153 
154 static struct icssg_firmwares icssg_emac_firmwares[] = {
155 	{
156 		.pru = "ti-pruss/am65x-sr2-pru0-prueth-fw.elf",
157 		.rtu = "ti-pruss/am65x-sr2-rtu0-prueth-fw.elf",
158 		.txpru = "ti-pruss/am65x-sr2-txpru0-prueth-fw.elf",
159 	},
160 	{
161 		.pru = "ti-pruss/am65x-sr2-pru1-prueth-fw.elf",
162 		.rtu = "ti-pruss/am65x-sr2-rtu1-prueth-fw.elf",
163 		.txpru = "ti-pruss/am65x-sr2-txpru1-prueth-fw.elf",
164 	}
165 };
166 
167 static int prueth_start(struct rproc *rproc, const char *fw_name)
168 {
169 	int ret;
170 
171 	ret = rproc_set_firmware(rproc, fw_name);
172 	if (ret)
173 		return ret;
174 	return rproc_boot(rproc);
175 }
176 
177 static void prueth_shutdown(struct rproc *rproc)
178 {
179 	rproc_shutdown(rproc);
180 }
181 
182 static int prueth_emac_start(struct prueth *prueth)
183 {
184 	struct icssg_firmwares *firmwares;
185 	struct device *dev = prueth->dev;
186 	int ret, slice;
187 
188 	if (prueth->is_switch_mode)
189 		firmwares = icssg_switch_firmwares;
190 	else if (prueth->is_hsr_offload_mode)
191 		firmwares = icssg_hsr_firmwares;
192 	else
193 		firmwares = icssg_emac_firmwares;
194 
195 	for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
196 		ret = prueth_start(prueth->pru[slice], firmwares[slice].pru);
197 		if (ret) {
198 			dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
199 			goto unwind_slices;
200 		}
201 
202 		ret = prueth_start(prueth->rtu[slice], firmwares[slice].rtu);
203 		if (ret) {
204 			dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
205 			rproc_shutdown(prueth->pru[slice]);
206 			goto unwind_slices;
207 		}
208 
209 		ret = prueth_start(prueth->txpru[slice], firmwares[slice].txpru);
210 		if (ret) {
211 			dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
212 			rproc_shutdown(prueth->rtu[slice]);
213 			rproc_shutdown(prueth->pru[slice]);
214 			goto unwind_slices;
215 		}
216 	}
217 
218 	return 0;
219 
220 unwind_slices:
221 	while (--slice >= 0) {
222 		prueth_shutdown(prueth->txpru[slice]);
223 		prueth_shutdown(prueth->rtu[slice]);
224 		prueth_shutdown(prueth->pru[slice]);
225 	}
226 
227 	return ret;
228 }
229 
230 static void prueth_emac_stop(struct prueth *prueth)
231 {
232 	int slice;
233 
234 	for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
235 		prueth_shutdown(prueth->txpru[slice]);
236 		prueth_shutdown(prueth->rtu[slice]);
237 		prueth_shutdown(prueth->pru[slice]);
238 	}
239 }
240 
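/* One-time start of the resources shared by both ports: clear MSMC and
 * shared RAM, program the classifier defaults, configure each slice, boot
 * the PRU/RTU/TX_PRU firmware and initialize the IEP. Called when the first
 * port is opened and from prueth_emac_restart().
 */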
241 static int prueth_emac_common_start(struct prueth *prueth)
242 {
243 	struct prueth_emac *emac;
244 	int ret = 0;
245 	int slice;
246 
247 	if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
248 		return -EINVAL;
249 
250 	/* clear SMEM and MSMC settings for all slices */
251 	memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
252 	memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
253 
254 	icssg_class_default(prueth->miig_rt, ICSS_SLICE0, 0, false);
255 	icssg_class_default(prueth->miig_rt, ICSS_SLICE1, 0, false);
256 
257 	if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
258 		icssg_init_fw_offload_mode(prueth);
259 	else
260 		icssg_init_emac_mode(prueth);
261 
262 	for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
263 		emac = prueth->emac[slice];
264 		if (!emac)
265 			continue;
266 		ret = icssg_config(prueth, emac, slice);
267 		if (ret)
268 			goto disable_class;
269 	}
270 
271 	ret = prueth_emac_start(prueth);
272 	if (ret)
273 		goto disable_class;
274 
275 	emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
276 	       prueth->emac[ICSS_SLICE1];
277 	ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
278 			    emac, IEP_DEFAULT_CYCLE_TIME_NS);
279 	if (ret) {
280 		dev_err(prueth->dev, "Failed to initialize IEP module\n");
281 		goto stop_pruss;
282 	}
283 
284 	return 0;
285 
286 stop_pruss:
287 	prueth_emac_stop(prueth);
288 
289 disable_class:
290 	icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
291 	icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);
292 
293 	return ret;
294 }
295 
296 static int prueth_emac_common_stop(struct prueth *prueth)
297 {
298 	struct prueth_emac *emac;
299 
300 	if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
301 		return -EINVAL;
302 
303 	icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
304 	icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);
305 
306 	prueth_emac_stop(prueth);
307 
308 	emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
309 	       prueth->emac[ICSS_SLICE1];
310 	icss_iep_exit(emac->iep);
311 
312 	return 0;
313 }
314 
315 /* called back by the PHY layer if there is a change in the link state of the hw port */
316 static void emac_adjust_link(struct net_device *ndev)
317 {
318 	struct prueth_emac *emac = netdev_priv(ndev);
319 	struct phy_device *phydev = ndev->phydev;
320 	struct prueth *prueth = emac->prueth;
321 	bool new_state = false;
322 	unsigned long flags;
323 
324 	if (phydev->link) {
325 		/* check the mode of operation - full/half duplex */
326 		if (phydev->duplex != emac->duplex) {
327 			new_state = true;
328 			emac->duplex = phydev->duplex;
329 		}
330 		if (phydev->speed != emac->speed) {
331 			new_state = true;
332 			emac->speed = phydev->speed;
333 		}
334 		if (!emac->link) {
335 			new_state = true;
336 			emac->link = 1;
337 		}
338 	} else if (emac->link) {
339 		new_state = true;
340 		emac->link = 0;
341 
342 		/* f/w should support 100 & 1000 */
343 		emac->speed = SPEED_1000;
344 
345 		/* half duplex may not be supported by f/w */
346 		emac->duplex = DUPLEX_FULL;
347 	}
348 
349 	if (new_state) {
350 		phy_print_status(phydev);
351 
352 		/* update RGMII and MII configuration based on PHY negotiated
353 		 * values
354 		 */
355 		if (emac->link) {
356 			if (emac->duplex == DUPLEX_HALF)
357 				icssg_config_half_duplex(emac);
358 			/* Set the RGMII cfg for gig en and full duplex */
359 			icssg_update_rgmii_cfg(prueth->miig_rt, emac);
360 
361 			/* update the Tx IPG based on 100M/1G speed */
362 			spin_lock_irqsave(&emac->lock, flags);
363 			icssg_config_ipg(emac);
364 			spin_unlock_irqrestore(&emac->lock, flags);
365 			icssg_config_set_speed(emac);
366 			icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
367 
368 		} else {
369 			icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
370 		}
371 	}
372 
373 	if (emac->link) {
374 		/* reactivate the transmit queue */
375 		netif_tx_wake_all_queues(ndev);
376 	} else {
377 		netif_tx_stop_all_queues(ndev);
378 		prueth_cleanup_tx_ts(emac);
379 	}
380 }
381 
382 static enum hrtimer_restart emac_rx_timer_callback(struct hrtimer *timer)
383 {
384 	struct prueth_emac *emac =
385 			container_of(timer, struct prueth_emac, rx_hrtimer);
386 	int rx_flow = PRUETH_RX_FLOW_DATA;
387 
388 	enable_irq(emac->rx_chns.irq[rx_flow]);
389 	return HRTIMER_NORESTART;
390 }
391 
392 static int emac_phy_connect(struct prueth_emac *emac)
393 {
394 	struct prueth *prueth = emac->prueth;
395 	struct net_device *ndev = emac->ndev;
396 	/* connect PHY */
397 	ndev->phydev = of_phy_connect(emac->ndev, emac->phy_node,
398 				      &emac_adjust_link, 0,
399 				      emac->phy_if);
400 	if (!ndev->phydev) {
401 		dev_err(prueth->dev, "couldn't connect to phy %s\n",
402 			emac->phy_node->full_name);
403 		return -ENODEV;
404 	}
405 
406 	if (!emac->half_duplex) {
407 		dev_dbg(prueth->dev, "half duplex mode is not supported\n");
408 		phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
409 		phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
410 	}
411 
412 	/* remove unsupported modes */
413 	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
414 	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
415 	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
416 
417 	if (emac->phy_if == PHY_INTERFACE_MODE_MII)
418 		phy_set_max_speed(ndev->phydev, SPEED_100);
419 
420 	return 0;
421 }
422 
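/* Assemble the 64-bit PTP time from three pieces: the firmware-maintained
 * hi rollover count, the IEP count_hi plus a firmware offset, and the IEP
 * count_lo. The hi values are re-read until they are stable so a rollover
 * between the individual reads cannot produce a torn timestamp.
 */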
423 static u64 prueth_iep_gettime(void *clockops_data, struct ptp_system_timestamp *sts)
424 {
425 	u32 hi_rollover_count, hi_rollover_count_r;
426 	struct prueth_emac *emac = clockops_data;
427 	struct prueth *prueth = emac->prueth;
428 	void __iomem *fw_hi_r_count_addr;
429 	void __iomem *fw_count_hi_addr;
430 	u32 iepcount_hi, iepcount_hi_r;
431 	unsigned long flags;
432 	u32 iepcount_lo;
433 	u64 ts = 0;
434 
435 	fw_count_hi_addr = prueth->shram.va + TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET;
436 	fw_hi_r_count_addr = prueth->shram.va + TIMESYNC_FW_WC_HI_ROLLOVER_COUNT_OFFSET;
437 
438 	local_irq_save(flags);
439 	do {
440 		iepcount_hi = icss_iep_get_count_hi(emac->iep);
441 		iepcount_hi += readl(fw_count_hi_addr);
442 		hi_rollover_count = readl(fw_hi_r_count_addr);
443 		ptp_read_system_prets(sts);
444 		iepcount_lo = icss_iep_get_count_low(emac->iep);
445 		ptp_read_system_postts(sts);
446 
447 		iepcount_hi_r = icss_iep_get_count_hi(emac->iep);
448 		iepcount_hi_r += readl(fw_count_hi_addr);
449 		hi_rollover_count_r = readl(fw_hi_r_count_addr);
450 	} while ((iepcount_hi_r != iepcount_hi) ||
451 		 (hi_rollover_count != hi_rollover_count_r));
452 	local_irq_restore(flags);
453 
454 	ts = ((u64)hi_rollover_count) << 23 | iepcount_hi;
455 	ts = ts * (u64)IEP_DEFAULT_CYCLE_TIME_NS + iepcount_lo;
456 
457 	return ts;
458 }
459 
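/* Set the PTP time by handing a setclock descriptor to the firmware via
 * shared RAM: write the descriptor, set the request byte, then poll the
 * acknowledgment byte (the firmware should complete within a few ms).
 */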
460 static void prueth_iep_settime(void *clockops_data, u64 ns)
461 {
462 	struct icssg_setclock_desc __iomem *sc_descp;
463 	struct prueth_emac *emac = clockops_data;
464 	struct icssg_setclock_desc sc_desc;
465 	u64 cyclecount;
466 	u32 cycletime;
467 	int timeout;
468 
469 	sc_descp = emac->prueth->shram.va + TIMESYNC_FW_WC_SETCLOCK_DESC_OFFSET;
470 
471 	cycletime = IEP_DEFAULT_CYCLE_TIME_NS;
472 	cyclecount = ns / cycletime;
473 
474 	memset(&sc_desc, 0, sizeof(sc_desc));
475 	sc_desc.margin = cycletime - 1000;
476 	sc_desc.cyclecounter0_set = cyclecount & GENMASK(31, 0);
477 	sc_desc.cyclecounter1_set = (cyclecount & GENMASK(63, 32)) >> 32;
478 	sc_desc.iepcount_set = ns % cycletime;
479 	/* Count from 0 to (cycle time) - emac->iep->def_inc */
480 	sc_desc.CMP0_current = cycletime - emac->iep->def_inc;
481 
482 	memcpy_toio(sc_descp, &sc_desc, sizeof(sc_desc));
483 
484 	writeb(1, &sc_descp->request);
485 
486 	timeout = 5;	/* fw should take 2-3 ms */
487 	while (timeout--) {
488 		if (readb(&sc_descp->acknowledgment))
489 			return;
490 
491 		usleep_range(500, 1000);
492 	}
493 
494 	dev_err(emac->prueth->dev, "settime timeout\n");
495 }
496 
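/* Program a periodic output: the requested period is split into a reduction
 * factor (whole IEP cycles) and an offset within the cycle that becomes the
 * CMP value returned via *cmp; the start time is rounded to the next second
 * of the cycle counter and written to shared RAM for the firmware.
 */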
497 static int prueth_perout_enable(void *clockops_data,
498 				struct ptp_perout_request *req, int on,
499 				u64 *cmp)
500 {
501 	struct prueth_emac *emac = clockops_data;
502 	u32 reduction_factor = 0, offset = 0;
503 	struct timespec64 ts;
504 	u64 current_cycle;
505 	u64 start_offset;
506 	u64 ns_period;
507 
508 	if (!on)
509 		return 0;
510 
511 	/* Any firmware specific stuff for PPS/PEROUT handling */
512 	ts.tv_sec = req->period.sec;
513 	ts.tv_nsec = req->period.nsec;
514 	ns_period = timespec64_to_ns(&ts);
515 
516 	/* f/w doesn't support period less than cycle time */
517 	if (ns_period < IEP_DEFAULT_CYCLE_TIME_NS)
518 		return -ENXIO;
519 
520 	reduction_factor = ns_period / IEP_DEFAULT_CYCLE_TIME_NS;
521 	offset = ns_period % IEP_DEFAULT_CYCLE_TIME_NS;
522 
523 	/* f/w requires at least 1uS within a cycle so CMP
524 	 * can trigger after SYNC is enabled
525 	 */
526 	if (offset < 5 * NSEC_PER_USEC)
527 		offset = 5 * NSEC_PER_USEC;
528 
529 	/* if offset is close to cycle time then we will miss
530 	 * the CMP event for last tick when IEP rolls over.
531 	 * In normal mode, IEP tick is 4ns.
532 	 * In slow compensation it could be 0ns or 8ns at
533 	 * every slow compensation cycle.
534 	 */
535 	if (offset > IEP_DEFAULT_CYCLE_TIME_NS - 8)
536 		offset = IEP_DEFAULT_CYCLE_TIME_NS - 8;
537 
538 	/* we're in shadow mode so need to set upper 32-bits */
539 	*cmp = (u64)offset << 32;
540 
541 	writel(reduction_factor, emac->prueth->shram.va +
542 		TIMESYNC_FW_WC_SYNCOUT_REDUCTION_FACTOR_OFFSET);
543 
544 	current_cycle = icssg_read_time(emac->prueth->shram.va +
545 					TIMESYNC_FW_WC_CYCLECOUNT_OFFSET);
546 
547 	/* Rounding of current_cycle count to next second */
548 	start_offset = roundup(current_cycle, MSEC_PER_SEC);
549 
550 	hi_lo_writeq(start_offset, emac->prueth->shram.va +
551 		     TIMESYNC_FW_WC_SYNCOUT_START_TIME_CYCLECOUNT_OFFSET);
552 
553 	return 0;
554 }
555 
556 const struct icss_iep_clockops prueth_iep_clockops = {
557 	.settime = prueth_iep_settime,
558 	.gettime = prueth_iep_gettime,
559 	.perout_enable = prueth_perout_enable,
560 };
561 
562 static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr)
563 {
564 	struct prueth_emac *emac = netdev_priv(ndev);
565 	int port_mask = BIT(emac->port_id);
566 
567 	port_mask |= icssg_fdb_lookup(emac, addr, 0);
568 	icssg_fdb_add_del(emac, addr, 0, port_mask, true);
569 	icssg_vtbl_modify(emac, 0, port_mask, port_mask, true);
570 
571 	return 0;
572 }
573 
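/* Remove this port from the FDB and VLAN entries for the multicast address;
 * if the other port still references the address, re-add the entry with the
 * remaining port mask so its reception is unaffected.
 */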
574 static int icssg_prueth_del_mcast(struct net_device *ndev, const u8 *addr)
575 {
576 	struct prueth_emac *emac = netdev_priv(ndev);
577 	int port_mask = BIT(emac->port_id);
578 	int other_port_mask;
579 
580 	other_port_mask = port_mask ^ icssg_fdb_lookup(emac, addr, 0);
581 
582 	icssg_fdb_add_del(emac, addr, 0, port_mask, false);
583 	icssg_vtbl_modify(emac, 0, port_mask, port_mask, false);
584 
585 	if (other_port_mask) {
586 		icssg_fdb_add_del(emac, addr, 0, other_port_mask, true);
587 		icssg_vtbl_modify(emac, 0, other_port_mask, other_port_mask, true);
588 	}
589 
590 	return 0;
591 }
592 
593 static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
594 {
595 	struct prueth_emac *emac = netdev_priv(ndev);
596 	struct prueth *prueth = emac->prueth;
597 
598 	icssg_fdb_add_del(emac, addr, prueth->default_vlan,
599 			  ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
600 			  ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
601 			  ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
602 			  ICSSG_FDB_ENTRY_BLOCK, true);
603 
604 	icssg_vtbl_modify(emac, emac->port_vlan, BIT(emac->port_id),
605 			  BIT(emac->port_id), true);
606 	return 0;
607 }
608 
609 static int icssg_prueth_hsr_del_mcast(struct net_device *ndev, const u8 *addr)
610 {
611 	struct prueth_emac *emac = netdev_priv(ndev);
612 	struct prueth *prueth = emac->prueth;
613 
614 	icssg_fdb_add_del(emac, addr, prueth->default_vlan,
615 			  ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
616 			  ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
617 			  ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
618 			  ICSSG_FDB_ENTRY_BLOCK, false);
619 
620 	return 0;
621 }
622 
623 /**
624  * emac_ndo_open - EMAC device open
625  * @ndev: network adapter device
626  *
627  * Called when the system wants to start the interface.
628  *
629  * Return: 0 for a successful open, or appropriate error code
630  */
631 static int emac_ndo_open(struct net_device *ndev)
632 {
633 	struct prueth_emac *emac = netdev_priv(ndev);
634 	int ret, i, num_data_chn = emac->tx_ch_num;
635 	struct icssg_flow_cfg __iomem *flow_cfg;
636 	struct prueth *prueth = emac->prueth;
637 	int slice = prueth_emac_slice(emac);
638 	struct device *dev = prueth->dev;
639 	int max_rx_flows;
640 	int rx_flow;
641 
642 	/* set the h/w MAC as the user might have re-configured it */
643 	ether_addr_copy(emac->mac_addr, ndev->dev_addr);
644 
645 	icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
646 	icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
647 
648 	/* Notify the stack of the actual queue counts. */
649 	ret = netif_set_real_num_tx_queues(ndev, num_data_chn);
650 	if (ret) {
651 		dev_err(dev, "cannot set real number of tx queues\n");
652 		return ret;
653 	}
654 
655 	init_completion(&emac->cmd_complete);
656 	ret = prueth_init_tx_chns(emac);
657 	if (ret) {
658 		dev_err(dev, "failed to init tx channel: %d\n", ret);
659 		return ret;
660 	}
661 
662 	max_rx_flows = PRUETH_MAX_RX_FLOWS;
663 	ret = prueth_init_rx_chns(emac, &emac->rx_chns, "rx",
664 				  max_rx_flows, PRUETH_MAX_RX_DESC);
665 	if (ret) {
666 		dev_err(dev, "failed to init rx channel: %d\n", ret);
667 		goto cleanup_tx;
668 	}
669 
670 	ret = prueth_ndev_add_tx_napi(emac);
671 	if (ret)
672 		goto cleanup_rx;
673 
674 	/* we use only the highest priority flow for now i.e. @irq[3] */
675 	rx_flow = PRUETH_RX_FLOW_DATA;
676 	ret = request_irq(emac->rx_chns.irq[rx_flow], prueth_rx_irq,
677 			  IRQF_TRIGGER_HIGH, dev_name(dev), emac);
678 	if (ret) {
679 		dev_err(dev, "unable to request RX IRQ\n");
680 		goto cleanup_napi;
681 	}
682 
683 	if (!prueth->emacs_initialized) {
684 		ret = prueth_emac_common_start(prueth);
685 		if (ret)
686 			goto free_rx_irq;
687 	}
688 
689 	flow_cfg = emac->dram.va + ICSSG_CONFIG_OFFSET + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET;
690 	writew(emac->rx_flow_id_base, &flow_cfg->rx_base_flow);
691 	ret = emac_fdb_flow_id_updated(emac);
692 
693 	if (ret) {
694 		netdev_err(ndev, "Failed to update Rx Flow ID %d", ret);
695 		goto stop;
696 	}
697 
698 	icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
699 
700 	ret = request_threaded_irq(emac->tx_ts_irq, NULL, prueth_tx_ts_irq,
701 				   IRQF_ONESHOT, dev_name(dev), emac);
702 	if (ret)
703 		goto stop;
704 
705 	/* Prepare RX */
706 	ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
707 	if (ret)
708 		goto free_tx_ts_irq;
709 
710 	ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
711 	if (ret)
712 		goto reset_rx_chn;
713 
714 	for (i = 0; i < emac->tx_ch_num; i++) {
715 		ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
716 		if (ret)
717 			goto reset_tx_chan;
718 	}
719 
720 	/* Enable NAPI in Tx and Rx direction */
721 	for (i = 0; i < emac->tx_ch_num; i++)
722 		napi_enable(&emac->tx_chns[i].napi_tx);
723 	napi_enable(&emac->napi_rx);
724 
725 	/* start PHY */
726 	phy_start(ndev->phydev);
727 
728 	prueth->emacs_initialized++;
729 
730 	queue_work(system_long_wq, &emac->stats_work.work);
731 
732 	return 0;
733 
734 reset_tx_chan:
735 	/* Since the interface is not yet up, there wouldn't be
736 	 * any SKB for completion. So pass false for free_skb.
737 	 */
738 	prueth_reset_tx_chan(emac, i, false);
739 reset_rx_chn:
740 	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
741 free_tx_ts_irq:
742 	free_irq(emac->tx_ts_irq, emac);
743 stop:
744 	if (!prueth->emacs_initialized)
745 		prueth_emac_common_stop(prueth);
746 free_rx_irq:
747 	free_irq(emac->rx_chns.irq[rx_flow], emac);
748 cleanup_napi:
749 	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
750 cleanup_rx:
751 	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
752 cleanup_tx:
753 	prueth_cleanup_tx_chns(emac);
754 
755 	return ret;
756 }
757 
758 /**
759  * emac_ndo_stop - EMAC device stop
760  * @ndev: network adapter device
761  *
762  * Called when the system wants to stop or bring down the interface.
763  *
764  * Return: Always 0 (Success)
765  */
766 static int emac_ndo_stop(struct net_device *ndev)
767 {
768 	struct prueth_emac *emac = netdev_priv(ndev);
769 	struct prueth *prueth = emac->prueth;
770 	int rx_flow = PRUETH_RX_FLOW_DATA;
771 	int max_rx_flows;
772 	int ret, i;
773 
774 	/* inform the upper layers. */
775 	netif_tx_stop_all_queues(ndev);
776 
777 	/* block packets from wire */
778 	if (ndev->phydev)
779 		phy_stop(ndev->phydev);
780 
781 	if (emac->prueth->is_hsr_offload_mode)
782 		__dev_mc_unsync(ndev, icssg_prueth_hsr_del_mcast);
783 	else
784 		__dev_mc_unsync(ndev, icssg_prueth_del_mcast);
785 
786 	atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
787 	/* ensure new tdown_cnt value is visible */
788 	smp_mb__after_atomic();
789 	/* tear down and disable UDMA channels */
790 	reinit_completion(&emac->tdown_complete);
791 	for (i = 0; i < emac->tx_ch_num; i++)
792 		k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);
793 
794 	ret = wait_for_completion_timeout(&emac->tdown_complete,
795 					  msecs_to_jiffies(1000));
796 	if (!ret)
797 		netdev_err(ndev, "tx teardown timeout\n");
798 
799 	prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
800 	for (i = 0; i < emac->tx_ch_num; i++) {
801 		napi_disable(&emac->tx_chns[i].napi_tx);
802 		hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer);
803 	}
804 
805 	max_rx_flows = PRUETH_MAX_RX_FLOWS;
806 	k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
807 
808 	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
809 
810 	napi_disable(&emac->napi_rx);
811 	hrtimer_cancel(&emac->rx_hrtimer);
812 
813 	cancel_work_sync(&emac->rx_mode_work);
814 
815 	/* Destroying the queued work in ndo_stop() */
816 	cancel_delayed_work_sync(&emac->stats_work);
817 
818 	/* stop PRUs */
819 	if (prueth->emacs_initialized == 1)
820 		prueth_emac_common_stop(prueth);
821 
822 	free_irq(emac->tx_ts_irq, emac);
823 
824 	free_irq(emac->rx_chns.irq[rx_flow], emac);
825 	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
826 
827 	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
828 	prueth_cleanup_tx_chns(emac);
829 
830 	prueth->emacs_initialized--;
831 
832 	return 0;
833 }
834 
835 static void emac_ndo_set_rx_mode_work(struct work_struct *work)
836 {
837 	struct prueth_emac *emac = container_of(work, struct prueth_emac, rx_mode_work);
838 	struct net_device *ndev = emac->ndev;
839 	bool promisc, allmulti;
840 
841 	if (!netif_running(ndev))
842 		return;
843 
844 	promisc = ndev->flags & IFF_PROMISC;
845 	allmulti = ndev->flags & IFF_ALLMULTI;
846 	icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_DISABLE);
847 	icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_DISABLE);
848 
849 	if (promisc) {
850 		icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_ENABLE);
851 		icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
852 		return;
853 	}
854 
855 	if (allmulti) {
856 		icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
857 		return;
858 	}
859 
860 	if (emac->prueth->is_hsr_offload_mode)
861 		__dev_mc_sync(ndev, icssg_prueth_hsr_add_mcast,
862 			      icssg_prueth_hsr_del_mcast);
863 	else
864 		__dev_mc_sync(ndev, icssg_prueth_add_mcast,
865 			      icssg_prueth_del_mcast);
866 }
867 
868 /**
869  * emac_ndo_set_rx_mode - EMAC set receive mode function
870  * @ndev: The EMAC network adapter
871  *
872  * Called when the system wants to set the receive mode of the device.
873  *
874  */
875 static void emac_ndo_set_rx_mode(struct net_device *ndev)
876 {
877 	struct prueth_emac *emac = netdev_priv(ndev);
878 
879 	queue_work(emac->cmd_wq, &emac->rx_mode_work);
880 }
881 
882 static netdev_features_t emac_ndo_fix_features(struct net_device *ndev,
883 					       netdev_features_t features)
884 {
885 	/* hsr tag insertion offload and hsr dup offload are tightly coupled in
886 	 * firmware implementation. Both these features need to be enabled /
887 	 * disabled together.
888 	 */
889 	if (!(ndev->features & (NETIF_F_HW_HSR_DUP | NETIF_F_HW_HSR_TAG_INS)))
890 		if ((features & NETIF_F_HW_HSR_DUP) ||
891 		    (features & NETIF_F_HW_HSR_TAG_INS))
892 			features |= NETIF_F_HW_HSR_DUP |
893 				    NETIF_F_HW_HSR_TAG_INS;
894 
895 	if ((ndev->features & NETIF_F_HW_HSR_DUP) ||
896 	    (ndev->features & NETIF_F_HW_HSR_TAG_INS))
897 		if (!(features & NETIF_F_HW_HSR_DUP) ||
898 		    !(features & NETIF_F_HW_HSR_TAG_INS))
899 			features &= ~(NETIF_F_HW_HSR_DUP |
900 				      NETIF_F_HW_HSR_TAG_INS);
901 
902 	return features;
903 }
904 
905 static int emac_ndo_vlan_rx_add_vid(struct net_device *ndev,
906 				    __be16 proto, u16 vid)
907 {
908 	struct prueth_emac *emac = netdev_priv(ndev);
909 	struct prueth *prueth = emac->prueth;
910 	int untag_mask = 0;
911 	int port_mask;
912 
913 	if (prueth->is_hsr_offload_mode) {
914 		port_mask = BIT(PRUETH_PORT_HOST) | BIT(emac->port_id);
915 		untag_mask = 0;
916 
917 		netdev_dbg(emac->ndev, "VID add vid:%u port_mask:%X untag_mask %X\n",
918 			   vid, port_mask, untag_mask);
919 
920 		icssg_vtbl_modify(emac, vid, port_mask, untag_mask, true);
921 		icssg_set_pvid(emac->prueth, vid, emac->port_id);
922 	}
923 	return 0;
924 }
925 
926 static int emac_ndo_vlan_rx_del_vid(struct net_device *ndev,
927 				    __be16 proto, u16 vid)
928 {
929 	struct prueth_emac *emac = netdev_priv(ndev);
930 	struct prueth *prueth = emac->prueth;
931 	int untag_mask = 0;
932 	int port_mask;
933 
934 	if (prueth->is_hsr_offload_mode) {
935 		port_mask = BIT(PRUETH_PORT_HOST);
936 		untag_mask = 0;
937 
938 		netdev_dbg(emac->ndev, "VID del vid:%u port_mask:%X untag_mask  %X\n",
939 			   vid, port_mask, untag_mask);
940 
941 		icssg_vtbl_modify(emac, vid, port_mask, untag_mask, false);
942 	}
943 	return 0;
944 }
945 
946 static const struct net_device_ops emac_netdev_ops = {
947 	.ndo_open = emac_ndo_open,
948 	.ndo_stop = emac_ndo_stop,
949 	.ndo_start_xmit = icssg_ndo_start_xmit,
950 	.ndo_set_mac_address = eth_mac_addr,
951 	.ndo_validate_addr = eth_validate_addr,
952 	.ndo_tx_timeout = icssg_ndo_tx_timeout,
953 	.ndo_set_rx_mode = emac_ndo_set_rx_mode,
954 	.ndo_eth_ioctl = icssg_ndo_ioctl,
955 	.ndo_get_stats64 = icssg_ndo_get_stats64,
956 	.ndo_get_phys_port_name = icssg_ndo_get_phys_port_name,
957 	.ndo_fix_features = emac_ndo_fix_features,
958 	.ndo_vlan_rx_add_vid = emac_ndo_vlan_rx_add_vid,
959 	.ndo_vlan_rx_kill_vid = emac_ndo_vlan_rx_del_vid,
960 };
961 
962 static int prueth_netdev_init(struct prueth *prueth,
963 			      struct device_node *eth_node)
964 {
965 	int ret, num_tx_chn = PRUETH_MAX_TX_QUEUES;
966 	struct prueth_emac *emac;
967 	struct net_device *ndev;
968 	enum prueth_port port;
969 	const char *irq_name;
970 	enum prueth_mac mac;
971 
972 	port = prueth_node_port(eth_node);
973 	if (port == PRUETH_PORT_INVALID)
974 		return -EINVAL;
975 
976 	mac = prueth_node_mac(eth_node);
977 	if (mac == PRUETH_MAC_INVALID)
978 		return -EINVAL;
979 
980 	ndev = alloc_etherdev_mq(sizeof(*emac), num_tx_chn);
981 	if (!ndev)
982 		return -ENOMEM;
983 
984 	emac = netdev_priv(ndev);
985 	emac->prueth = prueth;
986 	emac->ndev = ndev;
987 	emac->port_id = port;
988 	emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
989 	if (!emac->cmd_wq) {
990 		ret = -ENOMEM;
991 		goto free_ndev;
992 	}
993 	INIT_WORK(&emac->rx_mode_work, emac_ndo_set_rx_mode_work);
994 
995 	INIT_DELAYED_WORK(&emac->stats_work, icssg_stats_work_handler);
996 
997 	ret = pruss_request_mem_region(prueth->pruss,
998 				       port == PRUETH_PORT_MII0 ?
999 				       PRUSS_MEM_DRAM0 : PRUSS_MEM_DRAM1,
1000 				       &emac->dram);
1001 	if (ret) {
1002 		dev_err(prueth->dev, "unable to get DRAM: %d\n", ret);
1003 		ret = -ENOMEM;
1004 		goto free_wq;
1005 	}
1006 
1007 	emac->tx_ch_num = 1;
1008 
1009 	irq_name = "tx_ts0";
1010 	if (emac->port_id == PRUETH_PORT_MII1)
1011 		irq_name = "tx_ts1";
1012 	emac->tx_ts_irq = platform_get_irq_byname_optional(prueth->pdev, irq_name);
1013 	if (emac->tx_ts_irq < 0) {
1014 		ret = dev_err_probe(prueth->dev, emac->tx_ts_irq, "could not get tx_ts_irq\n");
1015 		goto free;
1016 	}
1017 
1018 	SET_NETDEV_DEV(ndev, prueth->dev);
1019 	spin_lock_init(&emac->lock);
1020 	mutex_init(&emac->cmd_lock);
1021 
1022 	emac->phy_node = of_parse_phandle(eth_node, "phy-handle", 0);
1023 	if (!emac->phy_node && !of_phy_is_fixed_link(eth_node)) {
1024 		dev_err(prueth->dev, "couldn't find phy-handle\n");
1025 		ret = -ENODEV;
1026 		goto free;
1027 	} else if (of_phy_is_fixed_link(eth_node)) {
1028 		ret = of_phy_register_fixed_link(eth_node);
1029 		if (ret) {
1030 			ret = dev_err_probe(prueth->dev, ret,
1031 					    "failed to register fixed-link phy\n");
1032 			goto free;
1033 		}
1034 
1035 		emac->phy_node = eth_node;
1036 	}
1037 
1038 	ret = of_get_phy_mode(eth_node, &emac->phy_if);
1039 	if (ret) {
1040 		dev_err(prueth->dev, "could not get phy-mode property\n");
1041 		goto free;
1042 	}
1043 
1044 	if (emac->phy_if != PHY_INTERFACE_MODE_MII &&
1045 	    !phy_interface_mode_is_rgmii(emac->phy_if)) {
1046 		dev_err(prueth->dev, "PHY mode unsupported %s\n", phy_modes(emac->phy_if));
1047 		ret = -EINVAL;
1048 		goto free;
1049 	}
1050 
1051 	/* AM65 SR2.0 has TX Internal delay always enabled by hardware
1052 	 * and it is not possible to disable TX Internal delay. The below
1053 	 * switch case block describes how we handle different phy modes
1054 	 * based on hardware restriction.
1055 	 */
1056 	switch (emac->phy_if) {
1057 	case PHY_INTERFACE_MODE_RGMII_ID:
1058 		emac->phy_if = PHY_INTERFACE_MODE_RGMII_RXID;
1059 		break;
1060 	case PHY_INTERFACE_MODE_RGMII_TXID:
1061 		emac->phy_if = PHY_INTERFACE_MODE_RGMII;
1062 		break;
1063 	case PHY_INTERFACE_MODE_RGMII:
1064 	case PHY_INTERFACE_MODE_RGMII_RXID:
1065 		dev_err(prueth->dev, "RGMII mode without TX delay is not supported");
1066 		ret = -EINVAL;
1067 		goto free;
1068 	default:
1069 		break;
1070 	}
1071 
1072 	/* get mac address from DT and set private and netdev addr */
1073 	ret = of_get_ethdev_address(eth_node, ndev);
1074 	if (!is_valid_ether_addr(ndev->dev_addr)) {
1075 		eth_hw_addr_random(ndev);
1076 		dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n",
1077 			 port, ndev->dev_addr);
1078 	}
1079 	ether_addr_copy(emac->mac_addr, ndev->dev_addr);
1080 
1081 	ndev->dev.of_node = eth_node;
1082 	ndev->min_mtu = PRUETH_MIN_PKT_SIZE;
1083 	ndev->max_mtu = PRUETH_MAX_MTU;
1084 	ndev->netdev_ops = &emac_netdev_ops;
1085 	ndev->ethtool_ops = &icssg_ethtool_ops;
1086 	ndev->hw_features = NETIF_F_SG;
1087 	ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
1088 	ndev->hw_features |= NETIF_PRUETH_HSR_OFFLOAD_FEATURES;
1089 
1090 	netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
1091 	hrtimer_init(&emac->rx_hrtimer, CLOCK_MONOTONIC,
1092 		     HRTIMER_MODE_REL_PINNED);
1093 	emac->rx_hrtimer.function = &emac_rx_timer_callback;
1094 	prueth->emac[mac] = emac;
1095 
1096 	return 0;
1097 
1098 free:
1099 	pruss_release_mem_region(prueth->pruss, &emac->dram);
1100 free_wq:
1101 	destroy_workqueue(emac->cmd_wq);
1102 free_ndev:
1103 	emac->ndev = NULL;
1104 	prueth->emac[mac] = NULL;
1105 	free_netdev(ndev);
1106 
1107 	return ret;
1108 }
1109 
1110 bool prueth_dev_check(const struct net_device *ndev)
1111 {
1112 	if (ndev->netdev_ops == &emac_netdev_ops && netif_running(ndev)) {
1113 		struct prueth_emac *emac = netdev_priv(ndev);
1114 
1115 		return emac->prueth->is_switch_mode;
1116 	}
1117 
1118 	return false;
1119 }
1120 
1121 static void prueth_offload_fwd_mark_update(struct prueth *prueth)
1122 {
1123 	int set_val = 0;
1124 	int i;
1125 
1126 	if (prueth->br_members == (BIT(PRUETH_PORT_MII0) | BIT(PRUETH_PORT_MII1)))
1127 		set_val = 1;
1128 
1129 	dev_dbg(prueth->dev, "set offload_fwd_mark %d\n", set_val);
1130 
1131 	for (i = PRUETH_MAC0; i < PRUETH_NUM_MACS; i++) {
1132 		struct prueth_emac *emac = prueth->emac[i];
1133 
1134 		if (!emac || !emac->ndev)
1135 			continue;
1136 
1137 		emac->offload_fwd_mark = set_val;
1138 	}
1139 }
1140 
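/* Restart both ports on the currently selected firmware set: detach the
 * net_devices, disable and stop both ports, start the firmware again, then
 * re-enable forwarding and re-attach the net_devices. Used when switching
 * between EMAC, switch and HSR offload modes.
 */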
1141 static int prueth_emac_restart(struct prueth *prueth)
1142 {
1143 	struct prueth_emac *emac0 = prueth->emac[PRUETH_MAC0];
1144 	struct prueth_emac *emac1 = prueth->emac[PRUETH_MAC1];
1145 	int ret;
1146 
1147 	/* Detach the net_device for both PRUeth ports */
1148 	if (netif_running(emac0->ndev))
1149 		netif_device_detach(emac0->ndev);
1150 	if (netif_running(emac1->ndev))
1151 		netif_device_detach(emac1->ndev);
1152 
1153 	/* Disable both PRUeth ports */
1154 	ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
1155 	ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);
1156 	if (ret)
1157 		return ret;
1158 
1159 	/* Stop both pru cores for both PRUeth ports */
1160 	ret = prueth_emac_common_stop(prueth);
1161 	if (ret) {
1162 		dev_err(prueth->dev, "Failed to stop the firmwares");
1163 		return ret;
1164 	}
1165 
1166 	/* Start both pru cores for both PRUeth ports */
1167 	ret = prueth_emac_common_start(prueth);
1168 	if (ret) {
1169 		dev_err(prueth->dev, "Failed to start the firmwares");
1170 		return ret;
1171 	}
1172 
1173 	/* Enable forwarding for both PRUeth ports */
1174 	ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD);
1175 	ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD);
1176 
1177 	/* Attach the net_device for both PRUeth ports */
1178 	netif_device_attach(emac0->ndev);
1179 	netif_device_attach(emac1->ndev);
1180 
1181 	return ret;
1182 }
1183 
1184 static void icssg_change_mode(struct prueth *prueth)
1185 {
1186 	struct prueth_emac *emac;
1187 	int mac, ret;
1188 
1189 	ret = prueth_emac_restart(prueth);
1190 	if (ret) {
1191 		dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
1192 		return;
1193 	}
1194 
1195 	for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
1196 		emac = prueth->emac[mac];
1197 		if (prueth->is_hsr_offload_mode) {
1198 			if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM)
1199 				icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE);
1200 			else
1201 				icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE);
1202 		}
1203 
1204 		if (netif_running(emac->ndev)) {
1205 			icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan,
1206 					  ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
1207 					  ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
1208 					  ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
1209 					  ICSSG_FDB_ENTRY_BLOCK,
1210 					  true);
1211 			icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID,
1212 					  BIT(emac->port_id) | DEFAULT_PORT_MASK,
1213 					  BIT(emac->port_id) | DEFAULT_UNTAG_MASK,
1214 					  true);
1215 			if (prueth->is_hsr_offload_mode)
1216 				icssg_vtbl_modify(emac, DEFAULT_VID,
1217 						  DEFAULT_PORT_MASK,
1218 						  DEFAULT_UNTAG_MASK, true);
1219 			icssg_set_pvid(prueth, emac->port_vlan, emac->port_id);
1220 			if (prueth->is_switch_mode)
1221 				icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);
1222 		}
1223 	}
1224 }
1225 
1226 static int prueth_netdevice_port_link(struct net_device *ndev,
1227 				      struct net_device *br_ndev,
1228 				      struct netlink_ext_ack *extack)
1229 {
1230 	struct prueth_emac *emac = netdev_priv(ndev);
1231 	struct prueth *prueth = emac->prueth;
1232 	int err;
1233 
1234 	if (!prueth->br_members) {
1235 		prueth->hw_bridge_dev = br_ndev;
1236 	} else {
1237 		/* This is adding the port to a second bridge, this is
1238 		 * unsupported
1239 		 */
1240 		if (prueth->hw_bridge_dev != br_ndev)
1241 			return -EOPNOTSUPP;
1242 	}
1243 
1244 	err = switchdev_bridge_port_offload(br_ndev, ndev, emac,
1245 					    &prueth->prueth_switchdev_nb,
1246 					    &prueth->prueth_switchdev_bl_nb,
1247 					    false, extack);
1248 	if (err)
1249 		return err;
1250 
1251 	prueth->br_members |= BIT(emac->port_id);
1252 
1253 	if (!prueth->is_switch_mode) {
1254 		if (prueth->br_members & BIT(PRUETH_PORT_MII0) &&
1255 		    prueth->br_members & BIT(PRUETH_PORT_MII1)) {
1256 			prueth->is_switch_mode = true;
1257 			prueth->default_vlan = 1;
1258 			emac->port_vlan = prueth->default_vlan;
1259 			icssg_change_mode(prueth);
1260 		}
1261 	}
1262 
1263 	prueth_offload_fwd_mark_update(prueth);
1264 
1265 	return NOTIFY_DONE;
1266 }
1267 
1268 static void prueth_netdevice_port_unlink(struct net_device *ndev)
1269 {
1270 	struct prueth_emac *emac = netdev_priv(ndev);
1271 	struct prueth *prueth = emac->prueth;
1272 	int ret;
1273 
1274 	prueth->br_members &= ~BIT(emac->port_id);
1275 
1276 	if (prueth->is_switch_mode) {
1277 		prueth->is_switch_mode = false;
1278 		emac->port_vlan = 0;
1279 		ret = prueth_emac_restart(prueth);
1280 		if (ret) {
1281 			dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
1282 			return;
1283 		}
1284 	}
1285 
1286 	prueth_offload_fwd_mark_update(prueth);
1287 
1288 	if (!prueth->br_members)
1289 		prueth->hw_bridge_dev = NULL;
1290 }
1291 
1292 static int prueth_hsr_port_link(struct net_device *ndev)
1293 {
1294 	struct prueth_emac *emac = netdev_priv(ndev);
1295 	struct prueth *prueth = emac->prueth;
1296 	struct prueth_emac *emac0;
1297 	struct prueth_emac *emac1;
1298 
1299 	emac0 = prueth->emac[PRUETH_MAC0];
1300 	emac1 = prueth->emac[PRUETH_MAC1];
1301 
1302 	if (prueth->is_switch_mode)
1303 		return -EOPNOTSUPP;
1304 
1305 	prueth->hsr_members |= BIT(emac->port_id);
1306 	if (!prueth->is_hsr_offload_mode) {
1307 		if (prueth->hsr_members & BIT(PRUETH_PORT_MII0) &&
1308 		    prueth->hsr_members & BIT(PRUETH_PORT_MII1)) {
1309 			if (!(emac0->ndev->features &
1310 			      NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
1311 			    !(emac1->ndev->features &
1312 			      NETIF_PRUETH_HSR_OFFLOAD_FEATURES))
1313 				return -EOPNOTSUPP;
1314 			prueth->is_hsr_offload_mode = true;
1315 			prueth->default_vlan = 1;
1316 			emac0->port_vlan = prueth->default_vlan;
1317 			emac1->port_vlan = prueth->default_vlan;
1318 			icssg_change_mode(prueth);
1319 			netdev_dbg(ndev, "Enabling HSR offload mode\n");
1320 		}
1321 	}
1322 
1323 	return 0;
1324 }
1325 
1326 static void prueth_hsr_port_unlink(struct net_device *ndev)
1327 {
1328 	struct prueth_emac *emac = netdev_priv(ndev);
1329 	struct prueth *prueth = emac->prueth;
1330 	struct prueth_emac *emac0;
1331 	struct prueth_emac *emac1;
1332 	int ret;
1333 
1334 	emac0 = prueth->emac[PRUETH_MAC0];
1335 	emac1 = prueth->emac[PRUETH_MAC1];
1336 
1337 	prueth->hsr_members &= ~BIT(emac->port_id);
1338 	if (prueth->is_hsr_offload_mode) {
1339 		prueth->is_hsr_offload_mode = false;
1340 		emac0->port_vlan = 0;
1341 		emac1->port_vlan = 0;
1342 		prueth->hsr_dev = NULL;
1343 		ret = prueth_emac_restart(prueth);
1344 		if (ret) {
1345 			dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
1346 			return;
1347 		}
1348 		netdev_dbg(ndev, "Disabling HSR Offload mode\n");
1349 	}
1350 }
1351 
1352 /* netdev notifier */
1353 static int prueth_netdevice_event(struct notifier_block *unused,
1354 				  unsigned long event, void *ptr)
1355 {
1356 	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
1357 	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
1358 	struct netdev_notifier_changeupper_info *info;
1359 	struct prueth_emac *emac = netdev_priv(ndev);
1360 	struct prueth *prueth = emac->prueth;
1361 	int ret = NOTIFY_DONE;
1362 
1363 	if (ndev->netdev_ops != &emac_netdev_ops)
1364 		return NOTIFY_DONE;
1365 
1366 	switch (event) {
1367 	case NETDEV_CHANGEUPPER:
1368 		info = ptr;
1369 
1370 		if ((ndev->features & NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
1371 		    is_hsr_master(info->upper_dev)) {
1372 			if (info->linking) {
1373 				if (!prueth->hsr_dev) {
1374 					prueth->hsr_dev = info->upper_dev;
1375 					icssg_class_set_host_mac_addr(prueth->miig_rt,
1376 								      prueth->hsr_dev->dev_addr);
1377 				} else {
1378 					if (prueth->hsr_dev != info->upper_dev) {
1379 						netdev_dbg(ndev, "Both interfaces must be linked to same upper device\n");
1380 						return -EOPNOTSUPP;
1381 					}
1382 				}
1383 				prueth_hsr_port_link(ndev);
1384 			} else {
1385 				prueth_hsr_port_unlink(ndev);
1386 			}
1387 		}
1388 
1389 		if (netif_is_bridge_master(info->upper_dev)) {
1390 			if (info->linking)
1391 				ret = prueth_netdevice_port_link(ndev, info->upper_dev, extack);
1392 			else
1393 				prueth_netdevice_port_unlink(ndev);
1394 		}
1395 		break;
1396 	default:
1397 		return NOTIFY_DONE;
1398 	}
1399 
1400 	return notifier_from_errno(ret);
1401 }
1402 
1403 static int prueth_register_notifiers(struct prueth *prueth)
1404 {
1405 	int ret = 0;
1406 
1407 	prueth->prueth_netdevice_nb.notifier_call = &prueth_netdevice_event;
1408 	ret = register_netdevice_notifier(&prueth->prueth_netdevice_nb);
1409 	if (ret) {
1410 		dev_err(prueth->dev, "can't register netdevice notifier\n");
1411 		return ret;
1412 	}
1413 
1414 	ret = prueth_switchdev_register_notifiers(prueth);
1415 	if (ret)
1416 		unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
1417 
1418 	return ret;
1419 }
1420 
1421 static void prueth_unregister_notifiers(struct prueth *prueth)
1422 {
1423 	prueth_switchdev_unregister_notifiers(prueth);
1424 	unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
1425 }
1426 
1427 static int prueth_probe(struct platform_device *pdev)
1428 {
1429 	struct device_node *eth_node, *eth_ports_node;
1430 	struct device_node  *eth0_node = NULL;
1431 	struct device_node  *eth1_node = NULL;
1432 	struct genpool_data_align gp_data = {
1433 		.align = SZ_64K,
1434 	};
1435 	struct device *dev = &pdev->dev;
1436 	struct device_node *np;
1437 	struct prueth *prueth;
1438 	struct pruss *pruss;
1439 	u32 msmc_ram_size;
1440 	int i, ret;
1441 
1442 	np = dev->of_node;
1443 
1444 	prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
1445 	if (!prueth)
1446 		return -ENOMEM;
1447 
1448 	dev_set_drvdata(dev, prueth);
1449 	prueth->pdev = pdev;
1450 	prueth->pdata = *(const struct prueth_pdata *)device_get_match_data(dev);
1451 
1452 	prueth->dev = dev;
1453 	eth_ports_node = of_get_child_by_name(np, "ethernet-ports");
1454 	if (!eth_ports_node)
1455 		return -ENOENT;
1456 
1457 	for_each_child_of_node(eth_ports_node, eth_node) {
1458 		u32 reg;
1459 
1460 		if (strcmp(eth_node->name, "port"))
1461 			continue;
1462 		ret = of_property_read_u32(eth_node, "reg", &reg);
1463 		if (ret < 0) {
1464 			dev_err(dev, "%pOF error reading port_id %d\n",
1465 				eth_node, ret);
1466 		}
1467 
1468 		of_node_get(eth_node);
1469 
1470 		if (reg == 0) {
1471 			eth0_node = eth_node;
1472 			if (!of_device_is_available(eth0_node)) {
1473 				of_node_put(eth0_node);
1474 				eth0_node = NULL;
1475 			}
1476 		} else if (reg == 1) {
1477 			eth1_node = eth_node;
1478 			if (!of_device_is_available(eth1_node)) {
1479 				of_node_put(eth1_node);
1480 				eth1_node = NULL;
1481 			}
1482 		} else {
1483 			dev_err(dev, "port reg should be 0 or 1\n");
1484 		}
1485 	}
1486 
1487 	of_node_put(eth_ports_node);
1488 
1489 	/* At least one node must be present and available else we fail */
1490 	if (!eth0_node && !eth1_node) {
1491 		dev_err(dev, "neither port0 nor port1 node available\n");
1492 		return -ENODEV;
1493 	}
1494 
1495 	if (eth0_node == eth1_node) {
1496 		dev_err(dev, "port0 and port1 can't have same reg\n");
1497 		of_node_put(eth0_node);
1498 		return -ENODEV;
1499 	}
1500 
1501 	prueth->eth_node[PRUETH_MAC0] = eth0_node;
1502 	prueth->eth_node[PRUETH_MAC1] = eth1_node;
1503 
1504 	prueth->miig_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-g-rt");
1505 	if (IS_ERR(prueth->miig_rt)) {
1506 		dev_err(dev, "couldn't get ti,mii-g-rt syscon regmap\n");
1507 		return -ENODEV;
1508 	}
1509 
1510 	prueth->mii_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-rt");
1511 	if (IS_ERR(prueth->mii_rt)) {
1512 		dev_err(dev, "couldn't get ti,mii-rt syscon regmap\n");
1513 		return -ENODEV;
1514 	}
1515 
1516 	prueth->pa_stats = syscon_regmap_lookup_by_phandle(np, "ti,pa-stats");
1517 	if (IS_ERR(prueth->pa_stats)) {
1518 		dev_err(dev, "couldn't get ti,pa-stats syscon regmap\n");
1519 		prueth->pa_stats = NULL;
1520 	}
1521 
1522 	if (eth0_node || eth1_node) {
1523 		ret = prueth_get_cores(prueth, ICSS_SLICE0, false);
1524 		if (ret)
1525 			goto put_cores;
1526 		ret = prueth_get_cores(prueth, ICSS_SLICE1, false);
1527 		if (ret)
1528 			goto put_cores;
1529 	}
1530 
1531 	pruss = pruss_get(eth0_node ?
1532 			  prueth->pru[ICSS_SLICE0] : prueth->pru[ICSS_SLICE1]);
1533 	if (IS_ERR(pruss)) {
1534 		ret = PTR_ERR(pruss);
1535 		dev_err(dev, "unable to get pruss handle\n");
1536 		goto put_cores;
1537 	}
1538 
1539 	prueth->pruss = pruss;
1540 
1541 	ret = pruss_request_mem_region(pruss, PRUSS_MEM_SHRD_RAM2,
1542 				       &prueth->shram);
1543 	if (ret) {
1544 		dev_err(dev, "unable to get PRUSS SHRD RAM2: %d\n", ret);
1545 		goto put_pruss;
1546 	}
1547 
1548 	prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
1549 	if (!prueth->sram_pool) {
1550 		dev_err(dev, "unable to get SRAM pool\n");
1551 		ret = -ENODEV;
1552 
1553 		goto put_mem;
1554 	}
1555 
1556 	msmc_ram_size = MSMC_RAM_SIZE;
1557 	prueth->is_switchmode_supported = prueth->pdata.switch_mode;
1558 	if (prueth->is_switchmode_supported)
1559 		msmc_ram_size = MSMC_RAM_SIZE_SWITCH_MODE;
1560 
1561 	/* NOTE: FW bug needs buffer base to be 64KB aligned */
1562 	prueth->msmcram.va =
1563 		(void __iomem *)gen_pool_alloc_algo(prueth->sram_pool,
1564 						    msmc_ram_size,
1565 						    gen_pool_first_fit_align,
1566 						    &gp_data);
1567 
1568 	if (!prueth->msmcram.va) {
1569 		ret = -ENOMEM;
1570 		dev_err(dev, "unable to allocate MSMC resource\n");
1571 		goto put_mem;
1572 	}
1573 	prueth->msmcram.pa = gen_pool_virt_to_phys(prueth->sram_pool,
1574 						   (unsigned long)prueth->msmcram.va);
1575 	prueth->msmcram.size = msmc_ram_size;
1576 	memset_io(prueth->msmcram.va, 0, msmc_ram_size);
1577 	dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa,
1578 		prueth->msmcram.va, prueth->msmcram.size);
1579 
1580 	prueth->iep0 = icss_iep_get_idx(np, 0);
1581 	if (IS_ERR(prueth->iep0)) {
1582 		ret = dev_err_probe(dev, PTR_ERR(prueth->iep0), "iep0 get failed\n");
1583 		prueth->iep0 = NULL;
1584 		goto free_pool;
1585 	}
1586 
1587 	prueth->iep1 = icss_iep_get_idx(np, 1);
1588 	if (IS_ERR(prueth->iep1)) {
1589 		ret = dev_err_probe(dev, PTR_ERR(prueth->iep1), "iep1 get failed\n");
1590 		goto put_iep0;
1591 	}
1592 
1593 	if (prueth->pdata.quirk_10m_link_issue) {
1594 		/* Enable IEP1 for FW in 64bit mode as W/A for 10M FD link detect issue under TX
1595 		 * traffic.
1596 		 */
1597 		icss_iep_init_fw(prueth->iep1);
1598 	}
1599 
1600 	spin_lock_init(&prueth->vtbl_lock);
1601 	/* setup netdev interfaces */
1602 	if (eth0_node) {
1603 		ret = prueth_netdev_init(prueth, eth0_node);
1604 		if (ret) {
1605 			dev_err_probe(dev, ret, "netdev init %s failed\n",
1606 				      eth0_node->name);
1607 			goto exit_iep;
1608 		}
1609 
1610 		prueth->emac[PRUETH_MAC0]->half_duplex =
1611 			of_property_read_bool(eth0_node, "ti,half-duplex-capable");
1612 
1613 		prueth->emac[PRUETH_MAC0]->iep = prueth->iep0;
1614 	}
1615 
1616 	if (eth1_node) {
1617 		ret = prueth_netdev_init(prueth, eth1_node);
1618 		if (ret) {
1619 			dev_err_probe(dev, ret, "netdev init %s failed\n",
1620 				      eth1_node->name);
1621 			goto netdev_exit;
1622 		}
1623 
1624 		prueth->emac[PRUETH_MAC1]->half_duplex =
1625 			of_property_read_bool(eth1_node, "ti,half-duplex-capable");
1626 
1627 		prueth->emac[PRUETH_MAC1]->iep = prueth->iep0;
1628 	}
1629 
1630 	/* register the network devices */
1631 	if (eth0_node) {
1632 		ret = register_netdev(prueth->emac[PRUETH_MAC0]->ndev);
1633 		if (ret) {
1634 			dev_err(dev, "can't register netdev for port MII0");
1635 			goto netdev_exit;
1636 		}
1637 
1638 		prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev;
1639 
1640 		ret = emac_phy_connect(prueth->emac[PRUETH_MAC0]);
1641 		if (ret) {
1642 			dev_err(dev,
1643 				"can't connect to MII0 PHY, error -%d", ret);
1644 			goto netdev_unregister;
1645 		}
1646 		phy_attached_info(prueth->emac[PRUETH_MAC0]->ndev->phydev);
1647 	}
1648 
1649 	if (eth1_node) {
1650 		ret = register_netdev(prueth->emac[PRUETH_MAC1]->ndev);
1651 		if (ret) {
1652 			dev_err(dev, "can't register netdev for port MII1");
1653 			goto netdev_unregister;
1654 		}
1655 
1656 		prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev;
1657 		ret = emac_phy_connect(prueth->emac[PRUETH_MAC1]);
1658 		if (ret) {
1659 			dev_err(dev,
1660 				"can't connect to MII1 PHY, error %d", ret);
1661 			goto netdev_unregister;
1662 		}
1663 		phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev);
1664 	}
1665 
1666 	if (prueth->is_switchmode_supported) {
1667 		ret = prueth_register_notifiers(prueth);
1668 		if (ret)
1669 			goto netdev_unregister;
1670 
1671 		sprintf(prueth->switch_id, "%s", dev_name(dev));
1672 	}
1673 
1674 	dev_info(dev, "TI PRU ethernet driver initialized: %s EMAC mode\n",
1675 		 (!eth0_node || !eth1_node) ? "single" : "dual");
1676 
1677 	if (eth1_node)
1678 		of_node_put(eth1_node);
1679 	if (eth0_node)
1680 		of_node_put(eth0_node);
1681 	return 0;
1682 
1683 netdev_unregister:
1684 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
1685 		if (!prueth->registered_netdevs[i])
1686 			continue;
1687 		if (prueth->emac[i]->ndev->phydev) {
1688 			phy_disconnect(prueth->emac[i]->ndev->phydev);
1689 			prueth->emac[i]->ndev->phydev = NULL;
1690 		}
1691 		unregister_netdev(prueth->registered_netdevs[i]);
1692 	}
1693 
1694 netdev_exit:
1695 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
1696 		eth_node = prueth->eth_node[i];
1697 		if (!eth_node)
1698 			continue;
1699 
1700 		prueth_netdev_exit(prueth, eth_node);
1701 	}
1702 
1703 exit_iep:
1704 	if (prueth->pdata.quirk_10m_link_issue)
1705 		icss_iep_exit_fw(prueth->iep1);
1706 	icss_iep_put(prueth->iep1);
1707 
1708 put_iep0:
1709 	icss_iep_put(prueth->iep0);
1710 	prueth->iep0 = NULL;
1711 	prueth->iep1 = NULL;
1712 
1713 free_pool:
1714 	gen_pool_free(prueth->sram_pool,
1715 		      (unsigned long)prueth->msmcram.va, msmc_ram_size);
1716 
1717 put_mem:
1718 	pruss_release_mem_region(prueth->pruss, &prueth->shram);
1719 
1720 put_pruss:
1721 	pruss_put(prueth->pruss);
1722 
1723 put_cores:
1724 	if (eth0_node || eth1_node) {
1725 		prueth_put_cores(prueth, ICSS_SLICE0);
1726 		of_node_put(eth0_node);
1727 
1728 		prueth_put_cores(prueth, ICSS_SLICE1);
1729 		of_node_put(eth1_node);
1730 	}
1731 
1732 	return ret;
1733 }
1734 
1735 static void prueth_remove(struct platform_device *pdev)
1736 {
1737 	struct prueth *prueth = platform_get_drvdata(pdev);
1738 	struct device_node *eth_node;
1739 	int i;
1740 
1741 	prueth_unregister_notifiers(prueth);
1742 
1743 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
1744 		if (!prueth->registered_netdevs[i])
1745 			continue;
1746 		phy_stop(prueth->emac[i]->ndev->phydev);
1747 		phy_disconnect(prueth->emac[i]->ndev->phydev);
1748 		prueth->emac[i]->ndev->phydev = NULL;
1749 		unregister_netdev(prueth->registered_netdevs[i]);
1750 	}
1751 
1752 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
1753 		eth_node = prueth->eth_node[i];
1754 		if (!eth_node)
1755 			continue;
1756 
1757 		prueth_netdev_exit(prueth, eth_node);
1758 	}
1759 
1760 	if (prueth->pdata.quirk_10m_link_issue)
1761 		icss_iep_exit_fw(prueth->iep1);
1762 
1763 	icss_iep_put(prueth->iep1);
1764 	icss_iep_put(prueth->iep0);
1765 
1766 	gen_pool_free(prueth->sram_pool,
1767 		      (unsigned long)prueth->msmcram.va,
1768 		      MSMC_RAM_SIZE);
1769 
1770 	pruss_release_mem_region(prueth->pruss, &prueth->shram);
1771 
1772 	pruss_put(prueth->pruss);
1773 
1774 	if (prueth->eth_node[PRUETH_MAC1])
1775 		prueth_put_cores(prueth, ICSS_SLICE1);
1776 
1777 	if (prueth->eth_node[PRUETH_MAC0])
1778 		prueth_put_cores(prueth, ICSS_SLICE0);
1779 }
1780 
1781 static const struct prueth_pdata am654_icssg_pdata = {
1782 	.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
1783 	.quirk_10m_link_issue = 1,
1784 	.switch_mode = 1,
1785 };
1786 
1787 static const struct prueth_pdata am64x_icssg_pdata = {
1788 	.fdqring_mode = K3_RINGACC_RING_MODE_RING,
1789 	.quirk_10m_link_issue = 1,
1790 	.switch_mode = 1,
1791 };
1792 
1793 static const struct of_device_id prueth_dt_match[] = {
1794 	{ .compatible = "ti,am654-icssg-prueth", .data = &am654_icssg_pdata },
1795 	{ .compatible = "ti,am642-icssg-prueth", .data = &am64x_icssg_pdata },
1796 	{ /* sentinel */ }
1797 };
1798 MODULE_DEVICE_TABLE(of, prueth_dt_match);
1799 
1800 static struct platform_driver prueth_driver = {
1801 	.probe = prueth_probe,
1802 	.remove = prueth_remove,
1803 	.driver = {
1804 		.name = "icssg-prueth",
1805 		.of_match_table = prueth_dt_match,
1806 		.pm = &prueth_dev_pm_ops,
1807 	},
1808 };
1809 module_platform_driver(prueth_driver);
1810 
1811 MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
1812 MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
1813 MODULE_DESCRIPTION("PRUSS ICSSG Ethernet Driver");
1814 MODULE_LICENSE("GPL");
1815