xref: /linux/drivers/net/ethernet/ti/icssg/icssg_prueth.c (revision 2c1ed907520c50326b8f604907a8478b27881a2e)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 /* Texas Instruments ICSSG Ethernet Driver
4  *
5  * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
6  *
7  */
8 
9 #include <linux/bitops.h>
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/dma/ti-cppi5.h>
14 #include <linux/etherdevice.h>
15 #include <linux/genalloc.h>
16 #include <linux/if_hsr.h>
17 #include <linux/if_vlan.h>
18 #include <linux/interrupt.h>
19 #include <linux/io-64-nonatomic-hi-lo.h>
20 #include <linux/kernel.h>
21 #include <linux/mfd/syscon.h>
22 #include <linux/module.h>
23 #include <linux/of.h>
24 #include <linux/of_mdio.h>
25 #include <linux/of_net.h>
26 #include <linux/platform_device.h>
27 #include <linux/phy.h>
28 #include <linux/property.h>
29 #include <linux/remoteproc/pruss.h>
30 #include <linux/regmap.h>
31 #include <linux/remoteproc.h>
32 #include <net/switchdev.h>
33 
34 #include "icssg_prueth.h"
35 #include "icssg_mii_rt.h"
36 #include "icssg_switchdev.h"
37 #include "../k3-cppi-desc-pool.h"
38 
39 #define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG Ethernet driver"
40 
41 #define DEFAULT_VID		1
42 #define DEFAULT_PORT_MASK	1
43 #define DEFAULT_UNTAG_MASK	1
44 
45 #define NETIF_PRUETH_HSR_OFFLOAD_FEATURES	(NETIF_F_HW_HSR_FWD | \
46 						 NETIF_F_HW_HSR_DUP | \
47 						 NETIF_F_HW_HSR_TAG_INS | \
48 						 NETIF_F_HW_HSR_TAG_RM)
49 
50 /* CTRLMMR_ICSSG_RGMII_CTRL register bits */
51 #define ICSSG_CTRL_RGMII_ID_MODE                BIT(24)
52 
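/* Pop one pending TX timestamp response from this slice's firmware queue
 * in ICSSG shared memory, copy it out of SHRAM, and hand the buffer slot
 * back to the firmware's pool.
 */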
53 static int emac_get_tx_ts(struct prueth_emac *emac,
54 			  struct emac_tx_ts_response *rsp)
55 {
56 	struct prueth *prueth = emac->prueth;
57 	int slice = prueth_emac_slice(emac);
58 	int addr;
59 
60 	addr = icssg_queue_pop(prueth, slice == 0 ?
61 			       ICSSG_TS_POP_SLICE0 : ICSSG_TS_POP_SLICE1);
62 	if (addr < 0)
63 		return addr;
64 
65 	memcpy_fromio(rsp, prueth->shram.va + addr, sizeof(*rsp));
66 	/* return the buffer back to the pool */
67 	icssg_queue_push(prueth, slice == 0 ?
68 			 ICSSG_TS_PUSH_SLICE0 : ICSSG_TS_PUSH_SLICE1, addr);
69 
70 	return 0;
71 }
72 
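/* Drain all pending TX timestamp responses. The cookie returned by the
 * firmware indexes into the emac->tx_ts_skb[] slots; the raw timestamp is
 * combined with the software-maintained wallclock high word from shared
 * RAM and delivered to the stack via skb_tstamp_tx().
 */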
73 static void tx_ts_work(struct prueth_emac *emac)
74 {
75 	struct skb_shared_hwtstamps ssh;
76 	struct emac_tx_ts_response tsr;
77 	struct sk_buff *skb;
78 	int ret = 0;
79 	u32 hi_sw;
80 	u64 ns;
81 
82 	/* There may be more than one pending request */
83 	while (1) {
84 		ret = emac_get_tx_ts(emac, &tsr);
85 		if (ret) /* nothing more */
86 			break;
87 
88 		if (tsr.cookie >= PRUETH_MAX_TX_TS_REQUESTS ||
89 		    !emac->tx_ts_skb[tsr.cookie]) {
90 			netdev_err(emac->ndev, "Invalid TX TS cookie 0x%x\n",
91 				   tsr.cookie);
92 			break;
93 		}
94 
95 		skb = emac->tx_ts_skb[tsr.cookie];
96 		emac->tx_ts_skb[tsr.cookie] = NULL;	/* free slot */
97 		if (!skb) {
98 			netdev_err(emac->ndev, "Driver Bug! got NULL skb\n");
99 			break;
100 		}
101 
102 		hi_sw = readl(emac->prueth->shram.va +
103 			      TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
104 		ns = icssg_ts_to_ns(hi_sw, tsr.hi_ts, tsr.lo_ts,
105 				    IEP_DEFAULT_CYCLE_TIME_NS);
106 
107 		memset(&ssh, 0, sizeof(ssh));
108 		ssh.hwtstamp = ns_to_ktime(ns);
109 
110 		skb_tstamp_tx(skb, &ssh);
111 		dev_consume_skb_any(skb);
112 
113 		if (atomic_dec_and_test(&emac->tx_ts_pending))	/* no more? */
114 			break;
115 	}
116 }
117 
118 static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id)
119 {
120 	struct prueth_emac *emac = dev_id;
121 
122 	/* currently only TX timestamps are returned */
123 	tx_ts_work(emac);
124 
125 	return IRQ_HANDLED;
126 }
127 
128 static struct icssg_firmwares icssg_hsr_firmwares[] = {
129 	{
130 		.pru = "ti-pruss/am65x-sr2-pru0-pruhsr-fw.elf",
131 		.rtu = "ti-pruss/am65x-sr2-rtu0-pruhsr-fw.elf",
132 		.txpru = "ti-pruss/am65x-sr2-txpru0-pruhsr-fw.elf",
133 	},
134 	{
135 		.pru = "ti-pruss/am65x-sr2-pru1-pruhsr-fw.elf",
136 		.rtu = "ti-pruss/am65x-sr2-rtu1-pruhsr-fw.elf",
137 		.txpru = "ti-pruss/am65x-sr2-txpru1-pruhsr-fw.elf",
138 	}
139 };
140 
141 static struct icssg_firmwares icssg_switch_firmwares[] = {
142 	{
143 		.pru = "ti-pruss/am65x-sr2-pru0-prusw-fw.elf",
144 		.rtu = "ti-pruss/am65x-sr2-rtu0-prusw-fw.elf",
145 		.txpru = "ti-pruss/am65x-sr2-txpru0-prusw-fw.elf",
146 	},
147 	{
148 		.pru = "ti-pruss/am65x-sr2-pru1-prusw-fw.elf",
149 		.rtu = "ti-pruss/am65x-sr2-rtu1-prusw-fw.elf",
150 		.txpru = "ti-pruss/am65x-sr2-txpru1-prusw-fw.elf",
151 	}
152 };
153 
154 static struct icssg_firmwares icssg_emac_firmwares[] = {
155 	{
156 		.pru = "ti-pruss/am65x-sr2-pru0-prueth-fw.elf",
157 		.rtu = "ti-pruss/am65x-sr2-rtu0-prueth-fw.elf",
158 		.txpru = "ti-pruss/am65x-sr2-txpru0-prueth-fw.elf",
159 	},
160 	{
161 		.pru = "ti-pruss/am65x-sr2-pru1-prueth-fw.elf",
162 		.rtu = "ti-pruss/am65x-sr2-rtu1-prueth-fw.elf",
163 		.txpru = "ti-pruss/am65x-sr2-txpru1-prueth-fw.elf",
164 	}
165 };
166 
167 static int prueth_start(struct rproc *rproc, const char *fw_name)
168 {
169 	int ret;
170 
171 	ret = rproc_set_firmware(rproc, fw_name);
172 	if (ret)
173 		return ret;
174 	return rproc_boot(rproc);
175 }
176 
177 static void prueth_shutdown(struct rproc *rproc)
178 {
179 	rproc_shutdown(rproc);
180 }
181 
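/* Boot the PRU, RTU and TX_PRU cores of both slices with the firmware set
 * chosen for the current mode (EMAC, switch or HSR offload). On failure,
 * cores that were already booted are shut down again in reverse order.
 */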
182 static int prueth_emac_start(struct prueth *prueth)
183 {
184 	struct icssg_firmwares *firmwares;
185 	struct device *dev = prueth->dev;
186 	int ret, slice;
187 
188 	if (prueth->is_switch_mode)
189 		firmwares = icssg_switch_firmwares;
190 	else if (prueth->is_hsr_offload_mode)
191 		firmwares = icssg_hsr_firmwares;
192 	else
193 		firmwares = icssg_emac_firmwares;
194 
195 	for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
196 		ret = prueth_start(prueth->pru[slice], firmwares[slice].pru);
197 		if (ret) {
198 			dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
199 			goto unwind_slices;
200 		}
201 
202 		ret = prueth_start(prueth->rtu[slice], firmwares[slice].rtu);
203 		if (ret) {
204 			dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
205 			rproc_shutdown(prueth->pru[slice]);
206 			goto unwind_slices;
207 		}
208 
209 		ret = prueth_start(prueth->txpru[slice], firmwares[slice].txpru);
210 		if (ret) {
211 			dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
212 			rproc_shutdown(prueth->rtu[slice]);
213 			rproc_shutdown(prueth->pru[slice]);
214 			goto unwind_slices;
215 		}
216 	}
217 
218 	return 0;
219 
220 unwind_slices:
221 	while (--slice >= 0) {
222 		prueth_shutdown(prueth->txpru[slice]);
223 		prueth_shutdown(prueth->rtu[slice]);
224 		prueth_shutdown(prueth->pru[slice]);
225 	}
226 
227 	return ret;
228 }
229 
230 static void prueth_emac_stop(struct prueth *prueth)
231 {
232 	int slice;
233 
234 	for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
235 		prueth_shutdown(prueth->txpru[slice]);
236 		prueth_shutdown(prueth->rtu[slice]);
237 		prueth_shutdown(prueth->pru[slice]);
238 	}
239 }
240 
241 static int prueth_emac_common_start(struct prueth *prueth)
242 {
243 	struct prueth_emac *emac;
244 	int ret = 0;
245 	int slice;
246 
247 	if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
248 		return -EINVAL;
249 
250 	/* clear SMEM and MSMC settings for all slices */
251 	memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
252 	memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
253 
254 	icssg_class_default(prueth->miig_rt, ICSS_SLICE0, 0, false);
255 	icssg_class_default(prueth->miig_rt, ICSS_SLICE1, 0, false);
256 
257 	if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
258 		icssg_init_fw_offload_mode(prueth);
259 	else
260 		icssg_init_emac_mode(prueth);
261 
262 	for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
263 		emac = prueth->emac[slice];
264 		if (!emac)
265 			continue;
266 		ret = icssg_config(prueth, emac, slice);
267 		if (ret)
268 			goto disable_class;
269 	}
270 
271 	ret = prueth_emac_start(prueth);
272 	if (ret)
273 		goto disable_class;
274 
275 	emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
276 	       prueth->emac[ICSS_SLICE1];
277 	ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
278 			    emac, IEP_DEFAULT_CYCLE_TIME_NS);
279 	if (ret) {
280 		dev_err(prueth->dev, "Failed to initialize IEP module\n");
281 		goto stop_pruss;
282 	}
283 
284 	return 0;
285 
286 stop_pruss:
287 	prueth_emac_stop(prueth);
288 
289 disable_class:
290 	icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
291 	icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);
292 
293 	return ret;
294 }
295 
296 static int prueth_emac_common_stop(struct prueth *prueth)
297 {
298 	struct prueth_emac *emac;
299 
300 	if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
301 		return -EINVAL;
302 
303 	icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
304 	icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);
305 
306 	prueth_emac_stop(prueth);
307 
308 	emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
309 	       prueth->emac[ICSS_SLICE1];
310 	icss_iep_exit(emac->iep);
311 
312 	return 0;
313 }
314 
315 /* Called back by the PHY layer when there is a change in the link state of the hw port */
316 static void emac_adjust_link(struct net_device *ndev)
317 {
318 	struct prueth_emac *emac = netdev_priv(ndev);
319 	struct phy_device *phydev = ndev->phydev;
320 	struct prueth *prueth = emac->prueth;
321 	bool new_state = false;
322 	unsigned long flags;
323 
324 	if (phydev->link) {
325 		/* check the mode of operation - full/half duplex */
326 		if (phydev->duplex != emac->duplex) {
327 			new_state = true;
328 			emac->duplex = phydev->duplex;
329 		}
330 		if (phydev->speed != emac->speed) {
331 			new_state = true;
332 			emac->speed = phydev->speed;
333 		}
334 		if (!emac->link) {
335 			new_state = true;
336 			emac->link = 1;
337 		}
338 	} else if (emac->link) {
339 		new_state = true;
340 		emac->link = 0;
341 
342 		/* f/w should support 100 & 1000 */
343 		emac->speed = SPEED_1000;
344 
345 		/* half duplex may not be supported by f/w */
346 		emac->duplex = DUPLEX_FULL;
347 	}
348 
349 	if (new_state) {
350 		phy_print_status(phydev);
351 
352 		/* update RGMII and MII configuration based on PHY negotiated
353 		 * values
354 		 */
355 		if (emac->link) {
356 			if (emac->duplex == DUPLEX_HALF)
357 				icssg_config_half_duplex(emac);
358 			/* Set the RGMII cfg for gig en and full duplex */
359 			icssg_update_rgmii_cfg(prueth->miig_rt, emac);
360 
361 			/* update the Tx IPG based on 100M/1G speed */
362 			spin_lock_irqsave(&emac->lock, flags);
363 			icssg_config_ipg(emac);
364 			spin_unlock_irqrestore(&emac->lock, flags);
365 			icssg_config_set_speed(emac);
366 			icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
367 
368 		} else {
369 			icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
370 		}
371 	}
372 
373 	if (emac->link) {
374 		/* reactivate the transmit queue */
375 		netif_tx_wake_all_queues(ndev);
376 	} else {
377 		netif_tx_stop_all_queues(ndev);
378 		prueth_cleanup_tx_ts(emac);
379 	}
380 }
381 
382 static enum hrtimer_restart emac_rx_timer_callback(struct hrtimer *timer)
383 {
384 	struct prueth_emac *emac =
385 			container_of(timer, struct prueth_emac, rx_hrtimer);
386 	int rx_flow = PRUETH_RX_FLOW_DATA;
387 
388 	enable_irq(emac->rx_chns.irq[rx_flow]);
389 	return HRTIMER_NORESTART;
390 }
391 
392 static int emac_phy_connect(struct prueth_emac *emac)
393 {
394 	struct prueth *prueth = emac->prueth;
395 	struct net_device *ndev = emac->ndev;
396 	/* connect PHY */
397 	ndev->phydev = of_phy_connect(emac->ndev, emac->phy_node,
398 				      &emac_adjust_link, 0,
399 				      emac->phy_if);
400 	if (!ndev->phydev) {
401 		dev_err(prueth->dev, "couldn't connect to phy %s\n",
402 			emac->phy_node->full_name);
403 		return -ENODEV;
404 	}
405 
406 	if (!emac->half_duplex) {
407 		dev_dbg(prueth->dev, "half duplex mode is not supported\n");
408 		phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
409 		phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
410 	}
411 
412 	/* remove unsupported modes */
413 	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
414 	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
415 	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
416 
417 	if (emac->phy_if == PHY_INTERFACE_MODE_MII)
418 		phy_set_max_speed(ndev->phydev, SPEED_100);
419 
420 	return 0;
421 }
422 
423 static u64 prueth_iep_gettime(void *clockops_data, struct ptp_system_timestamp *sts)
424 {
425 	u32 hi_rollover_count, hi_rollover_count_r;
426 	struct prueth_emac *emac = clockops_data;
427 	struct prueth *prueth = emac->prueth;
428 	void __iomem *fw_hi_r_count_addr;
429 	void __iomem *fw_count_hi_addr;
430 	u32 iepcount_hi, iepcount_hi_r;
431 	unsigned long flags;
432 	u32 iepcount_lo;
433 	u64 ts = 0;
434 
435 	fw_count_hi_addr = prueth->shram.va + TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET;
436 	fw_hi_r_count_addr = prueth->shram.va + TIMESYNC_FW_WC_HI_ROLLOVER_COUNT_OFFSET;
437 
438 	local_irq_save(flags);
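	/* Sample the software-maintained high words before and after reading
	 * the IEP low count; if either changed, a rollover raced with the
	 * read and the whole sample is retried.
	 */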
439 	do {
440 		iepcount_hi = icss_iep_get_count_hi(emac->iep);
441 		iepcount_hi += readl(fw_count_hi_addr);
442 		hi_rollover_count = readl(fw_hi_r_count_addr);
443 		ptp_read_system_prets(sts);
444 		iepcount_lo = icss_iep_get_count_low(emac->iep);
445 		ptp_read_system_postts(sts);
446 
447 		iepcount_hi_r = icss_iep_get_count_hi(emac->iep);
448 		iepcount_hi_r += readl(fw_count_hi_addr);
449 		hi_rollover_count_r = readl(fw_hi_r_count_addr);
450 	} while ((iepcount_hi_r != iepcount_hi) ||
451 		 (hi_rollover_count != hi_rollover_count_r));
452 	local_irq_restore(flags);
453 
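	/* Reconstruct the 64-bit time: the rollover counter extends the high
	 * cycle count above bit 23, the combined cycle count is scaled by the
	 * nominal IEP cycle time, and the in-cycle count is added in.
	 */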
454 	ts = ((u64)hi_rollover_count) << 23 | iepcount_hi;
455 	ts = ts * (u64)IEP_DEFAULT_CYCLE_TIME_NS + iepcount_lo;
456 
457 	return ts;
458 }
459 
460 static void prueth_iep_settime(void *clockops_data, u64 ns)
461 {
462 	struct icssg_setclock_desc __iomem *sc_descp;
463 	struct prueth_emac *emac = clockops_data;
464 	struct icssg_setclock_desc sc_desc;
465 	u64 cyclecount;
466 	u32 cycletime;
467 	int timeout;
468 
469 	sc_descp = emac->prueth->shram.va + TIMESYNC_FW_WC_SETCLOCK_DESC_OFFSET;
470 
471 	cycletime = IEP_DEFAULT_CYCLE_TIME_NS;
472 	cyclecount = ns / cycletime;
473 
474 	memset(&sc_desc, 0, sizeof(sc_desc));
475 	sc_desc.margin = cycletime - 1000;
476 	sc_desc.cyclecounter0_set = cyclecount & GENMASK(31, 0);
477 	sc_desc.cyclecounter1_set = (cyclecount & GENMASK(63, 32)) >> 32;
478 	sc_desc.iepcount_set = ns % cycletime;
479 	/* Count from 0 to (cycle time) - emac->iep->def_inc */
480 	sc_desc.CMP0_current = cycletime - emac->iep->def_inc;
481 
482 	memcpy_toio(sc_descp, &sc_desc, sizeof(sc_desc));
483 
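	/* The request byte signals the firmware to pick up the descriptor just
	 * written; completion is reported via the acknowledgment byte polled
	 * below.
	 */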
484 	writeb(1, &sc_descp->request);
485 
486 	timeout = 5;	/* fw should take 2-3 ms */
487 	while (timeout--) {
488 		if (readb(&sc_descp->acknowledgment))
489 			return;
490 
491 		usleep_range(500, 1000);
492 	}
493 
494 	dev_err(emac->prueth->dev, "settime timeout\n");
495 }
496 
497 static int prueth_perout_enable(void *clockops_data,
498 				struct ptp_perout_request *req, int on,
499 				u64 *cmp)
500 {
501 	struct prueth_emac *emac = clockops_data;
502 	u32 reduction_factor = 0, offset = 0;
503 	struct timespec64 ts;
504 	u64 current_cycle;
505 	u64 start_offset;
506 	u64 ns_period;
507 
508 	if (!on)
509 		return 0;
510 
511 	/* Any firmware specific stuff for PPS/PEROUT handling */
512 	ts.tv_sec = req->period.sec;
513 	ts.tv_nsec = req->period.nsec;
514 	ns_period = timespec64_to_ns(&ts);
515 
516 	/* f/w doesn't support period less than cycle time */
517 	if (ns_period < IEP_DEFAULT_CYCLE_TIME_NS)
518 		return -ENXIO;
519 
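	/* Express the requested period as a whole number of IEP cycles
	 * (reduction factor) plus a remainder that becomes the in-cycle CMP
	 * offset, clamped below so the CMP event stays within a cycle.
	 */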
520 	reduction_factor = ns_period / IEP_DEFAULT_CYCLE_TIME_NS;
521 	offset = ns_period % IEP_DEFAULT_CYCLE_TIME_NS;
522 
523 	/* f/w requires at least 1uS within a cycle so CMP
524 	 * can trigger after SYNC is enabled
525 	 */
526 	if (offset < 5 * NSEC_PER_USEC)
527 		offset = 5 * NSEC_PER_USEC;
528 
529 	/* if offset is close to cycle time then we will miss
530 	 * the CMP event for last tick when IEP rolls over.
531 	 * In normal mode, IEP tick is 4ns.
532 	 * In slow compensation it could be 0ns or 8ns at
533 	 * every slow compensation cycle.
534 	 */
535 	if (offset > IEP_DEFAULT_CYCLE_TIME_NS - 8)
536 		offset = IEP_DEFAULT_CYCLE_TIME_NS - 8;
537 
538 	/* we're in shadow mode, so we need to set the upper 32 bits */
539 	*cmp = (u64)offset << 32;
540 
541 	writel(reduction_factor, emac->prueth->shram.va +
542 		TIMESYNC_FW_WC_SYNCOUT_REDUCTION_FACTOR_OFFSET);
543 
544 	current_cycle = icssg_read_time(emac->prueth->shram.va +
545 					TIMESYNC_FW_WC_CYCLECOUNT_OFFSET);
546 
547 	/* Round the current_cycle count up to the next second */
548 	start_offset = roundup(current_cycle, MSEC_PER_SEC);
549 
550 	hi_lo_writeq(start_offset, emac->prueth->shram.va +
551 		     TIMESYNC_FW_WC_SYNCOUT_START_TIME_CYCLECOUNT_OFFSET);
552 
553 	return 0;
554 }
555 
556 const struct icss_iep_clockops prueth_iep_clockops = {
557 	.settime = prueth_iep_settime,
558 	.gettime = prueth_iep_gettime,
559 	.perout_enable = prueth_perout_enable,
560 };
561 
562 static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr)
563 {
564 	struct net_device *real_dev;
565 	struct prueth_emac *emac;
566 	int port_mask;
567 	u8 vlan_id;
568 
569 	vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC;
570 	real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
571 	emac = netdev_priv(real_dev);
572 
573 	port_mask = BIT(emac->port_id) | icssg_fdb_lookup(emac, addr, vlan_id);
574 	icssg_fdb_add_del(emac, addr, vlan_id, port_mask, true);
575 	icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, true);
576 
577 	return 0;
578 }
579 
580 static int icssg_prueth_del_mcast(struct net_device *ndev, const u8 *addr)
581 {
582 	struct net_device *real_dev;
583 	struct prueth_emac *emac;
584 	int other_port_mask;
585 	int port_mask;
586 	u8 vlan_id;
587 
588 	vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC;
589 	real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
590 	emac = netdev_priv(real_dev);
591 
592 	port_mask = BIT(emac->port_id);
593 	other_port_mask = port_mask ^ icssg_fdb_lookup(emac, addr, vlan_id);
594 
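	/* Remove this port from the FDB/VLAN entry; if other ports still
	 * reference the address, re-add the entry restricted to those ports.
	 */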
595 	icssg_fdb_add_del(emac, addr, vlan_id, port_mask, false);
596 	icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, false);
597 
598 	if (other_port_mask) {
599 		icssg_fdb_add_del(emac, addr, vlan_id, other_port_mask, true);
600 		icssg_vtbl_modify(emac, vlan_id, other_port_mask,
601 				  other_port_mask, true);
602 	}
603 
604 	return 0;
605 }
606 
607 static void icssg_prueth_hsr_fdb_add_del(struct prueth_emac *emac,
608 					 const u8 *addr, u8 vid, bool add)
609 {
610 	icssg_fdb_add_del(emac, addr, vid,
611 			  ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
612 			  ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
613 			  ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
614 			  ICSSG_FDB_ENTRY_BLOCK, add);
615 
616 	if (add)
617 		icssg_vtbl_modify(emac, vid, BIT(emac->port_id),
618 				  BIT(emac->port_id), add);
619 }
620 
621 static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
622 {
623 	struct net_device *real_dev;
624 	struct prueth_emac *emac;
625 	u8 vlan_id, i;
626 
627 	vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_HSR;
628 	real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
629 
630 	if (is_hsr_master(real_dev)) {
631 		for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
632 			emac = netdev_priv(hsr_get_port_ndev(real_dev, i));
633 			if (!emac)
634 				return -EINVAL;
635 			icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
636 						     true);
637 		}
638 	} else {
639 		emac = netdev_priv(real_dev);
640 		icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, true);
641 	}
642 
643 	return 0;
644 }
645 
646 static int icssg_prueth_hsr_del_mcast(struct net_device *ndev, const u8 *addr)
647 {
648 	struct net_device *real_dev;
649 	struct prueth_emac *emac;
650 	u8 vlan_id, i;
651 
652 	vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_HSR;
653 	real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
654 
655 	if (is_hsr_master(real_dev)) {
656 		for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
657 			emac = netdev_priv(hsr_get_port_ndev(real_dev, i));
658 			if (!emac)
659 				return -EINVAL;
660 			icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
661 						     false);
662 		}
663 	} else {
664 		emac = netdev_priv(real_dev);
665 		icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, false);
666 	}
667 
668 	return 0;
669 }
670 
671 static int icssg_update_vlan_mcast(struct net_device *vdev, int vid,
672 				   void *args)
673 {
674 	struct prueth_emac *emac = args;
675 
676 	if (!vdev || !vid)
677 		return 0;
678 
679 	netif_addr_lock_bh(vdev);
680 	__hw_addr_sync_multiple(&emac->vlan_mcast_list[vid], &vdev->mc,
681 				vdev->addr_len);
682 	netif_addr_unlock_bh(vdev);
683 
684 	if (emac->prueth->is_hsr_offload_mode)
685 		__hw_addr_sync_dev(&emac->vlan_mcast_list[vid], vdev,
686 				   icssg_prueth_hsr_add_mcast,
687 				   icssg_prueth_hsr_del_mcast);
688 	else
689 		__hw_addr_sync_dev(&emac->vlan_mcast_list[vid], vdev,
690 				   icssg_prueth_add_mcast,
691 				   icssg_prueth_del_mcast);
692 
693 	return 0;
694 }
695 
696 /**
697  * emac_ndo_open - EMAC device open
698  * @ndev: network adapter device
699  *
700  * Called when system wants to start the interface.
701  *
702  * Return: 0 for a successful open, or appropriate error code
703  */
704 static int emac_ndo_open(struct net_device *ndev)
705 {
706 	struct prueth_emac *emac = netdev_priv(ndev);
707 	int ret, i, num_data_chn = emac->tx_ch_num;
708 	struct icssg_flow_cfg __iomem *flow_cfg;
709 	struct prueth *prueth = emac->prueth;
710 	int slice = prueth_emac_slice(emac);
711 	struct device *dev = prueth->dev;
712 	int max_rx_flows;
713 	int rx_flow;
714 
715 	/* set h/w MAC as user might have re-configured */
716 	/* set the h/w MAC address as the user might have re-configured it */
717 
718 	icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
719 	icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
720 
721 	/* Notify the stack of the actual queue counts. */
722 	ret = netif_set_real_num_tx_queues(ndev, num_data_chn);
723 	if (ret) {
724 		dev_err(dev, "cannot set real number of tx queues\n");
725 		return ret;
726 	}
727 
728 	init_completion(&emac->cmd_complete);
729 	ret = prueth_init_tx_chns(emac);
730 	if (ret) {
731 		dev_err(dev, "failed to init tx channel: %d\n", ret);
732 		return ret;
733 	}
734 
735 	max_rx_flows = PRUETH_MAX_RX_FLOWS;
736 	ret = prueth_init_rx_chns(emac, &emac->rx_chns, "rx",
737 				  max_rx_flows, PRUETH_MAX_RX_DESC);
738 	if (ret) {
739 		dev_err(dev, "failed to init rx channel: %d\n", ret);
740 		goto cleanup_tx;
741 	}
742 
743 	ret = prueth_ndev_add_tx_napi(emac);
744 	if (ret)
745 		goto cleanup_rx;
746 
747 	/* we use only the highest priority flow for now i.e. @irq[3] */
748 	rx_flow = PRUETH_RX_FLOW_DATA;
749 	ret = request_irq(emac->rx_chns.irq[rx_flow], prueth_rx_irq,
750 			  IRQF_TRIGGER_HIGH, dev_name(dev), emac);
751 	if (ret) {
752 		dev_err(dev, "unable to request RX IRQ\n");
753 		goto cleanup_napi;
754 	}
755 
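	/* Shared firmware, classifier and IEP setup is done only when the
	 * first port is opened; later ports reuse it (see emacs_initialized).
	 */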
756 	if (!prueth->emacs_initialized) {
757 		ret = prueth_emac_common_start(prueth);
758 		if (ret)
759 			goto free_rx_irq;
760 	}
761 
762 	flow_cfg = emac->dram.va + ICSSG_CONFIG_OFFSET + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET;
763 	writew(emac->rx_flow_id_base, &flow_cfg->rx_base_flow);
764 	ret = emac_fdb_flow_id_updated(emac);
765 
766 	if (ret) {
767 		netdev_err(ndev, "Failed to update Rx Flow ID %d", ret);
768 		goto stop;
769 	}
770 
771 	icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
772 
773 	ret = request_threaded_irq(emac->tx_ts_irq, NULL, prueth_tx_ts_irq,
774 				   IRQF_ONESHOT, dev_name(dev), emac);
775 	if (ret)
776 		goto stop;
777 
778 	/* Prepare RX */
779 	ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
780 	if (ret)
781 		goto free_tx_ts_irq;
782 
783 	ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
784 	if (ret)
785 		goto reset_rx_chn;
786 
787 	for (i = 0; i < emac->tx_ch_num; i++) {
788 		ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
789 		if (ret)
790 			goto reset_tx_chan;
791 	}
792 
793 	/* Enable NAPI in Tx and Rx direction */
794 	for (i = 0; i < emac->tx_ch_num; i++)
795 		napi_enable(&emac->tx_chns[i].napi_tx);
796 	napi_enable(&emac->napi_rx);
797 
798 	/* start PHY */
799 	phy_start(ndev->phydev);
800 
801 	prueth->emacs_initialized++;
802 
803 	queue_work(system_long_wq, &emac->stats_work.work);
804 
805 	return 0;
806 
807 reset_tx_chan:
808 	/* Since the interface is not yet up, there wouldn't be
809 	 * any SKB for completion. So pass false for free_skb.
810 	 */
811 	prueth_reset_tx_chan(emac, i, false);
812 reset_rx_chn:
813 	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
814 free_tx_ts_irq:
815 	free_irq(emac->tx_ts_irq, emac);
816 stop:
817 	if (!prueth->emacs_initialized)
818 		prueth_emac_common_stop(prueth);
819 free_rx_irq:
820 	free_irq(emac->rx_chns.irq[rx_flow], emac);
821 cleanup_napi:
822 	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
823 cleanup_rx:
824 	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
825 cleanup_tx:
826 	prueth_cleanup_tx_chns(emac);
827 
828 	return ret;
829 }
830 
831 /**
832  * emac_ndo_stop - EMAC device stop
833  * @ndev: network adapter device
834  *
835  * Called when system wants to stop or down the interface.
836  *
837  * Return: Always 0 (Success)
838  */
839 static int emac_ndo_stop(struct net_device *ndev)
840 {
841 	struct prueth_emac *emac = netdev_priv(ndev);
842 	struct prueth *prueth = emac->prueth;
843 	int rx_flow = PRUETH_RX_FLOW_DATA;
844 	int max_rx_flows;
845 	int ret, i;
846 
847 	/* inform the upper layers. */
848 	netif_tx_stop_all_queues(ndev);
849 
850 	/* block packets from wire */
851 	if (ndev->phydev)
852 		phy_stop(ndev->phydev);
853 
854 	if (emac->prueth->is_hsr_offload_mode)
855 		__dev_mc_unsync(ndev, icssg_prueth_hsr_del_mcast);
856 	else
857 		__dev_mc_unsync(ndev, icssg_prueth_del_mcast);
858 
859 	atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
860 	/* ensure new tdown_cnt value is visible */
861 	smp_mb__after_atomic();
862 	/* tear down and disable UDMA channels */
863 	reinit_completion(&emac->tdown_complete);
864 	for (i = 0; i < emac->tx_ch_num; i++)
865 		k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);
866 
867 	ret = wait_for_completion_timeout(&emac->tdown_complete,
868 					  msecs_to_jiffies(1000));
869 	if (!ret)
870 		netdev_err(ndev, "tx teardown timeout\n");
871 
872 	prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
873 	for (i = 0; i < emac->tx_ch_num; i++) {
874 		napi_disable(&emac->tx_chns[i].napi_tx);
875 		hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer);
876 	}
877 
878 	max_rx_flows = PRUETH_MAX_RX_FLOWS;
879 	k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
880 
881 	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
882 
883 	napi_disable(&emac->napi_rx);
884 	hrtimer_cancel(&emac->rx_hrtimer);
885 
886 	cancel_work_sync(&emac->rx_mode_work);
887 
888 	/* Destroying the queued work in ndo_stop() */
889 	cancel_delayed_work_sync(&emac->stats_work);
890 
891 	/* stop PRUs */
892 	if (prueth->emacs_initialized == 1)
893 		prueth_emac_common_stop(prueth);
894 
895 	free_irq(emac->tx_ts_irq, emac);
896 
897 	free_irq(emac->rx_chns.irq[rx_flow], emac);
898 	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
899 
900 	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
901 	prueth_cleanup_tx_chns(emac);
902 
903 	prueth->emacs_initialized--;
904 
905 	return 0;
906 }
907 
908 static void emac_ndo_set_rx_mode_work(struct work_struct *work)
909 {
910 	struct prueth_emac *emac = container_of(work, struct prueth_emac, rx_mode_work);
911 	struct net_device *ndev = emac->ndev;
912 	bool promisc, allmulti;
913 
914 	if (!netif_running(ndev))
915 		return;
916 
917 	promisc = ndev->flags & IFF_PROMISC;
918 	allmulti = ndev->flags & IFF_ALLMULTI;
919 	icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_DISABLE);
920 	icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_DISABLE);
921 
922 	if (promisc) {
923 		icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_ENABLE);
924 		icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
925 		return;
926 	}
927 
928 	if (allmulti) {
929 		icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
930 		return;
931 	}
932 
933 	if (emac->prueth->is_hsr_offload_mode) {
934 		__dev_mc_sync(ndev, icssg_prueth_hsr_add_mcast,
935 			      icssg_prueth_hsr_del_mcast);
936 		if (rtnl_trylock()) {
937 			vlan_for_each(emac->prueth->hsr_dev,
938 				      icssg_update_vlan_mcast, emac);
939 			rtnl_unlock();
940 		}
941 	} else {
942 		__dev_mc_sync(ndev, icssg_prueth_add_mcast,
943 			      icssg_prueth_del_mcast);
944 		if (rtnl_trylock()) {
945 			vlan_for_each(ndev, icssg_update_vlan_mcast, emac);
946 			rtnl_unlock();
947 		}
948 	}
949 }
950 
951 /**
952  * emac_ndo_set_rx_mode - EMAC set receive mode function
953  * @ndev: The EMAC network adapter
954  *
955  * Called when system wants to set the receive mode of the device.
956  *
957  */
958 static void emac_ndo_set_rx_mode(struct net_device *ndev)
959 {
960 	struct prueth_emac *emac = netdev_priv(ndev);
961 
962 	queue_work(emac->cmd_wq, &emac->rx_mode_work);
963 }
964 
965 static netdev_features_t emac_ndo_fix_features(struct net_device *ndev,
966 					       netdev_features_t features)
967 {
968 	/* hsr tag insertion offload and hsr dup offload are tightly coupled in
969 	 * firmware implementation. Both these features need to be enabled /
970 	 * disabled together.
971 	 */
972 	if (!(ndev->features & (NETIF_F_HW_HSR_DUP | NETIF_F_HW_HSR_TAG_INS)))
973 		if ((features & NETIF_F_HW_HSR_DUP) ||
974 		    (features & NETIF_F_HW_HSR_TAG_INS))
975 			features |= NETIF_F_HW_HSR_DUP |
976 				    NETIF_F_HW_HSR_TAG_INS;
977 
978 	if ((ndev->features & NETIF_F_HW_HSR_DUP) ||
979 	    (ndev->features & NETIF_F_HW_HSR_TAG_INS))
980 		if (!(features & NETIF_F_HW_HSR_DUP) ||
981 		    !(features & NETIF_F_HW_HSR_TAG_INS))
982 			features &= ~(NETIF_F_HW_HSR_DUP |
983 				      NETIF_F_HW_HSR_TAG_INS);
984 
985 	return features;
986 }
987 
988 static int emac_ndo_vlan_rx_add_vid(struct net_device *ndev,
989 				    __be16 proto, u16 vid)
990 {
991 	struct prueth_emac *emac = netdev_priv(ndev);
992 	struct prueth *prueth = emac->prueth;
993 	int port_mask = BIT(emac->port_id);
994 	int untag_mask = 0;
995 
996 	if (prueth->is_hsr_offload_mode)
997 		port_mask |= BIT(PRUETH_PORT_HOST);
998 
999 	__hw_addr_init(&emac->vlan_mcast_list[vid]);
1000 	netdev_dbg(emac->ndev, "VID add vid:%u port_mask:%X untag_mask %X\n",
1001 		   vid, port_mask, untag_mask);
1002 
1003 	icssg_vtbl_modify(emac, vid, port_mask, untag_mask, true);
1004 	icssg_set_pvid(emac->prueth, vid, emac->port_id);
1005 
1006 	return 0;
1007 }
1008 
1009 static int emac_ndo_vlan_rx_del_vid(struct net_device *ndev,
1010 				    __be16 proto, u16 vid)
1011 {
1012 	struct prueth_emac *emac = netdev_priv(ndev);
1013 	struct prueth *prueth = emac->prueth;
1014 	int port_mask = BIT(emac->port_id);
1015 	int untag_mask = 0;
1016 
1017 	if (prueth->is_hsr_offload_mode)
1018 		port_mask = BIT(PRUETH_PORT_HOST);
1019 
1020 	netdev_dbg(emac->ndev, "VID del vid:%u port_mask:%X untag_mask  %X\n",
1021 		   vid, port_mask, untag_mask);
1022 	icssg_vtbl_modify(emac, vid, port_mask, untag_mask, false);
1023 
1024 	return 0;
1025 }
1026 
1027 static const struct net_device_ops emac_netdev_ops = {
1028 	.ndo_open = emac_ndo_open,
1029 	.ndo_stop = emac_ndo_stop,
1030 	.ndo_start_xmit = icssg_ndo_start_xmit,
1031 	.ndo_set_mac_address = eth_mac_addr,
1032 	.ndo_validate_addr = eth_validate_addr,
1033 	.ndo_tx_timeout = icssg_ndo_tx_timeout,
1034 	.ndo_set_rx_mode = emac_ndo_set_rx_mode,
1035 	.ndo_eth_ioctl = icssg_ndo_ioctl,
1036 	.ndo_get_stats64 = icssg_ndo_get_stats64,
1037 	.ndo_get_phys_port_name = icssg_ndo_get_phys_port_name,
1038 	.ndo_fix_features = emac_ndo_fix_features,
1039 	.ndo_vlan_rx_add_vid = emac_ndo_vlan_rx_add_vid,
1040 	.ndo_vlan_rx_kill_vid = emac_ndo_vlan_rx_del_vid,
1041 };
1042 
1043 static int prueth_netdev_init(struct prueth *prueth,
1044 			      struct device_node *eth_node)
1045 {
1046 	int ret, num_tx_chn = PRUETH_MAX_TX_QUEUES;
1047 	struct prueth_emac *emac;
1048 	struct net_device *ndev;
1049 	enum prueth_port port;
1050 	const char *irq_name;
1051 	enum prueth_mac mac;
1052 
1053 	port = prueth_node_port(eth_node);
1054 	if (port == PRUETH_PORT_INVALID)
1055 		return -EINVAL;
1056 
1057 	mac = prueth_node_mac(eth_node);
1058 	if (mac == PRUETH_MAC_INVALID)
1059 		return -EINVAL;
1060 
1061 	ndev = alloc_etherdev_mq(sizeof(*emac), num_tx_chn);
1062 	if (!ndev)
1063 		return -ENOMEM;
1064 
1065 	emac = netdev_priv(ndev);
1066 	emac->prueth = prueth;
1067 	emac->ndev = ndev;
1068 	emac->port_id = port;
1069 	emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
1070 	if (!emac->cmd_wq) {
1071 		ret = -ENOMEM;
1072 		goto free_ndev;
1073 	}
1074 	INIT_WORK(&emac->rx_mode_work, emac_ndo_set_rx_mode_work);
1075 
1076 	INIT_DELAYED_WORK(&emac->stats_work, icssg_stats_work_handler);
1077 
1078 	ret = pruss_request_mem_region(prueth->pruss,
1079 				       port == PRUETH_PORT_MII0 ?
1080 				       PRUSS_MEM_DRAM0 : PRUSS_MEM_DRAM1,
1081 				       &emac->dram);
1082 	if (ret) {
1083 		dev_err(prueth->dev, "unable to get DRAM: %d\n", ret);
1084 		ret = -ENOMEM;
1085 		goto free_wq;
1086 	}
1087 
1088 	emac->tx_ch_num = 1;
1089 
1090 	irq_name = "tx_ts0";
1091 	if (emac->port_id == PRUETH_PORT_MII1)
1092 		irq_name = "tx_ts1";
1093 	emac->tx_ts_irq = platform_get_irq_byname_optional(prueth->pdev, irq_name);
1094 	if (emac->tx_ts_irq < 0) {
1095 		ret = dev_err_probe(prueth->dev, emac->tx_ts_irq, "could not get tx_ts_irq\n");
1096 		goto free;
1097 	}
1098 
1099 	SET_NETDEV_DEV(ndev, prueth->dev);
1100 	spin_lock_init(&emac->lock);
1101 	mutex_init(&emac->cmd_lock);
1102 
1103 	emac->phy_node = of_parse_phandle(eth_node, "phy-handle", 0);
1104 	if (!emac->phy_node && !of_phy_is_fixed_link(eth_node)) {
1105 		dev_err(prueth->dev, "couldn't find phy-handle\n");
1106 		ret = -ENODEV;
1107 		goto free;
1108 	} else if (of_phy_is_fixed_link(eth_node)) {
1109 		ret = of_phy_register_fixed_link(eth_node);
1110 		if (ret) {
1111 			ret = dev_err_probe(prueth->dev, ret,
1112 					    "failed to register fixed-link phy\n");
1113 			goto free;
1114 		}
1115 
1116 		emac->phy_node = eth_node;
1117 	}
1118 
1119 	ret = of_get_phy_mode(eth_node, &emac->phy_if);
1120 	if (ret) {
1121 		dev_err(prueth->dev, "could not get phy-mode property\n");
1122 		goto free;
1123 	}
1124 
1125 	if (emac->phy_if != PHY_INTERFACE_MODE_MII &&
1126 	    !phy_interface_mode_is_rgmii(emac->phy_if)) {
1127 		dev_err(prueth->dev, "PHY mode unsupported %s\n", phy_modes(emac->phy_if));
1128 		ret = -EINVAL;
1129 		goto free;
1130 	}
1131 
1132 	/* AM65 SR2.0 has TX Internal delay always enabled by hardware
1133 	 * and it is not possible to disable TX Internal delay. The below
1134 	 * switch case block describes how we handle different phy modes
1135 	 * based on hardware restriction.
1136 	 */
1137 	switch (emac->phy_if) {
1138 	case PHY_INTERFACE_MODE_RGMII_ID:
1139 		emac->phy_if = PHY_INTERFACE_MODE_RGMII_RXID;
1140 		break;
1141 	case PHY_INTERFACE_MODE_RGMII_TXID:
1142 		emac->phy_if = PHY_INTERFACE_MODE_RGMII;
1143 		break;
1144 	case PHY_INTERFACE_MODE_RGMII:
1145 	case PHY_INTERFACE_MODE_RGMII_RXID:
1146 		dev_err(prueth->dev, "RGMII mode without TX delay is not supported");
1147 		ret = -EINVAL;
1148 		goto free;
1149 	default:
1150 		break;
1151 	}
1152 
1153 	/* get mac address from DT and set private and netdev addr */
1154 	ret = of_get_ethdev_address(eth_node, ndev);
1155 	if (!is_valid_ether_addr(ndev->dev_addr)) {
1156 		eth_hw_addr_random(ndev);
1157 		dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n",
1158 			 port, ndev->dev_addr);
1159 	}
1160 	ether_addr_copy(emac->mac_addr, ndev->dev_addr);
1161 
1162 	ndev->dev.of_node = eth_node;
1163 	ndev->min_mtu = PRUETH_MIN_PKT_SIZE;
1164 	ndev->max_mtu = PRUETH_MAX_MTU;
1165 	ndev->netdev_ops = &emac_netdev_ops;
1166 	ndev->ethtool_ops = &icssg_ethtool_ops;
1167 	ndev->hw_features = NETIF_F_SG;
1168 	ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
1169 	ndev->hw_features |= NETIF_PRUETH_HSR_OFFLOAD_FEATURES;
1170 
1171 	netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
1172 	hrtimer_init(&emac->rx_hrtimer, CLOCK_MONOTONIC,
1173 		     HRTIMER_MODE_REL_PINNED);
1174 	emac->rx_hrtimer.function = &emac_rx_timer_callback;
1175 	prueth->emac[mac] = emac;
1176 
1177 	return 0;
1178 
1179 free:
1180 	pruss_release_mem_region(prueth->pruss, &emac->dram);
1181 free_wq:
1182 	destroy_workqueue(emac->cmd_wq);
1183 free_ndev:
1184 	emac->ndev = NULL;
1185 	prueth->emac[mac] = NULL;
1186 	free_netdev(ndev);
1187 
1188 	return ret;
1189 }
1190 
1191 bool prueth_dev_check(const struct net_device *ndev)
1192 {
1193 	if (ndev->netdev_ops == &emac_netdev_ops && netif_running(ndev)) {
1194 		struct prueth_emac *emac = netdev_priv(ndev);
1195 
1196 		return emac->prueth->is_switch_mode;
1197 	}
1198 
1199 	return false;
1200 }
1201 
1202 static void prueth_offload_fwd_mark_update(struct prueth *prueth)
1203 {
1204 	int set_val = 0;
1205 	int i;
1206 
1207 	if (prueth->br_members == (BIT(PRUETH_PORT_MII0) | BIT(PRUETH_PORT_MII1)))
1208 		set_val = 1;
1209 
1210 	dev_dbg(prueth->dev, "set offload_fwd_mark %d\n", set_val);
1211 
1212 	for (i = PRUETH_MAC0; i < PRUETH_NUM_MACS; i++) {
1213 		struct prueth_emac *emac = prueth->emac[i];
1214 
1215 		if (!emac || !emac->ndev)
1216 			continue;
1217 
1218 		emac->offload_fwd_mark = set_val;
1219 	}
1220 }
1221 
1222 static int prueth_emac_restart(struct prueth *prueth)
1223 {
1224 	struct prueth_emac *emac0 = prueth->emac[PRUETH_MAC0];
1225 	struct prueth_emac *emac1 = prueth->emac[PRUETH_MAC1];
1226 	int ret;
1227 
1228 	/* Detach the net_device for both PRUeth ports */
1229 	if (netif_running(emac0->ndev))
1230 		netif_device_detach(emac0->ndev);
1231 	if (netif_running(emac1->ndev))
1232 		netif_device_detach(emac1->ndev);
1233 
1234 	/* Disable both PRUeth ports */
1235 	ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
1236 	ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);
1237 	if (ret)
1238 		return ret;
1239 
1240 	/* Stop both PRU cores for both PRUeth ports */
1241 	ret = prueth_emac_common_stop(prueth);
1242 	if (ret) {
1243 		dev_err(prueth->dev, "Failed to stop the firmwares");
1244 		return ret;
1245 	}
1246 
1247 	/* Start both pru cores for both PRUeth ports */
1248 	ret = prueth_emac_common_start(prueth);
1249 	if (ret) {
1250 		dev_err(prueth->dev, "Failed to start the firmwares");
1251 		return ret;
1252 	}
1253 
1254 	/* Enable forwarding for both PRUeth ports */
1255 	ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD);
1256 	ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD);
1257 
1258 	/* Attach the net_device for both PRUeth ports */
1259 	netif_device_attach(emac0->ndev);
1260 	netif_device_attach(emac1->ndev);
1261 
1262 	return ret;
1263 }
1264 
1265 static void icssg_change_mode(struct prueth *prueth)
1266 {
1267 	struct prueth_emac *emac;
1268 	int mac, ret;
1269 
1270 	ret = prueth_emac_restart(prueth);
1271 	if (ret) {
1272 		dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
1273 		return;
1274 	}
1275 
1276 	for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
1277 		emac = prueth->emac[mac];
1278 		if (prueth->is_hsr_offload_mode) {
1279 			if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM)
1280 				icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE);
1281 			else
1282 				icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE);
1283 		}
1284 
1285 		if (netif_running(emac->ndev)) {
1286 			icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan,
1287 					  ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
1288 					  ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
1289 					  ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
1290 					  ICSSG_FDB_ENTRY_BLOCK,
1291 					  true);
1292 			icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID,
1293 					  BIT(emac->port_id) | DEFAULT_PORT_MASK,
1294 					  BIT(emac->port_id) | DEFAULT_UNTAG_MASK,
1295 					  true);
1296 			if (prueth->is_hsr_offload_mode)
1297 				icssg_vtbl_modify(emac, DEFAULT_VID,
1298 						  DEFAULT_PORT_MASK,
1299 						  DEFAULT_UNTAG_MASK, true);
1300 			icssg_set_pvid(prueth, emac->port_vlan, emac->port_id);
1301 			if (prueth->is_switch_mode)
1302 				icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);
1303 		}
1304 	}
1305 }
1306 
1307 static int prueth_netdevice_port_link(struct net_device *ndev,
1308 				      struct net_device *br_ndev,
1309 				      struct netlink_ext_ack *extack)
1310 {
1311 	struct prueth_emac *emac = netdev_priv(ndev);
1312 	struct prueth *prueth = emac->prueth;
1313 	int err;
1314 
1315 	if (!prueth->br_members) {
1316 		prueth->hw_bridge_dev = br_ndev;
1317 	} else {
1318 		/* This is adding the port to a second bridge, which is
1319 		 * unsupported
1320 		 */
1321 		if (prueth->hw_bridge_dev != br_ndev)
1322 			return -EOPNOTSUPP;
1323 	}
1324 
1325 	err = switchdev_bridge_port_offload(br_ndev, ndev, emac,
1326 					    &prueth->prueth_switchdev_nb,
1327 					    &prueth->prueth_switchdev_bl_nb,
1328 					    false, extack);
1329 	if (err)
1330 		return err;
1331 
1332 	prueth->br_members |= BIT(emac->port_id);
1333 
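	/* Switch mode is entered only once both MII ports are members of the
	 * same bridge; the firmware is then restarted with the switch
	 * firmware set via icssg_change_mode().
	 */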
1334 	if (!prueth->is_switch_mode) {
1335 		if (prueth->br_members & BIT(PRUETH_PORT_MII0) &&
1336 		    prueth->br_members & BIT(PRUETH_PORT_MII1)) {
1337 			prueth->is_switch_mode = true;
1338 			prueth->default_vlan = PRUETH_DFLT_VLAN_SW;
1339 			emac->port_vlan = prueth->default_vlan;
1340 			icssg_change_mode(prueth);
1341 		}
1342 	}
1343 
1344 	prueth_offload_fwd_mark_update(prueth);
1345 
1346 	return NOTIFY_DONE;
1347 }
1348 
1349 static void prueth_netdevice_port_unlink(struct net_device *ndev)
1350 {
1351 	struct prueth_emac *emac = netdev_priv(ndev);
1352 	struct prueth *prueth = emac->prueth;
1353 	int ret;
1354 
1355 	prueth->br_members &= ~BIT(emac->port_id);
1356 
1357 	if (prueth->is_switch_mode) {
1358 		prueth->is_switch_mode = false;
1359 		emac->port_vlan = 0;
1360 		ret = prueth_emac_restart(prueth);
1361 		if (ret) {
1362 			dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
1363 			return;
1364 		}
1365 	}
1366 
1367 	prueth_offload_fwd_mark_update(prueth);
1368 
1369 	if (!prueth->br_members)
1370 		prueth->hw_bridge_dev = NULL;
1371 }
1372 
1373 static int prueth_hsr_port_link(struct net_device *ndev)
1374 {
1375 	struct prueth_emac *emac = netdev_priv(ndev);
1376 	struct prueth *prueth = emac->prueth;
1377 	struct prueth_emac *emac0;
1378 	struct prueth_emac *emac1;
1379 
1380 	emac0 = prueth->emac[PRUETH_MAC0];
1381 	emac1 = prueth->emac[PRUETH_MAC1];
1382 
1383 	if (prueth->is_switch_mode)
1384 		return -EOPNOTSUPP;
1385 
1386 	prueth->hsr_members |= BIT(emac->port_id);
1387 	if (!prueth->is_hsr_offload_mode) {
1388 		if (prueth->hsr_members & BIT(PRUETH_PORT_MII0) &&
1389 		    prueth->hsr_members & BIT(PRUETH_PORT_MII1)) {
1390 			if (!(emac0->ndev->features &
1391 			      NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
1392 			    !(emac1->ndev->features &
1393 			      NETIF_PRUETH_HSR_OFFLOAD_FEATURES))
1394 				return -EOPNOTSUPP;
1395 			prueth->is_hsr_offload_mode = true;
1396 			prueth->default_vlan = PRUETH_DFLT_VLAN_HSR;
1397 			emac0->port_vlan = prueth->default_vlan;
1398 			emac1->port_vlan = prueth->default_vlan;
1399 			icssg_change_mode(prueth);
1400 			netdev_dbg(ndev, "Enabling HSR offload mode\n");
1401 		}
1402 	}
1403 
1404 	return 0;
1405 }
1406 
1407 static void prueth_hsr_port_unlink(struct net_device *ndev)
1408 {
1409 	struct prueth_emac *emac = netdev_priv(ndev);
1410 	struct prueth *prueth = emac->prueth;
1411 	struct prueth_emac *emac0;
1412 	struct prueth_emac *emac1;
1413 	int ret;
1414 
1415 	emac0 = prueth->emac[PRUETH_MAC0];
1416 	emac1 = prueth->emac[PRUETH_MAC1];
1417 
1418 	prueth->hsr_members &= ~BIT(emac->port_id);
1419 	if (prueth->is_hsr_offload_mode) {
1420 		prueth->is_hsr_offload_mode = false;
1421 		emac0->port_vlan = 0;
1422 		emac1->port_vlan = 0;
1423 		prueth->hsr_dev = NULL;
1424 		ret = prueth_emac_restart(prueth);
1425 		if (ret) {
1426 			dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
1427 			return;
1428 		}
1429 		netdev_dbg(ndev, "Disabling HSR Offload mode\n");
1430 	}
1431 }
1432 
1433 /* netdev notifier */
1434 static int prueth_netdevice_event(struct notifier_block *unused,
1435 				  unsigned long event, void *ptr)
1436 {
1437 	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
1438 	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
1439 	struct netdev_notifier_changeupper_info *info;
1440 	struct prueth_emac *emac = netdev_priv(ndev);
1441 	struct prueth *prueth = emac->prueth;
1442 	int ret = NOTIFY_DONE;
1443 
1444 	if (ndev->netdev_ops != &emac_netdev_ops)
1445 		return NOTIFY_DONE;
1446 
1447 	switch (event) {
1448 	case NETDEV_CHANGEUPPER:
1449 		info = ptr;
1450 
1451 		if ((ndev->features & NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
1452 		    is_hsr_master(info->upper_dev)) {
1453 			if (info->linking) {
1454 				if (!prueth->hsr_dev) {
1455 					prueth->hsr_dev = info->upper_dev;
1456 					icssg_class_set_host_mac_addr(prueth->miig_rt,
1457 								      prueth->hsr_dev->dev_addr);
1458 				} else {
1459 					if (prueth->hsr_dev != info->upper_dev) {
1460 						netdev_dbg(ndev, "Both interfaces must be linked to same upper device\n");
1461 						return -EOPNOTSUPP;
1462 					}
1463 				}
1464 				prueth_hsr_port_link(ndev);
1465 			} else {
1466 				prueth_hsr_port_unlink(ndev);
1467 			}
1468 		}
1469 
1470 		if (netif_is_bridge_master(info->upper_dev)) {
1471 			if (info->linking)
1472 				ret = prueth_netdevice_port_link(ndev, info->upper_dev, extack);
1473 			else
1474 				prueth_netdevice_port_unlink(ndev);
1475 		}
1476 		break;
1477 	default:
1478 		return NOTIFY_DONE;
1479 	}
1480 
1481 	return notifier_from_errno(ret);
1482 }
1483 
1484 static int prueth_register_notifiers(struct prueth *prueth)
1485 {
1486 	int ret = 0;
1487 
1488 	prueth->prueth_netdevice_nb.notifier_call = &prueth_netdevice_event;
1489 	ret = register_netdevice_notifier(&prueth->prueth_netdevice_nb);
1490 	if (ret) {
1491 		dev_err(prueth->dev, "can't register netdevice notifier\n");
1492 		return ret;
1493 	}
1494 
1495 	ret = prueth_switchdev_register_notifiers(prueth);
1496 	if (ret)
1497 		unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
1498 
1499 	return ret;
1500 }
1501 
1502 static void prueth_unregister_notifiers(struct prueth *prueth)
1503 {
1504 	prueth_switchdev_unregister_notifiers(prueth);
1505 	unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
1506 }
1507 
1508 static int prueth_probe(struct platform_device *pdev)
1509 {
1510 	struct device_node *eth_node, *eth_ports_node;
1511 	struct device_node  *eth0_node = NULL;
1512 	struct device_node  *eth1_node = NULL;
1513 	struct genpool_data_align gp_data = {
1514 		.align = SZ_64K,
1515 	};
1516 	struct device *dev = &pdev->dev;
1517 	struct device_node *np;
1518 	struct prueth *prueth;
1519 	struct pruss *pruss;
1520 	u32 msmc_ram_size;
1521 	int i, ret;
1522 
1523 	np = dev->of_node;
1524 
1525 	prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
1526 	if (!prueth)
1527 		return -ENOMEM;
1528 
1529 	dev_set_drvdata(dev, prueth);
1530 	prueth->pdev = pdev;
1531 	prueth->pdata = *(const struct prueth_pdata *)device_get_match_data(dev);
1532 
1533 	prueth->dev = dev;
1534 	eth_ports_node = of_get_child_by_name(np, "ethernet-ports");
1535 	if (!eth_ports_node)
1536 		return -ENOENT;
1537 
1538 	for_each_child_of_node(eth_ports_node, eth_node) {
1539 		u32 reg;
1540 
1541 		if (strcmp(eth_node->name, "port"))
1542 			continue;
1543 		ret = of_property_read_u32(eth_node, "reg", &reg);
1544 		if (ret < 0) {
1545 			dev_err(dev, "%pOF error reading port_id %d\n",
1546 				eth_node, ret);
1547 		}
1548 
1549 		of_node_get(eth_node);
1550 
1551 		if (reg == 0) {
1552 			eth0_node = eth_node;
1553 			if (!of_device_is_available(eth0_node)) {
1554 				of_node_put(eth0_node);
1555 				eth0_node = NULL;
1556 			}
1557 		} else if (reg == 1) {
1558 			eth1_node = eth_node;
1559 			if (!of_device_is_available(eth1_node)) {
1560 				of_node_put(eth1_node);
1561 				eth1_node = NULL;
1562 			}
1563 		} else {
1564 			dev_err(dev, "port reg should be 0 or 1\n");
1565 		}
1566 	}
1567 
1568 	of_node_put(eth_ports_node);
1569 
1570 	/* At least one node must be present and available else we fail */
1571 	if (!eth0_node && !eth1_node) {
1572 		dev_err(dev, "neither port0 nor port1 node available\n");
1573 		return -ENODEV;
1574 	}
1575 
1576 	if (eth0_node == eth1_node) {
1577 		dev_err(dev, "port0 and port1 can't have same reg\n");
1578 		of_node_put(eth0_node);
1579 		return -ENODEV;
1580 	}
1581 
1582 	prueth->eth_node[PRUETH_MAC0] = eth0_node;
1583 	prueth->eth_node[PRUETH_MAC1] = eth1_node;
1584 
1585 	prueth->miig_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-g-rt");
1586 	if (IS_ERR(prueth->miig_rt)) {
1587 		dev_err(dev, "couldn't get ti,mii-g-rt syscon regmap\n");
1588 		return -ENODEV;
1589 	}
1590 
1591 	prueth->mii_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-rt");
1592 	if (IS_ERR(prueth->mii_rt)) {
1593 		dev_err(dev, "couldn't get ti,mii-rt syscon regmap\n");
1594 		return -ENODEV;
1595 	}
1596 
1597 	prueth->pa_stats = syscon_regmap_lookup_by_phandle(np, "ti,pa-stats");
1598 	if (IS_ERR(prueth->pa_stats)) {
1599 		dev_err(dev, "couldn't get ti,pa-stats syscon regmap\n");
1600 		prueth->pa_stats = NULL;
1601 	}
1602 
1603 	if (eth0_node || eth1_node) {
1604 		ret = prueth_get_cores(prueth, ICSS_SLICE0, false);
1605 		if (ret)
1606 			goto put_cores;
1607 		ret = prueth_get_cores(prueth, ICSS_SLICE1, false);
1608 		if (ret)
1609 			goto put_cores;
1610 	}
1611 
1612 	pruss = pruss_get(eth0_node ?
1613 			  prueth->pru[ICSS_SLICE0] : prueth->pru[ICSS_SLICE1]);
1614 	if (IS_ERR(pruss)) {
1615 		ret = PTR_ERR(pruss);
1616 		dev_err(dev, "unable to get pruss handle\n");
1617 		goto put_cores;
1618 	}
1619 
1620 	prueth->pruss = pruss;
1621 
1622 	ret = pruss_request_mem_region(pruss, PRUSS_MEM_SHRD_RAM2,
1623 				       &prueth->shram);
1624 	if (ret) {
1625 		dev_err(dev, "unable to get PRUSS SHRD RAM2: %d\n", ret);
1626 		goto put_pruss;
1627 	}
1628 
1629 	prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
1630 	if (!prueth->sram_pool) {
1631 		dev_err(dev, "unable to get SRAM pool\n");
1632 		ret = -ENODEV;
1633 
1634 		goto put_mem;
1635 	}
1636 
1637 	msmc_ram_size = MSMC_RAM_SIZE;
1638 	prueth->is_switchmode_supported = prueth->pdata.switch_mode;
1639 	if (prueth->is_switchmode_supported)
1640 		msmc_ram_size = MSMC_RAM_SIZE_SWITCH_MODE;
1641 
1642 	/* NOTE: FW bug needs buffer base to be 64KB aligned */
1643 	prueth->msmcram.va =
1644 		(void __iomem *)gen_pool_alloc_algo(prueth->sram_pool,
1645 						    msmc_ram_size,
1646 						    gen_pool_first_fit_align,
1647 						    &gp_data);
1648 
1649 	if (!prueth->msmcram.va) {
1650 		ret = -ENOMEM;
1651 		dev_err(dev, "unable to allocate MSMC resource\n");
1652 		goto put_mem;
1653 	}
1654 	prueth->msmcram.pa = gen_pool_virt_to_phys(prueth->sram_pool,
1655 						   (unsigned long)prueth->msmcram.va);
1656 	prueth->msmcram.size = msmc_ram_size;
1657 	memset_io(prueth->msmcram.va, 0, msmc_ram_size);
1658 	dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa,
1659 		prueth->msmcram.va, prueth->msmcram.size);
1660 
1661 	prueth->iep0 = icss_iep_get_idx(np, 0);
1662 	if (IS_ERR(prueth->iep0)) {
1663 		ret = dev_err_probe(dev, PTR_ERR(prueth->iep0), "iep0 get failed\n");
1664 		prueth->iep0 = NULL;
1665 		goto free_pool;
1666 	}
1667 
1668 	prueth->iep1 = icss_iep_get_idx(np, 1);
1669 	if (IS_ERR(prueth->iep1)) {
1670 		ret = dev_err_probe(dev, PTR_ERR(prueth->iep1), "iep1 get failed\n");
1671 		goto put_iep0;
1672 	}
1673 
1674 	if (prueth->pdata.quirk_10m_link_issue) {
1675 		/* Enable IEP1 for the FW in 64-bit mode as a workaround for the
1676 		 * 10M FD link-detect issue under TX traffic.
1677 		 */
1678 		icss_iep_init_fw(prueth->iep1);
1679 	}
1680 
1681 	spin_lock_init(&prueth->vtbl_lock);
1682 	/* setup netdev interfaces */
1683 	if (eth0_node) {
1684 		ret = prueth_netdev_init(prueth, eth0_node);
1685 		if (ret) {
1686 			dev_err_probe(dev, ret, "netdev init %s failed\n",
1687 				      eth0_node->name);
1688 			goto exit_iep;
1689 		}
1690 
1691 		prueth->emac[PRUETH_MAC0]->half_duplex =
1692 			of_property_read_bool(eth0_node, "ti,half-duplex-capable");
1693 
1694 		prueth->emac[PRUETH_MAC0]->iep = prueth->iep0;
1695 	}
1696 
1697 	if (eth1_node) {
1698 		ret = prueth_netdev_init(prueth, eth1_node);
1699 		if (ret) {
1700 			dev_err_probe(dev, ret, "netdev init %s failed\n",
1701 				      eth1_node->name);
1702 			goto netdev_exit;
1703 		}
1704 
1705 		prueth->emac[PRUETH_MAC1]->half_duplex =
1706 			of_property_read_bool(eth1_node, "ti,half-duplex-capable");
1707 
1708 		prueth->emac[PRUETH_MAC1]->iep = prueth->iep0;
1709 	}
1710 
1711 	/* register the network devices */
1712 	if (eth0_node) {
1713 		ret = register_netdev(prueth->emac[PRUETH_MAC0]->ndev);
1714 		if (ret) {
1715 			dev_err(dev, "can't register netdev for port MII0");
1716 			goto netdev_exit;
1717 		}
1718 
1719 		prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev;
1720 
1721 		ret = emac_phy_connect(prueth->emac[PRUETH_MAC0]);
1722 		if (ret) {
1723 			dev_err(dev,
1724 				"can't connect to MII0 PHY, error %d", ret);
1725 			goto netdev_unregister;
1726 		}
1727 		phy_attached_info(prueth->emac[PRUETH_MAC0]->ndev->phydev);
1728 	}
1729 
1730 	if (eth1_node) {
1731 		ret = register_netdev(prueth->emac[PRUETH_MAC1]->ndev);
1732 		if (ret) {
1733 			dev_err(dev, "can't register netdev for port MII1");
1734 			goto netdev_unregister;
1735 		}
1736 
1737 		prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev;
1738 		ret = emac_phy_connect(prueth->emac[PRUETH_MAC1]);
1739 		if (ret) {
1740 			dev_err(dev,
1741 				"can't connect to MII1 PHY, error %d", ret);
1742 			goto netdev_unregister;
1743 		}
1744 		phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev);
1745 	}
1746 
1747 	if (prueth->is_switchmode_supported) {
1748 		ret = prueth_register_notifiers(prueth);
1749 		if (ret)
1750 			goto netdev_unregister;
1751 
1752 		sprintf(prueth->switch_id, "%s", dev_name(dev));
1753 	}
1754 
1755 	dev_info(dev, "TI PRU ethernet driver initialized: %s EMAC mode\n",
1756 		 (!eth0_node || !eth1_node) ? "single" : "dual");
1757 
1758 	if (eth1_node)
1759 		of_node_put(eth1_node);
1760 	if (eth0_node)
1761 		of_node_put(eth0_node);
1762 	return 0;
1763 
1764 netdev_unregister:
1765 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
1766 		if (!prueth->registered_netdevs[i])
1767 			continue;
1768 		if (prueth->emac[i]->ndev->phydev) {
1769 			phy_disconnect(prueth->emac[i]->ndev->phydev);
1770 			prueth->emac[i]->ndev->phydev = NULL;
1771 		}
1772 		unregister_netdev(prueth->registered_netdevs[i]);
1773 	}
1774 
1775 netdev_exit:
1776 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
1777 		eth_node = prueth->eth_node[i];
1778 		if (!eth_node)
1779 			continue;
1780 
1781 		prueth_netdev_exit(prueth, eth_node);
1782 	}
1783 
1784 exit_iep:
1785 	if (prueth->pdata.quirk_10m_link_issue)
1786 		icss_iep_exit_fw(prueth->iep1);
1787 	icss_iep_put(prueth->iep1);
1788 
1789 put_iep0:
1790 	icss_iep_put(prueth->iep0);
1791 	prueth->iep0 = NULL;
1792 	prueth->iep1 = NULL;
1793 
1794 free_pool:
1795 	gen_pool_free(prueth->sram_pool,
1796 		      (unsigned long)prueth->msmcram.va, msmc_ram_size);
1797 
1798 put_mem:
1799 	pruss_release_mem_region(prueth->pruss, &prueth->shram);
1800 
1801 put_pruss:
1802 	pruss_put(prueth->pruss);
1803 
1804 put_cores:
1805 	if (eth0_node || eth1_node) {
1806 		prueth_put_cores(prueth, ICSS_SLICE0);
1807 		of_node_put(eth0_node);
1808 
1809 		prueth_put_cores(prueth, ICSS_SLICE1);
1810 		of_node_put(eth1_node);
1811 	}
1812 
1813 	return ret;
1814 }
1815 
1816 static void prueth_remove(struct platform_device *pdev)
1817 {
1818 	struct prueth *prueth = platform_get_drvdata(pdev);
1819 	struct device_node *eth_node;
1820 	int i;
1821 
1822 	prueth_unregister_notifiers(prueth);
1823 
1824 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
1825 		if (!prueth->registered_netdevs[i])
1826 			continue;
1827 		phy_stop(prueth->emac[i]->ndev->phydev);
1828 		phy_disconnect(prueth->emac[i]->ndev->phydev);
1829 		prueth->emac[i]->ndev->phydev = NULL;
1830 		unregister_netdev(prueth->registered_netdevs[i]);
1831 	}
1832 
1833 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
1834 		eth_node = prueth->eth_node[i];
1835 		if (!eth_node)
1836 			continue;
1837 
1838 		prueth_netdev_exit(prueth, eth_node);
1839 	}
1840 
1841 	if (prueth->pdata.quirk_10m_link_issue)
1842 		icss_iep_exit_fw(prueth->iep1);
1843 
1844 	icss_iep_put(prueth->iep1);
1845 	icss_iep_put(prueth->iep0);
1846 
1847 	gen_pool_free(prueth->sram_pool,
1848 		      (unsigned long)prueth->msmcram.va,
1849 		      MSMC_RAM_SIZE);
1850 
1851 	pruss_release_mem_region(prueth->pruss, &prueth->shram);
1852 
1853 	pruss_put(prueth->pruss);
1854 
1855 	if (prueth->eth_node[PRUETH_MAC1])
1856 		prueth_put_cores(prueth, ICSS_SLICE1);
1857 
1858 	if (prueth->eth_node[PRUETH_MAC0])
1859 		prueth_put_cores(prueth, ICSS_SLICE0);
1860 }
1861 
1862 static const struct prueth_pdata am654_icssg_pdata = {
1863 	.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
1864 	.quirk_10m_link_issue = 1,
1865 	.switch_mode = 1,
1866 };
1867 
1868 static const struct prueth_pdata am64x_icssg_pdata = {
1869 	.fdqring_mode = K3_RINGACC_RING_MODE_RING,
1870 	.quirk_10m_link_issue = 1,
1871 	.switch_mode = 1,
1872 };
1873 
1874 static const struct of_device_id prueth_dt_match[] = {
1875 	{ .compatible = "ti,am654-icssg-prueth", .data = &am654_icssg_pdata },
1876 	{ .compatible = "ti,am642-icssg-prueth", .data = &am64x_icssg_pdata },
1877 	{ /* sentinel */ }
1878 };
1879 MODULE_DEVICE_TABLE(of, prueth_dt_match);
1880 
1881 static struct platform_driver prueth_driver = {
1882 	.probe = prueth_probe,
1883 	.remove = prueth_remove,
1884 	.driver = {
1885 		.name = "icssg-prueth",
1886 		.of_match_table = prueth_dt_match,
1887 		.pm = &prueth_dev_pm_ops,
1888 	},
1889 };
1890 module_platform_driver(prueth_driver);
1891 
1892 MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
1893 MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
1894 MODULE_DESCRIPTION("PRUSS ICSSG Ethernet Driver");
1895 MODULE_LICENSE("GPL");
1896