xref: /linux/drivers/net/ethernet/ti/icssg/icssg_prueth.c (revision 3fd6c59042dbba50391e30862beac979491145fe)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 /* Texas Instruments ICSSG Ethernet Driver
4  *
5  * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
6  *
7  */
8 
9 #include <linux/bitops.h>
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/dma/ti-cppi5.h>
14 #include <linux/etherdevice.h>
15 #include <linux/genalloc.h>
16 #include <linux/if_hsr.h>
17 #include <linux/if_vlan.h>
18 #include <linux/interrupt.h>
19 #include <linux/io-64-nonatomic-hi-lo.h>
20 #include <linux/kernel.h>
21 #include <linux/mfd/syscon.h>
22 #include <linux/module.h>
23 #include <linux/of.h>
24 #include <linux/of_mdio.h>
25 #include <linux/of_net.h>
26 #include <linux/platform_device.h>
27 #include <linux/phy.h>
28 #include <linux/property.h>
29 #include <linux/remoteproc/pruss.h>
30 #include <linux/regmap.h>
31 #include <linux/remoteproc.h>
32 #include <net/switchdev.h>
33 
34 #include "icssg_prueth.h"
35 #include "icssg_mii_rt.h"
36 #include "icssg_switchdev.h"
37 #include "../k3-cppi-desc-pool.h"
38 
39 #define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG Ethernet driver"
40 
41 #define DEFAULT_VID		1
42 #define DEFAULT_PORT_MASK	1
43 #define DEFAULT_UNTAG_MASK	1
44 
45 #define NETIF_PRUETH_HSR_OFFLOAD_FEATURES	(NETIF_F_HW_HSR_FWD | \
46 						 NETIF_F_HW_HSR_DUP | \
47 						 NETIF_F_HW_HSR_TAG_INS | \
48 						 NETIF_F_HW_HSR_TAG_RM)
49 
50 /* CTRLMMR_ICSSG_RGMII_CTRL register bits */
51 #define ICSSG_CTRL_RGMII_ID_MODE                BIT(24)
52 
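/* Fetch one TX timestamp response from the firmware: pop a buffer
 * address from this slice's timestamp queue, copy the response out of
 * shared RAM and return the buffer to the firmware pool.
 */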
53 static int emac_get_tx_ts(struct prueth_emac *emac,
54 			  struct emac_tx_ts_response *rsp)
55 {
56 	struct prueth *prueth = emac->prueth;
57 	int slice = prueth_emac_slice(emac);
58 	int addr;
59 
60 	addr = icssg_queue_pop(prueth, slice == 0 ?
61 			       ICSSG_TS_POP_SLICE0 : ICSSG_TS_POP_SLICE1);
62 	if (addr < 0)
63 		return addr;
64 
65 	memcpy_fromio(rsp, prueth->shram.va + addr, sizeof(*rsp));
66 	/* return the buffer back to the pool */
67 	icssg_queue_push(prueth, slice == 0 ?
68 			 ICSSG_TS_PUSH_SLICE0 : ICSSG_TS_PUSH_SLICE1, addr);
69 
70 	return 0;
71 }
72 
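/* Deliver all pending TX timestamps: for each response, look up the skb
 * stashed at the cookie index, convert the firmware's hi/lo counter
 * snapshot to nanoseconds and report it via skb_tstamp_tx().
 */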
73 static void tx_ts_work(struct prueth_emac *emac)
74 {
75 	struct skb_shared_hwtstamps ssh;
76 	struct emac_tx_ts_response tsr;
77 	struct sk_buff *skb;
78 	int ret = 0;
79 	u32 hi_sw;
80 	u64 ns;
81 
82 	/* There may be more than one pending request */
83 	while (1) {
84 		ret = emac_get_tx_ts(emac, &tsr);
85 		if (ret) /* nothing more */
86 			break;
87 
88 		if (tsr.cookie >= PRUETH_MAX_TX_TS_REQUESTS ||
89 		    !emac->tx_ts_skb[tsr.cookie]) {
90 			netdev_err(emac->ndev, "Invalid TX TS cookie 0x%x\n",
91 				   tsr.cookie);
92 			break;
93 		}
94 
95 		skb = emac->tx_ts_skb[tsr.cookie];
96 		emac->tx_ts_skb[tsr.cookie] = NULL;	/* free slot */
97 		if (!skb) {
98 			netdev_err(emac->ndev, "Driver Bug! got NULL skb\n");
99 			break;
100 		}
101 
102 		hi_sw = readl(emac->prueth->shram.va +
103 			      TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
104 		ns = icssg_ts_to_ns(hi_sw, tsr.hi_ts, tsr.lo_ts,
105 				    IEP_DEFAULT_CYCLE_TIME_NS);
106 
107 		memset(&ssh, 0, sizeof(ssh));
108 		ssh.hwtstamp = ns_to_ktime(ns);
109 
110 		skb_tstamp_tx(skb, &ssh);
111 		dev_consume_skb_any(skb);
112 
113 		if (atomic_dec_and_test(&emac->tx_ts_pending))	/* no more? */
114 			break;
115 	}
116 }
117 
118 static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id)
119 {
120 	struct prueth_emac *emac = dev_id;
121 
122 	/* currently only TX timestamp is being returned */
123 	tx_ts_work(emac);
124 
125 	return IRQ_HANDLED;
126 }
127 
128 static struct icssg_firmwares icssg_hsr_firmwares[] = {
129 	{
130 		.pru = "ti-pruss/am65x-sr2-pru0-pruhsr-fw.elf",
131 		.rtu = "ti-pruss/am65x-sr2-rtu0-pruhsr-fw.elf",
132 		.txpru = "ti-pruss/am65x-sr2-txpru0-pruhsr-fw.elf",
133 	},
134 	{
135 		.pru = "ti-pruss/am65x-sr2-pru1-pruhsr-fw.elf",
136 		.rtu = "ti-pruss/am65x-sr2-rtu1-pruhsr-fw.elf",
137 		.txpru = "ti-pruss/am65x-sr2-txpru1-pruhsr-fw.elf",
138 	}
139 };
140 
141 static struct icssg_firmwares icssg_switch_firmwares[] = {
142 	{
143 		.pru = "ti-pruss/am65x-sr2-pru0-prusw-fw.elf",
144 		.rtu = "ti-pruss/am65x-sr2-rtu0-prusw-fw.elf",
145 		.txpru = "ti-pruss/am65x-sr2-txpru0-prusw-fw.elf",
146 	},
147 	{
148 		.pru = "ti-pruss/am65x-sr2-pru1-prusw-fw.elf",
149 		.rtu = "ti-pruss/am65x-sr2-rtu1-prusw-fw.elf",
150 		.txpru = "ti-pruss/am65x-sr2-txpru1-prusw-fw.elf",
151 	}
152 };
153 
154 static struct icssg_firmwares icssg_emac_firmwares[] = {
155 	{
156 		.pru = "ti-pruss/am65x-sr2-pru0-prueth-fw.elf",
157 		.rtu = "ti-pruss/am65x-sr2-rtu0-prueth-fw.elf",
158 		.txpru = "ti-pruss/am65x-sr2-txpru0-prueth-fw.elf",
159 	},
160 	{
161 		.pru = "ti-pruss/am65x-sr2-pru1-prueth-fw.elf",
162 		.rtu = "ti-pruss/am65x-sr2-rtu1-prueth-fw.elf",
163 		.txpru = "ti-pruss/am65x-sr2-txpru1-prueth-fw.elf",
164 	}
165 };
166 
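/* Pick the per-slice firmware set for the current mode (EMAC, switch or
 * HSR), configure the slice and boot the cores in order: PRU, RTU, then
 * TX_PRU. On error, cores already booted are shut down in reverse order.
 */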
167 static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
168 {
169 	struct icssg_firmwares *firmwares;
170 	struct device *dev = prueth->dev;
171 	int slice, ret;
172 
173 	if (prueth->is_switch_mode)
174 		firmwares = icssg_switch_firmwares;
175 	else if (prueth->is_hsr_offload_mode)
176 		firmwares = icssg_hsr_firmwares;
177 	else
178 		firmwares = icssg_emac_firmwares;
179 
180 	slice = prueth_emac_slice(emac);
181 	if (slice < 0) {
182 		netdev_err(emac->ndev, "invalid port\n");
183 		return -EINVAL;
184 	}
185 
186 	ret = icssg_config(prueth, emac, slice);
187 	if (ret)
188 		return ret;
189 
190 	ret = rproc_set_firmware(prueth->pru[slice], firmwares[slice].pru);
191 	ret = rproc_boot(prueth->pru[slice]);
192 	if (ret) {
193 		dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
194 		return -EINVAL;
195 	}
196 
197 	ret = rproc_set_firmware(prueth->rtu[slice], firmwares[slice].rtu);
198 	ret = rproc_boot(prueth->rtu[slice]);
199 	if (ret) {
200 		dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
201 		goto halt_pru;
202 	}
203 
204 	ret = rproc_set_firmware(prueth->txpru[slice], firmwares[slice].txpru);
205 	ret = rproc_boot(prueth->txpru[slice]);
206 	if (ret) {
207 		dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
208 		goto halt_rtu;
209 	}
210 
211 	emac->fw_running = 1;
212 	return 0;
213 
214 halt_rtu:
215 	rproc_shutdown(prueth->rtu[slice]);
216 
217 halt_pru:
218 	rproc_shutdown(prueth->pru[slice]);
219 
220 	return ret;
221 }
222 
223 /* called back by the PHY layer when the link state of the hw port changes */
224 static void emac_adjust_link(struct net_device *ndev)
225 {
226 	struct prueth_emac *emac = netdev_priv(ndev);
227 	struct phy_device *phydev = ndev->phydev;
228 	struct prueth *prueth = emac->prueth;
229 	bool new_state = false;
230 	unsigned long flags;
231 
232 	if (phydev->link) {
233 		/* check the mode of operation - full/half duplex */
234 		if (phydev->duplex != emac->duplex) {
235 			new_state = true;
236 			emac->duplex = phydev->duplex;
237 		}
238 		if (phydev->speed != emac->speed) {
239 			new_state = true;
240 			emac->speed = phydev->speed;
241 		}
242 		if (!emac->link) {
243 			new_state = true;
244 			emac->link = 1;
245 		}
246 	} else if (emac->link) {
247 		new_state = true;
248 		emac->link = 0;
249 
250 		/* f/w should support 100 & 1000 */
251 		emac->speed = SPEED_1000;
252 
253 		/* half duplex may not be supported by f/w */
254 		emac->duplex = DUPLEX_FULL;
255 	}
256 
257 	if (new_state) {
258 		phy_print_status(phydev);
259 
260 		/* update RGMII and MII configuration based on PHY negotiated
261 		 * values
262 		 */
263 		if (emac->link) {
264 			if (emac->duplex == DUPLEX_HALF)
265 				icssg_config_half_duplex(emac);
266 			/* Set the RGMII cfg for gig en and full duplex */
267 			icssg_update_rgmii_cfg(prueth->miig_rt, emac);
268 
269 			/* update the Tx IPG based on 100M/1G speed */
270 			spin_lock_irqsave(&emac->lock, flags);
271 			icssg_config_ipg(emac);
272 			spin_unlock_irqrestore(&emac->lock, flags);
273 			icssg_config_set_speed(emac);
274 			icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
275 
276 		} else {
277 			icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
278 		}
279 	}
280 
281 	if (emac->link) {
282 		/* reactivate the transmit queue */
283 		netif_tx_wake_all_queues(ndev);
284 	} else {
285 		netif_tx_stop_all_queues(ndev);
286 		prueth_cleanup_tx_ts(emac);
287 	}
288 }
289 
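/* RX pacing timer callback: re-enables the RX completion interrupt,
 * which is presumably left disabled by the RX NAPI poll path while
 * interrupt coalescing is in effect.
 */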
290 static enum hrtimer_restart emac_rx_timer_callback(struct hrtimer *timer)
291 {
292 	struct prueth_emac *emac =
293 			container_of(timer, struct prueth_emac, rx_hrtimer);
294 	int rx_flow = PRUETH_RX_FLOW_DATA;
295 
296 	enable_irq(emac->rx_chns.irq[rx_flow]);
297 	return HRTIMER_NORESTART;
298 }
299 
300 static int emac_phy_connect(struct prueth_emac *emac)
301 {
302 	struct prueth *prueth = emac->prueth;
303 	struct net_device *ndev = emac->ndev;
304 	/* connect PHY */
305 	ndev->phydev = of_phy_connect(emac->ndev, emac->phy_node,
306 				      &emac_adjust_link, 0,
307 				      emac->phy_if);
308 	if (!ndev->phydev) {
309 		dev_err(prueth->dev, "couldn't connect to phy %s\n",
310 			emac->phy_node->full_name);
311 		return -ENODEV;
312 	}
313 
314 	if (!emac->half_duplex) {
315 		dev_dbg(prueth->dev, "half duplex mode is not supported\n");
316 		phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
317 		phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
318 	}
319 
320 	/* remove unsupported modes */
321 	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
322 	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
323 	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
324 
325 	if (emac->phy_if == PHY_INTERFACE_MODE_MII)
326 		phy_set_max_speed(ndev->phydev, SPEED_100);
327 
328 	return 0;
329 }
330 
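/* Reconstruct the 64-bit PTP time from three counters, re-reading until
 * a consistent snapshot is obtained:
 *   cycles = (hi_rollover_count << 23) | iepcount_hi
 *   ns     = cycles * IEP_DEFAULT_CYCLE_TIME_NS + iepcount_lo
 * where iepcount_hi is the firmware-extended cycle count and
 * iepcount_lo is the IEP count within the current cycle.
 */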
331 static u64 prueth_iep_gettime(void *clockops_data, struct ptp_system_timestamp *sts)
332 {
333 	u32 hi_rollover_count, hi_rollover_count_r;
334 	struct prueth_emac *emac = clockops_data;
335 	struct prueth *prueth = emac->prueth;
336 	void __iomem *fw_hi_r_count_addr;
337 	void __iomem *fw_count_hi_addr;
338 	u32 iepcount_hi, iepcount_hi_r;
339 	unsigned long flags;
340 	u32 iepcount_lo;
341 	u64 ts = 0;
342 
343 	fw_count_hi_addr = prueth->shram.va + TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET;
344 	fw_hi_r_count_addr = prueth->shram.va + TIMESYNC_FW_WC_HI_ROLLOVER_COUNT_OFFSET;
345 
346 	local_irq_save(flags);
347 	do {
348 		iepcount_hi = icss_iep_get_count_hi(emac->iep);
349 		iepcount_hi += readl(fw_count_hi_addr);
350 		hi_rollover_count = readl(fw_hi_r_count_addr);
351 		ptp_read_system_prets(sts);
352 		iepcount_lo = icss_iep_get_count_low(emac->iep);
353 		ptp_read_system_postts(sts);
354 
355 		iepcount_hi_r = icss_iep_get_count_hi(emac->iep);
356 		iepcount_hi_r += readl(fw_count_hi_addr);
357 		hi_rollover_count_r = readl(fw_hi_r_count_addr);
358 	} while ((iepcount_hi_r != iepcount_hi) ||
359 		 (hi_rollover_count != hi_rollover_count_r));
360 	local_irq_restore(flags);
361 
362 	ts = ((u64)hi_rollover_count) << 23 | iepcount_hi;
363 	ts = ts * (u64)IEP_DEFAULT_CYCLE_TIME_NS + iepcount_lo;
364 
365 	return ts;
366 }
367 
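/* Set the PTP time by handing the firmware a setclock descriptor in
 * shared RAM (cycle counter, IEP count within the cycle and a margin),
 * then polling the acknowledgment byte for up to a few milliseconds.
 */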
368 static void prueth_iep_settime(void *clockops_data, u64 ns)
369 {
370 	struct icssg_setclock_desc __iomem *sc_descp;
371 	struct prueth_emac *emac = clockops_data;
372 	struct icssg_setclock_desc sc_desc;
373 	u64 cyclecount;
374 	u32 cycletime;
375 	int timeout;
376 
377 	if (!emac->fw_running)
378 		return;
379 
380 	sc_descp = emac->prueth->shram.va + TIMESYNC_FW_WC_SETCLOCK_DESC_OFFSET;
381 
382 	cycletime = IEP_DEFAULT_CYCLE_TIME_NS;
383 	cyclecount = ns / cycletime;
384 
385 	memset(&sc_desc, 0, sizeof(sc_desc));
386 	sc_desc.margin = cycletime - 1000;
387 	sc_desc.cyclecounter0_set = cyclecount & GENMASK(31, 0);
388 	sc_desc.cyclecounter1_set = (cyclecount & GENMASK(63, 32)) >> 32;
389 	sc_desc.iepcount_set = ns % cycletime;
390 	/* Count from 0 to (cycle time) - emac->iep->def_inc */
391 	sc_desc.CMP0_current = cycletime - emac->iep->def_inc;
392 
393 	memcpy_toio(sc_descp, &sc_desc, sizeof(sc_desc));
394 
395 	writeb(1, &sc_descp->request);
396 
397 	timeout = 5;	/* fw should take 2-3 ms */
398 	while (timeout--) {
399 		if (readb(&sc_descp->acknowledgment))
400 			return;
401 
402 		usleep_range(500, 1000);
403 	}
404 
405 	dev_err(emac->prueth->dev, "settime timeout\n");
406 }
407 
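/* Program a periodic output: split the requested period into a whole
 * number of IEP cycles (reduction factor) plus an offset within the
 * cycle, clamp the offset away from the cycle boundaries, and write the
 * reduction factor and a start time rounded up to the next second's
 * worth of cycles into shared RAM for the firmware.
 */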
408 static int prueth_perout_enable(void *clockops_data,
409 				struct ptp_perout_request *req, int on,
410 				u64 *cmp)
411 {
412 	struct prueth_emac *emac = clockops_data;
413 	u32 reduction_factor = 0, offset = 0;
414 	struct timespec64 ts;
415 	u64 current_cycle;
416 	u64 start_offset;
417 	u64 ns_period;
418 
419 	if (!on)
420 		return 0;
421 
422 	/* Any firmware specific stuff for PPS/PEROUT handling */
423 	ts.tv_sec = req->period.sec;
424 	ts.tv_nsec = req->period.nsec;
425 	ns_period = timespec64_to_ns(&ts);
426 
427 	/* f/w doesn't support period less than cycle time */
428 	if (ns_period < IEP_DEFAULT_CYCLE_TIME_NS)
429 		return -ENXIO;
430 
431 	reduction_factor = ns_period / IEP_DEFAULT_CYCLE_TIME_NS;
432 	offset = ns_period % IEP_DEFAULT_CYCLE_TIME_NS;
433 
434 	/* f/w requires at least 1uS within a cycle so CMP
435 	 * can trigger after SYNC is enabled
436 	 */
437 	if (offset < 5 * NSEC_PER_USEC)
438 		offset = 5 * NSEC_PER_USEC;
439 
440 	/* if offset is close to cycle time then we will miss
441 	 * the CMP event for last tick when IEP rolls over.
442 	 * In normal mode, IEP tick is 4ns.
443 	 * In slow compensation it could be 0ns or 8ns at
444 	 * every slow compensation cycle.
445 	 */
446 	if (offset > IEP_DEFAULT_CYCLE_TIME_NS - 8)
447 		offset = IEP_DEFAULT_CYCLE_TIME_NS - 8;
448 
449 	/* we're in shadow mode so need to set upper 32-bits */
450 	*cmp = (u64)offset << 32;
451 
452 	writel(reduction_factor, emac->prueth->shram.va +
453 		TIMESYNC_FW_WC_SYNCOUT_REDUCTION_FACTOR_OFFSET);
454 
455 	current_cycle = icssg_read_time(emac->prueth->shram.va +
456 					TIMESYNC_FW_WC_CYCLECOUNT_OFFSET);
457 
458 	/* Rounding of current_cycle count to next second */
459 	start_offset = roundup(current_cycle, MSEC_PER_SEC);
460 
461 	hi_lo_writeq(start_offset, emac->prueth->shram.va +
462 		     TIMESYNC_FW_WC_SYNCOUT_START_TIME_CYCLECOUNT_OFFSET);
463 
464 	return 0;
465 }
466 
467 const struct icss_iep_clockops prueth_iep_clockops = {
468 	.settime = prueth_iep_settime,
469 	.gettime = prueth_iep_gettime,
470 	.perout_enable = prueth_perout_enable,
471 };
472 
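/* Multicast handling in dual-EMAC mode: on add, the FDB entry carries
 * the union of the ports that reference the address; on delete, only
 * this port is removed and the entry is re-added if the other port
 * still needs it.
 */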
473 static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr)
474 {
475 	struct prueth_emac *emac = netdev_priv(ndev);
476 	int port_mask = BIT(emac->port_id);
477 
478 	port_mask |= icssg_fdb_lookup(emac, addr, 0);
479 	icssg_fdb_add_del(emac, addr, 0, port_mask, true);
480 	icssg_vtbl_modify(emac, 0, port_mask, port_mask, true);
481 
482 	return 0;
483 }
484 
485 static int icssg_prueth_del_mcast(struct net_device *ndev, const u8 *addr)
486 {
487 	struct prueth_emac *emac = netdev_priv(ndev);
488 	int port_mask = BIT(emac->port_id);
489 	int other_port_mask;
490 
491 	other_port_mask = port_mask ^ icssg_fdb_lookup(emac, addr, 0);
492 
493 	icssg_fdb_add_del(emac, addr, 0, port_mask, false);
494 	icssg_vtbl_modify(emac, 0, port_mask, port_mask, false);
495 
496 	if (other_port_mask) {
497 		icssg_fdb_add_del(emac, addr, 0, other_port_mask, true);
498 		icssg_vtbl_modify(emac, 0, other_port_mask, other_port_mask, true);
499 	}
500 
501 	return 0;
502 }
503 
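/* Multicast handling in HSR offload mode: FDB entries are installed on
 * the default VLAN with P0/P1/P2 membership and the BLOCK flag; the add
 * path also updates the VLAN table for this port's port_vlan.
 */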
504 static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
505 {
506 	struct prueth_emac *emac = netdev_priv(ndev);
507 	struct prueth *prueth = emac->prueth;
508 
509 	icssg_fdb_add_del(emac, addr, prueth->default_vlan,
510 			  ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
511 			  ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
512 			  ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
513 			  ICSSG_FDB_ENTRY_BLOCK, true);
514 
515 	icssg_vtbl_modify(emac, emac->port_vlan, BIT(emac->port_id),
516 			  BIT(emac->port_id), true);
517 	return 0;
518 }
519 
520 static int icssg_prueth_hsr_del_mcast(struct net_device *ndev, const u8 *addr)
521 {
522 	struct prueth_emac *emac = netdev_priv(ndev);
523 	struct prueth *prueth = emac->prueth;
524 
525 	icssg_fdb_add_del(emac, addr, prueth->default_vlan,
526 			  ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
527 			  ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
528 			  ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
529 			  ICSSG_FDB_ENTRY_BLOCK, false);
530 
531 	return 0;
532 }
533 
534 /**
535  * emac_ndo_open - EMAC device open
536  * @ndev: network adapter device
537  *
538  * Called when the system wants to bring the interface up: sets up DMA channels and IRQs, boots the PRU firmware, enables NAPI and starts the PHY.
539  *
540  * Return: 0 for a successful open, or appropriate error code
541  */
542 static int emac_ndo_open(struct net_device *ndev)
543 {
544 	struct prueth_emac *emac = netdev_priv(ndev);
545 	int ret, i, num_data_chn = emac->tx_ch_num;
546 	struct prueth *prueth = emac->prueth;
547 	int slice = prueth_emac_slice(emac);
548 	struct device *dev = prueth->dev;
549 	int max_rx_flows;
550 	int rx_flow;
551 
552 	/* clear SMEM and MSMC settings for all slices */
553 	if (!prueth->emacs_initialized) {
554 		memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
555 		memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
556 	}
557 
558 	/* set h/w MAC as user might have re-configured */
559 	ether_addr_copy(emac->mac_addr, ndev->dev_addr);
560 
561 	icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
562 	icssg_class_default(prueth->miig_rt, slice, 0, false);
563 	icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
564 
565 	/* Notify the stack of the actual queue counts. */
566 	ret = netif_set_real_num_tx_queues(ndev, num_data_chn);
567 	if (ret) {
568 		dev_err(dev, "cannot set real number of tx queues\n");
569 		return ret;
570 	}
571 
572 	init_completion(&emac->cmd_complete);
573 	ret = prueth_init_tx_chns(emac);
574 	if (ret) {
575 		dev_err(dev, "failed to init tx channel: %d\n", ret);
576 		return ret;
577 	}
578 
579 	max_rx_flows = PRUETH_MAX_RX_FLOWS;
580 	ret = prueth_init_rx_chns(emac, &emac->rx_chns, "rx",
581 				  max_rx_flows, PRUETH_MAX_RX_DESC);
582 	if (ret) {
583 		dev_err(dev, "failed to init rx channel: %d\n", ret);
584 		goto cleanup_tx;
585 	}
586 
587 	ret = prueth_ndev_add_tx_napi(emac);
588 	if (ret)
589 		goto cleanup_rx;
590 
591 	/* we use only the highest priority flow for now i.e. @irq[3] */
592 	rx_flow = PRUETH_RX_FLOW_DATA;
593 	ret = request_irq(emac->rx_chns.irq[rx_flow], prueth_rx_irq,
594 			  IRQF_TRIGGER_HIGH, dev_name(dev), emac);
595 	if (ret) {
596 		dev_err(dev, "unable to request RX IRQ\n");
597 		goto cleanup_napi;
598 	}
599 
600 	/* reset and start PRU firmware */
601 	ret = prueth_emac_start(prueth, emac);
602 	if (ret)
603 		goto free_rx_irq;
604 
605 	icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
606 
607 	if (!prueth->emacs_initialized) {
608 		ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
609 				    emac, IEP_DEFAULT_CYCLE_TIME_NS);
610 	}
611 
612 	ret = request_threaded_irq(emac->tx_ts_irq, NULL, prueth_tx_ts_irq,
613 				   IRQF_ONESHOT, dev_name(dev), emac);
614 	if (ret)
615 		goto stop;
616 
617 	/* Prepare RX */
618 	ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
619 	if (ret)
620 		goto free_tx_ts_irq;
621 
622 	ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
623 	if (ret)
624 		goto reset_rx_chn;
625 
626 	for (i = 0; i < emac->tx_ch_num; i++) {
627 		ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
628 		if (ret)
629 			goto reset_tx_chan;
630 	}
631 
632 	/* Enable NAPI in Tx and Rx direction */
633 	for (i = 0; i < emac->tx_ch_num; i++)
634 		napi_enable(&emac->tx_chns[i].napi_tx);
635 	napi_enable(&emac->napi_rx);
636 
637 	/* start PHY */
638 	phy_start(ndev->phydev);
639 
640 	prueth->emacs_initialized++;
641 
642 	queue_work(system_long_wq, &emac->stats_work.work);
643 
644 	return 0;
645 
646 reset_tx_chan:
647 	/* Since the interface is not yet up, there wouldn't be
648 	 * any SKB for completion. So pass false for free_skb.
649 	 */
650 	prueth_reset_tx_chan(emac, i, false);
651 reset_rx_chn:
652 	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
653 free_tx_ts_irq:
654 	free_irq(emac->tx_ts_irq, emac);
655 stop:
656 	prueth_emac_stop(emac);
657 free_rx_irq:
658 	free_irq(emac->rx_chns.irq[rx_flow], emac);
659 cleanup_napi:
660 	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
661 cleanup_rx:
662 	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
663 cleanup_tx:
664 	prueth_cleanup_tx_chns(emac);
665 
666 	return ret;
667 }
668 
669 /**
670  * emac_ndo_stop - EMAC device stop
671  * @ndev: network adapter device
672  *
673  * Called when the system wants to bring the interface down: stops the PHY, tears down the DMA channels, disables NAPI and shuts down the PRU firmware.
674  *
675  * Return: Always 0 (Success)
676  */
677 static int emac_ndo_stop(struct net_device *ndev)
678 {
679 	struct prueth_emac *emac = netdev_priv(ndev);
680 	struct prueth *prueth = emac->prueth;
681 	int rx_flow = PRUETH_RX_FLOW_DATA;
682 	int max_rx_flows;
683 	int ret, i;
684 
685 	/* inform the upper layers. */
686 	netif_tx_stop_all_queues(ndev);
687 
688 	/* block packets from wire */
689 	if (ndev->phydev)
690 		phy_stop(ndev->phydev);
691 
692 	icssg_class_disable(prueth->miig_rt, prueth_emac_slice(emac));
693 
694 	if (emac->prueth->is_hsr_offload_mode)
695 		__dev_mc_unsync(ndev, icssg_prueth_hsr_del_mcast);
696 	else
697 		__dev_mc_unsync(ndev, icssg_prueth_del_mcast);
698 
699 	atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
700 	/* ensure new tdown_cnt value is visible */
701 	smp_mb__after_atomic();
702 	/* tear down and disable UDMA channels */
703 	reinit_completion(&emac->tdown_complete);
704 	for (i = 0; i < emac->tx_ch_num; i++)
705 		k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);
706 
707 	ret = wait_for_completion_timeout(&emac->tdown_complete,
708 					  msecs_to_jiffies(1000));
709 	if (!ret)
710 		netdev_err(ndev, "tx teardown timeout\n");
711 
712 	prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
713 	for (i = 0; i < emac->tx_ch_num; i++) {
714 		napi_disable(&emac->tx_chns[i].napi_tx);
715 		hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer);
716 	}
717 
718 	max_rx_flows = PRUETH_MAX_RX_FLOWS;
719 	k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
720 
721 	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
722 
723 	napi_disable(&emac->napi_rx);
724 	hrtimer_cancel(&emac->rx_hrtimer);
725 
726 	cancel_work_sync(&emac->rx_mode_work);
727 
728 	/* Destroying the queued work in ndo_stop() */
729 	cancel_delayed_work_sync(&emac->stats_work);
730 
731 	if (prueth->emacs_initialized == 1)
732 		icss_iep_exit(emac->iep);
733 
734 	/* stop PRUs */
735 	prueth_emac_stop(emac);
736 
737 	free_irq(emac->tx_ts_irq, emac);
738 
739 	free_irq(emac->rx_chns.irq[rx_flow], emac);
740 	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
741 
742 	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
743 	prueth_cleanup_tx_chns(emac);
744 
745 	prueth->emacs_initialized--;
746 
747 	return 0;
748 }
749 
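/* Deferred RX mode update, run from emac->cmd_wq: unicast and multicast
 * flooding are first disabled, then re-enabled according to
 * IFF_PROMISC/IFF_ALLMULTI; otherwise the multicast list is synced into
 * the firmware FDB.
 */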
750 static void emac_ndo_set_rx_mode_work(struct work_struct *work)
751 {
752 	struct prueth_emac *emac = container_of(work, struct prueth_emac, rx_mode_work);
753 	struct net_device *ndev = emac->ndev;
754 	bool promisc, allmulti;
755 
756 	if (!netif_running(ndev))
757 		return;
758 
759 	promisc = ndev->flags & IFF_PROMISC;
760 	allmulti = ndev->flags & IFF_ALLMULTI;
761 	icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_DISABLE);
762 	icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_DISABLE);
763 
764 	if (promisc) {
765 		icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_ENABLE);
766 		icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
767 		return;
768 	}
769 
770 	if (allmulti) {
771 		icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
772 		return;
773 	}
774 
775 	if (emac->prueth->is_hsr_offload_mode)
776 		__dev_mc_sync(ndev, icssg_prueth_hsr_add_mcast,
777 			      icssg_prueth_hsr_del_mcast);
778 	else
779 		__dev_mc_sync(ndev, icssg_prueth_add_mcast,
780 			      icssg_prueth_del_mcast);
781 }
782 
783 /**
784  * emac_ndo_set_rx_mode - EMAC set receive mode function
785  * @ndev: The EMAC network adapter
786  *
787  * Called when the system wants to set the receive mode of the device; the actual update is deferred to emac_ndo_set_rx_mode_work() on the command workqueue.
788  *
789  */
790 static void emac_ndo_set_rx_mode(struct net_device *ndev)
791 {
792 	struct prueth_emac *emac = netdev_priv(ndev);
793 
794 	queue_work(emac->cmd_wq, &emac->rx_mode_work);
795 }
796 
797 static netdev_features_t emac_ndo_fix_features(struct net_device *ndev,
798 					       netdev_features_t features)
799 {
800 	/* hsr tag insertion offload and hsr dup offload are tightly coupled in
801 	 * firmware implementation. Both these features need to be enabled /
802 	 * disabled together.
803 	 */
804 	if (!(ndev->features & (NETIF_F_HW_HSR_DUP | NETIF_F_HW_HSR_TAG_INS)))
805 		if ((features & NETIF_F_HW_HSR_DUP) ||
806 		    (features & NETIF_F_HW_HSR_TAG_INS))
807 			features |= NETIF_F_HW_HSR_DUP |
808 				    NETIF_F_HW_HSR_TAG_INS;
809 
810 	if ((ndev->features & NETIF_F_HW_HSR_DUP) ||
811 	    (ndev->features & NETIF_F_HW_HSR_TAG_INS))
812 		if (!(features & NETIF_F_HW_HSR_DUP) ||
813 		    !(features & NETIF_F_HW_HSR_TAG_INS))
814 			features &= ~(NETIF_F_HW_HSR_DUP |
815 				      NETIF_F_HW_HSR_TAG_INS);
816 
817 	return features;
818 }
819 
820 static int emac_ndo_vlan_rx_add_vid(struct net_device *ndev,
821 				    __be16 proto, u16 vid)
822 {
823 	struct prueth_emac *emac = netdev_priv(ndev);
824 	struct prueth *prueth = emac->prueth;
825 	int untag_mask = 0;
826 	int port_mask;
827 
828 	if (prueth->is_hsr_offload_mode) {
829 		port_mask = BIT(PRUETH_PORT_HOST) | BIT(emac->port_id);
830 		untag_mask = 0;
831 
832 		netdev_dbg(emac->ndev, "VID add vid:%u port_mask:%X untag_mask %X\n",
833 			   vid, port_mask, untag_mask);
834 
835 		icssg_vtbl_modify(emac, vid, port_mask, untag_mask, true);
836 		icssg_set_pvid(emac->prueth, vid, emac->port_id);
837 	}
838 	return 0;
839 }
840 
841 static int emac_ndo_vlan_rx_del_vid(struct net_device *ndev,
842 				    __be16 proto, u16 vid)
843 {
844 	struct prueth_emac *emac = netdev_priv(ndev);
845 	struct prueth *prueth = emac->prueth;
846 	int untag_mask = 0;
847 	int port_mask;
848 
849 	if (prueth->is_hsr_offload_mode) {
850 		port_mask = BIT(PRUETH_PORT_HOST);
851 		untag_mask = 0;
852 
853 		netdev_dbg(emac->ndev, "VID del vid:%u port_mask:%X untag_mask %X\n",
854 			   vid, port_mask, untag_mask);
855 
856 		icssg_vtbl_modify(emac, vid, port_mask, untag_mask, false);
857 	}
858 	return 0;
859 }
860 
861 static const struct net_device_ops emac_netdev_ops = {
862 	.ndo_open = emac_ndo_open,
863 	.ndo_stop = emac_ndo_stop,
864 	.ndo_start_xmit = icssg_ndo_start_xmit,
865 	.ndo_set_mac_address = eth_mac_addr,
866 	.ndo_validate_addr = eth_validate_addr,
867 	.ndo_tx_timeout = icssg_ndo_tx_timeout,
868 	.ndo_set_rx_mode = emac_ndo_set_rx_mode,
869 	.ndo_eth_ioctl = icssg_ndo_ioctl,
870 	.ndo_get_stats64 = icssg_ndo_get_stats64,
871 	.ndo_get_phys_port_name = icssg_ndo_get_phys_port_name,
872 	.ndo_fix_features = emac_ndo_fix_features,
873 	.ndo_vlan_rx_add_vid = emac_ndo_vlan_rx_add_vid,
874 	.ndo_vlan_rx_kill_vid = emac_ndo_vlan_rx_del_vid,
875 };
876 
877 static int prueth_netdev_init(struct prueth *prueth,
878 			      struct device_node *eth_node)
879 {
880 	int ret, num_tx_chn = PRUETH_MAX_TX_QUEUES;
881 	struct prueth_emac *emac;
882 	struct net_device *ndev;
883 	enum prueth_port port;
884 	const char *irq_name;
885 	enum prueth_mac mac;
886 
887 	port = prueth_node_port(eth_node);
888 	if (port == PRUETH_PORT_INVALID)
889 		return -EINVAL;
890 
891 	mac = prueth_node_mac(eth_node);
892 	if (mac == PRUETH_MAC_INVALID)
893 		return -EINVAL;
894 
895 	ndev = alloc_etherdev_mq(sizeof(*emac), num_tx_chn);
896 	if (!ndev)
897 		return -ENOMEM;
898 
899 	emac = netdev_priv(ndev);
900 	emac->prueth = prueth;
901 	emac->ndev = ndev;
902 	emac->port_id = port;
903 	emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
904 	if (!emac->cmd_wq) {
905 		ret = -ENOMEM;
906 		goto free_ndev;
907 	}
908 	INIT_WORK(&emac->rx_mode_work, emac_ndo_set_rx_mode_work);
909 
910 	INIT_DELAYED_WORK(&emac->stats_work, icssg_stats_work_handler);
911 
912 	ret = pruss_request_mem_region(prueth->pruss,
913 				       port == PRUETH_PORT_MII0 ?
914 				       PRUSS_MEM_DRAM0 : PRUSS_MEM_DRAM1,
915 				       &emac->dram);
916 	if (ret) {
917 		dev_err(prueth->dev, "unable to get DRAM: %d\n", ret);
918 		ret = -ENOMEM;
919 		goto free_wq;
920 	}
921 
922 	emac->tx_ch_num = 1;
923 
924 	irq_name = "tx_ts0";
925 	if (emac->port_id == PRUETH_PORT_MII1)
926 		irq_name = "tx_ts1";
927 	emac->tx_ts_irq = platform_get_irq_byname_optional(prueth->pdev, irq_name);
928 	if (emac->tx_ts_irq < 0) {
929 		ret = dev_err_probe(prueth->dev, emac->tx_ts_irq, "could not get tx_ts_irq\n");
930 		goto free;
931 	}
932 
933 	SET_NETDEV_DEV(ndev, prueth->dev);
934 	spin_lock_init(&emac->lock);
935 	mutex_init(&emac->cmd_lock);
936 
937 	emac->phy_node = of_parse_phandle(eth_node, "phy-handle", 0);
938 	if (!emac->phy_node && !of_phy_is_fixed_link(eth_node)) {
939 		dev_err(prueth->dev, "couldn't find phy-handle\n");
940 		ret = -ENODEV;
941 		goto free;
942 	} else if (of_phy_is_fixed_link(eth_node)) {
943 		ret = of_phy_register_fixed_link(eth_node);
944 		if (ret) {
945 			ret = dev_err_probe(prueth->dev, ret,
946 					    "failed to register fixed-link phy\n");
947 			goto free;
948 		}
949 
950 		emac->phy_node = eth_node;
951 	}
952 
953 	ret = of_get_phy_mode(eth_node, &emac->phy_if);
954 	if (ret) {
955 		dev_err(prueth->dev, "could not get phy-mode property\n");
956 		goto free;
957 	}
958 
959 	if (emac->phy_if != PHY_INTERFACE_MODE_MII &&
960 	    !phy_interface_mode_is_rgmii(emac->phy_if)) {
961 		dev_err(prueth->dev, "PHY mode unsupported %s\n", phy_modes(emac->phy_if));
962 		ret = -EINVAL;
963 		goto free;
964 	}
965 
966 	/* AM65 SR2.0 has TX Internal delay always enabled by hardware
967 	 * and it is not possible to disable TX Internal delay. The below
968 	 * switch case block describes how we handle different phy modes
969 	 * based on hardware restriction.
970 	 */
971 	switch (emac->phy_if) {
972 	case PHY_INTERFACE_MODE_RGMII_ID:
973 		emac->phy_if = PHY_INTERFACE_MODE_RGMII_RXID;
974 		break;
975 	case PHY_INTERFACE_MODE_RGMII_TXID:
976 		emac->phy_if = PHY_INTERFACE_MODE_RGMII;
977 		break;
978 	case PHY_INTERFACE_MODE_RGMII:
979 	case PHY_INTERFACE_MODE_RGMII_RXID:
980 		dev_err(prueth->dev, "RGMII mode without TX delay is not supported");
981 		ret = -EINVAL;
982 		goto free;
983 	default:
984 		break;
985 	}
986 
987 	/* get mac address from DT and set private and netdev addr */
988 	ret = of_get_ethdev_address(eth_node, ndev);
989 	if (!is_valid_ether_addr(ndev->dev_addr)) {
990 		eth_hw_addr_random(ndev);
991 		dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n",
992 			 port, ndev->dev_addr);
993 	}
994 	ether_addr_copy(emac->mac_addr, ndev->dev_addr);
995 
996 	ndev->dev.of_node = eth_node;
997 	ndev->min_mtu = PRUETH_MIN_PKT_SIZE;
998 	ndev->max_mtu = PRUETH_MAX_MTU;
999 	ndev->netdev_ops = &emac_netdev_ops;
1000 	ndev->ethtool_ops = &icssg_ethtool_ops;
1001 	ndev->hw_features = NETIF_F_SG;
1002 	ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
1003 	ndev->hw_features |= NETIF_PRUETH_HSR_OFFLOAD_FEATURES;
1004 
1005 	netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
1006 	hrtimer_init(&emac->rx_hrtimer, CLOCK_MONOTONIC,
1007 		     HRTIMER_MODE_REL_PINNED);
1008 	emac->rx_hrtimer.function = &emac_rx_timer_callback;
1009 	prueth->emac[mac] = emac;
1010 
1011 	return 0;
1012 
1013 free:
1014 	pruss_release_mem_region(prueth->pruss, &emac->dram);
1015 free_wq:
1016 	destroy_workqueue(emac->cmd_wq);
1017 free_ndev:
1018 	emac->ndev = NULL;
1019 	prueth->emac[mac] = NULL;
1020 	free_netdev(ndev);
1021 
1022 	return ret;
1023 }
1024 
1025 bool prueth_dev_check(const struct net_device *ndev)
1026 {
1027 	if (ndev->netdev_ops == &emac_netdev_ops && netif_running(ndev)) {
1028 		struct prueth_emac *emac = netdev_priv(ndev);
1029 
1030 		return emac->prueth->is_switch_mode;
1031 	}
1032 
1033 	return false;
1034 }
1035 
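/* offload_fwd_mark is set on both MACs only when both ports are members
 * of the same bridge, i.e. when hardware forwarding between the ports
 * is active.
 */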
1036 static void prueth_offload_fwd_mark_update(struct prueth *prueth)
1037 {
1038 	int set_val = 0;
1039 	int i;
1040 
1041 	if (prueth->br_members == (BIT(PRUETH_PORT_MII0) | BIT(PRUETH_PORT_MII1)))
1042 		set_val = 1;
1043 
1044 	dev_dbg(prueth->dev, "set offload_fwd_mark %d\n", set_val);
1045 
1046 	for (i = PRUETH_MAC0; i < PRUETH_NUM_MACS; i++) {
1047 		struct prueth_emac *emac = prueth->emac[i];
1048 
1049 		if (!emac || !emac->ndev)
1050 			continue;
1051 
1052 		emac->offload_fwd_mark = set_val;
1053 	}
1054 }
1055 
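/* Restart both slices: detach the netdevs, disable the ports, stop and
 * re-boot all PRU cores (picking up the firmware for the new mode), then
 * re-enable forwarding and re-attach the netdevs.
 */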
1056 static void prueth_emac_restart(struct prueth *prueth)
1057 {
1058 	struct prueth_emac *emac0 = prueth->emac[PRUETH_MAC0];
1059 	struct prueth_emac *emac1 = prueth->emac[PRUETH_MAC1];
1060 
1061 	/* Detach the net_device for both PRUeth ports */
1062 	if (netif_running(emac0->ndev))
1063 		netif_device_detach(emac0->ndev);
1064 	if (netif_running(emac1->ndev))
1065 		netif_device_detach(emac1->ndev);
1066 
1067 	/* Disable both PRUeth ports */
1068 	icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
1069 	icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);
1070 
1071 	/* Stop the PRU cores for both PRUeth ports */
1072 	prueth_emac_stop(emac0);
1073 	prueth->emacs_initialized--;
1074 	prueth_emac_stop(emac1);
1075 	prueth->emacs_initialized--;
1076 
1077 	/* Start the PRU cores for both PRUeth ports */
1078 	prueth_emac_start(prueth, emac0);
1079 	prueth->emacs_initialized++;
1080 	prueth_emac_start(prueth, emac1);
1081 	prueth->emacs_initialized++;
1082 
1083 	/* Enable forwarding for both PRUeth ports */
1084 	icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD);
1085 	icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD);
1086 
1087 	/* Attach the net_device for both PRUeth ports */
1088 	netif_device_attach(emac0->ndev);
1089 	netif_device_attach(emac1->ndev);
1090 }
1091 
1092 static void icssg_change_mode(struct prueth *prueth)
1093 {
1094 	struct prueth_emac *emac;
1095 	int mac;
1096 
1097 	prueth_emac_restart(prueth);
1098 
1099 	for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
1100 		emac = prueth->emac[mac];
1101 		if (prueth->is_hsr_offload_mode) {
1102 			if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM)
1103 				icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE);
1104 			else
1105 				icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE);
1106 		}
1107 
1108 		if (netif_running(emac->ndev)) {
1109 			icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan,
1110 					  ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
1111 					  ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
1112 					  ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
1113 					  ICSSG_FDB_ENTRY_BLOCK,
1114 					  true);
1115 			icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID,
1116 					  BIT(emac->port_id) | DEFAULT_PORT_MASK,
1117 					  BIT(emac->port_id) | DEFAULT_UNTAG_MASK,
1118 					  true);
1119 			if (prueth->is_hsr_offload_mode)
1120 				icssg_vtbl_modify(emac, DEFAULT_VID,
1121 						  DEFAULT_PORT_MASK,
1122 						  DEFAULT_UNTAG_MASK, true);
1123 			icssg_set_pvid(prueth, emac->port_vlan, emac->port_id);
1124 			if (prueth->is_switch_mode)
1125 				icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);
1126 		}
1127 	}
1128 }
1129 
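/* Bridge join: offload the port via switchdev and, once both ports are
 * members of the same bridge, switch the firmware to switch mode with
 * default VLAN 1 via a full restart.
 */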
1130 static int prueth_netdevice_port_link(struct net_device *ndev,
1131 				      struct net_device *br_ndev,
1132 				      struct netlink_ext_ack *extack)
1133 {
1134 	struct prueth_emac *emac = netdev_priv(ndev);
1135 	struct prueth *prueth = emac->prueth;
1136 	int err;
1137 
1138 	if (!prueth->br_members) {
1139 		prueth->hw_bridge_dev = br_ndev;
1140 	} else {
1141 		/* Adding the port to a second bridge is not
1142 		 * supported
1143 		 */
1144 		if (prueth->hw_bridge_dev != br_ndev)
1145 			return -EOPNOTSUPP;
1146 	}
1147 
1148 	err = switchdev_bridge_port_offload(br_ndev, ndev, emac,
1149 					    &prueth->prueth_switchdev_nb,
1150 					    &prueth->prueth_switchdev_bl_nb,
1151 					    false, extack);
1152 	if (err)
1153 		return err;
1154 
1155 	prueth->br_members |= BIT(emac->port_id);
1156 
1157 	if (!prueth->is_switch_mode) {
1158 		if (prueth->br_members & BIT(PRUETH_PORT_MII0) &&
1159 		    prueth->br_members & BIT(PRUETH_PORT_MII1)) {
1160 			prueth->is_switch_mode = true;
1161 			prueth->default_vlan = 1;
1162 			emac->port_vlan = prueth->default_vlan;
1163 			icssg_change_mode(prueth);
1164 		}
1165 	}
1166 
1167 	prueth_offload_fwd_mark_update(prueth);
1168 
1169 	return NOTIFY_DONE;
1170 }
1171 
1172 static void prueth_netdevice_port_unlink(struct net_device *ndev)
1173 {
1174 	struct prueth_emac *emac = netdev_priv(ndev);
1175 	struct prueth *prueth = emac->prueth;
1176 
1177 	prueth->br_members &= ~BIT(emac->port_id);
1178 
1179 	if (prueth->is_switch_mode) {
1180 		prueth->is_switch_mode = false;
1181 		emac->port_vlan = 0;
1182 		prueth_emac_restart(prueth);
1183 	}
1184 
1185 	prueth_offload_fwd_mark_update(prueth);
1186 
1187 	if (!prueth->br_members)
1188 		prueth->hw_bridge_dev = NULL;
1189 }
1190 
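/* HSR join: once both ports are HSR members and the HSR offload
 * features are enabled on at least one of them, switch to the HSR
 * offload firmware (default VLAN 1) via a full restart.
 */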
1191 static int prueth_hsr_port_link(struct net_device *ndev)
1192 {
1193 	struct prueth_emac *emac = netdev_priv(ndev);
1194 	struct prueth *prueth = emac->prueth;
1195 	struct prueth_emac *emac0;
1196 	struct prueth_emac *emac1;
1197 
1198 	emac0 = prueth->emac[PRUETH_MAC0];
1199 	emac1 = prueth->emac[PRUETH_MAC1];
1200 
1201 	if (prueth->is_switch_mode)
1202 		return -EOPNOTSUPP;
1203 
1204 	prueth->hsr_members |= BIT(emac->port_id);
1205 	if (!prueth->is_hsr_offload_mode) {
1206 		if (prueth->hsr_members & BIT(PRUETH_PORT_MII0) &&
1207 		    prueth->hsr_members & BIT(PRUETH_PORT_MII1)) {
1208 			if (!(emac0->ndev->features &
1209 			      NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
1210 			    !(emac1->ndev->features &
1211 			      NETIF_PRUETH_HSR_OFFLOAD_FEATURES))
1212 				return -EOPNOTSUPP;
1213 			prueth->is_hsr_offload_mode = true;
1214 			prueth->default_vlan = 1;
1215 			emac0->port_vlan = prueth->default_vlan;
1216 			emac1->port_vlan = prueth->default_vlan;
1217 			icssg_change_mode(prueth);
1218 			netdev_dbg(ndev, "Enabling HSR offload mode\n");
1219 		}
1220 	}
1221 
1222 	return 0;
1223 }
1224 
1225 static void prueth_hsr_port_unlink(struct net_device *ndev)
1226 {
1227 	struct prueth_emac *emac = netdev_priv(ndev);
1228 	struct prueth *prueth = emac->prueth;
1229 	struct prueth_emac *emac0;
1230 	struct prueth_emac *emac1;
1231 
1232 	emac0 = prueth->emac[PRUETH_MAC0];
1233 	emac1 = prueth->emac[PRUETH_MAC1];
1234 
1235 	prueth->hsr_members &= ~BIT(emac->port_id);
1236 	if (prueth->is_hsr_offload_mode) {
1237 		prueth->is_hsr_offload_mode = false;
1238 		emac0->port_vlan = 0;
1239 		emac1->port_vlan = 0;
1240 		prueth->hsr_dev = NULL;
1241 		prueth_emac_restart(prueth);
1242 		netdev_dbg(ndev, "Disabling HSR Offload mode\n");
1243 	}
1244 }
1245 
1246 /* netdev notifier */
1247 static int prueth_netdevice_event(struct notifier_block *unused,
1248 				  unsigned long event, void *ptr)
1249 {
1250 	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
1251 	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
1252 	struct netdev_notifier_changeupper_info *info;
1253 	struct prueth_emac *emac = netdev_priv(ndev);
1254 	struct prueth *prueth = emac->prueth;
1255 	int ret = NOTIFY_DONE;
1256 
1257 	if (ndev->netdev_ops != &emac_netdev_ops)
1258 		return NOTIFY_DONE;
1259 
1260 	switch (event) {
1261 	case NETDEV_CHANGEUPPER:
1262 		info = ptr;
1263 
1264 		if ((ndev->features & NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
1265 		    is_hsr_master(info->upper_dev)) {
1266 			if (info->linking) {
1267 				if (!prueth->hsr_dev) {
1268 					prueth->hsr_dev = info->upper_dev;
1269 					icssg_class_set_host_mac_addr(prueth->miig_rt,
1270 								      prueth->hsr_dev->dev_addr);
1271 				} else {
1272 					if (prueth->hsr_dev != info->upper_dev) {
1273 						netdev_dbg(ndev, "Both interfaces must be linked to same upper device\n");
1274 						return -EOPNOTSUPP;
1275 					}
1276 				}
1277 				prueth_hsr_port_link(ndev);
1278 			} else {
1279 				prueth_hsr_port_unlink(ndev);
1280 			}
1281 		}
1282 
1283 		if (netif_is_bridge_master(info->upper_dev)) {
1284 			if (info->linking)
1285 				ret = prueth_netdevice_port_link(ndev, info->upper_dev, extack);
1286 			else
1287 				prueth_netdevice_port_unlink(ndev);
1288 		}
1289 		break;
1290 	default:
1291 		return NOTIFY_DONE;
1292 	}
1293 
1294 	return notifier_from_errno(ret);
1295 }
1296 
1297 static int prueth_register_notifiers(struct prueth *prueth)
1298 {
1299 	int ret = 0;
1300 
1301 	prueth->prueth_netdevice_nb.notifier_call = &prueth_netdevice_event;
1302 	ret = register_netdevice_notifier(&prueth->prueth_netdevice_nb);
1303 	if (ret) {
1304 		dev_err(prueth->dev, "can't register netdevice notifier\n");
1305 		return ret;
1306 	}
1307 
1308 	ret = prueth_switchdev_register_notifiers(prueth);
1309 	if (ret)
1310 		unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
1311 
1312 	return ret;
1313 }
1314 
1315 static void prueth_unregister_notifiers(struct prueth *prueth)
1316 {
1317 	prueth_switchdev_unregister_notifiers(prueth);
1318 	unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
1319 }
1320 
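/* Probe: parse the two "ethernet-ports" children, acquire the PRU, RTU
 * and TX_PRU cores, PRUSS shared RAM, a 64K-aligned MSMC SRAM region
 * and the IEP timers, create and register a netdev per available port,
 * and register switchdev notifiers when switch mode is supported.
 */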
1321 static int prueth_probe(struct platform_device *pdev)
1322 {
1323 	struct device_node *eth_node, *eth_ports_node;
1324 	struct device_node  *eth0_node = NULL;
1325 	struct device_node  *eth1_node = NULL;
1326 	struct genpool_data_align gp_data = {
1327 		.align = SZ_64K,
1328 	};
1329 	struct device *dev = &pdev->dev;
1330 	struct device_node *np;
1331 	struct prueth *prueth;
1332 	struct pruss *pruss;
1333 	u32 msmc_ram_size;
1334 	int i, ret;
1335 
1336 	np = dev->of_node;
1337 
1338 	prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
1339 	if (!prueth)
1340 		return -ENOMEM;
1341 
1342 	dev_set_drvdata(dev, prueth);
1343 	prueth->pdev = pdev;
1344 	prueth->pdata = *(const struct prueth_pdata *)device_get_match_data(dev);
1345 
1346 	prueth->dev = dev;
1347 	eth_ports_node = of_get_child_by_name(np, "ethernet-ports");
1348 	if (!eth_ports_node)
1349 		return -ENOENT;
1350 
1351 	for_each_child_of_node(eth_ports_node, eth_node) {
1352 		u32 reg;
1353 
1354 		if (strcmp(eth_node->name, "port"))
1355 			continue;
1356 		ret = of_property_read_u32(eth_node, "reg", &reg);
1357 		if (ret < 0) {
1358 			dev_err(dev, "%pOF error reading port_id %d\n",
1359 				eth_node, ret);
1360 		}
1361 
1362 		of_node_get(eth_node);
1363 
1364 		if (reg == 0) {
1365 			eth0_node = eth_node;
1366 			if (!of_device_is_available(eth0_node)) {
1367 				of_node_put(eth0_node);
1368 				eth0_node = NULL;
1369 			}
1370 		} else if (reg == 1) {
1371 			eth1_node = eth_node;
1372 			if (!of_device_is_available(eth1_node)) {
1373 				of_node_put(eth1_node);
1374 				eth1_node = NULL;
1375 			}
1376 		} else {
1377 			dev_err(dev, "port reg should be 0 or 1\n");
1378 		}
1379 	}
1380 
1381 	of_node_put(eth_ports_node);
1382 
1383 	/* At least one node must be present and available else we fail */
1384 	if (!eth0_node && !eth1_node) {
1385 		dev_err(dev, "neither port0 nor port1 node available\n");
1386 		return -ENODEV;
1387 	}
1388 
1389 	if (eth0_node == eth1_node) {
1390 		dev_err(dev, "port0 and port1 can't have same reg\n");
1391 		of_node_put(eth0_node);
1392 		return -ENODEV;
1393 	}
1394 
1395 	prueth->eth_node[PRUETH_MAC0] = eth0_node;
1396 	prueth->eth_node[PRUETH_MAC1] = eth1_node;
1397 
1398 	prueth->miig_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-g-rt");
1399 	if (IS_ERR(prueth->miig_rt)) {
1400 		dev_err(dev, "couldn't get ti,mii-g-rt syscon regmap\n");
1401 		return -ENODEV;
1402 	}
1403 
1404 	prueth->mii_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-rt");
1405 	if (IS_ERR(prueth->mii_rt)) {
1406 		dev_err(dev, "couldn't get ti,mii-rt syscon regmap\n");
1407 		return -ENODEV;
1408 	}
1409 
1410 	prueth->pa_stats = syscon_regmap_lookup_by_phandle(np, "ti,pa-stats");
1411 	if (IS_ERR(prueth->pa_stats)) {
1412 		dev_err(dev, "couldn't get ti,pa-stats syscon regmap\n");
1413 		prueth->pa_stats = NULL;
1414 	}
1415 
1416 	if (eth0_node) {
1417 		ret = prueth_get_cores(prueth, ICSS_SLICE0, false);
1418 		if (ret)
1419 			goto put_cores;
1420 	}
1421 
1422 	if (eth1_node) {
1423 		ret = prueth_get_cores(prueth, ICSS_SLICE1, false);
1424 		if (ret)
1425 			goto put_cores;
1426 	}
1427 
1428 	pruss = pruss_get(eth0_node ?
1429 			  prueth->pru[ICSS_SLICE0] : prueth->pru[ICSS_SLICE1]);
1430 	if (IS_ERR(pruss)) {
1431 		ret = PTR_ERR(pruss);
1432 		dev_err(dev, "unable to get pruss handle\n");
1433 		goto put_cores;
1434 	}
1435 
1436 	prueth->pruss = pruss;
1437 
1438 	ret = pruss_request_mem_region(pruss, PRUSS_MEM_SHRD_RAM2,
1439 				       &prueth->shram);
1440 	if (ret) {
1441 		dev_err(dev, "unable to get PRUSS SHRD RAM2: %d\n", ret);
1442 		goto put_pruss;
1443 	}
1444 
1445 	prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
1446 	if (!prueth->sram_pool) {
1447 		dev_err(dev, "unable to get SRAM pool\n");
1448 		ret = -ENODEV;
1449 
1450 		goto put_mem;
1451 	}
1452 
1453 	msmc_ram_size = MSMC_RAM_SIZE;
1454 	prueth->is_switchmode_supported = prueth->pdata.switch_mode;
1455 	if (prueth->is_switchmode_supported)
1456 		msmc_ram_size = MSMC_RAM_SIZE_SWITCH_MODE;
1457 
1458 	/* NOTE: an FW bug requires the buffer base to be 64 KB aligned */
1459 	prueth->msmcram.va =
1460 		(void __iomem *)gen_pool_alloc_algo(prueth->sram_pool,
1461 						    msmc_ram_size,
1462 						    gen_pool_first_fit_align,
1463 						    &gp_data);
1464 
1465 	if (!prueth->msmcram.va) {
1466 		ret = -ENOMEM;
1467 		dev_err(dev, "unable to allocate MSMC resource\n");
1468 		goto put_mem;
1469 	}
1470 	prueth->msmcram.pa = gen_pool_virt_to_phys(prueth->sram_pool,
1471 						   (unsigned long)prueth->msmcram.va);
1472 	prueth->msmcram.size = msmc_ram_size;
1473 	memset_io(prueth->msmcram.va, 0, msmc_ram_size);
1474 	dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa,
1475 		prueth->msmcram.va, prueth->msmcram.size);
1476 
1477 	prueth->iep0 = icss_iep_get_idx(np, 0);
1478 	if (IS_ERR(prueth->iep0)) {
1479 		ret = dev_err_probe(dev, PTR_ERR(prueth->iep0), "iep0 get failed\n");
1480 		prueth->iep0 = NULL;
1481 		goto free_pool;
1482 	}
1483 
1484 	prueth->iep1 = icss_iep_get_idx(np, 1);
1485 	if (IS_ERR(prueth->iep1)) {
1486 		ret = dev_err_probe(dev, PTR_ERR(prueth->iep1), "iep1 get failed\n");
1487 		goto put_iep0;
1488 	}
1489 
1490 	if (prueth->pdata.quirk_10m_link_issue) {
1491 		/* Enable IEP1 for the firmware in 64-bit mode as a workaround for the
1492 		 * 10M full-duplex link-detect issue under TX traffic.
1493 		 */
1494 		icss_iep_init_fw(prueth->iep1);
1495 	}
1496 
1497 	spin_lock_init(&prueth->vtbl_lock);
1498 	/* setup netdev interfaces */
1499 	if (eth0_node) {
1500 		ret = prueth_netdev_init(prueth, eth0_node);
1501 		if (ret) {
1502 			dev_err_probe(dev, ret, "netdev init %s failed\n",
1503 				      eth0_node->name);
1504 			goto exit_iep;
1505 		}
1506 
1507 		prueth->emac[PRUETH_MAC0]->half_duplex =
1508 			of_property_read_bool(eth0_node, "ti,half-duplex-capable");
1509 
1510 		prueth->emac[PRUETH_MAC0]->iep = prueth->iep0;
1511 	}
1512 
1513 	if (eth1_node) {
1514 		ret = prueth_netdev_init(prueth, eth1_node);
1515 		if (ret) {
1516 			dev_err_probe(dev, ret, "netdev init %s failed\n",
1517 				      eth1_node->name);
1518 			goto netdev_exit;
1519 		}
1520 
1521 		prueth->emac[PRUETH_MAC1]->half_duplex =
1522 			of_property_read_bool(eth1_node, "ti,half-duplex-capable");
1523 
1524 		prueth->emac[PRUETH_MAC1]->iep = prueth->iep0;
1525 	}
1526 
1527 	/* register the network devices */
1528 	if (eth0_node) {
1529 		ret = register_netdev(prueth->emac[PRUETH_MAC0]->ndev);
1530 		if (ret) {
1531 			dev_err(dev, "can't register netdev for port MII0");
1532 			goto netdev_exit;
1533 		}
1534 
1535 		prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev;
1536 
1537 		ret = emac_phy_connect(prueth->emac[PRUETH_MAC0]);
1538 		if (ret) {
1539 			dev_err(dev,
1540 				"can't connect to MII0 PHY, error -%d", ret);
1541 			goto netdev_unregister;
1542 		}
1543 		phy_attached_info(prueth->emac[PRUETH_MAC0]->ndev->phydev);
1544 	}
1545 
1546 	if (eth1_node) {
1547 		ret = register_netdev(prueth->emac[PRUETH_MAC1]->ndev);
1548 		if (ret) {
1549 			dev_err(dev, "can't register netdev for port MII1");
1550 			goto netdev_unregister;
1551 		}
1552 
1553 		prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev;
1554 		ret = emac_phy_connect(prueth->emac[PRUETH_MAC1]);
1555 		if (ret) {
1556 			dev_err(dev,
1557 				"can't connect to MII1 PHY, error %d", ret);
1558 			goto netdev_unregister;
1559 		}
1560 		phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev);
1561 	}
1562 
1563 	if (prueth->is_switchmode_supported) {
1564 		ret = prueth_register_notifiers(prueth);
1565 		if (ret)
1566 			goto netdev_unregister;
1567 
1568 		sprintf(prueth->switch_id, "%s", dev_name(dev));
1569 	}
1570 
1571 	dev_info(dev, "TI PRU ethernet driver initialized: %s EMAC mode\n",
1572 		 (!eth0_node || !eth1_node) ? "single" : "dual");
1573 
1574 	if (eth1_node)
1575 		of_node_put(eth1_node);
1576 	if (eth0_node)
1577 		of_node_put(eth0_node);
1578 	return 0;
1579 
1580 netdev_unregister:
1581 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
1582 		if (!prueth->registered_netdevs[i])
1583 			continue;
1584 		if (prueth->emac[i]->ndev->phydev) {
1585 			phy_disconnect(prueth->emac[i]->ndev->phydev);
1586 			prueth->emac[i]->ndev->phydev = NULL;
1587 		}
1588 		unregister_netdev(prueth->registered_netdevs[i]);
1589 	}
1590 
1591 netdev_exit:
1592 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
1593 		eth_node = prueth->eth_node[i];
1594 		if (!eth_node)
1595 			continue;
1596 
1597 		prueth_netdev_exit(prueth, eth_node);
1598 	}
1599 
1600 exit_iep:
1601 	if (prueth->pdata.quirk_10m_link_issue)
1602 		icss_iep_exit_fw(prueth->iep1);
1603 	icss_iep_put(prueth->iep1);
1604 
1605 put_iep0:
1606 	icss_iep_put(prueth->iep0);
1607 	prueth->iep0 = NULL;
1608 	prueth->iep1 = NULL;
1609 
1610 free_pool:
1611 	gen_pool_free(prueth->sram_pool,
1612 		      (unsigned long)prueth->msmcram.va, msmc_ram_size);
1613 
1614 put_mem:
1615 	pruss_release_mem_region(prueth->pruss, &prueth->shram);
1616 
1617 put_pruss:
1618 	pruss_put(prueth->pruss);
1619 
1620 put_cores:
1621 	if (eth1_node) {
1622 		prueth_put_cores(prueth, ICSS_SLICE1);
1623 		of_node_put(eth1_node);
1624 	}
1625 
1626 	if (eth0_node) {
1627 		prueth_put_cores(prueth, ICSS_SLICE0);
1628 		of_node_put(eth0_node);
1629 	}
1630 
1631 	return ret;
1632 }
1633 
1634 static void prueth_remove(struct platform_device *pdev)
1635 {
1636 	struct prueth *prueth = platform_get_drvdata(pdev);
1637 	struct device_node *eth_node;
1638 	int i;
1639 
1640 	prueth_unregister_notifiers(prueth);
1641 
1642 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
1643 		if (!prueth->registered_netdevs[i])
1644 			continue;
1645 		phy_stop(prueth->emac[i]->ndev->phydev);
1646 		phy_disconnect(prueth->emac[i]->ndev->phydev);
1647 		prueth->emac[i]->ndev->phydev = NULL;
1648 		unregister_netdev(prueth->registered_netdevs[i]);
1649 	}
1650 
1651 	for (i = 0; i < PRUETH_NUM_MACS; i++) {
1652 		eth_node = prueth->eth_node[i];
1653 		if (!eth_node)
1654 			continue;
1655 
1656 		prueth_netdev_exit(prueth, eth_node);
1657 	}
1658 
1659 	if (prueth->pdata.quirk_10m_link_issue)
1660 		icss_iep_exit_fw(prueth->iep1);
1661 
1662 	icss_iep_put(prueth->iep1);
1663 	icss_iep_put(prueth->iep0);
1664 
1665 	gen_pool_free(prueth->sram_pool,
1666 		      (unsigned long)prueth->msmcram.va,
1667 		      MSMC_RAM_SIZE);
1668 
1669 	pruss_release_mem_region(prueth->pruss, &prueth->shram);
1670 
1671 	pruss_put(prueth->pruss);
1672 
1673 	if (prueth->eth_node[PRUETH_MAC1])
1674 		prueth_put_cores(prueth, ICSS_SLICE1);
1675 
1676 	if (prueth->eth_node[PRUETH_MAC0])
1677 		prueth_put_cores(prueth, ICSS_SLICE0);
1678 }
1679 
1680 static const struct prueth_pdata am654_icssg_pdata = {
1681 	.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
1682 	.quirk_10m_link_issue = 1,
1683 	.switch_mode = 1,
1684 };
1685 
1686 static const struct prueth_pdata am64x_icssg_pdata = {
1687 	.fdqring_mode = K3_RINGACC_RING_MODE_RING,
1688 	.quirk_10m_link_issue = 1,
1689 	.switch_mode = 1,
1690 };
1691 
1692 static const struct of_device_id prueth_dt_match[] = {
1693 	{ .compatible = "ti,am654-icssg-prueth", .data = &am654_icssg_pdata },
1694 	{ .compatible = "ti,am642-icssg-prueth", .data = &am64x_icssg_pdata },
1695 	{ /* sentinel */ }
1696 };
1697 MODULE_DEVICE_TABLE(of, prueth_dt_match);
1698 
1699 static struct platform_driver prueth_driver = {
1700 	.probe = prueth_probe,
1701 	.remove = prueth_remove,
1702 	.driver = {
1703 		.name = "icssg-prueth",
1704 		.of_match_table = prueth_dt_match,
1705 		.pm = &prueth_dev_pm_ops,
1706 	},
1707 };
1708 module_platform_driver(prueth_driver);
1709 
1710 MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
1711 MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
1712 MODULE_DESCRIPTION("PRUSS ICSSG Ethernet Driver");
1713 MODULE_LICENSE("GPL");
1714