1 // SPDX-License-Identifier: GPL-2.0
2
3 /* Texas Instruments ICSSG Ethernet Driver
4 *
5 * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
6 *
7 */
8
9 #include <linux/bitops.h>
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/dma/ti-cppi5.h>
14 #include <linux/etherdevice.h>
15 #include <linux/genalloc.h>
16 #include <linux/if_hsr.h>
17 #include <linux/if_vlan.h>
18 #include <linux/interrupt.h>
19 #include <linux/io-64-nonatomic-hi-lo.h>
20 #include <linux/kernel.h>
21 #include <linux/mfd/syscon.h>
22 #include <linux/module.h>
23 #include <linux/of.h>
24 #include <linux/of_mdio.h>
25 #include <linux/of_net.h>
26 #include <linux/platform_device.h>
27 #include <linux/phy.h>
28 #include <linux/property.h>
29 #include <linux/remoteproc/pruss.h>
30 #include <linux/regmap.h>
31 #include <linux/remoteproc.h>
32 #include <net/switchdev.h>
33
34 #include "icssg_prueth.h"
35 #include "icssg_mii_rt.h"
36 #include "icssg_switchdev.h"
37 #include "../k3-cppi-desc-pool.h"
38
39 #define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG Ethernet driver"
40
41 #define DEFAULT_VID 1
42 #define DEFAULT_PORT_MASK 1
43 #define DEFAULT_UNTAG_MASK 1
44
45 #define NETIF_PRUETH_HSR_OFFLOAD_FEATURES (NETIF_F_HW_HSR_FWD | \
46 NETIF_F_HW_HSR_DUP | \
47 NETIF_F_HW_HSR_TAG_INS | \
48 NETIF_F_HW_HSR_TAG_RM)
49
50 /* CTRLMMR_ICSSG_RGMII_CTRL register bits */
51 #define ICSSG_CTRL_RGMII_ID_MODE BIT(24)
52
53 static void emac_adjust_link(struct net_device *ndev);
54
55 static int emac_get_tx_ts(struct prueth_emac *emac,
56 struct emac_tx_ts_response *rsp)
57 {
58 struct prueth *prueth = emac->prueth;
59 int slice = prueth_emac_slice(emac);
60 int addr;
61
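/* Pop the shared-RAM offset of the next TX timestamp response for this slice */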
62 addr = icssg_queue_pop(prueth, slice == 0 ?
63 ICSSG_TS_POP_SLICE0 : ICSSG_TS_POP_SLICE1);
64 if (addr < 0)
65 return addr;
66
67 memcpy_fromio(rsp, prueth->shram.va + addr, sizeof(*rsp));
68 /* return the buffer back to the pool */
69 icssg_queue_push(prueth, slice == 0 ?
70 ICSSG_TS_PUSH_SLICE0 : ICSSG_TS_PUSH_SLICE1, addr);
71
72 return 0;
73 }
74
75 static void tx_ts_work(struct prueth_emac *emac)
76 {
77 struct skb_shared_hwtstamps ssh;
78 struct emac_tx_ts_response tsr;
79 struct sk_buff *skb;
80 int ret = 0;
81 u32 hi_sw;
82 u64 ns;
83
84 /* There may be more than one pending request */
85 while (1) {
86 ret = emac_get_tx_ts(emac, &tsr);
87 if (ret) /* nothing more */
88 break;
89
90 if (tsr.cookie >= PRUETH_MAX_TX_TS_REQUESTS ||
91 !emac->tx_ts_skb[tsr.cookie]) {
92 netdev_err(emac->ndev, "Invalid TX TS cookie 0x%x\n",
93 tsr.cookie);
94 break;
95 }
96
97 skb = emac->tx_ts_skb[tsr.cookie];
98 emac->tx_ts_skb[tsr.cookie] = NULL; /* free slot */
99 if (!skb) {
100 netdev_err(emac->ndev, "Driver Bug! got NULL skb\n");
101 break;
102 }
103
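/* Combine the firmware-kept upper time word with the returned hi/lo timestamp */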
104 hi_sw = readl(emac->prueth->shram.va +
105 TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
106 ns = icssg_ts_to_ns(hi_sw, tsr.hi_ts, tsr.lo_ts,
107 IEP_DEFAULT_CYCLE_TIME_NS);
108
109 memset(&ssh, 0, sizeof(ssh));
110 ssh.hwtstamp = ns_to_ktime(ns);
111
112 skb_tstamp_tx(skb, &ssh);
113 dev_consume_skb_any(skb);
114
115 if (atomic_dec_and_test(&emac->tx_ts_pending)) /* no more? */
116 break;
117 }
118 }
119
120 static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id)
121 {
122 struct prueth_emac *emac = dev_id;
123
124 /* currently only TX timestamp is being returned */
125 tx_ts_work(emac);
126
127 return IRQ_HANDLED;
128 }
129
130 static int prueth_start(struct rproc *rproc, const char *fw_name)
131 {
132 int ret;
133
134 ret = rproc_set_firmware(rproc, fw_name);
135 if (ret)
136 return ret;
137 return rproc_boot(rproc);
138 }
139
140 static void prueth_shutdown(struct rproc *rproc)
141 {
142 rproc_shutdown(rproc);
143 }
144
145 static int prueth_emac_start(struct prueth *prueth)
146 {
147 struct icssg_firmwares *firmwares;
148 struct device *dev = prueth->dev;
149 int ret, slice;
150
151 if (prueth->is_switch_mode)
152 firmwares = prueth->icssg_switch_firmwares;
153 else if (prueth->is_hsr_offload_mode && HSR_V1 == prueth->hsr_prp_version)
154 firmwares = prueth->icssg_hsr_firmwares;
155 else if (prueth->is_hsr_offload_mode && PRP_V1 == prueth->hsr_prp_version)
156 firmwares = prueth->icssg_prp_firmwares;
157 else
158 firmwares = prueth->icssg_emac_firmwares;
159
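/* Boot PRU, RTU and TX_PRU for each slice; unwind already-booted slices on failure */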
160 for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
161 ret = prueth_start(prueth->pru[slice], firmwares[slice].pru);
162 if (ret) {
163 dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
164 goto unwind_slices;
165 }
166
167 ret = prueth_start(prueth->rtu[slice], firmwares[slice].rtu);
168 if (ret) {
169 dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
170 rproc_shutdown(prueth->pru[slice]);
171 goto unwind_slices;
172 }
173
174 ret = prueth_start(prueth->txpru[slice], firmwares[slice].txpru);
175 if (ret) {
176 dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
177 rproc_shutdown(prueth->rtu[slice]);
178 rproc_shutdown(prueth->pru[slice]);
179 goto unwind_slices;
180 }
181 }
182
183 return 0;
184
185 unwind_slices:
186 while (--slice >= 0) {
187 prueth_shutdown(prueth->txpru[slice]);
188 prueth_shutdown(prueth->rtu[slice]);
189 prueth_shutdown(prueth->pru[slice]);
190 }
191
192 return ret;
193 }
194
195 static void prueth_emac_stop(struct prueth *prueth)
196 {
197 int slice;
198
199 for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
200 prueth_shutdown(prueth->txpru[slice]);
201 prueth_shutdown(prueth->rtu[slice]);
202 prueth_shutdown(prueth->pru[slice]);
203 }
204 }
205
206 static void icssg_enable_fw_offload(struct prueth *prueth)
207 {
208 struct prueth_emac *emac;
209 int mac;
210
211 for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
212 emac = prueth->emac[mac];
213 if (prueth->is_hsr_offload_mode) {
214 if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM)
215 icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE);
216 else
217 icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE);
218 }
219
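/* Seed the FDB with the blocked STP address and set up the default VLAN and PVID */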
220 if (prueth->is_switch_mode || prueth->is_hsr_offload_mode) {
221 if (netif_running(emac->ndev)) {
222 icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan,
223 ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
224 ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
225 ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
226 ICSSG_FDB_ENTRY_BLOCK,
227 true);
228 icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID,
229 BIT(emac->port_id) | DEFAULT_PORT_MASK,
230 BIT(emac->port_id) | DEFAULT_UNTAG_MASK,
231 true);
232 if (prueth->is_hsr_offload_mode)
233 icssg_vtbl_modify(emac, DEFAULT_VID,
234 DEFAULT_PORT_MASK,
235 DEFAULT_UNTAG_MASK, true);
236 icssg_set_pvid(prueth, emac->port_vlan, emac->port_id);
237 if (prueth->is_switch_mode)
238 icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);
239 }
240 }
241 }
242 }
243
244 static int prueth_emac_common_start(struct prueth *prueth)
245 {
246 struct prueth_emac *emac;
247 int ret = 0;
248 int slice;
249
250 if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
251 return -EINVAL;
252
253 /* clear SMEM and MSMC settings for all slices */
254 memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
255 memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
256
257 icssg_class_default(prueth->miig_rt, ICSS_SLICE0, 0, false);
258 icssg_class_default(prueth->miig_rt, ICSS_SLICE1, 0, false);
259
260 if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
261 icssg_init_fw_offload_mode(prueth);
262 else
263 icssg_init_emac_mode(prueth);
264
265 for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
266 emac = prueth->emac[slice];
267 if (!emac)
268 continue;
269 ret = icssg_config(prueth, emac, slice);
270 if (ret)
271 goto disable_class;
272
273 mutex_lock(&emac->ndev->phydev->lock);
274 emac_adjust_link(emac->ndev);
275 mutex_unlock(&emac->ndev->phydev->lock);
276 }
277
278 ret = prueth_emac_start(prueth);
279 if (ret)
280 goto disable_class;
281
282 emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
283 prueth->emac[ICSS_SLICE1];
284 ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
285 emac, IEP_DEFAULT_CYCLE_TIME_NS);
286 if (ret) {
287 dev_err(prueth->dev, "Failed to initialize IEP module\n");
288 goto stop_pruss;
289 }
290
291 return 0;
292
293 stop_pruss:
294 prueth_emac_stop(prueth);
295
296 disable_class:
297 icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
298 icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);
299
300 return ret;
301 }
302
303 static int prueth_emac_common_stop(struct prueth *prueth)
304 {
305 struct prueth_emac *emac;
306
307 if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
308 return -EINVAL;
309
310 icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
311 icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);
312
313 prueth_emac_stop(prueth);
314
315 emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
316 prueth->emac[ICSS_SLICE1];
317 icss_iep_exit(emac->iep);
318
319 return 0;
320 }
321
322 /* called back by the PHY layer if there is a change in the link state of the hw port */
323 static void emac_adjust_link(struct net_device *ndev)
324 {
325 struct prueth_emac *emac = netdev_priv(ndev);
326 struct phy_device *phydev = ndev->phydev;
327 struct prueth *prueth = emac->prueth;
328 bool new_state = false;
329 unsigned long flags;
330
331 if (phydev->link) {
332 /* check the mode of operation - full/half duplex */
333 if (phydev->duplex != emac->duplex) {
334 new_state = true;
335 emac->duplex = phydev->duplex;
336 }
337 if (phydev->speed != emac->speed) {
338 new_state = true;
339 emac->speed = phydev->speed;
340 }
341 if (!emac->link) {
342 new_state = true;
343 emac->link = 1;
344 }
345 } else if (emac->link) {
346 new_state = true;
347 emac->link = 0;
348
349 /* f/w should support 100 & 1000 */
350 emac->speed = SPEED_1000;
351
352 /* half duplex may not be supported by f/w */
353 emac->duplex = DUPLEX_FULL;
354 }
355
356 if (new_state) {
357 phy_print_status(phydev);
358
359 /* update RGMII and MII configuration based on PHY negotiated
360 * values
361 */
362 if (emac->link) {
363 if (emac->duplex == DUPLEX_HALF)
364 icssg_config_half_duplex(emac);
365 /* Set the RGMII cfg for gig en and full duplex */
366 icssg_update_rgmii_cfg(prueth->miig_rt, emac);
367
368 /* update the Tx IPG based on 100M/1G speed */
369 spin_lock_irqsave(&emac->lock, flags);
370 icssg_config_ipg(emac);
371 spin_unlock_irqrestore(&emac->lock, flags);
372 icssg_config_set_speed(emac);
373 icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
374
375 } else {
376 icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
377 }
378 }
379
380 if (emac->link) {
381 /* reactivate the transmit queue */
382 netif_tx_wake_all_queues(ndev);
383 } else {
384 netif_tx_stop_all_queues(ndev);
385 prueth_cleanup_tx_ts(emac);
386 }
387 }
388
389 static enum hrtimer_restart emac_rx_timer_callback(struct hrtimer *timer)
390 {
391 struct prueth_emac *emac =
392 container_of(timer, struct prueth_emac, rx_hrtimer);
393 int rx_flow = PRUETH_RX_FLOW_DATA;
394
395 enable_irq(emac->rx_chns.irq[rx_flow]);
396 return HRTIMER_NORESTART;
397 }
398
399 static int emac_phy_connect(struct prueth_emac *emac)
400 {
401 struct prueth *prueth = emac->prueth;
402 struct net_device *ndev = emac->ndev;
403 /* connect PHY */
404 ndev->phydev = of_phy_connect(emac->ndev, emac->phy_node,
405 &emac_adjust_link, 0,
406 emac->phy_if);
407 if (!ndev->phydev) {
408 dev_err(prueth->dev, "couldn't connect to phy %s\n",
409 emac->phy_node->full_name);
410 return -ENODEV;
411 }
412
413 if (!emac->half_duplex) {
414 dev_dbg(prueth->dev, "half duplex mode is not supported\n");
415 phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
416 phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
417 }
418
419 /* remove unsupported modes */
420 phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
421 phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
422 phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
423
424 if (emac->phy_if == PHY_INTERFACE_MODE_MII)
425 phy_set_max_speed(ndev->phydev, SPEED_100);
426
427 return 0;
428 }
429
430 static u64 prueth_iep_gettime(void *clockops_data, struct ptp_system_timestamp *sts)
431 {
432 u32 hi_rollover_count, hi_rollover_count_r;
433 struct prueth_emac *emac = clockops_data;
434 struct prueth *prueth = emac->prueth;
435 void __iomem *fw_hi_r_count_addr;
436 void __iomem *fw_count_hi_addr;
437 u32 iepcount_hi, iepcount_hi_r;
438 unsigned long flags;
439 u32 iepcount_lo;
440 u64 ts = 0;
441
442 fw_count_hi_addr = prueth->shram.va + TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET;
443 fw_hi_r_count_addr = prueth->shram.va + TIMESYNC_FW_WC_HI_ROLLOVER_COUNT_OFFSET;
444
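/* Retry until the hi word and rollover count are stable across the lo read */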
445 local_irq_save(flags);
446 do {
447 iepcount_hi = icss_iep_get_count_hi(emac->iep);
448 iepcount_hi += readl(fw_count_hi_addr);
449 hi_rollover_count = readl(fw_hi_r_count_addr);
450 ptp_read_system_prets(sts);
451 iepcount_lo = icss_iep_get_count_low(emac->iep);
452 ptp_read_system_postts(sts);
453
454 iepcount_hi_r = icss_iep_get_count_hi(emac->iep);
455 iepcount_hi_r += readl(fw_count_hi_addr);
456 hi_rollover_count_r = readl(fw_hi_r_count_addr);
457 } while ((iepcount_hi_r != iepcount_hi) ||
458 (hi_rollover_count != hi_rollover_count_r));
459 local_irq_restore(flags);
460
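/* Rollover count extends the cycle count; scale by the cycle time and add the low count */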
461 ts = ((u64)hi_rollover_count) << 23 | iepcount_hi;
462 ts = ts * (u64)IEP_DEFAULT_CYCLE_TIME_NS + iepcount_lo;
463
464 return ts;
465 }
466
467 static void prueth_iep_settime(void *clockops_data, u64 ns)
468 {
469 struct icssg_setclock_desc __iomem *sc_descp;
470 struct prueth_emac *emac = clockops_data;
471 struct icssg_setclock_desc sc_desc;
472 u64 cyclecount;
473 u32 cycletime;
474 int timeout;
475
476 sc_descp = emac->prueth->shram.va + TIMESYNC_FW_WC_SETCLOCK_DESC_OFFSET;
477
478 cycletime = IEP_DEFAULT_CYCLE_TIME_NS;
479 cyclecount = ns / cycletime;
480
481 memset(&sc_desc, 0, sizeof(sc_desc));
482 sc_desc.margin = cycletime - 1000;
483 sc_desc.cyclecounter0_set = cyclecount & GENMASK(31, 0);
484 sc_desc.cyclecounter1_set = (cyclecount & GENMASK(63, 32)) >> 32;
485 sc_desc.iepcount_set = ns % cycletime;
486 /* Count from 0 to (cycle time) - emac->iep->def_inc */
487 sc_desc.CMP0_current = cycletime - emac->iep->def_inc;
488
489 memcpy_toio(sc_descp, &sc_desc, sizeof(sc_desc));
490
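/* Ask the firmware to apply the new clock values, then poll for its ack */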
491 writeb(1, &sc_descp->request);
492
493 timeout = 5; /* fw should take 2-3 ms */
494 while (timeout--) {
495 if (readb(&sc_descp->acknowledgment))
496 return;
497
498 usleep_range(500, 1000);
499 }
500
501 dev_err(emac->prueth->dev, "settime timeout\n");
502 }
503
504 static int prueth_perout_enable(void *clockops_data,
505 struct ptp_perout_request *req, int on,
506 u64 *cmp)
507 {
508 struct prueth_emac *emac = clockops_data;
509 u32 reduction_factor = 0, offset = 0;
510 struct timespec64 ts;
511 u64 current_cycle;
512 u64 start_offset;
513 u64 ns_period;
514
515 if (!on)
516 return 0;
517
518 /* Firmware-specific handling for PPS/PEROUT generation */
519 ts.tv_sec = req->period.sec;
520 ts.tv_nsec = req->period.nsec;
521 ns_period = timespec64_to_ns(&ts);
522
523 /* f/w doesn't support period less than cycle time */
524 if (ns_period < IEP_DEFAULT_CYCLE_TIME_NS)
525 return -ENXIO;
526
527 reduction_factor = ns_period / IEP_DEFAULT_CYCLE_TIME_NS;
528 offset = ns_period % IEP_DEFAULT_CYCLE_TIME_NS;
529
530 /* f/w requires at least 1uS within a cycle so CMP
531 * can trigger after SYNC is enabled
532 */
533 if (offset < 5 * NSEC_PER_USEC)
534 offset = 5 * NSEC_PER_USEC;
535
536 /* if offset is close to cycle time then we will miss
537 * the CMP event for last tick when IEP rolls over.
538 * In normal mode, IEP tick is 4ns.
539 * In slow compensation it could be 0ns or 8ns at
540 * every slow compensation cycle.
541 */
542 if (offset > IEP_DEFAULT_CYCLE_TIME_NS - 8)
543 offset = IEP_DEFAULT_CYCLE_TIME_NS - 8;
544
545 /* we're in shadow mode so need to set upper 32-bits */
546 *cmp = (u64)offset << 32;
547
548 writel(reduction_factor, emac->prueth->shram.va +
549 TIMESYNC_FW_WC_SYNCOUT_REDUCTION_FACTOR_OFFSET);
550
551 current_cycle = icssg_read_time(emac->prueth->shram.va +
552 TIMESYNC_FW_WC_CYCLECOUNT_OFFSET);
553
554 /* Rounding of current_cycle count to next second */
555 start_offset = roundup(current_cycle, MSEC_PER_SEC);
556
557 hi_lo_writeq(start_offset, emac->prueth->shram.va +
558 TIMESYNC_FW_WC_SYNCOUT_START_TIME_CYCLECOUNT_OFFSET);
559
560 return 0;
561 }
562
563 const struct icss_iep_clockops prueth_iep_clockops = {
564 .settime = prueth_iep_settime,
565 .gettime = prueth_iep_gettime,
566 .perout_enable = prueth_perout_enable,
567 };
568
569 static int prueth_create_xdp_rxqs(struct prueth_emac *emac)
570 {
571 struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
572 struct page_pool *pool = emac->rx_chns.pg_pool;
573 int ret;
574
575 ret = xdp_rxq_info_reg(rxq, emac->ndev, 0, emac->napi_rx.napi_id);
576 if (ret)
577 return ret;
578
579 ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
580 if (ret)
581 xdp_rxq_info_unreg(rxq);
582
583 return ret;
584 }
585
586 static void prueth_destroy_xdp_rxqs(struct prueth_emac *emac)
587 {
588 struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
589
590 if (!xdp_rxq_info_is_reg(rxq))
591 return;
592
593 xdp_rxq_info_unreg(rxq);
594 }
595
596 static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr)
597 {
598 struct net_device *real_dev;
599 struct prueth_emac *emac;
600 int port_mask;
601 u8 vlan_id;
602
603 vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC;
604 real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
605 emac = netdev_priv(real_dev);
606
607 port_mask = BIT(emac->port_id) | icssg_fdb_lookup(emac, addr, vlan_id);
608 icssg_fdb_add_del(emac, addr, vlan_id, port_mask, true);
609 icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, true);
610
611 return 0;
612 }
613
614 static int icssg_prueth_del_mcast(struct net_device *ndev, const u8 *addr)
615 {
616 struct net_device *real_dev;
617 struct prueth_emac *emac;
618 int other_port_mask;
619 int port_mask;
620 u8 vlan_id;
621
622 vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC;
623 real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
624 emac = netdev_priv(real_dev);
625
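/* other_port_mask: other ports still referencing this address in the FDB */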
626 port_mask = BIT(emac->port_id);
627 other_port_mask = port_mask ^ icssg_fdb_lookup(emac, addr, vlan_id);
628
629 icssg_fdb_add_del(emac, addr, vlan_id, port_mask, false);
630 icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, false);
631
632 if (other_port_mask) {
633 icssg_fdb_add_del(emac, addr, vlan_id, other_port_mask, true);
634 icssg_vtbl_modify(emac, vlan_id, other_port_mask,
635 other_port_mask, true);
636 }
637
638 return 0;
639 }
640
641 static void icssg_prueth_hsr_fdb_add_del(struct prueth_emac *emac,
642 const u8 *addr, u8 vid, bool add)
643 {
644 icssg_fdb_add_del(emac, addr, vid,
645 ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
646 ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
647 ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
648 ICSSG_FDB_ENTRY_BLOCK, add);
649
650 if (add)
651 icssg_vtbl_modify(emac, vid, BIT(emac->port_id),
652 BIT(emac->port_id), add);
653 }
654
655 static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
656 {
657 struct net_device *real_dev, *port_dev;
658 struct prueth_emac *emac;
659 u8 vlan_id, i;
660
661 vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_HSR;
662 real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
663
664 if (is_hsr_master(real_dev)) {
665 for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
666 port_dev = hsr_get_port_ndev(real_dev, i);
667 emac = netdev_priv(port_dev);
668 if (!emac) {
669 dev_put(port_dev);
670 return -EINVAL;
671 }
672 icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
673 true);
674 dev_put(port_dev);
675 }
676 } else {
677 emac = netdev_priv(real_dev);
678 icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, true);
679 }
680
681 return 0;
682 }
683
684 static int icssg_prueth_hsr_del_mcast(struct net_device *ndev, const u8 *addr)
685 {
686 struct net_device *real_dev, *port_dev;
687 struct prueth_emac *emac;
688 u8 vlan_id, i;
689
690 vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_HSR;
691 real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
692
693 if (is_hsr_master(real_dev)) {
694 for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
695 port_dev = hsr_get_port_ndev(real_dev, i);
696 emac = netdev_priv(port_dev);
697 if (!emac) {
698 dev_put(port_dev);
699 return -EINVAL;
700 }
701 icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
702 false);
703 dev_put(port_dev);
704 }
705 } else {
706 emac = netdev_priv(real_dev);
707 icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, false);
708 }
709
710 return 0;
711 }
712
713 static int icssg_update_vlan_mcast(struct net_device *vdev, int vid,
714 void *args)
715 {
716 struct prueth_emac *emac = args;
717
718 if (!vdev || !vid)
719 return 0;
720
721 netif_addr_lock_bh(vdev);
722 __hw_addr_sync_multiple(&emac->vlan_mcast_list[vid], &vdev->mc,
723 vdev->addr_len);
724 netif_addr_unlock_bh(vdev);
725
726 if (emac->prueth->is_hsr_offload_mode)
727 __hw_addr_sync_dev(&emac->vlan_mcast_list[vid], vdev,
728 icssg_prueth_hsr_add_mcast,
729 icssg_prueth_hsr_del_mcast);
730 else
731 __hw_addr_sync_dev(&emac->vlan_mcast_list[vid], vdev,
732 icssg_prueth_add_mcast,
733 icssg_prueth_del_mcast);
734
735 return 0;
736 }
737
738 /**
739 * emac_ndo_open - EMAC device open
740 * @ndev: network adapter device
741 *
742 * Called when system wants to start the interface.
743 *
744 * Return: 0 for a successful open, or appropriate error code
745 */
746 static int emac_ndo_open(struct net_device *ndev)
747 {
748 struct prueth_emac *emac = netdev_priv(ndev);
749 int ret, i, num_data_chn = emac->tx_ch_num;
750 struct icssg_flow_cfg __iomem *flow_cfg;
751 struct prueth *prueth = emac->prueth;
752 int slice = prueth_emac_slice(emac);
753 struct device *dev = prueth->dev;
754 int max_rx_flows;
755 int rx_flow;
756
757 /* set h/w MAC as user might have re-configured */
758 ether_addr_copy(emac->mac_addr, ndev->dev_addr);
759
760 icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
761 icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
762
763 /* Notify the stack of the actual queue counts. */
764 ret = netif_set_real_num_tx_queues(ndev, num_data_chn);
765 if (ret) {
766 dev_err(dev, "cannot set real number of tx queues\n");
767 return ret;
768 }
769
770 init_completion(&emac->cmd_complete);
771 ret = prueth_init_tx_chns(emac);
772 if (ret) {
773 dev_err(dev, "failed to init tx channel: %d\n", ret);
774 return ret;
775 }
776
777 max_rx_flows = PRUETH_MAX_RX_FLOWS;
778 ret = prueth_init_rx_chns(emac, &emac->rx_chns, "rx",
779 max_rx_flows, PRUETH_MAX_RX_DESC);
780 if (ret) {
781 dev_err(dev, "failed to init rx channel: %d\n", ret);
782 goto cleanup_tx;
783 }
784
785 ret = prueth_ndev_add_tx_napi(emac);
786 if (ret)
787 goto cleanup_rx;
788
789 /* we use only the highest priority flow for now i.e. @irq[3] */
790 rx_flow = PRUETH_RX_FLOW_DATA;
791 ret = request_irq(emac->rx_chns.irq[rx_flow], prueth_rx_irq,
792 IRQF_TRIGGER_HIGH, dev_name(dev), emac);
793 if (ret) {
794 dev_err(dev, "unable to request RX IRQ\n");
795 goto cleanup_napi;
796 }
797
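/* Common firmware and IEP bring-up is done only by the first port opened */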
798 if (!prueth->emacs_initialized) {
799 ret = prueth_emac_common_start(prueth);
800 if (ret)
801 goto free_rx_irq;
802 icssg_enable_fw_offload(prueth);
803 }
804
805 flow_cfg = emac->dram.va + ICSSG_CONFIG_OFFSET + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET;
806 writew(emac->rx_flow_id_base, &flow_cfg->rx_base_flow);
807 ret = emac_fdb_flow_id_updated(emac);
808
809 if (ret) {
810 netdev_err(ndev, "Failed to update Rx Flow ID %d", ret);
811 goto stop;
812 }
813
814 icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
815
816 ret = request_threaded_irq(emac->tx_ts_irq, NULL, prueth_tx_ts_irq,
817 IRQF_ONESHOT, dev_name(dev), emac);
818 if (ret)
819 goto stop;
820
821 /* Prepare RX */
822 ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
823 if (ret)
824 goto free_tx_ts_irq;
825
826 ret = prueth_create_xdp_rxqs(emac);
827 if (ret)
828 goto reset_rx_chn;
829
830 ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
831 if (ret)
832 goto destroy_xdp_rxqs;
833
834 for (i = 0; i < emac->tx_ch_num; i++) {
835 ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
836 if (ret)
837 goto reset_tx_chan;
838 }
839
840 /* Enable NAPI in Tx and Rx direction */
841 for (i = 0; i < emac->tx_ch_num; i++)
842 napi_enable(&emac->tx_chns[i].napi_tx);
843 napi_enable(&emac->napi_rx);
844
845 /* start PHY */
846 phy_start(ndev->phydev);
847
848 prueth->emacs_initialized++;
849
850 queue_work(system_long_wq, &emac->stats_work.work);
851
852 return 0;
853
854 reset_tx_chan:
855 /* Since the interface is not yet up, there wouldn't be
856 * any SKB for completion. So pass false for free_skb
857 */
858 prueth_reset_tx_chan(emac, i, false);
859 destroy_xdp_rxqs:
860 prueth_destroy_xdp_rxqs(emac);
861 reset_rx_chn:
862 prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
863 free_tx_ts_irq:
864 free_irq(emac->tx_ts_irq, emac);
865 stop:
866 if (!prueth->emacs_initialized)
867 prueth_emac_common_stop(prueth);
868 free_rx_irq:
869 free_irq(emac->rx_chns.irq[rx_flow], emac);
870 cleanup_napi:
871 prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
872 cleanup_rx:
873 prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
874 cleanup_tx:
875 prueth_cleanup_tx_chns(emac);
876
877 return ret;
878 }
879
880 /**
881 * emac_ndo_stop - EMAC device stop
882 * @ndev: network adapter device
883 *
884 * Called when system wants to stop or down the interface.
885 *
886 * Return: Always 0 (Success)
887 */
888 static int emac_ndo_stop(struct net_device *ndev)
889 {
890 struct prueth_emac *emac = netdev_priv(ndev);
891 struct prueth *prueth = emac->prueth;
892 int rx_flow = PRUETH_RX_FLOW_DATA;
893 int max_rx_flows;
894 int ret, i;
895
896 /* inform the upper layers. */
897 netif_tx_stop_all_queues(ndev);
898
899 /* block packets from wire */
900 if (ndev->phydev)
901 phy_stop(ndev->phydev);
902
903 if (emac->prueth->is_hsr_offload_mode)
904 __dev_mc_unsync(ndev, icssg_prueth_hsr_del_mcast);
905 else
906 __dev_mc_unsync(ndev, icssg_prueth_del_mcast);
907
908 atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
909 /* ensure new tdown_cnt value is visible */
910 smp_mb__after_atomic();
911 /* tear down and disable UDMA channels */
912 reinit_completion(&emac->tdown_complete);
913 for (i = 0; i < emac->tx_ch_num; i++)
914 k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);
915
916 ret = wait_for_completion_timeout(&emac->tdown_complete,
917 msecs_to_jiffies(1000));
918 if (!ret)
919 netdev_err(ndev, "tx teardown timeout\n");
920
921 prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
922 for (i = 0; i < emac->tx_ch_num; i++) {
923 napi_disable(&emac->tx_chns[i].napi_tx);
924 hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer);
925 }
926
927 max_rx_flows = PRUETH_MAX_RX_FLOWS;
928 k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
929
930 prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
931 prueth_destroy_xdp_rxqs(emac);
932 napi_disable(&emac->napi_rx);
933 hrtimer_cancel(&emac->rx_hrtimer);
934
935 cancel_work_sync(&emac->rx_mode_work);
936
937 /* Destroying the queued work in ndo_stop() */
938 cancel_delayed_work_sync(&emac->stats_work);
939
940 /* stop PRUs */
941 if (prueth->emacs_initialized == 1)
942 prueth_emac_common_stop(prueth);
943
944 free_irq(emac->tx_ts_irq, emac);
945
946 free_irq(emac->rx_chns.irq[rx_flow], emac);
947 prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
948
949 prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
950 prueth_cleanup_tx_chns(emac);
951
952 prueth->emacs_initialized--;
953
954 return 0;
955 }
956
957 static void emac_ndo_set_rx_mode_work(struct work_struct *work)
958 {
959 struct prueth_emac *emac = container_of(work, struct prueth_emac, rx_mode_work);
960 struct net_device *ndev = emac->ndev;
961 bool promisc, allmulti;
962
963 if (!netif_running(ndev))
964 return;
965
966 promisc = ndev->flags & IFF_PROMISC;
967 allmulti = ndev->flags & IFF_ALLMULTI;
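/* Disable UC/MC flooding first, then re-enable it as needed below */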
968 icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_DISABLE);
969 icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_DISABLE);
970
971 if (promisc) {
972 icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_ENABLE);
973 icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
974 return;
975 }
976
977 if (allmulti) {
978 icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
979 return;
980 }
981
982 if (emac->prueth->is_hsr_offload_mode) {
983 __dev_mc_sync(ndev, icssg_prueth_hsr_add_mcast,
984 icssg_prueth_hsr_del_mcast);
985 if (rtnl_trylock()) {
986 vlan_for_each(emac->prueth->hsr_dev,
987 icssg_update_vlan_mcast, emac);
988 rtnl_unlock();
989 }
990 } else {
991 __dev_mc_sync(ndev, icssg_prueth_add_mcast,
992 icssg_prueth_del_mcast);
993 if (rtnl_trylock()) {
994 vlan_for_each(ndev, icssg_update_vlan_mcast, emac);
995 rtnl_unlock();
996 }
997 }
998 }
999
1000 /**
1001 * emac_ndo_set_rx_mode - EMAC set receive mode function
1002 * @ndev: The EMAC network adapter
1003 *
1004 * Called when system wants to set the receive mode of the device.
1005 *
1006 */
1007 static void emac_ndo_set_rx_mode(struct net_device *ndev)
1008 {
1009 struct prueth_emac *emac = netdev_priv(ndev);
1010
1011 queue_work(emac->cmd_wq, &emac->rx_mode_work);
1012 }
1013
1014 static netdev_features_t emac_ndo_fix_features(struct net_device *ndev,
1015 netdev_features_t features)
1016 {
1017 /* HSR tag insertion offload and HSR duplication offload are tightly
1018 * coupled in the firmware implementation; both features must be
1019 * enabled or disabled together.
1020 */
1021 if (!(ndev->features & (NETIF_F_HW_HSR_DUP | NETIF_F_HW_HSR_TAG_INS)))
1022 if ((features & NETIF_F_HW_HSR_DUP) ||
1023 (features & NETIF_F_HW_HSR_TAG_INS))
1024 features |= NETIF_F_HW_HSR_DUP |
1025 NETIF_F_HW_HSR_TAG_INS;
1026
1027 if ((ndev->features & NETIF_F_HW_HSR_DUP) ||
1028 (ndev->features & NETIF_F_HW_HSR_TAG_INS))
1029 if (!(features & NETIF_F_HW_HSR_DUP) ||
1030 !(features & NETIF_F_HW_HSR_TAG_INS))
1031 features &= ~(NETIF_F_HW_HSR_DUP |
1032 NETIF_F_HW_HSR_TAG_INS);
1033
1034 return features;
1035 }
1036
1037 static int emac_ndo_vlan_rx_add_vid(struct net_device *ndev,
1038 __be16 proto, u16 vid)
1039 {
1040 struct prueth_emac *emac = netdev_priv(ndev);
1041 struct prueth *prueth = emac->prueth;
1042 int port_mask = BIT(emac->port_id);
1043 int untag_mask = 0;
1044
1045 if (prueth->is_hsr_offload_mode)
1046 port_mask |= BIT(PRUETH_PORT_HOST);
1047
1048 __hw_addr_init(&emac->vlan_mcast_list[vid]);
1049 netdev_dbg(emac->ndev, "VID add vid:%u port_mask:%X untag_mask %X\n",
1050 vid, port_mask, untag_mask);
1051
1052 icssg_vtbl_modify(emac, vid, port_mask, untag_mask, true);
1053 icssg_set_pvid(emac->prueth, vid, emac->port_id);
1054
1055 return 0;
1056 }
1057
1058 static int emac_ndo_vlan_rx_del_vid(struct net_device *ndev,
1059 __be16 proto, u16 vid)
1060 {
1061 struct prueth_emac *emac = netdev_priv(ndev);
1062 struct prueth *prueth = emac->prueth;
1063 int port_mask = BIT(emac->port_id);
1064 int untag_mask = 0;
1065
1066 if (prueth->is_hsr_offload_mode)
1067 port_mask = BIT(PRUETH_PORT_HOST);
1068
1069 netdev_dbg(emac->ndev, "VID del vid:%u port_mask:%X untag_mask %X\n",
1070 vid, port_mask, untag_mask);
1071 icssg_vtbl_modify(emac, vid, port_mask, untag_mask, false);
1072
1073 return 0;
1074 }
1075
1076 /**
1077 * emac_xdp_xmit - Implements ndo_xdp_xmit
1078 * @dev: netdev
1079 * @n: number of frames
1080 * @frames: array of XDP buffer pointers
1081 * @flags: XDP extra info
1082 *
1083 * Return: number of frames successfully sent. Failed frames
1084 * will be freed by the XDP core.
1085 *
1086 * For error cases, a negative errno code is returned and no frames
1087 * are transmitted (the caller must handle freeing them).
1088 **/
1089 static int emac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
1090 u32 flags)
1091 {
1092 struct prueth_emac *emac = netdev_priv(dev);
1093 struct net_device *ndev = emac->ndev;
1094 struct netdev_queue *netif_txq;
1095 int cpu = smp_processor_id();
1096 struct xdp_frame *xdpf;
1097 unsigned int q_idx;
1098 int nxmit = 0;
1099 u32 err;
1100 int i;
1101
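/* Spread XDP transmits across TX channels based on the current CPU */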
1102 q_idx = cpu % emac->tx_ch_num;
1103 netif_txq = netdev_get_tx_queue(ndev, q_idx);
1104
1105 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1106 return -EINVAL;
1107
1108 __netif_tx_lock(netif_txq, cpu);
1109 for (i = 0; i < n; i++) {
1110 xdpf = frames[i];
1111 err = emac_xmit_xdp_frame(emac, xdpf, NULL, q_idx);
1112 if (err != ICSSG_XDP_TX) {
1113 ndev->stats.tx_dropped++;
1114 break;
1115 }
1116 nxmit++;
1117 }
1118 __netif_tx_unlock(netif_txq);
1119
1120 return nxmit;
1121 }
1122
1123 /**
1124 * emac_xdp_setup - add/remove an XDP program
1125 * @emac: emac device
1126 * @bpf: XDP program
1127 *
1128 * Return: Always 0 (Success)
1129 **/
1130 static int emac_xdp_setup(struct prueth_emac *emac, struct netdev_bpf *bpf)
1131 {
1132 struct bpf_prog *prog = bpf->prog;
1133
1134 if (!emac->xdpi.prog && !prog)
1135 return 0;
1136
1137 WRITE_ONCE(emac->xdp_prog, prog);
1138
1139 xdp_attachment_setup(&emac->xdpi, bpf);
1140
1141 return 0;
1142 }
1143
1144 /**
1145 * emac_ndo_bpf - implements ndo_bpf for icssg_prueth
1146 * @ndev: network adapter device
1147 * @bpf: XDP program
1148 *
1149 * Return: 0 on success, error code on failure.
1150 **/
1151 static int emac_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
1152 {
1153 struct prueth_emac *emac = netdev_priv(ndev);
1154
1155 switch (bpf->command) {
1156 case XDP_SETUP_PROG:
1157 return emac_xdp_setup(emac, bpf);
1158 default:
1159 return -EINVAL;
1160 }
1161 }
1162
1163 static const struct net_device_ops emac_netdev_ops = {
1164 .ndo_open = emac_ndo_open,
1165 .ndo_stop = emac_ndo_stop,
1166 .ndo_start_xmit = icssg_ndo_start_xmit,
1167 .ndo_set_mac_address = eth_mac_addr,
1168 .ndo_validate_addr = eth_validate_addr,
1169 .ndo_tx_timeout = icssg_ndo_tx_timeout,
1170 .ndo_set_rx_mode = emac_ndo_set_rx_mode,
1171 .ndo_eth_ioctl = icssg_ndo_ioctl,
1172 .ndo_get_stats64 = icssg_ndo_get_stats64,
1173 .ndo_get_phys_port_name = icssg_ndo_get_phys_port_name,
1174 .ndo_fix_features = emac_ndo_fix_features,
1175 .ndo_vlan_rx_add_vid = emac_ndo_vlan_rx_add_vid,
1176 .ndo_vlan_rx_kill_vid = emac_ndo_vlan_rx_del_vid,
1177 .ndo_bpf = emac_ndo_bpf,
1178 .ndo_xdp_xmit = emac_xdp_xmit,
1179 };
1180
1181 static int prueth_netdev_init(struct prueth *prueth,
1182 struct device_node *eth_node)
1183 {
1184 int ret, num_tx_chn = PRUETH_MAX_TX_QUEUES;
1185 struct prueth_emac *emac;
1186 struct net_device *ndev;
1187 enum prueth_port port;
1188 const char *irq_name;
1189 enum prueth_mac mac;
1190
1191 port = prueth_node_port(eth_node);
1192 if (port == PRUETH_PORT_INVALID)
1193 return -EINVAL;
1194
1195 mac = prueth_node_mac(eth_node);
1196 if (mac == PRUETH_MAC_INVALID)
1197 return -EINVAL;
1198
1199 ndev = alloc_etherdev_mq(sizeof(*emac), num_tx_chn);
1200 if (!ndev)
1201 return -ENOMEM;
1202
1203 emac = netdev_priv(ndev);
1204 emac->prueth = prueth;
1205 emac->ndev = ndev;
1206 emac->port_id = port;
1207 emac->xdp_prog = NULL;
1208 emac->ndev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
1209 emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
1210 if (!emac->cmd_wq) {
1211 ret = -ENOMEM;
1212 goto free_ndev;
1213 }
1214 INIT_WORK(&emac->rx_mode_work, emac_ndo_set_rx_mode_work);
1215
1216 INIT_DELAYED_WORK(&emac->stats_work, icssg_stats_work_handler);
1217
1218 ret = pruss_request_mem_region(prueth->pruss,
1219 port == PRUETH_PORT_MII0 ?
1220 PRUSS_MEM_DRAM0 : PRUSS_MEM_DRAM1,
1221 &emac->dram);
1222 if (ret) {
1223 dev_err(prueth->dev, "unable to get DRAM: %d\n", ret);
1224 ret = -ENOMEM;
1225 goto free_wq;
1226 }
1227
1228 emac->tx_ch_num = 1;
1229
1230 irq_name = "tx_ts0";
1231 if (emac->port_id == PRUETH_PORT_MII1)
1232 irq_name = "tx_ts1";
1233 emac->tx_ts_irq = platform_get_irq_byname_optional(prueth->pdev, irq_name);
1234 if (emac->tx_ts_irq < 0) {
1235 ret = dev_err_probe(prueth->dev, emac->tx_ts_irq, "could not get tx_ts_irq\n");
1236 goto free;
1237 }
1238
1239 SET_NETDEV_DEV(ndev, prueth->dev);
1240 spin_lock_init(&emac->lock);
1241 mutex_init(&emac->cmd_lock);
1242
1243 emac->phy_node = of_parse_phandle(eth_node, "phy-handle", 0);
1244 if (!emac->phy_node && !of_phy_is_fixed_link(eth_node)) {
1245 dev_err(prueth->dev, "couldn't find phy-handle\n");
1246 ret = -ENODEV;
1247 goto free;
1248 } else if (of_phy_is_fixed_link(eth_node)) {
1249 ret = of_phy_register_fixed_link(eth_node);
1250 if (ret) {
1251 ret = dev_err_probe(prueth->dev, ret,
1252 "failed to register fixed-link phy\n");
1253 goto free;
1254 }
1255
1256 emac->phy_node = eth_node;
1257 }
1258
1259 ret = of_get_phy_mode(eth_node, &emac->phy_if);
1260 if (ret) {
1261 dev_err(prueth->dev, "could not get phy-mode property\n");
1262 goto free;
1263 }
1264
1265 if (emac->phy_if != PHY_INTERFACE_MODE_MII &&
1266 !phy_interface_mode_is_rgmii(emac->phy_if)) {
1267 dev_err(prueth->dev, "PHY mode unsupported %s\n", phy_modes(emac->phy_if));
1268 ret = -EINVAL;
1269 goto free;
1270 }
1271
1272 /* AM65 SR2.0 has TX Internal delay always enabled by hardware
1273 * and it is not possible to disable TX Internal delay. The below
1274 * switch case block describes how we handle different phy modes
1275 * based on hardware restriction.
1276 */
1277 switch (emac->phy_if) {
1278 case PHY_INTERFACE_MODE_RGMII_ID:
1279 emac->phy_if = PHY_INTERFACE_MODE_RGMII_RXID;
1280 break;
1281 case PHY_INTERFACE_MODE_RGMII_TXID:
1282 emac->phy_if = PHY_INTERFACE_MODE_RGMII;
1283 break;
1284 case PHY_INTERFACE_MODE_RGMII:
1285 case PHY_INTERFACE_MODE_RGMII_RXID:
1286 dev_err(prueth->dev, "RGMII mode without TX delay is not supported");
1287 ret = -EINVAL;
1288 goto free;
1289 default:
1290 break;
1291 }
1292
1293 /* get mac address from DT and set private and netdev addr */
1294 ret = of_get_ethdev_address(eth_node, ndev);
1295 if (!is_valid_ether_addr(ndev->dev_addr)) {
1296 eth_hw_addr_random(ndev);
1297 dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n",
1298 port, ndev->dev_addr);
1299 }
1300 ether_addr_copy(emac->mac_addr, ndev->dev_addr);
1301
1302 ndev->dev.of_node = eth_node;
1303 ndev->min_mtu = PRUETH_MIN_PKT_SIZE;
1304 ndev->max_mtu = PRUETH_MAX_MTU;
1305 ndev->netdev_ops = &emac_netdev_ops;
1306 ndev->ethtool_ops = &icssg_ethtool_ops;
1307 ndev->hw_features = NETIF_F_SG;
1308 ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
1309 ndev->hw_features |= NETIF_PRUETH_HSR_OFFLOAD_FEATURES;
1310 xdp_set_features_flag(ndev,
1311 NETDEV_XDP_ACT_BASIC |
1312 NETDEV_XDP_ACT_REDIRECT |
1313 NETDEV_XDP_ACT_NDO_XMIT);
1314
1315 netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
1316 hrtimer_setup(&emac->rx_hrtimer, &emac_rx_timer_callback, CLOCK_MONOTONIC,
1317 HRTIMER_MODE_REL_PINNED);
1318 prueth->emac[mac] = emac;
1319
1320 return 0;
1321
1322 free:
1323 pruss_release_mem_region(prueth->pruss, &emac->dram);
1324 free_wq:
1325 destroy_workqueue(emac->cmd_wq);
1326 free_ndev:
1327 emac->ndev = NULL;
1328 prueth->emac[mac] = NULL;
1329 free_netdev(ndev);
1330
1331 return ret;
1332 }
1333
1334 bool prueth_dev_check(const struct net_device *ndev)
1335 {
1336 if (ndev->netdev_ops == &emac_netdev_ops && netif_running(ndev)) {
1337 struct prueth_emac *emac = netdev_priv(ndev);
1338
1339 return emac->prueth->is_switch_mode;
1340 }
1341
1342 return false;
1343 }
1344
1345 static void prueth_offload_fwd_mark_update(struct prueth *prueth)
1346 {
1347 int set_val = 0;
1348 int i;
1349
1350 if (prueth->br_members == (BIT(PRUETH_PORT_MII0) | BIT(PRUETH_PORT_MII1)))
1351 set_val = 1;
1352
1353 dev_dbg(prueth->dev, "set offload_fwd_mark %d\n", set_val);
1354
1355 for (i = PRUETH_MAC0; i < PRUETH_NUM_MACS; i++) {
1356 struct prueth_emac *emac = prueth->emac[i];
1357
1358 if (!emac || !emac->ndev)
1359 continue;
1360
1361 emac->offload_fwd_mark = set_val;
1362 }
1363 }
1364
1365 static int prueth_emac_restart(struct prueth *prueth)
1366 {
1367 struct prueth_emac *emac0 = prueth->emac[PRUETH_MAC0];
1368 struct prueth_emac *emac1 = prueth->emac[PRUETH_MAC1];
1369 int ret;
1370
1371 /* Detach the net_device for both PRUeth ports */
1372 if (netif_running(emac0->ndev))
1373 netif_device_detach(emac0->ndev);
1374 if (netif_running(emac1->ndev))
1375 netif_device_detach(emac1->ndev);
1376
1377 /* Disable both PRUeth ports */
1378 ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
1379 ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);
1380 if (ret)
1381 return ret;
1382
1383 /* Stop both pru cores for both PRUeth ports */
1384 ret = prueth_emac_common_stop(prueth);
1385 if (ret) {
1386 dev_err(prueth->dev, "Failed to stop the firmwares");
1387 return ret;
1388 }
1389
1390 /* Start both pru cores for both PRUeth ports */
1391 ret = prueth_emac_common_start(prueth);
1392 if (ret) {
1393 dev_err(prueth->dev, "Failed to start the firmwares");
1394 return ret;
1395 }
1396
1397 /* Enable forwarding for both PRUeth ports */
1398 ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD);
1399 ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD);
1400
1401 /* Attach the net_device for both PRUeth ports */
1402 netif_device_attach(emac0->ndev);
1403 netif_device_attach(emac1->ndev);
1404
1405 return ret;
1406 }
1407
1408 static void icssg_change_mode(struct prueth *prueth)
1409 {
1410 int ret;
1411
1412 ret = prueth_emac_restart(prueth);
1413 if (ret) {
1414 dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
1415 return;
1416 }
1417
1418 icssg_enable_fw_offload(prueth);
1419 }
1420
1421 static int prueth_netdevice_port_link(struct net_device *ndev,
1422 struct net_device *br_ndev,
1423 struct netlink_ext_ack *extack)
1424 {
1425 struct prueth_emac *emac = netdev_priv(ndev);
1426 struct prueth *prueth = emac->prueth;
1427 int err;
1428
1429 if (!prueth->br_members) {
1430 prueth->hw_bridge_dev = br_ndev;
1431 } else {
1432 /* This is adding the port to a second bridge, which is
1433 * unsupported
1434 */
1435 if (prueth->hw_bridge_dev != br_ndev)
1436 return -EOPNOTSUPP;
1437 }
1438
1439 err = switchdev_bridge_port_offload(br_ndev, ndev, emac,
1440 &prueth->prueth_switchdev_nb,
1441 &prueth->prueth_switchdev_bl_nb,
1442 false, extack);
1443 if (err)
1444 return err;
1445
1446 prueth->br_members |= BIT(emac->port_id);
1447
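/* Switch mode is entered only when both MII ports join the same bridge */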
1448 if (!prueth->is_switch_mode) {
1449 if (prueth->br_members & BIT(PRUETH_PORT_MII0) &&
1450 prueth->br_members & BIT(PRUETH_PORT_MII1)) {
1451 prueth->is_switch_mode = true;
1452 prueth->default_vlan = PRUETH_DFLT_VLAN_SW;
1453 emac->port_vlan = prueth->default_vlan;
1454 icssg_change_mode(prueth);
1455 }
1456 }
1457
1458 prueth_offload_fwd_mark_update(prueth);
1459
1460 return NOTIFY_DONE;
1461 }
1462
1463 static void prueth_netdevice_port_unlink(struct net_device *ndev)
1464 {
1465 struct prueth_emac *emac = netdev_priv(ndev);
1466 struct prueth *prueth = emac->prueth;
1467 int ret;
1468
1469 prueth->br_members &= ~BIT(emac->port_id);
1470
1471 if (prueth->is_switch_mode) {
1472 prueth->is_switch_mode = false;
1473 emac->port_vlan = 0;
1474 ret = prueth_emac_restart(prueth);
1475 if (ret) {
1476 dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
1477 return;
1478 }
1479 }
1480
1481 prueth_offload_fwd_mark_update(prueth);
1482
1483 if (!prueth->br_members)
1484 prueth->hw_bridge_dev = NULL;
1485 }
1486
1487 static int prueth_hsr_port_link(struct net_device *ndev)
1488 {
1489 struct prueth_emac *emac = netdev_priv(ndev);
1490 struct prueth *prueth = emac->prueth;
1491 struct prueth_emac *emac0;
1492 struct prueth_emac *emac1;
1493
1494 emac0 = prueth->emac[PRUETH_MAC0];
1495 emac1 = prueth->emac[PRUETH_MAC1];
1496
1497 if (prueth->is_switch_mode)
1498 return -EOPNOTSUPP;
1499
1500 prueth->hsr_members |= BIT(emac->port_id);
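/* HSR/PRP offload needs both ports joined with the offload features enabled */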
1501 if (!prueth->is_hsr_offload_mode) {
1502 if (prueth->hsr_members & BIT(PRUETH_PORT_MII0) &&
1503 prueth->hsr_members & BIT(PRUETH_PORT_MII1)) {
1504 if (!(emac0->ndev->features &
1505 NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
1506 !(emac1->ndev->features &
1507 NETIF_PRUETH_HSR_OFFLOAD_FEATURES))
1508 return -EOPNOTSUPP;
1509 prueth->is_hsr_offload_mode = true;
1510 prueth->default_vlan = PRUETH_DFLT_VLAN_HSR;
1511 emac0->port_vlan = prueth->default_vlan;
1512 emac1->port_vlan = prueth->default_vlan;
1513 icssg_change_mode(prueth);
1514 netdev_dbg(ndev, "Enabling HSR offload mode\n");
1515 }
1516 }
1517
1518 return 0;
1519 }
1520
1521 static void prueth_hsr_port_unlink(struct net_device *ndev)
1522 {
1523 struct prueth_emac *emac = netdev_priv(ndev);
1524 struct prueth *prueth = emac->prueth;
1525 struct prueth_emac *emac0;
1526 struct prueth_emac *emac1;
1527 int ret;
1528
1529 emac0 = prueth->emac[PRUETH_MAC0];
1530 emac1 = prueth->emac[PRUETH_MAC1];
1531
1532 prueth->hsr_members &= ~BIT(emac->port_id);
1533 if (prueth->is_hsr_offload_mode) {
1534 prueth->is_hsr_offload_mode = false;
1535 emac0->port_vlan = 0;
1536 emac1->port_vlan = 0;
1537 prueth->hsr_dev = NULL;
1538 ret = prueth_emac_restart(prueth);
1539 if (ret) {
1540 dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
1541 return;
1542 }
1543 netdev_dbg(ndev, "Disabling HSR Offload mode\n");
1544 }
1545 }
1546
1547 /* netdev notifier */
1548 static int prueth_netdevice_event(struct notifier_block *unused,
1549 unsigned long event, void *ptr)
1550 {
1551 struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
1552 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
1553 struct netdev_notifier_changeupper_info *info;
1554 struct prueth_emac *emac = netdev_priv(ndev);
1555 struct prueth *prueth = emac->prueth;
1556 enum hsr_version hsr_ndev_version;
1557 int ret = NOTIFY_DONE;
1558
1559 if (ndev->netdev_ops != &emac_netdev_ops)
1560 return NOTIFY_DONE;
1561
1562 switch (event) {
1563 case NETDEV_CHANGEUPPER:
1564 info = ptr;
1565
1566 if ((ndev->features & NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
1567 is_hsr_master(info->upper_dev)) {
1568 hsr_get_version(info->upper_dev, &hsr_ndev_version);
1569 if (hsr_ndev_version != HSR_V1 && hsr_ndev_version != PRP_V1)
1570 return -EOPNOTSUPP;
1571 prueth->hsr_prp_version = hsr_ndev_version;
1572
1573 if (info->linking) {
1574 if (!prueth->hsr_dev) {
1575 prueth->hsr_dev = info->upper_dev;
1576 icssg_class_set_host_mac_addr(prueth->miig_rt,
1577 prueth->hsr_dev->dev_addr);
1578 } else {
1579 if (prueth->hsr_dev != info->upper_dev) {
1580 netdev_dbg(ndev, "Both interfaces must be linked to same upper device\n");
1581 return -EOPNOTSUPP;
1582 }
1583 }
1584 prueth_hsr_port_link(ndev);
1585 } else {
1586 prueth_hsr_port_unlink(ndev);
1587 }
1588 }
1589
1590 if (netif_is_bridge_master(info->upper_dev)) {
1591 if (info->linking)
1592 ret = prueth_netdevice_port_link(ndev, info->upper_dev, extack);
1593 else
1594 prueth_netdevice_port_unlink(ndev);
1595 }
1596 break;
1597 default:
1598 return NOTIFY_DONE;
1599 }
1600
1601 return notifier_from_errno(ret);
1602 }
1603
1604 static int prueth_register_notifiers(struct prueth *prueth)
1605 {
1606 int ret = 0;
1607
1608 prueth->prueth_netdevice_nb.notifier_call = &prueth_netdevice_event;
1609 ret = register_netdevice_notifier(&prueth->prueth_netdevice_nb);
1610 if (ret) {
1611 dev_err(prueth->dev, "can't register netdevice notifier\n");
1612 return ret;
1613 }
1614
1615 ret = prueth_switchdev_register_notifiers(prueth);
1616 if (ret)
1617 unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
1618
1619 return ret;
1620 }
1621
1622 static void prueth_unregister_notifiers(struct prueth *prueth)
1623 {
1624 prueth_switchdev_unregister_notifiers(prueth);
1625 unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
1626 }
1627
1628 static void icssg_read_firmware_names(struct device_node *np,
1629 struct icssg_firmwares *fw)
1630 {
1631 int i;
1632
1633 for (i = 0; i < PRUETH_NUM_MACS; i++) {
1634 of_property_read_string_index(np, "firmware-name", i * 3 + 0,
1635 &fw[i].pru);
1636 of_property_read_string_index(np, "firmware-name", i * 3 + 1,
1637 &fw[i].rtu);
1638 of_property_read_string_index(np, "firmware-name", i * 3 + 2,
1639 &fw[i].txpru);
1640 }
1641 }
1642
1643 /* icssg_firmware_name_replace - Replace a substring in firmware name
1644 * @dev: device pointer for memory allocation
1645 * @src: source firmware name string
1646 * @from: substring to replace
1647 * @to: replacement substring
1648 *
1649 * Return: a newly allocated string with the replacement, or the original
1650 * string if replacement is not possible.
1651 */
1652 static const char *icssg_firmware_name_replace(struct device *dev,
1653 const char *src,
1654 const char *from,
1655 const char *to)
1656 {
1657 size_t prefix, from_len, to_len, total;
1658 const char *p = strstr(src, from);
1659 char *buf;
1660
1661 if (!p)
1662 return src; /* fallback: no replacement, use original */
1663
1664 prefix = p - src;
1665 from_len = strlen(from);
1666 to_len = strlen(to);
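/* New length: source minus the removed substring, plus the replacement and NUL */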
1667 total = strlen(src) - from_len + to_len + 1;
1668
1669 buf = devm_kzalloc(dev, total, GFP_KERNEL);
1670 if (!buf)
1671 return src; /* fallback: allocation failed, use original */
1672
1673 strscpy(buf, src, prefix + 1);
1674 strscpy(buf + prefix, to, to_len + 1);
1675 strscpy(buf + prefix + to_len, p + from_len, total - prefix - to_len);
1676
1677 return buf;
1678 }
1679
1680 /**
1681 * icssg_mode_firmware_names - Generate firmware names for a specific mode
1682 * @dev: device pointer for logging and context
1683 * @src: source array of firmware name structures
1684 * @dst: destination array to store updated firmware name structures
1685 * @from: substring in firmware names to be replaced
1686 * @to: substring to replace @from in firmware names
1687 *
1688 * Iterates over all MACs and replaces occurrences of the @from substring
1689 * with @to in the firmware names (pru, rtu, txpru) for each MAC. The
1690 * updated firmware names are stored in the @dst array.
1691 */
1692 static void icssg_mode_firmware_names(struct device *dev,
1693 struct icssg_firmwares *src,
1694 struct icssg_firmwares *dst,
1695 const char *from, const char *to)
1696 {
1697 int i;
1698
1699 for (i = 0; i < PRUETH_NUM_MACS; i++) {
1700 dst[i].pru = icssg_firmware_name_replace(dev, src[i].pru,
1701 from, to);
1702 dst[i].rtu = icssg_firmware_name_replace(dev, src[i].rtu,
1703 from, to);
1704 dst[i].txpru = icssg_firmware_name_replace(dev, src[i].txpru,
1705 from, to);
1706 }
1707 }
1708
1709 static int prueth_probe(struct platform_device *pdev)
1710 {
1711 struct device_node *eth_node, *eth_ports_node;
1712 struct device_node *eth0_node = NULL;
1713 struct device_node *eth1_node = NULL;
1714 struct genpool_data_align gp_data = {
1715 .align = SZ_64K,
1716 };
1717 struct device *dev = &pdev->dev;
1718 struct device_node *np;
1719 struct prueth *prueth;
1720 struct pruss *pruss;
1721 u32 msmc_ram_size;
1722 int i, ret;
1723
1724 np = dev->of_node;
1725
1726 BUILD_BUG_ON_MSG((sizeof(struct prueth_swdata) > PRUETH_NAV_SW_DATA_SIZE),
1727 "insufficient SW_DATA size");
1728
1729 prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
1730 if (!prueth)
1731 return -ENOMEM;
1732
1733 dev_set_drvdata(dev, prueth);
1734 prueth->pdev = pdev;
1735 prueth->pdata = *(const struct prueth_pdata *)device_get_match_data(dev);
1736
1737 prueth->dev = dev;
1738 eth_ports_node = of_get_child_by_name(np, "ethernet-ports");
1739 if (!eth_ports_node)
1740 return -ENOENT;
1741
1742 for_each_child_of_node(eth_ports_node, eth_node) {
1743 u32 reg;
1744
1745 if (strcmp(eth_node->name, "port"))
1746 continue;
1747 ret = of_property_read_u32(eth_node, "reg", &reg);
1748 if (ret < 0) {
1749 dev_err(dev, "%pOF error reading port_id %d\n",
1750 eth_node, ret);
1751 }
1752
1753 of_node_get(eth_node);
1754
1755 if (reg == 0) {
1756 eth0_node = eth_node;
1757 if (!of_device_is_available(eth0_node)) {
1758 of_node_put(eth0_node);
1759 eth0_node = NULL;
1760 }
1761 } else if (reg == 1) {
1762 eth1_node = eth_node;
1763 if (!of_device_is_available(eth1_node)) {
1764 of_node_put(eth1_node);
1765 eth1_node = NULL;
1766 }
1767 } else {
1768 dev_err(dev, "port reg should be 0 or 1\n");
1769 }
1770 }
1771
1772 of_node_put(eth_ports_node);
1773
1774 /* At least one node must be present and available else we fail */
1775 if (!eth0_node && !eth1_node) {
1776 dev_err(dev, "neither port0 nor port1 node available\n");
1777 return -ENODEV;
1778 }
1779
1780 if (eth0_node == eth1_node) {
1781 dev_err(dev, "port0 and port1 can't have same reg\n");
1782 of_node_put(eth0_node);
1783 return -ENODEV;
1784 }
1785
1786 prueth->eth_node[PRUETH_MAC0] = eth0_node;
1787 prueth->eth_node[PRUETH_MAC1] = eth1_node;
1788
1789 prueth->miig_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-g-rt");
1790 if (IS_ERR(prueth->miig_rt)) {
1791 dev_err(dev, "couldn't get ti,mii-g-rt syscon regmap\n");
1792 return -ENODEV;
1793 }
1794
1795 prueth->mii_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-rt");
1796 if (IS_ERR(prueth->mii_rt)) {
1797 dev_err(dev, "couldn't get ti,mii-rt syscon regmap\n");
1798 return -ENODEV;
1799 }
1800
1801 prueth->pa_stats = syscon_regmap_lookup_by_phandle(np, "ti,pa-stats");
1802 if (IS_ERR(prueth->pa_stats)) {
1803 dev_err(dev, "couldn't get ti,pa-stats syscon regmap\n");
1804 prueth->pa_stats = NULL;
1805 }
1806
	if (eth0_node || eth1_node) {
		ret = prueth_get_cores(prueth, ICSS_SLICE0, false);
		if (ret)
			goto put_cores;
		ret = prueth_get_cores(prueth, ICSS_SLICE1, false);
		if (ret)
			goto put_cores;
	}

	pruss = pruss_get(eth0_node ?
			  prueth->pru[ICSS_SLICE0] : prueth->pru[ICSS_SLICE1]);
	if (IS_ERR(pruss)) {
		ret = PTR_ERR(pruss);
		dev_err(dev, "unable to get pruss handle\n");
		goto put_cores;
	}

	prueth->pruss = pruss;

	ret = pruss_request_mem_region(pruss, PRUSS_MEM_SHRD_RAM2,
				       &prueth->shram);
	if (ret) {
		dev_err(dev, "unable to get PRUSS SHRD RAM2: %d\n", ret);
		goto put_pruss;
	}

	prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
	if (!prueth->sram_pool) {
		dev_err(dev, "unable to get SRAM pool\n");
		ret = -ENODEV;

		goto put_mem;
	}

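	/*
	 * Size the shared buffer region according to banked-RAM support and
	 * switch-mode capability.
	 */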
	prueth->is_switchmode_supported = prueth->pdata.switch_mode;
	if (prueth->pdata.banked_ms_ram) {
		/* Reserve 2 MSMC RAM banks for buffers to avoid arbitration */
		msmc_ram_size = (2 * MSMC_RAM_BANK_SIZE);
	} else {
		msmc_ram_size = PRUETH_EMAC_TOTAL_BUF_SIZE;
		if (prueth->is_switchmode_supported)
			msmc_ram_size = PRUETH_SW_TOTAL_BUF_SIZE;
	}

	/* NOTE: FW bug needs buffer base to be 64KB aligned */
	prueth->msmcram.va =
		(void __iomem *)gen_pool_alloc_algo(prueth->sram_pool,
						    msmc_ram_size,
						    gen_pool_first_fit_align,
						    &gp_data);

	if (!prueth->msmcram.va) {
		ret = -ENOMEM;
		dev_err(dev, "unable to allocate MSMC resource\n");
		goto put_mem;
	}
	prueth->msmcram.pa = gen_pool_virt_to_phys(prueth->sram_pool,
						   (unsigned long)prueth->msmcram.va);
	prueth->msmcram.size = msmc_ram_size;
	memset_io(prueth->msmcram.va, 0, msmc_ram_size);
	dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa,
		prueth->msmcram.va, prueth->msmcram.size);

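	/*
	 * Both MAC ports share IEP0; IEP1 is only brought up for firmware use
	 * as part of the 10M link-detect workaround below.
	 */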
	prueth->iep0 = icss_iep_get_idx(np, 0);
	if (IS_ERR(prueth->iep0)) {
		ret = dev_err_probe(dev, PTR_ERR(prueth->iep0), "iep0 get failed\n");
		prueth->iep0 = NULL;
		goto free_pool;
	}

	prueth->iep1 = icss_iep_get_idx(np, 1);
	if (IS_ERR(prueth->iep1)) {
		ret = dev_err_probe(dev, PTR_ERR(prueth->iep1), "iep1 get failed\n");
		goto put_iep0;
	}

	if (prueth->pdata.quirk_10m_link_issue) {
		/* Enable IEP1 for FW in 64bit mode as W/A for 10M FD link detect issue under TX
		 * traffic.
		 */
		icss_iep_init_fw(prueth->iep1);
	}

	/* Read EMAC firmware names from device tree */
	icssg_read_firmware_names(np, prueth->icssg_emac_firmwares);

	/* Generate other mode firmware names based on EMAC firmware names */
	icssg_mode_firmware_names(dev, prueth->icssg_emac_firmwares,
				  prueth->icssg_switch_firmwares, "eth", "sw");
	icssg_mode_firmware_names(dev, prueth->icssg_emac_firmwares,
				  prueth->icssg_hsr_firmwares, "eth", "hsr");
	icssg_mode_firmware_names(dev, prueth->icssg_emac_firmwares,
				  prueth->icssg_prp_firmwares, "eth", "prp");

	spin_lock_init(&prueth->vtbl_lock);
	spin_lock_init(&prueth->stats_lock);
	/* setup netdev interfaces */
	if (eth0_node) {
		ret = prueth_netdev_init(prueth, eth0_node);
		if (ret) {
			dev_err_probe(dev, ret, "netdev init %s failed\n",
				      eth0_node->name);
			goto exit_iep;
		}

		prueth->emac[PRUETH_MAC0]->half_duplex =
			of_property_read_bool(eth0_node, "ti,half-duplex-capable");

		prueth->emac[PRUETH_MAC0]->iep = prueth->iep0;
	}

	if (eth1_node) {
		ret = prueth_netdev_init(prueth, eth1_node);
		if (ret) {
			dev_err_probe(dev, ret, "netdev init %s failed\n",
				      eth1_node->name);
			goto netdev_exit;
		}

		prueth->emac[PRUETH_MAC1]->half_duplex =
			of_property_read_bool(eth1_node, "ti,half-duplex-capable");

		prueth->emac[PRUETH_MAC1]->iep = prueth->iep0;
	}

	/* register the network devices */
	if (eth0_node) {
		ret = register_netdev(prueth->emac[PRUETH_MAC0]->ndev);
		if (ret) {
			dev_err(dev, "can't register netdev for port MII0");
			goto netdev_exit;
		}

		prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev;

		ret = emac_phy_connect(prueth->emac[PRUETH_MAC0]);
		if (ret) {
			dev_err(dev,
				"can't connect to MII0 PHY, error -%d", ret);
			goto netdev_unregister;
		}
		phy_attached_info(prueth->emac[PRUETH_MAC0]->ndev->phydev);
	}

	if (eth1_node) {
		ret = register_netdev(prueth->emac[PRUETH_MAC1]->ndev);
		if (ret) {
			dev_err(dev, "can't register netdev for port MII1");
			goto netdev_unregister;
		}

		prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev;
		ret = emac_phy_connect(prueth->emac[PRUETH_MAC1]);
		if (ret) {
			dev_err(dev,
				"can't connect to MII1 PHY, error %d", ret);
			goto netdev_unregister;
		}
		phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev);
	}

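	/* Switch mode additionally needs notifier registration and a switch ID */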
	if (prueth->is_switchmode_supported) {
		ret = prueth_register_notifiers(prueth);
		if (ret)
			goto netdev_unregister;

		sprintf(prueth->switch_id, "%s", dev_name(dev));
	}

	dev_info(dev, "TI PRU ethernet driver initialized: %s EMAC mode\n",
		 (!eth0_node || !eth1_node) ? "single" : "dual");

	if (eth1_node)
		of_node_put(eth1_node);
	if (eth0_node)
		of_node_put(eth0_node);
	return 0;

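/* Error unwind: undo the successful probe steps in reverse order */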
netdev_unregister:
	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		if (!prueth->registered_netdevs[i])
			continue;
		if (prueth->emac[i]->ndev->phydev) {
			phy_disconnect(prueth->emac[i]->ndev->phydev);
			prueth->emac[i]->ndev->phydev = NULL;
		}
		unregister_netdev(prueth->registered_netdevs[i]);
	}

netdev_exit:
	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		eth_node = prueth->eth_node[i];
		if (!eth_node)
			continue;

		prueth_netdev_exit(prueth, eth_node);
	}

exit_iep:
	if (prueth->pdata.quirk_10m_link_issue)
		icss_iep_exit_fw(prueth->iep1);
	icss_iep_put(prueth->iep1);

put_iep0:
	icss_iep_put(prueth->iep0);
	prueth->iep0 = NULL;
	prueth->iep1 = NULL;

free_pool:
	gen_pool_free(prueth->sram_pool,
		      (unsigned long)prueth->msmcram.va,
		      prueth->msmcram.size);

put_mem:
	pruss_release_mem_region(prueth->pruss, &prueth->shram);

put_pruss:
	pruss_put(prueth->pruss);

put_cores:
	if (eth0_node || eth1_node) {
		prueth_put_cores(prueth, ICSS_SLICE0);
		of_node_put(eth0_node);

		prueth_put_cores(prueth, ICSS_SLICE1);
		of_node_put(eth1_node);
	}

	return ret;
}

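/*
 * Remove: tear down in reverse probe order - disconnect the PHYs and
 * unregister the netdevs, release the IEPs, SRAM pool, PRUSS memory
 * region and PRU cores.
 */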
static void prueth_remove(struct platform_device *pdev)
{
	struct prueth *prueth = platform_get_drvdata(pdev);
	struct device_node *eth_node;
	int i;

	prueth_unregister_notifiers(prueth);

	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		if (!prueth->registered_netdevs[i])
			continue;
		phy_stop(prueth->emac[i]->ndev->phydev);
		phy_disconnect(prueth->emac[i]->ndev->phydev);
		prueth->emac[i]->ndev->phydev = NULL;
		unregister_netdev(prueth->registered_netdevs[i]);
	}

	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		eth_node = prueth->eth_node[i];
		if (!eth_node)
			continue;

		prueth_netdev_exit(prueth, eth_node);
	}

	if (prueth->pdata.quirk_10m_link_issue)
		icss_iep_exit_fw(prueth->iep1);

	icss_iep_put(prueth->iep1);
	icss_iep_put(prueth->iep0);

	gen_pool_free(prueth->sram_pool,
		      (unsigned long)prueth->msmcram.va,
		      prueth->msmcram.size);

	pruss_release_mem_region(prueth->pruss, &prueth->shram);

	pruss_put(prueth->pruss);

	if (prueth->eth_node[PRUETH_MAC1])
		prueth_put_cores(prueth, ICSS_SLICE1);

	if (prueth->eth_node[PRUETH_MAC0])
		prueth_put_cores(prueth, ICSS_SLICE0);
}

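/* SoC-specific configuration, selected via the OF match table below */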
static const struct prueth_pdata am654_icssg_pdata = {
	.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
	.quirk_10m_link_issue = 1,
	.switch_mode = 1,
	.banked_ms_ram = 0,
};

static const struct prueth_pdata am64x_icssg_pdata = {
	.fdqring_mode = K3_RINGACC_RING_MODE_RING,
	.quirk_10m_link_issue = 1,
	.switch_mode = 1,
	.banked_ms_ram = 1,
};

static const struct of_device_id prueth_dt_match[] = {
	{ .compatible = "ti,am654-icssg-prueth", .data = &am654_icssg_pdata },
	{ .compatible = "ti,am642-icssg-prueth", .data = &am64x_icssg_pdata },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, prueth_dt_match);

static struct platform_driver prueth_driver = {
	.probe = prueth_probe,
	.remove = prueth_remove,
	.driver = {
		.name = "icssg-prueth",
		.of_match_table = prueth_dt_match,
		.pm = &prueth_dev_pm_ops,
	},
};
module_platform_driver(prueth_driver);

MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
MODULE_DESCRIPTION("PRUSS ICSSG Ethernet Driver");
MODULE_LICENSE("GPL");