1 // SPDX-License-Identifier: GPL-2.0
2
3 /* Texas Instruments ICSSG Ethernet Driver
4 *
5 * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
6 *
7 */
8
9 #include <linux/bitops.h>
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/dma/ti-cppi5.h>
14 #include <linux/etherdevice.h>
15 #include <linux/genalloc.h>
16 #include <linux/if_hsr.h>
17 #include <linux/if_vlan.h>
18 #include <linux/interrupt.h>
19 #include <linux/io-64-nonatomic-hi-lo.h>
20 #include <linux/kernel.h>
21 #include <linux/mfd/syscon.h>
22 #include <linux/module.h>
23 #include <linux/of.h>
24 #include <linux/of_mdio.h>
25 #include <linux/of_net.h>
26 #include <linux/platform_device.h>
27 #include <linux/phy.h>
28 #include <linux/property.h>
29 #include <linux/remoteproc/pruss.h>
30 #include <linux/regmap.h>
31 #include <linux/remoteproc.h>
32 #include <net/switchdev.h>
33
34 #include "icssg_prueth.h"
35 #include "icssg_mii_rt.h"
36 #include "icssg_switchdev.h"
37 #include "../k3-cppi-desc-pool.h"
38
39 #define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG Ethernet driver"
40
41 #define DEFAULT_VID 1
42 #define DEFAULT_PORT_MASK 1
43 #define DEFAULT_UNTAG_MASK 1
44
45 #define NETIF_PRUETH_HSR_OFFLOAD_FEATURES (NETIF_F_HW_HSR_FWD | \
46 NETIF_F_HW_HSR_DUP | \
47 NETIF_F_HW_HSR_TAG_INS | \
48 NETIF_F_HW_HSR_TAG_RM)
49
50 /* CTRLMMR_ICSSG_RGMII_CTRL register bits */
51 #define ICSSG_CTRL_RGMII_ID_MODE BIT(24)
52
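/* Pop one TX timestamp response for this slice from the firmware queue,
 * copy it from shared RAM into @rsp and return the buffer to the pool.
 * Returns 0 on success or a negative value if no response is pending.
 */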
53 static int emac_get_tx_ts(struct prueth_emac *emac,
54 struct emac_tx_ts_response *rsp)
55 {
56 struct prueth *prueth = emac->prueth;
57 int slice = prueth_emac_slice(emac);
58 int addr;
59
60 addr = icssg_queue_pop(prueth, slice == 0 ?
61 ICSSG_TS_POP_SLICE0 : ICSSG_TS_POP_SLICE1);
62 if (addr < 0)
63 return addr;
64
65 memcpy_fromio(rsp, prueth->shram.va + addr, sizeof(*rsp));
66 /* return buffer back to the pool */
67 icssg_queue_push(prueth, slice == 0 ?
68 ICSSG_TS_PUSH_SLICE0 : ICSSG_TS_PUSH_SLICE1, addr);
69
70 return 0;
71 }
72
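/* Drain all pending TX timestamp responses from the firmware, match each
 * response to its skb via the cookie, convert the IEP counter values to
 * nanoseconds and deliver the hardware timestamp to the stack.
 */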
73 static void tx_ts_work(struct prueth_emac *emac)
74 {
75 struct skb_shared_hwtstamps ssh;
76 struct emac_tx_ts_response tsr;
77 struct sk_buff *skb;
78 int ret = 0;
79 u32 hi_sw;
80 u64 ns;
81
82 /* There may be more than one pending request */
83 while (1) {
84 ret = emac_get_tx_ts(emac, &tsr);
85 if (ret) /* nothing more */
86 break;
87
88 if (tsr.cookie >= PRUETH_MAX_TX_TS_REQUESTS ||
89 !emac->tx_ts_skb[tsr.cookie]) {
90 netdev_err(emac->ndev, "Invalid TX TS cookie 0x%x\n",
91 tsr.cookie);
92 break;
93 }
94
95 skb = emac->tx_ts_skb[tsr.cookie];
96 emac->tx_ts_skb[tsr.cookie] = NULL; /* free slot */
97 if (!skb) {
98 netdev_err(emac->ndev, "Driver Bug! got NULL skb\n");
99 break;
100 }
101
102 hi_sw = readl(emac->prueth->shram.va +
103 TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
104 ns = icssg_ts_to_ns(hi_sw, tsr.hi_ts, tsr.lo_ts,
105 IEP_DEFAULT_CYCLE_TIME_NS);
106
107 memset(&ssh, 0, sizeof(ssh));
108 ssh.hwtstamp = ns_to_ktime(ns);
109
110 skb_tstamp_tx(skb, &ssh);
111 dev_consume_skb_any(skb);
112
113 if (atomic_dec_and_test(&emac->tx_ts_pending)) /* no more? */
114 break;
115 }
116 }
117
118 static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id)
119 {
120 struct prueth_emac *emac = dev_id;
121
122 /* currently only TX timestamp is being returned */
123 tx_ts_work(emac);
124
125 return IRQ_HANDLED;
126 }
127
128 static struct icssg_firmwares icssg_hsr_firmwares[] = {
129 {
130 .pru = "ti-pruss/am65x-sr2-pru0-pruhsr-fw.elf",
131 .rtu = "ti-pruss/am65x-sr2-rtu0-pruhsr-fw.elf",
132 .txpru = "ti-pruss/am65x-sr2-txpru0-pruhsr-fw.elf",
133 },
134 {
135 .pru = "ti-pruss/am65x-sr2-pru1-pruhsr-fw.elf",
136 .rtu = "ti-pruss/am65x-sr2-rtu1-pruhsr-fw.elf",
137 .txpru = "ti-pruss/am65x-sr2-txpru1-pruhsr-fw.elf",
138 }
139 };
140
141 static struct icssg_firmwares icssg_switch_firmwares[] = {
142 {
143 .pru = "ti-pruss/am65x-sr2-pru0-prusw-fw.elf",
144 .rtu = "ti-pruss/am65x-sr2-rtu0-prusw-fw.elf",
145 .txpru = "ti-pruss/am65x-sr2-txpru0-prusw-fw.elf",
146 },
147 {
148 .pru = "ti-pruss/am65x-sr2-pru1-prusw-fw.elf",
149 .rtu = "ti-pruss/am65x-sr2-rtu1-prusw-fw.elf",
150 .txpru = "ti-pruss/am65x-sr2-txpru1-prusw-fw.elf",
151 }
152 };
153
154 static struct icssg_firmwares icssg_emac_firmwares[] = {
155 {
156 .pru = "ti-pruss/am65x-sr2-pru0-prueth-fw.elf",
157 .rtu = "ti-pruss/am65x-sr2-rtu0-prueth-fw.elf",
158 .txpru = "ti-pruss/am65x-sr2-txpru0-prueth-fw.elf",
159 },
160 {
161 .pru = "ti-pruss/am65x-sr2-pru1-prueth-fw.elf",
162 .rtu = "ti-pruss/am65x-sr2-rtu1-prueth-fw.elf",
163 .txpru = "ti-pruss/am65x-sr2-txpru1-prueth-fw.elf",
164 }
165 };
166
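/* Set the requested firmware image on the remote processor and boot it */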
167 static int prueth_start(struct rproc *rproc, const char *fw_name)
168 {
169 int ret;
170
171 ret = rproc_set_firmware(rproc, fw_name);
172 if (ret)
173 return ret;
174 return rproc_boot(rproc);
175 }
176
177 static void prueth_shutdown(struct rproc *rproc)
178 {
179 rproc_shutdown(rproc);
180 }
181
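/* Boot the PRU, RTU and TX_PRU cores of both slices with the firmware set
 * matching the current mode (EMAC, switch or HSR offload). On failure, any
 * cores already booted are shut down again.
 */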
182 static int prueth_emac_start(struct prueth *prueth)
183 {
184 struct icssg_firmwares *firmwares;
185 struct device *dev = prueth->dev;
186 int ret, slice;
187
188 if (prueth->is_switch_mode)
189 firmwares = icssg_switch_firmwares;
190 else if (prueth->is_hsr_offload_mode)
191 firmwares = icssg_hsr_firmwares;
192 else
193 firmwares = icssg_emac_firmwares;
194
195 for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
196 ret = prueth_start(prueth->pru[slice], firmwares[slice].pru);
197 if (ret) {
198 dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
199 goto unwind_slices;
200 }
201
202 ret = prueth_start(prueth->rtu[slice], firmwares[slice].rtu);
203 if (ret) {
204 dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
205 rproc_shutdown(prueth->pru[slice]);
206 goto unwind_slices;
207 }
208
209 ret = prueth_start(prueth->txpru[slice], firmwares[slice].txpru);
210 if (ret) {
211 dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
212 rproc_shutdown(prueth->rtu[slice]);
213 rproc_shutdown(prueth->pru[slice]);
214 goto unwind_slices;
215 }
216 }
217
218 return 0;
219
220 unwind_slices:
221 while (--slice >= 0) {
222 prueth_shutdown(prueth->txpru[slice]);
223 prueth_shutdown(prueth->rtu[slice]);
224 prueth_shutdown(prueth->pru[slice]);
225 }
226
227 return ret;
228 }
229
230 static void prueth_emac_stop(struct prueth *prueth)
231 {
232 int slice;
233
234 for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
235 prueth_shutdown(prueth->txpru[slice]);
236 prueth_shutdown(prueth->rtu[slice]);
237 prueth_shutdown(prueth->pru[slice]);
238 }
239 }
240
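/* Bring up the resources shared by both ports: clear MSMC and shared RAM,
 * program the default classifiers, configure each slice for the current
 * mode, boot the firmwares and initialize the IEP with the default cycle
 * time.
 */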
241 static int prueth_emac_common_start(struct prueth *prueth)
242 {
243 struct prueth_emac *emac;
244 int ret = 0;
245 int slice;
246
247 if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
248 return -EINVAL;
249
250 /* clear SMEM and MSMC settings for all slices */
251 memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
252 memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
253
254 icssg_class_default(prueth->miig_rt, ICSS_SLICE0, 0, false);
255 icssg_class_default(prueth->miig_rt, ICSS_SLICE1, 0, false);
256
257 if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
258 icssg_init_fw_offload_mode(prueth);
259 else
260 icssg_init_emac_mode(prueth);
261
262 for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
263 emac = prueth->emac[slice];
264 if (!emac)
265 continue;
266 ret = icssg_config(prueth, emac, slice);
267 if (ret)
268 goto disable_class;
269 }
270
271 ret = prueth_emac_start(prueth);
272 if (ret)
273 goto disable_class;
274
275 emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
276 prueth->emac[ICSS_SLICE1];
277 ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
278 emac, IEP_DEFAULT_CYCLE_TIME_NS);
279 if (ret) {
280 dev_err(prueth->dev, "Failed to initialize IEP module\n");
281 goto stop_pruss;
282 }
283
284 return 0;
285
286 stop_pruss:
287 prueth_emac_stop(prueth);
288
289 disable_class:
290 icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
291 icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);
292
293 return ret;
294 }
295
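/* Tear down the shared resources: disable the classifiers, shut down the
 * PRU cores of both slices and release the IEP.
 */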
296 static int prueth_emac_common_stop(struct prueth *prueth)
297 {
298 struct prueth_emac *emac;
299
300 if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
301 return -EINVAL;
302
303 icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
304 icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);
305
306 prueth_emac_stop(prueth);
307
308 emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
309 prueth->emac[ICSS_SLICE1];
310 icss_iep_exit(emac->iep);
311
312 return 0;
313 }
314
315 /* called back by the PHY layer when there is a change in the link state of the hw port */
316 static void emac_adjust_link(struct net_device *ndev)
317 {
318 struct prueth_emac *emac = netdev_priv(ndev);
319 struct phy_device *phydev = ndev->phydev;
320 struct prueth *prueth = emac->prueth;
321 bool new_state = false;
322 unsigned long flags;
323
324 if (phydev->link) {
325 /* check the mode of operation - full/half duplex */
326 if (phydev->duplex != emac->duplex) {
327 new_state = true;
328 emac->duplex = phydev->duplex;
329 }
330 if (phydev->speed != emac->speed) {
331 new_state = true;
332 emac->speed = phydev->speed;
333 }
334 if (!emac->link) {
335 new_state = true;
336 emac->link = 1;
337 }
338 } else if (emac->link) {
339 new_state = true;
340 emac->link = 0;
341
342 /* f/w should support 100 & 1000 */
343 emac->speed = SPEED_1000;
344
345 /* half duplex may not be supported by f/w */
346 emac->duplex = DUPLEX_FULL;
347 }
348
349 if (new_state) {
350 phy_print_status(phydev);
351
352 /* update RGMII and MII configuration based on PHY negotiated
353 * values
354 */
355 if (emac->link) {
356 if (emac->duplex == DUPLEX_HALF)
357 icssg_config_half_duplex(emac);
358 /* Set the RGMII cfg for gig en and full duplex */
359 icssg_update_rgmii_cfg(prueth->miig_rt, emac);
360
361 /* update the Tx IPG based on 100M/1G speed */
362 spin_lock_irqsave(&emac->lock, flags);
363 icssg_config_ipg(emac);
364 spin_unlock_irqrestore(&emac->lock, flags);
365 icssg_config_set_speed(emac);
366 icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
367
368 } else {
369 icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
370 }
371 }
372
373 if (emac->link) {
374 /* reactivate the transmit queue */
375 netif_tx_wake_all_queues(ndev);
376 } else {
377 netif_tx_stop_all_queues(ndev);
378 prueth_cleanup_tx_ts(emac);
379 }
380 }
381
382 static enum hrtimer_restart emac_rx_timer_callback(struct hrtimer *timer)
383 {
384 struct prueth_emac *emac =
385 container_of(timer, struct prueth_emac, rx_hrtimer);
386 int rx_flow = PRUETH_RX_FLOW_DATA;
387
388 enable_irq(emac->rx_chns.irq[rx_flow]);
389 return HRTIMER_NORESTART;
390 }
391
392 static int emac_phy_connect(struct prueth_emac *emac)
393 {
394 struct prueth *prueth = emac->prueth;
395 struct net_device *ndev = emac->ndev;
396 /* connect PHY */
397 ndev->phydev = of_phy_connect(emac->ndev, emac->phy_node,
398 &emac_adjust_link, 0,
399 emac->phy_if);
400 if (!ndev->phydev) {
401 dev_err(prueth->dev, "couldn't connect to phy %s\n",
402 emac->phy_node->full_name);
403 return -ENODEV;
404 }
405
406 if (!emac->half_duplex) {
407 dev_dbg(prueth->dev, "half duplex mode is not supported\n");
408 phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
409 phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
410 }
411
412 /* remove unsupported modes */
413 phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
414 phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
415 phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
416
417 if (emac->phy_if == PHY_INTERFACE_MODE_MII)
418 phy_set_max_speed(ndev->phydev, SPEED_100);
419
420 return 0;
421 }
422
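/* Read a consistent wall-clock snapshot: the firmware-maintained high count
 * and rollover count are re-read until both are stable across the low IEP
 * count read. The time is then composed as
 *   ts = ((hi_rollover_count << 23) | iepcount_hi) * IEP_DEFAULT_CYCLE_TIME_NS + iepcount_lo
 */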
423 static u64 prueth_iep_gettime(void *clockops_data, struct ptp_system_timestamp *sts)
424 {
425 u32 hi_rollover_count, hi_rollover_count_r;
426 struct prueth_emac *emac = clockops_data;
427 struct prueth *prueth = emac->prueth;
428 void __iomem *fw_hi_r_count_addr;
429 void __iomem *fw_count_hi_addr;
430 u32 iepcount_hi, iepcount_hi_r;
431 unsigned long flags;
432 u32 iepcount_lo;
433 u64 ts = 0;
434
435 fw_count_hi_addr = prueth->shram.va + TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET;
436 fw_hi_r_count_addr = prueth->shram.va + TIMESYNC_FW_WC_HI_ROLLOVER_COUNT_OFFSET;
437
438 local_irq_save(flags);
439 do {
440 iepcount_hi = icss_iep_get_count_hi(emac->iep);
441 iepcount_hi += readl(fw_count_hi_addr);
442 hi_rollover_count = readl(fw_hi_r_count_addr);
443 ptp_read_system_prets(sts);
444 iepcount_lo = icss_iep_get_count_low(emac->iep);
445 ptp_read_system_postts(sts);
446
447 iepcount_hi_r = icss_iep_get_count_hi(emac->iep);
448 iepcount_hi_r += readl(fw_count_hi_addr);
449 hi_rollover_count_r = readl(fw_hi_r_count_addr);
450 } while ((iepcount_hi_r != iepcount_hi) ||
451 (hi_rollover_count != hi_rollover_count_r));
452 local_irq_restore(flags);
453
454 ts = ((u64)hi_rollover_count) << 23 | iepcount_hi;
455 ts = ts * (u64)IEP_DEFAULT_CYCLE_TIME_NS + iepcount_lo;
456
457 return ts;
458 }
459
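/* Program the firmware setclock descriptor with the cycle count and IEP
 * count corresponding to @ns, then poll for the firmware acknowledgment
 * (up to roughly 5 ms).
 */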
460 static void prueth_iep_settime(void *clockops_data, u64 ns)
461 {
462 struct icssg_setclock_desc __iomem *sc_descp;
463 struct prueth_emac *emac = clockops_data;
464 struct icssg_setclock_desc sc_desc;
465 u64 cyclecount;
466 u32 cycletime;
467 int timeout;
468
469 sc_descp = emac->prueth->shram.va + TIMESYNC_FW_WC_SETCLOCK_DESC_OFFSET;
470
471 cycletime = IEP_DEFAULT_CYCLE_TIME_NS;
472 cyclecount = ns / cycletime;
473
474 memset(&sc_desc, 0, sizeof(sc_desc));
475 sc_desc.margin = cycletime - 1000;
476 sc_desc.cyclecounter0_set = cyclecount & GENMASK(31, 0);
477 sc_desc.cyclecounter1_set = (cyclecount & GENMASK(63, 32)) >> 32;
478 sc_desc.iepcount_set = ns % cycletime;
479 /* Count from 0 to (cycle time) - emac->iep->def_inc */
480 sc_desc.CMP0_current = cycletime - emac->iep->def_inc;
481
482 memcpy_toio(sc_descp, &sc_desc, sizeof(sc_desc));
483
484 writeb(1, &sc_descp->request);
485
486 timeout = 5; /* fw should take 2-3 ms */
487 while (timeout--) {
488 if (readb(&sc_descp->acknowledgment))
489 return;
490
491 usleep_range(500, 1000);
492 }
493
494 dev_err(emac->prueth->dev, "settime timeout\n");
495 }
496
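/* Configure the firmware SYNCOUT generator for the requested periodic
 * output: the period is split into a reduction factor (whole IEP cycles)
 * and a CMP offset clamped to stay within one cycle, and the start time is
 * rounded up to the next second boundary of the cycle counter.
 */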
497 static int prueth_perout_enable(void *clockops_data,
498 struct ptp_perout_request *req, int on,
499 u64 *cmp)
500 {
501 struct prueth_emac *emac = clockops_data;
502 u32 reduction_factor = 0, offset = 0;
503 struct timespec64 ts;
504 u64 current_cycle;
505 u64 start_offset;
506 u64 ns_period;
507
508 if (!on)
509 return 0;
510
511 /* Any firmware specific stuff for PPS/PEROUT handling */
512 ts.tv_sec = req->period.sec;
513 ts.tv_nsec = req->period.nsec;
514 ns_period = timespec64_to_ns(&ts);
515
516 /* f/w doesn't support period less than cycle time */
517 if (ns_period < IEP_DEFAULT_CYCLE_TIME_NS)
518 return -ENXIO;
519
520 reduction_factor = ns_period / IEP_DEFAULT_CYCLE_TIME_NS;
521 offset = ns_period % IEP_DEFAULT_CYCLE_TIME_NS;
522
523 /* f/w requires at least 1uS within a cycle so CMP
524 * can trigger after SYNC is enabled
525 */
526 if (offset < 5 * NSEC_PER_USEC)
527 offset = 5 * NSEC_PER_USEC;
528
529 /* if offset is close to cycle time then we will miss
530 * the CMP event for last tick when IEP rolls over.
531 * In normal mode, IEP tick is 4ns.
532 * In slow compensation it could be 0ns or 8ns at
533 * every slow compensation cycle.
534 */
535 if (offset > IEP_DEFAULT_CYCLE_TIME_NS - 8)
536 offset = IEP_DEFAULT_CYCLE_TIME_NS - 8;
537
538 /* we're in shadow mode so need to set upper 32-bits */
539 *cmp = (u64)offset << 32;
540
541 writel(reduction_factor, emac->prueth->shram.va +
542 TIMESYNC_FW_WC_SYNCOUT_REDUCTION_FACTOR_OFFSET);
543
544 current_cycle = icssg_read_time(emac->prueth->shram.va +
545 TIMESYNC_FW_WC_CYCLECOUNT_OFFSET);
546
547 /* Rounding of current_cycle count to next second */
548 start_offset = roundup(current_cycle, MSEC_PER_SEC);
549
550 hi_lo_writeq(start_offset, emac->prueth->shram.va +
551 TIMESYNC_FW_WC_SYNCOUT_START_TIME_CYCLECOUNT_OFFSET);
552
553 return 0;
554 }
555
556 const struct icss_iep_clockops prueth_iep_clockops = {
557 .settime = prueth_iep_settime,
558 .gettime = prueth_iep_gettime,
559 .perout_enable = prueth_perout_enable,
560 };
561
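/* Register the XDP RX queue info for the RX channel and attach its page
 * pool as the memory model; the registration is undone if attaching the
 * memory model fails.
 */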
562 static int prueth_create_xdp_rxqs(struct prueth_emac *emac)
563 {
564 struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
565 struct page_pool *pool = emac->rx_chns.pg_pool;
566 int ret;
567
568 ret = xdp_rxq_info_reg(rxq, emac->ndev, 0, emac->napi_rx.napi_id);
569 if (ret)
570 return ret;
571
572 ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
573 if (ret)
574 xdp_rxq_info_unreg(rxq);
575
576 return ret;
577 }
578
579 static void prueth_destroy_xdp_rxqs(struct prueth_emac *emac)
580 {
581 struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
582
583 if (!xdp_rxq_info_is_reg(rxq))
584 return;
585
586 xdp_rxq_info_unreg(rxq);
587 }
588
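/* Add a multicast address on behalf of @ndev (or its VLAN upper): OR this
 * port into the existing FDB membership for the (address, VLAN) pair and
 * update the VLAN table to match.
 */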
589 static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr)
590 {
591 struct net_device *real_dev;
592 struct prueth_emac *emac;
593 int port_mask;
594 u8 vlan_id;
595
596 vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC;
597 real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
598 emac = netdev_priv(real_dev);
599
600 port_mask = BIT(emac->port_id) | icssg_fdb_lookup(emac, addr, vlan_id);
601 icssg_fdb_add_del(emac, addr, vlan_id, port_mask, true);
602 icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, true);
603
604 return 0;
605 }
606
607 static int icssg_prueth_del_mcast(struct net_device *ndev, const u8 *addr)
608 {
609 struct net_device *real_dev;
610 struct prueth_emac *emac;
611 int other_port_mask;
612 int port_mask;
613 u8 vlan_id;
614
615 vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC;
616 real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
617 emac = netdev_priv(real_dev);
618
619 port_mask = BIT(emac->port_id);
620 other_port_mask = port_mask ^ icssg_fdb_lookup(emac, addr, vlan_id);
621
622 icssg_fdb_add_del(emac, addr, vlan_id, port_mask, false);
623 icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, false);
624
625 if (other_port_mask) {
626 icssg_fdb_add_del(emac, addr, vlan_id, other_port_mask, true);
627 icssg_vtbl_modify(emac, vlan_id, other_port_mask,
628 other_port_mask, true);
629 }
630
631 return 0;
632 }
633
634 static void icssg_prueth_hsr_fdb_add_del(struct prueth_emac *emac,
635 const u8 *addr, u8 vid, bool add)
636 {
637 icssg_fdb_add_del(emac, addr, vid,
638 ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
639 ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
640 ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
641 ICSSG_FDB_ENTRY_BLOCK, add);
642
643 if (add)
644 icssg_vtbl_modify(emac, vid, BIT(emac->port_id),
645 BIT(emac->port_id), add);
646 }
647
648 static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
649 {
650 struct net_device *real_dev;
651 struct prueth_emac *emac;
652 u8 vlan_id, i;
653
654 vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_HSR;
655 real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
656
657 if (is_hsr_master(real_dev)) {
658 for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
659 emac = netdev_priv(hsr_get_port_ndev(real_dev, i));
660 if (!emac)
661 return -EINVAL;
662 icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
663 true);
664 }
665 } else {
666 emac = netdev_priv(real_dev);
667 icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, true);
668 }
669
670 return 0;
671 }
672
673 static int icssg_prueth_hsr_del_mcast(struct net_device *ndev, const u8 *addr)
674 {
675 struct net_device *real_dev;
676 struct prueth_emac *emac;
677 u8 vlan_id, i;
678
679 vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_HSR;
680 real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
681
682 if (is_hsr_master(real_dev)) {
683 for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
684 emac = netdev_priv(hsr_get_port_ndev(real_dev, i));
685 if (!emac)
686 return -EINVAL;
687 icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
688 false);
689 }
690 } else {
691 emac = netdev_priv(real_dev);
692 icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, false);
693 }
694
695 return 0;
696 }
697
698 static int icssg_update_vlan_mcast(struct net_device *vdev, int vid,
699 void *args)
700 {
701 struct prueth_emac *emac = args;
702
703 if (!vdev || !vid)
704 return 0;
705
706 netif_addr_lock_bh(vdev);
707 __hw_addr_sync_multiple(&emac->vlan_mcast_list[vid], &vdev->mc,
708 vdev->addr_len);
709 netif_addr_unlock_bh(vdev);
710
711 if (emac->prueth->is_hsr_offload_mode)
712 __hw_addr_sync_dev(&emac->vlan_mcast_list[vid], vdev,
713 icssg_prueth_hsr_add_mcast,
714 icssg_prueth_hsr_del_mcast);
715 else
716 __hw_addr_sync_dev(&emac->vlan_mcast_list[vid], vdev,
717 icssg_prueth_add_mcast,
718 icssg_prueth_del_mcast);
719
720 return 0;
721 }
722
723 /**
724 * emac_ndo_open - EMAC device open
725 * @ndev: network adapter device
726 *
727 * Called when system wants to start the interface.
728 *
729 * Return: 0 for a successful open, or appropriate error code
730 */
731 static int emac_ndo_open(struct net_device *ndev)
732 {
733 struct prueth_emac *emac = netdev_priv(ndev);
734 int ret, i, num_data_chn = emac->tx_ch_num;
735 struct icssg_flow_cfg __iomem *flow_cfg;
736 struct prueth *prueth = emac->prueth;
737 int slice = prueth_emac_slice(emac);
738 struct device *dev = prueth->dev;
739 int max_rx_flows;
740 int rx_flow;
741
742 /* set h/w MAC as user might have re-configured */
743 ether_addr_copy(emac->mac_addr, ndev->dev_addr);
744
745 icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
746 icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
747
748 /* Notify the stack of the actual queue counts. */
749 ret = netif_set_real_num_tx_queues(ndev, num_data_chn);
750 if (ret) {
751 dev_err(dev, "cannot set real number of tx queues\n");
752 return ret;
753 }
754
755 init_completion(&emac->cmd_complete);
756 ret = prueth_init_tx_chns(emac);
757 if (ret) {
758 dev_err(dev, "failed to init tx channel: %d\n", ret);
759 return ret;
760 }
761
762 max_rx_flows = PRUETH_MAX_RX_FLOWS;
763 ret = prueth_init_rx_chns(emac, &emac->rx_chns, "rx",
764 max_rx_flows, PRUETH_MAX_RX_DESC);
765 if (ret) {
766 dev_err(dev, "failed to init rx channel: %d\n", ret);
767 goto cleanup_tx;
768 }
769
770 ret = prueth_ndev_add_tx_napi(emac);
771 if (ret)
772 goto cleanup_rx;
773
774 /* we use only the highest priority flow for now i.e. @irq[3] */
775 rx_flow = PRUETH_RX_FLOW_DATA;
776 ret = request_irq(emac->rx_chns.irq[rx_flow], prueth_rx_irq,
777 IRQF_TRIGGER_HIGH, dev_name(dev), emac);
778 if (ret) {
779 dev_err(dev, "unable to request RX IRQ\n");
780 goto cleanup_napi;
781 }
782
783 if (!prueth->emacs_initialized) {
784 ret = prueth_emac_common_start(prueth);
785 if (ret)
786 goto free_rx_irq;
787 }
788
789 flow_cfg = emac->dram.va + ICSSG_CONFIG_OFFSET + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET;
790 writew(emac->rx_flow_id_base, &flow_cfg->rx_base_flow);
791 ret = emac_fdb_flow_id_updated(emac);
792
793 if (ret) {
794 netdev_err(ndev, "Failed to update Rx Flow ID %d", ret);
795 goto stop;
796 }
797
798 icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
799
800 ret = request_threaded_irq(emac->tx_ts_irq, NULL, prueth_tx_ts_irq,
801 IRQF_ONESHOT, dev_name(dev), emac);
802 if (ret)
803 goto stop;
804
805 /* Prepare RX */
806 ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
807 if (ret)
808 goto free_tx_ts_irq;
809
810 ret = prueth_create_xdp_rxqs(emac);
811 if (ret)
812 goto reset_rx_chn;
813
814 ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
815 if (ret)
816 goto destroy_xdp_rxqs;
817
818 for (i = 0; i < emac->tx_ch_num; i++) {
819 ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
820 if (ret)
821 goto reset_tx_chan;
822 }
823
824 /* Enable NAPI in Tx and Rx direction */
825 for (i = 0; i < emac->tx_ch_num; i++)
826 napi_enable(&emac->tx_chns[i].napi_tx);
827 napi_enable(&emac->napi_rx);
828
829 /* start PHY */
830 phy_start(ndev->phydev);
831
832 prueth->emacs_initialized++;
833
834 queue_work(system_long_wq, &emac->stats_work.work);
835
836 return 0;
837
838 reset_tx_chan:
839 /* Since the interface is not yet up, there wouldn't be
840 * any SKB for completion. So pass false for free_skb
841 */
842 prueth_reset_tx_chan(emac, i, false);
843 destroy_xdp_rxqs:
844 prueth_destroy_xdp_rxqs(emac);
845 reset_rx_chn:
846 prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
847 free_tx_ts_irq:
848 free_irq(emac->tx_ts_irq, emac);
849 stop:
850 if (!prueth->emacs_initialized)
851 prueth_emac_common_stop(prueth);
852 free_rx_irq:
853 free_irq(emac->rx_chns.irq[rx_flow], emac);
854 cleanup_napi:
855 prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
856 cleanup_rx:
857 prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
858 cleanup_tx:
859 prueth_cleanup_tx_chns(emac);
860
861 return ret;
862 }
863
864 /**
865 * emac_ndo_stop - EMAC device stop
866 * @ndev: network adapter device
867 *
868 * Called when system wants to stop or down the interface.
869 *
870 * Return: Always 0 (Success)
871 */
872 static int emac_ndo_stop(struct net_device *ndev)
873 {
874 struct prueth_emac *emac = netdev_priv(ndev);
875 struct prueth *prueth = emac->prueth;
876 int rx_flow = PRUETH_RX_FLOW_DATA;
877 int max_rx_flows;
878 int ret, i;
879
880 /* inform the upper layers. */
881 netif_tx_stop_all_queues(ndev);
882
883 /* block packets from wire */
884 if (ndev->phydev)
885 phy_stop(ndev->phydev);
886
887 if (emac->prueth->is_hsr_offload_mode)
888 __dev_mc_unsync(ndev, icssg_prueth_hsr_del_mcast);
889 else
890 __dev_mc_unsync(ndev, icssg_prueth_del_mcast);
891
892 atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
893 /* ensure new tdown_cnt value is visible */
894 smp_mb__after_atomic();
895 /* tear down and disable UDMA channels */
896 reinit_completion(&emac->tdown_complete);
897 for (i = 0; i < emac->tx_ch_num; i++)
898 k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);
899
900 ret = wait_for_completion_timeout(&emac->tdown_complete,
901 msecs_to_jiffies(1000));
902 if (!ret)
903 netdev_err(ndev, "tx teardown timeout\n");
904
905 prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
906 for (i = 0; i < emac->tx_ch_num; i++) {
907 napi_disable(&emac->tx_chns[i].napi_tx);
908 hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer);
909 }
910
911 max_rx_flows = PRUETH_MAX_RX_FLOWS;
912 k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
913
914 prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
915 prueth_destroy_xdp_rxqs(emac);
916 napi_disable(&emac->napi_rx);
917 hrtimer_cancel(&emac->rx_hrtimer);
918
919 cancel_work_sync(&emac->rx_mode_work);
920
921 /* Destroying the queued work in ndo_stop() */
922 cancel_delayed_work_sync(&emac->stats_work);
923
924 /* stop PRUs */
925 if (prueth->emacs_initialized == 1)
926 prueth_emac_common_stop(prueth);
927
928 free_irq(emac->tx_ts_irq, emac);
929
930 free_irq(emac->rx_chns.irq[rx_flow], emac);
931 prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
932
933 prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
934 prueth_cleanup_tx_chns(emac);
935
936 prueth->emacs_initialized--;
937
938 return 0;
939 }
940
941 static void emac_ndo_set_rx_mode_work(struct work_struct *work)
942 {
943 struct prueth_emac *emac = container_of(work, struct prueth_emac, rx_mode_work);
944 struct net_device *ndev = emac->ndev;
945 bool promisc, allmulti;
946
947 if (!netif_running(ndev))
948 return;
949
950 promisc = ndev->flags & IFF_PROMISC;
951 allmulti = ndev->flags & IFF_ALLMULTI;
952 icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_DISABLE);
953 icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_DISABLE);
954
955 if (promisc) {
956 icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_ENABLE);
957 icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
958 return;
959 }
960
961 if (allmulti) {
962 icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
963 return;
964 }
965
966 if (emac->prueth->is_hsr_offload_mode) {
967 __dev_mc_sync(ndev, icssg_prueth_hsr_add_mcast,
968 icssg_prueth_hsr_del_mcast);
969 if (rtnl_trylock()) {
970 vlan_for_each(emac->prueth->hsr_dev,
971 icssg_update_vlan_mcast, emac);
972 rtnl_unlock();
973 }
974 } else {
975 __dev_mc_sync(ndev, icssg_prueth_add_mcast,
976 icssg_prueth_del_mcast);
977 if (rtnl_trylock()) {
978 vlan_for_each(ndev, icssg_update_vlan_mcast, emac);
979 rtnl_unlock();
980 }
981 }
982 }
983
984 /**
985 * emac_ndo_set_rx_mode - EMAC set receive mode function
986 * @ndev: The EMAC network adapter
987 *
988 * Called when system wants to set the receive mode of the device.
989 *
990 */
991 static void emac_ndo_set_rx_mode(struct net_device *ndev)
992 {
993 struct prueth_emac *emac = netdev_priv(ndev);
994
995 queue_work(emac->cmd_wq, &emac->rx_mode_work);
996 }
997
998 static netdev_features_t emac_ndo_fix_features(struct net_device *ndev,
999 netdev_features_t features)
1000 {
1001 /* hsr tag insertion offload and hsr dup offload are tightly coupled in
1002 * firmware implementation. Both these features need to be enabled /
1003 * disabled together.
1004 */
1005 if (!(ndev->features & (NETIF_F_HW_HSR_DUP | NETIF_F_HW_HSR_TAG_INS)))
1006 if ((features & NETIF_F_HW_HSR_DUP) ||
1007 (features & NETIF_F_HW_HSR_TAG_INS))
1008 features |= NETIF_F_HW_HSR_DUP |
1009 NETIF_F_HW_HSR_TAG_INS;
1010
1011 if ((ndev->features & NETIF_F_HW_HSR_DUP) ||
1012 (ndev->features & NETIF_F_HW_HSR_TAG_INS))
1013 if (!(features & NETIF_F_HW_HSR_DUP) ||
1014 !(features & NETIF_F_HW_HSR_TAG_INS))
1015 features &= ~(NETIF_F_HW_HSR_DUP |
1016 NETIF_F_HW_HSR_TAG_INS);
1017
1018 return features;
1019 }
1020
1021 static int emac_ndo_vlan_rx_add_vid(struct net_device *ndev,
1022 __be16 proto, u16 vid)
1023 {
1024 struct prueth_emac *emac = netdev_priv(ndev);
1025 struct prueth *prueth = emac->prueth;
1026 int port_mask = BIT(emac->port_id);
1027 int untag_mask = 0;
1028
1029 if (prueth->is_hsr_offload_mode)
1030 port_mask |= BIT(PRUETH_PORT_HOST);
1031
1032 __hw_addr_init(&emac->vlan_mcast_list[vid]);
1033 netdev_dbg(emac->ndev, "VID add vid:%u port_mask:%X untag_mask %X\n",
1034 vid, port_mask, untag_mask);
1035
1036 icssg_vtbl_modify(emac, vid, port_mask, untag_mask, true);
1037 icssg_set_pvid(emac->prueth, vid, emac->port_id);
1038
1039 return 0;
1040 }
1041
1042 static int emac_ndo_vlan_rx_del_vid(struct net_device *ndev,
1043 __be16 proto, u16 vid)
1044 {
1045 struct prueth_emac *emac = netdev_priv(ndev);
1046 struct prueth *prueth = emac->prueth;
1047 int port_mask = BIT(emac->port_id);
1048 int untag_mask = 0;
1049
1050 if (prueth->is_hsr_offload_mode)
1051 port_mask = BIT(PRUETH_PORT_HOST);
1052
1053 netdev_dbg(emac->ndev, "VID del vid:%u port_mask:%X untag_mask %X\n",
1054 vid, port_mask, untag_mask);
1055 icssg_vtbl_modify(emac, vid, port_mask, untag_mask, false);
1056
1057 return 0;
1058 }
1059
1060 /**
1061 * emac_xdp_xmit - Implements ndo_xdp_xmit
1062 * @dev: netdev
1063 * @n: number of frames
1064 * @frames: array of XDP buffer pointers
1065 * @flags: XDP extra info
1066 *
1067 * Return: number of frames successfully sent. Failed frames
1068 * will be freed by the XDP core.
1069 *
1070 * For error cases, a negative errno code is returned and no frames
1071 * are transmitted (caller must handle freeing frames).
1072 **/
1073 static int emac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
1074 u32 flags)
1075 {
1076 struct prueth_emac *emac = netdev_priv(dev);
1077 struct net_device *ndev = emac->ndev;
1078 struct xdp_frame *xdpf;
1079 unsigned int q_idx;
1080 int nxmit = 0;
1081 u32 err;
1082 int i;
1083
1084 q_idx = smp_processor_id() % emac->tx_ch_num;
1085
1086 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1087 return -EINVAL;
1088
1089 for (i = 0; i < n; i++) {
1090 xdpf = frames[i];
1091 err = emac_xmit_xdp_frame(emac, xdpf, NULL, q_idx);
1092 if (err != ICSSG_XDP_TX) {
1093 ndev->stats.tx_dropped++;
1094 break;
1095 }
1096 nxmit++;
1097 }
1098
1099 return nxmit;
1100 }
1101
1102 /**
1103 * emac_xdp_setup - add/remove an XDP program
1104 * @emac: emac device
1105 * @bpf: XDP program
1106 *
1107 * Return: Always 0 (Success)
1108 **/
1109 static int emac_xdp_setup(struct prueth_emac *emac, struct netdev_bpf *bpf)
1110 {
1111 struct bpf_prog *prog = bpf->prog;
1112 xdp_features_t val;
1113
1114 val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
1115 NETDEV_XDP_ACT_NDO_XMIT;
1116 xdp_set_features_flag(emac->ndev, val);
1117
1118 if (!emac->xdpi.prog && !prog)
1119 return 0;
1120
1121 WRITE_ONCE(emac->xdp_prog, prog);
1122
1123 xdp_attachment_setup(&emac->xdpi, bpf);
1124
1125 return 0;
1126 }
1127
1128 /**
1129 * emac_ndo_bpf - implements ndo_bpf for icssg_prueth
1130 * @ndev: network adapter device
1131 * @bpf: XDP program
1132 *
1133 * Return: 0 on success, error code on failure.
1134 **/
1135 static int emac_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
1136 {
1137 struct prueth_emac *emac = netdev_priv(ndev);
1138
1139 switch (bpf->command) {
1140 case XDP_SETUP_PROG:
1141 return emac_xdp_setup(emac, bpf);
1142 default:
1143 return -EINVAL;
1144 }
1145 }
1146
1147 static const struct net_device_ops emac_netdev_ops = {
1148 .ndo_open = emac_ndo_open,
1149 .ndo_stop = emac_ndo_stop,
1150 .ndo_start_xmit = icssg_ndo_start_xmit,
1151 .ndo_set_mac_address = eth_mac_addr,
1152 .ndo_validate_addr = eth_validate_addr,
1153 .ndo_tx_timeout = icssg_ndo_tx_timeout,
1154 .ndo_set_rx_mode = emac_ndo_set_rx_mode,
1155 .ndo_eth_ioctl = icssg_ndo_ioctl,
1156 .ndo_get_stats64 = icssg_ndo_get_stats64,
1157 .ndo_get_phys_port_name = icssg_ndo_get_phys_port_name,
1158 .ndo_fix_features = emac_ndo_fix_features,
1159 .ndo_vlan_rx_add_vid = emac_ndo_vlan_rx_add_vid,
1160 .ndo_vlan_rx_kill_vid = emac_ndo_vlan_rx_del_vid,
1161 .ndo_bpf = emac_ndo_bpf,
1162 .ndo_xdp_xmit = emac_xdp_xmit,
1163 };
1164
1165 static int prueth_netdev_init(struct prueth *prueth,
1166 struct device_node *eth_node)
1167 {
1168 int ret, num_tx_chn = PRUETH_MAX_TX_QUEUES;
1169 struct prueth_emac *emac;
1170 struct net_device *ndev;
1171 enum prueth_port port;
1172 const char *irq_name;
1173 enum prueth_mac mac;
1174
1175 port = prueth_node_port(eth_node);
1176 if (port == PRUETH_PORT_INVALID)
1177 return -EINVAL;
1178
1179 mac = prueth_node_mac(eth_node);
1180 if (mac == PRUETH_MAC_INVALID)
1181 return -EINVAL;
1182
1183 ndev = alloc_etherdev_mq(sizeof(*emac), num_tx_chn);
1184 if (!ndev)
1185 return -ENOMEM;
1186
1187 emac = netdev_priv(ndev);
1188 emac->prueth = prueth;
1189 emac->ndev = ndev;
1190 emac->port_id = port;
1191 emac->xdp_prog = NULL;
1192 emac->ndev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
1193 emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
1194 if (!emac->cmd_wq) {
1195 ret = -ENOMEM;
1196 goto free_ndev;
1197 }
1198 INIT_WORK(&emac->rx_mode_work, emac_ndo_set_rx_mode_work);
1199
1200 INIT_DELAYED_WORK(&emac->stats_work, icssg_stats_work_handler);
1201
1202 ret = pruss_request_mem_region(prueth->pruss,
1203 port == PRUETH_PORT_MII0 ?
1204 PRUSS_MEM_DRAM0 : PRUSS_MEM_DRAM1,
1205 &emac->dram);
1206 if (ret) {
1207 dev_err(prueth->dev, "unable to get DRAM: %d\n", ret);
1208 ret = -ENOMEM;
1209 goto free_wq;
1210 }
1211
1212 emac->tx_ch_num = 1;
1213
1214 irq_name = "tx_ts0";
1215 if (emac->port_id == PRUETH_PORT_MII1)
1216 irq_name = "tx_ts1";
1217 emac->tx_ts_irq = platform_get_irq_byname_optional(prueth->pdev, irq_name);
1218 if (emac->tx_ts_irq < 0) {
1219 ret = dev_err_probe(prueth->dev, emac->tx_ts_irq, "could not get tx_ts_irq\n");
1220 goto free;
1221 }
1222
1223 SET_NETDEV_DEV(ndev, prueth->dev);
1224 spin_lock_init(&emac->lock);
1225 mutex_init(&emac->cmd_lock);
1226
1227 emac->phy_node = of_parse_phandle(eth_node, "phy-handle", 0);
1228 if (!emac->phy_node && !of_phy_is_fixed_link(eth_node)) {
1229 dev_err(prueth->dev, "couldn't find phy-handle\n");
1230 ret = -ENODEV;
1231 goto free;
1232 } else if (of_phy_is_fixed_link(eth_node)) {
1233 ret = of_phy_register_fixed_link(eth_node);
1234 if (ret) {
1235 ret = dev_err_probe(prueth->dev, ret,
1236 "failed to register fixed-link phy\n");
1237 goto free;
1238 }
1239
1240 emac->phy_node = eth_node;
1241 }
1242
1243 ret = of_get_phy_mode(eth_node, &emac->phy_if);
1244 if (ret) {
1245 dev_err(prueth->dev, "could not get phy-mode property\n");
1246 goto free;
1247 }
1248
1249 if (emac->phy_if != PHY_INTERFACE_MODE_MII &&
1250 !phy_interface_mode_is_rgmii(emac->phy_if)) {
1251 dev_err(prueth->dev, "PHY mode unsupported %s\n", phy_modes(emac->phy_if));
1252 ret = -EINVAL;
1253 goto free;
1254 }
1255
1256 /* AM65 SR2.0 has TX Internal delay always enabled by hardware
1257 * and it is not possible to disable TX Internal delay. The below
1258 * switch case block describes how we handle different phy modes
1259 * based on hardware restriction.
1260 */
1261 switch (emac->phy_if) {
1262 case PHY_INTERFACE_MODE_RGMII_ID:
1263 emac->phy_if = PHY_INTERFACE_MODE_RGMII_RXID;
1264 break;
1265 case PHY_INTERFACE_MODE_RGMII_TXID:
1266 emac->phy_if = PHY_INTERFACE_MODE_RGMII;
1267 break;
1268 case PHY_INTERFACE_MODE_RGMII:
1269 case PHY_INTERFACE_MODE_RGMII_RXID:
1270 dev_err(prueth->dev, "RGMII mode without TX delay is not supported");
1271 ret = -EINVAL;
1272 goto free;
1273 default:
1274 break;
1275 }
1276
1277 /* get mac address from DT and set private and netdev addr */
1278 ret = of_get_ethdev_address(eth_node, ndev);
1279 if (!is_valid_ether_addr(ndev->dev_addr)) {
1280 eth_hw_addr_random(ndev);
1281 dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n",
1282 port, ndev->dev_addr);
1283 }
1284 ether_addr_copy(emac->mac_addr, ndev->dev_addr);
1285
1286 ndev->dev.of_node = eth_node;
1287 ndev->min_mtu = PRUETH_MIN_PKT_SIZE;
1288 ndev->max_mtu = PRUETH_MAX_MTU;
1289 ndev->netdev_ops = &emac_netdev_ops;
1290 ndev->ethtool_ops = &icssg_ethtool_ops;
1291 ndev->hw_features = NETIF_F_SG;
1292 ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
1293 ndev->hw_features |= NETIF_PRUETH_HSR_OFFLOAD_FEATURES;
1294
1295 netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
1296 hrtimer_setup(&emac->rx_hrtimer, &emac_rx_timer_callback, CLOCK_MONOTONIC,
1297 HRTIMER_MODE_REL_PINNED);
1298 prueth->emac[mac] = emac;
1299
1300 return 0;
1301
1302 free:
1303 pruss_release_mem_region(prueth->pruss, &emac->dram);
1304 free_wq:
1305 destroy_workqueue(emac->cmd_wq);
1306 free_ndev:
1307 emac->ndev = NULL;
1308 prueth->emac[mac] = NULL;
1309 free_netdev(ndev);
1310
1311 return ret;
1312 }
1313
1314 bool prueth_dev_check(const struct net_device *ndev)
1315 {
1316 if (ndev->netdev_ops == &emac_netdev_ops && netif_running(ndev)) {
1317 struct prueth_emac *emac = netdev_priv(ndev);
1318
1319 return emac->prueth->is_switch_mode;
1320 }
1321
1322 return false;
1323 }
1324
1325 static void prueth_offload_fwd_mark_update(struct prueth *prueth)
1326 {
1327 int set_val = 0;
1328 int i;
1329
1330 if (prueth->br_members == (BIT(PRUETH_PORT_MII0) | BIT(PRUETH_PORT_MII1)))
1331 set_val = 1;
1332
1333 dev_dbg(prueth->dev, "set offload_fwd_mark %d\n", set_val);
1334
1335 for (i = PRUETH_MAC0; i < PRUETH_NUM_MACS; i++) {
1336 struct prueth_emac *emac = prueth->emac[i];
1337
1338 if (!emac || !emac->ndev)
1339 continue;
1340
1341 emac->offload_fwd_mark = set_val;
1342 }
1343 }
1344
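/* Restart both firmwares: detach the net_devices, disable and stop both
 * ports, boot the firmwares again, re-enable forwarding and re-attach the
 * net_devices.
 */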
1345 static int prueth_emac_restart(struct prueth *prueth)
1346 {
1347 struct prueth_emac *emac0 = prueth->emac[PRUETH_MAC0];
1348 struct prueth_emac *emac1 = prueth->emac[PRUETH_MAC1];
1349 int ret;
1350
1351 /* Detach the net_device for both PRUeth ports */
1352 if (netif_running(emac0->ndev))
1353 netif_device_detach(emac0->ndev);
1354 if (netif_running(emac1->ndev))
1355 netif_device_detach(emac1->ndev);
1356
1357 /* Disable both PRUeth ports */
1358 ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
1359 ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);
1360 if (ret)
1361 return ret;
1362
1363 /* Stop both PRU cores for both PRUeth ports */
1364 ret = prueth_emac_common_stop(prueth);
1365 if (ret) {
1366 dev_err(prueth->dev, "Failed to stop the firmwares");
1367 return ret;
1368 }
1369
1370 /* Start both pru cores for both PRUeth ports */
1371 ret = prueth_emac_common_start(prueth);
1372 if (ret) {
1373 dev_err(prueth->dev, "Failed to start the firmwares");
1374 return ret;
1375 }
1376
1377 /* Enable forwarding for both PRUeth ports */
1378 ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD);
1379 ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD);
1380
1381 /* Attach the net_device for both PRUeth ports */
1382 netif_device_attach(emac0->ndev);
1383 netif_device_attach(emac1->ndev);
1384
1385 return ret;
1386 }
1387
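/* Re-apply the configuration after a mode change (EMAC, switch or HSR
 * offload): restart both firmwares and, for running interfaces, restore the
 * HSR RX offload state, the default FDB/VLAN table entries and the PVID.
 */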
1388 static void icssg_change_mode(struct prueth *prueth)
1389 {
1390 struct prueth_emac *emac;
1391 int mac, ret;
1392
1393 ret = prueth_emac_restart(prueth);
1394 if (ret) {
1395 dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
1396 return;
1397 }
1398
1399 for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
1400 emac = prueth->emac[mac];
1401 if (prueth->is_hsr_offload_mode) {
1402 if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM)
1403 icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE);
1404 else
1405 icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE);
1406 }
1407
1408 if (netif_running(emac->ndev)) {
1409 icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan,
1410 ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
1411 ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
1412 ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
1413 ICSSG_FDB_ENTRY_BLOCK,
1414 true);
1415 icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID,
1416 BIT(emac->port_id) | DEFAULT_PORT_MASK,
1417 BIT(emac->port_id) | DEFAULT_UNTAG_MASK,
1418 true);
1419 if (prueth->is_hsr_offload_mode)
1420 icssg_vtbl_modify(emac, DEFAULT_VID,
1421 DEFAULT_PORT_MASK,
1422 DEFAULT_UNTAG_MASK, true);
1423 icssg_set_pvid(prueth, emac->port_vlan, emac->port_id);
1424 if (prueth->is_switch_mode)
1425 icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);
1426 }
1427 }
1428 }
1429
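/* Called when one of our ports is added to a bridge: offload the bridge
 * port via switchdev and, once both MII ports are members of the same
 * bridge, switch the firmware to switch mode.
 */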
1430 static int prueth_netdevice_port_link(struct net_device *ndev,
1431 struct net_device *br_ndev,
1432 struct netlink_ext_ack *extack)
1433 {
1434 struct prueth_emac *emac = netdev_priv(ndev);
1435 struct prueth *prueth = emac->prueth;
1436 int err;
1437
1438 if (!prueth->br_members) {
1439 prueth->hw_bridge_dev = br_ndev;
1440 } else {
1441 /* Adding the port to a second bridge is
1442 * unsupported
1443 */
1444 if (prueth->hw_bridge_dev != br_ndev)
1445 return -EOPNOTSUPP;
1446 }
1447
1448 err = switchdev_bridge_port_offload(br_ndev, ndev, emac,
1449 &prueth->prueth_switchdev_nb,
1450 &prueth->prueth_switchdev_bl_nb,
1451 false, extack);
1452 if (err)
1453 return err;
1454
1455 prueth->br_members |= BIT(emac->port_id);
1456
1457 if (!prueth->is_switch_mode) {
1458 if (prueth->br_members & BIT(PRUETH_PORT_MII0) &&
1459 prueth->br_members & BIT(PRUETH_PORT_MII1)) {
1460 prueth->is_switch_mode = true;
1461 prueth->default_vlan = PRUETH_DFLT_VLAN_SW;
1462 emac->port_vlan = prueth->default_vlan;
1463 icssg_change_mode(prueth);
1464 }
1465 }
1466
1467 prueth_offload_fwd_mark_update(prueth);
1468
1469 return NOTIFY_DONE;
1470 }
1471
1472 static void prueth_netdevice_port_unlink(struct net_device *ndev)
1473 {
1474 struct prueth_emac *emac = netdev_priv(ndev);
1475 struct prueth *prueth = emac->prueth;
1476 int ret;
1477
1478 prueth->br_members &= ~BIT(emac->port_id);
1479
1480 if (prueth->is_switch_mode) {
1481 prueth->is_switch_mode = false;
1482 emac->port_vlan = 0;
1483 ret = prueth_emac_restart(prueth);
1484 if (ret) {
1485 dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
1486 return;
1487 }
1488 }
1489
1490 prueth_offload_fwd_mark_update(prueth);
1491
1492 if (!prueth->br_members)
1493 prueth->hw_bridge_dev = NULL;
1494 }
1495
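/* Called when one of our ports is added to an HSR device: once both MII
 * ports are HSR members, switch the firmware to HSR offload mode unless the
 * HSR offload features are disabled on both ports.
 */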
1496 static int prueth_hsr_port_link(struct net_device *ndev)
1497 {
1498 struct prueth_emac *emac = netdev_priv(ndev);
1499 struct prueth *prueth = emac->prueth;
1500 struct prueth_emac *emac0;
1501 struct prueth_emac *emac1;
1502
1503 emac0 = prueth->emac[PRUETH_MAC0];
1504 emac1 = prueth->emac[PRUETH_MAC1];
1505
1506 if (prueth->is_switch_mode)
1507 return -EOPNOTSUPP;
1508
1509 prueth->hsr_members |= BIT(emac->port_id);
1510 if (!prueth->is_hsr_offload_mode) {
1511 if (prueth->hsr_members & BIT(PRUETH_PORT_MII0) &&
1512 prueth->hsr_members & BIT(PRUETH_PORT_MII1)) {
1513 if (!(emac0->ndev->features &
1514 NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
1515 !(emac1->ndev->features &
1516 NETIF_PRUETH_HSR_OFFLOAD_FEATURES))
1517 return -EOPNOTSUPP;
1518 prueth->is_hsr_offload_mode = true;
1519 prueth->default_vlan = PRUETH_DFLT_VLAN_HSR;
1520 emac0->port_vlan = prueth->default_vlan;
1521 emac1->port_vlan = prueth->default_vlan;
1522 icssg_change_mode(prueth);
1523 netdev_dbg(ndev, "Enabling HSR offload mode\n");
1524 }
1525 }
1526
1527 return 0;
1528 }
1529
1530 static void prueth_hsr_port_unlink(struct net_device *ndev)
1531 {
1532 struct prueth_emac *emac = netdev_priv(ndev);
1533 struct prueth *prueth = emac->prueth;
1534 struct prueth_emac *emac0;
1535 struct prueth_emac *emac1;
1536 int ret;
1537
1538 emac0 = prueth->emac[PRUETH_MAC0];
1539 emac1 = prueth->emac[PRUETH_MAC1];
1540
1541 prueth->hsr_members &= ~BIT(emac->port_id);
1542 if (prueth->is_hsr_offload_mode) {
1543 prueth->is_hsr_offload_mode = false;
1544 emac0->port_vlan = 0;
1545 emac1->port_vlan = 0;
1546 prueth->hsr_dev = NULL;
1547 ret = prueth_emac_restart(prueth);
1548 if (ret) {
1549 dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
1550 return;
1551 }
1552 netdev_dbg(ndev, "Disabling HSR Offload mode\n");
1553 }
1554 }
1555
1556 /* netdev notifier */
1557 static int prueth_netdevice_event(struct notifier_block *unused,
1558 unsigned long event, void *ptr)
1559 {
1560 struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
1561 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
1562 struct netdev_notifier_changeupper_info *info;
1563 struct prueth_emac *emac = netdev_priv(ndev);
1564 struct prueth *prueth = emac->prueth;
1565 int ret = NOTIFY_DONE;
1566
1567 if (ndev->netdev_ops != &emac_netdev_ops)
1568 return NOTIFY_DONE;
1569
1570 switch (event) {
1571 case NETDEV_CHANGEUPPER:
1572 info = ptr;
1573
1574 if ((ndev->features & NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
1575 is_hsr_master(info->upper_dev)) {
1576 if (info->linking) {
1577 if (!prueth->hsr_dev) {
1578 prueth->hsr_dev = info->upper_dev;
1579 icssg_class_set_host_mac_addr(prueth->miig_rt,
1580 prueth->hsr_dev->dev_addr);
1581 } else {
1582 if (prueth->hsr_dev != info->upper_dev) {
1583 netdev_dbg(ndev, "Both interfaces must be linked to same upper device\n");
1584 return -EOPNOTSUPP;
1585 }
1586 }
1587 prueth_hsr_port_link(ndev);
1588 } else {
1589 prueth_hsr_port_unlink(ndev);
1590 }
1591 }
1592
1593 if (netif_is_bridge_master(info->upper_dev)) {
1594 if (info->linking)
1595 ret = prueth_netdevice_port_link(ndev, info->upper_dev, extack);
1596 else
1597 prueth_netdevice_port_unlink(ndev);
1598 }
1599 break;
1600 default:
1601 return NOTIFY_DONE;
1602 }
1603
1604 return notifier_from_errno(ret);
1605 }
1606
1607 static int prueth_register_notifiers(struct prueth *prueth)
1608 {
1609 int ret = 0;
1610
1611 prueth->prueth_netdevice_nb.notifier_call = &prueth_netdevice_event;
1612 ret = register_netdevice_notifier(&prueth->prueth_netdevice_nb);
1613 if (ret) {
1614 dev_err(prueth->dev, "can't register netdevice notifier\n");
1615 return ret;
1616 }
1617
1618 ret = prueth_switchdev_register_notifiers(prueth);
1619 if (ret)
1620 unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
1621
1622 return ret;
1623 }
1624
1625 static void prueth_unregister_notifiers(struct prueth *prueth)
1626 {
1627 prueth_switchdev_unregister_notifiers(prueth);
1628 unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
1629 }
1630
1631 static int prueth_probe(struct platform_device *pdev)
1632 {
1633 struct device_node *eth_node, *eth_ports_node;
1634 struct device_node *eth0_node = NULL;
1635 struct device_node *eth1_node = NULL;
1636 struct genpool_data_align gp_data = {
1637 .align = SZ_64K,
1638 };
1639 struct device *dev = &pdev->dev;
1640 struct device_node *np;
1641 struct prueth *prueth;
1642 struct pruss *pruss;
1643 u32 msmc_ram_size;
1644 int i, ret;
1645
1646 np = dev->of_node;
1647
1648 BUILD_BUG_ON_MSG((sizeof(struct prueth_swdata) > PRUETH_NAV_SW_DATA_SIZE),
1649 "insufficient SW_DATA size");
1650
1651 prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
1652 if (!prueth)
1653 return -ENOMEM;
1654
1655 dev_set_drvdata(dev, prueth);
1656 prueth->pdev = pdev;
1657 prueth->pdata = *(const struct prueth_pdata *)device_get_match_data(dev);
1658
1659 prueth->dev = dev;
1660 eth_ports_node = of_get_child_by_name(np, "ethernet-ports");
1661 if (!eth_ports_node)
1662 return -ENOENT;
1663
1664 for_each_child_of_node(eth_ports_node, eth_node) {
1665 u32 reg;
1666
1667 if (strcmp(eth_node->name, "port"))
1668 continue;
1669 ret = of_property_read_u32(eth_node, "reg", &reg);
1670 if (ret < 0) {
1671 dev_err(dev, "%pOF error reading port_id %d\n",
1672 eth_node, ret);
1673 }
1674
1675 of_node_get(eth_node);
1676
1677 if (reg == 0) {
1678 eth0_node = eth_node;
1679 if (!of_device_is_available(eth0_node)) {
1680 of_node_put(eth0_node);
1681 eth0_node = NULL;
1682 }
1683 } else if (reg == 1) {
1684 eth1_node = eth_node;
1685 if (!of_device_is_available(eth1_node)) {
1686 of_node_put(eth1_node);
1687 eth1_node = NULL;
1688 }
1689 } else {
1690 dev_err(dev, "port reg should be 0 or 1\n");
1691 }
1692 }
1693
1694 of_node_put(eth_ports_node);
1695
1696 /* At least one node must be present and available else we fail */
1697 if (!eth0_node && !eth1_node) {
1698 dev_err(dev, "neither port0 nor port1 node available\n");
1699 return -ENODEV;
1700 }
1701
1702 if (eth0_node == eth1_node) {
1703 dev_err(dev, "port0 and port1 can't have same reg\n");
1704 of_node_put(eth0_node);
1705 return -ENODEV;
1706 }
1707
1708 prueth->eth_node[PRUETH_MAC0] = eth0_node;
1709 prueth->eth_node[PRUETH_MAC1] = eth1_node;
1710
1711 prueth->miig_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-g-rt");
1712 if (IS_ERR(prueth->miig_rt)) {
1713 dev_err(dev, "couldn't get ti,mii-g-rt syscon regmap\n");
1714 return -ENODEV;
1715 }
1716
1717 prueth->mii_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-rt");
1718 if (IS_ERR(prueth->mii_rt)) {
1719 dev_err(dev, "couldn't get ti,mii-rt syscon regmap\n");
1720 return -ENODEV;
1721 }
1722
1723 prueth->pa_stats = syscon_regmap_lookup_by_phandle(np, "ti,pa-stats");
1724 if (IS_ERR(prueth->pa_stats)) {
1725 dev_err(dev, "couldn't get ti,pa-stats syscon regmap\n");
1726 prueth->pa_stats = NULL;
1727 }
1728
1729 if (eth0_node || eth1_node) {
1730 ret = prueth_get_cores(prueth, ICSS_SLICE0, false);
1731 if (ret)
1732 goto put_cores;
1733 ret = prueth_get_cores(prueth, ICSS_SLICE1, false);
1734 if (ret)
1735 goto put_cores;
1736 }
1737
1738 pruss = pruss_get(eth0_node ?
1739 prueth->pru[ICSS_SLICE0] : prueth->pru[ICSS_SLICE1]);
1740 if (IS_ERR(pruss)) {
1741 ret = PTR_ERR(pruss);
1742 dev_err(dev, "unable to get pruss handle\n");
1743 goto put_cores;
1744 }
1745
1746 prueth->pruss = pruss;
1747
1748 ret = pruss_request_mem_region(pruss, PRUSS_MEM_SHRD_RAM2,
1749 &prueth->shram);
1750 if (ret) {
1751 dev_err(dev, "unable to get PRUSS SHRD RAM2: %d\n", ret);
1752 goto put_pruss;
1753 }
1754
1755 prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
1756 if (!prueth->sram_pool) {
1757 dev_err(dev, "unable to get SRAM pool\n");
1758 ret = -ENODEV;
1759
1760 goto put_mem;
1761 }
1762
1763 msmc_ram_size = MSMC_RAM_SIZE;
1764 prueth->is_switchmode_supported = prueth->pdata.switch_mode;
1765 if (prueth->is_switchmode_supported)
1766 msmc_ram_size = MSMC_RAM_SIZE_SWITCH_MODE;
1767
1768 /* NOTE: FW bug needs buffer base to be 64KB aligned */
1769 prueth->msmcram.va =
1770 (void __iomem *)gen_pool_alloc_algo(prueth->sram_pool,
1771 msmc_ram_size,
1772 gen_pool_first_fit_align,
1773 &gp_data);
1774
1775 if (!prueth->msmcram.va) {
1776 ret = -ENOMEM;
1777 dev_err(dev, "unable to allocate MSMC resource\n");
1778 goto put_mem;
1779 }
1780 prueth->msmcram.pa = gen_pool_virt_to_phys(prueth->sram_pool,
1781 (unsigned long)prueth->msmcram.va);
1782 prueth->msmcram.size = msmc_ram_size;
1783 memset_io(prueth->msmcram.va, 0, msmc_ram_size);
1784 dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa,
1785 prueth->msmcram.va, prueth->msmcram.size);
1786
1787 prueth->iep0 = icss_iep_get_idx(np, 0);
1788 if (IS_ERR(prueth->iep0)) {
1789 ret = dev_err_probe(dev, PTR_ERR(prueth->iep0), "iep0 get failed\n");
1790 prueth->iep0 = NULL;
1791 goto free_pool;
1792 }
1793
1794 prueth->iep1 = icss_iep_get_idx(np, 1);
1795 if (IS_ERR(prueth->iep1)) {
1796 ret = dev_err_probe(dev, PTR_ERR(prueth->iep1), "iep1 get failed\n");
1797 goto put_iep0;
1798 }
1799
1800 if (prueth->pdata.quirk_10m_link_issue) {
1801 /* Enable IEP1 for FW in 64bit mode as W/A for 10M FD link detect issue under TX
1802 * traffic.
1803 */
1804 icss_iep_init_fw(prueth->iep1);
1805 }
1806
1807 spin_lock_init(&prueth->vtbl_lock);
1808 spin_lock_init(&prueth->stats_lock);
1809 /* setup netdev interfaces */
1810 if (eth0_node) {
1811 ret = prueth_netdev_init(prueth, eth0_node);
1812 if (ret) {
1813 dev_err_probe(dev, ret, "netdev init %s failed\n",
1814 eth0_node->name);
1815 goto exit_iep;
1816 }
1817
1818 prueth->emac[PRUETH_MAC0]->half_duplex =
1819 of_property_read_bool(eth0_node, "ti,half-duplex-capable");
1820
1821 prueth->emac[PRUETH_MAC0]->iep = prueth->iep0;
1822 }
1823
1824 if (eth1_node) {
1825 ret = prueth_netdev_init(prueth, eth1_node);
1826 if (ret) {
1827 dev_err_probe(dev, ret, "netdev init %s failed\n",
1828 eth1_node->name);
1829 goto netdev_exit;
1830 }
1831
1832 prueth->emac[PRUETH_MAC1]->half_duplex =
1833 of_property_read_bool(eth1_node, "ti,half-duplex-capable");
1834
1835 prueth->emac[PRUETH_MAC1]->iep = prueth->iep0;
1836 }
1837
1838 /* register the network devices */
1839 if (eth0_node) {
1840 ret = register_netdev(prueth->emac[PRUETH_MAC0]->ndev);
1841 if (ret) {
1842 dev_err(dev, "can't register netdev for port MII0");
1843 goto netdev_exit;
1844 }
1845
1846 prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev;
1847
1848 ret = emac_phy_connect(prueth->emac[PRUETH_MAC0]);
1849 if (ret) {
1850 dev_err(dev,
1851 "can't connect to MII0 PHY, error -%d", ret);
1852 goto netdev_unregister;
1853 }
1854 phy_attached_info(prueth->emac[PRUETH_MAC0]->ndev->phydev);
1855 }
1856
1857 if (eth1_node) {
1858 ret = register_netdev(prueth->emac[PRUETH_MAC1]->ndev);
1859 if (ret) {
1860 dev_err(dev, "can't register netdev for port MII1");
1861 goto netdev_unregister;
1862 }
1863
1864 prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev;
1865 ret = emac_phy_connect(prueth->emac[PRUETH_MAC1]);
1866 if (ret) {
1867 dev_err(dev,
1868 "can't connect to MII1 PHY, error %d", ret);
1869 goto netdev_unregister;
1870 }
1871 phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev);
1872 }
1873
1874 if (prueth->is_switchmode_supported) {
1875 ret = prueth_register_notifiers(prueth);
1876 if (ret)
1877 goto netdev_unregister;
1878
1879 sprintf(prueth->switch_id, "%s", dev_name(dev));
1880 }
1881
1882 dev_info(dev, "TI PRU ethernet driver initialized: %s EMAC mode\n",
1883 (!eth0_node || !eth1_node) ? "single" : "dual");
1884
1885 if (eth1_node)
1886 of_node_put(eth1_node);
1887 if (eth0_node)
1888 of_node_put(eth0_node);
1889 return 0;
1890
1891 netdev_unregister:
1892 for (i = 0; i < PRUETH_NUM_MACS; i++) {
1893 if (!prueth->registered_netdevs[i])
1894 continue;
1895 if (prueth->emac[i]->ndev->phydev) {
1896 phy_disconnect(prueth->emac[i]->ndev->phydev);
1897 prueth->emac[i]->ndev->phydev = NULL;
1898 }
1899 unregister_netdev(prueth->registered_netdevs[i]);
1900 }
1901
1902 netdev_exit:
1903 for (i = 0; i < PRUETH_NUM_MACS; i++) {
1904 eth_node = prueth->eth_node[i];
1905 if (!eth_node)
1906 continue;
1907
1908 prueth_netdev_exit(prueth, eth_node);
1909 }
1910
1911 exit_iep:
1912 if (prueth->pdata.quirk_10m_link_issue)
1913 icss_iep_exit_fw(prueth->iep1);
1914 icss_iep_put(prueth->iep1);
1915
1916 put_iep0:
1917 icss_iep_put(prueth->iep0);
1918 prueth->iep0 = NULL;
1919 prueth->iep1 = NULL;
1920
1921 free_pool:
1922 gen_pool_free(prueth->sram_pool,
1923 (unsigned long)prueth->msmcram.va, msmc_ram_size);
1924
1925 put_mem:
1926 pruss_release_mem_region(prueth->pruss, &prueth->shram);
1927
1928 put_pruss:
1929 pruss_put(prueth->pruss);
1930
1931 put_cores:
1932 if (eth0_node || eth1_node) {
1933 prueth_put_cores(prueth, ICSS_SLICE0);
1934 of_node_put(eth0_node);
1935
1936 prueth_put_cores(prueth, ICSS_SLICE1);
1937 of_node_put(eth1_node);
1938 }
1939
1940 return ret;
1941 }
1942
1943 static void prueth_remove(struct platform_device *pdev)
1944 {
1945 struct prueth *prueth = platform_get_drvdata(pdev);
1946 struct device_node *eth_node;
1947 int i;
1948
1949 prueth_unregister_notifiers(prueth);
1950
1951 for (i = 0; i < PRUETH_NUM_MACS; i++) {
1952 if (!prueth->registered_netdevs[i])
1953 continue;
1954 phy_stop(prueth->emac[i]->ndev->phydev);
1955 phy_disconnect(prueth->emac[i]->ndev->phydev);
1956 prueth->emac[i]->ndev->phydev = NULL;
1957 unregister_netdev(prueth->registered_netdevs[i]);
1958 }
1959
1960 for (i = 0; i < PRUETH_NUM_MACS; i++) {
1961 eth_node = prueth->eth_node[i];
1962 if (!eth_node)
1963 continue;
1964
1965 prueth_netdev_exit(prueth, eth_node);
1966 }
1967
1968 if (prueth->pdata.quirk_10m_link_issue)
1969 icss_iep_exit_fw(prueth->iep1);
1970
1971 icss_iep_put(prueth->iep1);
1972 icss_iep_put(prueth->iep0);
1973
1974 gen_pool_free(prueth->sram_pool,
1975 (unsigned long)prueth->msmcram.va,
1976 MSMC_RAM_SIZE);
1977
1978 pruss_release_mem_region(prueth->pruss, &prueth->shram);
1979
1980 pruss_put(prueth->pruss);
1981
1982 if (prueth->eth_node[PRUETH_MAC1])
1983 prueth_put_cores(prueth, ICSS_SLICE1);
1984
1985 if (prueth->eth_node[PRUETH_MAC0])
1986 prueth_put_cores(prueth, ICSS_SLICE0);
1987 }
1988
1989 static const struct prueth_pdata am654_icssg_pdata = {
1990 .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
1991 .quirk_10m_link_issue = 1,
1992 .switch_mode = 1,
1993 };
1994
1995 static const struct prueth_pdata am64x_icssg_pdata = {
1996 .fdqring_mode = K3_RINGACC_RING_MODE_RING,
1997 .quirk_10m_link_issue = 1,
1998 .switch_mode = 1,
1999 };
2000
2001 static const struct of_device_id prueth_dt_match[] = {
2002 { .compatible = "ti,am654-icssg-prueth", .data = &am654_icssg_pdata },
2003 { .compatible = "ti,am642-icssg-prueth", .data = &am64x_icssg_pdata },
2004 { /* sentinel */ }
2005 };
2006 MODULE_DEVICE_TABLE(of, prueth_dt_match);
2007
2008 static struct platform_driver prueth_driver = {
2009 .probe = prueth_probe,
2010 .remove = prueth_remove,
2011 .driver = {
2012 .name = "icssg-prueth",
2013 .of_match_table = prueth_dt_match,
2014 .pm = &prueth_dev_pm_ops,
2015 },
2016 };
2017 module_platform_driver(prueth_driver);
2018
2019 MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
2020 MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
2021 MODULE_DESCRIPTION("PRUSS ICSSG Ethernet Driver");
2022 MODULE_LICENSE("GPL");
2023