1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 *
4 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
5 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
6 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
7 */
8
9 #include <linux/of.h>
10 #include <linux/of_mdio.h>
11 #include <linux/of_net.h>
12 #include <linux/of_address.h>
13 #include <linux/mfd/syscon.h>
14 #include <linux/platform_device.h>
15 #include <linux/regmap.h>
16 #include <linux/clk.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/if_vlan.h>
19 #include <linux/reset.h>
20 #include <linux/tcp.h>
21 #include <linux/interrupt.h>
22 #include <linux/pinctrl/devinfo.h>
23 #include <linux/phylink.h>
24 #include <linux/pcs/pcs-mtk-lynxi.h>
25 #include <linux/jhash.h>
26 #include <linux/bitfield.h>
27 #include <net/dsa.h>
28 #include <net/dst_metadata.h>
29 #include <net/page_pool/helpers.h>
30
31 #include "mtk_eth_soc.h"
32 #include "mtk_wed.h"
33
34 static int mtk_msg_level = -1;
35 module_param_named(msg_level, mtk_msg_level, int, 0);
36 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
37
38 #define MTK_ETHTOOL_STAT(x) { #x, \
39 offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
40
41 #define MTK_ETHTOOL_XDP_STAT(x) { #x, \
42 offsetof(struct mtk_hw_stats, xdp_stats.x) / \
43 sizeof(u64) }
44
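/* Per-SoC register offset maps. Everything below dereferences
 * eth->soc->reg_map, so differences in register layout between the
 * supported SoCs are confined to these tables.
 */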
45 static const struct mtk_reg_map mtk_reg_map = {
46 .tx_irq_mask = 0x1a1c,
47 .tx_irq_status = 0x1a18,
48 .pdma = {
49 .rx_ptr = 0x0900,
50 .rx_cnt_cfg = 0x0904,
51 .pcrx_ptr = 0x0908,
52 .glo_cfg = 0x0a04,
53 .rst_idx = 0x0a08,
54 .delay_irq = 0x0a0c,
55 .irq_status = 0x0a20,
56 .irq_mask = 0x0a28,
57 .adma_rx_dbg0 = 0x0a38,
58 .int_grp = 0x0a50,
59 },
60 .qdma = {
61 .qtx_cfg = 0x1800,
62 .qtx_sch = 0x1804,
63 .rx_ptr = 0x1900,
64 .rx_cnt_cfg = 0x1904,
65 .qcrx_ptr = 0x1908,
66 .glo_cfg = 0x1a04,
67 .rst_idx = 0x1a08,
68 .delay_irq = 0x1a0c,
69 .fc_th = 0x1a10,
70 .tx_sch_rate = 0x1a14,
71 .int_grp = 0x1a20,
72 .hred = 0x1a44,
73 .ctx_ptr = 0x1b00,
74 .dtx_ptr = 0x1b04,
75 .crx_ptr = 0x1b10,
76 .drx_ptr = 0x1b14,
77 .fq_head = 0x1b20,
78 .fq_tail = 0x1b24,
79 .fq_count = 0x1b28,
80 .fq_blen = 0x1b2c,
81 },
82 .gdm1_cnt = 0x2400,
83 .gdma_to_ppe = {
84 [0] = 0x4444,
85 },
86 .ppe_base = 0x0c00,
87 .wdma_base = {
88 [0] = 0x2800,
89 [1] = 0x2c00,
90 },
91 .pse_iq_sta = 0x0110,
92 .pse_oq_sta = 0x0118,
93 };
94
95 static const struct mtk_reg_map mt7628_reg_map = {
96 .tx_irq_mask = 0x0a28,
97 .tx_irq_status = 0x0a20,
98 .pdma = {
99 .rx_ptr = 0x0900,
100 .rx_cnt_cfg = 0x0904,
101 .pcrx_ptr = 0x0908,
102 .glo_cfg = 0x0a04,
103 .rst_idx = 0x0a08,
104 .delay_irq = 0x0a0c,
105 .irq_status = 0x0a20,
106 .irq_mask = 0x0a28,
107 .int_grp = 0x0a50,
108 },
109 };
110
111 static const struct mtk_reg_map mt7986_reg_map = {
112 .tx_irq_mask = 0x461c,
113 .tx_irq_status = 0x4618,
114 .pdma = {
115 .rx_ptr = 0x4100,
116 .rx_cnt_cfg = 0x4104,
117 .pcrx_ptr = 0x4108,
118 .glo_cfg = 0x4204,
119 .rst_idx = 0x4208,
120 .delay_irq = 0x420c,
121 .irq_status = 0x4220,
122 .irq_mask = 0x4228,
123 .adma_rx_dbg0 = 0x4238,
124 .int_grp = 0x4250,
125 },
126 .qdma = {
127 .qtx_cfg = 0x4400,
128 .qtx_sch = 0x4404,
129 .rx_ptr = 0x4500,
130 .rx_cnt_cfg = 0x4504,
131 .qcrx_ptr = 0x4508,
132 .glo_cfg = 0x4604,
133 .rst_idx = 0x4608,
134 .delay_irq = 0x460c,
135 .fc_th = 0x4610,
136 .int_grp = 0x4620,
137 .hred = 0x4644,
138 .ctx_ptr = 0x4700,
139 .dtx_ptr = 0x4704,
140 .crx_ptr = 0x4710,
141 .drx_ptr = 0x4714,
142 .fq_head = 0x4720,
143 .fq_tail = 0x4724,
144 .fq_count = 0x4728,
145 .fq_blen = 0x472c,
146 .tx_sch_rate = 0x4798,
147 },
148 .gdm1_cnt = 0x1c00,
149 .gdma_to_ppe = {
150 [0] = 0x3333,
151 [1] = 0x4444,
152 },
153 .ppe_base = 0x2000,
154 .wdma_base = {
155 [0] = 0x4800,
156 [1] = 0x4c00,
157 },
158 .pse_iq_sta = 0x0180,
159 .pse_oq_sta = 0x01a0,
160 };
161
162 static const struct mtk_reg_map mt7988_reg_map = {
163 .tx_irq_mask = 0x461c,
164 .tx_irq_status = 0x4618,
165 .pdma = {
166 .rx_ptr = 0x6900,
167 .rx_cnt_cfg = 0x6904,
168 .pcrx_ptr = 0x6908,
169 .glo_cfg = 0x6a04,
170 .rst_idx = 0x6a08,
171 .delay_irq = 0x6a0c,
172 .irq_status = 0x6a20,
173 .irq_mask = 0x6a28,
174 .adma_rx_dbg0 = 0x6a38,
175 .int_grp = 0x6a50,
176 },
177 .qdma = {
178 .qtx_cfg = 0x4400,
179 .qtx_sch = 0x4404,
180 .rx_ptr = 0x4500,
181 .rx_cnt_cfg = 0x4504,
182 .qcrx_ptr = 0x4508,
183 .glo_cfg = 0x4604,
184 .rst_idx = 0x4608,
185 .delay_irq = 0x460c,
186 .fc_th = 0x4610,
187 .int_grp = 0x4620,
188 .hred = 0x4644,
189 .ctx_ptr = 0x4700,
190 .dtx_ptr = 0x4704,
191 .crx_ptr = 0x4710,
192 .drx_ptr = 0x4714,
193 .fq_head = 0x4720,
194 .fq_tail = 0x4724,
195 .fq_count = 0x4728,
196 .fq_blen = 0x472c,
197 .tx_sch_rate = 0x4798,
198 },
199 .gdm1_cnt = 0x1c00,
200 .gdma_to_ppe = {
201 [0] = 0x3333,
202 [1] = 0x4444,
203 [2] = 0xcccc,
204 },
205 .ppe_base = 0x2000,
206 .wdma_base = {
207 [0] = 0x4800,
208 [1] = 0x4c00,
209 [2] = 0x5000,
210 },
211 .pse_iq_sta = 0x0180,
212 .pse_oq_sta = 0x01a0,
213 };
214
215 /* strings used by ethtool */
216 static const struct mtk_ethtool_stats {
217 char str[ETH_GSTRING_LEN];
218 u32 offset;
219 } mtk_ethtool_stats[] = {
220 MTK_ETHTOOL_STAT(tx_bytes),
221 MTK_ETHTOOL_STAT(tx_packets),
222 MTK_ETHTOOL_STAT(tx_skip),
223 MTK_ETHTOOL_STAT(tx_collisions),
224 MTK_ETHTOOL_STAT(rx_bytes),
225 MTK_ETHTOOL_STAT(rx_packets),
226 MTK_ETHTOOL_STAT(rx_overflow),
227 MTK_ETHTOOL_STAT(rx_fcs_errors),
228 MTK_ETHTOOL_STAT(rx_short_errors),
229 MTK_ETHTOOL_STAT(rx_long_errors),
230 MTK_ETHTOOL_STAT(rx_checksum_errors),
231 MTK_ETHTOOL_STAT(rx_flow_control_packets),
232 MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect),
233 MTK_ETHTOOL_XDP_STAT(rx_xdp_pass),
234 MTK_ETHTOOL_XDP_STAT(rx_xdp_drop),
235 MTK_ETHTOOL_XDP_STAT(rx_xdp_tx),
236 MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors),
237 MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit),
238 MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors),
239 };
240
241 static const char * const mtk_clks_source_name[] = {
242 "ethif",
243 "sgmiitop",
244 "esw",
245 "gp0",
246 "gp1",
247 "gp2",
248 "gp3",
249 "xgp1",
250 "xgp2",
251 "xgp3",
252 "crypto",
253 "fe",
254 "trgpll",
255 "sgmii_tx250m",
256 "sgmii_rx250m",
257 "sgmii_cdr_ref",
258 "sgmii_cdr_fb",
259 "sgmii2_tx250m",
260 "sgmii2_rx250m",
261 "sgmii2_cdr_ref",
262 "sgmii2_cdr_fb",
263 "sgmii_ck",
264 "eth2pll",
265 "wocpu0",
266 "wocpu1",
267 "netsys0",
268 "netsys1",
269 "ethwarp_wocpu2",
270 "ethwarp_wocpu1",
271 "ethwarp_wocpu0",
272 "top_sgm0_sel",
273 "top_sgm1_sel",
274 "top_eth_gmii_sel",
275 "top_eth_refck_50m_sel",
276 "top_eth_sys_200m_sel",
277 "top_eth_sys_sel",
278 "top_eth_xgmii_sel",
279 "top_eth_mii_sel",
280 "top_netsys_sel",
281 "top_netsys_500m_sel",
282 "top_netsys_pao_2x_sel",
283 "top_netsys_sync_250m_sel",
284 "top_netsys_ppefb_250m_sel",
285 "top_netsys_warp_sel",
286 };
287
288 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
289 {
290 __raw_writel(val, eth->base + reg);
291 }
292
293 u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
294 {
295 return __raw_readl(eth->base + reg);
296 }
297
298 u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg)
299 {
300 u32 val;
301
302 val = mtk_r32(eth, reg);
303 val &= ~mask;
304 val |= set;
305 mtk_w32(eth, val, reg);
306 return reg;
307 }
308
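/* Poll the MDIO indirect-access control register until the hardware clears
 * the PHY_IAC_ACCESS busy bit, giving up with -ETIMEDOUT after
 * PHY_IAC_TIMEOUT.
 */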
309 static int mtk_mdio_busy_wait(struct mtk_eth *eth)
310 {
311 unsigned long t_start = jiffies;
312
313 while (1) {
314 if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
315 return 0;
316 if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
317 break;
318 cond_resched();
319 }
320
321 dev_err(eth->dev, "mdio: MDIO timeout\n");
322 return -ETIMEDOUT;
323 }
324
325 static int _mtk_mdio_write_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
326 u32 write_data)
327 {
328 int ret;
329
330 ret = mtk_mdio_busy_wait(eth);
331 if (ret < 0)
332 return ret;
333
334 mtk_w32(eth, PHY_IAC_ACCESS |
335 PHY_IAC_START_C22 |
336 PHY_IAC_CMD_WRITE |
337 PHY_IAC_REG(phy_reg) |
338 PHY_IAC_ADDR(phy_addr) |
339 PHY_IAC_DATA(write_data),
340 MTK_PHY_IAC);
341
342 ret = mtk_mdio_busy_wait(eth);
343 if (ret < 0)
344 return ret;
345
346 return 0;
347 }
348
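/* Clause 45 writes are a two-step sequence on this controller: an address
 * cycle that latches the target register, followed by the data write cycle.
 */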
349 static int _mtk_mdio_write_c45(struct mtk_eth *eth, u32 phy_addr,
350 u32 devad, u32 phy_reg, u32 write_data)
351 {
352 int ret;
353
354 ret = mtk_mdio_busy_wait(eth);
355 if (ret < 0)
356 return ret;
357
358 mtk_w32(eth, PHY_IAC_ACCESS |
359 PHY_IAC_START_C45 |
360 PHY_IAC_CMD_C45_ADDR |
361 PHY_IAC_REG(devad) |
362 PHY_IAC_ADDR(phy_addr) |
363 PHY_IAC_DATA(phy_reg),
364 MTK_PHY_IAC);
365
366 ret = mtk_mdio_busy_wait(eth);
367 if (ret < 0)
368 return ret;
369
370 mtk_w32(eth, PHY_IAC_ACCESS |
371 PHY_IAC_START_C45 |
372 PHY_IAC_CMD_WRITE |
373 PHY_IAC_REG(devad) |
374 PHY_IAC_ADDR(phy_addr) |
375 PHY_IAC_DATA(write_data),
376 MTK_PHY_IAC);
377
378 ret = mtk_mdio_busy_wait(eth);
379 if (ret < 0)
380 return ret;
381
382 return 0;
383 }
384
385 static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
386 {
387 int ret;
388
389 ret = mtk_mdio_busy_wait(eth);
390 if (ret < 0)
391 return ret;
392
393 mtk_w32(eth, PHY_IAC_ACCESS |
394 PHY_IAC_START_C22 |
395 PHY_IAC_CMD_C22_READ |
396 PHY_IAC_REG(phy_reg) |
397 PHY_IAC_ADDR(phy_addr),
398 MTK_PHY_IAC);
399
400 ret = mtk_mdio_busy_wait(eth);
401 if (ret < 0)
402 return ret;
403
404 return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
405 }
406
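/* Clause 45 reads mirror the write path: an address cycle followed by a
 * read cycle, with the result returned from the IAC data field.
 */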
407 static int _mtk_mdio_read_c45(struct mtk_eth *eth, u32 phy_addr,
408 u32 devad, u32 phy_reg)
409 {
410 int ret;
411
412 ret = mtk_mdio_busy_wait(eth);
413 if (ret < 0)
414 return ret;
415
416 mtk_w32(eth, PHY_IAC_ACCESS |
417 PHY_IAC_START_C45 |
418 PHY_IAC_CMD_C45_ADDR |
419 PHY_IAC_REG(devad) |
420 PHY_IAC_ADDR(phy_addr) |
421 PHY_IAC_DATA(phy_reg),
422 MTK_PHY_IAC);
423
424 ret = mtk_mdio_busy_wait(eth);
425 if (ret < 0)
426 return ret;
427
428 mtk_w32(eth, PHY_IAC_ACCESS |
429 PHY_IAC_START_C45 |
430 PHY_IAC_CMD_C45_READ |
431 PHY_IAC_REG(devad) |
432 PHY_IAC_ADDR(phy_addr),
433 MTK_PHY_IAC);
434
435 ret = mtk_mdio_busy_wait(eth);
436 if (ret < 0)
437 return ret;
438
439 return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
440 }
441
442 static int mtk_mdio_write_c22(struct mii_bus *bus, int phy_addr,
443 int phy_reg, u16 val)
444 {
445 struct mtk_eth *eth = bus->priv;
446
447 return _mtk_mdio_write_c22(eth, phy_addr, phy_reg, val);
448 }
449
450 static int mtk_mdio_write_c45(struct mii_bus *bus, int phy_addr,
451 int devad, int phy_reg, u16 val)
452 {
453 struct mtk_eth *eth = bus->priv;
454
455 return _mtk_mdio_write_c45(eth, phy_addr, devad, phy_reg, val);
456 }
457
458 static int mtk_mdio_read_c22(struct mii_bus *bus, int phy_addr, int phy_reg)
459 {
460 struct mtk_eth *eth = bus->priv;
461
462 return _mtk_mdio_read_c22(eth, phy_addr, phy_reg);
463 }
464
465 static int mtk_mdio_read_c45(struct mii_bus *bus, int phy_addr, int devad,
466 int phy_reg)
467 {
468 struct mtk_eth *eth = bus->priv;
469
470 return _mtk_mdio_read_c45(eth, phy_addr, devad, phy_reg);
471 }
472
473 static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
474 phy_interface_t interface)
475 {
476 u32 val;
477
478 val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
479 ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
480
481 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
482 ETHSYS_TRGMII_MT7621_MASK, val);
483
484 return 0;
485 }
486
487 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
488 phy_interface_t interface)
489 {
490 int ret;
491
492 if (interface == PHY_INTERFACE_MODE_TRGMII) {
493 mtk_w32(eth, TRGMII_MODE, INTF_MODE);
494 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], 500000000);
495 if (ret)
496 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
497 return;
498 }
499
500 dev_err(eth->dev, "Missing PLL configuration, ethernet may not work\n");
501 }
502
503 static void mtk_setup_bridge_switch(struct mtk_eth *eth)
504 {
505 /* Force Port1 XGMAC Link Up */
506 mtk_m32(eth, 0, MTK_XGMAC_FORCE_MODE(MTK_GMAC1_ID),
507 MTK_XGMAC_STS(MTK_GMAC1_ID));
508
509 /* Adjust GSW bridge IPG to 11 */
510 mtk_m32(eth, GSWTX_IPG_MASK | GSWRX_IPG_MASK,
511 (GSW_IPG_11 << GSWTX_IPG_SHIFT) |
512 (GSW_IPG_11 << GSWRX_IPG_SHIFT),
513 MTK_GSW_CFG);
514 }
515
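/* SGMII and 802.3z modes are handled by a separate SGMII PCS; SoCs with the
 * MTK_SHARED_SGMII capability share a single PCS instance between MACs.
 * All other interface modes are driven by the MAC directly.
 */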
516 static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
517 phy_interface_t interface)
518 {
519 struct mtk_mac *mac = container_of(config, struct mtk_mac,
520 phylink_config);
521 struct mtk_eth *eth = mac->hw;
522 unsigned int sid;
523
524 if (interface == PHY_INTERFACE_MODE_SGMII ||
525 phy_interface_mode_is_8023z(interface)) {
526 sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
527 0 : mac->id;
528
529 return eth->sgmii_pcs[sid];
530 }
531
532 return NULL;
533 }
534
535 static int mtk_mac_prepare(struct phylink_config *config, unsigned int mode,
536 phy_interface_t iface)
537 {
538 struct mtk_mac *mac = container_of(config, struct mtk_mac,
539 phylink_config);
540 struct mtk_eth *eth = mac->hw;
541
542 if (mtk_interface_mode_is_xgmii(eth, iface) &&
543 mac->id != MTK_GMAC1_ID) {
544 mtk_m32(mac->hw, XMAC_MCR_TRX_DISABLE,
545 XMAC_MCR_TRX_DISABLE, MTK_XMAC_MCR(mac->id));
546
547 mtk_m32(mac->hw, MTK_XGMAC_FORCE_MODE(mac->id) |
548 MTK_XGMAC_FORCE_LINK(mac->id),
549 MTK_XGMAC_FORCE_MODE(mac->id), MTK_XGMAC_STS(mac->id));
550 }
551
552 return 0;
553 }
554
555 static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
556 const struct phylink_link_state *state)
557 {
558 struct mtk_mac *mac = container_of(config, struct mtk_mac,
559 phylink_config);
560 struct mtk_eth *eth = mac->hw;
561 int val, ge_mode, err = 0;
562 u32 i;
563
564 /* MT76x8 has no hardware settings for the MAC */
565 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
566 mac->interface != state->interface) {
567 /* Setup soc pin functions */
568 switch (state->interface) {
569 case PHY_INTERFACE_MODE_TRGMII:
570 case PHY_INTERFACE_MODE_RGMII_TXID:
571 case PHY_INTERFACE_MODE_RGMII_RXID:
572 case PHY_INTERFACE_MODE_RGMII_ID:
573 case PHY_INTERFACE_MODE_RGMII:
574 case PHY_INTERFACE_MODE_MII:
575 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
576 err = mtk_gmac_rgmii_path_setup(eth, mac->id);
577 if (err)
578 goto init_err;
579 }
580 break;
581 case PHY_INTERFACE_MODE_1000BASEX:
582 case PHY_INTERFACE_MODE_2500BASEX:
583 case PHY_INTERFACE_MODE_SGMII:
584 err = mtk_gmac_sgmii_path_setup(eth, mac->id);
585 if (err)
586 goto init_err;
587 break;
588 case PHY_INTERFACE_MODE_GMII:
589 if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
590 err = mtk_gmac_gephy_path_setup(eth, mac->id);
591 if (err)
592 goto init_err;
593 }
594 break;
595 case PHY_INTERFACE_MODE_INTERNAL:
596 if (mac->id == MTK_GMAC2_ID &&
597 MTK_HAS_CAPS(eth->soc->caps, MTK_2P5GPHY)) {
598 err = mtk_gmac_2p5gphy_path_setup(eth, mac->id);
599 if (err)
600 goto init_err;
601 }
602 break;
603 default:
604 goto err_phy;
605 }
606
607 /* Setup clock for 1st gmac */
608 if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
609 !phy_interface_mode_is_8023z(state->interface) &&
610 MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
611 if (MTK_HAS_CAPS(mac->hw->soc->caps,
612 MTK_TRGMII_MT7621_CLK)) {
613 if (mt7621_gmac0_rgmii_adjust(mac->hw,
614 state->interface))
615 goto err_phy;
616 } else {
617 mtk_gmac0_rgmii_adjust(mac->hw,
618 state->interface);
619
620 /* mt7623_pad_clk_setup */
621 for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
622 mtk_w32(mac->hw,
623 TD_DM_DRVP(8) | TD_DM_DRVN(8),
624 TRGMII_TD_ODT(i));
625
626 /* Assert/release MT7623 RXC reset */
627 mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
628 TRGMII_RCK_CTRL);
629 mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
630 }
631 }
632
633 switch (state->interface) {
634 case PHY_INTERFACE_MODE_MII:
635 case PHY_INTERFACE_MODE_GMII:
636 ge_mode = 1;
637 break;
638 default:
639 ge_mode = 0;
640 break;
641 }
642
643 /* put the gmac into the right mode */
644 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
645 val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
646 val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
647 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
648
649 mac->interface = state->interface;
650 }
651
652 /* SGMII */
653 if (state->interface == PHY_INTERFACE_MODE_SGMII ||
654 phy_interface_mode_is_8023z(state->interface)) {
655 /* The path from GMAC to SGMII will be enabled once the SGMIISYS
656 * setup is done.
657 */
658 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
659
660 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
661 SYSCFG0_SGMII_MASK,
662 ~(u32)SYSCFG0_SGMII_MASK);
663
664 /* Save the syscfg0 value for mac_finish */
665 mac->syscfg0 = val;
666 } else if (phylink_autoneg_inband(mode)) {
667 dev_err(eth->dev,
668 "In-band mode not supported in non SGMII mode!\n");
669 return;
670 }
671
672 /* Setup gmac */
673 if (mtk_interface_mode_is_xgmii(eth, state->interface)) {
674 mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id));
675 mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id));
676
677 if (mac->id == MTK_GMAC1_ID)
678 mtk_setup_bridge_switch(eth);
679 }
680
681 return;
682
683 err_phy:
684 dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
685 mac->id, phy_modes(state->interface));
686 return;
687
688 init_err:
689 dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
690 mac->id, phy_modes(state->interface), err);
691 }
692
693 static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
694 phy_interface_t interface)
695 {
696 struct mtk_mac *mac = container_of(config, struct mtk_mac,
697 phylink_config);
698 struct mtk_eth *eth = mac->hw;
699 u32 mcr_cur, mcr_new;
700
701 /* Enable SGMII */
702 if (interface == PHY_INTERFACE_MODE_SGMII ||
703 phy_interface_mode_is_8023z(interface))
704 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
705 SYSCFG0_SGMII_MASK, mac->syscfg0);
706
707 /* Setup gmac */
708 mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
709 mcr_new = mcr_cur;
710 mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
711 MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_RX_FIFO_CLR_DIS;
712
713 /* Only update control register when needed! */
714 if (mcr_new != mcr_cur)
715 mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
716
717 return 0;
718 }
719
720 static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
721 phy_interface_t interface)
722 {
723 struct mtk_mac *mac = container_of(config, struct mtk_mac,
724 phylink_config);
725
726 if (!mtk_interface_mode_is_xgmii(mac->hw, interface)) {
727 /* GMAC modes */
728 mtk_m32(mac->hw,
729 MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK, 0,
730 MTK_MAC_MCR(mac->id));
731 } else if (mac->id != MTK_GMAC1_ID) {
732 /* XGMAC except for built-in switch */
733 mtk_m32(mac->hw, XMAC_MCR_TRX_DISABLE, XMAC_MCR_TRX_DISABLE,
734 MTK_XMAC_MCR(mac->id));
735 mtk_m32(mac->hw, MTK_XGMAC_FORCE_LINK(mac->id), 0,
736 MTK_XGMAC_STS(mac->id));
737 }
738 }
739
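/* Program the per-queue QDMA shaper for the new link speed. The rate fields
 * appear to encode mantissa * 10^exponent in kbit/s (e.g. MAN=1/EXP=6 for
 * 1 Gbit/s); MT7621 uses slightly larger mantissas, presumably to leave
 * headroom above the nominal line rate.
 */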
740 static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
741 int speed)
742 {
743 const struct mtk_soc_data *soc = eth->soc;
744 u32 ofs, val;
745
746 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
747 return;
748
749 val = MTK_QTX_SCH_MIN_RATE_EN |
750 /* minimum: 10 Mbps */
751 FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
752 FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
753 MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
754 if (mtk_is_netsys_v1(eth))
755 val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
756
757 if (IS_ENABLED(CONFIG_SOC_MT7621)) {
758 switch (speed) {
759 case SPEED_10:
760 val |= MTK_QTX_SCH_MAX_RATE_EN |
761 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
762 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 2) |
763 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
764 break;
765 case SPEED_100:
766 val |= MTK_QTX_SCH_MAX_RATE_EN |
767 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
768 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) |
769 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
770 break;
771 case SPEED_1000:
772 val |= MTK_QTX_SCH_MAX_RATE_EN |
773 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 105) |
774 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
775 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
776 break;
777 default:
778 break;
779 }
780 } else {
781 switch (speed) {
782 case SPEED_10:
783 val |= MTK_QTX_SCH_MAX_RATE_EN |
784 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
785 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
786 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
787 break;
788 case SPEED_100:
789 val |= MTK_QTX_SCH_MAX_RATE_EN |
790 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
791 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
792 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
793 break;
794 case SPEED_1000:
795 val |= MTK_QTX_SCH_MAX_RATE_EN |
796 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
797 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 6) |
798 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
799 break;
800 default:
801 break;
802 }
803 }
804
805 ofs = MTK_QTX_OFFSET * idx;
806 mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
807 }
808
809 static void mtk_gdm_mac_link_up(struct mtk_mac *mac,
810 struct phy_device *phy,
811 unsigned int mode, phy_interface_t interface,
812 int speed, int duplex, bool tx_pause,
813 bool rx_pause)
814 {
815 u32 mcr;
816
817 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
818 mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
819 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
820 MAC_MCR_FORCE_RX_FC);
821
822 /* Configure speed */
823 mac->speed = speed;
824 switch (speed) {
825 case SPEED_2500:
826 case SPEED_1000:
827 mcr |= MAC_MCR_SPEED_1000;
828 break;
829 case SPEED_100:
830 mcr |= MAC_MCR_SPEED_100;
831 break;
832 }
833
834 /* Configure duplex */
835 if (duplex == DUPLEX_FULL)
836 mcr |= MAC_MCR_FORCE_DPX;
837
838 /* Configure pause modes - phylink will avoid these for half duplex */
839 if (tx_pause)
840 mcr |= MAC_MCR_FORCE_TX_FC;
841 if (rx_pause)
842 mcr |= MAC_MCR_FORCE_RX_FC;
843
844 mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK;
845 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
846 }
847
848 static void mtk_xgdm_mac_link_up(struct mtk_mac *mac,
849 struct phy_device *phy,
850 unsigned int mode, phy_interface_t interface,
851 int speed, int duplex, bool tx_pause,
852 bool rx_pause)
853 {
854 u32 mcr;
855
856 if (mac->id == MTK_GMAC1_ID)
857 return;
858
859 /* Eliminate the interference (before link-up) caused by PHY noise */
860 mtk_m32(mac->hw, XMAC_LOGIC_RST, 0, MTK_XMAC_LOGIC_RST(mac->id));
861 mdelay(20);
862 mtk_m32(mac->hw, XMAC_GLB_CNTCLR, XMAC_GLB_CNTCLR,
863 MTK_XMAC_CNT_CTRL(mac->id));
864
865 mtk_m32(mac->hw, MTK_XGMAC_FORCE_LINK(mac->id),
866 MTK_XGMAC_FORCE_LINK(mac->id), MTK_XGMAC_STS(mac->id));
867
868 mcr = mtk_r32(mac->hw, MTK_XMAC_MCR(mac->id));
869 mcr &= ~(XMAC_MCR_FORCE_TX_FC | XMAC_MCR_FORCE_RX_FC |
870 XMAC_MCR_TRX_DISABLE);
871 /* Configure pause modes -
872 * phylink will avoid these for half duplex
873 */
874 if (tx_pause)
875 mcr |= XMAC_MCR_FORCE_TX_FC;
876 if (rx_pause)
877 mcr |= XMAC_MCR_FORCE_RX_FC;
878
879 mtk_w32(mac->hw, mcr, MTK_XMAC_MCR(mac->id));
880 }
881
882 static void mtk_mac_link_up(struct phylink_config *config,
883 struct phy_device *phy,
884 unsigned int mode, phy_interface_t interface,
885 int speed, int duplex, bool tx_pause, bool rx_pause)
886 {
887 struct mtk_mac *mac = container_of(config, struct mtk_mac,
888 phylink_config);
889
890 if (mtk_interface_mode_is_xgmii(mac->hw, interface))
891 mtk_xgdm_mac_link_up(mac, phy, mode, interface, speed, duplex,
892 tx_pause, rx_pause);
893 else
894 mtk_gdm_mac_link_up(mac, phy, mode, interface, speed, duplex,
895 tx_pause, rx_pause);
896 }
897
898 static void mtk_mac_disable_tx_lpi(struct phylink_config *config)
899 {
900 struct mtk_mac *mac = container_of(config, struct mtk_mac,
901 phylink_config);
902 struct mtk_eth *eth = mac->hw;
903
904 mtk_m32(eth, MAC_MCR_EEE100M | MAC_MCR_EEE1G, 0, MTK_MAC_MCR(mac->id));
905 }
906
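/* Program the EEE timers. The timer argument is given in microseconds (per
 * the ethtool EEE interface) and is converted below to the millisecond
 * resolution of the TX idle threshold field.
 */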
907 static int mtk_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
908 bool tx_clk_stop)
909 {
910 struct mtk_mac *mac = container_of(config, struct mtk_mac,
911 phylink_config);
912 struct mtk_eth *eth = mac->hw;
913 u32 val;
914
915 if (mtk_interface_mode_is_xgmii(eth, mac->interface))
916 return -EOPNOTSUPP;
917
918 /* Tx idle timer in ms */
919 timer = DIV_ROUND_UP(timer, 1000);
920
921 /* If the timer is zero, then set LPI_MODE, which allows the
922 * system to enter LPI mode immediately rather than waiting for
923 * the LPI threshold.
924 */
925 if (!timer)
926 val = MAC_EEE_LPI_MODE;
927 else if (FIELD_FIT(MAC_EEE_LPI_TXIDLE_THD, timer))
928 val = FIELD_PREP(MAC_EEE_LPI_TXIDLE_THD, timer);
929 else
930 val = MAC_EEE_LPI_TXIDLE_THD;
931
932 if (tx_clk_stop)
933 val |= MAC_EEE_CKG_TXIDLE;
934
935 /* PHY wake-up time: this field does not have a reset value, so use
936 * the reset values from MT7531 (36us for 100M and 17us for 1000M).
937 */
938 val |= FIELD_PREP(MAC_EEE_WAKEUP_TIME_1000, 17) |
939 FIELD_PREP(MAC_EEE_WAKEUP_TIME_100, 36);
940
941 mtk_w32(eth, val, MTK_MAC_EEECR(mac->id));
942 mtk_m32(eth, 0, MAC_MCR_EEE100M | MAC_MCR_EEE1G, MTK_MAC_MCR(mac->id));
943
944 return 0;
945 }
946
947 static const struct phylink_mac_ops mtk_phylink_ops = {
948 .mac_prepare = mtk_mac_prepare,
949 .mac_select_pcs = mtk_mac_select_pcs,
950 .mac_config = mtk_mac_config,
951 .mac_finish = mtk_mac_finish,
952 .mac_link_down = mtk_mac_link_down,
953 .mac_link_up = mtk_mac_link_up,
954 .mac_disable_tx_lpi = mtk_mac_disable_tx_lpi,
955 .mac_enable_tx_lpi = mtk_mac_enable_tx_lpi,
956 };
957
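/* Apply the MDC divider computed in mtk_mdio_init() and enable MDC turbo
 * mode, which lives in a MISC register on NETSYS v3 and in PPSC on older
 * SoCs.
 */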
958 static void mtk_mdio_config(struct mtk_eth *eth)
959 {
960 u32 val;
961
962 /* Configure MDC Divider */
963 val = FIELD_PREP(PPSC_MDC_CFG, eth->mdc_divider);
964
965 /* Configure MDC Turbo Mode */
966 if (mtk_is_netsys_v3_or_greater(eth))
967 mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3);
968 else
969 val |= PPSC_MDC_TURBO;
970
971 mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC);
972 }
973
974 static int mtk_mdio_init(struct mtk_eth *eth)
975 {
976 unsigned int max_clk = 2500000;
977 struct device_node *mii_np;
978 int ret;
979 u32 val;
980
981 mii_np = of_get_available_child_by_name(eth->dev->of_node, "mdio-bus");
982 if (!mii_np) {
983 dev_err(eth->dev, "no %s child node found", "mdio-bus");
984 return -ENODEV;
985 }
986
987 eth->mii_bus = devm_mdiobus_alloc(eth->dev);
988 if (!eth->mii_bus) {
989 ret = -ENOMEM;
990 goto err_put_node;
991 }
992
993 eth->mii_bus->name = "mdio";
994 eth->mii_bus->read = mtk_mdio_read_c22;
995 eth->mii_bus->write = mtk_mdio_write_c22;
996 eth->mii_bus->read_c45 = mtk_mdio_read_c45;
997 eth->mii_bus->write_c45 = mtk_mdio_write_c45;
998 eth->mii_bus->priv = eth;
999 eth->mii_bus->parent = eth->dev;
1000
1001 snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
1002
1003 if (!of_property_read_u32(mii_np, "clock-frequency", &val)) {
1004 if (val > MDC_MAX_FREQ || val < MDC_MAX_FREQ / MDC_MAX_DIVIDER) {
1005 dev_err(eth->dev, "MDIO clock frequency out of range");
1006 ret = -EINVAL;
1007 goto err_put_node;
1008 }
1009 max_clk = val;
1010 }
1011 eth->mdc_divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
1012 mtk_mdio_config(eth);
1013 dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / eth->mdc_divider);
1014 ret = of_mdiobus_register(eth->mii_bus, mii_np);
1015
1016 err_put_node:
1017 of_node_put(mii_np);
1018 return ret;
1019 }
1020
1021 static void mtk_mdio_cleanup(struct mtk_eth *eth)
1022 {
1023 if (!eth->mii_bus)
1024 return;
1025
1026 mdiobus_unregister(eth->mii_bus);
1027 }
1028
1029 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
1030 {
1031 unsigned long flags;
1032 u32 val;
1033
1034 spin_lock_irqsave(&eth->tx_irq_lock, flags);
1035 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
1036 mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
1037 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
1038 }
1039
1040 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
1041 {
1042 unsigned long flags;
1043 u32 val;
1044
1045 spin_lock_irqsave(&eth->tx_irq_lock, flags);
1046 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
1047 mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
1048 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
1049 }
1050
1051 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
1052 {
1053 unsigned long flags;
1054 u32 val;
1055
1056 spin_lock_irqsave(&eth->rx_irq_lock, flags);
1057 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
1058 mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
1059 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
1060 }
1061
1062 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
1063 {
1064 unsigned long flags;
1065 u32 val;
1066
1067 spin_lock_irqsave(&eth->rx_irq_lock, flags);
1068 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
1069 mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
1070 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
1071 }
1072
1073 static int mtk_set_mac_address(struct net_device *dev, void *p)
1074 {
1075 int ret = eth_mac_addr(dev, p);
1076 struct mtk_mac *mac = netdev_priv(dev);
1077 struct mtk_eth *eth = mac->hw;
1078 const char *macaddr = dev->dev_addr;
1079
1080 if (ret)
1081 return ret;
1082
1083 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
1084 return -EBUSY;
1085
1086 spin_lock_bh(&mac->hw->page_lock);
1087 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
1088 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
1089 MT7628_SDM_MAC_ADRH);
1090 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
1091 (macaddr[4] << 8) | macaddr[5],
1092 MT7628_SDM_MAC_ADRL);
1093 } else {
1094 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
1095 MTK_GDMA_MAC_ADRH(mac->id));
1096 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
1097 (macaddr[4] << 8) | macaddr[5],
1098 MTK_GDMA_MAC_ADRL(mac->id));
1099 }
1100 spin_unlock_bh(&mac->hw->page_lock);
1101
1102 return 0;
1103 }
1104
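/* Fold the hardware MIB counters into the software mtk_hw_stats block. On
 * GDM-based SoCs the byte counters are 64 bit and split across two 32-bit
 * registers, hence the extra read of the high word.
 */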
1105 void mtk_stats_update_mac(struct mtk_mac *mac)
1106 {
1107 struct mtk_hw_stats *hw_stats = mac->hw_stats;
1108 struct mtk_eth *eth = mac->hw;
1109
1110 u64_stats_update_begin(&hw_stats->syncp);
1111
1112 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
1113 hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
1114 hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
1115 hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
1116 hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
1117 hw_stats->rx_checksum_errors +=
1118 mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
1119 } else {
1120 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
1121 unsigned int offs = hw_stats->reg_offset;
1122 u64 stats;
1123
1124 hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
1125 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
1126 if (stats)
1127 hw_stats->rx_bytes += (stats << 32);
1128 hw_stats->rx_packets +=
1129 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
1130 hw_stats->rx_overflow +=
1131 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
1132 hw_stats->rx_fcs_errors +=
1133 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
1134 hw_stats->rx_short_errors +=
1135 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
1136 hw_stats->rx_long_errors +=
1137 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
1138 hw_stats->rx_checksum_errors +=
1139 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
1140 hw_stats->rx_flow_control_packets +=
1141 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
1142
1143 if (mtk_is_netsys_v3_or_greater(eth)) {
1144 hw_stats->tx_skip +=
1145 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x50 + offs);
1146 hw_stats->tx_collisions +=
1147 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x54 + offs);
1148 hw_stats->tx_bytes +=
1149 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x40 + offs);
1150 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x44 + offs);
1151 if (stats)
1152 hw_stats->tx_bytes += (stats << 32);
1153 hw_stats->tx_packets +=
1154 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x48 + offs);
1155 } else {
1156 hw_stats->tx_skip +=
1157 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
1158 hw_stats->tx_collisions +=
1159 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
1160 hw_stats->tx_bytes +=
1161 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
1162 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
1163 if (stats)
1164 hw_stats->tx_bytes += (stats << 32);
1165 hw_stats->tx_packets +=
1166 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
1167 }
1168 }
1169
1170 u64_stats_update_end(&hw_stats->syncp);
1171 }
1172
1173 static void mtk_stats_update(struct mtk_eth *eth)
1174 {
1175 int i;
1176
1177 for (i = 0; i < MTK_MAX_DEVS; i++) {
1178 if (!eth->mac[i] || !eth->mac[i]->hw_stats)
1179 continue;
1180 if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
1181 mtk_stats_update_mac(eth->mac[i]);
1182 spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
1183 }
1184 }
1185 }
1186
1187 static void mtk_get_stats64(struct net_device *dev,
1188 struct rtnl_link_stats64 *storage)
1189 {
1190 struct mtk_mac *mac = netdev_priv(dev);
1191 struct mtk_hw_stats *hw_stats = mac->hw_stats;
1192 unsigned int start;
1193
1194 if (netif_running(dev) && netif_device_present(dev)) {
1195 if (spin_trylock_bh(&hw_stats->stats_lock)) {
1196 mtk_stats_update_mac(mac);
1197 spin_unlock_bh(&hw_stats->stats_lock);
1198 }
1199 }
1200
1201 do {
1202 start = u64_stats_fetch_begin(&hw_stats->syncp);
1203 storage->rx_packets = hw_stats->rx_packets;
1204 storage->tx_packets = hw_stats->tx_packets;
1205 storage->rx_bytes = hw_stats->rx_bytes;
1206 storage->tx_bytes = hw_stats->tx_bytes;
1207 storage->collisions = hw_stats->tx_collisions;
1208 storage->rx_length_errors = hw_stats->rx_short_errors +
1209 hw_stats->rx_long_errors;
1210 storage->rx_over_errors = hw_stats->rx_overflow;
1211 storage->rx_crc_errors = hw_stats->rx_fcs_errors;
1212 storage->rx_errors = hw_stats->rx_checksum_errors;
1213 storage->tx_aborted_errors = hw_stats->tx_skip;
1214 } while (u64_stats_fetch_retry(&hw_stats->syncp, start));
1215
1216 storage->tx_errors = dev->stats.tx_errors;
1217 storage->rx_dropped = dev->stats.rx_dropped;
1218 storage->tx_dropped = dev->stats.tx_dropped;
1219 }
1220
1221 static inline int mtk_max_frag_size(int mtu)
1222 {
1223 /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
1224 if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K)
1225 mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
1226
1227 return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
1228 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1229 }
1230
1231 static inline int mtk_max_buf_size(int frag_size)
1232 {
1233 int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
1234 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1235
1236 WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K);
1237
1238 return buf_size;
1239 }
1240
1241 static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
1242 struct mtk_rx_dma_v2 *dma_rxd)
1243 {
1244 rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
1245 if (!(rxd->rxd2 & RX_DMA_DONE))
1246 return false;
1247
1248 rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
1249 rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
1250 rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
1251 if (mtk_is_netsys_v3_or_greater(eth)) {
1252 rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
1253 rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
1254 }
1255
1256 return true;
1257 }
1258
1259 static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
1260 {
1261 unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
1262 unsigned long data;
1263
1264 data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
1265 get_order(size));
1266
1267 return (void *)data;
1268 }
1269
1270 /* the QDMA core needs scratch memory to be set up */
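/* Each descriptor in the free queue points at one MTK_QDMA_PAGE_SIZE chunk
 * of the scratch pages and is chained to the next one through txd2; the
 * head, tail and count are then handed to the hardware via the fq_*
 * registers.
 */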
1271 static int mtk_init_fq_dma(struct mtk_eth *eth)
1272 {
1273 const struct mtk_soc_data *soc = eth->soc;
1274 dma_addr_t phy_ring_tail;
1275 int cnt = soc->tx.fq_dma_size;
1276 dma_addr_t dma_addr;
1277 int i, j, len;
1278
1279 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM))
1280 eth->scratch_ring = eth->sram_base;
1281 else
1282 eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
1283 cnt * soc->tx.desc_size,
1284 &eth->phy_scratch_ring,
1285 GFP_KERNEL);
1286
1287 if (unlikely(!eth->scratch_ring))
1288 return -ENOMEM;
1289
1290 phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
1291
1292 for (j = 0; j < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); j++) {
1293 len = min_t(int, cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH);
1294 eth->scratch_head[j] = kcalloc(len, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
1295
1296 if (unlikely(!eth->scratch_head[j]))
1297 return -ENOMEM;
1298
1299 dma_addr = dma_map_single(eth->dma_dev,
1300 eth->scratch_head[j], len * MTK_QDMA_PAGE_SIZE,
1301 DMA_FROM_DEVICE);
1302
1303 if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
1304 return -ENOMEM;
1305
1306 for (i = 0; i < len; i++) {
1307 struct mtk_tx_dma_v2 *txd;
1308
1309 txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size;
1310 txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
1311 if (j * MTK_FQ_DMA_LENGTH + i < cnt)
1312 txd->txd2 = eth->phy_scratch_ring +
1313 (j * MTK_FQ_DMA_LENGTH + i + 1) * soc->tx.desc_size;
1314
1315 txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
1316 if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
1317 txd->txd3 |= TX_DMA_PREP_ADDR64(dma_addr + i * MTK_QDMA_PAGE_SIZE);
1318
1319 txd->txd4 = 0;
1320 if (mtk_is_netsys_v2_or_greater(eth)) {
1321 txd->txd5 = 0;
1322 txd->txd6 = 0;
1323 txd->txd7 = 0;
1324 txd->txd8 = 0;
1325 }
1326 }
1327 }
1328
1329 mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
1330 mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
1331 mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
1332 mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
1333
1334 return 0;
1335 }
1336
1337 static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
1338 {
1339 return ring->dma + (desc - ring->phys);
1340 }
1341
1342 static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
1343 void *txd, u32 txd_size)
1344 {
1345 int idx = (txd - ring->dma) / txd_size;
1346
1347 return &ring->buf[idx];
1348 }
1349
1350 static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
1351 struct mtk_tx_dma *dma)
1352 {
1353 return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
1354 }
1355
1356 static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
1357 {
1358 return (dma - ring->dma) / txd_size;
1359 }
1360
1361 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1362 struct xdp_frame_bulk *bq, bool napi)
1363 {
1364 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1365 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
1366 dma_unmap_single(eth->dma_dev,
1367 dma_unmap_addr(tx_buf, dma_addr0),
1368 dma_unmap_len(tx_buf, dma_len0),
1369 DMA_TO_DEVICE);
1370 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
1371 dma_unmap_page(eth->dma_dev,
1372 dma_unmap_addr(tx_buf, dma_addr0),
1373 dma_unmap_len(tx_buf, dma_len0),
1374 DMA_TO_DEVICE);
1375 }
1376 } else {
1377 if (dma_unmap_len(tx_buf, dma_len0)) {
1378 dma_unmap_page(eth->dma_dev,
1379 dma_unmap_addr(tx_buf, dma_addr0),
1380 dma_unmap_len(tx_buf, dma_len0),
1381 DMA_TO_DEVICE);
1382 }
1383
1384 if (dma_unmap_len(tx_buf, dma_len1)) {
1385 dma_unmap_page(eth->dma_dev,
1386 dma_unmap_addr(tx_buf, dma_addr1),
1387 dma_unmap_len(tx_buf, dma_len1),
1388 DMA_TO_DEVICE);
1389 }
1390 }
1391
1392 if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
1393 if (tx_buf->type == MTK_TYPE_SKB) {
1394 struct sk_buff *skb = tx_buf->data;
1395
1396 if (napi)
1397 napi_consume_skb(skb, napi);
1398 else
1399 dev_kfree_skb_any(skb);
1400 } else {
1401 struct xdp_frame *xdpf = tx_buf->data;
1402
1403 if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
1404 xdp_return_frame_rx_napi(xdpf);
1405 else if (bq)
1406 xdp_return_frame_bulk(xdpf, bq);
1407 else
1408 xdp_return_frame(xdpf);
1409 }
1410 }
1411 tx_buf->flags = 0;
1412 tx_buf->data = NULL;
1413 }
1414
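/* Record unmap information for a mapped buffer. The QDMA path only tracks
 * the address/length for later unmapping, while the PDMA path packs two
 * buffers into each hardware descriptor: even-indexed buffers use
 * txd1/PLEN0, odd-indexed ones use txd3/PLEN1.
 */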
1415 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1416 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
1417 size_t size, int idx)
1418 {
1419 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1420 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1421 dma_unmap_len_set(tx_buf, dma_len0, size);
1422 } else {
1423 if (idx & 1) {
1424 txd->txd3 = mapped_addr;
1425 txd->txd2 |= TX_DMA_PLEN1(size);
1426 dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
1427 dma_unmap_len_set(tx_buf, dma_len1, size);
1428 } else {
1429 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1430 txd->txd1 = mapped_addr;
1431 txd->txd2 = TX_DMA_PLEN0(size);
1432 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1433 dma_unmap_len_set(tx_buf, dma_len0, size);
1434 }
1435 }
1436 }
1437
1438 static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
1439 struct mtk_tx_dma_desc_info *info)
1440 {
1441 struct mtk_mac *mac = netdev_priv(dev);
1442 struct mtk_eth *eth = mac->hw;
1443 struct mtk_tx_dma *desc = txd;
1444 u32 data;
1445
1446 WRITE_ONCE(desc->txd1, info->addr);
1447
1448 data = TX_DMA_SWC | TX_DMA_PLEN0(info->size) |
1449 FIELD_PREP(TX_DMA_PQID, info->qid);
1450 if (info->last)
1451 data |= TX_DMA_LS0;
1452 WRITE_ONCE(desc->txd3, data);
1453
1454 data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
1455 if (info->first) {
1456 if (info->gso)
1457 data |= TX_DMA_TSO;
1458 /* tx checksum offload */
1459 if (info->csum)
1460 data |= TX_DMA_CHKSUM;
1461 /* vlan header offload */
1462 if (info->vlan)
1463 data |= TX_DMA_INS_VLAN | info->vlan_tci;
1464 }
1465 WRITE_ONCE(desc->txd4, data);
1466 }
1467
1468 static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
1469 struct mtk_tx_dma_desc_info *info)
1470 {
1471 struct mtk_mac *mac = netdev_priv(dev);
1472 struct mtk_tx_dma_v2 *desc = txd;
1473 struct mtk_eth *eth = mac->hw;
1474 u32 data;
1475
1476 WRITE_ONCE(desc->txd1, info->addr);
1477
1478 data = TX_DMA_PLEN0(info->size);
1479 if (info->last)
1480 data |= TX_DMA_LS0;
1481
1482 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
1483 data |= TX_DMA_PREP_ADDR64(info->addr);
1484
1485 WRITE_ONCE(desc->txd3, data);
1486
1487 /* set forward port */
1488 switch (mac->id) {
1489 case MTK_GMAC1_ID:
1490 data = PSE_GDM1_PORT << TX_DMA_FPORT_SHIFT_V2;
1491 break;
1492 case MTK_GMAC2_ID:
1493 data = PSE_GDM2_PORT << TX_DMA_FPORT_SHIFT_V2;
1494 break;
1495 case MTK_GMAC3_ID:
1496 data = PSE_GDM3_PORT << TX_DMA_FPORT_SHIFT_V2;
1497 break;
1498 }
1499
1500 data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
1501 WRITE_ONCE(desc->txd4, data);
1502
1503 data = 0;
1504 if (info->first) {
1505 if (info->gso)
1506 data |= TX_DMA_TSO_V2;
1507 /* tx checksum offload */
1508 if (info->csum)
1509 data |= TX_DMA_CHKSUM_V2;
1510 if (mtk_is_netsys_v3_or_greater(eth) && netdev_uses_dsa(dev))
1511 data |= TX_DMA_SPTAG_V3;
1512 }
1513 WRITE_ONCE(desc->txd5, data);
1514
1515 data = 0;
1516 if (info->first && info->vlan)
1517 data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
1518 WRITE_ONCE(desc->txd6, data);
1519
1520 WRITE_ONCE(desc->txd7, 0);
1521 WRITE_ONCE(desc->txd8, 0);
1522 }
1523
1524 static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
1525 struct mtk_tx_dma_desc_info *info)
1526 {
1527 struct mtk_mac *mac = netdev_priv(dev);
1528 struct mtk_eth *eth = mac->hw;
1529
1530 if (mtk_is_netsys_v2_or_greater(eth))
1531 mtk_tx_set_dma_desc_v2(dev, txd, info);
1532 else
1533 mtk_tx_set_dma_desc_v1(dev, txd, info);
1534 }
1535
1536 static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
1537 int tx_num, struct mtk_tx_ring *ring, bool gso)
1538 {
1539 struct mtk_tx_dma_desc_info txd_info = {
1540 .size = skb_headlen(skb),
1541 .gso = gso,
1542 .csum = skb->ip_summed == CHECKSUM_PARTIAL,
1543 .vlan = skb_vlan_tag_present(skb),
1544 .qid = skb_get_queue_mapping(skb),
1545 .vlan_tci = skb_vlan_tag_get(skb),
1546 .first = true,
1547 .last = !skb_is_nonlinear(skb),
1548 };
1549 struct netdev_queue *txq;
1550 struct mtk_mac *mac = netdev_priv(dev);
1551 struct mtk_eth *eth = mac->hw;
1552 const struct mtk_soc_data *soc = eth->soc;
1553 struct mtk_tx_dma *itxd, *txd;
1554 struct mtk_tx_dma *itxd_pdma, *txd_pdma;
1555 struct mtk_tx_buf *itx_buf, *tx_buf;
1556 int i, n_desc = 1;
1557 int queue = skb_get_queue_mapping(skb);
1558 int k = 0;
1559
1560 txq = netdev_get_tx_queue(dev, queue);
1561 itxd = ring->next_free;
1562 itxd_pdma = qdma_to_pdma(ring, itxd);
1563 if (itxd == ring->last_free)
1564 return -ENOMEM;
1565
1566 itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
1567 memset(itx_buf, 0, sizeof(*itx_buf));
1568
1569 txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
1570 DMA_TO_DEVICE);
1571 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1572 return -ENOMEM;
1573
1574 mtk_tx_set_dma_desc(dev, itxd, &txd_info);
1575
1576 itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1577 itx_buf->mac_id = mac->id;
1578 setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
1579 k++);
1580
1581 /* TX SG offload */
1582 txd = itxd;
1583 txd_pdma = qdma_to_pdma(ring, txd);
1584
1585 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1586 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1587 unsigned int offset = 0;
1588 int frag_size = skb_frag_size(frag);
1589
1590 while (frag_size) {
1591 bool new_desc = true;
1592
1593 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
1594 (i & 0x1)) {
1595 txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1596 txd_pdma = qdma_to_pdma(ring, txd);
1597 if (txd == ring->last_free)
1598 goto err_dma;
1599
1600 n_desc++;
1601 } else {
1602 new_desc = false;
1603 }
1604
1605 memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1606 txd_info.size = min_t(unsigned int, frag_size,
1607 soc->tx.dma_max_len);
1608 txd_info.qid = queue;
1609 txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
1610 !(frag_size - txd_info.size);
1611 txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
1612 offset, txd_info.size,
1613 DMA_TO_DEVICE);
1614 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1615 goto err_dma;
1616
1617 mtk_tx_set_dma_desc(dev, txd, &txd_info);
1618
1619 tx_buf = mtk_desc_to_tx_buf(ring, txd,
1620 soc->tx.desc_size);
1621 if (new_desc)
1622 memset(tx_buf, 0, sizeof(*tx_buf));
1623 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1624 tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
1625 tx_buf->mac_id = mac->id;
1626
1627 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
1628 txd_info.size, k++);
1629
1630 frag_size -= txd_info.size;
1631 offset += txd_info.size;
1632 }
1633 }
1634
1635 /* store skb to cleanup */
1636 itx_buf->type = MTK_TYPE_SKB;
1637 itx_buf->data = skb;
1638
1639 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1640 if (k & 0x1)
1641 txd_pdma->txd2 |= TX_DMA_LS0;
1642 else
1643 txd_pdma->txd2 |= TX_DMA_LS1;
1644 }
1645
1646 netdev_tx_sent_queue(txq, skb->len);
1647 skb_tx_timestamp(skb);
1648
1649 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1650 atomic_sub(n_desc, &ring->free_count);
1651
1652 /* make sure that all changes to the dma ring are flushed before we
1653 * continue
1654 */
1655 wmb();
1656
1657 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1658 if (netif_xmit_stopped(txq) || !netdev_xmit_more())
1659 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1660 } else {
1661 int next_idx;
1662
1663 next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_size),
1664 ring->dma_size);
1665 mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
1666 }
1667
1668 return 0;
1669
1670 err_dma:
1671 do {
1672 tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
1673
1674 /* unmap dma */
1675 mtk_tx_unmap(eth, tx_buf, NULL, false);
1676
1677 itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1678 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
1679 itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
1680
1681 itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
1682 itxd_pdma = qdma_to_pdma(ring, itxd);
1683 } while (itxd != txd);
1684
1685 return -ENOMEM;
1686 }
1687
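/* Worst-case descriptor count for a skb: one for the linear area plus one
 * per fragment; for GSO each fragment is counted in dma_max_len sized
 * chunks instead.
 */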
1688 static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
1689 {
1690 int i, nfrags = 1;
1691 skb_frag_t *frag;
1692
1693 if (skb_is_gso(skb)) {
1694 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1695 frag = &skb_shinfo(skb)->frags[i];
1696 nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1697 eth->soc->tx.dma_max_len);
1698 }
1699 } else {
1700 nfrags += skb_shinfo(skb)->nr_frags;
1701 }
1702
1703 return nfrags;
1704 }
1705
1706 static int mtk_queue_stopped(struct mtk_eth *eth)
1707 {
1708 int i;
1709
1710 for (i = 0; i < MTK_MAX_DEVS; i++) {
1711 if (!eth->netdev[i])
1712 continue;
1713 if (netif_queue_stopped(eth->netdev[i]))
1714 return 1;
1715 }
1716
1717 return 0;
1718 }
1719
1720 static void mtk_wake_queue(struct mtk_eth *eth)
1721 {
1722 int i;
1723
1724 for (i = 0; i < MTK_MAX_DEVS; i++) {
1725 if (!eth->netdev[i])
1726 continue;
1727 netif_tx_wake_all_queues(eth->netdev[i]);
1728 }
1729 }
1730
1731 static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1732 {
1733 struct mtk_mac *mac = netdev_priv(dev);
1734 struct mtk_eth *eth = mac->hw;
1735 struct mtk_tx_ring *ring = &eth->tx_ring;
1736 struct net_device_stats *stats = &dev->stats;
1737 bool gso = false;
1738 int tx_num;
1739
1740 /* normally we can rely on the stack not calling this more than once,
1741 * however we have 2 queues running on the same ring so we need to lock
1742 * the ring access
1743 */
1744 spin_lock(&eth->page_lock);
1745
1746 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1747 goto drop;
1748
1749 tx_num = mtk_cal_txd_req(eth, skb);
1750 if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1751 netif_tx_stop_all_queues(dev);
1752 netif_err(eth, tx_queued, dev,
1753 "Tx Ring full when queue awake!\n");
1754 spin_unlock(&eth->page_lock);
1755 return NETDEV_TX_BUSY;
1756 }
1757
1758 /* TSO: fill MSS info in tcp checksum field */
1759 if (skb_is_gso(skb)) {
1760 if (skb_cow_head(skb, 0)) {
1761 netif_warn(eth, tx_err, dev,
1762 "GSO expand head fail.\n");
1763 goto drop;
1764 }
1765
1766 if (skb_shinfo(skb)->gso_type &
1767 (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1768 gso = true;
1769 tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1770 }
1771 }
1772
1773 if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1774 goto drop;
1775
1776 if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1777 netif_tx_stop_all_queues(dev);
1778
1779 spin_unlock(&eth->page_lock);
1780
1781 return NETDEV_TX_OK;
1782
1783 drop:
1784 spin_unlock(&eth->page_lock);
1785 stats->tx_dropped++;
1786 dev_kfree_skb_any(skb);
1787 return NETDEV_TX_OK;
1788 }
1789
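/* With hardware LRO, RX traffic is spread across several rings; return the
 * first ring that has a completed descriptor, or NULL if none do. Without
 * LRO only ring 0 is used.
 */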
1790 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1791 {
1792 int i;
1793 struct mtk_rx_ring *ring;
1794 int idx;
1795
1796 if (!eth->hwlro)
1797 return &eth->rx_ring[0];
1798
1799 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1800 struct mtk_rx_dma *rxd;
1801
1802 ring = &eth->rx_ring[i];
1803 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1804 rxd = ring->dma + idx * eth->soc->rx.desc_size;
1805 if (rxd->rxd2 & RX_DMA_DONE) {
1806 ring->calc_idx_update = true;
1807 return ring;
1808 }
1809 }
1810
1811 return NULL;
1812 }
1813
1814 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
1815 {
1816 struct mtk_rx_ring *ring;
1817 int i;
1818
1819 if (!eth->hwlro) {
1820 ring = &eth->rx_ring[0];
1821 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1822 } else {
1823 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1824 ring = &eth->rx_ring[i];
1825 if (ring->calc_idx_update) {
1826 ring->calc_idx_update = false;
1827 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1828 }
1829 }
1830 }
1831 }
1832
1833 static bool mtk_page_pool_enabled(struct mtk_eth *eth)
1834 {
1835 return mtk_is_netsys_v2_or_greater(eth);
1836 }
1837
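/* Create a page pool for an RX ring and register it as that ring's XDP
 * memory model. The DMA direction depends on whether an XDP program is
 * attached, since XDP_TX needs bidirectional mappings.
 */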
1838 static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
1839 struct xdp_rxq_info *xdp_q,
1840 int id, int size)
1841 {
1842 struct page_pool_params pp_params = {
1843 .order = 0,
1844 .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
1845 .pool_size = size,
1846 .nid = NUMA_NO_NODE,
1847 .dev = eth->dma_dev,
1848 .offset = MTK_PP_HEADROOM,
1849 .max_len = MTK_PP_MAX_BUF_SIZE,
1850 };
1851 struct page_pool *pp;
1852 int err;
1853
1854 pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
1855 : DMA_FROM_DEVICE;
1856 pp = page_pool_create(&pp_params);
1857 if (IS_ERR(pp))
1858 return pp;
1859
1860 err = __xdp_rxq_info_reg(xdp_q, eth->dummy_dev, id,
1861 eth->rx_napi.napi_id, PAGE_SIZE);
1862 if (err < 0)
1863 goto err_free_pp;
1864
1865 err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
1866 if (err)
1867 goto err_unregister_rxq;
1868
1869 return pp;
1870
1871 err_unregister_rxq:
1872 xdp_rxq_info_unreg(xdp_q);
1873 err_free_pp:
1874 page_pool_destroy(pp);
1875
1876 return ERR_PTR(err);
1877 }
1878
1879 static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
1880 gfp_t gfp_mask)
1881 {
1882 struct page *page;
1883
1884 page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
1885 if (!page)
1886 return NULL;
1887
1888 *dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
1889 return page_address(page);
1890 }
1891
1892 static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
1893 {
1894 if (ring->page_pool)
1895 page_pool_put_full_page(ring->page_pool,
1896 virt_to_head_page(data), napi);
1897 else
1898 skb_free_frag(data);
1899 }
1900
1901 static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
1902 struct mtk_tx_dma_desc_info *txd_info,
1903 struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
1904 void *data, u16 headroom, int index, bool dma_map)
1905 {
1906 struct mtk_tx_ring *ring = &eth->tx_ring;
1907 struct mtk_mac *mac = netdev_priv(dev);
1908 struct mtk_tx_dma *txd_pdma;
1909
1910 if (dma_map) { /* ndo_xdp_xmit */
1911 txd_info->addr = dma_map_single(eth->dma_dev, data,
1912 txd_info->size, DMA_TO_DEVICE);
1913 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
1914 return -ENOMEM;
1915
1916 tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1917 } else {
1918 struct page *page = virt_to_head_page(data);
1919
1920 txd_info->addr = page_pool_get_dma_addr(page) +
1921 sizeof(struct xdp_frame) + headroom;
1922 dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
1923 txd_info->size, DMA_BIDIRECTIONAL);
1924 }
1925 mtk_tx_set_dma_desc(dev, txd, txd_info);
1926
1927 tx_buf->mac_id = mac->id;
1928 tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
1929 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1930
1931 txd_pdma = qdma_to_pdma(ring, txd);
1932 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
1933 index);
1934
1935 return 0;
1936 }
1937
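/* Map an xdp_frame (and any fragments) onto free TX descriptors and kick
 * the DMA engine. dma_map is true for ndo_xdp_xmit frames, which need a
 * fresh DMA mapping; XDP_TX frames reuse their page_pool mapping. On error
 * every descriptor taken so far is unwound again.
 */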
1938 static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
1939 struct net_device *dev, bool dma_map)
1940 {
1941 struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
1942 const struct mtk_soc_data *soc = eth->soc;
1943 struct mtk_tx_ring *ring = &eth->tx_ring;
1944 struct mtk_mac *mac = netdev_priv(dev);
1945 struct mtk_tx_dma_desc_info txd_info = {
1946 .size = xdpf->len,
1947 .first = true,
1948 .last = !xdp_frame_has_frags(xdpf),
1949 .qid = mac->id,
1950 };
1951 int err, index = 0, n_desc = 1, nr_frags;
1952 struct mtk_tx_buf *htx_buf, *tx_buf;
1953 struct mtk_tx_dma *htxd, *txd;
1954 void *data = xdpf->data;
1955
1956 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1957 return -EBUSY;
1958
1959 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
1960 if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
1961 return -EBUSY;
1962
1963 spin_lock(&eth->page_lock);
1964
1965 txd = ring->next_free;
1966 if (txd == ring->last_free) {
1967 spin_unlock(&eth->page_lock);
1968 return -ENOMEM;
1969 }
1970 htxd = txd;
1971
1972 tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size);
1973 memset(tx_buf, 0, sizeof(*tx_buf));
1974 htx_buf = tx_buf;
1975
1976 for (;;) {
1977 err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
1978 data, xdpf->headroom, index, dma_map);
1979 if (err < 0)
1980 goto unmap;
1981
1982 if (txd_info.last)
1983 break;
1984
1985 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
1986 txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1987 if (txd == ring->last_free)
1988 goto unmap;
1989
1990 tx_buf = mtk_desc_to_tx_buf(ring, txd,
1991 soc->tx.desc_size);
1992 memset(tx_buf, 0, sizeof(*tx_buf));
1993 n_desc++;
1994 }
1995
1996 memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1997 txd_info.size = skb_frag_size(&sinfo->frags[index]);
1998 txd_info.last = index + 1 == nr_frags;
1999 txd_info.qid = mac->id;
2000 data = skb_frag_address(&sinfo->frags[index]);
2001
2002 index++;
2003 }
2004 /* store xdpf for cleanup */
2005 htx_buf->data = xdpf;
2006
2007 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2008 struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, txd);
2009
2010 if (index & 1)
2011 txd_pdma->txd2 |= TX_DMA_LS0;
2012 else
2013 txd_pdma->txd2 |= TX_DMA_LS1;
2014 }
2015
2016 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
2017 atomic_sub(n_desc, &ring->free_count);
2018
2019 /* make sure that all changes to the dma ring are flushed before we
2020 * continue
2021 */
2022 wmb();
2023
2024 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2025 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
2026 } else {
2027 int idx;
2028
2029 idx = txd_to_idx(ring, txd, soc->tx.desc_size);
2030 mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
2031 MT7628_TX_CTX_IDX0);
2032 }
2033
2034 spin_unlock(&eth->page_lock);
2035
2036 return 0;
2037
2038 unmap:
2039 while (htxd != txd) {
2040 tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size);
2041 mtk_tx_unmap(eth, tx_buf, NULL, false);
2042
2043 htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
2044 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2045 struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, htxd);
2046
2047 txd_pdma->txd2 = TX_DMA_DESP2_DEF;
2048 }
2049
2050 htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
2051 }
2052
2053 spin_unlock(&eth->page_lock);
2054
2055 return err;
2056 }
2057
2058 static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
2059 struct xdp_frame **frames, u32 flags)
2060 {
2061 struct mtk_mac *mac = netdev_priv(dev);
2062 struct mtk_hw_stats *hw_stats = mac->hw_stats;
2063 struct mtk_eth *eth = mac->hw;
2064 int i, nxmit = 0;
2065
2066 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2067 return -EINVAL;
2068
2069 for (i = 0; i < num_frame; i++) {
2070 if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
2071 break;
2072 nxmit++;
2073 }
2074
2075 u64_stats_update_begin(&hw_stats->syncp);
2076 hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
2077 hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
2078 u64_stats_update_end(&hw_stats->syncp);
2079
2080 return nxmit;
2081 }
2082
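/* Run the attached XDP program on a received buffer and act on its verdict
 * (pass, redirect, transmit or drop), updating the per-action XDP counters.
 * The returned action tells the caller whether to build an skb or recycle
 * the page.
 */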
2083 static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
2084 struct xdp_buff *xdp, struct net_device *dev)
2085 {
2086 struct mtk_mac *mac = netdev_priv(dev);
2087 struct mtk_hw_stats *hw_stats = mac->hw_stats;
2088 u64 *count = &hw_stats->xdp_stats.rx_xdp_drop;
2089 struct bpf_prog *prog;
2090 u32 act = XDP_PASS;
2091
2092 rcu_read_lock();
2093
2094 prog = rcu_dereference(eth->prog);
2095 if (!prog)
2096 goto out;
2097
2098 act = bpf_prog_run_xdp(prog, xdp);
2099 switch (act) {
2100 case XDP_PASS:
2101 count = &hw_stats->xdp_stats.rx_xdp_pass;
2102 goto update_stats;
2103 case XDP_REDIRECT:
2104 if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
2105 act = XDP_DROP;
2106 break;
2107 }
2108
2109 count = &hw_stats->xdp_stats.rx_xdp_redirect;
2110 goto update_stats;
2111 case XDP_TX: {
2112 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
2113
2114 if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
2115 count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
2116 act = XDP_DROP;
2117 break;
2118 }
2119
2120 count = &hw_stats->xdp_stats.rx_xdp_tx;
2121 goto update_stats;
2122 }
2123 default:
2124 bpf_warn_invalid_xdp_action(dev, prog, act);
2125 fallthrough;
2126 case XDP_ABORTED:
2127 trace_xdp_exception(dev, prog, act);
2128 fallthrough;
2129 case XDP_DROP:
2130 break;
2131 }
2132
2133 page_pool_put_full_page(ring->page_pool,
2134 virt_to_head_page(xdp->data), true);
2135
2136 update_stats:
2137 u64_stats_update_begin(&hw_stats->syncp);
2138 *count = *count + 1;
2139 u64_stats_update_end(&hw_stats->syncp);
2140 out:
2141 rcu_read_unlock();
2142
2143 return act;
2144 }
2145
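/* NAPI RX poll loop: reap completed RX descriptors, refill the ring with
 * fresh buffers, run XDP when a program is attached and pass finished skbs
 * to the stack via GRO. Returns the number of packets processed, which is
 * at most "budget".
 */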
2146 static int mtk_poll_rx(struct napi_struct *napi, int budget,
2147 struct mtk_eth *eth)
2148 {
2149 struct dim_sample dim_sample = {};
2150 struct mtk_rx_ring *ring;
2151 bool xdp_flush = false;
2152 int idx;
2153 struct sk_buff *skb;
2154 u64 addr64 = 0;
2155 u8 *data, *new_data;
2156 struct mtk_rx_dma_v2 *rxd, trxd;
2157 int done = 0, bytes = 0;
2158 dma_addr_t dma_addr = DMA_MAPPING_ERROR;
2159 int ppe_idx = 0;
2160
2161 while (done < budget) {
2162 unsigned int pktlen, *rxdcsum;
2163 struct net_device *netdev;
2164 u32 hash, reason;
2165 int mac = 0;
2166
2167 ring = mtk_get_rx_ring(eth);
2168 if (unlikely(!ring))
2169 goto rx_done;
2170
2171 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
2172 rxd = ring->dma + idx * eth->soc->rx.desc_size;
2173 data = ring->data[idx];
2174
2175 if (!mtk_rx_get_desc(eth, &trxd, rxd))
2176 break;
2177
2178 /* find out which mac the packet comes from. values start at 1 */
2179 if (mtk_is_netsys_v3_or_greater(eth)) {
2180 u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
2181
2182 switch (val) {
2183 case PSE_GDM1_PORT:
2184 case PSE_GDM2_PORT:
2185 mac = val - 1;
2186 break;
2187 case PSE_GDM3_PORT:
2188 mac = MTK_GMAC3_ID;
2189 break;
2190 default:
2191 break;
2192 }
2193 } else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
2194 !(trxd.rxd4 & RX_DMA_SPECIAL_TAG)) {
2195 mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
2196 }
2197
2198 if (unlikely(mac < 0 || mac >= MTK_MAX_DEVS ||
2199 !eth->netdev[mac]))
2200 goto release_desc;
2201
2202 netdev = eth->netdev[mac];
2203 ppe_idx = eth->mac[mac]->ppe_idx;
2204
2205 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
2206 goto release_desc;
2207
2208 pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
2209
2210 /* alloc new buffer */
2211 if (ring->page_pool) {
2212 struct page *page = virt_to_head_page(data);
2213 struct xdp_buff xdp;
2214 u32 ret, metasize;
2215
2216 new_data = mtk_page_pool_get_buff(ring->page_pool,
2217 &dma_addr,
2218 GFP_ATOMIC);
2219 if (unlikely(!new_data)) {
2220 netdev->stats.rx_dropped++;
2221 goto release_desc;
2222 }
2223
2224 dma_sync_single_for_cpu(eth->dma_dev,
2225 page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
2226 pktlen, page_pool_get_dma_dir(ring->page_pool));
2227
2228 xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
2229 xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
2230 true);
2231 xdp_buff_clear_frags_flag(&xdp);
2232
2233 ret = mtk_xdp_run(eth, ring, &xdp, netdev);
2234 if (ret == XDP_REDIRECT)
2235 xdp_flush = true;
2236
2237 if (ret != XDP_PASS)
2238 goto skip_rx;
2239
2240 skb = build_skb(data, PAGE_SIZE);
2241 if (unlikely(!skb)) {
2242 page_pool_put_full_page(ring->page_pool,
2243 page, true);
2244 netdev->stats.rx_dropped++;
2245 goto skip_rx;
2246 }
2247
2248 skb_reserve(skb, xdp.data - xdp.data_hard_start);
2249 skb_put(skb, xdp.data_end - xdp.data);
2250 metasize = xdp.data - xdp.data_meta;
2251 if (metasize)
2252 skb_metadata_set(skb, metasize);
2253 skb_mark_for_recycle(skb);
2254 } else {
2255 if (ring->frag_size <= PAGE_SIZE)
2256 new_data = napi_alloc_frag(ring->frag_size);
2257 else
2258 new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
2259
2260 if (unlikely(!new_data)) {
2261 netdev->stats.rx_dropped++;
2262 goto release_desc;
2263 }
2264
2265 dma_addr = dma_map_single(eth->dma_dev,
2266 new_data + NET_SKB_PAD + eth->ip_align,
2267 ring->buf_size, DMA_FROM_DEVICE);
2268 if (unlikely(dma_mapping_error(eth->dma_dev,
2269 dma_addr))) {
2270 skb_free_frag(new_data);
2271 netdev->stats.rx_dropped++;
2272 goto release_desc;
2273 }
2274
2275 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
2276 addr64 = RX_DMA_GET_ADDR64(trxd.rxd2);
2277
2278 dma_unmap_single(eth->dma_dev, ((u64)trxd.rxd1 | addr64),
2279 ring->buf_size, DMA_FROM_DEVICE);
2280
2281 skb = build_skb(data, ring->frag_size);
2282 if (unlikely(!skb)) {
2283 netdev->stats.rx_dropped++;
2284 skb_free_frag(data);
2285 goto skip_rx;
2286 }
2287
2288 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
2289 skb_put(skb, pktlen);
2290 }
2291
2292 skb->dev = netdev;
2293 bytes += skb->len;
2294
2295 if (mtk_is_netsys_v3_or_greater(eth)) {
2296 reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
2297 hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
2298 if (hash != MTK_RXD5_FOE_ENTRY)
2299 skb_set_hash(skb, jhash_1word(hash, 0),
2300 PKT_HASH_TYPE_L4);
2301 rxdcsum = &trxd.rxd3;
2302 } else {
2303 reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
2304 hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
2305 if (hash != MTK_RXD4_FOE_ENTRY)
2306 skb_set_hash(skb, jhash_1word(hash, 0),
2307 PKT_HASH_TYPE_L4);
2308 rxdcsum = &trxd.rxd4;
2309 }
2310
2311 if (*rxdcsum & eth->soc->rx.dma_l4_valid)
2312 skb->ip_summed = CHECKSUM_UNNECESSARY;
2313 else
2314 skb_checksum_none_assert(skb);
2315 skb->protocol = eth_type_trans(skb, netdev);
2316
2317 /* When using VLAN untagging in combination with DSA, the
2318 * hardware treats the MTK special tag as a VLAN and untags it.
2319 */
2320 if (mtk_is_netsys_v1(eth) && (trxd.rxd2 & RX_DMA_VTAG) &&
2321 netdev_uses_dsa(netdev)) {
2322 unsigned int port = RX_DMA_VPID(trxd.rxd3) & GENMASK(2, 0);
2323
2324 if (port < ARRAY_SIZE(eth->dsa_meta) &&
2325 eth->dsa_meta[port])
2326 skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
2327 }
2328
2329 if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
2330 mtk_ppe_check_skb(eth->ppe[ppe_idx], skb, hash);
2331
2332 skb_record_rx_queue(skb, 0);
2333 napi_gro_receive(napi, skb);
2334
2335 skip_rx:
2336 ring->data[idx] = new_data;
2337 rxd->rxd1 = (unsigned int)dma_addr;
2338 release_desc:
2339 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
2340 if (unlikely(dma_addr == DMA_MAPPING_ERROR))
2341 addr64 = FIELD_GET(RX_DMA_ADDR64_MASK,
2342 rxd->rxd2);
2343 else
2344 addr64 = RX_DMA_PREP_ADDR64(dma_addr);
2345 }
2346
2347 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2348 rxd->rxd2 = RX_DMA_LSO;
2349 else
2350 rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size) | addr64;
2351
2352 ring->calc_idx = idx;
2353 done++;
2354 }
2355
2356 rx_done:
2357 if (done) {
2358 /* make sure that all changes to the dma ring are flushed before
2359 * we continue
2360 */
2361 wmb();
2362 mtk_update_rx_cpu_idx(eth);
2363 }
2364
2365 eth->rx_packets += done;
2366 eth->rx_bytes += bytes;
2367 dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
2368 &dim_sample);
2369 net_dim(&eth->rx_dim, &dim_sample);
2370
2371 if (xdp_flush)
2372 xdp_do_flush();
2373
2374 return done;
2375 }
2376
2377 struct mtk_poll_state {
2378 struct netdev_queue *txq;
2379 unsigned int total;
2380 unsigned int done;
2381 unsigned int bytes;
2382 };
2383
2384 static void
2385 mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac,
2386 struct sk_buff *skb)
2387 {
2388 struct netdev_queue *txq;
2389 struct net_device *dev;
2390 unsigned int bytes = skb->len;
2391
2392 state->total++;
2393 eth->tx_packets++;
2394 eth->tx_bytes += bytes;
2395
2396 dev = eth->netdev[mac];
2397 if (!dev)
2398 return;
2399
2400 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2401 if (state->txq == txq) {
2402 state->done++;
2403 state->bytes += bytes;
2404 return;
2405 }
2406
2407 if (state->txq)
2408 netdev_tx_completed_queue(state->txq, state->done, state->bytes);
2409
2410 state->txq = txq;
2411 state->done = 1;
2412 state->bytes = bytes;
2413 }
2414
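/* Reclaim completed QDMA TX descriptors by walking from the last position
 * released to the CPU up to the hardware's current DMA pointer, unmapping
 * buffers and accounting completed skbs against their netdev queue.
 */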
2415 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
2416 struct mtk_poll_state *state)
2417 {
2418 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2419 struct mtk_tx_ring *ring = &eth->tx_ring;
2420 struct mtk_tx_buf *tx_buf;
2421 struct xdp_frame_bulk bq;
2422 struct mtk_tx_dma *desc;
2423 u32 cpu, dma;
2424
2425 cpu = ring->last_free_ptr;
2426 dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
2427
2428 desc = mtk_qdma_phys_to_virt(ring, cpu);
2429 xdp_frame_bulk_init(&bq);
2430
2431 while ((cpu != dma) && budget) {
2432 u32 next_cpu = desc->txd2;
2433
2434 desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
2435 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
2436 break;
2437
2438 tx_buf = mtk_desc_to_tx_buf(ring, desc,
2439 eth->soc->tx.desc_size);
2440 if (!tx_buf->data)
2441 break;
2442
2443 if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2444 if (tx_buf->type == MTK_TYPE_SKB)
2445 mtk_poll_tx_done(eth, state, tx_buf->mac_id,
2446 tx_buf->data);
2447
2448 budget--;
2449 }
2450 mtk_tx_unmap(eth, tx_buf, &bq, true);
2451
2452 ring->last_free = desc;
2453 atomic_inc(&ring->free_count);
2454
2455 cpu = next_cpu;
2456 }
2457 xdp_flush_frame_bulk(&bq);
2458
2459 ring->last_free_ptr = cpu;
2460 mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
2461
2462 return budget;
2463 }
2464
2465 static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
2466 struct mtk_poll_state *state)
2467 {
2468 struct mtk_tx_ring *ring = &eth->tx_ring;
2469 struct mtk_tx_buf *tx_buf;
2470 struct xdp_frame_bulk bq;
2471 struct mtk_tx_dma *desc;
2472 u32 cpu, dma;
2473
2474 cpu = ring->cpu_idx;
2475 dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
2476 xdp_frame_bulk_init(&bq);
2477
2478 while ((cpu != dma) && budget) {
2479 tx_buf = &ring->buf[cpu];
2480 if (!tx_buf->data)
2481 break;
2482
2483 if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2484 if (tx_buf->type == MTK_TYPE_SKB)
2485 mtk_poll_tx_done(eth, state, 0, tx_buf->data);
2486 budget--;
2487 }
2488 mtk_tx_unmap(eth, tx_buf, &bq, true);
2489
2490 desc = ring->dma + cpu * eth->soc->tx.desc_size;
2491 ring->last_free = desc;
2492 atomic_inc(&ring->free_count);
2493
2494 cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
2495 }
2496 xdp_flush_frame_bulk(&bq);
2497
2498 ring->cpu_idx = cpu;
2499
2500 return budget;
2501 }
2502
2503 static int mtk_poll_tx(struct mtk_eth *eth, int budget)
2504 {
2505 struct mtk_tx_ring *ring = &eth->tx_ring;
2506 struct dim_sample dim_sample = {};
2507 struct mtk_poll_state state = {};
2508
2509 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2510 budget = mtk_poll_tx_qdma(eth, budget, &state);
2511 else
2512 budget = mtk_poll_tx_pdma(eth, budget, &state);
2513
2514 if (state.txq)
2515 netdev_tx_completed_queue(state.txq, state.done, state.bytes);
2516
2517 dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
2518 &dim_sample);
2519 net_dim(&eth->tx_dim, &dim_sample);
2520
2521 if (mtk_queue_stopped(eth) &&
2522 (atomic_read(&ring->free_count) > ring->thresh))
2523 mtk_wake_queue(eth);
2524
2525 return state.total;
2526 }
2527
2528 static void mtk_handle_status_irq(struct mtk_eth *eth)
2529 {
2530 u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
2531
2532 if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
2533 mtk_stats_update(eth);
2534 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
2535 MTK_INT_STATUS2);
2536 }
2537 }
2538
2539 static int mtk_napi_tx(struct napi_struct *napi, int budget)
2540 {
2541 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
2542 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2543 int tx_done = 0;
2544
2545 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2546 mtk_handle_status_irq(eth);
2547 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
2548 tx_done = mtk_poll_tx(eth, budget);
2549
2550 if (unlikely(netif_msg_intr(eth))) {
2551 dev_info(eth->dev,
2552 "done tx %d, intr 0x%08x/0x%x\n", tx_done,
2553 mtk_r32(eth, reg_map->tx_irq_status),
2554 mtk_r32(eth, reg_map->tx_irq_mask));
2555 }
2556
2557 if (tx_done == budget)
2558 return budget;
2559
2560 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
2561 return budget;
2562
2563 if (napi_complete_done(napi, tx_done))
2564 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2565
2566 return tx_done;
2567 }
2568
2569 static int mtk_napi_rx(struct napi_struct *napi, int budget)
2570 {
2571 struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
2572 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2573 int rx_done_total = 0;
2574
2575 mtk_handle_status_irq(eth);
2576
2577 do {
2578 int rx_done;
2579
2580 mtk_w32(eth, eth->soc->rx.irq_done_mask,
2581 reg_map->pdma.irq_status);
2582 rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
2583 rx_done_total += rx_done;
2584
2585 if (unlikely(netif_msg_intr(eth))) {
2586 dev_info(eth->dev,
2587 "done rx %d, intr 0x%08x/0x%x\n", rx_done,
2588 mtk_r32(eth, reg_map->pdma.irq_status),
2589 mtk_r32(eth, reg_map->pdma.irq_mask));
2590 }
2591
2592 if (rx_done_total == budget)
2593 return budget;
2594
2595 } while (mtk_r32(eth, reg_map->pdma.irq_status) &
2596 eth->soc->rx.irq_done_mask);
2597
2598 if (napi_complete_done(napi, rx_done_total))
2599 mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
2600
2601 return rx_done_total;
2602 }
2603
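/* Allocate and initialise the TX descriptor ring (in SRAM when the SoC
 * provides it, otherwise in coherent DMA memory), chain the descriptors
 * together and program the QDMA, or MT7628 PDMA, ring registers and
 * per-queue scheduler defaults.
 */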
2604 static int mtk_tx_alloc(struct mtk_eth *eth)
2605 {
2606 const struct mtk_soc_data *soc = eth->soc;
2607 struct mtk_tx_ring *ring = &eth->tx_ring;
2608 int i, sz = soc->tx.desc_size;
2609 struct mtk_tx_dma_v2 *txd;
2610 int ring_size;
2611 u32 ofs, val;
2612
2613 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
2614 ring_size = MTK_QDMA_RING_SIZE;
2615 else
2616 ring_size = soc->tx.dma_size;
2617
2618 ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
2619 GFP_KERNEL);
2620 if (!ring->buf)
2621 goto no_tx_mem;
2622
2623 if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) {
2624 ring->dma = eth->sram_base + soc->tx.fq_dma_size * sz;
2625 ring->phys = eth->phy_scratch_ring + soc->tx.fq_dma_size * (dma_addr_t)sz;
2626 } else {
2627 ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
2628 &ring->phys, GFP_KERNEL);
2629 }
2630
2631 if (!ring->dma)
2632 goto no_tx_mem;
2633
2634 for (i = 0; i < ring_size; i++) {
2635 int next = (i + 1) % ring_size;
2636 u32 next_ptr = ring->phys + next * sz;
2637
2638 txd = ring->dma + i * sz;
2639 txd->txd2 = next_ptr;
2640 txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
2641 txd->txd4 = 0;
2642 if (mtk_is_netsys_v2_or_greater(eth)) {
2643 txd->txd5 = 0;
2644 txd->txd6 = 0;
2645 txd->txd7 = 0;
2646 txd->txd8 = 0;
2647 }
2648 }
2649
2650 /* On MT7688 (PDMA only) this driver uses the ring->dma structs
2651 * only as the framework. The real HW descriptors are the PDMA
2652 * descriptors in ring->dma_pdma.
2653 */
2654 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2655 ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
2656 &ring->phys_pdma, GFP_KERNEL);
2657 if (!ring->dma_pdma)
2658 goto no_tx_mem;
2659
2660 for (i = 0; i < ring_size; i++) {
2661 ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
2662 ring->dma_pdma[i].txd4 = 0;
2663 }
2664 }
2665
2666 ring->dma_size = ring_size;
2667 atomic_set(&ring->free_count, ring_size - 2);
2668 ring->next_free = ring->dma;
2669 ring->last_free = (void *)txd;
2670 ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz));
2671 ring->thresh = MAX_SKB_FRAGS;
2672
2673 /* make sure that all changes to the dma ring are flushed before we
2674 * continue
2675 */
2676 wmb();
2677
2678 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2679 mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
2680 mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
2681 mtk_w32(eth,
2682 ring->phys + ((ring_size - 1) * sz),
2683 soc->reg_map->qdma.crx_ptr);
2684 mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
2685
2686 for (i = 0, ofs = 0; i < MTK_QDMA_NUM_QUEUES; i++) {
2687 val = (QDMA_RES_THRES << 8) | QDMA_RES_THRES;
2688 mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs);
2689
2690 val = MTK_QTX_SCH_MIN_RATE_EN |
2691 /* minimum: 10 Mbps */
2692 FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
2693 FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
2694 MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
2695 if (mtk_is_netsys_v1(eth))
2696 val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
2697 mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
2698 ofs += MTK_QTX_OFFSET;
2699 }
2700 val = MTK_QDMA_TX_SCH_MAX_WFQ | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
2701 mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate);
2702 if (mtk_is_netsys_v2_or_greater(eth))
2703 mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4);
2704 } else {
2705 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
2706 mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0);
2707 mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
2708 mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
2709 }
2710
2711 return 0;
2712
2713 no_tx_mem:
2714 return -ENOMEM;
2715 }
2716
2717 static void mtk_tx_clean(struct mtk_eth *eth)
2718 {
2719 const struct mtk_soc_data *soc = eth->soc;
2720 struct mtk_tx_ring *ring = &eth->tx_ring;
2721 int i;
2722
2723 if (ring->buf) {
2724 for (i = 0; i < ring->dma_size; i++)
2725 mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
2726 kfree(ring->buf);
2727 ring->buf = NULL;
2728 }
2729 if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
2730 dma_free_coherent(eth->dma_dev,
2731 ring->dma_size * soc->tx.desc_size,
2732 ring->dma, ring->phys);
2733 ring->dma = NULL;
2734 }
2735
2736 if (ring->dma_pdma) {
2737 dma_free_coherent(eth->dma_dev,
2738 ring->dma_size * soc->tx.desc_size,
2739 ring->dma_pdma, ring->phys_pdma);
2740 ring->dma_pdma = NULL;
2741 }
2742 }
2743
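/* Allocate one RX ring (normal, HWLRO or QDMA flavour), attach a page_pool
 * when XDP support is available, pre-fill every descriptor with a mapped
 * buffer and program the ring base, size and CPU index registers.
 */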
2744 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
2745 {
2746 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2747 const struct mtk_soc_data *soc = eth->soc;
2748 struct mtk_rx_ring *ring;
2749 int rx_data_len, rx_dma_size, tx_ring_size;
2750 int i;
2751
2752 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2753 tx_ring_size = MTK_QDMA_RING_SIZE;
2754 else
2755 tx_ring_size = soc->tx.dma_size;
2756
2757 if (rx_flag == MTK_RX_FLAGS_QDMA) {
2758 if (ring_no)
2759 return -EINVAL;
2760 ring = &eth->rx_ring_qdma;
2761 } else {
2762 ring = &eth->rx_ring[ring_no];
2763 }
2764
2765 if (rx_flag == MTK_RX_FLAGS_HWLRO) {
2766 rx_data_len = MTK_MAX_LRO_RX_LENGTH;
2767 rx_dma_size = MTK_HW_LRO_DMA_SIZE;
2768 } else {
2769 rx_data_len = ETH_DATA_LEN;
2770 rx_dma_size = soc->rx.dma_size;
2771 }
2772
2773 ring->frag_size = mtk_max_frag_size(rx_data_len);
2774 ring->buf_size = mtk_max_buf_size(ring->frag_size);
2775 ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
2776 GFP_KERNEL);
2777 if (!ring->data)
2778 return -ENOMEM;
2779
2780 if (mtk_page_pool_enabled(eth)) {
2781 struct page_pool *pp;
2782
2783 pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
2784 rx_dma_size);
2785 if (IS_ERR(pp))
2786 return PTR_ERR(pp);
2787
2788 ring->page_pool = pp;
2789 }
2790
2791 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
2792 rx_flag != MTK_RX_FLAGS_NORMAL) {
2793 ring->dma = dma_alloc_coherent(eth->dma_dev,
2794 rx_dma_size * eth->soc->rx.desc_size,
2795 &ring->phys, GFP_KERNEL);
2796 } else {
2797 struct mtk_tx_ring *tx_ring = &eth->tx_ring;
2798
2799 ring->dma = tx_ring->dma + tx_ring_size *
2800 eth->soc->tx.desc_size * (ring_no + 1);
2801 ring->phys = tx_ring->phys + tx_ring_size *
2802 eth->soc->tx.desc_size * (ring_no + 1);
2803 }
2804
2805 if (!ring->dma)
2806 return -ENOMEM;
2807
2808 for (i = 0; i < rx_dma_size; i++) {
2809 struct mtk_rx_dma_v2 *rxd;
2810 dma_addr_t dma_addr;
2811 void *data;
2812
2813 rxd = ring->dma + i * eth->soc->rx.desc_size;
2814 if (ring->page_pool) {
2815 data = mtk_page_pool_get_buff(ring->page_pool,
2816 &dma_addr, GFP_KERNEL);
2817 if (!data)
2818 return -ENOMEM;
2819 } else {
2820 if (ring->frag_size <= PAGE_SIZE)
2821 data = netdev_alloc_frag(ring->frag_size);
2822 else
2823 data = mtk_max_lro_buf_alloc(GFP_KERNEL);
2824
2825 if (!data)
2826 return -ENOMEM;
2827
2828 dma_addr = dma_map_single(eth->dma_dev,
2829 data + NET_SKB_PAD + eth->ip_align,
2830 ring->buf_size, DMA_FROM_DEVICE);
2831 if (unlikely(dma_mapping_error(eth->dma_dev,
2832 dma_addr))) {
2833 skb_free_frag(data);
2834 return -ENOMEM;
2835 }
2836 }
2837 rxd->rxd1 = (unsigned int)dma_addr;
2838 ring->data[i] = data;
2839
2840 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2841 rxd->rxd2 = RX_DMA_LSO;
2842 else
2843 rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2844
2845 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
2846 rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
2847
2848 rxd->rxd3 = 0;
2849 rxd->rxd4 = 0;
2850 if (mtk_is_netsys_v3_or_greater(eth)) {
2851 rxd->rxd5 = 0;
2852 rxd->rxd6 = 0;
2853 rxd->rxd7 = 0;
2854 rxd->rxd8 = 0;
2855 }
2856 }
2857
2858 ring->dma_size = rx_dma_size;
2859 ring->calc_idx_update = false;
2860 ring->calc_idx = rx_dma_size - 1;
2861 if (rx_flag == MTK_RX_FLAGS_QDMA)
2862 ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
2863 ring_no * MTK_QRX_OFFSET;
2864 else
2865 ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
2866 ring_no * MTK_QRX_OFFSET;
2867 /* make sure that all changes to the dma ring are flushed before we
2868 * continue
2869 */
2870 wmb();
2871
2872 if (rx_flag == MTK_RX_FLAGS_QDMA) {
2873 mtk_w32(eth, ring->phys,
2874 reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2875 mtk_w32(eth, rx_dma_size,
2876 reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2877 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2878 reg_map->qdma.rst_idx);
2879 } else {
2880 mtk_w32(eth, ring->phys,
2881 reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2882 mtk_w32(eth, rx_dma_size,
2883 reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2884 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2885 reg_map->pdma.rst_idx);
2886 }
2887 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
2888
2889 return 0;
2890 }
2891
2892 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_sram)
2893 {
2894 u64 addr64 = 0;
2895 int i;
2896
2897 if (ring->data && ring->dma) {
2898 for (i = 0; i < ring->dma_size; i++) {
2899 struct mtk_rx_dma *rxd;
2900
2901 if (!ring->data[i])
2902 continue;
2903
2904 rxd = ring->dma + i * eth->soc->rx.desc_size;
2905 if (!rxd->rxd1)
2906 continue;
2907
2908 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
2909 addr64 = RX_DMA_GET_ADDR64(rxd->rxd2);
2910
2911 dma_unmap_single(eth->dma_dev, ((u64)rxd->rxd1 | addr64),
2912 ring->buf_size, DMA_FROM_DEVICE);
2913 mtk_rx_put_buff(ring, ring->data[i], false);
2914 }
2915 kfree(ring->data);
2916 ring->data = NULL;
2917 }
2918
2919 if (!in_sram && ring->dma) {
2920 dma_free_coherent(eth->dma_dev,
2921 ring->dma_size * eth->soc->rx.desc_size,
2922 ring->dma, ring->phys);
2923 ring->dma = NULL;
2924 }
2925
2926 if (ring->page_pool) {
2927 if (xdp_rxq_info_is_reg(&ring->xdp_q))
2928 xdp_rxq_info_unreg(&ring->xdp_q);
2929 page_pool_destroy(ring->page_pool);
2930 ring->page_pool = NULL;
2931 }
2932 }
2933
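/* Program the hardware LRO engine: put the LRO rings into auto-learn mode,
 * configure the aggregation timers and limits, and finally enable LRO in
 * the global PDMA LRO control registers.
 */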
2934 static int mtk_hwlro_rx_init(struct mtk_eth *eth)
2935 {
2936 int i;
2937 u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
2938 u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
2939
2940 /* set LRO rings to auto-learn modes */
2941 ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
2942
2943 /* validate LRO ring */
2944 ring_ctrl_dw2 |= MTK_RING_VLD;
2945
2946 /* set AGE timer (unit: 20us) */
2947 ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
2948 ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
2949
2950 /* set max AGG timer (unit: 20us) */
2951 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
2952
2953 /* set max LRO AGG count */
2954 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
2955 ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
2956
2957 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2958 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
2959 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
2960 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
2961 }
2962
2963 /* IPv4 checksum update enable */
2964 lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
2965
2966 /* switch priority comparison to packet count mode */
2967 lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
2968
2969 /* bandwidth threshold setting */
2970 mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
2971
2972 /* auto-learn score delta setting */
2973 mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
2974
2975 /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
2976 mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
2977 MTK_PDMA_LRO_ALT_REFRESH_TIMER);
2978
2979 /* set HW LRO mode & the max aggregation count for rx packets */
2980 lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
2981
2982 /* the minimum remaining room of SDL0 in the RXD required for LRO aggregation */
2983 lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
2984
2985 /* enable HW LRO */
2986 lro_ctrl_dw0 |= MTK_LRO_EN;
2987
2988 mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
2989 mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
2990
2991 return 0;
2992 }
2993
2994 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
2995 {
2996 int i;
2997 u32 val;
2998
2999 /* relinquish lro rings, flush aggregated packets */
3000 mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
3001
3002 /* wait for the relinquishments to complete */
3003 for (i = 0; i < 10; i++) {
3004 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
3005 if (val & MTK_LRO_RING_RELINQUISH_DONE) {
3006 msleep(20);
3007 continue;
3008 }
3009 break;
3010 }
3011
3012 /* invalidate lro rings */
3013 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
3014 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
3015
3016 /* disable HW LRO */
3017 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
3018 }
3019
3020 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
3021 {
3022 u32 reg_val;
3023
3024 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
3025
3026 /* invalidate the IP setting */
3027 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
3028
3029 mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
3030
3031 /* validate the IP setting */
3032 mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
3033 }
3034
3035 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
3036 {
3037 u32 reg_val;
3038
3039 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
3040
3041 /* invalidate the IP setting */
3042 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
3043
3044 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
3045 }
3046
3047 static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
3048 {
3049 int cnt = 0;
3050 int i;
3051
3052 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
3053 if (mac->hwlro_ip[i])
3054 cnt++;
3055 }
3056
3057 return cnt;
3058 }
3059
3060 static int mtk_hwlro_add_ipaddr(struct net_device *dev,
3061 struct ethtool_rxnfc *cmd)
3062 {
3063 struct ethtool_rx_flow_spec *fsp =
3064 (struct ethtool_rx_flow_spec *)&cmd->fs;
3065 struct mtk_mac *mac = netdev_priv(dev);
3066 struct mtk_eth *eth = mac->hw;
3067 int hwlro_idx;
3068
3069 if ((fsp->flow_type != TCP_V4_FLOW) ||
3070 (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
3071 (fsp->location > 1))
3072 return -EINVAL;
3073
3074 mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
3075 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
3076
3077 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
3078
3079 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
3080
3081 return 0;
3082 }
3083
3084 static int mtk_hwlro_del_ipaddr(struct net_device *dev,
3085 struct ethtool_rxnfc *cmd)
3086 {
3087 struct ethtool_rx_flow_spec *fsp =
3088 (struct ethtool_rx_flow_spec *)&cmd->fs;
3089 struct mtk_mac *mac = netdev_priv(dev);
3090 struct mtk_eth *eth = mac->hw;
3091 int hwlro_idx;
3092
3093 if (fsp->location > 1)
3094 return -EINVAL;
3095
3096 mac->hwlro_ip[fsp->location] = 0;
3097 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
3098
3099 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
3100
3101 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
3102
3103 return 0;
3104 }
3105
3106 static void mtk_hwlro_netdev_disable(struct net_device *dev)
3107 {
3108 struct mtk_mac *mac = netdev_priv(dev);
3109 struct mtk_eth *eth = mac->hw;
3110 int i, hwlro_idx;
3111
3112 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
3113 mac->hwlro_ip[i] = 0;
3114 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
3115
3116 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
3117 }
3118
3119 mac->hwlro_ip_cnt = 0;
3120 }
3121
3122 static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
3123 struct ethtool_rxnfc *cmd)
3124 {
3125 struct mtk_mac *mac = netdev_priv(dev);
3126 struct ethtool_rx_flow_spec *fsp =
3127 (struct ethtool_rx_flow_spec *)&cmd->fs;
3128
3129 if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
3130 return -EINVAL;
3131
3132 /* only the tcp dst ipv4 address is meaningful, the other fields are not used */
3133 fsp->flow_type = TCP_V4_FLOW;
3134 fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
3135 fsp->m_u.tcp_ip4_spec.ip4dst = 0;
3136
3137 fsp->h_u.tcp_ip4_spec.ip4src = 0;
3138 fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
3139 fsp->h_u.tcp_ip4_spec.psrc = 0;
3140 fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
3141 fsp->h_u.tcp_ip4_spec.pdst = 0;
3142 fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
3143 fsp->h_u.tcp_ip4_spec.tos = 0;
3144 fsp->m_u.tcp_ip4_spec.tos = 0xff;
3145
3146 return 0;
3147 }
3148
3149 static int mtk_hwlro_get_fdir_all(struct net_device *dev,
3150 struct ethtool_rxnfc *cmd,
3151 u32 *rule_locs)
3152 {
3153 struct mtk_mac *mac = netdev_priv(dev);
3154 int cnt = 0;
3155 int i;
3156
3157 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
3158 if (cnt == cmd->rule_cnt)
3159 return -EMSGSIZE;
3160
3161 if (mac->hwlro_ip[i]) {
3162 rule_locs[cnt] = i;
3163 cnt++;
3164 }
3165 }
3166
3167 cmd->rule_cnt = cnt;
3168
3169 return 0;
3170 }
3171
3172 static netdev_features_t mtk_fix_features(struct net_device *dev,
3173 netdev_features_t features)
3174 {
3175 if (!(features & NETIF_F_LRO)) {
3176 struct mtk_mac *mac = netdev_priv(dev);
3177 int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
3178
3179 if (ip_cnt) {
3180 netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
3181
3182 features |= NETIF_F_LRO;
3183 }
3184 }
3185
3186 return features;
3187 }
3188
3189 static int mtk_set_features(struct net_device *dev, netdev_features_t features)
3190 {
3191 netdev_features_t diff = dev->features ^ features;
3192
3193 if ((diff & NETIF_F_LRO) && !(features & NETIF_F_LRO))
3194 mtk_hwlro_netdev_disable(dev);
3195
3196 return 0;
3197 }
3198
3199 /* wait for DMA to finish whatever it is doing before we start using it again */
3200 static int mtk_dma_busy_wait(struct mtk_eth *eth)
3201 {
3202 unsigned int reg;
3203 int ret;
3204 u32 val;
3205
3206 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3207 reg = eth->soc->reg_map->qdma.glo_cfg;
3208 else
3209 reg = eth->soc->reg_map->pdma.glo_cfg;
3210
3211 ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
3212 !(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
3213 5, MTK_DMA_BUSY_TIMEOUT_US);
3214 if (ret)
3215 dev_err(eth->dev, "DMA init timeout\n");
3216
3217 return ret;
3218 }
3219
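/* Bring up all DMA resources in order: wait for the engines to go idle,
 * allocate the QDMA scratch FQ memory where needed, then the TX ring and
 * the RX rings (including the optional QDMA and HWLRO rings), and set up
 * the QDMA flow control thresholds.
 */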
3220 static int mtk_dma_init(struct mtk_eth *eth)
3221 {
3222 int err;
3223 u32 i;
3224
3225 if (mtk_dma_busy_wait(eth))
3226 return -EBUSY;
3227
3228 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3229 /* QDMA needs scratch memory for internal reordering of the
3230 * descriptors
3231 */
3232 err = mtk_init_fq_dma(eth);
3233 if (err)
3234 return err;
3235 }
3236
3237 err = mtk_tx_alloc(eth);
3238 if (err)
3239 return err;
3240
3241 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3242 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
3243 if (err)
3244 return err;
3245 }
3246
3247 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
3248 if (err)
3249 return err;
3250
3251 if (eth->hwlro) {
3252 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
3253 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
3254 if (err)
3255 return err;
3256 }
3257 err = mtk_hwlro_rx_init(eth);
3258 if (err)
3259 return err;
3260 }
3261
3262 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3263 /* Enable random early drop and set drop threshold
3264 * automatically
3265 */
3266 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
3267 FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
3268 mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
3269 }
3270
3271 return 0;
3272 }
3273
3274 static void mtk_dma_free(struct mtk_eth *eth)
3275 {
3276 const struct mtk_soc_data *soc = eth->soc;
3277 int i, j, txqs = 1;
3278
3279 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3280 txqs = MTK_QDMA_NUM_QUEUES;
3281
3282 for (i = 0; i < MTK_MAX_DEVS; i++) {
3283 if (!eth->netdev[i])
3284 continue;
3285
3286 for (j = 0; j < txqs; j++)
3287 netdev_tx_reset_subqueue(eth->netdev[i], j);
3288 }
3289
3290 if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
3291 dma_free_coherent(eth->dma_dev,
3292 MTK_QDMA_RING_SIZE * soc->tx.desc_size,
3293 eth->scratch_ring, eth->phy_scratch_ring);
3294 eth->scratch_ring = NULL;
3295 eth->phy_scratch_ring = 0;
3296 }
3297 mtk_tx_clean(eth);
3298 mtk_rx_clean(eth, &eth->rx_ring[0], MTK_HAS_CAPS(soc->caps, MTK_SRAM));
3299 mtk_rx_clean(eth, &eth->rx_ring_qdma, false);
3300
3301 if (eth->hwlro) {
3302 mtk_hwlro_rx_uninit(eth);
3303 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
3304 mtk_rx_clean(eth, &eth->rx_ring[i], false);
3305 }
3306
3307 for (i = 0; i < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); i++) {
3308 kfree(eth->scratch_head[i]);
3309 eth->scratch_head[i] = NULL;
3310 }
3311 }
3312
3313 static bool mtk_hw_reset_check(struct mtk_eth *eth)
3314 {
3315 u32 val = mtk_r32(eth, MTK_INT_STATUS2);
3316
3317 return (val & MTK_FE_INT_FQ_EMPTY) || (val & MTK_FE_INT_RFIFO_UF) ||
3318 (val & MTK_FE_INT_RFIFO_OV) || (val & MTK_FE_INT_TSO_FAIL) ||
3319 (val & MTK_FE_INT_TSO_ALIGN) || (val & MTK_FE_INT_TSO_ILLEGAL);
3320 }
3321
3322 static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
3323 {
3324 struct mtk_mac *mac = netdev_priv(dev);
3325 struct mtk_eth *eth = mac->hw;
3326
3327 if (test_bit(MTK_RESETTING, &eth->state))
3328 return;
3329
3330 if (!mtk_hw_reset_check(eth))
3331 return;
3332
3333 eth->netdev[mac->id]->stats.tx_errors++;
3334 netif_err(eth, tx_err, dev, "transmit timed out\n");
3335
3336 schedule_work(&eth->pending_work);
3337 }
3338
3339 static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
3340 {
3341 struct mtk_eth *eth = _eth;
3342
3343 eth->rx_events++;
3344 if (likely(napi_schedule_prep(&eth->rx_napi))) {
3345 mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
3346 __napi_schedule(&eth->rx_napi);
3347 }
3348
3349 return IRQ_HANDLED;
3350 }
3351
3352 static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
3353 {
3354 struct mtk_eth *eth = _eth;
3355
3356 eth->tx_events++;
3357 if (likely(napi_schedule_prep(&eth->tx_napi))) {
3358 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3359 __napi_schedule(&eth->tx_napi);
3360 }
3361
3362 return IRQ_HANDLED;
3363 }
3364
3365 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
3366 {
3367 struct mtk_eth *eth = _eth;
3368 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3369
3370 if (mtk_r32(eth, reg_map->pdma.irq_mask) &
3371 eth->soc->rx.irq_done_mask) {
3372 if (mtk_r32(eth, reg_map->pdma.irq_status) &
3373 eth->soc->rx.irq_done_mask)
3374 mtk_handle_irq_rx(irq, _eth);
3375 }
3376 if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
3377 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
3378 mtk_handle_irq_tx(irq, _eth);
3379 }
3380
3381 return IRQ_HANDLED;
3382 }
3383
3384 #ifdef CONFIG_NET_POLL_CONTROLLER
3385 static void mtk_poll_controller(struct net_device *dev)
3386 {
3387 struct mtk_mac *mac = netdev_priv(dev);
3388 struct mtk_eth *eth = mac->hw;
3389
3390 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3391 mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
3392 mtk_handle_irq_rx(eth->irq[2], dev);
3393 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3394 mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
3395 }
3396 #endif
3397
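/* Allocate the DMA rings via mtk_dma_init() and enable the TX/RX engines
 * with the burst and write-back settings appropriate for this SoC in the
 * QDMA and PDMA global configuration registers.
 */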
3398 static int mtk_start_dma(struct mtk_eth *eth)
3399 {
3400 u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
3401 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3402 int err;
3403
3404 err = mtk_dma_init(eth);
3405 if (err) {
3406 mtk_dma_free(eth);
3407 return err;
3408 }
3409
3410 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3411 val = mtk_r32(eth, reg_map->qdma.glo_cfg);
3412 val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3413 MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
3414 MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
3415
3416 if (mtk_is_netsys_v2_or_greater(eth))
3417 val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
3418 MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
3419 MTK_CHK_DDONE_EN;
3420 else
3421 val |= MTK_RX_BT_32DWORDS;
3422 mtk_w32(eth, val, reg_map->qdma.glo_cfg);
3423
3424 mtk_w32(eth,
3425 MTK_RX_DMA_EN | rx_2b_offset |
3426 MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
3427 reg_map->pdma.glo_cfg);
3428 } else {
3429 mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3430 MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
3431 reg_map->pdma.glo_cfg);
3432 }
3433
3434 return 0;
3435 }
3436
3437 static void mtk_gdm_config(struct mtk_eth *eth, u32 id, u32 config)
3438 {
3439 u32 val;
3440
3441 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3442 return;
3443
3444 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(id));
3445
3446 /* by default, set up the forward port to send frames to PDMA */
3447 val &= ~0xffff;
3448
3449 /* Enable RX checksum */
3450 val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
3451
3452 val |= config;
3453
3454 if (eth->netdev[id] && netdev_uses_dsa(eth->netdev[id]))
3455 val |= MTK_GDMA_SPECIAL_TAG;
3456
3457 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(id));
3458 }
3459
3460
3461 static bool mtk_uses_dsa(struct net_device *dev)
3462 {
3463 #if IS_ENABLED(CONFIG_NET_DSA)
3464 return netdev_uses_dsa(dev) &&
3465 dev->dsa_ptr->tag_ops->proto == DSA_TAG_PROTO_MTK;
3466 #else
3467 return false;
3468 #endif
3469 }
3470
3471 static int mtk_device_event(struct notifier_block *n, unsigned long event, void *ptr)
3472 {
3473 struct mtk_mac *mac = container_of(n, struct mtk_mac, device_notifier);
3474 struct mtk_eth *eth = mac->hw;
3475 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3476 struct ethtool_link_ksettings s;
3477 struct net_device *ldev;
3478 struct list_head *iter;
3479 struct dsa_port *dp;
3480
3481 if (event != NETDEV_CHANGE)
3482 return NOTIFY_DONE;
3483
3484 netdev_for_each_lower_dev(dev, ldev, iter) {
3485 if (netdev_priv(ldev) == mac)
3486 goto found;
3487 }
3488
3489 return NOTIFY_DONE;
3490
3491 found:
3492 if (!dsa_user_dev_check(dev))
3493 return NOTIFY_DONE;
3494
3495 if (__ethtool_get_link_ksettings(dev, &s))
3496 return NOTIFY_DONE;
3497
3498 if (s.base.speed == 0 || s.base.speed == ((__u32)-1))
3499 return NOTIFY_DONE;
3500
3501 dp = dsa_port_from_netdev(dev);
3502 if (dp->index >= MTK_QDMA_NUM_QUEUES)
3503 return NOTIFY_DONE;
3504
3505 if (mac->speed > 0 && mac->speed <= s.base.speed)
3506 s.base.speed = 0;
3507
3508 mtk_set_queue_speed(eth, dp->index + 3, s.base.speed);
3509
3510 return NOTIFY_DONE;
3511 }
3512
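/* ndo_open: attach the PHY and, on first use of the shared DMA rings, start
 * DMA, the PPE offload engines and NAPI, and route each GDM port to its PPE
 * (or to PDMA when offloading is unsupported). phylink and the TX queues
 * are then started; on NETSYS v1 the DSA metadata dst entries used for
 * untagged special-tag RX are also prepared here.
 */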
3513 static int mtk_open(struct net_device *dev)
3514 {
3515 struct mtk_mac *mac = netdev_priv(dev);
3516 struct mtk_eth *eth = mac->hw;
3517 struct mtk_mac *target_mac;
3518 int i, err, ppe_num;
3519
3520 ppe_num = eth->soc->ppe_num;
3521
3522 err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
3523 if (err) {
3524 netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
3525 err);
3526 return err;
3527 }
3528
3529 /* we run 2 netdevs on the same dma ring so we only bring it up once */
3530 if (!refcount_read(&eth->dma_refcnt)) {
3531 const struct mtk_soc_data *soc = eth->soc;
3532 u32 gdm_config;
3533 int i;
3534
3535 err = mtk_start_dma(eth);
3536 if (err) {
3537 phylink_disconnect_phy(mac->phylink);
3538 return err;
3539 }
3540
3541 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3542 mtk_ppe_start(eth->ppe[i]);
3543
3544 for (i = 0; i < MTK_MAX_DEVS; i++) {
3545 if (!eth->netdev[i])
3546 continue;
3547
3548 target_mac = netdev_priv(eth->netdev[i]);
3549 if (!soc->offload_version) {
3550 target_mac->ppe_idx = 0;
3551 gdm_config = MTK_GDMA_TO_PDMA;
3552 } else if (ppe_num >= 3 && target_mac->id == 2) {
3553 target_mac->ppe_idx = 2;
3554 gdm_config = soc->reg_map->gdma_to_ppe[2];
3555 } else if (ppe_num >= 2 && target_mac->id == 1) {
3556 target_mac->ppe_idx = 1;
3557 gdm_config = soc->reg_map->gdma_to_ppe[1];
3558 } else {
3559 target_mac->ppe_idx = 0;
3560 gdm_config = soc->reg_map->gdma_to_ppe[0];
3561 }
3562 mtk_gdm_config(eth, target_mac->id, gdm_config);
3563 }
3564
3565 napi_enable(&eth->tx_napi);
3566 napi_enable(&eth->rx_napi);
3567 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3568 mtk_rx_irq_enable(eth, soc->rx.irq_done_mask);
3569 refcount_set(&eth->dma_refcnt, 1);
3570 } else {
3571 refcount_inc(&eth->dma_refcnt);
3572 }
3573
3574 phylink_start(mac->phylink);
3575 netif_tx_start_all_queues(dev);
3576
3577 if (mtk_is_netsys_v2_or_greater(eth))
3578 return 0;
3579
3580 if (mtk_uses_dsa(dev) && !eth->prog) {
3581 for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
3582 struct metadata_dst *md_dst = eth->dsa_meta[i];
3583
3584 if (md_dst)
3585 continue;
3586
3587 md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
3588 GFP_KERNEL);
3589 if (!md_dst)
3590 return -ENOMEM;
3591
3592 md_dst->u.port_info.port_id = i;
3593 eth->dsa_meta[i] = md_dst;
3594 }
3595 } else {
3596 /* Hardware DSA untagging and VLAN RX offloading need to be
3597 * disabled if at least one MAC does not use DSA.
3598 */
3599 u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3600
3601 val &= ~MTK_CDMP_STAG_EN;
3602 mtk_w32(eth, val, MTK_CDMP_IG_CTRL);
3603
3604 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
3605 }
3606
3607 return 0;
3608 }
3609
3610 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
3611 {
3612 u32 val;
3613 int i;
3614
3615 /* stop the dma engine */
3616 spin_lock_bh(&eth->page_lock);
3617 val = mtk_r32(eth, glo_cfg);
3618 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
3619 glo_cfg);
3620 spin_unlock_bh(&eth->page_lock);
3621
3622 /* wait for dma stop */
3623 for (i = 0; i < 10; i++) {
3624 val = mtk_r32(eth, glo_cfg);
3625 if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
3626 msleep(20);
3627 continue;
3628 }
3629 break;
3630 }
3631 }
3632
3633 static int mtk_stop(struct net_device *dev)
3634 {
3635 struct mtk_mac *mac = netdev_priv(dev);
3636 struct mtk_eth *eth = mac->hw;
3637 int i;
3638
3639 phylink_stop(mac->phylink);
3640
3641 netif_tx_disable(dev);
3642
3643 phylink_disconnect_phy(mac->phylink);
3644
3645 /* only shutdown DMA if this is the last user */
3646 if (!refcount_dec_and_test(&eth->dma_refcnt))
3647 return 0;
3648
3649 for (i = 0; i < MTK_MAX_DEVS; i++)
3650 mtk_gdm_config(eth, i, MTK_GDMA_DROP_ALL);
3651
3652 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3653 mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
3654 napi_disable(&eth->tx_napi);
3655 napi_disable(&eth->rx_napi);
3656
3657 cancel_work_sync(&eth->rx_dim.work);
3658 cancel_work_sync(&eth->tx_dim.work);
3659
3660 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3661 mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
3662 mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
3663
3664 mtk_dma_free(eth);
3665
3666 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3667 mtk_ppe_stop(eth->ppe[i]);
3668
3669 return 0;
3670 }
3671
3672 static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
3673 struct netlink_ext_ack *extack)
3674 {
3675 struct mtk_mac *mac = netdev_priv(dev);
3676 struct mtk_eth *eth = mac->hw;
3677 struct bpf_prog *old_prog;
3678 bool need_update;
3679
3680 if (eth->hwlro) {
3681 NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
3682 return -EOPNOTSUPP;
3683 }
3684
3685 if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
3686 NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
3687 return -EOPNOTSUPP;
3688 }
3689
3690 need_update = !!eth->prog != !!prog;
3691 if (netif_running(dev) && need_update)
3692 mtk_stop(dev);
3693
3694 old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
3695 if (old_prog)
3696 bpf_prog_put(old_prog);
3697
3698 if (netif_running(dev) && need_update)
3699 return mtk_open(dev);
3700
3701 return 0;
3702 }
3703
3704 static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3705 {
3706 switch (xdp->command) {
3707 case XDP_SETUP_PROG:
3708 return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
3709 default:
3710 return -EINVAL;
3711 }
3712 }
3713
3714 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
3715 {
3716 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3717 reset_bits,
3718 reset_bits);
3719
3720 usleep_range(1000, 1100);
3721 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3722 reset_bits,
3723 ~reset_bits);
3724 mdelay(10);
3725 }
3726
3727 static void mtk_clk_disable(struct mtk_eth *eth)
3728 {
3729 int clk;
3730
3731 for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
3732 clk_disable_unprepare(eth->clks[clk]);
3733 }
3734
3735 static int mtk_clk_enable(struct mtk_eth *eth)
3736 {
3737 int clk, ret;
3738
3739 for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
3740 ret = clk_prepare_enable(eth->clks[clk]);
3741 if (ret)
3742 goto err_disable_clks;
3743 }
3744
3745 return 0;
3746
3747 err_disable_clks:
3748 while (--clk >= 0)
3749 clk_disable_unprepare(eth->clks[clk]);
3750
3751 return ret;
3752 }
3753
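/* Worker for RX dynamic interrupt moderation: translate the profile chosen
 * by the net_dim library into the PDMA (and QDMA) delay-interrupt register
 * fields, which are expressed in 20us units.
 */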
3754 static void mtk_dim_rx(struct work_struct *work)
3755 {
3756 struct dim *dim = container_of(work, struct dim, work);
3757 struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
3758 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3759 struct dim_cq_moder cur_profile;
3760 u32 val, cur;
3761
3762 cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
3763 dim->profile_ix);
3764 spin_lock_bh(&eth->dim_lock);
3765
3766 val = mtk_r32(eth, reg_map->pdma.delay_irq);
3767 val &= MTK_PDMA_DELAY_TX_MASK;
3768 val |= MTK_PDMA_DELAY_RX_EN;
3769
3770 cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3771 val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
3772
3773 cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3774 val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
3775
3776 mtk_w32(eth, val, reg_map->pdma.delay_irq);
3777 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3778 mtk_w32(eth, val, reg_map->qdma.delay_irq);
3779
3780 spin_unlock_bh(&eth->dim_lock);
3781
3782 dim->state = DIM_START_MEASURE;
3783 }
3784
3785 static void mtk_dim_tx(struct work_struct *work)
3786 {
3787 struct dim *dim = container_of(work, struct dim, work);
3788 struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
3789 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3790 struct dim_cq_moder cur_profile;
3791 u32 val, cur;
3792
3793 cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
3794 dim->profile_ix);
3795 spin_lock_bh(&eth->dim_lock);
3796
3797 val = mtk_r32(eth, reg_map->pdma.delay_irq);
3798 val &= MTK_PDMA_DELAY_RX_MASK;
3799 val |= MTK_PDMA_DELAY_TX_EN;
3800
3801 cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3802 val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;
3803
3804 cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3805 val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
3806
3807 mtk_w32(eth, val, reg_map->pdma.delay_irq);
3808 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3809 mtk_w32(eth, val, reg_map->qdma.delay_irq);
3810
3811 spin_unlock_bh(&eth->dim_lock);
3812
3813 dim->state = DIM_START_MEASURE;
3814 }
3815
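/* Program the per-MAC maximum RX frame length. The hardware only knows a
 * few discrete thresholds (1518/1536/1552/2048 bytes), so pick the
 * smallest one that still fits the requested length.
 */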
3816 static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val)
3817 {
3818 struct mtk_eth *eth = mac->hw;
3819 u32 mcr_cur, mcr_new;
3820
3821 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3822 return;
3823
3824 mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
3825 mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
3826
3827 if (val <= 1518)
3828 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
3829 else if (val <= 1536)
3830 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
3831 else if (val <= 1552)
3832 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
3833 else
3834 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
3835
3836 if (mcr_new != mcr_cur)
3837 mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
3838 }
3839
3840 static void mtk_hw_reset(struct mtk_eth *eth)
3841 {
3842 u32 val;
3843
3844 if (mtk_is_netsys_v2_or_greater(eth))
3845 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
3846
3847 if (mtk_is_netsys_v3_or_greater(eth)) {
3848 val = RSTCTRL_PPE0_V3;
3849
3850 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3851 val |= RSTCTRL_PPE1_V3;
3852
3853 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
3854 val |= RSTCTRL_PPE2;
3855
3856 val |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
3857 } else if (mtk_is_netsys_v2_or_greater(eth)) {
3858 val = RSTCTRL_PPE0_V2;
3859
3860 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3861 val |= RSTCTRL_PPE1;
3862 } else {
3863 val = RSTCTRL_PPE0;
3864 }
3865
3866 ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
3867
3868 if (mtk_is_netsys_v3_or_greater(eth))
3869 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
3870 0x6f8ff);
3871 else if (mtk_is_netsys_v2_or_greater(eth))
3872 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
3873 0x3ffffff);
3874 }
3875
3876 static u32 mtk_hw_reset_read(struct mtk_eth *eth)
3877 {
3878 u32 val;
3879
3880 regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
3881 return val;
3882 }
3883
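/* Warm reset: first assert RSTCTRL_FE and wait for it to latch, then
 * assert the ETH/PPE (and, on newer NETSYS revisions, WDMA) reset bits and
 * release everything again, verifying each stage. Falls back to a full
 * mtk_hw_reset() if the initial FE assertion never becomes visible.
 */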
3884 static void mtk_hw_warm_reset(struct mtk_eth *eth)
3885 {
3886 u32 rst_mask, val;
3887
3888 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE,
3889 RSTCTRL_FE);
3890 if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val,
3891 val & RSTCTRL_FE, 1, 1000)) {
3892 dev_err(eth->dev, "warm reset failed\n");
3893 mtk_hw_reset(eth);
3894 return;
3895 }
3896
3897 if (mtk_is_netsys_v3_or_greater(eth)) {
3898 rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V3;
3899 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3900 rst_mask |= RSTCTRL_PPE1_V3;
3901 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
3902 rst_mask |= RSTCTRL_PPE2;
3903
3904 rst_mask |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
3905 } else if (mtk_is_netsys_v2_or_greater(eth)) {
3906 rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2;
3907 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3908 rst_mask |= RSTCTRL_PPE1;
3909 } else {
3910 rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0;
3911 }
3912
3913 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask);
3914
3915 udelay(1);
3916 val = mtk_hw_reset_read(eth);
3917 if (!(val & rst_mask))
3918 dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n",
3919 val, rst_mask);
3920
3921 rst_mask |= RSTCTRL_FE;
3922 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask);
3923
3924 udelay(1);
3925 val = mtk_hw_reset_read(eth);
3926 if (val & rst_mask)
3927 dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n",
3928 val, rst_mask);
3929 }
3930
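/* Heuristic DMA hang detection used by the reset monitor: check the WDMA,
 * QDMA and ADMA engines for a stuck state and only report a hang once the
 * same condition has been observed on three consecutive polls, to filter
 * out transient congestion.
 */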
3931 static bool mtk_hw_check_dma_hang(struct mtk_eth *eth)
3932 {
3933 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3934 bool gmac1_tx, gmac2_tx, gdm1_tx, gdm2_tx;
3935 bool oq_hang, cdm1_busy, adma_busy;
3936 bool wtx_busy, cdm_full, oq_free;
3937 u32 wdidx, val, gdm1_fc, gdm2_fc;
3938 bool qfsm_hang, qfwd_hang;
3939 bool ret = false;
3940
3941 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3942 return false;
3943
3944 /* WDMA sanity checks */
3945 wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc);
3946
3947 val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204);
3948 wtx_busy = FIELD_GET(MTK_TX_DMA_BUSY, val);
3949
3950 val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230);
3951 cdm_full = !FIELD_GET(MTK_CDM_TXFIFO_RDY, val);
3952
3953 oq_free = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) &&
3954 !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) &&
3955 !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16)));
3956
3957 if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) {
3958 if (++eth->reset.wdma_hang_count > 2) {
3959 eth->reset.wdma_hang_count = 0;
3960 ret = true;
3961 }
3962 goto out;
3963 }
3964
3965 /* QDMA sanity checks */
3966 qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234);
3967 qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308);
3968
3969 gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0;
3970 gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0;
3971 gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1;
3972 gmac2_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1;
3973 gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24);
3974 gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64);
3975
3976 if (qfsm_hang && qfwd_hang &&
3977 ((gdm1_tx && gmac1_tx && gdm1_fc < 1) ||
3978 (gdm2_tx && gmac2_tx && gdm2_fc < 1))) {
3979 if (++eth->reset.qdma_hang_count > 2) {
3980 eth->reset.qdma_hang_count = 0;
3981 ret = true;
3982 }
3983 goto out;
3984 }
3985
3986 /* ADMA sanity checks */
3987 oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0));
3988 cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16));
3989 adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) &&
3990 !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6));
3991
3992 if (oq_hang && cdm1_busy && adma_busy) {
3993 if (++eth->reset.adma_hang_count > 2) {
3994 eth->reset.adma_hang_count = 0;
3995 ret = true;
3996 }
3997 goto out;
3998 }
3999
4000 eth->reset.wdma_hang_count = 0;
4001 eth->reset.qdma_hang_count = 0;
4002 eth->reset.adma_hang_count = 0;
4003 out:
4004 eth->reset.wdidx = wdidx;
4005
4006 return ret;
4007 }
4008
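/* Periodic DMA monitor: skip the check while a reset is already in
 * progress, schedule the pending reset work when a hang is detected and
 * re-arm itself every MTK_DMA_MONITOR_TIMEOUT.
 */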
4009 static void mtk_hw_reset_monitor_work(struct work_struct *work)
4010 {
4011 struct delayed_work *del_work = to_delayed_work(work);
4012 struct mtk_eth *eth = container_of(del_work, struct mtk_eth,
4013 reset.monitor_work);
4014
4015 if (test_bit(MTK_RESETTING, &eth->state))
4016 goto out;
4017
4018 /* DMA stuck checks */
4019 if (mtk_hw_check_dma_hang(eth))
4020 schedule_work(&eth->pending_work);
4021
4022 out:
4023 schedule_delayed_work(&eth->reset.monitor_work,
4024 MTK_DMA_MONITOR_TIMEOUT);
4025 }
4026
4027 static int mtk_hw_init(struct mtk_eth *eth, bool reset)
4028 {
4029 u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
4030 ETHSYS_DMA_AG_MAP_PPE;
4031 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
4032 int i, val, ret;
4033
4034 if (!reset && test_and_set_bit(MTK_HW_INIT, ð->state))
4035 return 0;
4036
4037 if (!reset) {
4038 pm_runtime_enable(eth->dev);
4039 pm_runtime_get_sync(eth->dev);
4040
4041 ret = mtk_clk_enable(eth);
4042 if (ret)
4043 goto err_disable_pm;
4044 }
4045
4046 if (eth->ethsys)
4047 regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
4048 of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
4049
4050 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4051 ret = device_reset(eth->dev);
4052 if (ret) {
4053 dev_err(eth->dev, "MAC reset failed!\n");
4054 goto err_disable_pm;
4055 }
4056
4057 /* set interrupt delays based on current Net DIM sample */
4058 mtk_dim_rx(&eth->rx_dim.work);
4059 mtk_dim_tx(&eth->tx_dim.work);
4060
4061 /* disable delay and normal interrupt */
4062 mtk_tx_irq_disable(eth, ~0);
4063 mtk_rx_irq_disable(eth, ~0);
4064
4065 return 0;
4066 }
4067
4068 msleep(100);
4069
4070 if (reset)
4071 mtk_hw_warm_reset(eth);
4072 else
4073 mtk_hw_reset(eth);
4074
4075 /* No MT7628/88 support yet */
4076 if (reset && !MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4077 mtk_mdio_config(eth);
4078
4079 if (mtk_is_netsys_v3_or_greater(eth)) {
4080 /* Set FE to PDMAv2 if necessary */
4081 val = mtk_r32(eth, MTK_FE_GLO_MISC);
4082 mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
4083 }
4084
4085 if (eth->pctl) {
4086 /* Set GE2 driving and slew rate */
4087 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
4088
4089 /* set GE2 TDSEL */
4090 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
4091
4092 /* set GE2 TUNE */
4093 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
4094 }
4095
4096 /* Set link down as the default for each GMAC. Each GMAC's own MCR is
4097 * set up with the appropriate value once the mtk_mac_config() callback
4098 * is invoked.
4099 */
4100 for (i = 0; i < MTK_MAX_DEVS; i++) {
4101 struct net_device *dev = eth->netdev[i];
4102
4103 if (!dev)
4104 continue;
4105
4106 mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
4107 mtk_set_mcr_max_rx(netdev_priv(dev),
4108 dev->mtu + MTK_RX_ETH_HLEN);
4109 }
4110
4111 /* Instruct the CDM to parse the MTK special tag on packets coming from
4112 * the CPU; this also works for untagged packets.
4113 */
4114 val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
4115 mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
4116 if (mtk_is_netsys_v1(eth)) {
4117 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
4118 mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
4119
4120 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
4121 }
4122
4123 /* set interrupt delays based on current Net DIM sample */
4124 mtk_dim_rx(&eth->rx_dim.work);
4125 mtk_dim_tx(&eth->tx_dim.work);
4126
4127 /* disable delay and normal interrupt */
4128 mtk_tx_irq_disable(eth, ~0);
4129 mtk_rx_irq_disable(eth, ~0);
4130
4131 /* FE int grouping */
4132 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
4133 mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.int_grp + 4);
4134 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
4135 mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->qdma.int_grp + 4);
4136 mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
4137
4138 if (mtk_is_netsys_v3_or_greater(eth)) {
4139 /* PSE dummy page mechanism */
4140 mtk_w32(eth, PSE_DUMMY_WORK_GDM(1) | PSE_DUMMY_WORK_GDM(2) |
4141 PSE_DUMMY_WORK_GDM(3) | DUMMY_PAGE_THR, PSE_DUMY_REQ);
4142
4143 /* PSE free buffer drop threshold */
4144 mtk_w32(eth, 0x00600009, PSE_IQ_REV(8));
4145
4146 /* PSE should not drop port8, port9 and port13 packets from
4147 * WDMA Tx
4148 */
4149 mtk_w32(eth, 0x00002300, PSE_DROP_CFG);
4150
4151 /* PSE should drop packets to port8, port9 and port13 on WDMA Rx
4152 * ring full
4153 */
4154 mtk_w32(eth, 0x00002300, PSE_PPE_DROP(0));
4155 mtk_w32(eth, 0x00002300, PSE_PPE_DROP(1));
4156 mtk_w32(eth, 0x00002300, PSE_PPE_DROP(2));
4157
4158 /* GDM and CDM Threshold */
4159 mtk_w32(eth, 0x08000707, MTK_CDMW0_THRES);
4160 mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);
4161
4162 /* Disable GDM1 RX CRC stripping */
4163 mtk_m32(eth, MTK_GDMA_STRP_CRC, 0, MTK_GDMA_FWD_CFG(0));
4164
4165 /* The PSE GDM3 MIB counters have incorrect hardware default values,
4166 * so read-clear them beforehand to keep ethtool from reporting
4167 * wrong MIB values.
4168 */
4169 for (i = 0; i < 0x80; i += 0x4)
4170 mtk_r32(eth, reg_map->gdm1_cnt + 0x100 + i);
4171 } else if (!mtk_is_netsys_v1(eth)) {
4172 /* PSE should not drop port8 and port9 packets from WDMA Tx */
4173 mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
4174
4175 /* PSE should drop packets to port 8/9 on WDMA Rx ring full */
4176 mtk_w32(eth, 0x00000300, PSE_PPE_DROP(0));
4177
4178 /* PSE Free Queue Flow Control */
4179 mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
4180
4181 /* PSE config input queue threshold */
4182 mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
4183 mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
4184 mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
4185 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
4186 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
4187 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
4188 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
4189 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
4190
4191 /* PSE config output queue threshold */
4192 mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
4193 mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
4194 mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
4195 mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
4196 mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
4197 mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
4198 mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
4199 mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
4200
4201 /* GDM and CDM Threshold */
4202 mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
4203 mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
4204 mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
4205 mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
4206 mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
4207 mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
4208 }
4209
4210 return 0;
4211
4212 err_disable_pm:
4213 if (!reset) {
4214 pm_runtime_put_sync(eth->dev);
4215 pm_runtime_disable(eth->dev);
4216 }
4217
4218 return ret;
4219 }
4220
4221 static int mtk_hw_deinit(struct mtk_eth *eth)
4222 {
4223 if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
4224 return 0;
4225
4226 mtk_clk_disable(eth);
4227
4228 pm_runtime_put_sync(eth->dev);
4229 pm_runtime_disable(eth->dev);
4230
4231 return 0;
4232 }
4233
4234 static void mtk_uninit(struct net_device *dev)
4235 {
4236 struct mtk_mac *mac = netdev_priv(dev);
4237 struct mtk_eth *eth = mac->hw;
4238
4239 phylink_disconnect_phy(mac->phylink);
4240 mtk_tx_irq_disable(eth, ~0);
4241 mtk_rx_irq_disable(eth, ~0);
4242 }
4243
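/* MTU changes are rejected while an XDP program is attached if the
 * resulting frame would no longer fit into a page_pool buffer; otherwise
 * just update the MAC RX length threshold and the device MTU.
 */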
4244 static int mtk_change_mtu(struct net_device *dev, int new_mtu)
4245 {
4246 int length = new_mtu + MTK_RX_ETH_HLEN;
4247 struct mtk_mac *mac = netdev_priv(dev);
4248 struct mtk_eth *eth = mac->hw;
4249
4250 if (rcu_access_pointer(eth->prog) &&
4251 length > MTK_PP_MAX_BUF_SIZE) {
4252 netdev_err(dev, "Invalid MTU for XDP mode\n");
4253 return -EINVAL;
4254 }
4255
4256 mtk_set_mcr_max_rx(mac, length);
4257 WRITE_ONCE(dev->mtu, new_mtu);
4258
4259 return 0;
4260 }
4261
4262 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4263 {
4264 struct mtk_mac *mac = netdev_priv(dev);
4265
4266 switch (cmd) {
4267 case SIOCGMIIPHY:
4268 case SIOCGMIIREG:
4269 case SIOCSMIIREG:
4270 return phylink_mii_ioctl(mac->phylink, ifr, cmd);
4271 default:
4272 break;
4273 }
4274
4275 return -EOPNOTSUPP;
4276 }
4277
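/* Quiesce the frame engine before a reset: force the FE PPE ports link
 * down, let the PPE flush its state, mask all NETSYS interrupts and clear
 * the forced-link bit on both GMACs.
 */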
4278 static void mtk_prepare_for_reset(struct mtk_eth *eth)
4279 {
4280 u32 val;
4281 int i;
4282
4283 /* set FE PPE ports link down */
4284 for (i = MTK_GMAC1_ID;
4285 i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID);
4286 i += 2) {
4287 val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) | MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT);
4288 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
4289 val |= MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT);
4290 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
4291 val |= MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT);
4292 mtk_w32(eth, val, MTK_FE_GLO_CFG(i));
4293 }
4294
4295 /* adjust PPE configurations to prepare for reset */
4296 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
4297 mtk_ppe_prepare_reset(eth->ppe[i]);
4298
4299 /* disable NETSYS interrupts */
4300 mtk_w32(eth, 0, MTK_FE_INT_ENABLE);
4301
4302 /* force link down GMAC */
4303 for (i = 0; i < 2; i++) {
4304 val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK;
4305 mtk_w32(eth, val, MTK_MAC_MCR(i));
4306 }
4307 }
4308
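/* Deferred full frame-engine reset. Under the RTNL lock: quiesce the
 * hardware, stop every running netdev, perform a warm reset through
 * mtk_hw_init(eth, true), bring the devices back up and restore the PPE
 * port link state.
 */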
4309 static void mtk_pending_work(struct work_struct *work)
4310 {
4311 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
4312 unsigned long restart = 0;
4313 u32 val;
4314 int i;
4315
4316 rtnl_lock();
4317 set_bit(MTK_RESETTING, &eth->state);
4318
4319 mtk_prepare_for_reset(eth);
4320 mtk_wed_fe_reset();
4321 /* Run the reset preliminary configuration again to avoid any possible
4322 * race during the FE reset, since it can run with the RTNL lock released.
4323 */
4324 mtk_prepare_for_reset(eth);
4325
4326 /* stop all devices to make sure that dma is properly shut down */
4327 for (i = 0; i < MTK_MAX_DEVS; i++) {
4328 if (!eth->netdev[i] || !netif_running(eth->netdev[i]))
4329 continue;
4330
4331 mtk_stop(eth->netdev[i]);
4332 __set_bit(i, &restart);
4333 }
4334
4335 usleep_range(15000, 16000);
4336
4337 if (eth->dev->pins)
4338 pinctrl_select_state(eth->dev->pins->p,
4339 eth->dev->pins->default_state);
4340 mtk_hw_init(eth, true);
4341
4342 /* restart DMA and enable IRQs */
4343 for (i = 0; i < MTK_MAX_DEVS; i++) {
4344 if (!eth->netdev[i] || !test_bit(i, &restart))
4345 continue;
4346
4347 if (mtk_open(eth->netdev[i])) {
4348 netif_alert(eth, ifup, eth->netdev[i],
4349 "Driver up/down cycle failed\n");
4350 dev_close(eth->netdev[i]);
4351 }
4352 }
4353
4354 /* set FE PPE ports link up */
4355 for (i = MTK_GMAC1_ID;
4356 i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID);
4357 i += 2) {
4358 val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) & ~MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT);
4359 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
4360 val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT);
4361 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
4362 val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT);
4363
4364 mtk_w32(eth, val, MTK_FE_GLO_CFG(i));
4365 }
4366
4367 clear_bit(MTK_RESETTING, &eth->state);
4368
4369 mtk_wed_fe_reset_complete();
4370
4371 rtnl_unlock();
4372 }
4373
4374 static int mtk_free_dev(struct mtk_eth *eth)
4375 {
4376 int i;
4377
4378 for (i = 0; i < MTK_MAX_DEVS; i++) {
4379 if (!eth->netdev[i])
4380 continue;
4381 free_netdev(eth->netdev[i]);
4382 }
4383
4384 for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
4385 if (!eth->dsa_meta[i])
4386 break;
4387 metadata_dst_free(eth->dsa_meta[i]);
4388 }
4389
4390 return 0;
4391 }
4392
4393 static int mtk_unreg_dev(struct mtk_eth *eth)
4394 {
4395 int i;
4396
4397 for (i = 0; i < MTK_MAX_DEVS; i++) {
4398 struct mtk_mac *mac;
4399 if (!eth->netdev[i])
4400 continue;
4401 mac = netdev_priv(eth->netdev[i]);
4402 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4403 unregister_netdevice_notifier(&mac->device_notifier);
4404 unregister_netdev(eth->netdev[i]);
4405 }
4406
4407 return 0;
4408 }
4409
4410 static void mtk_sgmii_destroy(struct mtk_eth *eth)
4411 {
4412 int i;
4413
4414 for (i = 0; i < MTK_MAX_DEVS; i++)
4415 mtk_pcs_lynxi_destroy(eth->sgmii_pcs[i]);
4416 }
4417
4418 static int mtk_cleanup(struct mtk_eth *eth)
4419 {
4420 mtk_sgmii_destroy(eth);
4421 mtk_unreg_dev(eth);
4422 mtk_free_dev(eth);
4423 cancel_work_sync(&eth->pending_work);
4424 cancel_delayed_work_sync(&eth->reset.monitor_work);
4425
4426 return 0;
4427 }
4428
4429 static int mtk_get_link_ksettings(struct net_device *ndev,
4430 struct ethtool_link_ksettings *cmd)
4431 {
4432 struct mtk_mac *mac = netdev_priv(ndev);
4433
4434 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4435 return -EBUSY;
4436
4437 return phylink_ethtool_ksettings_get(mac->phylink, cmd);
4438 }
4439
4440 static int mtk_set_link_ksettings(struct net_device *ndev,
4441 const struct ethtool_link_ksettings *cmd)
4442 {
4443 struct mtk_mac *mac = netdev_priv(ndev);
4444
4445 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4446 return -EBUSY;
4447
4448 return phylink_ethtool_ksettings_set(mac->phylink, cmd);
4449 }
4450
4451 static void mtk_get_drvinfo(struct net_device *dev,
4452 struct ethtool_drvinfo *info)
4453 {
4454 struct mtk_mac *mac = netdev_priv(dev);
4455
4456 strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
4457 strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
4458 info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
4459 }
4460
4461 static u32 mtk_get_msglevel(struct net_device *dev)
4462 {
4463 struct mtk_mac *mac = netdev_priv(dev);
4464
4465 return mac->hw->msg_enable;
4466 }
4467
4468 static void mtk_set_msglevel(struct net_device *dev, u32 value)
4469 {
4470 struct mtk_mac *mac = netdev_priv(dev);
4471
4472 mac->hw->msg_enable = value;
4473 }
4474
4475 static int mtk_nway_reset(struct net_device *dev)
4476 {
4477 struct mtk_mac *mac = netdev_priv(dev);
4478
4479 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4480 return -EBUSY;
4481
4482 if (!mac->phylink)
4483 return -ENOTSUPP;
4484
4485 return phylink_ethtool_nway_reset(mac->phylink);
4486 }
4487
4488 static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4489 {
4490 int i;
4491
4492 switch (stringset) {
4493 case ETH_SS_STATS: {
4494 struct mtk_mac *mac = netdev_priv(dev);
4495
4496 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
4497 ethtool_puts(&data, mtk_ethtool_stats[i].str);
4498 if (mtk_page_pool_enabled(mac->hw))
4499 page_pool_ethtool_stats_get_strings(data);
4500 break;
4501 }
4502 default:
4503 break;
4504 }
4505 }
4506
4507 static int mtk_get_sset_count(struct net_device *dev, int sset)
4508 {
4509 switch (sset) {
4510 case ETH_SS_STATS: {
4511 int count = ARRAY_SIZE(mtk_ethtool_stats);
4512 struct mtk_mac *mac = netdev_priv(dev);
4513
4514 if (mtk_page_pool_enabled(mac->hw))
4515 count += page_pool_ethtool_stats_get_count();
4516 return count;
4517 }
4518 default:
4519 return -EOPNOTSUPP;
4520 }
4521 }
4522
4523 static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data)
4524 {
4525 struct page_pool_stats stats = {};
4526 int i;
4527
4528 for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) {
4529 struct mtk_rx_ring *ring = &eth->rx_ring[i];
4530
4531 if (!ring->page_pool)
4532 continue;
4533
4534 page_pool_get_stats(ring->page_pool, &stats);
4535 }
4536 page_pool_ethtool_stats_get(data, &stats);
4537 }
4538
4539 static void mtk_get_ethtool_stats(struct net_device *dev,
4540 struct ethtool_stats *stats, u64 *data)
4541 {
4542 struct mtk_mac *mac = netdev_priv(dev);
4543 struct mtk_hw_stats *hwstats = mac->hw_stats;
4544 u64 *data_src, *data_dst;
4545 unsigned int start;
4546 int i;
4547
4548 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4549 return;
4550
4551 if (netif_running(dev) && netif_device_present(dev)) {
4552 if (spin_trylock_bh(&hwstats->stats_lock)) {
4553 mtk_stats_update_mac(mac);
4554 spin_unlock_bh(&hwstats->stats_lock);
4555 }
4556 }
4557
4558 data_src = (u64 *)hwstats;
4559
4560 do {
4561 data_dst = data;
4562 start = u64_stats_fetch_begin(&hwstats->syncp);
4563
4564 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
4565 *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
4566 if (mtk_page_pool_enabled(mac->hw))
4567 mtk_ethtool_pp_stats(mac->hw, data_dst);
4568 } while (u64_stats_fetch_retry(&hwstats->syncp, start));
4569 }
4570
4571 static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
4572 u32 *rule_locs)
4573 {
4574 int ret = -EOPNOTSUPP;
4575
4576 switch (cmd->cmd) {
4577 case ETHTOOL_GRXRINGS:
4578 if (dev->hw_features & NETIF_F_LRO) {
4579 cmd->data = MTK_MAX_RX_RING_NUM;
4580 ret = 0;
4581 }
4582 break;
4583 case ETHTOOL_GRXCLSRLCNT:
4584 if (dev->hw_features & NETIF_F_LRO) {
4585 struct mtk_mac *mac = netdev_priv(dev);
4586
4587 cmd->rule_cnt = mac->hwlro_ip_cnt;
4588 ret = 0;
4589 }
4590 break;
4591 case ETHTOOL_GRXCLSRULE:
4592 if (dev->hw_features & NETIF_F_LRO)
4593 ret = mtk_hwlro_get_fdir_entry(dev, cmd);
4594 break;
4595 case ETHTOOL_GRXCLSRLALL:
4596 if (dev->hw_features & NETIF_F_LRO)
4597 ret = mtk_hwlro_get_fdir_all(dev, cmd,
4598 rule_locs);
4599 break;
4600 default:
4601 break;
4602 }
4603
4604 return ret;
4605 }
4606
4607 static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
4608 {
4609 int ret = -EOPNOTSUPP;
4610
4611 switch (cmd->cmd) {
4612 case ETHTOOL_SRXCLSRLINS:
4613 if (dev->hw_features & NETIF_F_LRO)
4614 ret = mtk_hwlro_add_ipaddr(dev, cmd);
4615 break;
4616 case ETHTOOL_SRXCLSRLDEL:
4617 if (dev->hw_features & NETIF_F_LRO)
4618 ret = mtk_hwlro_del_ipaddr(dev, cmd);
4619 break;
4620 default:
4621 break;
4622 }
4623
4624 return ret;
4625 }
4626
4627 static void mtk_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4628 {
4629 struct mtk_mac *mac = netdev_priv(dev);
4630
4631 phylink_ethtool_get_pauseparam(mac->phylink, pause);
4632 }
4633
4634 static int mtk_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4635 {
4636 struct mtk_mac *mac = netdev_priv(dev);
4637
4638 return phylink_ethtool_set_pauseparam(mac->phylink, pause);
4639 }
4640
4641 static int mtk_get_eee(struct net_device *dev, struct ethtool_keee *eee)
4642 {
4643 struct mtk_mac *mac = netdev_priv(dev);
4644
4645 return phylink_ethtool_get_eee(mac->phylink, eee);
4646 }
4647
4648 static int mtk_set_eee(struct net_device *dev, struct ethtool_keee *eee)
4649 {
4650 struct mtk_mac *mac = netdev_priv(dev);
4651
4652 return phylink_ethtool_set_eee(mac->phylink, eee);
4653 }
4654
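/* TX queue selection: DSA user ports get their own hardware queues
 * (switch queue + 3), everything else uses the queue matching the MAC id,
 * falling back to queue 0 if that exceeds the configured TX queue count.
 */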
4655 static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
4656 struct net_device *sb_dev)
4657 {
4658 struct mtk_mac *mac = netdev_priv(dev);
4659 unsigned int queue = 0;
4660
4661 if (netdev_uses_dsa(dev))
4662 queue = skb_get_queue_mapping(skb) + 3;
4663 else
4664 queue = mac->id;
4665
4666 if (queue >= dev->num_tx_queues)
4667 queue = 0;
4668
4669 return queue;
4670 }
4671
4672 static const struct ethtool_ops mtk_ethtool_ops = {
4673 .get_link_ksettings = mtk_get_link_ksettings,
4674 .set_link_ksettings = mtk_set_link_ksettings,
4675 .get_drvinfo = mtk_get_drvinfo,
4676 .get_msglevel = mtk_get_msglevel,
4677 .set_msglevel = mtk_set_msglevel,
4678 .nway_reset = mtk_nway_reset,
4679 .get_link = ethtool_op_get_link,
4680 .get_strings = mtk_get_strings,
4681 .get_sset_count = mtk_get_sset_count,
4682 .get_ethtool_stats = mtk_get_ethtool_stats,
4683 .get_pauseparam = mtk_get_pauseparam,
4684 .set_pauseparam = mtk_set_pauseparam,
4685 .get_rxnfc = mtk_get_rxnfc,
4686 .set_rxnfc = mtk_set_rxnfc,
4687 .get_eee = mtk_get_eee,
4688 .set_eee = mtk_set_eee,
4689 };
4690
4691 static const struct net_device_ops mtk_netdev_ops = {
4692 .ndo_uninit = mtk_uninit,
4693 .ndo_open = mtk_open,
4694 .ndo_stop = mtk_stop,
4695 .ndo_start_xmit = mtk_start_xmit,
4696 .ndo_set_mac_address = mtk_set_mac_address,
4697 .ndo_validate_addr = eth_validate_addr,
4698 .ndo_eth_ioctl = mtk_do_ioctl,
4699 .ndo_change_mtu = mtk_change_mtu,
4700 .ndo_tx_timeout = mtk_tx_timeout,
4701 .ndo_get_stats64 = mtk_get_stats64,
4702 .ndo_fix_features = mtk_fix_features,
4703 .ndo_set_features = mtk_set_features,
4704 #ifdef CONFIG_NET_POLL_CONTROLLER
4705 .ndo_poll_controller = mtk_poll_controller,
4706 #endif
4707 .ndo_setup_tc = mtk_eth_setup_tc,
4708 .ndo_bpf = mtk_xdp,
4709 .ndo_xdp_xmit = mtk_xdp_xmit,
4710 .ndo_select_queue = mtk_select_queue,
4711 };
4712
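/* Instantiate one MAC/netdev from a "mediatek,eth-mac" device-tree node:
 * read the MAC id and address, allocate the multi-queue netdev and
 * per-MAC stats, build the phylink configuration according to the SoC
 * capabilities and finally hook up the netdev/ethtool ops.
 */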
4713 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
4714 {
4715 const __be32 *_id = of_get_property(np, "reg", NULL);
4716 phy_interface_t phy_mode;
4717 struct phylink *phylink;
4718 struct mtk_mac *mac;
4719 int id, err;
4720 int txqs = 1;
4721 u32 val;
4722
4723 if (!_id) {
4724 dev_err(eth->dev, "missing mac id\n");
4725 return -EINVAL;
4726 }
4727
4728 id = be32_to_cpup(_id);
4729 if (id >= MTK_MAX_DEVS) {
4730 dev_err(eth->dev, "%d is not a valid mac id\n", id);
4731 return -EINVAL;
4732 }
4733
4734 if (eth->netdev[id]) {
4735 dev_err(eth->dev, "duplicate mac id found: %d\n", id);
4736 return -EINVAL;
4737 }
4738
4739 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4740 txqs = MTK_QDMA_NUM_QUEUES;
4741
4742 eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1);
4743 if (!eth->netdev[id]) {
4744 dev_err(eth->dev, "alloc_etherdev failed\n");
4745 return -ENOMEM;
4746 }
4747 mac = netdev_priv(eth->netdev[id]);
4748 eth->mac[id] = mac;
4749 mac->id = id;
4750 mac->hw = eth;
4751 mac->of_node = np;
4752
4753 err = of_get_ethdev_address(mac->of_node, eth->netdev[id]);
4754 if (err == -EPROBE_DEFER)
4755 return err;
4756
4757 if (err) {
4758 /* If the mac address is invalid, use random mac address */
4759 eth_hw_addr_random(eth->netdev[id]);
4760 dev_err(eth->dev, "generated random MAC address %pM\n",
4761 eth->netdev[id]->dev_addr);
4762 }
4763
4764 memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
4765 mac->hwlro_ip_cnt = 0;
4766
4767 mac->hw_stats = devm_kzalloc(eth->dev,
4768 sizeof(*mac->hw_stats),
4769 GFP_KERNEL);
4770 if (!mac->hw_stats) {
4771 dev_err(eth->dev, "failed to allocate counter memory\n");
4772 err = -ENOMEM;
4773 goto free_netdev;
4774 }
4775 spin_lock_init(&mac->hw_stats->stats_lock);
4776 u64_stats_init(&mac->hw_stats->syncp);
4777
4778 if (mtk_is_netsys_v3_or_greater(eth))
4779 mac->hw_stats->reg_offset = id * 0x80;
4780 else
4781 mac->hw_stats->reg_offset = id * 0x40;
4782
4783 /* phylink create */
4784 err = of_get_phy_mode(np, &phy_mode);
4785 if (err) {
4786 dev_err(eth->dev, "incorrect phy-mode\n");
4787 goto free_netdev;
4788 }
4789
4790 /* mac config is not set */
4791 mac->interface = PHY_INTERFACE_MODE_NA;
4792 mac->speed = SPEED_UNKNOWN;
4793
4794 mac->phylink_config.dev = &eth->netdev[id]->dev;
4795 mac->phylink_config.type = PHYLINK_NETDEV;
4796 mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
4797 MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
4798 mac->phylink_config.lpi_capabilities = MAC_100FD | MAC_1000FD |
4799 MAC_2500FD;
4800 mac->phylink_config.lpi_timer_default = 1000;
4801
4802 /* MT7623 gmac0 is now missing its speed-specific PLL configuration
4803 * in its .mac_config method (since state->speed is not valid there).
4804 * Disable support for MII, GMII and RGMII.
4805 */
4806 if (!mac->hw->soc->disable_pll_modes || mac->id != 0) {
4807 __set_bit(PHY_INTERFACE_MODE_MII,
4808 mac->phylink_config.supported_interfaces);
4809 __set_bit(PHY_INTERFACE_MODE_GMII,
4810 mac->phylink_config.supported_interfaces);
4811
4812 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
4813 phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
4814 }
4815
4816 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
4817 __set_bit(PHY_INTERFACE_MODE_TRGMII,
4818 mac->phylink_config.supported_interfaces);
4819
4820 /* TRGMII is not permitted on MT7621 if using DDR2 */
4821 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) &&
4822 MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII_MT7621_CLK)) {
4823 regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
4824 if (val & SYSCFG_DRAM_TYPE_DDR2)
4825 __clear_bit(PHY_INTERFACE_MODE_TRGMII,
4826 mac->phylink_config.supported_interfaces);
4827 }
4828
4829 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
4830 __set_bit(PHY_INTERFACE_MODE_SGMII,
4831 mac->phylink_config.supported_interfaces);
4832 __set_bit(PHY_INTERFACE_MODE_1000BASEX,
4833 mac->phylink_config.supported_interfaces);
4834 __set_bit(PHY_INTERFACE_MODE_2500BASEX,
4835 mac->phylink_config.supported_interfaces);
4836 }
4837
4838 if (mtk_is_netsys_v3_or_greater(mac->hw) &&
4839 MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW) &&
4840 id == MTK_GMAC1_ID) {
4841 mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
4842 MAC_SYM_PAUSE |
4843 MAC_10000FD;
4844 phy_interface_zero(mac->phylink_config.supported_interfaces);
4845 __set_bit(PHY_INTERFACE_MODE_INTERNAL,
4846 mac->phylink_config.supported_interfaces);
4847 }
4848
4849 phylink = phylink_create(&mac->phylink_config,
4850 of_fwnode_handle(mac->of_node),
4851 phy_mode, &mtk_phylink_ops);
4852 if (IS_ERR(phylink)) {
4853 err = PTR_ERR(phylink);
4854 goto free_netdev;
4855 }
4856
4857 mac->phylink = phylink;
4858
4859 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_2P5GPHY) &&
4860 id == MTK_GMAC2_ID)
4861 __set_bit(PHY_INTERFACE_MODE_INTERNAL,
4862 mac->phylink_config.supported_interfaces);
4863
4864 SET_NETDEV_DEV(eth->netdev[id], eth->dev);
4865 eth->netdev[id]->watchdog_timeo = 5 * HZ;
4866 eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
4867 eth->netdev[id]->base_addr = (unsigned long)eth->base;
4868
4869 eth->netdev[id]->hw_features = eth->soc->hw_features;
4870 if (eth->hwlro)
4871 eth->netdev[id]->hw_features |= NETIF_F_LRO;
4872
4873 eth->netdev[id]->vlan_features = eth->soc->hw_features &
4874 ~NETIF_F_HW_VLAN_CTAG_TX;
4875 eth->netdev[id]->features |= eth->soc->hw_features;
4876 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
4877
4878 eth->netdev[id]->irq = eth->irq[0];
4879 eth->netdev[id]->dev.of_node = np;
4880
4881 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4882 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
4883 else
4884 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
4885
4886 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
4887 mac->device_notifier.notifier_call = mtk_device_event;
4888 register_netdevice_notifier(&mac->device_notifier);
4889 }
4890
4891 if (mtk_page_pool_enabled(eth))
4892 eth->netdev[id]->xdp_features = NETDEV_XDP_ACT_BASIC |
4893 NETDEV_XDP_ACT_REDIRECT |
4894 NETDEV_XDP_ACT_NDO_XMIT |
4895 NETDEV_XDP_ACT_NDO_XMIT_SG;
4896
4897 return 0;
4898
4899 free_netdev:
4900 free_netdev(eth->netdev[id]);
4901 return err;
4902 }
4903
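/* Rebind the device used for DMA mappings (called e.g. by the WED code):
 * temporarily close every netdev that is up, swap dma_dev and reopen them.
 */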
4904 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
4905 {
4906 struct net_device *dev, *tmp;
4907 LIST_HEAD(dev_list);
4908 int i;
4909
4910 rtnl_lock();
4911
4912 for (i = 0; i < MTK_MAX_DEVS; i++) {
4913 dev = eth->netdev[i];
4914
4915 if (!dev || !(dev->flags & IFF_UP))
4916 continue;
4917
4918 list_add_tail(&dev->close_list, &dev_list);
4919 }
4920
4921 dev_close_many(&dev_list, false);
4922
4923 eth->dma_dev = dma_dev;
4924
4925 list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
4926 list_del_init(&dev->close_list);
4927 dev_open(dev, NULL);
4928 }
4929
4930 rtnl_unlock();
4931 }
4932
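/* Create one LynxI PCS instance per "mediatek,sgmiisys" phandle, honouring
 * the optional "mediatek,pnswap" polarity-swap property.
 */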
4933 static int mtk_sgmii_init(struct mtk_eth *eth)
4934 {
4935 struct device_node *np;
4936 struct regmap *regmap;
4937 u32 flags;
4938 int i;
4939
4940 for (i = 0; i < MTK_MAX_DEVS; i++) {
4941 np = of_parse_phandle(eth->dev->of_node, "mediatek,sgmiisys", i);
4942 if (!np)
4943 break;
4944
4945 regmap = syscon_node_to_regmap(np);
4946 flags = 0;
4947 if (of_property_read_bool(np, "mediatek,pnswap"))
4948 flags |= MTK_SGMII_FLAG_PN_SWAP;
4949
4950 of_node_put(np);
4951
4952 if (IS_ERR(regmap))
4953 return PTR_ERR(regmap);
4954
4955 eth->sgmii_pcs[i] = mtk_pcs_lynxi_create(eth->dev, regmap,
4956 eth->soc->ana_rgc3,
4957 flags);
4958 }
4959
4960 return 0;
4961 }
4962
4963 static int mtk_probe(struct platform_device *pdev)
4964 {
4965 struct resource *res = NULL, *res_sram;
4966 struct device_node *mac_np;
4967 struct mtk_eth *eth;
4968 int err, i;
4969
4970 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
4971 if (!eth)
4972 return -ENOMEM;
4973
4974 eth->soc = of_device_get_match_data(&pdev->dev);
4975
4976 eth->dev = &pdev->dev;
4977 eth->dma_dev = &pdev->dev;
4978 eth->base = devm_platform_ioremap_resource(pdev, 0);
4979 if (IS_ERR(eth->base))
4980 return PTR_ERR(eth->base);
4981
4982 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4983 eth->ip_align = NET_IP_ALIGN;
4984
4985 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
4986 /* SRAM is actual memory and supports transparent access just like DRAM.
4987 * Hence we don't require __iomem being set and don't need to use accessor
4988 * functions to read from or write to SRAM.
4989 */
4990 if (mtk_is_netsys_v3_or_greater(eth)) {
4991 eth->sram_base = (void __force *)devm_platform_ioremap_resource(pdev, 1);
4992 if (IS_ERR(eth->sram_base))
4993 return PTR_ERR(eth->sram_base);
4994 } else {
4995 eth->sram_base = (void __force *)eth->base + MTK_ETH_SRAM_OFFSET;
4996 }
4997 }
4998
4999 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
5000 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
5001 if (!err)
5002 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
5003
5004 if (err) {
5005 dev_err(&pdev->dev, "Wrong DMA config\n");
5006 return -EINVAL;
5007 }
5008 }
5009
5010 spin_lock_init(&eth->page_lock);
5011 spin_lock_init(&eth->tx_irq_lock);
5012 spin_lock_init(&eth->rx_irq_lock);
5013 spin_lock_init(&eth->dim_lock);
5014
5015 eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5016 INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
5017 INIT_DELAYED_WORK(&eth->reset.monitor_work, mtk_hw_reset_monitor_work);
5018
5019 eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5020 INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);
5021
5022 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
5023 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
5024 "mediatek,ethsys");
5025 if (IS_ERR(eth->ethsys)) {
5026 dev_err(&pdev->dev, "no ethsys regmap found\n");
5027 return PTR_ERR(eth->ethsys);
5028 }
5029 }
5030
5031 if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
5032 eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
5033 "mediatek,infracfg");
5034 if (IS_ERR(eth->infra)) {
5035 dev_err(&pdev->dev, "no infracfg regmap found\n");
5036 return PTR_ERR(eth->infra);
5037 }
5038 }
5039
5040 if (of_dma_is_coherent(pdev->dev.of_node)) {
5041 struct regmap *cci;
5042
5043 cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
5044 "cci-control-port");
5045 /* enable CPU/bus coherency */
5046 if (!IS_ERR(cci))
5047 regmap_write(cci, 0, 3);
5048 }
5049
5050 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
5051 err = mtk_sgmii_init(eth);
5052
5053 if (err)
5054 return err;
5055 }
5056
5057 if (eth->soc->required_pctl) {
5058 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
5059 "mediatek,pctl");
5060 if (IS_ERR(eth->pctl)) {
5061 dev_err(&pdev->dev, "no pctl regmap found\n");
5062 err = PTR_ERR(eth->pctl);
5063 goto err_destroy_sgmii;
5064 }
5065 }
5066
5067 if (mtk_is_netsys_v2_or_greater(eth)) {
5068 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
5069 if (!res) {
5070 err = -EINVAL;
5071 goto err_destroy_sgmii;
5072 }
5073 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
5074 if (mtk_is_netsys_v3_or_greater(eth)) {
5075 res_sram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
5076 if (!res_sram) {
5077 err = -EINVAL;
5078 goto err_destroy_sgmii;
5079 }
5080 eth->phy_scratch_ring = res_sram->start;
5081 } else {
5082 eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
5083 }
5084 }
5085 }
5086
5087 if (eth->soc->offload_version) {
5088 for (i = 0;; i++) {
5089 struct device_node *np;
5090 phys_addr_t wdma_phy;
5091 u32 wdma_base;
5092
5093 if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
5094 break;
5095
5096 np = of_parse_phandle(pdev->dev.of_node,
5097 "mediatek,wed", i);
5098 if (!np)
5099 break;
5100
5101 wdma_base = eth->soc->reg_map->wdma_base[i];
5102 wdma_phy = res ? res->start + wdma_base : 0;
5103 mtk_wed_add_hw(np, eth, eth->base + wdma_base,
5104 wdma_phy, i);
5105 }
5106 }
5107
5108 for (i = 0; i < 3; i++) {
5109 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
5110 eth->irq[i] = eth->irq[0];
5111 else
5112 eth->irq[i] = platform_get_irq(pdev, i);
5113 if (eth->irq[i] < 0) {
5114 dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
5115 err = -ENXIO;
5116 goto err_wed_exit;
5117 }
5118 }
5119 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
5120 eth->clks[i] = devm_clk_get(eth->dev,
5121 mtk_clks_source_name[i]);
5122 if (IS_ERR(eth->clks[i])) {
5123 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) {
5124 err = -EPROBE_DEFER;
5125 goto err_wed_exit;
5126 }
5127 if (eth->soc->required_clks & BIT(i)) {
5128 dev_err(&pdev->dev, "clock %s not found\n",
5129 mtk_clks_source_name[i]);
5130 err = -EINVAL;
5131 goto err_wed_exit;
5132 }
5133 eth->clks[i] = NULL;
5134 }
5135 }
5136
5137 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
5138 INIT_WORK(&eth->pending_work, mtk_pending_work);
5139
5140 err = mtk_hw_init(eth, false);
5141 if (err)
5142 goto err_wed_exit;
5143
5144 eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
5145
5146 for_each_child_of_node(pdev->dev.of_node, mac_np) {
5147 if (!of_device_is_compatible(mac_np,
5148 "mediatek,eth-mac"))
5149 continue;
5150
5151 if (!of_device_is_available(mac_np))
5152 continue;
5153
5154 err = mtk_add_mac(eth, mac_np);
5155 if (err) {
5156 of_node_put(mac_np);
5157 goto err_deinit_hw;
5158 }
5159 }
5160
5161 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
5162 err = devm_request_irq(eth->dev, eth->irq[0],
5163 mtk_handle_irq, 0,
5164 dev_name(eth->dev), eth);
5165 } else {
5166 err = devm_request_irq(eth->dev, eth->irq[1],
5167 mtk_handle_irq_tx, 0,
5168 dev_name(eth->dev), eth);
5169 if (err)
5170 goto err_free_dev;
5171
5172 err = devm_request_irq(eth->dev, eth->irq[2],
5173 mtk_handle_irq_rx, 0,
5174 dev_name(eth->dev), eth);
5175 }
5176 if (err)
5177 goto err_free_dev;
5178
5179 /* No MT7628/88 support yet */
5180 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
5181 err = mtk_mdio_init(eth);
5182 if (err)
5183 goto err_free_dev;
5184 }
5185
5186 if (eth->soc->offload_version) {
5187 u8 ppe_num = eth->soc->ppe_num;
5188
5189 ppe_num = min_t(u8, ARRAY_SIZE(eth->ppe), ppe_num);
5190 for (i = 0; i < ppe_num; i++) {
5191 u32 ppe_addr = eth->soc->reg_map->ppe_base;
5192
5193 ppe_addr += (i == 2 ? 0xc00 : i * 0x400);
5194 eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i);
5195
5196 if (!eth->ppe[i]) {
5197 err = -ENOMEM;
5198 goto err_deinit_ppe;
5199 }
5200 err = mtk_eth_offload_init(eth, i);
5201
5202 if (err)
5203 goto err_deinit_ppe;
5204 }
5205 }
5206
5207 for (i = 0; i < MTK_MAX_DEVS; i++) {
5208 if (!eth->netdev[i])
5209 continue;
5210
5211 err = register_netdev(eth->netdev[i]);
5212 if (err) {
5213 dev_err(eth->dev, "error bringing up device\n");
5214 goto err_deinit_ppe;
5215 } else
5216 netif_info(eth, probe, eth->netdev[i],
5217 "mediatek frame engine at 0x%08lx, irq %d\n",
5218 eth->netdev[i]->base_addr, eth->irq[0]);
5219 }
5220
5221 /* we run 2 devices on the same DMA ring so we need a dummy device
5222 * for NAPI to work
5223 */
5224 eth->dummy_dev = alloc_netdev_dummy(0);
5225 if (!eth->dummy_dev) {
5226 err = -ENOMEM;
5227 dev_err(eth->dev, "failed to allocate dummy device\n");
5228 goto err_unreg_netdev;
5229 }
5230 netif_napi_add(eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
5231 netif_napi_add(eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
5232
5233 platform_set_drvdata(pdev, eth);
5234 schedule_delayed_work(&eth->reset.monitor_work,
5235 MTK_DMA_MONITOR_TIMEOUT);
5236
5237 return 0;
5238
5239 err_unreg_netdev:
5240 mtk_unreg_dev(eth);
5241 err_deinit_ppe:
5242 mtk_ppe_deinit(eth);
5243 mtk_mdio_cleanup(eth);
5244 err_free_dev:
5245 mtk_free_dev(eth);
5246 err_deinit_hw:
5247 mtk_hw_deinit(eth);
5248 err_wed_exit:
5249 mtk_wed_exit();
5250 err_destroy_sgmii:
5251 mtk_sgmii_destroy(eth);
5252
5253 return err;
5254 }
5255
5256 static void mtk_remove(struct platform_device *pdev)
5257 {
5258 struct mtk_eth *eth = platform_get_drvdata(pdev);
5259 struct mtk_mac *mac;
5260 int i;
5261
5262 /* stop all devices to make sure that dma is properly shut down */
5263 for (i = 0; i < MTK_MAX_DEVS; i++) {
5264 if (!eth->netdev[i])
5265 continue;
5266 mtk_stop(eth->netdev[i]);
5267 mac = netdev_priv(eth->netdev[i]);
5268 phylink_disconnect_phy(mac->phylink);
5269 }
5270
5271 mtk_wed_exit();
5272 mtk_hw_deinit(eth);
5273
5274 netif_napi_del(&eth->tx_napi);
5275 netif_napi_del(&eth->rx_napi);
5276 mtk_cleanup(eth);
5277 free_netdev(eth->dummy_dev);
5278 mtk_mdio_cleanup(eth);
5279 }
5280
5281 static const struct mtk_soc_data mt2701_data = {
5282 .reg_map = &mtk_reg_map,
5283 .caps = MT7623_CAPS | MTK_HWLRO,
5284 .hw_features = MTK_HW_FEATURES,
5285 .required_clks = MT7623_CLKS_BITMAP,
5286 .required_pctl = true,
5287 .version = 1,
5288 .tx = {
5289 .desc_size = sizeof(struct mtk_tx_dma),
5290 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5291 .dma_len_offset = 16,
5292 .dma_size = MTK_DMA_SIZE(2K),
5293 .fq_dma_size = MTK_DMA_SIZE(2K),
5294 },
5295 .rx = {
5296 .desc_size = sizeof(struct mtk_rx_dma),
5297 .irq_done_mask = MTK_RX_DONE_INT,
5298 .dma_l4_valid = RX_DMA_L4_VALID,
5299 .dma_size = MTK_DMA_SIZE(2K),
5300 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5301 .dma_len_offset = 16,
5302 },
5303 };
5304
5305 static const struct mtk_soc_data mt7621_data = {
5306 .reg_map = &mtk_reg_map,
5307 .caps = MT7621_CAPS,
5308 .hw_features = MTK_HW_FEATURES,
5309 .required_clks = MT7621_CLKS_BITMAP,
5310 .required_pctl = false,
5311 .version = 1,
5312 .offload_version = 1,
5313 .ppe_num = 1,
5314 .hash_offset = 2,
5315 .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
5316 .tx = {
5317 .desc_size = sizeof(struct mtk_tx_dma),
5318 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5319 .dma_len_offset = 16,
5320 .dma_size = MTK_DMA_SIZE(2K),
5321 .fq_dma_size = MTK_DMA_SIZE(2K),
5322 },
5323 .rx = {
5324 .desc_size = sizeof(struct mtk_rx_dma),
5325 .irq_done_mask = MTK_RX_DONE_INT,
5326 .dma_l4_valid = RX_DMA_L4_VALID,
5327 .dma_size = MTK_DMA_SIZE(2K),
5328 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5329 .dma_len_offset = 16,
5330 },
5331 };
5332
5333 static const struct mtk_soc_data mt7622_data = {
5334 .reg_map = &mtk_reg_map,
5335 .ana_rgc3 = 0x2028,
5336 .caps = MT7622_CAPS | MTK_HWLRO,
5337 .hw_features = MTK_HW_FEATURES,
5338 .required_clks = MT7622_CLKS_BITMAP,
5339 .required_pctl = false,
5340 .version = 1,
5341 .offload_version = 2,
5342 .ppe_num = 1,
5343 .hash_offset = 2,
5344 .has_accounting = true,
5345 .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
5346 .tx = {
5347 .desc_size = sizeof(struct mtk_tx_dma),
5348 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5349 .dma_len_offset = 16,
5350 .dma_size = MTK_DMA_SIZE(2K),
5351 .fq_dma_size = MTK_DMA_SIZE(2K),
5352 },
5353 .rx = {
5354 .desc_size = sizeof(struct mtk_rx_dma),
5355 .irq_done_mask = MTK_RX_DONE_INT,
5356 .dma_l4_valid = RX_DMA_L4_VALID,
5357 .dma_size = MTK_DMA_SIZE(2K),
5358 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5359 .dma_len_offset = 16,
5360 },
5361 };
5362
5363 static const struct mtk_soc_data mt7623_data = {
5364 .reg_map = &mtk_reg_map,
5365 .caps = MT7623_CAPS | MTK_HWLRO,
5366 .hw_features = MTK_HW_FEATURES,
5367 .required_clks = MT7623_CLKS_BITMAP,
5368 .required_pctl = true,
5369 .version = 1,
5370 .offload_version = 1,
5371 .ppe_num = 1,
5372 .hash_offset = 2,
5373 .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
5374 .disable_pll_modes = true,
5375 .tx = {
5376 .desc_size = sizeof(struct mtk_tx_dma),
5377 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5378 .dma_len_offset = 16,
5379 .dma_size = MTK_DMA_SIZE(2K),
5380 .fq_dma_size = MTK_DMA_SIZE(2K),
5381 },
5382 .rx = {
5383 .desc_size = sizeof(struct mtk_rx_dma),
5384 .irq_done_mask = MTK_RX_DONE_INT,
5385 .dma_l4_valid = RX_DMA_L4_VALID,
5386 .dma_size = MTK_DMA_SIZE(2K),
5387 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5388 .dma_len_offset = 16,
5389 },
5390 };
5391
5392 static const struct mtk_soc_data mt7629_data = {
5393 .reg_map = &mtk_reg_map,
5394 .ana_rgc3 = 0x128,
5395 .caps = MT7629_CAPS | MTK_HWLRO,
5396 .hw_features = MTK_HW_FEATURES,
5397 .required_clks = MT7629_CLKS_BITMAP,
5398 .required_pctl = false,
5399 .has_accounting = true,
5400 .version = 1,
5401 .tx = {
5402 .desc_size = sizeof(struct mtk_tx_dma),
5403 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5404 .dma_len_offset = 16,
5405 .dma_size = MTK_DMA_SIZE(2K),
5406 .fq_dma_size = MTK_DMA_SIZE(2K),
5407 },
5408 .rx = {
5409 .desc_size = sizeof(struct mtk_rx_dma),
5410 .irq_done_mask = MTK_RX_DONE_INT,
5411 .dma_l4_valid = RX_DMA_L4_VALID,
5412 .dma_size = MTK_DMA_SIZE(2K),
5413 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5414 .dma_len_offset = 16,
5415 },
5416 };
5417
5418 static const struct mtk_soc_data mt7981_data = {
5419 .reg_map = &mt7986_reg_map,
5420 .ana_rgc3 = 0x128,
5421 .caps = MT7981_CAPS,
5422 .hw_features = MTK_HW_FEATURES,
5423 .required_clks = MT7981_CLKS_BITMAP,
5424 .required_pctl = false,
5425 .version = 2,
5426 .offload_version = 2,
5427 .ppe_num = 2,
5428 .hash_offset = 4,
5429 .has_accounting = true,
5430 .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
5431 .tx = {
5432 .desc_size = sizeof(struct mtk_tx_dma_v2),
5433 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5434 .dma_len_offset = 8,
5435 .dma_size = MTK_DMA_SIZE(2K),
5436 .fq_dma_size = MTK_DMA_SIZE(2K),
5437 },
5438 .rx = {
5439 .desc_size = sizeof(struct mtk_rx_dma),
5440 .irq_done_mask = MTK_RX_DONE_INT,
5441 .dma_l4_valid = RX_DMA_L4_VALID_V2,
5442 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5443 .dma_len_offset = 16,
5444 .dma_size = MTK_DMA_SIZE(2K),
5445 },
5446 };
5447
5448 static const struct mtk_soc_data mt7986_data = {
5449 .reg_map = &mt7986_reg_map,
5450 .ana_rgc3 = 0x128,
5451 .caps = MT7986_CAPS,
5452 .hw_features = MTK_HW_FEATURES,
5453 .required_clks = MT7986_CLKS_BITMAP,
5454 .required_pctl = false,
5455 .version = 2,
5456 .offload_version = 2,
5457 .ppe_num = 2,
5458 .hash_offset = 4,
5459 .has_accounting = true,
5460 .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
5461 .tx = {
5462 .desc_size = sizeof(struct mtk_tx_dma_v2),
5463 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5464 .dma_len_offset = 8,
5465 .dma_size = MTK_DMA_SIZE(2K),
5466 .fq_dma_size = MTK_DMA_SIZE(2K),
5467 },
5468 .rx = {
5469 .desc_size = sizeof(struct mtk_rx_dma),
5470 .irq_done_mask = MTK_RX_DONE_INT,
5471 .dma_l4_valid = RX_DMA_L4_VALID_V2,
5472 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5473 .dma_len_offset = 16,
5474 .dma_size = MTK_DMA_SIZE(2K),
5475 },
5476 };
5477
5478 static const struct mtk_soc_data mt7988_data = {
5479 .reg_map = &mt7988_reg_map,
5480 .ana_rgc3 = 0x128,
5481 .caps = MT7988_CAPS,
5482 .hw_features = MTK_HW_FEATURES,
5483 .required_clks = MT7988_CLKS_BITMAP,
5484 .required_pctl = false,
5485 .version = 3,
5486 .offload_version = 2,
5487 .ppe_num = 3,
5488 .hash_offset = 4,
5489 .has_accounting = true,
5490 .foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
5491 .tx = {
5492 .desc_size = sizeof(struct mtk_tx_dma_v2),
5493 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5494 .dma_len_offset = 8,
5495 .dma_size = MTK_DMA_SIZE(2K),
5496 .fq_dma_size = MTK_DMA_SIZE(4K),
5497 },
5498 .rx = {
5499 .desc_size = sizeof(struct mtk_rx_dma_v2),
5500 .irq_done_mask = MTK_RX_DONE_INT_V2,
5501 .dma_l4_valid = RX_DMA_L4_VALID_V2,
5502 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5503 .dma_len_offset = 8,
5504 .dma_size = MTK_DMA_SIZE(2K),
5505 },
5506 };
5507
5508 static const struct mtk_soc_data rt5350_data = {
5509 .reg_map = &mt7628_reg_map,
5510 .caps = MT7628_CAPS,
5511 .hw_features = MTK_HW_FEATURES_MT7628,
5512 .required_clks = MT7628_CLKS_BITMAP,
5513 .required_pctl = false,
5514 .version = 1,
5515 .tx = {
5516 .desc_size = sizeof(struct mtk_tx_dma),
5517 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5518 .dma_len_offset = 16,
5519 .dma_size = MTK_DMA_SIZE(2K),
5520 },
5521 .rx = {
5522 .desc_size = sizeof(struct mtk_rx_dma),
5523 .irq_done_mask = MTK_RX_DONE_INT,
5524 .dma_l4_valid = RX_DMA_L4_VALID_PDMA,
5525 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5526 .dma_len_offset = 16,
5527 .dma_size = MTK_DMA_SIZE(2K),
5528 },
5529 };
5530
5531 const struct of_device_id of_mtk_match[] = {
5532 { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data },
5533 { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
5534 { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data },
5535 { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data },
5536 { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data },
5537 { .compatible = "mediatek,mt7981-eth", .data = &mt7981_data },
5538 { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data },
5539 { .compatible = "mediatek,mt7988-eth", .data = &mt7988_data },
5540 { .compatible = "ralink,rt5350-eth", .data = &rt5350_data },
5541 {},
5542 };
5543 MODULE_DEVICE_TABLE(of, of_mtk_match);
5544
5545 static struct platform_driver mtk_driver = {
5546 .probe = mtk_probe,
5547 .remove = mtk_remove,
5548 .driver = {
5549 .name = "mtk_soc_eth",
5550 .of_match_table = of_mtk_match,
5551 },
5552 };
5553
5554 module_platform_driver(mtk_driver);
5555
5556 MODULE_LICENSE("GPL");
5557 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
5558 MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
5559