xref: /linux/drivers/net/ethernet/mediatek/mtk_eth_soc.c (revision 2151003e773c7e7dba4d64bed4bfc483681b5f6a)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *
4  *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
5  *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
6  *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
7  */
8 
9 #include <linux/of.h>
10 #include <linux/of_mdio.h>
11 #include <linux/of_net.h>
12 #include <linux/of_address.h>
13 #include <linux/mfd/syscon.h>
14 #include <linux/platform_device.h>
15 #include <linux/regmap.h>
16 #include <linux/clk.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/if_vlan.h>
19 #include <linux/reset.h>
20 #include <linux/tcp.h>
21 #include <linux/interrupt.h>
22 #include <linux/pinctrl/devinfo.h>
23 #include <linux/phylink.h>
24 #include <linux/pcs/pcs-mtk-lynxi.h>
25 #include <linux/jhash.h>
26 #include <linux/bitfield.h>
27 #include <net/dsa.h>
28 #include <net/dst_metadata.h>
29 #include <net/page_pool/helpers.h>
30 
31 #include "mtk_eth_soc.h"
32 #include "mtk_wed.h"
33 
34 static int mtk_msg_level = -1;
35 module_param_named(msg_level, mtk_msg_level, int, 0);
36 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
37 
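/* Each entry below records a counter's name together with its u64-word
 * offset inside struct mtk_hw_stats, so the ethtool stats code can pick
 * the values up generically.
 */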
38 #define MTK_ETHTOOL_STAT(x) { #x, \
39 			      offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
40 
41 #define MTK_ETHTOOL_XDP_STAT(x) { #x, \
42 				  offsetof(struct mtk_hw_stats, xdp_stats.x) / \
43 				  sizeof(u64) }
44 
45 static const struct mtk_reg_map mtk_reg_map = {
46 	.tx_irq_mask		= 0x1a1c,
47 	.tx_irq_status		= 0x1a18,
48 	.pdma = {
49 		.rx_ptr		= 0x0900,
50 		.rx_cnt_cfg	= 0x0904,
51 		.pcrx_ptr	= 0x0908,
52 		.glo_cfg	= 0x0a04,
53 		.rst_idx	= 0x0a08,
54 		.delay_irq	= 0x0a0c,
55 		.irq_status	= 0x0a20,
56 		.irq_mask	= 0x0a28,
57 		.adma_rx_dbg0	= 0x0a38,
58 		.int_grp	= 0x0a50,
59 	},
60 	.qdma = {
61 		.qtx_cfg	= 0x1800,
62 		.qtx_sch	= 0x1804,
63 		.rx_ptr		= 0x1900,
64 		.rx_cnt_cfg	= 0x1904,
65 		.qcrx_ptr	= 0x1908,
66 		.glo_cfg	= 0x1a04,
67 		.rst_idx	= 0x1a08,
68 		.delay_irq	= 0x1a0c,
69 		.fc_th		= 0x1a10,
70 		.tx_sch_rate	= 0x1a14,
71 		.int_grp	= 0x1a20,
72 		.hred		= 0x1a44,
73 		.ctx_ptr	= 0x1b00,
74 		.dtx_ptr	= 0x1b04,
75 		.crx_ptr	= 0x1b10,
76 		.drx_ptr	= 0x1b14,
77 		.fq_head	= 0x1b20,
78 		.fq_tail	= 0x1b24,
79 		.fq_count	= 0x1b28,
80 		.fq_blen	= 0x1b2c,
81 	},
82 	.gdm1_cnt		= 0x2400,
83 	.gdma_to_ppe	= {
84 		[0]		= 0x4444,
85 	},
86 	.ppe_base		= 0x0c00,
87 	.wdma_base = {
88 		[0]		= 0x2800,
89 		[1]		= 0x2c00,
90 	},
91 	.pse_iq_sta		= 0x0110,
92 	.pse_oq_sta		= 0x0118,
93 };
94 
95 static const struct mtk_reg_map mt7628_reg_map = {
96 	.tx_irq_mask		= 0x0a28,
97 	.tx_irq_status		= 0x0a20,
98 	.pdma = {
99 		.rx_ptr		= 0x0900,
100 		.rx_cnt_cfg	= 0x0904,
101 		.pcrx_ptr	= 0x0908,
102 		.glo_cfg	= 0x0a04,
103 		.rst_idx	= 0x0a08,
104 		.delay_irq	= 0x0a0c,
105 		.irq_status	= 0x0a20,
106 		.irq_mask	= 0x0a28,
107 		.int_grp	= 0x0a50,
108 	},
109 };
110 
111 static const struct mtk_reg_map mt7986_reg_map = {
112 	.tx_irq_mask		= 0x461c,
113 	.tx_irq_status		= 0x4618,
114 	.pdma = {
115 		.rx_ptr		= 0x4100,
116 		.rx_cnt_cfg	= 0x4104,
117 		.pcrx_ptr	= 0x4108,
118 		.glo_cfg	= 0x4204,
119 		.rst_idx	= 0x4208,
120 		.delay_irq	= 0x420c,
121 		.irq_status	= 0x4220,
122 		.irq_mask	= 0x4228,
123 		.adma_rx_dbg0	= 0x4238,
124 		.int_grp	= 0x4250,
125 	},
126 	.qdma = {
127 		.qtx_cfg	= 0x4400,
128 		.qtx_sch	= 0x4404,
129 		.rx_ptr		= 0x4500,
130 		.rx_cnt_cfg	= 0x4504,
131 		.qcrx_ptr	= 0x4508,
132 		.glo_cfg	= 0x4604,
133 		.rst_idx	= 0x4608,
134 		.delay_irq	= 0x460c,
135 		.fc_th		= 0x4610,
136 		.int_grp	= 0x4620,
137 		.hred		= 0x4644,
138 		.ctx_ptr	= 0x4700,
139 		.dtx_ptr	= 0x4704,
140 		.crx_ptr	= 0x4710,
141 		.drx_ptr	= 0x4714,
142 		.fq_head	= 0x4720,
143 		.fq_tail	= 0x4724,
144 		.fq_count	= 0x4728,
145 		.fq_blen	= 0x472c,
146 		.tx_sch_rate	= 0x4798,
147 	},
148 	.gdm1_cnt		= 0x1c00,
149 	.gdma_to_ppe	= {
150 		[0]		= 0x3333,
151 		[1]		= 0x4444,
152 	},
153 	.ppe_base		= 0x2000,
154 	.wdma_base = {
155 		[0]		= 0x4800,
156 		[1]		= 0x4c00,
157 	},
158 	.pse_iq_sta		= 0x0180,
159 	.pse_oq_sta		= 0x01a0,
160 };
161 
162 static const struct mtk_reg_map mt7988_reg_map = {
163 	.tx_irq_mask		= 0x461c,
164 	.tx_irq_status		= 0x4618,
165 	.pdma = {
166 		.rx_ptr		= 0x6900,
167 		.rx_cnt_cfg	= 0x6904,
168 		.pcrx_ptr	= 0x6908,
169 		.glo_cfg	= 0x6a04,
170 		.rst_idx	= 0x6a08,
171 		.delay_irq	= 0x6a0c,
172 		.irq_status	= 0x6a20,
173 		.irq_mask	= 0x6a28,
174 		.adma_rx_dbg0	= 0x6a38,
175 		.int_grp	= 0x6a50,
176 	},
177 	.qdma = {
178 		.qtx_cfg	= 0x4400,
179 		.qtx_sch	= 0x4404,
180 		.rx_ptr		= 0x4500,
181 		.rx_cnt_cfg	= 0x4504,
182 		.qcrx_ptr	= 0x4508,
183 		.glo_cfg	= 0x4604,
184 		.rst_idx	= 0x4608,
185 		.delay_irq	= 0x460c,
186 		.fc_th		= 0x4610,
187 		.int_grp	= 0x4620,
188 		.hred		= 0x4644,
189 		.ctx_ptr	= 0x4700,
190 		.dtx_ptr	= 0x4704,
191 		.crx_ptr	= 0x4710,
192 		.drx_ptr	= 0x4714,
193 		.fq_head	= 0x4720,
194 		.fq_tail	= 0x4724,
195 		.fq_count	= 0x4728,
196 		.fq_blen	= 0x472c,
197 		.tx_sch_rate	= 0x4798,
198 	},
199 	.gdm1_cnt		= 0x1c00,
200 	.gdma_to_ppe	= {
201 		[0]		= 0x3333,
202 		[1]		= 0x4444,
203 		[2]		= 0xcccc,
204 	},
205 	.ppe_base		= 0x2000,
206 	.wdma_base = {
207 		[0]		= 0x4800,
208 		[1]		= 0x4c00,
209 		[2]		= 0x5000,
210 	},
211 	.pse_iq_sta		= 0x0180,
212 	.pse_oq_sta		= 0x01a0,
213 };
214 
215 /* strings used by ethtool */
216 static const struct mtk_ethtool_stats {
217 	char str[ETH_GSTRING_LEN];
218 	u32 offset;
219 } mtk_ethtool_stats[] = {
220 	MTK_ETHTOOL_STAT(tx_bytes),
221 	MTK_ETHTOOL_STAT(tx_packets),
222 	MTK_ETHTOOL_STAT(tx_skip),
223 	MTK_ETHTOOL_STAT(tx_collisions),
224 	MTK_ETHTOOL_STAT(rx_bytes),
225 	MTK_ETHTOOL_STAT(rx_packets),
226 	MTK_ETHTOOL_STAT(rx_overflow),
227 	MTK_ETHTOOL_STAT(rx_fcs_errors),
228 	MTK_ETHTOOL_STAT(rx_short_errors),
229 	MTK_ETHTOOL_STAT(rx_long_errors),
230 	MTK_ETHTOOL_STAT(rx_checksum_errors),
231 	MTK_ETHTOOL_STAT(rx_flow_control_packets),
232 	MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect),
233 	MTK_ETHTOOL_XDP_STAT(rx_xdp_pass),
234 	MTK_ETHTOOL_XDP_STAT(rx_xdp_drop),
235 	MTK_ETHTOOL_XDP_STAT(rx_xdp_tx),
236 	MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors),
237 	MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit),
238 	MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors),
239 };
240 
241 static const char * const mtk_clks_source_name[] = {
242 	"ethif",
243 	"sgmiitop",
244 	"esw",
245 	"gp0",
246 	"gp1",
247 	"gp2",
248 	"gp3",
249 	"xgp1",
250 	"xgp2",
251 	"xgp3",
252 	"crypto",
253 	"fe",
254 	"trgpll",
255 	"sgmii_tx250m",
256 	"sgmii_rx250m",
257 	"sgmii_cdr_ref",
258 	"sgmii_cdr_fb",
259 	"sgmii2_tx250m",
260 	"sgmii2_rx250m",
261 	"sgmii2_cdr_ref",
262 	"sgmii2_cdr_fb",
263 	"sgmii_ck",
264 	"eth2pll",
265 	"wocpu0",
266 	"wocpu1",
267 	"netsys0",
268 	"netsys1",
269 	"ethwarp_wocpu2",
270 	"ethwarp_wocpu1",
271 	"ethwarp_wocpu0",
272 	"top_usxgmii0_sel",
273 	"top_usxgmii1_sel",
274 	"top_sgm0_sel",
275 	"top_sgm1_sel",
276 	"top_xfi_phy0_xtal_sel",
277 	"top_xfi_phy1_xtal_sel",
278 	"top_eth_gmii_sel",
279 	"top_eth_refck_50m_sel",
280 	"top_eth_sys_200m_sel",
281 	"top_eth_sys_sel",
282 	"top_eth_xgmii_sel",
283 	"top_eth_mii_sel",
284 	"top_netsys_sel",
285 	"top_netsys_500m_sel",
286 	"top_netsys_pao_2x_sel",
287 	"top_netsys_sync_250m_sel",
288 	"top_netsys_ppefb_250m_sel",
289 	"top_netsys_warp_sel",
290 };
291 
292 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
293 {
294 	__raw_writel(val, eth->base + reg);
295 }
296 
297 u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
298 {
299 	return __raw_readl(eth->base + reg);
300 }
301 
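/* Read-modify-write helper: clear the bits in @mask, set the bits in @set
 * and write the result back to @reg.
 */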
302 u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg)
303 {
304 	u32 val;
305 
306 	val = mtk_r32(eth, reg);
307 	val &= ~mask;
308 	val |= set;
309 	mtk_w32(eth, val, reg);
310 	return val;
311 }
312 
313 static int mtk_mdio_busy_wait(struct mtk_eth *eth)
314 {
315 	unsigned long t_start = jiffies;
316 
317 	while (1) {
318 		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
319 			return 0;
320 		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
321 			break;
322 		cond_resched();
323 	}
324 
325 	dev_err(eth->dev, "mdio: MDIO timeout\n");
326 	return -ETIMEDOUT;
327 }
328 
329 static int _mtk_mdio_write_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
330 			       u32 write_data)
331 {
332 	int ret;
333 
334 	ret = mtk_mdio_busy_wait(eth);
335 	if (ret < 0)
336 		return ret;
337 
338 	mtk_w32(eth, PHY_IAC_ACCESS |
339 		PHY_IAC_START_C22 |
340 		PHY_IAC_CMD_WRITE |
341 		PHY_IAC_REG(phy_reg) |
342 		PHY_IAC_ADDR(phy_addr) |
343 		PHY_IAC_DATA(write_data),
344 		MTK_PHY_IAC);
345 
346 	ret = mtk_mdio_busy_wait(eth);
347 	if (ret < 0)
348 		return ret;
349 
350 	return 0;
351 }
352 
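/* Clause 45 accesses are indirect: a first IAC transaction (address cycle)
 * latches the target register within the given MMD, a second transaction
 * performs the actual read or write.
 */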
353 static int _mtk_mdio_write_c45(struct mtk_eth *eth, u32 phy_addr,
354 			       u32 devad, u32 phy_reg, u32 write_data)
355 {
356 	int ret;
357 
358 	ret = mtk_mdio_busy_wait(eth);
359 	if (ret < 0)
360 		return ret;
361 
362 	mtk_w32(eth, PHY_IAC_ACCESS |
363 		PHY_IAC_START_C45 |
364 		PHY_IAC_CMD_C45_ADDR |
365 		PHY_IAC_REG(devad) |
366 		PHY_IAC_ADDR(phy_addr) |
367 		PHY_IAC_DATA(phy_reg),
368 		MTK_PHY_IAC);
369 
370 	ret = mtk_mdio_busy_wait(eth);
371 	if (ret < 0)
372 		return ret;
373 
374 	mtk_w32(eth, PHY_IAC_ACCESS |
375 		PHY_IAC_START_C45 |
376 		PHY_IAC_CMD_WRITE |
377 		PHY_IAC_REG(devad) |
378 		PHY_IAC_ADDR(phy_addr) |
379 		PHY_IAC_DATA(write_data),
380 		MTK_PHY_IAC);
381 
382 	ret = mtk_mdio_busy_wait(eth);
383 	if (ret < 0)
384 		return ret;
385 
386 	return 0;
387 }
388 
389 static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
390 {
391 	int ret;
392 
393 	ret = mtk_mdio_busy_wait(eth);
394 	if (ret < 0)
395 		return ret;
396 
397 	mtk_w32(eth, PHY_IAC_ACCESS |
398 		PHY_IAC_START_C22 |
399 		PHY_IAC_CMD_C22_READ |
400 		PHY_IAC_REG(phy_reg) |
401 		PHY_IAC_ADDR(phy_addr),
402 		MTK_PHY_IAC);
403 
404 	ret = mtk_mdio_busy_wait(eth);
405 	if (ret < 0)
406 		return ret;
407 
408 	return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
409 }
410 
411 static int _mtk_mdio_read_c45(struct mtk_eth *eth, u32 phy_addr,
412 			      u32 devad, u32 phy_reg)
413 {
414 	int ret;
415 
416 	ret = mtk_mdio_busy_wait(eth);
417 	if (ret < 0)
418 		return ret;
419 
420 	mtk_w32(eth, PHY_IAC_ACCESS |
421 		PHY_IAC_START_C45 |
422 		PHY_IAC_CMD_C45_ADDR |
423 		PHY_IAC_REG(devad) |
424 		PHY_IAC_ADDR(phy_addr) |
425 		PHY_IAC_DATA(phy_reg),
426 		MTK_PHY_IAC);
427 
428 	ret = mtk_mdio_busy_wait(eth);
429 	if (ret < 0)
430 		return ret;
431 
432 	mtk_w32(eth, PHY_IAC_ACCESS |
433 		PHY_IAC_START_C45 |
434 		PHY_IAC_CMD_C45_READ |
435 		PHY_IAC_REG(devad) |
436 		PHY_IAC_ADDR(phy_addr),
437 		MTK_PHY_IAC);
438 
439 	ret = mtk_mdio_busy_wait(eth);
440 	if (ret < 0)
441 		return ret;
442 
443 	return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
444 }
445 
446 static int mtk_mdio_write_c22(struct mii_bus *bus, int phy_addr,
447 			      int phy_reg, u16 val)
448 {
449 	struct mtk_eth *eth = bus->priv;
450 
451 	return _mtk_mdio_write_c22(eth, phy_addr, phy_reg, val);
452 }
453 
454 static int mtk_mdio_write_c45(struct mii_bus *bus, int phy_addr,
455 			      int devad, int phy_reg, u16 val)
456 {
457 	struct mtk_eth *eth = bus->priv;
458 
459 	return _mtk_mdio_write_c45(eth, phy_addr, devad, phy_reg, val);
460 }
461 
462 static int mtk_mdio_read_c22(struct mii_bus *bus, int phy_addr, int phy_reg)
463 {
464 	struct mtk_eth *eth = bus->priv;
465 
466 	return _mtk_mdio_read_c22(eth, phy_addr, phy_reg);
467 }
468 
469 static int mtk_mdio_read_c45(struct mii_bus *bus, int phy_addr, int devad,
470 			     int phy_reg)
471 {
472 	struct mtk_eth *eth = bus->priv;
473 
474 	return _mtk_mdio_read_c45(eth, phy_addr, devad, phy_reg);
475 }
476 
477 static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
478 				     phy_interface_t interface)
479 {
480 	u32 val;
481 
482 	val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
483 		ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
484 
485 	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
486 			   ETHSYS_TRGMII_MT7621_MASK, val);
487 
488 	return 0;
489 }
490 
491 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
492 				   phy_interface_t interface)
493 {
494 	int ret;
495 
496 	if (interface == PHY_INTERFACE_MODE_TRGMII) {
497 		mtk_w32(eth, TRGMII_MODE, INTF_MODE);
498 		ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], 500000000);
499 		if (ret)
500 			dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
501 		return;
502 	}
503 
504 	dev_err(eth->dev, "Missing PLL configuration, ethernet may not work\n");
505 }
506 
507 static void mtk_setup_bridge_switch(struct mtk_eth *eth)
508 {
509 	/* Force Port1 XGMAC Link Up */
510 	mtk_m32(eth, 0, MTK_XGMAC_FORCE_LINK(MTK_GMAC1_ID),
511 		MTK_XGMAC_STS(MTK_GMAC1_ID));
512 
513 	/* Adjust GSW bridge IPG to 11 */
514 	mtk_m32(eth, GSWTX_IPG_MASK | GSWRX_IPG_MASK,
515 		(GSW_IPG_11 << GSWTX_IPG_SHIFT) |
516 		(GSW_IPG_11 << GSWRX_IPG_SHIFT),
517 		MTK_GSW_CFG);
518 }
519 
520 static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
521 					      phy_interface_t interface)
522 {
523 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
524 					   phylink_config);
525 	struct mtk_eth *eth = mac->hw;
526 	unsigned int sid;
527 
528 	if (interface == PHY_INTERFACE_MODE_SGMII ||
529 	    phy_interface_mode_is_8023z(interface)) {
530 		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
531 		       0 : mac->id;
532 
533 		return eth->sgmii_pcs[sid];
534 	}
535 
536 	return NULL;
537 }
538 
539 static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
540 			   const struct phylink_link_state *state)
541 {
542 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
543 					   phylink_config);
544 	struct mtk_eth *eth = mac->hw;
545 	int val, ge_mode, err = 0;
546 	u32 i;
547 
548 	/* MT76x8 has no hardware settings for the MAC */
549 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
550 	    mac->interface != state->interface) {
551 		/* Setup soc pin functions */
552 		switch (state->interface) {
553 		case PHY_INTERFACE_MODE_TRGMII:
554 		case PHY_INTERFACE_MODE_RGMII_TXID:
555 		case PHY_INTERFACE_MODE_RGMII_RXID:
556 		case PHY_INTERFACE_MODE_RGMII_ID:
557 		case PHY_INTERFACE_MODE_RGMII:
558 		case PHY_INTERFACE_MODE_MII:
559 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
560 				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
561 				if (err)
562 					goto init_err;
563 			}
564 			break;
565 		case PHY_INTERFACE_MODE_1000BASEX:
566 		case PHY_INTERFACE_MODE_2500BASEX:
567 		case PHY_INTERFACE_MODE_SGMII:
568 			err = mtk_gmac_sgmii_path_setup(eth, mac->id);
569 			if (err)
570 				goto init_err;
571 			break;
572 		case PHY_INTERFACE_MODE_GMII:
573 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
574 				err = mtk_gmac_gephy_path_setup(eth, mac->id);
575 				if (err)
576 					goto init_err;
577 			}
578 			break;
579 		case PHY_INTERFACE_MODE_INTERNAL:
580 			break;
581 		default:
582 			goto err_phy;
583 		}
584 
585 		/* Setup clock for 1st gmac */
586 		if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
587 		    !phy_interface_mode_is_8023z(state->interface) &&
588 		    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
589 			if (MTK_HAS_CAPS(mac->hw->soc->caps,
590 					 MTK_TRGMII_MT7621_CLK)) {
591 				if (mt7621_gmac0_rgmii_adjust(mac->hw,
592 							      state->interface))
593 					goto err_phy;
594 			} else {
595 				mtk_gmac0_rgmii_adjust(mac->hw,
596 						       state->interface);
597 
598 				/* mt7623_pad_clk_setup */
599 				for (i = 0; i < NUM_TRGMII_CTRL; i++)
600 					mtk_w32(mac->hw,
601 						TD_DM_DRVP(8) | TD_DM_DRVN(8),
602 						TRGMII_TD_ODT(i));
603 
604 				/* Assert/release MT7623 RXC reset */
605 				mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
606 					TRGMII_RCK_CTRL);
607 				mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
608 			}
609 		}
610 
611 		switch (state->interface) {
612 		case PHY_INTERFACE_MODE_MII:
613 		case PHY_INTERFACE_MODE_GMII:
614 			ge_mode = 1;
615 			break;
616 		default:
617 			ge_mode = 0;
618 			break;
619 		}
620 
621 		/* put the gmac into the right mode */
622 		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
623 		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
624 		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
625 		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
626 
627 		mac->interface = state->interface;
628 	}
629 
630 	/* SGMII */
631 	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
632 	    phy_interface_mode_is_8023z(state->interface)) {
633 		/* The GMAC-to-SGMII path will be enabled once the SGMIISYS
634 		 * setup is done.
635 		 */
636 		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
637 
638 		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
639 				   SYSCFG0_SGMII_MASK,
640 				   ~(u32)SYSCFG0_SGMII_MASK);
641 
642 		/* Save the syscfg0 value for mac_finish */
643 		mac->syscfg0 = val;
644 	} else if (phylink_autoneg_inband(mode)) {
645 		dev_err(eth->dev,
646 			"In-band mode not supported in non SGMII mode!\n");
647 		return;
648 	}
649 
650 	/* Setup gmac */
651 	if (mtk_is_netsys_v3_or_greater(eth) &&
652 	    mac->interface == PHY_INTERFACE_MODE_INTERNAL) {
653 		mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id));
654 		mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id));
655 
656 		mtk_setup_bridge_switch(eth);
657 	}
658 
659 	return;
660 
661 err_phy:
662 	dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
663 		mac->id, phy_modes(state->interface));
664 	return;
665 
666 init_err:
667 	dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
668 		mac->id, phy_modes(state->interface), err);
669 }
670 
671 static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
672 			  phy_interface_t interface)
673 {
674 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
675 					   phylink_config);
676 	struct mtk_eth *eth = mac->hw;
677 	u32 mcr_cur, mcr_new;
678 
679 	/* Enable SGMII */
680 	if (interface == PHY_INTERFACE_MODE_SGMII ||
681 	    phy_interface_mode_is_8023z(interface))
682 		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
683 				   SYSCFG0_SGMII_MASK, mac->syscfg0);
684 
685 	/* Setup gmac */
686 	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
687 	mcr_new = mcr_cur;
688 	mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
689 		   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_RX_FIFO_CLR_DIS;
690 
691 	/* Only update control register when needed! */
692 	if (mcr_new != mcr_cur)
693 		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
694 
695 	return 0;
696 }
697 
698 static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
699 			      phy_interface_t interface)
700 {
701 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
702 					   phylink_config);
703 	u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
704 
705 	mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK);
706 	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
707 }
708 
709 static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
710 				int speed)
711 {
712 	const struct mtk_soc_data *soc = eth->soc;
713 	u32 ofs, val;
714 
715 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
716 		return;
717 
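	/* The scheduler rate fields appear to encode the rate as
	 * mantissa * 10^exponent in kbit/s, e.g. MAN=1/EXP=4 below is the
	 * 10 Mbit/s minimum and MAN=10/EXP=5 is 1 Gbit/s.
	 */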
718 	val = MTK_QTX_SCH_MIN_RATE_EN |
719 	      /* minimum: 10 Mbps */
720 	      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
721 	      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
722 	      MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
723 	if (mtk_is_netsys_v1(eth))
724 		val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
725 
726 	if (IS_ENABLED(CONFIG_SOC_MT7621)) {
727 		switch (speed) {
728 		case SPEED_10:
729 			val |= MTK_QTX_SCH_MAX_RATE_EN |
730 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
731 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 2) |
732 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
733 			break;
734 		case SPEED_100:
735 			val |= MTK_QTX_SCH_MAX_RATE_EN |
736 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
737 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) |
738 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
739 			break;
740 		case SPEED_1000:
741 			val |= MTK_QTX_SCH_MAX_RATE_EN |
742 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 105) |
743 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
744 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
745 			break;
746 		default:
747 			break;
748 		}
749 	} else {
750 		switch (speed) {
751 		case SPEED_10:
752 			val |= MTK_QTX_SCH_MAX_RATE_EN |
753 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
754 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
755 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
756 			break;
757 		case SPEED_100:
758 			val |= MTK_QTX_SCH_MAX_RATE_EN |
759 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
760 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
761 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
762 			break;
763 		case SPEED_1000:
764 			val |= MTK_QTX_SCH_MAX_RATE_EN |
765 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 10) |
766 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
767 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
768 			break;
769 		default:
770 			break;
771 		}
772 	}
773 
774 	ofs = MTK_QTX_OFFSET * idx;
775 	mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
776 }
777 
778 static void mtk_mac_link_up(struct phylink_config *config,
779 			    struct phy_device *phy,
780 			    unsigned int mode, phy_interface_t interface,
781 			    int speed, int duplex, bool tx_pause, bool rx_pause)
782 {
783 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
784 					   phylink_config);
785 	u32 mcr;
786 
787 	mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
788 	mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
789 		 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
790 		 MAC_MCR_FORCE_RX_FC);
791 
792 	/* Configure speed */
793 	mac->speed = speed;
794 	switch (speed) {
795 	case SPEED_2500:
796 	case SPEED_1000:
797 		mcr |= MAC_MCR_SPEED_1000;
798 		break;
799 	case SPEED_100:
800 		mcr |= MAC_MCR_SPEED_100;
801 		break;
802 	}
803 
804 	/* Configure duplex */
805 	if (duplex == DUPLEX_FULL)
806 		mcr |= MAC_MCR_FORCE_DPX;
807 
808 	/* Configure pause modes - phylink will avoid these for half duplex */
809 	if (tx_pause)
810 		mcr |= MAC_MCR_FORCE_TX_FC;
811 	if (rx_pause)
812 		mcr |= MAC_MCR_FORCE_RX_FC;
813 
814 	mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK;
815 	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
816 }
817 
818 static const struct phylink_mac_ops mtk_phylink_ops = {
819 	.mac_select_pcs = mtk_mac_select_pcs,
820 	.mac_config = mtk_mac_config,
821 	.mac_finish = mtk_mac_finish,
822 	.mac_link_down = mtk_mac_link_down,
823 	.mac_link_up = mtk_mac_link_up,
824 };
825 
826 static int mtk_mdio_init(struct mtk_eth *eth)
827 {
828 	unsigned int max_clk = 2500000, divider;
829 	struct device_node *mii_np;
830 	int ret;
831 	u32 val;
832 
833 	mii_np = of_get_available_child_by_name(eth->dev->of_node, "mdio-bus");
834 	if (!mii_np) {
835 		dev_err(eth->dev, "no %s child node found", "mdio-bus");
836 		return -ENODEV;
837 	}
838 
839 	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
840 	if (!eth->mii_bus) {
841 		ret = -ENOMEM;
842 		goto err_put_node;
843 	}
844 
845 	eth->mii_bus->name = "mdio";
846 	eth->mii_bus->read = mtk_mdio_read_c22;
847 	eth->mii_bus->write = mtk_mdio_write_c22;
848 	eth->mii_bus->read_c45 = mtk_mdio_read_c45;
849 	eth->mii_bus->write_c45 = mtk_mdio_write_c45;
850 	eth->mii_bus->priv = eth;
851 	eth->mii_bus->parent = eth->dev;
852 
853 	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
854 
855 	if (!of_property_read_u32(mii_np, "clock-frequency", &val)) {
856 		if (val > MDC_MAX_FREQ || val < MDC_MAX_FREQ / MDC_MAX_DIVIDER) {
857 			dev_err(eth->dev, "MDIO clock frequency out of range");
858 			ret = -EINVAL;
859 			goto err_put_node;
860 		}
861 		max_clk = val;
862 	}
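	/* Round the divider up so the resulting MDC rate stays at or below
	 * the requested clock-frequency; 63 caps the divider to what the
	 * PPSC_MDC_CFG field can hold.
	 */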
863 	divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
864 
865 	/* Configure MDC Turbo Mode */
866 	if (mtk_is_netsys_v3_or_greater(eth))
867 		mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3);
868 
869 	/* Configure MDC Divider */
870 	val = FIELD_PREP(PPSC_MDC_CFG, divider);
871 	if (!mtk_is_netsys_v3_or_greater(eth))
872 		val |= PPSC_MDC_TURBO;
873 	mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC);
874 
875 	dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / divider);
876 
877 	ret = of_mdiobus_register(eth->mii_bus, mii_np);
878 
879 err_put_node:
880 	of_node_put(mii_np);
881 	return ret;
882 }
883 
884 static void mtk_mdio_cleanup(struct mtk_eth *eth)
885 {
886 	if (!eth->mii_bus)
887 		return;
888 
889 	mdiobus_unregister(eth->mii_bus);
890 }
891 
892 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
893 {
894 	unsigned long flags;
895 	u32 val;
896 
897 	spin_lock_irqsave(&eth->tx_irq_lock, flags);
898 	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
899 	mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
900 	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
901 }
902 
903 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
904 {
905 	unsigned long flags;
906 	u32 val;
907 
908 	spin_lock_irqsave(&eth->tx_irq_lock, flags);
909 	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
910 	mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
911 	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
912 }
913 
914 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
915 {
916 	unsigned long flags;
917 	u32 val;
918 
919 	spin_lock_irqsave(&eth->rx_irq_lock, flags);
920 	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
921 	mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
922 	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
923 }
924 
925 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
926 {
927 	unsigned long flags;
928 	u32 val;
929 
930 	spin_lock_irqsave(&eth->rx_irq_lock, flags);
931 	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
932 	mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
933 	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
934 }
935 
936 static int mtk_set_mac_address(struct net_device *dev, void *p)
937 {
938 	int ret = eth_mac_addr(dev, p);
939 	struct mtk_mac *mac = netdev_priv(dev);
940 	struct mtk_eth *eth = mac->hw;
941 	const char *macaddr = dev->dev_addr;
942 
943 	if (ret)
944 		return ret;
945 
946 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
947 		return -EBUSY;
948 
949 	spin_lock_bh(&mac->hw->page_lock);
950 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
951 		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
952 			MT7628_SDM_MAC_ADRH);
953 		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
954 			(macaddr[4] << 8) | macaddr[5],
955 			MT7628_SDM_MAC_ADRL);
956 	} else {
957 		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
958 			MTK_GDMA_MAC_ADRH(mac->id));
959 		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
960 			(macaddr[4] << 8) | macaddr[5],
961 			MTK_GDMA_MAC_ADRL(mac->id));
962 	}
963 	spin_unlock_bh(&mac->hw->page_lock);
964 
965 	return 0;
966 }
967 
968 void mtk_stats_update_mac(struct mtk_mac *mac)
969 {
970 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
971 	struct mtk_eth *eth = mac->hw;
972 
973 	u64_stats_update_begin(&hw_stats->syncp);
974 
975 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
976 		hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
977 		hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
978 		hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
979 		hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
980 		hw_stats->rx_checksum_errors +=
981 			mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
982 	} else {
983 		const struct mtk_reg_map *reg_map = eth->soc->reg_map;
984 		unsigned int offs = hw_stats->reg_offset;
985 		u64 stats;
986 
987 		hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
988 		stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
989 		if (stats)
990 			hw_stats->rx_bytes += (stats << 32);
991 		hw_stats->rx_packets +=
992 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
993 		hw_stats->rx_overflow +=
994 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
995 		hw_stats->rx_fcs_errors +=
996 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
997 		hw_stats->rx_short_errors +=
998 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
999 		hw_stats->rx_long_errors +=
1000 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
1001 		hw_stats->rx_checksum_errors +=
1002 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
1003 		hw_stats->rx_flow_control_packets +=
1004 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
1005 
1006 		if (mtk_is_netsys_v3_or_greater(eth)) {
1007 			hw_stats->tx_skip +=
1008 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x50 + offs);
1009 			hw_stats->tx_collisions +=
1010 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x54 + offs);
1011 			hw_stats->tx_bytes +=
1012 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x40 + offs);
1013 			stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x44 + offs);
1014 			if (stats)
1015 				hw_stats->tx_bytes += (stats << 32);
1016 			hw_stats->tx_packets +=
1017 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x48 + offs);
1018 		} else {
1019 			hw_stats->tx_skip +=
1020 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
1021 			hw_stats->tx_collisions +=
1022 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
1023 			hw_stats->tx_bytes +=
1024 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
1025 			stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
1026 			if (stats)
1027 				hw_stats->tx_bytes += (stats << 32);
1028 			hw_stats->tx_packets +=
1029 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
1030 		}
1031 	}
1032 
1033 	u64_stats_update_end(&hw_stats->syncp);
1034 }
1035 
1036 static void mtk_stats_update(struct mtk_eth *eth)
1037 {
1038 	int i;
1039 
1040 	for (i = 0; i < MTK_MAX_DEVS; i++) {
1041 		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
1042 			continue;
1043 		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
1044 			mtk_stats_update_mac(eth->mac[i]);
1045 			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
1046 		}
1047 	}
1048 }
1049 
1050 static void mtk_get_stats64(struct net_device *dev,
1051 			    struct rtnl_link_stats64 *storage)
1052 {
1053 	struct mtk_mac *mac = netdev_priv(dev);
1054 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
1055 	unsigned int start;
1056 
1057 	if (netif_running(dev) && netif_device_present(dev)) {
1058 		if (spin_trylock_bh(&hw_stats->stats_lock)) {
1059 			mtk_stats_update_mac(mac);
1060 			spin_unlock_bh(&hw_stats->stats_lock);
1061 		}
1062 	}
1063 
1064 	do {
1065 		start = u64_stats_fetch_begin(&hw_stats->syncp);
1066 		storage->rx_packets = hw_stats->rx_packets;
1067 		storage->tx_packets = hw_stats->tx_packets;
1068 		storage->rx_bytes = hw_stats->rx_bytes;
1069 		storage->tx_bytes = hw_stats->tx_bytes;
1070 		storage->collisions = hw_stats->tx_collisions;
1071 		storage->rx_length_errors = hw_stats->rx_short_errors +
1072 			hw_stats->rx_long_errors;
1073 		storage->rx_over_errors = hw_stats->rx_overflow;
1074 		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
1075 		storage->rx_errors = hw_stats->rx_checksum_errors;
1076 		storage->tx_aborted_errors = hw_stats->tx_skip;
1077 	} while (u64_stats_fetch_retry(&hw_stats->syncp, start));
1078 
1079 	storage->tx_errors = dev->stats.tx_errors;
1080 	storage->rx_dropped = dev->stats.rx_dropped;
1081 	storage->tx_dropped = dev->stats.tx_dropped;
1082 }
1083 
1084 static inline int mtk_max_frag_size(int mtu)
1085 {
1086 	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH_2K */
1087 	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K)
1088 		mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
1089 
1090 	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
1091 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1092 }
1093 
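/* Usable buffer length for a frag of the given size, after reserving
 * NET_SKB_PAD + NET_IP_ALIGN headroom and room for the skb_shared_info.
 */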
1094 static inline int mtk_max_buf_size(int frag_size)
1095 {
1096 	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
1097 		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1098 
1099 	WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K);
1100 
1101 	return buf_size;
1102 }
1103 
1104 static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
1105 			    struct mtk_rx_dma_v2 *dma_rxd)
1106 {
1107 	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
1108 	if (!(rxd->rxd2 & RX_DMA_DONE))
1109 		return false;
1110 
1111 	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
1112 	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
1113 	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
1114 	if (mtk_is_netsys_v3_or_greater(eth)) {
1115 		rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
1116 		rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
1117 	}
1118 
1119 	return true;
1120 }
1121 
1122 static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
1123 {
1124 	unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
1125 	unsigned long data;
1126 
1127 	data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
1128 				get_order(size));
1129 
1130 	return (void *)data;
1131 }
1132 
1133 /* the QDMA core needs scratch memory to be set up */
1134 static int mtk_init_fq_dma(struct mtk_eth *eth)
1135 {
1136 	const struct mtk_soc_data *soc = eth->soc;
1137 	dma_addr_t phy_ring_tail;
1138 	int cnt = soc->tx.fq_dma_size;
1139 	dma_addr_t dma_addr;
1140 	int i, j, len;
1141 
1142 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM))
1143 		eth->scratch_ring = eth->sram_base;
1144 	else
1145 		eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
1146 						       cnt * soc->tx.desc_size,
1147 						       &eth->phy_scratch_ring,
1148 						       GFP_KERNEL);
1149 
1150 	if (unlikely(!eth->scratch_ring))
1151 		return -ENOMEM;
1152 
1153 	phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
1154 
1155 	for (j = 0; j < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); j++) {
1156 		len = min_t(int, cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH);
1157 		eth->scratch_head[j] = kcalloc(len, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
1158 
1159 		if (unlikely(!eth->scratch_head[j]))
1160 			return -ENOMEM;
1161 
1162 		dma_addr = dma_map_single(eth->dma_dev,
1163 					  eth->scratch_head[j], len * MTK_QDMA_PAGE_SIZE,
1164 					  DMA_FROM_DEVICE);
1165 
1166 		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
1167 			return -ENOMEM;
1168 
1169 		for (i = 0; i < len; i++) {
1170 			struct mtk_tx_dma_v2 *txd;
1171 
1172 			txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size;
1173 			txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
1174 			if (j * MTK_FQ_DMA_LENGTH + i < cnt)
1175 				txd->txd2 = eth->phy_scratch_ring +
1176 					    (j * MTK_FQ_DMA_LENGTH + i + 1) * soc->tx.desc_size;
1177 
1178 			txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
1179 			if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
1180 				txd->txd3 |= TX_DMA_PREP_ADDR64(dma_addr + i * MTK_QDMA_PAGE_SIZE);
1181 
1182 			txd->txd4 = 0;
1183 			if (mtk_is_netsys_v2_or_greater(eth)) {
1184 				txd->txd5 = 0;
1185 				txd->txd6 = 0;
1186 				txd->txd7 = 0;
1187 				txd->txd8 = 0;
1188 			}
1189 		}
1190 	}
1191 
1192 	mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
1193 	mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
1194 	mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
1195 	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
1196 
1197 	return 0;
1198 }
1199 
1200 static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
1201 {
1202 	return ring->dma + (desc - ring->phys);
1203 }
1204 
1205 static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
1206 					     void *txd, u32 txd_size)
1207 {
1208 	int idx = (txd - ring->dma) / txd_size;
1209 
1210 	return &ring->buf[idx];
1211 }
1212 
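/* Each QDMA descriptor has a shadow PDMA descriptor at the same index in
 * ring->dma_pdma; translate between the two by pointer arithmetic.
 */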
1213 static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
1214 				       struct mtk_tx_dma *dma)
1215 {
1216 	return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
1217 }
1218 
1219 static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
1220 {
1221 	return (dma - ring->dma) / txd_size;
1222 }
1223 
1224 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1225 			 struct xdp_frame_bulk *bq, bool napi)
1226 {
1227 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1228 		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
1229 			dma_unmap_single(eth->dma_dev,
1230 					 dma_unmap_addr(tx_buf, dma_addr0),
1231 					 dma_unmap_len(tx_buf, dma_len0),
1232 					 DMA_TO_DEVICE);
1233 		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
1234 			dma_unmap_page(eth->dma_dev,
1235 				       dma_unmap_addr(tx_buf, dma_addr0),
1236 				       dma_unmap_len(tx_buf, dma_len0),
1237 				       DMA_TO_DEVICE);
1238 		}
1239 	} else {
1240 		if (dma_unmap_len(tx_buf, dma_len0)) {
1241 			dma_unmap_page(eth->dma_dev,
1242 				       dma_unmap_addr(tx_buf, dma_addr0),
1243 				       dma_unmap_len(tx_buf, dma_len0),
1244 				       DMA_TO_DEVICE);
1245 		}
1246 
1247 		if (dma_unmap_len(tx_buf, dma_len1)) {
1248 			dma_unmap_page(eth->dma_dev,
1249 				       dma_unmap_addr(tx_buf, dma_addr1),
1250 				       dma_unmap_len(tx_buf, dma_len1),
1251 				       DMA_TO_DEVICE);
1252 		}
1253 	}
1254 
1255 	if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
1256 		if (tx_buf->type == MTK_TYPE_SKB) {
1257 			struct sk_buff *skb = tx_buf->data;
1258 
1259 			if (napi)
1260 				napi_consume_skb(skb, napi);
1261 			else
1262 				dev_kfree_skb_any(skb);
1263 		} else {
1264 			struct xdp_frame *xdpf = tx_buf->data;
1265 
1266 			if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
1267 				xdp_return_frame_rx_napi(xdpf);
1268 			else if (bq)
1269 				xdp_return_frame_bulk(xdpf, bq);
1270 			else
1271 				xdp_return_frame(xdpf);
1272 		}
1273 	}
1274 	tx_buf->flags = 0;
1275 	tx_buf->data = NULL;
1276 }
1277 
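/* PDMA packs two buffers into one descriptor: even buffers go into
 * txd1/PLEN0, odd ones into txd3/PLEN1. For QDMA only the unmap info is
 * recorded here; the descriptor itself is filled in mtk_tx_set_dma_desc().
 */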
1278 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1279 			 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
1280 			 size_t size, int idx)
1281 {
1282 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1283 		dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1284 		dma_unmap_len_set(tx_buf, dma_len0, size);
1285 	} else {
1286 		if (idx & 1) {
1287 			txd->txd3 = mapped_addr;
1288 			txd->txd2 |= TX_DMA_PLEN1(size);
1289 			dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
1290 			dma_unmap_len_set(tx_buf, dma_len1, size);
1291 		} else {
1292 			tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1293 			txd->txd1 = mapped_addr;
1294 			txd->txd2 = TX_DMA_PLEN0(size);
1295 			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1296 			dma_unmap_len_set(tx_buf, dma_len0, size);
1297 		}
1298 	}
1299 }
1300 
1301 static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
1302 				   struct mtk_tx_dma_desc_info *info)
1303 {
1304 	struct mtk_mac *mac = netdev_priv(dev);
1305 	struct mtk_eth *eth = mac->hw;
1306 	struct mtk_tx_dma *desc = txd;
1307 	u32 data;
1308 
1309 	WRITE_ONCE(desc->txd1, info->addr);
1310 
1311 	data = TX_DMA_SWC | TX_DMA_PLEN0(info->size) |
1312 	       FIELD_PREP(TX_DMA_PQID, info->qid);
1313 	if (info->last)
1314 		data |= TX_DMA_LS0;
1315 	WRITE_ONCE(desc->txd3, data);
1316 
1317 	data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
1318 	if (info->first) {
1319 		if (info->gso)
1320 			data |= TX_DMA_TSO;
1321 		/* tx checksum offload */
1322 		if (info->csum)
1323 			data |= TX_DMA_CHKSUM;
1324 		/* vlan header offload */
1325 		if (info->vlan)
1326 			data |= TX_DMA_INS_VLAN | info->vlan_tci;
1327 	}
1328 	WRITE_ONCE(desc->txd4, data);
1329 }
1330 
1331 static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
1332 				   struct mtk_tx_dma_desc_info *info)
1333 {
1334 	struct mtk_mac *mac = netdev_priv(dev);
1335 	struct mtk_tx_dma_v2 *desc = txd;
1336 	struct mtk_eth *eth = mac->hw;
1337 	u32 data;
1338 
1339 	WRITE_ONCE(desc->txd1, info->addr);
1340 
1341 	data = TX_DMA_PLEN0(info->size);
1342 	if (info->last)
1343 		data |= TX_DMA_LS0;
1344 
1345 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
1346 		data |= TX_DMA_PREP_ADDR64(info->addr);
1347 
1348 	WRITE_ONCE(desc->txd3, data);
1349 
1350 	/* set forward port */
1351 	switch (mac->id) {
1352 	case MTK_GMAC1_ID:
1353 		data = PSE_GDM1_PORT << TX_DMA_FPORT_SHIFT_V2;
1354 		break;
1355 	case MTK_GMAC2_ID:
1356 		data = PSE_GDM2_PORT << TX_DMA_FPORT_SHIFT_V2;
1357 		break;
1358 	case MTK_GMAC3_ID:
1359 		data = PSE_GDM3_PORT << TX_DMA_FPORT_SHIFT_V2;
1360 		break;
1361 	}
1362 
1363 	data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
1364 	WRITE_ONCE(desc->txd4, data);
1365 
1366 	data = 0;
1367 	if (info->first) {
1368 		if (info->gso)
1369 			data |= TX_DMA_TSO_V2;
1370 		/* tx checksum offload */
1371 		if (info->csum)
1372 			data |= TX_DMA_CHKSUM_V2;
1373 		if (mtk_is_netsys_v3_or_greater(eth) && netdev_uses_dsa(dev))
1374 			data |= TX_DMA_SPTAG_V3;
1375 	}
1376 	WRITE_ONCE(desc->txd5, data);
1377 
1378 	data = 0;
1379 	if (info->first && info->vlan)
1380 		data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
1381 	WRITE_ONCE(desc->txd6, data);
1382 
1383 	WRITE_ONCE(desc->txd7, 0);
1384 	WRITE_ONCE(desc->txd8, 0);
1385 }
1386 
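/* NETSYS v2 and later use the extended eight-word descriptor layout
 * (txd1-txd8), older SoCs the four-word one.
 */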
1387 static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
1388 				struct mtk_tx_dma_desc_info *info)
1389 {
1390 	struct mtk_mac *mac = netdev_priv(dev);
1391 	struct mtk_eth *eth = mac->hw;
1392 
1393 	if (mtk_is_netsys_v2_or_greater(eth))
1394 		mtk_tx_set_dma_desc_v2(dev, txd, info);
1395 	else
1396 		mtk_tx_set_dma_desc_v1(dev, txd, info);
1397 }
1398 
1399 static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
1400 		      int tx_num, struct mtk_tx_ring *ring, bool gso)
1401 {
1402 	struct mtk_tx_dma_desc_info txd_info = {
1403 		.size = skb_headlen(skb),
1404 		.gso = gso,
1405 		.csum = skb->ip_summed == CHECKSUM_PARTIAL,
1406 		.vlan = skb_vlan_tag_present(skb),
1407 		.qid = skb_get_queue_mapping(skb),
1408 		.vlan_tci = skb_vlan_tag_get(skb),
1409 		.first = true,
1410 		.last = !skb_is_nonlinear(skb),
1411 	};
1412 	struct netdev_queue *txq;
1413 	struct mtk_mac *mac = netdev_priv(dev);
1414 	struct mtk_eth *eth = mac->hw;
1415 	const struct mtk_soc_data *soc = eth->soc;
1416 	struct mtk_tx_dma *itxd, *txd;
1417 	struct mtk_tx_dma *itxd_pdma, *txd_pdma;
1418 	struct mtk_tx_buf *itx_buf, *tx_buf;
1419 	int i, n_desc = 1;
1420 	int queue = skb_get_queue_mapping(skb);
1421 	int k = 0;
1422 
1423 	txq = netdev_get_tx_queue(dev, queue);
1424 	itxd = ring->next_free;
1425 	itxd_pdma = qdma_to_pdma(ring, itxd);
1426 	if (itxd == ring->last_free)
1427 		return -ENOMEM;
1428 
1429 	itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
1430 	memset(itx_buf, 0, sizeof(*itx_buf));
1431 
1432 	txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
1433 				       DMA_TO_DEVICE);
1434 	if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1435 		return -ENOMEM;
1436 
1437 	mtk_tx_set_dma_desc(dev, itxd, &txd_info);
1438 
1439 	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1440 	itx_buf->mac_id = mac->id;
1441 	setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
1442 		     k++);
1443 
1444 	/* TX SG offload */
1445 	txd = itxd;
1446 	txd_pdma = qdma_to_pdma(ring, txd);
1447 
1448 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1449 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1450 		unsigned int offset = 0;
1451 		int frag_size = skb_frag_size(frag);
1452 
1453 		while (frag_size) {
1454 			bool new_desc = true;
1455 
1456 			if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
1457 			    (i & 0x1)) {
1458 				txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1459 				txd_pdma = qdma_to_pdma(ring, txd);
1460 				if (txd == ring->last_free)
1461 					goto err_dma;
1462 
1463 				n_desc++;
1464 			} else {
1465 				new_desc = false;
1466 			}
1467 
1468 			memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1469 			txd_info.size = min_t(unsigned int, frag_size,
1470 					      soc->tx.dma_max_len);
1471 			txd_info.qid = queue;
1472 			txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
1473 					!(frag_size - txd_info.size);
1474 			txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
1475 							 offset, txd_info.size,
1476 							 DMA_TO_DEVICE);
1477 			if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1478 				goto err_dma;
1479 
1480 			mtk_tx_set_dma_desc(dev, txd, &txd_info);
1481 
1482 			tx_buf = mtk_desc_to_tx_buf(ring, txd,
1483 						    soc->tx.desc_size);
1484 			if (new_desc)
1485 				memset(tx_buf, 0, sizeof(*tx_buf));
1486 			tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1487 			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
1488 			tx_buf->mac_id = mac->id;
1489 
1490 			setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
1491 				     txd_info.size, k++);
1492 
1493 			frag_size -= txd_info.size;
1494 			offset += txd_info.size;
1495 		}
1496 	}
1497 
1498 	/* store skb for cleanup */
1499 	itx_buf->type = MTK_TYPE_SKB;
1500 	itx_buf->data = skb;
1501 
1502 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1503 		if (k & 0x1)
1504 			txd_pdma->txd2 |= TX_DMA_LS0;
1505 		else
1506 			txd_pdma->txd2 |= TX_DMA_LS1;
1507 	}
1508 
1509 	netdev_tx_sent_queue(txq, skb->len);
1510 	skb_tx_timestamp(skb);
1511 
1512 	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1513 	atomic_sub(n_desc, &ring->free_count);
1514 
1515 	/* make sure that all changes to the dma ring are flushed before we
1516 	 * continue
1517 	 */
1518 	wmb();
1519 
1520 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1521 		if (netif_xmit_stopped(txq) || !netdev_xmit_more())
1522 			mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1523 	} else {
1524 		int next_idx;
1525 
1526 		next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_size),
1527 					 ring->dma_size);
1528 		mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
1529 	}
1530 
1531 	return 0;
1532 
1533 err_dma:
1534 	do {
1535 		tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
1536 
1537 		/* unmap dma */
1538 		mtk_tx_unmap(eth, tx_buf, NULL, false);
1539 
1540 		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1541 		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
1542 			itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
1543 
1544 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
1545 		itxd_pdma = qdma_to_pdma(ring, itxd);
1546 	} while (itxd != txd);
1547 
1548 	return -ENOMEM;
1549 }
1550 
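/* Worst-case number of TX descriptors needed for an skb: for GSO each frag
 * may span several descriptors, since one descriptor carries at most
 * dma_max_len bytes.
 */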
1551 static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
1552 {
1553 	int i, nfrags = 1;
1554 	skb_frag_t *frag;
1555 
1556 	if (skb_is_gso(skb)) {
1557 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1558 			frag = &skb_shinfo(skb)->frags[i];
1559 			nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1560 					       eth->soc->tx.dma_max_len);
1561 		}
1562 	} else {
1563 		nfrags += skb_shinfo(skb)->nr_frags;
1564 	}
1565 
1566 	return nfrags;
1567 }
1568 
1569 static int mtk_queue_stopped(struct mtk_eth *eth)
1570 {
1571 	int i;
1572 
1573 	for (i = 0; i < MTK_MAX_DEVS; i++) {
1574 		if (!eth->netdev[i])
1575 			continue;
1576 		if (netif_queue_stopped(eth->netdev[i]))
1577 			return 1;
1578 	}
1579 
1580 	return 0;
1581 }
1582 
1583 static void mtk_wake_queue(struct mtk_eth *eth)
1584 {
1585 	int i;
1586 
1587 	for (i = 0; i < MTK_MAX_DEVS; i++) {
1588 		if (!eth->netdev[i])
1589 			continue;
1590 		netif_tx_wake_all_queues(eth->netdev[i]);
1591 	}
1592 }
1593 
1594 static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1595 {
1596 	struct mtk_mac *mac = netdev_priv(dev);
1597 	struct mtk_eth *eth = mac->hw;
1598 	struct mtk_tx_ring *ring = &eth->tx_ring;
1599 	struct net_device_stats *stats = &dev->stats;
1600 	bool gso = false;
1601 	int tx_num;
1602 
1603 	/* normally we can rely on the stack not calling this more than once,
1604 	 * however we have 2 queues running on the same ring so we need to lock
1605 	 * the ring access
1606 	 */
1607 	spin_lock(&eth->page_lock);
1608 
1609 	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1610 		goto drop;
1611 
1612 	tx_num = mtk_cal_txd_req(eth, skb);
1613 	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1614 		netif_tx_stop_all_queues(dev);
1615 		netif_err(eth, tx_queued, dev,
1616 			  "Tx Ring full when queue awake!\n");
1617 		spin_unlock(&eth->page_lock);
1618 		return NETDEV_TX_BUSY;
1619 	}
1620 
1621 	/* TSO: fill MSS info in tcp checksum field */
1622 	if (skb_is_gso(skb)) {
1623 		if (skb_cow_head(skb, 0)) {
1624 			netif_warn(eth, tx_err, dev,
1625 				   "GSO expand head fail.\n");
1626 			goto drop;
1627 		}
1628 
1629 		if (skb_shinfo(skb)->gso_type &
1630 				(SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1631 			gso = true;
1632 			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1633 		}
1634 	}
1635 
1636 	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1637 		goto drop;
1638 
1639 	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1640 		netif_tx_stop_all_queues(dev);
1641 
1642 	spin_unlock(&eth->page_lock);
1643 
1644 	return NETDEV_TX_OK;
1645 
1646 drop:
1647 	spin_unlock(&eth->page_lock);
1648 	stats->tx_dropped++;
1649 	dev_kfree_skb_any(skb);
1650 	return NETDEV_TX_OK;
1651 }
1652 
1653 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1654 {
1655 	int i;
1656 	struct mtk_rx_ring *ring;
1657 	int idx;
1658 
1659 	if (!eth->hwlro)
1660 		return &eth->rx_ring[0];
1661 
1662 	for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1663 		struct mtk_rx_dma *rxd;
1664 
1665 		ring = &eth->rx_ring[i];
1666 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1667 		rxd = ring->dma + idx * eth->soc->rx.desc_size;
1668 		if (rxd->rxd2 & RX_DMA_DONE) {
1669 			ring->calc_idx_update = true;
1670 			return ring;
1671 		}
1672 	}
1673 
1674 	return NULL;
1675 }
1676 
1677 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
1678 {
1679 	struct mtk_rx_ring *ring;
1680 	int i;
1681 
1682 	if (!eth->hwlro) {
1683 		ring = &eth->rx_ring[0];
1684 		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1685 	} else {
1686 		for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1687 			ring = &eth->rx_ring[i];
1688 			if (ring->calc_idx_update) {
1689 				ring->calc_idx_update = false;
1690 				mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1691 			}
1692 		}
1693 	}
1694 }
1695 
1696 static bool mtk_page_pool_enabled(struct mtk_eth *eth)
1697 {
1698 	return mtk_is_netsys_v2_or_greater(eth);
1699 }
1700 
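/* Create a page_pool backing an RX ring and register it as the XDP memory
 * model for @xdp_q. Pages are mapped bidirectionally when an XDP program is
 * attached, so XDP_TX can transmit straight out of the RX buffers.
 */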
1701 static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
1702 					      struct xdp_rxq_info *xdp_q,
1703 					      int id, int size)
1704 {
1705 	struct page_pool_params pp_params = {
1706 		.order = 0,
1707 		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
1708 		.pool_size = size,
1709 		.nid = NUMA_NO_NODE,
1710 		.dev = eth->dma_dev,
1711 		.offset = MTK_PP_HEADROOM,
1712 		.max_len = MTK_PP_MAX_BUF_SIZE,
1713 	};
1714 	struct page_pool *pp;
1715 	int err;
1716 
1717 	pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
1718 							  : DMA_FROM_DEVICE;
1719 	pp = page_pool_create(&pp_params);
1720 	if (IS_ERR(pp))
1721 		return pp;
1722 
1723 	err = __xdp_rxq_info_reg(xdp_q, eth->dummy_dev, id,
1724 				 eth->rx_napi.napi_id, PAGE_SIZE);
1725 	if (err < 0)
1726 		goto err_free_pp;
1727 
1728 	err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
1729 	if (err)
1730 		goto err_unregister_rxq;
1731 
1732 	return pp;
1733 
1734 err_unregister_rxq:
1735 	xdp_rxq_info_unreg(xdp_q);
1736 err_free_pp:
1737 	page_pool_destroy(pp);
1738 
1739 	return ERR_PTR(err);
1740 }
1741 
1742 static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
1743 				    gfp_t gfp_mask)
1744 {
1745 	struct page *page;
1746 
1747 	page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
1748 	if (!page)
1749 		return NULL;
1750 
1751 	*dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
1752 	return page_address(page);
1753 }
1754 
1755 static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
1756 {
1757 	if (ring->page_pool)
1758 		page_pool_put_full_page(ring->page_pool,
1759 					virt_to_head_page(data), napi);
1760 	else
1761 		skb_free_frag(data);
1762 }
1763 
1764 static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
1765 			     struct mtk_tx_dma_desc_info *txd_info,
1766 			     struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
1767 			     void *data, u16 headroom, int index, bool dma_map)
1768 {
1769 	struct mtk_tx_ring *ring = &eth->tx_ring;
1770 	struct mtk_mac *mac = netdev_priv(dev);
1771 	struct mtk_tx_dma *txd_pdma;
1772 
1773 	if (dma_map) {  /* ndo_xdp_xmit */
1774 		txd_info->addr = dma_map_single(eth->dma_dev, data,
1775 						txd_info->size, DMA_TO_DEVICE);
1776 		if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
1777 			return -ENOMEM;
1778 
1779 		tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1780 	} else {
1781 		struct page *page = virt_to_head_page(data);
1782 
1783 		txd_info->addr = page_pool_get_dma_addr(page) +
1784 				 sizeof(struct xdp_frame) + headroom;
1785 		dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
1786 					   txd_info->size, DMA_BIDIRECTIONAL);
1787 	}
1788 	mtk_tx_set_dma_desc(dev, txd, txd_info);
1789 
1790 	tx_buf->mac_id = mac->id;
1791 	tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
1792 	tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1793 
1794 	txd_pdma = qdma_to_pdma(ring, txd);
1795 	setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
1796 		     index);
1797 
1798 	return 0;
1799 }
1800 
1801 static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
1802 				struct net_device *dev, bool dma_map)
1803 {
1804 	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
1805 	const struct mtk_soc_data *soc = eth->soc;
1806 	struct mtk_tx_ring *ring = &eth->tx_ring;
1807 	struct mtk_mac *mac = netdev_priv(dev);
1808 	struct mtk_tx_dma_desc_info txd_info = {
1809 		.size	= xdpf->len,
1810 		.first	= true,
1811 		.last	= !xdp_frame_has_frags(xdpf),
1812 		.qid	= mac->id,
1813 	};
1814 	int err, index = 0, n_desc = 1, nr_frags;
1815 	struct mtk_tx_buf *htx_buf, *tx_buf;
1816 	struct mtk_tx_dma *htxd, *txd;
1817 	void *data = xdpf->data;
1818 
1819 	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1820 		return -EBUSY;
1821 
1822 	nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
1823 	if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
1824 		return -EBUSY;
1825 
1826 	spin_lock(&eth->page_lock);
1827 
1828 	txd = ring->next_free;
1829 	if (txd == ring->last_free) {
1830 		spin_unlock(&eth->page_lock);
1831 		return -ENOMEM;
1832 	}
1833 	htxd = txd;
1834 
1835 	tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size);
1836 	memset(tx_buf, 0, sizeof(*tx_buf));
1837 	htx_buf = tx_buf;
1838 
1839 	for (;;) {
1840 		err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
1841 					data, xdpf->headroom, index, dma_map);
1842 		if (err < 0)
1843 			goto unmap;
1844 
1845 		if (txd_info.last)
1846 			break;
1847 
1848 		if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
1849 			txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1850 			if (txd == ring->last_free)
1851 				goto unmap;
1852 
1853 			tx_buf = mtk_desc_to_tx_buf(ring, txd,
1854 						    soc->tx.desc_size);
1855 			memset(tx_buf, 0, sizeof(*tx_buf));
1856 			n_desc++;
1857 		}
1858 
1859 		memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1860 		txd_info.size = skb_frag_size(&sinfo->frags[index]);
1861 		txd_info.last = index + 1 == nr_frags;
1862 		txd_info.qid = mac->id;
1863 		data = skb_frag_address(&sinfo->frags[index]);
1864 
1865 		index++;
1866 	}
1867 	/* store xdpf for cleanup */
1868 	htx_buf->data = xdpf;
1869 
1870 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1871 		struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, txd);
1872 
1873 		if (index & 1)
1874 			txd_pdma->txd2 |= TX_DMA_LS0;
1875 		else
1876 			txd_pdma->txd2 |= TX_DMA_LS1;
1877 	}
1878 
1879 	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1880 	atomic_sub(n_desc, &ring->free_count);
1881 
1882 	/* make sure that all changes to the dma ring are flushed before we
1883 	 * continue
1884 	 */
1885 	wmb();
1886 
1887 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1888 		mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1889 	} else {
1890 		int idx;
1891 
1892 		idx = txd_to_idx(ring, txd, soc->tx.desc_size);
1893 		mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
1894 			MT7628_TX_CTX_IDX0);
1895 	}
1896 
1897 	spin_unlock(&eth->page_lock);
1898 
1899 	return 0;
1900 
1901 unmap:
1902 	while (htxd != txd) {
1903 		tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size);
1904 		mtk_tx_unmap(eth, tx_buf, NULL, false);
1905 
1906 		htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1907 		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1908 			struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, htxd);
1909 
1910 			txd_pdma->txd2 = TX_DMA_DESP2_DEF;
1911 		}
1912 
1913 		htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
1914 	}
1915 
1916 	spin_unlock(&eth->page_lock);
1917 
1918 	return err;
1919 }
1920 
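/* .ndo_xdp_xmit callback: submit up to @num_frame frames for transmission and
 * return how many were actually queued, updating the XDP transmit counters.
 */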
1921 static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
1922 			struct xdp_frame **frames, u32 flags)
1923 {
1924 	struct mtk_mac *mac = netdev_priv(dev);
1925 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
1926 	struct mtk_eth *eth = mac->hw;
1927 	int i, nxmit = 0;
1928 
1929 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1930 		return -EINVAL;
1931 
1932 	for (i = 0; i < num_frame; i++) {
1933 		if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
1934 			break;
1935 		nxmit++;
1936 	}
1937 
1938 	u64_stats_update_begin(&hw_stats->syncp);
1939 	hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
1940 	hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
1941 	u64_stats_update_end(&hw_stats->syncp);
1942 
1943 	return nxmit;
1944 }
1945 
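/* Run the attached XDP program on a received buffer and handle the verdict
 * (PASS, TX, REDIRECT, ABORTED or DROP), bumping the matching per-action
 * counter. The caller only builds an skb when XDP_PASS is returned.
 */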
1946 static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
1947 		       struct xdp_buff *xdp, struct net_device *dev)
1948 {
1949 	struct mtk_mac *mac = netdev_priv(dev);
1950 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
1951 	u64 *count = &hw_stats->xdp_stats.rx_xdp_drop;
1952 	struct bpf_prog *prog;
1953 	u32 act = XDP_PASS;
1954 
1955 	rcu_read_lock();
1956 
1957 	prog = rcu_dereference(eth->prog);
1958 	if (!prog)
1959 		goto out;
1960 
1961 	act = bpf_prog_run_xdp(prog, xdp);
1962 	switch (act) {
1963 	case XDP_PASS:
1964 		count = &hw_stats->xdp_stats.rx_xdp_pass;
1965 		goto update_stats;
1966 	case XDP_REDIRECT:
1967 		if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
1968 			act = XDP_DROP;
1969 			break;
1970 		}
1971 
1972 		count = &hw_stats->xdp_stats.rx_xdp_redirect;
1973 		goto update_stats;
1974 	case XDP_TX: {
1975 		struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
1976 
1977 		if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
1978 			count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
1979 			act = XDP_DROP;
1980 			break;
1981 		}
1982 
1983 		count = &hw_stats->xdp_stats.rx_xdp_tx;
1984 		goto update_stats;
1985 	}
1986 	default:
1987 		bpf_warn_invalid_xdp_action(dev, prog, act);
1988 		fallthrough;
1989 	case XDP_ABORTED:
1990 		trace_xdp_exception(dev, prog, act);
1991 		fallthrough;
1992 	case XDP_DROP:
1993 		break;
1994 	}
1995 
1996 	page_pool_put_full_page(ring->page_pool,
1997 				virt_to_head_page(xdp->data), true);
1998 
1999 update_stats:
2000 	u64_stats_update_begin(&hw_stats->syncp);
2001 	*count = *count + 1;
2002 	u64_stats_update_end(&hw_stats->syncp);
2003 out:
2004 	rcu_read_unlock();
2005 
2006 	return act;
2007 }
2008 
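/* RX NAPI poll: walk the RX rings, refill each consumed slot with a fresh
 * buffer (page pool or frag allocator), run XDP if a program is attached and
 * pass the remaining packets to the stack, then advance the hardware CPU
 * index. Returns the number of packets processed, at most @budget.
 */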
2009 static int mtk_poll_rx(struct napi_struct *napi, int budget,
2010 		       struct mtk_eth *eth)
2011 {
2012 	struct dim_sample dim_sample = {};
2013 	struct mtk_rx_ring *ring;
2014 	bool xdp_flush = false;
2015 	int idx;
2016 	struct sk_buff *skb;
2017 	u64 addr64 = 0;
2018 	u8 *data, *new_data;
2019 	struct mtk_rx_dma_v2 *rxd, trxd;
2020 	int done = 0, bytes = 0;
2021 	dma_addr_t dma_addr = DMA_MAPPING_ERROR;
2022 	int ppe_idx = 0;
2023 
2024 	while (done < budget) {
2025 		unsigned int pktlen, *rxdcsum;
2026 		struct net_device *netdev;
2027 		u32 hash, reason;
2028 		int mac = 0;
2029 
2030 		ring = mtk_get_rx_ring(eth);
2031 		if (unlikely(!ring))
2032 			goto rx_done;
2033 
2034 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
2035 		rxd = ring->dma + idx * eth->soc->rx.desc_size;
2036 		data = ring->data[idx];
2037 
2038 		if (!mtk_rx_get_desc(eth, &trxd, rxd))
2039 			break;
2040 
2041 		/* find out which mac the packet comes from. values start at 1 */
2042 		if (mtk_is_netsys_v3_or_greater(eth)) {
2043 			u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
2044 
2045 			switch (val) {
2046 			case PSE_GDM1_PORT:
2047 			case PSE_GDM2_PORT:
2048 				mac = val - 1;
2049 				break;
2050 			case PSE_GDM3_PORT:
2051 				mac = MTK_GMAC3_ID;
2052 				break;
2053 			default:
2054 				break;
2055 			}
2056 		} else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
2057 			   !(trxd.rxd4 & RX_DMA_SPECIAL_TAG)) {
2058 			mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
2059 		}
2060 
2061 		if (unlikely(mac < 0 || mac >= MTK_MAX_DEVS ||
2062 			     !eth->netdev[mac]))
2063 			goto release_desc;
2064 
2065 		netdev = eth->netdev[mac];
2066 		ppe_idx = eth->mac[mac]->ppe_idx;
2067 
2068 		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
2069 			goto release_desc;
2070 
2071 		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
2072 
2073 		/* alloc new buffer */
2074 		if (ring->page_pool) {
2075 			struct page *page = virt_to_head_page(data);
2076 			struct xdp_buff xdp;
2077 			u32 ret;
2078 
2079 			new_data = mtk_page_pool_get_buff(ring->page_pool,
2080 							  &dma_addr,
2081 							  GFP_ATOMIC);
2082 			if (unlikely(!new_data)) {
2083 				netdev->stats.rx_dropped++;
2084 				goto release_desc;
2085 			}
2086 
2087 			dma_sync_single_for_cpu(eth->dma_dev,
2088 				page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
2089 				pktlen, page_pool_get_dma_dir(ring->page_pool));
2090 
2091 			xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
2092 			xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
2093 					 false);
2094 			xdp_buff_clear_frags_flag(&xdp);
2095 
2096 			ret = mtk_xdp_run(eth, ring, &xdp, netdev);
2097 			if (ret == XDP_REDIRECT)
2098 				xdp_flush = true;
2099 
2100 			if (ret != XDP_PASS)
2101 				goto skip_rx;
2102 
2103 			skb = build_skb(data, PAGE_SIZE);
2104 			if (unlikely(!skb)) {
2105 				page_pool_put_full_page(ring->page_pool,
2106 							page, true);
2107 				netdev->stats.rx_dropped++;
2108 				goto skip_rx;
2109 			}
2110 
2111 			skb_reserve(skb, xdp.data - xdp.data_hard_start);
2112 			skb_put(skb, xdp.data_end - xdp.data);
2113 			skb_mark_for_recycle(skb);
2114 		} else {
2115 			if (ring->frag_size <= PAGE_SIZE)
2116 				new_data = napi_alloc_frag(ring->frag_size);
2117 			else
2118 				new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
2119 
2120 			if (unlikely(!new_data)) {
2121 				netdev->stats.rx_dropped++;
2122 				goto release_desc;
2123 			}
2124 
2125 			dma_addr = dma_map_single(eth->dma_dev,
2126 				new_data + NET_SKB_PAD + eth->ip_align,
2127 				ring->buf_size, DMA_FROM_DEVICE);
2128 			if (unlikely(dma_mapping_error(eth->dma_dev,
2129 						       dma_addr))) {
2130 				skb_free_frag(new_data);
2131 				netdev->stats.rx_dropped++;
2132 				goto release_desc;
2133 			}
2134 
2135 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
2136 				addr64 = RX_DMA_GET_ADDR64(trxd.rxd2);
2137 
2138 			dma_unmap_single(eth->dma_dev, ((u64)trxd.rxd1 | addr64),
2139 					 ring->buf_size, DMA_FROM_DEVICE);
2140 
2141 			skb = build_skb(data, ring->frag_size);
2142 			if (unlikely(!skb)) {
2143 				netdev->stats.rx_dropped++;
2144 				skb_free_frag(data);
2145 				goto skip_rx;
2146 			}
2147 
2148 			skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
2149 			skb_put(skb, pktlen);
2150 		}
2151 
2152 		skb->dev = netdev;
2153 		bytes += skb->len;
2154 
2155 		if (mtk_is_netsys_v3_or_greater(eth)) {
2156 			reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
2157 			hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
2158 			if (hash != MTK_RXD5_FOE_ENTRY)
2159 				skb_set_hash(skb, jhash_1word(hash, 0),
2160 					     PKT_HASH_TYPE_L4);
2161 			rxdcsum = &trxd.rxd3;
2162 		} else {
2163 			reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
2164 			hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
2165 			if (hash != MTK_RXD4_FOE_ENTRY)
2166 				skb_set_hash(skb, jhash_1word(hash, 0),
2167 					     PKT_HASH_TYPE_L4);
2168 			rxdcsum = &trxd.rxd4;
2169 		}
2170 
2171 		if (*rxdcsum & eth->soc->rx.dma_l4_valid)
2172 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2173 		else
2174 			skb_checksum_none_assert(skb);
2175 		skb->protocol = eth_type_trans(skb, netdev);
2176 
2177 		/* When using VLAN untagging in combination with DSA, the
2178 		 * hardware treats the MTK special tag as a VLAN and untags it.
2179 		 */
2180 		if (mtk_is_netsys_v1(eth) && (trxd.rxd2 & RX_DMA_VTAG) &&
2181 		    netdev_uses_dsa(netdev)) {
2182 			unsigned int port = RX_DMA_VPID(trxd.rxd3) & GENMASK(2, 0);
2183 
2184 			if (port < ARRAY_SIZE(eth->dsa_meta) &&
2185 			    eth->dsa_meta[port])
2186 				skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
2187 		}
2188 
2189 		if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
2190 			mtk_ppe_check_skb(eth->ppe[ppe_idx], skb, hash);
2191 
2192 		skb_record_rx_queue(skb, 0);
2193 		napi_gro_receive(napi, skb);
2194 
2195 skip_rx:
2196 		ring->data[idx] = new_data;
2197 		rxd->rxd1 = (unsigned int)dma_addr;
2198 release_desc:
2199 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2200 			rxd->rxd2 = RX_DMA_LSO;
2201 		else
2202 			rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2203 
2204 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) &&
2205 		    likely(dma_addr != DMA_MAPPING_ERROR))
2206 			rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
2207 
2208 		ring->calc_idx = idx;
2209 		done++;
2210 	}
2211 
2212 rx_done:
2213 	if (done) {
2214 		/* make sure that all changes to the dma ring are flushed before
2215 		 * we continue
2216 		 */
2217 		wmb();
2218 		mtk_update_rx_cpu_idx(eth);
2219 	}
2220 
2221 	eth->rx_packets += done;
2222 	eth->rx_bytes += bytes;
2223 	dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
2224 			  &dim_sample);
2225 	net_dim(&eth->rx_dim, &dim_sample);
2226 
2227 	if (xdp_flush)
2228 		xdp_do_flush();
2229 
2230 	return done;
2231 }
2232 
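/* Per-poll TX completion state, used to batch the BQL updates
 * (netdev_tx_completed_queue) per TX queue instead of per packet.
 */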
2233 struct mtk_poll_state {
2234 	struct netdev_queue *txq;
2235 	unsigned int total;
2236 	unsigned int done;
2237 	unsigned int bytes;
2238 };
2239 
2240 static void
2241 mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac,
2242 		 struct sk_buff *skb)
2243 {
2244 	struct netdev_queue *txq;
2245 	struct net_device *dev;
2246 	unsigned int bytes = skb->len;
2247 
2248 	state->total++;
2249 	eth->tx_packets++;
2250 	eth->tx_bytes += bytes;
2251 
2252 	dev = eth->netdev[mac];
2253 	if (!dev)
2254 		return;
2255 
2256 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2257 	if (state->txq == txq) {
2258 		state->done++;
2259 		state->bytes += bytes;
2260 		return;
2261 	}
2262 
2263 	if (state->txq)
2264 		netdev_tx_completed_queue(state->txq, state->done, state->bytes);
2265 
2266 	state->txq = txq;
2267 	state->done = 1;
2268 	state->bytes = bytes;
2269 }
2270 
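/* Reclaim QDMA TX descriptors that the hardware has completed, walking from
 * the driver's last free pointer up to the hardware DRX pointer, unmapping
 * buffers and growing the free count as we go.
 */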
2271 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
2272 			    struct mtk_poll_state *state)
2273 {
2274 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2275 	struct mtk_tx_ring *ring = &eth->tx_ring;
2276 	struct mtk_tx_buf *tx_buf;
2277 	struct xdp_frame_bulk bq;
2278 	struct mtk_tx_dma *desc;
2279 	u32 cpu, dma;
2280 
2281 	cpu = ring->last_free_ptr;
2282 	dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
2283 
2284 	desc = mtk_qdma_phys_to_virt(ring, cpu);
2285 	xdp_frame_bulk_init(&bq);
2286 
2287 	while ((cpu != dma) && budget) {
2288 		u32 next_cpu = desc->txd2;
2289 
2290 		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
2291 		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
2292 			break;
2293 
2294 		tx_buf = mtk_desc_to_tx_buf(ring, desc,
2295 					    eth->soc->tx.desc_size);
2296 		if (!tx_buf->data)
2297 			break;
2298 
2299 		if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2300 			if (tx_buf->type == MTK_TYPE_SKB)
2301 				mtk_poll_tx_done(eth, state, tx_buf->mac_id,
2302 						 tx_buf->data);
2303 
2304 			budget--;
2305 		}
2306 		mtk_tx_unmap(eth, tx_buf, &bq, true);
2307 
2308 		ring->last_free = desc;
2309 		atomic_inc(&ring->free_count);
2310 
2311 		cpu = next_cpu;
2312 	}
2313 	xdp_flush_frame_bulk(&bq);
2314 
2315 	ring->last_free_ptr = cpu;
2316 	mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
2317 
2318 	return budget;
2319 }
2320 
2321 static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
2322 			    struct mtk_poll_state *state)
2323 {
2324 	struct mtk_tx_ring *ring = &eth->tx_ring;
2325 	struct mtk_tx_buf *tx_buf;
2326 	struct xdp_frame_bulk bq;
2327 	struct mtk_tx_dma *desc;
2328 	u32 cpu, dma;
2329 
2330 	cpu = ring->cpu_idx;
2331 	dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
2332 	xdp_frame_bulk_init(&bq);
2333 
2334 	while ((cpu != dma) && budget) {
2335 		tx_buf = &ring->buf[cpu];
2336 		if (!tx_buf->data)
2337 			break;
2338 
2339 		if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2340 			if (tx_buf->type == MTK_TYPE_SKB)
2341 				mtk_poll_tx_done(eth, state, 0, tx_buf->data);
2342 			budget--;
2343 		}
2344 		mtk_tx_unmap(eth, tx_buf, &bq, true);
2345 
2346 		desc = ring->dma + cpu * eth->soc->tx.desc_size;
2347 		ring->last_free = desc;
2348 		atomic_inc(&ring->free_count);
2349 
2350 		cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
2351 	}
2352 	xdp_flush_frame_bulk(&bq);
2353 
2354 	ring->cpu_idx = cpu;
2355 
2356 	return budget;
2357 }
2358 
2359 static int mtk_poll_tx(struct mtk_eth *eth, int budget)
2360 {
2361 	struct mtk_tx_ring *ring = &eth->tx_ring;
2362 	struct dim_sample dim_sample = {};
2363 	struct mtk_poll_state state = {};
2364 
2365 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2366 		budget = mtk_poll_tx_qdma(eth, budget, &state);
2367 	else
2368 		budget = mtk_poll_tx_pdma(eth, budget, &state);
2369 
2370 	if (state.txq)
2371 		netdev_tx_completed_queue(state.txq, state.done, state.bytes);
2372 
2373 	dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
2374 			  &dim_sample);
2375 	net_dim(&eth->tx_dim, &dim_sample);
2376 
2377 	if (mtk_queue_stopped(eth) &&
2378 	    (atomic_read(&ring->free_count) > ring->thresh))
2379 		mtk_wake_queue(eth);
2380 
2381 	return state.total;
2382 }
2383 
2384 static void mtk_handle_status_irq(struct mtk_eth *eth)
2385 {
2386 	u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
2387 
2388 	if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
2389 		mtk_stats_update(eth);
2390 		mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
2391 			MTK_INT_STATUS2);
2392 	}
2393 }
2394 
2395 static int mtk_napi_tx(struct napi_struct *napi, int budget)
2396 {
2397 	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
2398 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2399 	int tx_done = 0;
2400 
2401 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2402 		mtk_handle_status_irq(eth);
2403 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
2404 	tx_done = mtk_poll_tx(eth, budget);
2405 
2406 	if (unlikely(netif_msg_intr(eth))) {
2407 		dev_info(eth->dev,
2408 			 "done tx %d, intr 0x%08x/0x%x\n", tx_done,
2409 			 mtk_r32(eth, reg_map->tx_irq_status),
2410 			 mtk_r32(eth, reg_map->tx_irq_mask));
2411 	}
2412 
2413 	if (tx_done == budget)
2414 		return budget;
2415 
2416 	if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
2417 		return budget;
2418 
2419 	if (napi_complete_done(napi, tx_done))
2420 		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2421 
2422 	return tx_done;
2423 }
2424 
2425 static int mtk_napi_rx(struct napi_struct *napi, int budget)
2426 {
2427 	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
2428 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2429 	int rx_done_total = 0;
2430 
2431 	mtk_handle_status_irq(eth);
2432 
2433 	do {
2434 		int rx_done;
2435 
2436 		mtk_w32(eth, eth->soc->rx.irq_done_mask,
2437 			reg_map->pdma.irq_status);
2438 		rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
2439 		rx_done_total += rx_done;
2440 
2441 		if (unlikely(netif_msg_intr(eth))) {
2442 			dev_info(eth->dev,
2443 				 "done rx %d, intr 0x%08x/0x%x\n", rx_done,
2444 				 mtk_r32(eth, reg_map->pdma.irq_status),
2445 				 mtk_r32(eth, reg_map->pdma.irq_mask));
2446 		}
2447 
2448 		if (rx_done_total == budget)
2449 			return budget;
2450 
2451 	} while (mtk_r32(eth, reg_map->pdma.irq_status) &
2452 		 eth->soc->rx.irq_done_mask);
2453 
2454 	if (napi_complete_done(napi, rx_done_total))
2455 		mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
2456 
2457 	return rx_done_total;
2458 }
2459 
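/* Allocate and initialise the TX descriptor ring. QDMA descriptors are
 * chained through txd2, while PDMA-only SoCs additionally get a shadow
 * PDMA ring (ring->dma_pdma) holding the real hardware descriptors. On SoCs
 * with SRAM the ring is carved out of the SRAM region instead of coherent
 * DMA memory.
 */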
2460 static int mtk_tx_alloc(struct mtk_eth *eth)
2461 {
2462 	const struct mtk_soc_data *soc = eth->soc;
2463 	struct mtk_tx_ring *ring = &eth->tx_ring;
2464 	int i, sz = soc->tx.desc_size;
2465 	struct mtk_tx_dma_v2 *txd;
2466 	int ring_size;
2467 	u32 ofs, val;
2468 
2469 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
2470 		ring_size = MTK_QDMA_RING_SIZE;
2471 	else
2472 		ring_size = soc->tx.dma_size;
2473 
2474 	ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
2475 			    GFP_KERNEL);
2476 	if (!ring->buf)
2477 		goto no_tx_mem;
2478 
2479 	if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) {
2480 		ring->dma = eth->sram_base + soc->tx.fq_dma_size * sz;
2481 		ring->phys = eth->phy_scratch_ring + soc->tx.fq_dma_size * (dma_addr_t)sz;
2482 	} else {
2483 		ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
2484 					       &ring->phys, GFP_KERNEL);
2485 	}
2486 
2487 	if (!ring->dma)
2488 		goto no_tx_mem;
2489 
2490 	for (i = 0; i < ring_size; i++) {
2491 		int next = (i + 1) % ring_size;
2492 		u32 next_ptr = ring->phys + next * sz;
2493 
2494 		txd = ring->dma + i * sz;
2495 		txd->txd2 = next_ptr;
2496 		txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
2497 		txd->txd4 = 0;
2498 		if (mtk_is_netsys_v2_or_greater(eth)) {
2499 			txd->txd5 = 0;
2500 			txd->txd6 = 0;
2501 			txd->txd7 = 0;
2502 			txd->txd8 = 0;
2503 		}
2504 	}
2505 
2506 	/* On MT7688 (PDMA only) this driver uses the ring->dma structs
2507 	 * only as the framework. The real HW descriptors are the PDMA
2508 	 * descriptors in ring->dma_pdma.
2509 	 */
2510 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2511 		ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
2512 						    &ring->phys_pdma, GFP_KERNEL);
2513 		if (!ring->dma_pdma)
2514 			goto no_tx_mem;
2515 
2516 		for (i = 0; i < ring_size; i++) {
2517 			ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
2518 			ring->dma_pdma[i].txd4 = 0;
2519 		}
2520 	}
2521 
2522 	ring->dma_size = ring_size;
2523 	atomic_set(&ring->free_count, ring_size - 2);
2524 	ring->next_free = ring->dma;
2525 	ring->last_free = (void *)txd;
2526 	ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz));
2527 	ring->thresh = MAX_SKB_FRAGS;
2528 
2529 	/* make sure that all changes to the dma ring are flushed before we
2530 	 * continue
2531 	 */
2532 	wmb();
2533 
2534 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2535 		mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
2536 		mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
2537 		mtk_w32(eth,
2538 			ring->phys + ((ring_size - 1) * sz),
2539 			soc->reg_map->qdma.crx_ptr);
2540 		mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
2541 
2542 		for (i = 0, ofs = 0; i < MTK_QDMA_NUM_QUEUES; i++) {
2543 			val = (QDMA_RES_THRES << 8) | QDMA_RES_THRES;
2544 			mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs);
2545 
2546 			val = MTK_QTX_SCH_MIN_RATE_EN |
2547 			      /* minimum: 10 Mbps */
2548 			      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
2549 			      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
2550 			      MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
2551 			if (mtk_is_netsys_v1(eth))
2552 				val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
2553 			mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
2554 			ofs += MTK_QTX_OFFSET;
2555 		}
2556 		val = MTK_QDMA_TX_SCH_MAX_WFQ | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
2557 		mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate);
2558 		if (mtk_is_netsys_v2_or_greater(eth))
2559 			mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4);
2560 	} else {
2561 		mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
2562 		mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0);
2563 		mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
2564 		mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
2565 	}
2566 
2567 	return 0;
2568 
2569 no_tx_mem:
2570 	return -ENOMEM;
2571 }
2572 
2573 static void mtk_tx_clean(struct mtk_eth *eth)
2574 {
2575 	const struct mtk_soc_data *soc = eth->soc;
2576 	struct mtk_tx_ring *ring = &eth->tx_ring;
2577 	int i;
2578 
2579 	if (ring->buf) {
2580 		for (i = 0; i < ring->dma_size; i++)
2581 			mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
2582 		kfree(ring->buf);
2583 		ring->buf = NULL;
2584 	}
2585 	if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
2586 		dma_free_coherent(eth->dma_dev,
2587 				  ring->dma_size * soc->tx.desc_size,
2588 				  ring->dma, ring->phys);
2589 		ring->dma = NULL;
2590 	}
2591 
2592 	if (ring->dma_pdma) {
2593 		dma_free_coherent(eth->dma_dev,
2594 				  ring->dma_size * soc->tx.desc_size,
2595 				  ring->dma_pdma, ring->phys_pdma);
2596 		ring->dma_pdma = NULL;
2597 	}
2598 }
2599 
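/* Allocate one RX ring (normal, HW LRO or QDMA variant), pre-fill every
 * descriptor with a mapped receive buffer and program the ring base, size,
 * reset index and CPU index registers.
 */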
2600 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
2601 {
2602 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2603 	const struct mtk_soc_data *soc = eth->soc;
2604 	struct mtk_rx_ring *ring;
2605 	int rx_data_len, rx_dma_size, tx_ring_size;
2606 	int i;
2607 
2608 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2609 		tx_ring_size = MTK_QDMA_RING_SIZE;
2610 	else
2611 		tx_ring_size = soc->tx.dma_size;
2612 
2613 	if (rx_flag == MTK_RX_FLAGS_QDMA) {
2614 		if (ring_no)
2615 			return -EINVAL;
2616 		ring = &eth->rx_ring_qdma;
2617 	} else {
2618 		ring = &eth->rx_ring[ring_no];
2619 	}
2620 
2621 	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
2622 		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
2623 		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
2624 	} else {
2625 		rx_data_len = ETH_DATA_LEN;
2626 		rx_dma_size = soc->rx.dma_size;
2627 	}
2628 
2629 	ring->frag_size = mtk_max_frag_size(rx_data_len);
2630 	ring->buf_size = mtk_max_buf_size(ring->frag_size);
2631 	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
2632 			     GFP_KERNEL);
2633 	if (!ring->data)
2634 		return -ENOMEM;
2635 
2636 	if (mtk_page_pool_enabled(eth)) {
2637 		struct page_pool *pp;
2638 
2639 		pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
2640 					  rx_dma_size);
2641 		if (IS_ERR(pp))
2642 			return PTR_ERR(pp);
2643 
2644 		ring->page_pool = pp;
2645 	}
2646 
2647 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
2648 	    rx_flag != MTK_RX_FLAGS_NORMAL) {
2649 		ring->dma = dma_alloc_coherent(eth->dma_dev,
2650 				rx_dma_size * eth->soc->rx.desc_size,
2651 				&ring->phys, GFP_KERNEL);
2652 	} else {
2653 		struct mtk_tx_ring *tx_ring = &eth->tx_ring;
2654 
2655 		ring->dma = tx_ring->dma + tx_ring_size *
2656 			    eth->soc->tx.desc_size * (ring_no + 1);
2657 		ring->phys = tx_ring->phys + tx_ring_size *
2658 			     eth->soc->tx.desc_size * (ring_no + 1);
2659 	}
2660 
2661 	if (!ring->dma)
2662 		return -ENOMEM;
2663 
2664 	for (i = 0; i < rx_dma_size; i++) {
2665 		struct mtk_rx_dma_v2 *rxd;
2666 		dma_addr_t dma_addr;
2667 		void *data;
2668 
2669 		rxd = ring->dma + i * eth->soc->rx.desc_size;
2670 		if (ring->page_pool) {
2671 			data = mtk_page_pool_get_buff(ring->page_pool,
2672 						      &dma_addr, GFP_KERNEL);
2673 			if (!data)
2674 				return -ENOMEM;
2675 		} else {
2676 			if (ring->frag_size <= PAGE_SIZE)
2677 				data = netdev_alloc_frag(ring->frag_size);
2678 			else
2679 				data = mtk_max_lro_buf_alloc(GFP_KERNEL);
2680 
2681 			if (!data)
2682 				return -ENOMEM;
2683 
2684 			dma_addr = dma_map_single(eth->dma_dev,
2685 				data + NET_SKB_PAD + eth->ip_align,
2686 				ring->buf_size, DMA_FROM_DEVICE);
2687 			if (unlikely(dma_mapping_error(eth->dma_dev,
2688 						       dma_addr))) {
2689 				skb_free_frag(data);
2690 				return -ENOMEM;
2691 			}
2692 		}
2693 		rxd->rxd1 = (unsigned int)dma_addr;
2694 		ring->data[i] = data;
2695 
2696 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2697 			rxd->rxd2 = RX_DMA_LSO;
2698 		else
2699 			rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2700 
2701 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
2702 			rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
2703 
2704 		rxd->rxd3 = 0;
2705 		rxd->rxd4 = 0;
2706 		if (mtk_is_netsys_v3_or_greater(eth)) {
2707 			rxd->rxd5 = 0;
2708 			rxd->rxd6 = 0;
2709 			rxd->rxd7 = 0;
2710 			rxd->rxd8 = 0;
2711 		}
2712 	}
2713 
2714 	ring->dma_size = rx_dma_size;
2715 	ring->calc_idx_update = false;
2716 	ring->calc_idx = rx_dma_size - 1;
2717 	if (rx_flag == MTK_RX_FLAGS_QDMA)
2718 		ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
2719 				    ring_no * MTK_QRX_OFFSET;
2720 	else
2721 		ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
2722 				    ring_no * MTK_QRX_OFFSET;
2723 	/* make sure that all changes to the dma ring are flushed before we
2724 	 * continue
2725 	 */
2726 	wmb();
2727 
2728 	if (rx_flag == MTK_RX_FLAGS_QDMA) {
2729 		mtk_w32(eth, ring->phys,
2730 			reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2731 		mtk_w32(eth, rx_dma_size,
2732 			reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2733 		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2734 			reg_map->qdma.rst_idx);
2735 	} else {
2736 		mtk_w32(eth, ring->phys,
2737 			reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2738 		mtk_w32(eth, rx_dma_size,
2739 			reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2740 		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2741 			reg_map->pdma.rst_idx);
2742 	}
2743 	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
2744 
2745 	return 0;
2746 }
2747 
2748 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_sram)
2749 {
2750 	u64 addr64 = 0;
2751 	int i;
2752 
2753 	if (ring->data && ring->dma) {
2754 		for (i = 0; i < ring->dma_size; i++) {
2755 			struct mtk_rx_dma *rxd;
2756 
2757 			if (!ring->data[i])
2758 				continue;
2759 
2760 			rxd = ring->dma + i * eth->soc->rx.desc_size;
2761 			if (!rxd->rxd1)
2762 				continue;
2763 
2764 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
2765 				addr64 = RX_DMA_GET_ADDR64(rxd->rxd2);
2766 
2767 			dma_unmap_single(eth->dma_dev, ((u64)rxd->rxd1 | addr64),
2768 					 ring->buf_size, DMA_FROM_DEVICE);
2769 			mtk_rx_put_buff(ring, ring->data[i], false);
2770 		}
2771 		kfree(ring->data);
2772 		ring->data = NULL;
2773 	}
2774 
2775 	if (!in_sram && ring->dma) {
2776 		dma_free_coherent(eth->dma_dev,
2777 				  ring->dma_size * eth->soc->rx.desc_size,
2778 				  ring->dma, ring->phys);
2779 		ring->dma = NULL;
2780 	}
2781 
2782 	if (ring->page_pool) {
2783 		if (xdp_rxq_info_is_reg(&ring->xdp_q))
2784 			xdp_rxq_info_unreg(&ring->xdp_q);
2785 		page_pool_destroy(ring->page_pool);
2786 		ring->page_pool = NULL;
2787 	}
2788 }
2789 
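/* Program the HW LRO engine: put the aggregation rings into auto-learn mode,
 * configure the age/aggregation timers and thresholds, and finally enable
 * LRO in the PDMA.
 */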
2790 static int mtk_hwlro_rx_init(struct mtk_eth *eth)
2791 {
2792 	int i;
2793 	u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
2794 	u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
2795 
2796 	/* set LRO rings to auto-learn mode */
2797 	ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
2798 
2799 	/* validate LRO ring */
2800 	ring_ctrl_dw2 |= MTK_RING_VLD;
2801 
2802 	/* set AGE timer (unit: 20us) */
2803 	ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
2804 	ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
2805 
2806 	/* set max AGG timer (unit: 20us) */
2807 	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
2808 
2809 	/* set max LRO AGG count */
2810 	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
2811 	ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
2812 
2813 	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2814 		mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
2815 		mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
2816 		mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
2817 	}
2818 
2819 	/* enable IPv4 checksum update */
2820 	lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
2821 
2822 	/* switch priority comparison to packet count mode */
2823 	lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
2824 
2825 	/* bandwidth threshold setting */
2826 	mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
2827 
2828 	/* auto-learn score delta setting */
2829 	mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
2830 
2831 	/* set refresh timer for altering flows to 1 sec. (unit: 20us) */
2832 	mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
2833 		MTK_PDMA_LRO_ALT_REFRESH_TIMER);
2834 
2835 	/* set HW LRO mode & the max aggregation count for rx packets */
2836 	lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
2837 
2838 	/* minimum remaining SDL0 room in the RXD required for LRO aggregation */
2839 	lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
2840 
2841 	/* enable HW LRO */
2842 	lro_ctrl_dw0 |= MTK_LRO_EN;
2843 
2844 	mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
2845 	mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
2846 
2847 	return 0;
2848 }
2849 
2850 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
2851 {
2852 	int i;
2853 	u32 val;
2854 
2855 	/* relinquish lro rings, flush aggregated packets */
2856 	mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
2857 
2858 	/* wait for the relinquishment to complete */
2859 	for (i = 0; i < 10; i++) {
2860 		val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
2861 		if (val & MTK_LRO_RING_RELINQUISH_DONE) {
2862 			msleep(20);
2863 			continue;
2864 		}
2865 		break;
2866 	}
2867 
2868 	/* invalidate lro rings */
2869 	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
2870 		mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
2871 
2872 	/* disable HW LRO */
2873 	mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
2874 }
2875 
2876 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
2877 {
2878 	u32 reg_val;
2879 
2880 	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2881 
2882 	/* invalidate the IP setting */
2883 	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2884 
2885 	mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
2886 
2887 	/* validate the IP setting */
2888 	mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2889 }
2890 
2891 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
2892 {
2893 	u32 reg_val;
2894 
2895 	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2896 
2897 	/* invalidate the IP setting */
2898 	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2899 
2900 	mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
2901 }
2902 
2903 static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
2904 {
2905 	int cnt = 0;
2906 	int i;
2907 
2908 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2909 		if (mac->hwlro_ip[i])
2910 			cnt++;
2911 	}
2912 
2913 	return cnt;
2914 }
2915 
2916 static int mtk_hwlro_add_ipaddr(struct net_device *dev,
2917 				struct ethtool_rxnfc *cmd)
2918 {
2919 	struct ethtool_rx_flow_spec *fsp =
2920 		(struct ethtool_rx_flow_spec *)&cmd->fs;
2921 	struct mtk_mac *mac = netdev_priv(dev);
2922 	struct mtk_eth *eth = mac->hw;
2923 	int hwlro_idx;
2924 
2925 	if ((fsp->flow_type != TCP_V4_FLOW) ||
2926 	    (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
2927 	    (fsp->location > 1))
2928 		return -EINVAL;
2929 
2930 	mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
2931 	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2932 
2933 	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2934 
2935 	mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
2936 
2937 	return 0;
2938 }
2939 
2940 static int mtk_hwlro_del_ipaddr(struct net_device *dev,
2941 				struct ethtool_rxnfc *cmd)
2942 {
2943 	struct ethtool_rx_flow_spec *fsp =
2944 		(struct ethtool_rx_flow_spec *)&cmd->fs;
2945 	struct mtk_mac *mac = netdev_priv(dev);
2946 	struct mtk_eth *eth = mac->hw;
2947 	int hwlro_idx;
2948 
2949 	if (fsp->location > 1)
2950 		return -EINVAL;
2951 
2952 	mac->hwlro_ip[fsp->location] = 0;
2953 	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2954 
2955 	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2956 
2957 	mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2958 
2959 	return 0;
2960 }
2961 
2962 static void mtk_hwlro_netdev_disable(struct net_device *dev)
2963 {
2964 	struct mtk_mac *mac = netdev_priv(dev);
2965 	struct mtk_eth *eth = mac->hw;
2966 	int i, hwlro_idx;
2967 
2968 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2969 		mac->hwlro_ip[i] = 0;
2970 		hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
2971 
2972 		mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2973 	}
2974 
2975 	mac->hwlro_ip_cnt = 0;
2976 }
2977 
2978 static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
2979 				    struct ethtool_rxnfc *cmd)
2980 {
2981 	struct mtk_mac *mac = netdev_priv(dev);
2982 	struct ethtool_rx_flow_spec *fsp =
2983 		(struct ethtool_rx_flow_spec *)&cmd->fs;
2984 
2985 	if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
2986 		return -EINVAL;
2987 
2988 	/* only the TCP dst IPv4 address is meaningful; the rest is ignored */
2989 	fsp->flow_type = TCP_V4_FLOW;
2990 	fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
2991 	fsp->m_u.tcp_ip4_spec.ip4dst = 0;
2992 
2993 	fsp->h_u.tcp_ip4_spec.ip4src = 0;
2994 	fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
2995 	fsp->h_u.tcp_ip4_spec.psrc = 0;
2996 	fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
2997 	fsp->h_u.tcp_ip4_spec.pdst = 0;
2998 	fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
2999 	fsp->h_u.tcp_ip4_spec.tos = 0;
3000 	fsp->m_u.tcp_ip4_spec.tos = 0xff;
3001 
3002 	return 0;
3003 }
3004 
3005 static int mtk_hwlro_get_fdir_all(struct net_device *dev,
3006 				  struct ethtool_rxnfc *cmd,
3007 				  u32 *rule_locs)
3008 {
3009 	struct mtk_mac *mac = netdev_priv(dev);
3010 	int cnt = 0;
3011 	int i;
3012 
3013 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
3014 		if (cnt == cmd->rule_cnt)
3015 			return -EMSGSIZE;
3016 
3017 		if (mac->hwlro_ip[i]) {
3018 			rule_locs[cnt] = i;
3019 			cnt++;
3020 		}
3021 	}
3022 
3023 	cmd->rule_cnt = cnt;
3024 
3025 	return 0;
3026 }
3027 
3028 static netdev_features_t mtk_fix_features(struct net_device *dev,
3029 					  netdev_features_t features)
3030 {
3031 	if (!(features & NETIF_F_LRO)) {
3032 		struct mtk_mac *mac = netdev_priv(dev);
3033 		int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
3034 
3035 		if (ip_cnt) {
3036 			netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
3037 
3038 			features |= NETIF_F_LRO;
3039 		}
3040 	}
3041 
3042 	return features;
3043 }
3044 
3045 static int mtk_set_features(struct net_device *dev, netdev_features_t features)
3046 {
3047 	netdev_features_t diff = dev->features ^ features;
3048 
3049 	if ((diff & NETIF_F_LRO) && !(features & NETIF_F_LRO))
3050 		mtk_hwlro_netdev_disable(dev);
3051 
3052 	return 0;
3053 }
3054 
3055 /* wait for DMA to finish whatever it is doing before we start using it again */
3056 static int mtk_dma_busy_wait(struct mtk_eth *eth)
3057 {
3058 	unsigned int reg;
3059 	int ret;
3060 	u32 val;
3061 
3062 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3063 		reg = eth->soc->reg_map->qdma.glo_cfg;
3064 	else
3065 		reg = eth->soc->reg_map->pdma.glo_cfg;
3066 
3067 	ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
3068 					!(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
3069 					5, MTK_DMA_BUSY_TIMEOUT_US);
3070 	if (ret)
3071 		dev_err(eth->dev, "DMA init timeout\n");
3072 
3073 	return ret;
3074 }
3075 
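/* Set up all DMA resources: the QDMA scratch/free queue, the TX ring, the
 * QDMA, normal and (optional) HW LRO RX rings and, on QDMA SoCs, the flow
 * control thresholds.
 */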
3076 static int mtk_dma_init(struct mtk_eth *eth)
3077 {
3078 	int err;
3079 	u32 i;
3080 
3081 	if (mtk_dma_busy_wait(eth))
3082 		return -EBUSY;
3083 
3084 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3085 		/* QDMA needs scratch memory for internal reordering of the
3086 		 * descriptors
3087 		 */
3088 		err = mtk_init_fq_dma(eth);
3089 		if (err)
3090 			return err;
3091 	}
3092 
3093 	err = mtk_tx_alloc(eth);
3094 	if (err)
3095 		return err;
3096 
3097 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3098 		err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
3099 		if (err)
3100 			return err;
3101 	}
3102 
3103 	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
3104 	if (err)
3105 		return err;
3106 
3107 	if (eth->hwlro) {
3108 		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
3109 			err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
3110 			if (err)
3111 				return err;
3112 		}
3113 		err = mtk_hwlro_rx_init(eth);
3114 		if (err)
3115 			return err;
3116 	}
3117 
3118 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3119 		/* Enable random early drop and set drop threshold
3120 		 * automatically
3121 		 */
3122 		mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
3123 			FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
3124 		mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
3125 	}
3126 
3127 	return 0;
3128 }
3129 
3130 static void mtk_dma_free(struct mtk_eth *eth)
3131 {
3132 	const struct mtk_soc_data *soc = eth->soc;
3133 	int i;
3134 
3135 	for (i = 0; i < MTK_MAX_DEVS; i++)
3136 		if (eth->netdev[i])
3137 			netdev_reset_queue(eth->netdev[i]);
3138 	if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
3139 		dma_free_coherent(eth->dma_dev,
3140 				  MTK_QDMA_RING_SIZE * soc->tx.desc_size,
3141 				  eth->scratch_ring, eth->phy_scratch_ring);
3142 		eth->scratch_ring = NULL;
3143 		eth->phy_scratch_ring = 0;
3144 	}
3145 	mtk_tx_clean(eth);
3146 	mtk_rx_clean(eth, &eth->rx_ring[0], MTK_HAS_CAPS(soc->caps, MTK_SRAM));
3147 	mtk_rx_clean(eth, &eth->rx_ring_qdma, false);
3148 
3149 	if (eth->hwlro) {
3150 		mtk_hwlro_rx_uninit(eth);
3151 		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
3152 			mtk_rx_clean(eth, &eth->rx_ring[i], false);
3153 	}
3154 
3155 	for (i = 0; i < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); i++) {
3156 		kfree(eth->scratch_head[i]);
3157 		eth->scratch_head[i] = NULL;
3158 	}
3159 }
3160 
3161 static bool mtk_hw_reset_check(struct mtk_eth *eth)
3162 {
3163 	u32 val = mtk_r32(eth, MTK_INT_STATUS2);
3164 
3165 	return (val & MTK_FE_INT_FQ_EMPTY) || (val & MTK_FE_INT_RFIFO_UF) ||
3166 	       (val & MTK_FE_INT_RFIFO_OV) || (val & MTK_FE_INT_TSO_FAIL) ||
3167 	       (val & MTK_FE_INT_TSO_ALIGN) || (val & MTK_FE_INT_TSO_ILLEGAL);
3168 }
3169 
3170 static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
3171 {
3172 	struct mtk_mac *mac = netdev_priv(dev);
3173 	struct mtk_eth *eth = mac->hw;
3174 
3175 	if (test_bit(MTK_RESETTING, &eth->state))
3176 		return;
3177 
3178 	if (!mtk_hw_reset_check(eth))
3179 		return;
3180 
3181 	eth->netdev[mac->id]->stats.tx_errors++;
3182 	netif_err(eth, tx_err, dev, "transmit timed out\n");
3183 
3184 	schedule_work(&eth->pending_work);
3185 }
3186 
3187 static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
3188 {
3189 	struct mtk_eth *eth = _eth;
3190 
3191 	eth->rx_events++;
3192 	if (likely(napi_schedule_prep(&eth->rx_napi))) {
3193 		mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
3194 		__napi_schedule(&eth->rx_napi);
3195 	}
3196 
3197 	return IRQ_HANDLED;
3198 }
3199 
3200 static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
3201 {
3202 	struct mtk_eth *eth = _eth;
3203 
3204 	eth->tx_events++;
3205 	if (likely(napi_schedule_prep(&eth->tx_napi))) {
3206 		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3207 		__napi_schedule(&eth->tx_napi);
3208 	}
3209 
3210 	return IRQ_HANDLED;
3211 }
3212 
3213 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
3214 {
3215 	struct mtk_eth *eth = _eth;
3216 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3217 
3218 	if (mtk_r32(eth, reg_map->pdma.irq_mask) &
3219 	    eth->soc->rx.irq_done_mask) {
3220 		if (mtk_r32(eth, reg_map->pdma.irq_status) &
3221 		    eth->soc->rx.irq_done_mask)
3222 			mtk_handle_irq_rx(irq, _eth);
3223 	}
3224 	if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
3225 		if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
3226 			mtk_handle_irq_tx(irq, _eth);
3227 	}
3228 
3229 	return IRQ_HANDLED;
3230 }
3231 
3232 #ifdef CONFIG_NET_POLL_CONTROLLER
3233 static void mtk_poll_controller(struct net_device *dev)
3234 {
3235 	struct mtk_mac *mac = netdev_priv(dev);
3236 	struct mtk_eth *eth = mac->hw;
3237 
3238 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3239 	mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
3240 	mtk_handle_irq_rx(eth->irq[2], dev);
3241 	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3242 	mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
3243 }
3244 #endif
3245 
3246 static int mtk_start_dma(struct mtk_eth *eth)
3247 {
3248 	u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
3249 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3250 	int err;
3251 
3252 	err = mtk_dma_init(eth);
3253 	if (err) {
3254 		mtk_dma_free(eth);
3255 		return err;
3256 	}
3257 
3258 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3259 		val = mtk_r32(eth, reg_map->qdma.glo_cfg);
3260 		val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3261 		       MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
3262 		       MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
3263 
3264 		if (mtk_is_netsys_v2_or_greater(eth))
3265 			val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
3266 			       MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
3267 			       MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN;
3268 		else
3269 			val |= MTK_RX_BT_32DWORDS;
3270 		mtk_w32(eth, val, reg_map->qdma.glo_cfg);
3271 
3272 		mtk_w32(eth,
3273 			MTK_RX_DMA_EN | rx_2b_offset |
3274 			MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
3275 			reg_map->pdma.glo_cfg);
3276 	} else {
3277 		mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3278 			MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
3279 			reg_map->pdma.glo_cfg);
3280 	}
3281 
3282 	return 0;
3283 }
3284 
3285 static void mtk_gdm_config(struct mtk_eth *eth, u32 id, u32 config)
3286 {
3287 	u32 val;
3288 
3289 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3290 		return;
3291 
3292 	val = mtk_r32(eth, MTK_GDMA_FWD_CFG(id));
3293 
3294 	/* by default, set up the forward port to send frames to the PDMA */
3295 	val &= ~0xffff;
3296 
3297 	/* Enable RX checksum */
3298 	val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
3299 
3300 	val |= config;
3301 
3302 	if (eth->netdev[id] && netdev_uses_dsa(eth->netdev[id]))
3303 		val |= MTK_GDMA_SPECIAL_TAG;
3304 
3305 	mtk_w32(eth, val, MTK_GDMA_FWD_CFG(id));
3306 }
3307 
3308 
3309 static bool mtk_uses_dsa(struct net_device *dev)
3310 {
3311 #if IS_ENABLED(CONFIG_NET_DSA)
3312 	return netdev_uses_dsa(dev) &&
3313 	       dev->dsa_ptr->tag_ops->proto == DSA_TAG_PROTO_MTK;
3314 #else
3315 	return false;
3316 #endif
3317 }
3318 
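/* Netdevice notifier: when the link speed of a DSA user port stacked on this
 * MAC changes, update the rate of the QDMA TX queue serving that port
 * (queue dp->index + 3) via mtk_set_queue_speed().
 */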
3319 static int mtk_device_event(struct notifier_block *n, unsigned long event, void *ptr)
3320 {
3321 	struct mtk_mac *mac = container_of(n, struct mtk_mac, device_notifier);
3322 	struct mtk_eth *eth = mac->hw;
3323 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3324 	struct ethtool_link_ksettings s;
3325 	struct net_device *ldev;
3326 	struct list_head *iter;
3327 	struct dsa_port *dp;
3328 
3329 	if (event != NETDEV_CHANGE)
3330 		return NOTIFY_DONE;
3331 
3332 	netdev_for_each_lower_dev(dev, ldev, iter) {
3333 		if (netdev_priv(ldev) == mac)
3334 			goto found;
3335 	}
3336 
3337 	return NOTIFY_DONE;
3338 
3339 found:
3340 	if (!dsa_user_dev_check(dev))
3341 		return NOTIFY_DONE;
3342 
3343 	if (__ethtool_get_link_ksettings(dev, &s))
3344 		return NOTIFY_DONE;
3345 
3346 	if (s.base.speed == 0 || s.base.speed == ((__u32)-1))
3347 		return NOTIFY_DONE;
3348 
3349 	dp = dsa_port_from_netdev(dev);
3350 	if (dp->index >= MTK_QDMA_NUM_QUEUES)
3351 		return NOTIFY_DONE;
3352 
3353 	if (mac->speed > 0 && mac->speed <= s.base.speed)
3354 		s.base.speed = 0;
3355 
3356 	mtk_set_queue_speed(eth, dp->index + 3, s.base.speed);
3357 
3358 	return NOTIFY_DONE;
3359 }
3360 
3361 static int mtk_open(struct net_device *dev)
3362 {
3363 	struct mtk_mac *mac = netdev_priv(dev);
3364 	struct mtk_eth *eth = mac->hw;
3365 	struct mtk_mac *target_mac;
3366 	int i, err, ppe_num;
3367 
3368 	ppe_num = eth->soc->ppe_num;
3369 
3370 	err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
3371 	if (err) {
3372 		netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
3373 			   err);
3374 		return err;
3375 	}
3376 
3377 	/* all netdevs share the same DMA ring, so we only bring it up once */
3378 	if (!refcount_read(&eth->dma_refcnt)) {
3379 		const struct mtk_soc_data *soc = eth->soc;
3380 		u32 gdm_config;
3381 		int i;
3382 
3383 		err = mtk_start_dma(eth);
3384 		if (err) {
3385 			phylink_disconnect_phy(mac->phylink);
3386 			return err;
3387 		}
3388 
3389 		for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3390 			mtk_ppe_start(eth->ppe[i]);
3391 
3392 		for (i = 0; i < MTK_MAX_DEVS; i++) {
3393 			if (!eth->netdev[i])
3394 				continue;
3395 
3396 			target_mac = netdev_priv(eth->netdev[i]);
3397 			if (!soc->offload_version) {
3398 				target_mac->ppe_idx = 0;
3399 				gdm_config = MTK_GDMA_TO_PDMA;
3400 			} else if (ppe_num >= 3 && target_mac->id == 2) {
3401 				target_mac->ppe_idx = 2;
3402 				gdm_config = soc->reg_map->gdma_to_ppe[2];
3403 			} else if (ppe_num >= 2 && target_mac->id == 1) {
3404 				target_mac->ppe_idx = 1;
3405 				gdm_config = soc->reg_map->gdma_to_ppe[1];
3406 			} else {
3407 				target_mac->ppe_idx = 0;
3408 				gdm_config = soc->reg_map->gdma_to_ppe[0];
3409 			}
3410 			mtk_gdm_config(eth, target_mac->id, gdm_config);
3411 		}
3412 		/* Reset and enable PSE */
3413 		mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
3414 		mtk_w32(eth, 0, MTK_RST_GL);
3415 
3416 		napi_enable(&eth->tx_napi);
3417 		napi_enable(&eth->rx_napi);
3418 		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3419 		mtk_rx_irq_enable(eth, soc->rx.irq_done_mask);
3420 		refcount_set(&eth->dma_refcnt, 1);
3421 	} else {
3422 		refcount_inc(&eth->dma_refcnt);
3423 	}
3424 
3425 	phylink_start(mac->phylink);
3426 	netif_tx_start_all_queues(dev);
3427 
3428 	if (mtk_is_netsys_v2_or_greater(eth))
3429 		return 0;
3430 
3431 	if (mtk_uses_dsa(dev) && !eth->prog) {
3432 		for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
3433 			struct metadata_dst *md_dst = eth->dsa_meta[i];
3434 
3435 			if (md_dst)
3436 				continue;
3437 
3438 			md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
3439 						    GFP_KERNEL);
3440 			if (!md_dst)
3441 				return -ENOMEM;
3442 
3443 			md_dst->u.port_info.port_id = i;
3444 			eth->dsa_meta[i] = md_dst;
3445 		}
3446 	} else {
3447 		/* Hardware DSA untagging and VLAN RX offloading need to be
3448 		 * disabled if at least one MAC does not use DSA.
3449 		 */
3450 		u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3451 
3452 		val &= ~MTK_CDMP_STAG_EN;
3453 		mtk_w32(eth, val, MTK_CDMP_IG_CTRL);
3454 
3455 		mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
3456 	}
3457 
3458 	return 0;
3459 }
3460 
3461 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
3462 {
3463 	u32 val;
3464 	int i;
3465 
3466 	/* stop the dma engine */
3467 	spin_lock_bh(&eth->page_lock);
3468 	val = mtk_r32(eth, glo_cfg);
3469 	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
3470 		glo_cfg);
3471 	spin_unlock_bh(&eth->page_lock);
3472 
3473 	/* wait for dma stop */
3474 	for (i = 0; i < 10; i++) {
3475 		val = mtk_r32(eth, glo_cfg);
3476 		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
3477 			msleep(20);
3478 			continue;
3479 		}
3480 		break;
3481 	}
3482 }
3483 
3484 static int mtk_stop(struct net_device *dev)
3485 {
3486 	struct mtk_mac *mac = netdev_priv(dev);
3487 	struct mtk_eth *eth = mac->hw;
3488 	int i;
3489 
3490 	phylink_stop(mac->phylink);
3491 
3492 	netif_tx_disable(dev);
3493 
3494 	phylink_disconnect_phy(mac->phylink);
3495 
3496 	/* only shut down the DMA engine if this is the last user */
3497 	if (!refcount_dec_and_test(&eth->dma_refcnt))
3498 		return 0;
3499 
3500 	for (i = 0; i < MTK_MAX_DEVS; i++)
3501 		mtk_gdm_config(eth, i, MTK_GDMA_DROP_ALL);
3502 
3503 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3504 	mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
3505 	napi_disable(&eth->tx_napi);
3506 	napi_disable(&eth->rx_napi);
3507 
3508 	cancel_work_sync(&eth->rx_dim.work);
3509 	cancel_work_sync(&eth->tx_dim.work);
3510 
3511 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3512 		mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
3513 	mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
3514 
3515 	mtk_dma_free(eth);
3516 
3517 	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3518 		mtk_ppe_stop(eth->ppe[i]);
3519 
3520 	return 0;
3521 }
3522 
3523 static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
3524 			 struct netlink_ext_ack *extack)
3525 {
3526 	struct mtk_mac *mac = netdev_priv(dev);
3527 	struct mtk_eth *eth = mac->hw;
3528 	struct bpf_prog *old_prog;
3529 	bool need_update;
3530 
3531 	if (eth->hwlro) {
3532 		NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
3533 		return -EOPNOTSUPP;
3534 	}
3535 
3536 	if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
3537 		NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
3538 		return -EOPNOTSUPP;
3539 	}
3540 
3541 	need_update = !!eth->prog != !!prog;
3542 	if (netif_running(dev) && need_update)
3543 		mtk_stop(dev);
3544 
3545 	old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
3546 	if (old_prog)
3547 		bpf_prog_put(old_prog);
3548 
3549 	if (netif_running(dev) && need_update)
3550 		return mtk_open(dev);
3551 
3552 	return 0;
3553 }
3554 
3555 static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3556 {
3557 	switch (xdp->command) {
3558 	case XDP_SETUP_PROG:
3559 		return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
3560 	default:
3561 		return -EINVAL;
3562 	}
3563 }
3564 
3565 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
3566 {
3567 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3568 			   reset_bits,
3569 			   reset_bits);
3570 
3571 	usleep_range(1000, 1100);
3572 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3573 			   reset_bits,
3574 			   ~reset_bits);
3575 	mdelay(10);
3576 }
3577 
3578 static void mtk_clk_disable(struct mtk_eth *eth)
3579 {
3580 	int clk;
3581 
3582 	for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
3583 		clk_disable_unprepare(eth->clks[clk]);
3584 }
3585 
3586 static int mtk_clk_enable(struct mtk_eth *eth)
3587 {
3588 	int clk, ret;
3589 
3590 	for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
3591 		ret = clk_prepare_enable(eth->clks[clk]);
3592 		if (ret)
3593 			goto err_disable_clks;
3594 	}
3595 
3596 	return 0;
3597 
3598 err_disable_clks:
3599 	while (--clk >= 0)
3600 		clk_disable_unprepare(eth->clks[clk]);
3601 
3602 	return ret;
3603 }
3604 
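/* net_dim worker: translate the current RX moderation profile into the PDMA
 * (and, on QDMA SoCs, QDMA) delay-interrupt register, i.e. how long and for
 * how many packets the hardware may defer the RX interrupt.
 */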
3605 static void mtk_dim_rx(struct work_struct *work)
3606 {
3607 	struct dim *dim = container_of(work, struct dim, work);
3608 	struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
3609 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3610 	struct dim_cq_moder cur_profile;
3611 	u32 val, cur;
3612 
3613 	cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
3614 						dim->profile_ix);
3615 	spin_lock_bh(&eth->dim_lock);
3616 
3617 	val = mtk_r32(eth, reg_map->pdma.delay_irq);
3618 	val &= MTK_PDMA_DELAY_TX_MASK;
3619 	val |= MTK_PDMA_DELAY_RX_EN;
3620 
3621 	cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3622 	val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
3623 
3624 	cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3625 	val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
3626 
3627 	mtk_w32(eth, val, reg_map->pdma.delay_irq);
3628 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3629 		mtk_w32(eth, val, reg_map->qdma.delay_irq);
3630 
3631 	spin_unlock_bh(&eth->dim_lock);
3632 
3633 	dim->state = DIM_START_MEASURE;
3634 }
3635 
3636 static void mtk_dim_tx(struct work_struct *work)
3637 {
3638 	struct dim *dim = container_of(work, struct dim, work);
3639 	struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
3640 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3641 	struct dim_cq_moder cur_profile;
3642 	u32 val, cur;
3643 
3644 	cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
3645 						dim->profile_ix);
3646 	spin_lock_bh(&eth->dim_lock);
3647 
3648 	val = mtk_r32(eth, reg_map->pdma.delay_irq);
3649 	val &= MTK_PDMA_DELAY_RX_MASK;
3650 	val |= MTK_PDMA_DELAY_TX_EN;
3651 
3652 	cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3653 	val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;
3654 
3655 	cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3656 	val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
3657 
3658 	mtk_w32(eth, val, reg_map->pdma.delay_irq);
3659 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3660 		mtk_w32(eth, val, reg_map->qdma.delay_irq);
3661 
3662 	spin_unlock_bh(&eth->dim_lock);
3663 
3664 	dim->state = DIM_START_MEASURE;
3665 }
3666 
3667 static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val)
3668 {
3669 	struct mtk_eth *eth = mac->hw;
3670 	u32 mcr_cur, mcr_new;
3671 
3672 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3673 		return;
3674 
3675 	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
3676 	mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
3677 
3678 	if (val <= 1518)
3679 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
3680 	else if (val <= 1536)
3681 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
3682 	else if (val <= 1552)
3683 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
3684 	else
3685 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
3686 
3687 	if (mcr_new != mcr_cur)
3688 		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
3689 }
3690 
3691 static void mtk_hw_reset(struct mtk_eth *eth)
3692 {
3693 	u32 val;
3694 
3695 	if (mtk_is_netsys_v2_or_greater(eth))
3696 		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
3697 
3698 	if (mtk_is_netsys_v3_or_greater(eth)) {
3699 		val = RSTCTRL_PPE0_V3;
3700 
3701 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3702 			val |= RSTCTRL_PPE1_V3;
3703 
3704 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
3705 			val |= RSTCTRL_PPE2;
3706 
3707 		val |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
3708 	} else if (mtk_is_netsys_v2_or_greater(eth)) {
3709 		val = RSTCTRL_PPE0_V2;
3710 
3711 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3712 			val |= RSTCTRL_PPE1;
3713 	} else {
3714 		val = RSTCTRL_PPE0;
3715 	}
3716 
3717 	ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
3718 
3719 	if (mtk_is_netsys_v3_or_greater(eth))
3720 		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
3721 			     0x6f8ff);
3722 	else if (mtk_is_netsys_v2_or_greater(eth))
3723 		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
3724 			     0x3ffffff);
3725 }
3726 
3727 static u32 mtk_hw_reset_read(struct mtk_eth *eth)
3728 {
3729 	u32 val;
3730 
3731 	regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
3732 	return val;
3733 }
3734 
3735 static void mtk_hw_warm_reset(struct mtk_eth *eth)
3736 {
3737 	u32 rst_mask, val;
3738 
3739 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE,
3740 			   RSTCTRL_FE);
3741 	if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val,
3742 				      val & RSTCTRL_FE, 1, 1000)) {
3743 		dev_err(eth->dev, "warm reset failed\n");
3744 		mtk_hw_reset(eth);
3745 		return;
3746 	}
3747 
3748 	if (mtk_is_netsys_v3_or_greater(eth)) {
3749 		rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V3;
3750 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3751 			rst_mask |= RSTCTRL_PPE1_V3;
3752 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
3753 			rst_mask |= RSTCTRL_PPE2;
3754 
3755 		rst_mask |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
3756 	} else if (mtk_is_netsys_v2_or_greater(eth)) {
3757 		rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2;
3758 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3759 			rst_mask |= RSTCTRL_PPE1;
3760 	} else {
3761 		rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0;
3762 	}
3763 
3764 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask);
3765 
3766 	udelay(1);
3767 	val = mtk_hw_reset_read(eth);
3768 	if (!(val & rst_mask))
3769 		dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n",
3770 			val, rst_mask);
3771 
3772 	rst_mask |= RSTCTRL_FE;
3773 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask);
3774 
3775 	udelay(1);
3776 	val = mtk_hw_reset_read(eth);
3777 	if (val & rst_mask)
3778 		dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n",
3779 			val, rst_mask);
3780 }
3781 
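/* Heuristic DMA hang detection used by the reset monitor: the WDMA, QDMA or
 * ADMA path is only considered stuck after the same symptom has been observed
 * on more than two consecutive checks. Returns true when a reset is needed.
 */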
3782 static bool mtk_hw_check_dma_hang(struct mtk_eth *eth)
3783 {
3784 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3785 	bool gmac1_tx, gmac2_tx, gdm1_tx, gdm2_tx;
3786 	bool oq_hang, cdm1_busy, adma_busy;
3787 	bool wtx_busy, cdm_full, oq_free;
3788 	u32 wdidx, val, gdm1_fc, gdm2_fc;
3789 	bool qfsm_hang, qfwd_hang;
3790 	bool ret = false;
3791 
3792 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3793 		return false;
3794 
3795 	/* WDMA sanity checks */
3796 	wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc);
3797 
3798 	val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204);
3799 	wtx_busy = FIELD_GET(MTK_TX_DMA_BUSY, val);
3800 
3801 	val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230);
3802 	cdm_full = !FIELD_GET(MTK_CDM_TXFIFO_RDY, val);
3803 
3804 	oq_free  = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) &&
3805 		    !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) &&
3806 		    !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16)));
3807 
3808 	if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) {
3809 		if (++eth->reset.wdma_hang_count > 2) {
3810 			eth->reset.wdma_hang_count = 0;
3811 			ret = true;
3812 		}
3813 		goto out;
3814 	}
3815 
3816 	/* QDMA sanity checks */
3817 	qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234);
3818 	qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308);
3819 
3820 	gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0;
3821 	gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0;
3822 	gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1;
3823 	gmac2_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1;
3824 	gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24);
3825 	gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64);
3826 
3827 	if (qfsm_hang && qfwd_hang &&
3828 	    ((gdm1_tx && gmac1_tx && gdm1_fc < 1) ||
3829 	     (gdm2_tx && gmac2_tx && gdm2_fc < 1))) {
3830 		if (++eth->reset.qdma_hang_count > 2) {
3831 			eth->reset.qdma_hang_count = 0;
3832 			ret = true;
3833 		}
3834 		goto out;
3835 	}
3836 
3837 	/* ADMA sanity checks */
3838 	oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0));
3839 	cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16));
3840 	adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) &&
3841 		    !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6));
3842 
3843 	if (oq_hang && cdm1_busy && adma_busy) {
3844 		if (++eth->reset.adma_hang_count > 2) {
3845 			eth->reset.adma_hang_count = 0;
3846 			ret = true;
3847 		}
3848 		goto out;
3849 	}
3850 
3851 	eth->reset.wdma_hang_count = 0;
3852 	eth->reset.qdma_hang_count = 0;
3853 	eth->reset.adma_hang_count = 0;
3854 out:
3855 	eth->reset.wdidx = wdidx;
3856 
3857 	return ret;
3858 }
3859 
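/* Periodic DMA watchdog: if a hang is detected, schedule the pending reset
 * work. The work always re-arms itself after MTK_DMA_MONITOR_TIMEOUT.
 */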
3860 static void mtk_hw_reset_monitor_work(struct work_struct *work)
3861 {
3862 	struct delayed_work *del_work = to_delayed_work(work);
3863 	struct mtk_eth *eth = container_of(del_work, struct mtk_eth,
3864 					   reset.monitor_work);
3865 
3866 	if (test_bit(MTK_RESETTING, &eth->state))
3867 		goto out;
3868 
3869 	/* DMA stuck checks */
3870 	if (mtk_hw_check_dma_hang(eth))
3871 		schedule_work(&eth->pending_work);
3872 
3873 out:
3874 	schedule_delayed_work(&eth->reset.monitor_work,
3875 			      MTK_DMA_MONITOR_TIMEOUT);
3876 }
3877 
3878 static int mtk_hw_init(struct mtk_eth *eth, bool reset)
3879 {
3880 	u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
3881 		       ETHSYS_DMA_AG_MAP_PPE;
3882 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3883 	int i, val, ret;
3884 
3885 	if (!reset && test_and_set_bit(MTK_HW_INIT, &eth->state))
3886 		return 0;
3887 
3888 	if (!reset) {
3889 		pm_runtime_enable(eth->dev);
3890 		pm_runtime_get_sync(eth->dev);
3891 
3892 		ret = mtk_clk_enable(eth);
3893 		if (ret)
3894 			goto err_disable_pm;
3895 	}
3896 
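	/* Set the PDMA/QDMA/PPE bits in ETHSYS_DMA_AG_MAP only when the DMA
	 * device is described as cache-coherent in the devicetree; the
	 * multiplication below yields either 0 or the full mask.
	 */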
3897 	if (eth->ethsys)
3898 		regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
3899 				   of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
3900 
3901 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3902 		ret = device_reset(eth->dev);
3903 		if (ret) {
3904 			dev_err(eth->dev, "MAC reset failed!\n");
3905 			goto err_disable_pm;
3906 		}
3907 
3908 		/* set interrupt delays based on current Net DIM sample */
3909 		mtk_dim_rx(&eth->rx_dim.work);
3910 		mtk_dim_tx(&eth->tx_dim.work);
3911 
3912 		/* disable delay and normal interrupt */
3913 		mtk_tx_irq_disable(eth, ~0);
3914 		mtk_rx_irq_disable(eth, ~0);
3915 
3916 		return 0;
3917 	}
3918 
3919 	msleep(100);
3920 
3921 	if (reset)
3922 		mtk_hw_warm_reset(eth);
3923 	else
3924 		mtk_hw_reset(eth);
3925 
3926 	if (mtk_is_netsys_v3_or_greater(eth)) {
3927 		/* Set FE to PDMAv2 if necessary */
3928 		val = mtk_r32(eth, MTK_FE_GLO_MISC);
3929 		mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
3930 	}
3931 
3932 	if (eth->pctl) {
3933 		/* Set GE2 driving and slew rate */
3934 		regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
3935 
3936 		/* set GE2 TDSEL */
3937 		regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
3938 
3939 		/* set GE2 TUNE */
3940 		regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
3941 	}
3942 
3943 	/* Set link down as the default for each GMAC. Each MCR is set up
3944 	 * with the appropriate value when the mtk_mac_config callback is
3945 	 * invoked.
3946 	 */
3947 	for (i = 0; i < MTK_MAX_DEVS; i++) {
3948 		struct net_device *dev = eth->netdev[i];
3949 
3950 		if (!dev)
3951 			continue;
3952 
3953 		mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
3954 		mtk_set_mcr_max_rx(netdev_priv(dev),
3955 				   dev->mtu + MTK_RX_ETH_HLEN);
3956 	}
3957 
3958 	/* Tell the CDM to parse the MTK special tag on packets coming from
3959 	 * the CPU; this also works for untagged packets.
3960 	 */
3961 	val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
3962 	mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
3963 	if (mtk_is_netsys_v1(eth)) {
3964 		val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3965 		mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
3966 
3967 		mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
3968 	}
3969 
3970 	/* set interrupt delays based on current Net DIM sample */
3971 	mtk_dim_rx(&eth->rx_dim.work);
3972 	mtk_dim_tx(&eth->tx_dim.work);
3973 
3974 	/* disable delay and normal interrupt */
3975 	mtk_tx_irq_disable(eth, ~0);
3976 	mtk_rx_irq_disable(eth, ~0);
3977 
3978 	/* FE int grouping */
3979 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
3980 	mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.int_grp + 4);
3981 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
3982 	mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->qdma.int_grp + 4);
3983 	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
3984 
3985 	if (mtk_is_netsys_v3_or_greater(eth)) {
3986 		/* PSE should not drop port1, port8 and port9 packets */
3987 		mtk_w32(eth, 0x00000302, PSE_DROP_CFG);
3988 
3989 		/* GDM and CDM Threshold */
3990 		mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES);
3991 		mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);
3992 
3993 		/* Disable GDM1 RX CRC stripping */
3994 		mtk_m32(eth, MTK_GDMA_STRP_CRC, 0, MTK_GDMA_FWD_CFG(0));
3995 
3996 		/* The PSE GDM3 MIB counters have incorrect hardware default
3997 		 * values, so read-clear them beforehand to keep ethtool from
3998 		 * reporting wrong MIB values.
3999 		 */
4000 		for (i = 0; i < 0x80; i += 0x4)
4001 			mtk_r32(eth, reg_map->gdm1_cnt + 0x100 + i);
4002 	} else if (!mtk_is_netsys_v1(eth)) {
4003 		/* PSE should not drop port8 and port9 packets from WDMA Tx */
4004 		mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
4005 
4006 		/* PSE should drop packets to port 8/9 on WDMA Rx ring full */
4007 		mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
4008 
4009 		/* PSE Free Queue Flow Control  */
4010 		mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
4011 
4012 		/* PSE config input queue threshold */
4013 		mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
4014 		mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
4015 		mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
4016 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
4017 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
4018 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
4019 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
4020 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
4021 
4022 		/* PSE config output queue threshold */
4023 		mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
4024 		mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
4025 		mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
4026 		mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
4027 		mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
4028 		mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
4029 		mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
4030 		mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
4031 
4032 		/* GDM and CDM Threshold */
4033 		mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
4034 		mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
4035 		mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
4036 		mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
4037 		mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
4038 		mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
4039 	}
4040 
4041 	return 0;
4042 
4043 err_disable_pm:
4044 	if (!reset) {
4045 		pm_runtime_put_sync(eth->dev);
4046 		pm_runtime_disable(eth->dev);
4047 	}
4048 
4049 	return ret;
4050 }
4051 
4052 static int mtk_hw_deinit(struct mtk_eth *eth)
4053 {
4054 	if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
4055 		return 0;
4056 
4057 	mtk_clk_disable(eth);
4058 
4059 	pm_runtime_put_sync(eth->dev);
4060 	pm_runtime_disable(eth->dev);
4061 
4062 	return 0;
4063 }
4064 
4065 static void mtk_uninit(struct net_device *dev)
4066 {
4067 	struct mtk_mac *mac = netdev_priv(dev);
4068 	struct mtk_eth *eth = mac->hw;
4069 
4070 	phylink_disconnect_phy(mac->phylink);
4071 	mtk_tx_irq_disable(eth, ~0);
4072 	mtk_rx_irq_disable(eth, ~0);
4073 }
4074 
4075 static int mtk_change_mtu(struct net_device *dev, int new_mtu)
4076 {
4077 	int length = new_mtu + MTK_RX_ETH_HLEN;
4078 	struct mtk_mac *mac = netdev_priv(dev);
4079 	struct mtk_eth *eth = mac->hw;
4080 
4081 	if (rcu_access_pointer(eth->prog) &&
4082 	    length > MTK_PP_MAX_BUF_SIZE) {
4083 		netdev_err(dev, "Invalid MTU for XDP mode\n");
4084 		return -EINVAL;
4085 	}
4086 
4087 	mtk_set_mcr_max_rx(mac, length);
4088 	WRITE_ONCE(dev->mtu, new_mtu);
4089 
4090 	return 0;
4091 }
4092 
4093 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4094 {
4095 	struct mtk_mac *mac = netdev_priv(dev);
4096 
4097 	switch (cmd) {
4098 	case SIOCGMIIPHY:
4099 	case SIOCGMIIREG:
4100 	case SIOCSMIIREG:
4101 		return phylink_mii_ioctl(mac->phylink, ifr, cmd);
4102 	default:
4103 		break;
4104 	}
4105 
4106 	return -EOPNOTSUPP;
4107 }
4108 
4109 static void mtk_prepare_for_reset(struct mtk_eth *eth)
4110 {
4111 	u32 val;
4112 	int i;
4113 
4114 	/* set FE PPE ports link down */
4115 	for (i = MTK_GMAC1_ID;
4116 	     i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID);
4117 	     i += 2) {
4118 		val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) | MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT);
4119 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
4120 			val |= MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT);
4121 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
4122 			val |= MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT);
4123 		mtk_w32(eth, val, MTK_FE_GLO_CFG(i));
4124 	}
4125 
4126 	/* adjust PPE configurations to prepare for reset */
4127 	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
4128 		mtk_ppe_prepare_reset(eth->ppe[i]);
4129 
4130 	/* disable NETSYS interrupts */
4131 	mtk_w32(eth, 0, MTK_FE_INT_ENABLE);
4132 
4133 	/* force link down GMAC */
4134 	for (i = 0; i < 2; i++) {
4135 		val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK;
4136 		mtk_w32(eth, val, MTK_MAC_MCR(i));
4137 	}
4138 }
4139 
4140 static void mtk_pending_work(struct work_struct *work)
4141 {
4142 	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
4143 	unsigned long restart = 0;
4144 	u32 val;
4145 	int i;
4146 
4147 	rtnl_lock();
4148 	set_bit(MTK_RESETTING, &eth->state);
4149 
4150 	mtk_prepare_for_reset(eth);
4151 	mtk_wed_fe_reset();
4152 	/* Run the reset preliminary configuration again to avoid any possible
4153 	 * race during the FE reset, since it may run with the RTNL lock released.
4154 	 */
4155 	mtk_prepare_for_reset(eth);
4156 
4157 	/* stop all devices to make sure that dma is properly shut down */
4158 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4159 		if (!eth->netdev[i] || !netif_running(eth->netdev[i]))
4160 			continue;
4161 
4162 		mtk_stop(eth->netdev[i]);
4163 		__set_bit(i, &restart);
4164 	}
4165 
4166 	usleep_range(15000, 16000);
4167 
4168 	if (eth->dev->pins)
4169 		pinctrl_select_state(eth->dev->pins->p,
4170 				     eth->dev->pins->default_state);
4171 	mtk_hw_init(eth, true);
4172 
4173 	/* restart DMA and enable IRQs */
4174 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4175 		if (!eth->netdev[i] || !test_bit(i, &restart))
4176 			continue;
4177 
4178 		if (mtk_open(eth->netdev[i])) {
4179 			netif_alert(eth, ifup, eth->netdev[i],
4180 				    "Driver up/down cycle failed\n");
4181 			dev_close(eth->netdev[i]);
4182 		}
4183 	}
4184 
4185 	/* set FE PPE ports link up */
4186 	for (i = MTK_GMAC1_ID;
4187 	     i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID);
4188 	     i += 2) {
4189 		val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) & ~MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT);
4190 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
4191 			val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT);
4192 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
4193 			val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT);
4194 
4195 		mtk_w32(eth, val, MTK_FE_GLO_CFG(i));
4196 	}
4197 
4198 	clear_bit(MTK_RESETTING, &eth->state);
4199 
4200 	mtk_wed_fe_reset_complete();
4201 
4202 	rtnl_unlock();
4203 }
4204 
4205 static int mtk_free_dev(struct mtk_eth *eth)
4206 {
4207 	int i;
4208 
4209 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4210 		if (!eth->netdev[i])
4211 			continue;
4212 		free_netdev(eth->netdev[i]);
4213 	}
4214 
4215 	for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
4216 		if (!eth->dsa_meta[i])
4217 			break;
4218 		metadata_dst_free(eth->dsa_meta[i]);
4219 	}
4220 
4221 	return 0;
4222 }
4223 
4224 static int mtk_unreg_dev(struct mtk_eth *eth)
4225 {
4226 	int i;
4227 
4228 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4229 		struct mtk_mac *mac;
4230 		if (!eth->netdev[i])
4231 			continue;
4232 		mac = netdev_priv(eth->netdev[i]);
4233 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4234 			unregister_netdevice_notifier(&mac->device_notifier);
4235 		unregister_netdev(eth->netdev[i]);
4236 	}
4237 
4238 	return 0;
4239 }
4240 
4241 static void mtk_sgmii_destroy(struct mtk_eth *eth)
4242 {
4243 	int i;
4244 
4245 	for (i = 0; i < MTK_MAX_DEVS; i++)
4246 		mtk_pcs_lynxi_destroy(eth->sgmii_pcs[i]);
4247 }
4248 
4249 static int mtk_cleanup(struct mtk_eth *eth)
4250 {
4251 	mtk_sgmii_destroy(eth);
4252 	mtk_unreg_dev(eth);
4253 	mtk_free_dev(eth);
4254 	cancel_work_sync(&eth->pending_work);
4255 	cancel_delayed_work_sync(&eth->reset.monitor_work);
4256 
4257 	return 0;
4258 }
4259 
4260 static int mtk_get_link_ksettings(struct net_device *ndev,
4261 				  struct ethtool_link_ksettings *cmd)
4262 {
4263 	struct mtk_mac *mac = netdev_priv(ndev);
4264 
4265 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4266 		return -EBUSY;
4267 
4268 	return phylink_ethtool_ksettings_get(mac->phylink, cmd);
4269 }
4270 
4271 static int mtk_set_link_ksettings(struct net_device *ndev,
4272 				  const struct ethtool_link_ksettings *cmd)
4273 {
4274 	struct mtk_mac *mac = netdev_priv(ndev);
4275 
4276 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4277 		return -EBUSY;
4278 
4279 	return phylink_ethtool_ksettings_set(mac->phylink, cmd);
4280 }
4281 
4282 static void mtk_get_drvinfo(struct net_device *dev,
4283 			    struct ethtool_drvinfo *info)
4284 {
4285 	struct mtk_mac *mac = netdev_priv(dev);
4286 
4287 	strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
4288 	strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
4289 	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
4290 }
4291 
4292 static u32 mtk_get_msglevel(struct net_device *dev)
4293 {
4294 	struct mtk_mac *mac = netdev_priv(dev);
4295 
4296 	return mac->hw->msg_enable;
4297 }
4298 
4299 static void mtk_set_msglevel(struct net_device *dev, u32 value)
4300 {
4301 	struct mtk_mac *mac = netdev_priv(dev);
4302 
4303 	mac->hw->msg_enable = value;
4304 }
4305 
4306 static int mtk_nway_reset(struct net_device *dev)
4307 {
4308 	struct mtk_mac *mac = netdev_priv(dev);
4309 
4310 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4311 		return -EBUSY;
4312 
4313 	if (!mac->phylink)
4314 		return -ENOTSUPP;
4315 
4316 	return phylink_ethtool_nway_reset(mac->phylink);
4317 }
4318 
4319 static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4320 {
4321 	int i;
4322 
4323 	switch (stringset) {
4324 	case ETH_SS_STATS: {
4325 		struct mtk_mac *mac = netdev_priv(dev);
4326 
4327 		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
4328 			ethtool_puts(&data, mtk_ethtool_stats[i].str);
4329 		if (mtk_page_pool_enabled(mac->hw))
4330 			page_pool_ethtool_stats_get_strings(data);
4331 		break;
4332 	}
4333 	default:
4334 		break;
4335 	}
4336 }
4337 
4338 static int mtk_get_sset_count(struct net_device *dev, int sset)
4339 {
4340 	switch (sset) {
4341 	case ETH_SS_STATS: {
4342 		int count = ARRAY_SIZE(mtk_ethtool_stats);
4343 		struct mtk_mac *mac = netdev_priv(dev);
4344 
4345 		if (mtk_page_pool_enabled(mac->hw))
4346 			count += page_pool_ethtool_stats_get_count();
4347 		return count;
4348 	}
4349 	default:
4350 		return -EOPNOTSUPP;
4351 	}
4352 }
4353 
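/* Aggregate page_pool statistics across all RX rings into one set of
 * ethtool counters.
 */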
4354 static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data)
4355 {
4356 	struct page_pool_stats stats = {};
4357 	int i;
4358 
4359 	for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) {
4360 		struct mtk_rx_ring *ring = &eth->rx_ring[i];
4361 
4362 		if (!ring->page_pool)
4363 			continue;
4364 
4365 		page_pool_get_stats(ring->page_pool, &stats);
4366 	}
4367 	page_pool_ethtool_stats_get(data, &stats);
4368 }
4369 
4370 static void mtk_get_ethtool_stats(struct net_device *dev,
4371 				  struct ethtool_stats *stats, u64 *data)
4372 {
4373 	struct mtk_mac *mac = netdev_priv(dev);
4374 	struct mtk_hw_stats *hwstats = mac->hw_stats;
4375 	u64 *data_src, *data_dst;
4376 	unsigned int start;
4377 	int i;
4378 
4379 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4380 		return;
4381 
4382 	if (netif_running(dev) && netif_device_present(dev)) {
4383 		if (spin_trylock_bh(&hwstats->stats_lock)) {
4384 			mtk_stats_update_mac(mac);
4385 			spin_unlock_bh(&hwstats->stats_lock);
4386 		}
4387 	}
4388 
4389 	data_src = (u64 *)hwstats;
4390 
4391 	do {
4392 		data_dst = data;
4393 		start = u64_stats_fetch_begin(&hwstats->syncp);
4394 
4395 		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
4396 			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
4397 		if (mtk_page_pool_enabled(mac->hw))
4398 			mtk_ethtool_pp_stats(mac->hw, data_dst);
4399 	} while (u64_stats_fetch_retry(&hwstats->syncp, start));
4400 }
4401 
4402 static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
4403 			 u32 *rule_locs)
4404 {
4405 	int ret = -EOPNOTSUPP;
4406 
4407 	switch (cmd->cmd) {
4408 	case ETHTOOL_GRXRINGS:
4409 		if (dev->hw_features & NETIF_F_LRO) {
4410 			cmd->data = MTK_MAX_RX_RING_NUM;
4411 			ret = 0;
4412 		}
4413 		break;
4414 	case ETHTOOL_GRXCLSRLCNT:
4415 		if (dev->hw_features & NETIF_F_LRO) {
4416 			struct mtk_mac *mac = netdev_priv(dev);
4417 
4418 			cmd->rule_cnt = mac->hwlro_ip_cnt;
4419 			ret = 0;
4420 		}
4421 		break;
4422 	case ETHTOOL_GRXCLSRULE:
4423 		if (dev->hw_features & NETIF_F_LRO)
4424 			ret = mtk_hwlro_get_fdir_entry(dev, cmd);
4425 		break;
4426 	case ETHTOOL_GRXCLSRLALL:
4427 		if (dev->hw_features & NETIF_F_LRO)
4428 			ret = mtk_hwlro_get_fdir_all(dev, cmd,
4429 						     rule_locs);
4430 		break;
4431 	default:
4432 		break;
4433 	}
4434 
4435 	return ret;
4436 }
4437 
4438 static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
4439 {
4440 	int ret = -EOPNOTSUPP;
4441 
4442 	switch (cmd->cmd) {
4443 	case ETHTOOL_SRXCLSRLINS:
4444 		if (dev->hw_features & NETIF_F_LRO)
4445 			ret = mtk_hwlro_add_ipaddr(dev, cmd);
4446 		break;
4447 	case ETHTOOL_SRXCLSRLDEL:
4448 		if (dev->hw_features & NETIF_F_LRO)
4449 			ret = mtk_hwlro_del_ipaddr(dev, cmd);
4450 		break;
4451 	default:
4452 		break;
4453 	}
4454 
4455 	return ret;
4456 }
4457 
4458 static void mtk_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4459 {
4460 	struct mtk_mac *mac = netdev_priv(dev);
4461 
4462 	phylink_ethtool_get_pauseparam(mac->phylink, pause);
4463 }
4464 
4465 static int mtk_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4466 {
4467 	struct mtk_mac *mac = netdev_priv(dev);
4468 
4469 	return phylink_ethtool_set_pauseparam(mac->phylink, pause);
4470 }
4471 
4472 static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
4473 			    struct net_device *sb_dev)
4474 {
4475 	struct mtk_mac *mac = netdev_priv(dev);
4476 	unsigned int queue = 0;
4477 
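	/* DSA user ports get their own TX queues starting at index 3; plain
	 * GMAC traffic uses the queue matching mac->id.
	 */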
4478 	if (netdev_uses_dsa(dev))
4479 		queue = skb_get_queue_mapping(skb) + 3;
4480 	else
4481 		queue = mac->id;
4482 
4483 	if (queue >= dev->num_tx_queues)
4484 		queue = 0;
4485 
4486 	return queue;
4487 }
4488 
4489 static const struct ethtool_ops mtk_ethtool_ops = {
4490 	.get_link_ksettings	= mtk_get_link_ksettings,
4491 	.set_link_ksettings	= mtk_set_link_ksettings,
4492 	.get_drvinfo		= mtk_get_drvinfo,
4493 	.get_msglevel		= mtk_get_msglevel,
4494 	.set_msglevel		= mtk_set_msglevel,
4495 	.nway_reset		= mtk_nway_reset,
4496 	.get_link		= ethtool_op_get_link,
4497 	.get_strings		= mtk_get_strings,
4498 	.get_sset_count		= mtk_get_sset_count,
4499 	.get_ethtool_stats	= mtk_get_ethtool_stats,
4500 	.get_pauseparam		= mtk_get_pauseparam,
4501 	.set_pauseparam		= mtk_set_pauseparam,
4502 	.get_rxnfc		= mtk_get_rxnfc,
4503 	.set_rxnfc		= mtk_set_rxnfc,
4504 };
4505 
4506 static const struct net_device_ops mtk_netdev_ops = {
4507 	.ndo_uninit		= mtk_uninit,
4508 	.ndo_open		= mtk_open,
4509 	.ndo_stop		= mtk_stop,
4510 	.ndo_start_xmit		= mtk_start_xmit,
4511 	.ndo_set_mac_address	= mtk_set_mac_address,
4512 	.ndo_validate_addr	= eth_validate_addr,
4513 	.ndo_eth_ioctl		= mtk_do_ioctl,
4514 	.ndo_change_mtu		= mtk_change_mtu,
4515 	.ndo_tx_timeout		= mtk_tx_timeout,
4516 	.ndo_get_stats64        = mtk_get_stats64,
4517 	.ndo_fix_features	= mtk_fix_features,
4518 	.ndo_set_features	= mtk_set_features,
4519 #ifdef CONFIG_NET_POLL_CONTROLLER
4520 	.ndo_poll_controller	= mtk_poll_controller,
4521 #endif
4522 	.ndo_setup_tc		= mtk_eth_setup_tc,
4523 	.ndo_bpf		= mtk_xdp,
4524 	.ndo_xdp_xmit		= mtk_xdp_xmit,
4525 	.ndo_select_queue	= mtk_select_queue,
4526 };
4527 
4528 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
4529 {
4530 	const __be32 *_id = of_get_property(np, "reg", NULL);
4531 	phy_interface_t phy_mode;
4532 	struct phylink *phylink;
4533 	struct mtk_mac *mac;
4534 	int id, err;
4535 	int txqs = 1;
4536 	u32 val;
4537 
4538 	if (!_id) {
4539 		dev_err(eth->dev, "missing mac id\n");
4540 		return -EINVAL;
4541 	}
4542 
4543 	id = be32_to_cpup(_id);
4544 	if (id >= MTK_MAX_DEVS) {
4545 		dev_err(eth->dev, "%d is not a valid mac id\n", id);
4546 		return -EINVAL;
4547 	}
4548 
4549 	if (eth->netdev[id]) {
4550 		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
4551 		return -EINVAL;
4552 	}
4553 
4554 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4555 		txqs = MTK_QDMA_NUM_QUEUES;
4556 
4557 	eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1);
4558 	if (!eth->netdev[id]) {
4559 		dev_err(eth->dev, "alloc_etherdev failed\n");
4560 		return -ENOMEM;
4561 	}
4562 	mac = netdev_priv(eth->netdev[id]);
4563 	eth->mac[id] = mac;
4564 	mac->id = id;
4565 	mac->hw = eth;
4566 	mac->of_node = np;
4567 
4568 	err = of_get_ethdev_address(mac->of_node, eth->netdev[id]);
4569 	if (err == -EPROBE_DEFER)
4570 		return err;
4571 
4572 	if (err) {
4573 		/* If the MAC address is invalid, use a random one instead */
4574 		eth_hw_addr_random(eth->netdev[id]);
4575 		dev_err(eth->dev, "generated random MAC address %pM\n",
4576 			eth->netdev[id]->dev_addr);
4577 	}
4578 
4579 	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
4580 	mac->hwlro_ip_cnt = 0;
4581 
4582 	mac->hw_stats = devm_kzalloc(eth->dev,
4583 				     sizeof(*mac->hw_stats),
4584 				     GFP_KERNEL);
4585 	if (!mac->hw_stats) {
4586 		dev_err(eth->dev, "failed to allocate counter memory\n");
4587 		err = -ENOMEM;
4588 		goto free_netdev;
4589 	}
4590 	spin_lock_init(&mac->hw_stats->stats_lock);
4591 	u64_stats_init(&mac->hw_stats->syncp);
4592 
4593 	if (mtk_is_netsys_v3_or_greater(eth))
4594 		mac->hw_stats->reg_offset = id * 0x80;
4595 	else
4596 		mac->hw_stats->reg_offset = id * 0x40;
4597 
4598 	/* phylink create */
4599 	err = of_get_phy_mode(np, &phy_mode);
4600 	if (err) {
4601 		dev_err(eth->dev, "incorrect phy-mode\n");
4602 		goto free_netdev;
4603 	}
4604 
4605 	/* mac config is not set */
4606 	mac->interface = PHY_INTERFACE_MODE_NA;
4607 	mac->speed = SPEED_UNKNOWN;
4608 
4609 	mac->phylink_config.dev = &eth->netdev[id]->dev;
4610 	mac->phylink_config.type = PHYLINK_NETDEV;
4611 	mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
4612 		MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
4613 
4614 	/* MT7623 gmac0 is now missing its speed-specific PLL configuration
4615 	 * in its .mac_config method (since state->speed is not valid there).
4616 	 * Disable support for MII, GMII and RGMII.
4617 	 */
4618 	if (!mac->hw->soc->disable_pll_modes || mac->id != 0) {
4619 		__set_bit(PHY_INTERFACE_MODE_MII,
4620 			  mac->phylink_config.supported_interfaces);
4621 		__set_bit(PHY_INTERFACE_MODE_GMII,
4622 			  mac->phylink_config.supported_interfaces);
4623 
4624 		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
4625 			phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
4626 	}
4627 
4628 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
4629 		__set_bit(PHY_INTERFACE_MODE_TRGMII,
4630 			  mac->phylink_config.supported_interfaces);
4631 
4632 	/* TRGMII is not permitted on MT7621 if using DDR2 */
4633 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) &&
4634 	    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII_MT7621_CLK)) {
4635 		regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
4636 		if (val & SYSCFG_DRAM_TYPE_DDR2)
4637 			__clear_bit(PHY_INTERFACE_MODE_TRGMII,
4638 				    mac->phylink_config.supported_interfaces);
4639 	}
4640 
4641 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
4642 		__set_bit(PHY_INTERFACE_MODE_SGMII,
4643 			  mac->phylink_config.supported_interfaces);
4644 		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
4645 			  mac->phylink_config.supported_interfaces);
4646 		__set_bit(PHY_INTERFACE_MODE_2500BASEX,
4647 			  mac->phylink_config.supported_interfaces);
4648 	}
4649 
4650 	if (mtk_is_netsys_v3_or_greater(mac->hw) &&
4651 	    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW_BIT) &&
4652 	    id == MTK_GMAC1_ID) {
4653 		mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
4654 						       MAC_SYM_PAUSE |
4655 						       MAC_10000FD;
4656 		phy_interface_zero(mac->phylink_config.supported_interfaces);
4657 		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
4658 			  mac->phylink_config.supported_interfaces);
4659 	}
4660 
4661 	phylink = phylink_create(&mac->phylink_config,
4662 				 of_fwnode_handle(mac->of_node),
4663 				 phy_mode, &mtk_phylink_ops);
4664 	if (IS_ERR(phylink)) {
4665 		err = PTR_ERR(phylink);
4666 		goto free_netdev;
4667 	}
4668 
4669 	mac->phylink = phylink;
4670 
4671 	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
4672 	eth->netdev[id]->watchdog_timeo = 5 * HZ;
4673 	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
4674 	eth->netdev[id]->base_addr = (unsigned long)eth->base;
4675 
4676 	eth->netdev[id]->hw_features = eth->soc->hw_features;
4677 	if (eth->hwlro)
4678 		eth->netdev[id]->hw_features |= NETIF_F_LRO;
4679 
4680 	eth->netdev[id]->vlan_features = eth->soc->hw_features &
4681 		~NETIF_F_HW_VLAN_CTAG_TX;
4682 	eth->netdev[id]->features |= eth->soc->hw_features;
4683 	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
4684 
4685 	eth->netdev[id]->irq = eth->irq[0];
4686 	eth->netdev[id]->dev.of_node = np;
4687 
4688 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4689 		eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
4690 	else
4691 		eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
4692 
4693 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
4694 		mac->device_notifier.notifier_call = mtk_device_event;
4695 		register_netdevice_notifier(&mac->device_notifier);
4696 	}
4697 
4698 	if (mtk_page_pool_enabled(eth))
4699 		eth->netdev[id]->xdp_features = NETDEV_XDP_ACT_BASIC |
4700 						NETDEV_XDP_ACT_REDIRECT |
4701 						NETDEV_XDP_ACT_NDO_XMIT |
4702 						NETDEV_XDP_ACT_NDO_XMIT_SG;
4703 
4704 	return 0;
4705 
4706 free_netdev:
4707 	free_netdev(eth->netdev[id]);
4708 	return err;
4709 }
4710 
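/* Swap the device used for DMA mappings. Running netdevs are closed first
 * and reopened afterwards so that their rings are reallocated against the
 * new DMA device.
 */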
4711 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
4712 {
4713 	struct net_device *dev, *tmp;
4714 	LIST_HEAD(dev_list);
4715 	int i;
4716 
4717 	rtnl_lock();
4718 
4719 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4720 		dev = eth->netdev[i];
4721 
4722 		if (!dev || !(dev->flags & IFF_UP))
4723 			continue;
4724 
4725 		list_add_tail(&dev->close_list, &dev_list);
4726 	}
4727 
4728 	dev_close_many(&dev_list, false);
4729 
4730 	eth->dma_dev = dma_dev;
4731 
4732 	list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
4733 		list_del_init(&dev->close_list);
4734 		dev_open(dev, NULL);
4735 	}
4736 
4737 	rtnl_unlock();
4738 }
4739 
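/* Create a LynxI PCS instance for each "mediatek,sgmiisys" syscon phandle in
 * the devicetree, honouring the optional "mediatek,pnswap" polarity swap.
 */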
4740 static int mtk_sgmii_init(struct mtk_eth *eth)
4741 {
4742 	struct device_node *np;
4743 	struct regmap *regmap;
4744 	u32 flags;
4745 	int i;
4746 
4747 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4748 		np = of_parse_phandle(eth->dev->of_node, "mediatek,sgmiisys", i);
4749 		if (!np)
4750 			break;
4751 
4752 		regmap = syscon_node_to_regmap(np);
4753 		flags = 0;
4754 		if (of_property_read_bool(np, "mediatek,pnswap"))
4755 			flags |= MTK_SGMII_FLAG_PN_SWAP;
4756 
4757 		of_node_put(np);
4758 
4759 		if (IS_ERR(regmap))
4760 			return PTR_ERR(regmap);
4761 
4762 		eth->sgmii_pcs[i] = mtk_pcs_lynxi_create(eth->dev, regmap,
4763 							 eth->soc->ana_rgc3,
4764 							 flags);
4765 	}
4766 
4767 	return 0;
4768 }
4769 
4770 static int mtk_probe(struct platform_device *pdev)
4771 {
4772 	struct resource *res = NULL, *res_sram;
4773 	struct device_node *mac_np;
4774 	struct mtk_eth *eth;
4775 	int err, i;
4776 
4777 	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
4778 	if (!eth)
4779 		return -ENOMEM;
4780 
4781 	eth->soc = of_device_get_match_data(&pdev->dev);
4782 
4783 	eth->dev = &pdev->dev;
4784 	eth->dma_dev = &pdev->dev;
4785 	eth->base = devm_platform_ioremap_resource(pdev, 0);
4786 	if (IS_ERR(eth->base))
4787 		return PTR_ERR(eth->base);
4788 
4789 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4790 		eth->ip_align = NET_IP_ALIGN;
4791 
4792 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
4793 		/* SRAM is actual memory and supports transparent access just like DRAM.
4794 		 * Hence we don't require __iomem being set and don't need to use accessor
4795 		 * functions to read from or write to SRAM.
4796 		 */
4797 		if (mtk_is_netsys_v3_or_greater(eth)) {
4798 			eth->sram_base = (void __force *)devm_platform_ioremap_resource(pdev, 1);
4799 			if (IS_ERR(eth->sram_base))
4800 				return PTR_ERR(eth->sram_base);
4801 		} else {
4802 			eth->sram_base = (void __force *)eth->base + MTK_ETH_SRAM_OFFSET;
4803 		}
4804 	}
4805 
4806 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
4807 		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
4808 		if (!err)
4809 			err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
4810 
4811 		if (err) {
4812 			dev_err(&pdev->dev, "Wrong DMA config\n");
4813 			return -EINVAL;
4814 		}
4815 	}
4816 
4817 	spin_lock_init(&eth->page_lock);
4818 	spin_lock_init(&eth->tx_irq_lock);
4819 	spin_lock_init(&eth->rx_irq_lock);
4820 	spin_lock_init(&eth->dim_lock);
4821 
4822 	eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4823 	INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
4824 	INIT_DELAYED_WORK(&eth->reset.monitor_work, mtk_hw_reset_monitor_work);
4825 
4826 	eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4827 	INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);
4828 
4829 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4830 		eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4831 							      "mediatek,ethsys");
4832 		if (IS_ERR(eth->ethsys)) {
4833 			dev_err(&pdev->dev, "no ethsys regmap found\n");
4834 			return PTR_ERR(eth->ethsys);
4835 		}
4836 	}
4837 
4838 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
4839 		eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4840 							     "mediatek,infracfg");
4841 		if (IS_ERR(eth->infra)) {
4842 			dev_err(&pdev->dev, "no infracfg regmap found\n");
4843 			return PTR_ERR(eth->infra);
4844 		}
4845 	}
4846 
4847 	if (of_dma_is_coherent(pdev->dev.of_node)) {
4848 		struct regmap *cci;
4849 
4850 		cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4851 						      "cci-control-port");
4852 		/* enable CPU/bus coherency */
4853 		if (!IS_ERR(cci))
4854 			regmap_write(cci, 0, 3);
4855 	}
4856 
4857 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
4858 		err = mtk_sgmii_init(eth);
4859 
4860 		if (err)
4861 			return err;
4862 	}
4863 
4864 	if (eth->soc->required_pctl) {
4865 		eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4866 							    "mediatek,pctl");
4867 		if (IS_ERR(eth->pctl)) {
4868 			dev_err(&pdev->dev, "no pctl regmap found\n");
4869 			err = PTR_ERR(eth->pctl);
4870 			goto err_destroy_sgmii;
4871 		}
4872 	}
4873 
4874 	if (mtk_is_netsys_v2_or_greater(eth)) {
4875 		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4876 		if (!res) {
4877 			err = -EINVAL;
4878 			goto err_destroy_sgmii;
4879 		}
4880 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
4881 			if (mtk_is_netsys_v3_or_greater(eth)) {
4882 				res_sram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
4883 				if (!res_sram) {
4884 					err = -EINVAL;
4885 					goto err_destroy_sgmii;
4886 				}
4887 				eth->phy_scratch_ring = res_sram->start;
4888 			} else {
4889 				eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
4890 			}
4891 		}
4892 	}
4893 
4894 	if (eth->soc->offload_version) {
4895 		for (i = 0;; i++) {
4896 			struct device_node *np;
4897 			phys_addr_t wdma_phy;
4898 			u32 wdma_base;
4899 
4900 			if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
4901 				break;
4902 
4903 			np = of_parse_phandle(pdev->dev.of_node,
4904 					      "mediatek,wed", i);
4905 			if (!np)
4906 				break;
4907 
4908 			wdma_base = eth->soc->reg_map->wdma_base[i];
4909 			wdma_phy = res ? res->start + wdma_base : 0;
4910 			mtk_wed_add_hw(np, eth, eth->base + wdma_base,
4911 				       wdma_phy, i);
4912 		}
4913 	}
4914 
4915 	for (i = 0; i < 3; i++) {
4916 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
4917 			eth->irq[i] = eth->irq[0];
4918 		else
4919 			eth->irq[i] = platform_get_irq(pdev, i);
4920 		if (eth->irq[i] < 0) {
4921 			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
4922 			err = -ENXIO;
4923 			goto err_wed_exit;
4924 		}
4925 	}
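	/* Optional clocks are left as NULL; only clocks flagged in
	 * required_clks are fatal when missing.
	 */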
4926 	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
4927 		eth->clks[i] = devm_clk_get(eth->dev,
4928 					    mtk_clks_source_name[i]);
4929 		if (IS_ERR(eth->clks[i])) {
4930 			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) {
4931 				err = -EPROBE_DEFER;
4932 				goto err_wed_exit;
4933 			}
4934 			if (eth->soc->required_clks & BIT(i)) {
4935 				dev_err(&pdev->dev, "clock %s not found\n",
4936 					mtk_clks_source_name[i]);
4937 				err = -EINVAL;
4938 				goto err_wed_exit;
4939 			}
4940 			eth->clks[i] = NULL;
4941 		}
4942 	}
4943 
4944 	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
4945 	INIT_WORK(&eth->pending_work, mtk_pending_work);
4946 
4947 	err = mtk_hw_init(eth, false);
4948 	if (err)
4949 		goto err_wed_exit;
4950 
4951 	eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
4952 
4953 	for_each_child_of_node(pdev->dev.of_node, mac_np) {
4954 		if (!of_device_is_compatible(mac_np,
4955 					     "mediatek,eth-mac"))
4956 			continue;
4957 
4958 		if (!of_device_is_available(mac_np))
4959 			continue;
4960 
4961 		err = mtk_add_mac(eth, mac_np);
4962 		if (err) {
4963 			of_node_put(mac_np);
4964 			goto err_deinit_hw;
4965 		}
4966 	}
4967 
4968 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
4969 		err = devm_request_irq(eth->dev, eth->irq[0],
4970 				       mtk_handle_irq, 0,
4971 				       dev_name(eth->dev), eth);
4972 	} else {
4973 		err = devm_request_irq(eth->dev, eth->irq[1],
4974 				       mtk_handle_irq_tx, 0,
4975 				       dev_name(eth->dev), eth);
4976 		if (err)
4977 			goto err_free_dev;
4978 
4979 		err = devm_request_irq(eth->dev, eth->irq[2],
4980 				       mtk_handle_irq_rx, 0,
4981 				       dev_name(eth->dev), eth);
4982 	}
4983 	if (err)
4984 		goto err_free_dev;
4985 
4986 	/* No MT7628/88 support yet */
4987 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4988 		err = mtk_mdio_init(eth);
4989 		if (err)
4990 			goto err_free_dev;
4991 	}
4992 
4993 	if (eth->soc->offload_version) {
4994 		u8 ppe_num = eth->soc->ppe_num;
4995 
4996 		ppe_num = min_t(u8, ARRAY_SIZE(eth->ppe), ppe_num);
4997 		for (i = 0; i < ppe_num; i++) {
4998 			u32 ppe_addr = eth->soc->reg_map->ppe_base;
4999 
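			/* PPE0 and PPE1 sit at +0x0 and +0x400 from ppe_base,
			 * while PPE2 sits further out at +0xc00.
			 */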
5000 			ppe_addr += (i == 2 ? 0xc00 : i * 0x400);
5001 			eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i);
5002 
5003 			if (!eth->ppe[i]) {
5004 				err = -ENOMEM;
5005 				goto err_deinit_ppe;
5006 			}
5007 			err = mtk_eth_offload_init(eth, i);
5008 
5009 			if (err)
5010 				goto err_deinit_ppe;
5011 		}
5012 	}
5013 
5014 	for (i = 0; i < MTK_MAX_DEVS; i++) {
5015 		if (!eth->netdev[i])
5016 			continue;
5017 
5018 		err = register_netdev(eth->netdev[i]);
5019 		if (err) {
5020 			dev_err(eth->dev, "error bringing up device\n");
5021 			goto err_deinit_ppe;
5022 		}
5023 		netif_info(eth, probe, eth->netdev[i],
5024 			   "mediatek frame engine at 0x%08lx, irq %d\n",
5025 			   eth->netdev[i]->base_addr, eth->irq[0]);
5026 	}
5027 
5028 	/* We run multiple net devices on the same DMA ring, so we need a
5029 	 * dummy net_device for NAPI to work.
5030 	 */
5031 	eth->dummy_dev = alloc_netdev_dummy(0);
5032 	if (!eth->dummy_dev) {
5033 		err = -ENOMEM;
5034 		dev_err(eth->dev, "failed to allocate dummy device\n");
5035 		goto err_unreg_netdev;
5036 	}
5037 	netif_napi_add(eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
5038 	netif_napi_add(eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
5039 
5040 	platform_set_drvdata(pdev, eth);
5041 	schedule_delayed_work(&eth->reset.monitor_work,
5042 			      MTK_DMA_MONITOR_TIMEOUT);
5043 
5044 	return 0;
5045 
5046 err_unreg_netdev:
5047 	mtk_unreg_dev(eth);
5048 err_deinit_ppe:
5049 	mtk_ppe_deinit(eth);
5050 	mtk_mdio_cleanup(eth);
5051 err_free_dev:
5052 	mtk_free_dev(eth);
5053 err_deinit_hw:
5054 	mtk_hw_deinit(eth);
5055 err_wed_exit:
5056 	mtk_wed_exit();
5057 err_destroy_sgmii:
5058 	mtk_sgmii_destroy(eth);
5059 
5060 	return err;
5061 }
5062 
5063 static void mtk_remove(struct platform_device *pdev)
5064 {
5065 	struct mtk_eth *eth = platform_get_drvdata(pdev);
5066 	struct mtk_mac *mac;
5067 	int i;
5068 
5069 	/* stop all devices to make sure that dma is properly shut down */
5070 	for (i = 0; i < MTK_MAX_DEVS; i++) {
5071 		if (!eth->netdev[i])
5072 			continue;
5073 		mtk_stop(eth->netdev[i]);
5074 		mac = netdev_priv(eth->netdev[i]);
5075 		phylink_disconnect_phy(mac->phylink);
5076 	}
5077 
5078 	mtk_wed_exit();
5079 	mtk_hw_deinit(eth);
5080 
5081 	netif_napi_del(&eth->tx_napi);
5082 	netif_napi_del(&eth->rx_napi);
5083 	mtk_cleanup(eth);
5084 	free_netdev(eth->dummy_dev);
5085 	mtk_mdio_cleanup(eth);
5086 }
5087 
5088 static const struct mtk_soc_data mt2701_data = {
5089 	.reg_map = &mtk_reg_map,
5090 	.caps = MT7623_CAPS | MTK_HWLRO,
5091 	.hw_features = MTK_HW_FEATURES,
5092 	.required_clks = MT7623_CLKS_BITMAP,
5093 	.required_pctl = true,
5094 	.version = 1,
5095 	.tx = {
5096 		.desc_size = sizeof(struct mtk_tx_dma),
5097 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5098 		.dma_len_offset = 16,
5099 		.dma_size = MTK_DMA_SIZE(2K),
5100 		.fq_dma_size = MTK_DMA_SIZE(2K),
5101 	},
5102 	.rx = {
5103 		.desc_size = sizeof(struct mtk_rx_dma),
5104 		.irq_done_mask = MTK_RX_DONE_INT,
5105 		.dma_l4_valid = RX_DMA_L4_VALID,
5106 		.dma_size = MTK_DMA_SIZE(2K),
5107 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5108 		.dma_len_offset = 16,
5109 	},
5110 };
5111 
5112 static const struct mtk_soc_data mt7621_data = {
5113 	.reg_map = &mtk_reg_map,
5114 	.caps = MT7621_CAPS,
5115 	.hw_features = MTK_HW_FEATURES,
5116 	.required_clks = MT7621_CLKS_BITMAP,
5117 	.required_pctl = false,
5118 	.version = 1,
5119 	.offload_version = 1,
5120 	.ppe_num = 1,
5121 	.hash_offset = 2,
5122 	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
5123 	.tx = {
5124 		.desc_size = sizeof(struct mtk_tx_dma),
5125 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5126 		.dma_len_offset = 16,
5127 		.dma_size = MTK_DMA_SIZE(2K),
5128 		.fq_dma_size = MTK_DMA_SIZE(2K),
5129 	},
5130 	.rx = {
5131 		.desc_size = sizeof(struct mtk_rx_dma),
5132 		.irq_done_mask = MTK_RX_DONE_INT,
5133 		.dma_l4_valid = RX_DMA_L4_VALID,
5134 		.dma_size = MTK_DMA_SIZE(2K),
5135 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5136 		.dma_len_offset = 16,
5137 	},
5138 };
5139 
5140 static const struct mtk_soc_data mt7622_data = {
5141 	.reg_map = &mtk_reg_map,
5142 	.ana_rgc3 = 0x2028,
5143 	.caps = MT7622_CAPS | MTK_HWLRO,
5144 	.hw_features = MTK_HW_FEATURES,
5145 	.required_clks = MT7622_CLKS_BITMAP,
5146 	.required_pctl = false,
5147 	.version = 1,
5148 	.offload_version = 2,
5149 	.ppe_num = 1,
5150 	.hash_offset = 2,
5151 	.has_accounting = true,
5152 	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
5153 	.tx = {
5154 		.desc_size = sizeof(struct mtk_tx_dma),
5155 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5156 		.dma_len_offset = 16,
5157 		.dma_size = MTK_DMA_SIZE(2K),
5158 		.fq_dma_size = MTK_DMA_SIZE(2K),
5159 	},
5160 	.rx = {
5161 		.desc_size = sizeof(struct mtk_rx_dma),
5162 		.irq_done_mask = MTK_RX_DONE_INT,
5163 		.dma_l4_valid = RX_DMA_L4_VALID,
5164 		.dma_size = MTK_DMA_SIZE(2K),
5165 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5166 		.dma_len_offset = 16,
5167 	},
5168 };
5169 
5170 static const struct mtk_soc_data mt7623_data = {
5171 	.reg_map = &mtk_reg_map,
5172 	.caps = MT7623_CAPS | MTK_HWLRO,
5173 	.hw_features = MTK_HW_FEATURES,
5174 	.required_clks = MT7623_CLKS_BITMAP,
5175 	.required_pctl = true,
5176 	.version = 1,
5177 	.offload_version = 1,
5178 	.ppe_num = 1,
5179 	.hash_offset = 2,
5180 	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
5181 	.disable_pll_modes = true,
5182 	.tx = {
5183 		.desc_size = sizeof(struct mtk_tx_dma),
5184 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5185 		.dma_len_offset = 16,
5186 		.dma_size = MTK_DMA_SIZE(2K),
5187 		.fq_dma_size = MTK_DMA_SIZE(2K),
5188 	},
5189 	.rx = {
5190 		.desc_size = sizeof(struct mtk_rx_dma),
5191 		.irq_done_mask = MTK_RX_DONE_INT,
5192 		.dma_l4_valid = RX_DMA_L4_VALID,
5193 		.dma_size = MTK_DMA_SIZE(2K),
5194 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5195 		.dma_len_offset = 16,
5196 	},
5197 };
5198 
5199 static const struct mtk_soc_data mt7629_data = {
5200 	.reg_map = &mtk_reg_map,
5201 	.ana_rgc3 = 0x128,
5202 	.caps = MT7629_CAPS | MTK_HWLRO,
5203 	.hw_features = MTK_HW_FEATURES,
5204 	.required_clks = MT7629_CLKS_BITMAP,
5205 	.required_pctl = false,
5206 	.has_accounting = true,
5207 	.version = 1,
5208 	.tx = {
5209 		.desc_size = sizeof(struct mtk_tx_dma),
5210 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5211 		.dma_len_offset = 16,
5212 		.dma_size = MTK_DMA_SIZE(2K),
5213 		.fq_dma_size = MTK_DMA_SIZE(2K),
5214 	},
5215 	.rx = {
5216 		.desc_size = sizeof(struct mtk_rx_dma),
5217 		.irq_done_mask = MTK_RX_DONE_INT,
5218 		.dma_l4_valid = RX_DMA_L4_VALID,
5219 		.dma_size = MTK_DMA_SIZE(2K),
5220 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5221 		.dma_len_offset = 16,
5222 	},
5223 };
5224 
5225 static const struct mtk_soc_data mt7981_data = {
5226 	.reg_map = &mt7986_reg_map,
5227 	.ana_rgc3 = 0x128,
5228 	.caps = MT7981_CAPS,
5229 	.hw_features = MTK_HW_FEATURES,
5230 	.required_clks = MT7981_CLKS_BITMAP,
5231 	.required_pctl = false,
5232 	.version = 2,
5233 	.offload_version = 2,
5234 	.ppe_num = 2,
5235 	.hash_offset = 4,
5236 	.has_accounting = true,
5237 	.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
5238 	.tx = {
5239 		.desc_size = sizeof(struct mtk_tx_dma_v2),
5240 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5241 		.dma_len_offset = 8,
5242 		.dma_size = MTK_DMA_SIZE(2K),
5243 		.fq_dma_size = MTK_DMA_SIZE(2K),
5244 	},
5245 	.rx = {
5246 		.desc_size = sizeof(struct mtk_rx_dma),
5247 		.irq_done_mask = MTK_RX_DONE_INT,
5248 		.dma_l4_valid = RX_DMA_L4_VALID_V2,
5249 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5250 		.dma_len_offset = 16,
5251 		.dma_size = MTK_DMA_SIZE(2K),
5252 	},
5253 };
5254 
5255 static const struct mtk_soc_data mt7986_data = {
5256 	.reg_map = &mt7986_reg_map,
5257 	.ana_rgc3 = 0x128,
5258 	.caps = MT7986_CAPS,
5259 	.hw_features = MTK_HW_FEATURES,
5260 	.required_clks = MT7986_CLKS_BITMAP,
5261 	.required_pctl = false,
5262 	.version = 2,
5263 	.offload_version = 2,
5264 	.ppe_num = 2,
5265 	.hash_offset = 4,
5266 	.has_accounting = true,
5267 	.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
5268 	.tx = {
5269 		.desc_size = sizeof(struct mtk_tx_dma_v2),
5270 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5271 		.dma_len_offset = 8,
5272 		.dma_size = MTK_DMA_SIZE(2K),
5273 		.fq_dma_size = MTK_DMA_SIZE(2K),
5274 	},
5275 	.rx = {
5276 		.desc_size = sizeof(struct mtk_rx_dma),
5277 		.irq_done_mask = MTK_RX_DONE_INT,
5278 		.dma_l4_valid = RX_DMA_L4_VALID_V2,
5279 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5280 		.dma_len_offset = 16,
5281 		.dma_size = MTK_DMA_SIZE(2K),
5282 	},
5283 };
5284 
5285 static const struct mtk_soc_data mt7988_data = {
5286 	.reg_map = &mt7988_reg_map,
5287 	.ana_rgc3 = 0x128,
5288 	.caps = MT7988_CAPS,
5289 	.hw_features = MTK_HW_FEATURES,
5290 	.required_clks = MT7988_CLKS_BITMAP,
5291 	.required_pctl = false,
5292 	.version = 3,
5293 	.offload_version = 2,
5294 	.ppe_num = 3,
5295 	.hash_offset = 4,
5296 	.has_accounting = true,
5297 	.foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
5298 	.tx = {
5299 		.desc_size = sizeof(struct mtk_tx_dma_v2),
5300 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5301 		.dma_len_offset = 8,
5302 		.dma_size = MTK_DMA_SIZE(2K),
5303 		.fq_dma_size = MTK_DMA_SIZE(4K),
5304 	},
5305 	.rx = {
5306 		.desc_size = sizeof(struct mtk_rx_dma_v2),
5307 		.irq_done_mask = MTK_RX_DONE_INT_V2,
5308 		.dma_l4_valid = RX_DMA_L4_VALID_V2,
5309 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5310 		.dma_len_offset = 8,
5311 		.dma_size = MTK_DMA_SIZE(2K),
5312 	},
5313 };
5314 
5315 static const struct mtk_soc_data rt5350_data = {
5316 	.reg_map = &mt7628_reg_map,
5317 	.caps = MT7628_CAPS,
5318 	.hw_features = MTK_HW_FEATURES_MT7628,
5319 	.required_clks = MT7628_CLKS_BITMAP,
5320 	.required_pctl = false,
5321 	.version = 1,
5322 	.tx = {
5323 		.desc_size = sizeof(struct mtk_tx_dma),
5324 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5325 		.dma_len_offset = 16,
5326 		.dma_size = MTK_DMA_SIZE(2K),
5327 	},
5328 	.rx = {
5329 		.desc_size = sizeof(struct mtk_rx_dma),
5330 		.irq_done_mask = MTK_RX_DONE_INT,
5331 		.dma_l4_valid = RX_DMA_L4_VALID_PDMA,
5332 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5333 		.dma_len_offset = 16,
5334 		.dma_size = MTK_DMA_SIZE(2K),
5335 	},
5336 };
5337 
5338 const struct of_device_id of_mtk_match[] = {
5339 	{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data },
5340 	{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
5341 	{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data },
5342 	{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data },
5343 	{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data },
5344 	{ .compatible = "mediatek,mt7981-eth", .data = &mt7981_data },
5345 	{ .compatible = "mediatek,mt7986-eth", .data = &mt7986_data },
5346 	{ .compatible = "mediatek,mt7988-eth", .data = &mt7988_data },
5347 	{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data },
5348 	{},
5349 };
5350 MODULE_DEVICE_TABLE(of, of_mtk_match);
5351 
5352 static struct platform_driver mtk_driver = {
5353 	.probe = mtk_probe,
5354 	.remove = mtk_remove,
5355 	.driver = {
5356 		.name = "mtk_soc_eth",
5357 		.of_match_table = of_mtk_match,
5358 	},
5359 };
5360 
5361 module_platform_driver(mtk_driver);
5362 
5363 MODULE_LICENSE("GPL");
5364 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
5365 MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
5366