xref: /linux/drivers/net/ethernet/mediatek/mtk_eth_soc.c (revision 7681a4f58fb9c338d6dfe1181607f84c793d77de)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *
4  *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
5  *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
6  *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
7  */
8 
9 #include <linux/of_device.h>
10 #include <linux/of_mdio.h>
11 #include <linux/of_net.h>
12 #include <linux/of_address.h>
13 #include <linux/mfd/syscon.h>
14 #include <linux/regmap.h>
15 #include <linux/clk.h>
16 #include <linux/pm_runtime.h>
17 #include <linux/if_vlan.h>
18 #include <linux/reset.h>
19 #include <linux/tcp.h>
20 #include <linux/interrupt.h>
21 #include <linux/pinctrl/devinfo.h>
22 #include <linux/phylink.h>
23 #include <linux/jhash.h>
24 #include <linux/bitfield.h>
25 #include <net/dsa.h>
26 #include <net/dst_metadata.h>
27 
28 #include "mtk_eth_soc.h"
29 #include "mtk_wed.h"
30 
31 static int mtk_msg_level = -1;
32 module_param_named(msg_level, mtk_msg_level, int, 0);
33 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
34 
35 #define MTK_ETHTOOL_STAT(x) { #x, \
36 			      offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
37 
38 #define MTK_ETHTOOL_XDP_STAT(x) { #x, \
39 				  offsetof(struct mtk_hw_stats, xdp_stats.x) / \
40 				  sizeof(u64) }
41 
42 static const struct mtk_reg_map mtk_reg_map = {
43 	.tx_irq_mask		= 0x1a1c,
44 	.tx_irq_status		= 0x1a18,
45 	.pdma = {
46 		.rx_ptr		= 0x0900,
47 		.rx_cnt_cfg	= 0x0904,
48 		.pcrx_ptr	= 0x0908,
49 		.glo_cfg	= 0x0a04,
50 		.rst_idx	= 0x0a08,
51 		.delay_irq	= 0x0a0c,
52 		.irq_status	= 0x0a20,
53 		.irq_mask	= 0x0a28,
54 		.adma_rx_dbg0	= 0x0a38,
55 		.int_grp	= 0x0a50,
56 	},
57 	.qdma = {
58 		.qtx_cfg	= 0x1800,
59 		.qtx_sch	= 0x1804,
60 		.rx_ptr		= 0x1900,
61 		.rx_cnt_cfg	= 0x1904,
62 		.qcrx_ptr	= 0x1908,
63 		.glo_cfg	= 0x1a04,
64 		.rst_idx	= 0x1a08,
65 		.delay_irq	= 0x1a0c,
66 		.fc_th		= 0x1a10,
67 		.tx_sch_rate	= 0x1a14,
68 		.int_grp	= 0x1a20,
69 		.hred		= 0x1a44,
70 		.ctx_ptr	= 0x1b00,
71 		.dtx_ptr	= 0x1b04,
72 		.crx_ptr	= 0x1b10,
73 		.drx_ptr	= 0x1b14,
74 		.fq_head	= 0x1b20,
75 		.fq_tail	= 0x1b24,
76 		.fq_count	= 0x1b28,
77 		.fq_blen	= 0x1b2c,
78 	},
79 	.gdm1_cnt		= 0x2400,
80 	.gdma_to_ppe		= 0x4444,
81 	.ppe_base		= 0x0c00,
82 	.wdma_base = {
83 		[0]		= 0x2800,
84 		[1]		= 0x2c00,
85 	},
86 	.pse_iq_sta		= 0x0110,
87 	.pse_oq_sta		= 0x0118,
88 };
89 
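/* Note: MT7628/88 has no QDMA engine, so only the PDMA portion of the
 * register map is populated below; the driver guards the missing blocks
 * with MTK_QDMA / MTK_SOC_MT7628 capability checks.
 */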
90 static const struct mtk_reg_map mt7628_reg_map = {
91 	.tx_irq_mask		= 0x0a28,
92 	.tx_irq_status		= 0x0a20,
93 	.pdma = {
94 		.rx_ptr		= 0x0900,
95 		.rx_cnt_cfg	= 0x0904,
96 		.pcrx_ptr	= 0x0908,
97 		.glo_cfg	= 0x0a04,
98 		.rst_idx	= 0x0a08,
99 		.delay_irq	= 0x0a0c,
100 		.irq_status	= 0x0a20,
101 		.irq_mask	= 0x0a28,
102 		.int_grp	= 0x0a50,
103 	},
104 };
105 
106 static const struct mtk_reg_map mt7986_reg_map = {
107 	.tx_irq_mask		= 0x461c,
108 	.tx_irq_status		= 0x4618,
109 	.pdma = {
110 		.rx_ptr		= 0x6100,
111 		.rx_cnt_cfg	= 0x6104,
112 		.pcrx_ptr	= 0x6108,
113 		.glo_cfg	= 0x6204,
114 		.rst_idx	= 0x6208,
115 		.delay_irq	= 0x620c,
116 		.irq_status	= 0x6220,
117 		.irq_mask	= 0x6228,
118 		.adma_rx_dbg0	= 0x6238,
119 		.int_grp	= 0x6250,
120 	},
121 	.qdma = {
122 		.qtx_cfg	= 0x4400,
123 		.qtx_sch	= 0x4404,
124 		.rx_ptr		= 0x4500,
125 		.rx_cnt_cfg	= 0x4504,
126 		.qcrx_ptr	= 0x4508,
127 		.glo_cfg	= 0x4604,
128 		.rst_idx	= 0x4608,
129 		.delay_irq	= 0x460c,
130 		.fc_th		= 0x4610,
131 		.int_grp	= 0x4620,
132 		.hred		= 0x4644,
133 		.ctx_ptr	= 0x4700,
134 		.dtx_ptr	= 0x4704,
135 		.crx_ptr	= 0x4710,
136 		.drx_ptr	= 0x4714,
137 		.fq_head	= 0x4720,
138 		.fq_tail	= 0x4724,
139 		.fq_count	= 0x4728,
140 		.fq_blen	= 0x472c,
141 		.tx_sch_rate	= 0x4798,
142 	},
143 	.gdm1_cnt		= 0x1c00,
144 	.gdma_to_ppe		= 0x3333,
145 	.ppe_base		= 0x2000,
146 	.wdma_base = {
147 		[0]		= 0x4800,
148 		[1]		= 0x4c00,
149 	},
150 	.pse_iq_sta		= 0x0180,
151 	.pse_oq_sta		= 0x01a0,
152 };
153 
154 /* strings used by ethtool */
155 static const struct mtk_ethtool_stats {
156 	char str[ETH_GSTRING_LEN];
157 	u32 offset;
158 } mtk_ethtool_stats[] = {
159 	MTK_ETHTOOL_STAT(tx_bytes),
160 	MTK_ETHTOOL_STAT(tx_packets),
161 	MTK_ETHTOOL_STAT(tx_skip),
162 	MTK_ETHTOOL_STAT(tx_collisions),
163 	MTK_ETHTOOL_STAT(rx_bytes),
164 	MTK_ETHTOOL_STAT(rx_packets),
165 	MTK_ETHTOOL_STAT(rx_overflow),
166 	MTK_ETHTOOL_STAT(rx_fcs_errors),
167 	MTK_ETHTOOL_STAT(rx_short_errors),
168 	MTK_ETHTOOL_STAT(rx_long_errors),
169 	MTK_ETHTOOL_STAT(rx_checksum_errors),
170 	MTK_ETHTOOL_STAT(rx_flow_control_packets),
171 	MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect),
172 	MTK_ETHTOOL_XDP_STAT(rx_xdp_pass),
173 	MTK_ETHTOOL_XDP_STAT(rx_xdp_drop),
174 	MTK_ETHTOOL_XDP_STAT(rx_xdp_tx),
175 	MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors),
176 	MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit),
177 	MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors),
178 };
179 
180 static const char * const mtk_clks_source_name[] = {
181 	"ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
182 	"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
183 	"sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
184 	"sgmii_ck", "eth2pll", "wocpu0", "wocpu1", "netsys0", "netsys1"
185 };
186 
187 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
188 {
189 	__raw_writel(val, eth->base + reg);
190 }
191 
192 u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
193 {
194 	return __raw_readl(eth->base + reg);
195 }
196 
197 static u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
198 {
199 	u32 val;
200 
201 	val = mtk_r32(eth, reg);
202 	val &= ~mask;
203 	val |= set;
204 	mtk_w32(eth, val, reg);
205 	return val;
206 }
207 
208 static int mtk_mdio_busy_wait(struct mtk_eth *eth)
209 {
210 	unsigned long t_start = jiffies;
211 
212 	while (1) {
213 		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
214 			return 0;
215 		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
216 			break;
217 		cond_resched();
218 	}
219 
220 	dev_err(eth->dev, "mdio: MDIO timeout\n");
221 	return -ETIMEDOUT;
222 }
223 
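/* All MDIO accesses below funnel through the single PHY Indirect Access
 * Control (IAC) register: wait for the busy bit to clear, program one
 * command word, then wait for completion. For example, a C22 read of
 * register 2 on PHY address 0x1f issues:
 *
 *   PHY_IAC_ACCESS | PHY_IAC_START_C22 | PHY_IAC_CMD_C22_READ |
 *   PHY_IAC_REG(2) | PHY_IAC_ADDR(0x1f)
 */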
224 static int _mtk_mdio_write_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
225 			       u32 write_data)
226 {
227 	int ret;
228 
229 	ret = mtk_mdio_busy_wait(eth);
230 	if (ret < 0)
231 		return ret;
232 
233 	mtk_w32(eth, PHY_IAC_ACCESS |
234 		PHY_IAC_START_C22 |
235 		PHY_IAC_CMD_WRITE |
236 		PHY_IAC_REG(phy_reg) |
237 		PHY_IAC_ADDR(phy_addr) |
238 		PHY_IAC_DATA(write_data),
239 		MTK_PHY_IAC);
240 
241 	ret = mtk_mdio_busy_wait(eth);
242 	if (ret < 0)
243 		return ret;
244 
245 	return 0;
246 }
247 
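/* Clause 45 accesses take two IAC transactions: an address phase that
 * latches the 16-bit register number into the selected MMD (devad),
 * followed by a data phase that performs the actual read or write.
 */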
248 static int _mtk_mdio_write_c45(struct mtk_eth *eth, u32 phy_addr,
249 			       u32 devad, u32 phy_reg, u32 write_data)
250 {
251 	int ret;
252 
253 	ret = mtk_mdio_busy_wait(eth);
254 	if (ret < 0)
255 		return ret;
256 
257 	mtk_w32(eth, PHY_IAC_ACCESS |
258 		PHY_IAC_START_C45 |
259 		PHY_IAC_CMD_C45_ADDR |
260 		PHY_IAC_REG(devad) |
261 		PHY_IAC_ADDR(phy_addr) |
262 		PHY_IAC_DATA(phy_reg),
263 		MTK_PHY_IAC);
264 
265 	ret = mtk_mdio_busy_wait(eth);
266 	if (ret < 0)
267 		return ret;
268 
269 	mtk_w32(eth, PHY_IAC_ACCESS |
270 		PHY_IAC_START_C45 |
271 		PHY_IAC_CMD_WRITE |
272 		PHY_IAC_REG(devad) |
273 		PHY_IAC_ADDR(phy_addr) |
274 		PHY_IAC_DATA(write_data),
275 		MTK_PHY_IAC);
276 
277 	ret = mtk_mdio_busy_wait(eth);
278 	if (ret < 0)
279 		return ret;
280 
281 	return 0;
282 }
283 
284 static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
285 {
286 	int ret;
287 
288 	ret = mtk_mdio_busy_wait(eth);
289 	if (ret < 0)
290 		return ret;
291 
292 	mtk_w32(eth, PHY_IAC_ACCESS |
293 		PHY_IAC_START_C22 |
294 		PHY_IAC_CMD_C22_READ |
295 		PHY_IAC_REG(phy_reg) |
296 		PHY_IAC_ADDR(phy_addr),
297 		MTK_PHY_IAC);
298 
299 	ret = mtk_mdio_busy_wait(eth);
300 	if (ret < 0)
301 		return ret;
302 
303 	return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
304 }
305 
306 static int _mtk_mdio_read_c45(struct mtk_eth *eth, u32 phy_addr,
307 			      u32 devad, u32 phy_reg)
308 {
309 	int ret;
310 
311 	ret = mtk_mdio_busy_wait(eth);
312 	if (ret < 0)
313 		return ret;
314 
315 	mtk_w32(eth, PHY_IAC_ACCESS |
316 		PHY_IAC_START_C45 |
317 		PHY_IAC_CMD_C45_ADDR |
318 		PHY_IAC_REG(devad) |
319 		PHY_IAC_ADDR(phy_addr) |
320 		PHY_IAC_DATA(phy_reg),
321 		MTK_PHY_IAC);
322 
323 	ret = mtk_mdio_busy_wait(eth);
324 	if (ret < 0)
325 		return ret;
326 
327 	mtk_w32(eth, PHY_IAC_ACCESS |
328 		PHY_IAC_START_C45 |
329 		PHY_IAC_CMD_C45_READ |
330 		PHY_IAC_REG(devad) |
331 		PHY_IAC_ADDR(phy_addr),
332 		MTK_PHY_IAC);
333 
334 	ret = mtk_mdio_busy_wait(eth);
335 	if (ret < 0)
336 		return ret;
337 
338 	return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
339 }
340 
341 static int mtk_mdio_write_c22(struct mii_bus *bus, int phy_addr,
342 			      int phy_reg, u16 val)
343 {
344 	struct mtk_eth *eth = bus->priv;
345 
346 	return _mtk_mdio_write_c22(eth, phy_addr, phy_reg, val);
347 }
348 
349 static int mtk_mdio_write_c45(struct mii_bus *bus, int phy_addr,
350 			      int devad, int phy_reg, u16 val)
351 {
352 	struct mtk_eth *eth = bus->priv;
353 
354 	return _mtk_mdio_write_c45(eth, phy_addr, devad, phy_reg, val);
355 }
356 
357 static int mtk_mdio_read_c22(struct mii_bus *bus, int phy_addr, int phy_reg)
358 {
359 	struct mtk_eth *eth = bus->priv;
360 
361 	return _mtk_mdio_read_c22(eth, phy_addr, phy_reg);
362 }
363 
364 static int mtk_mdio_read_c45(struct mii_bus *bus, int phy_addr, int devad,
365 			     int phy_reg)
366 {
367 	struct mtk_eth *eth = bus->priv;
368 
369 	return _mtk_mdio_read_c45(eth, phy_addr, devad, phy_reg);
370 }
371 
372 static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
373 				     phy_interface_t interface)
374 {
375 	u32 val;
376 
377 	/* Check DDR memory type.
378 	 * Currently TRGMII mode with DDR2 memory is not supported.
379 	 */
380 	regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
381 	if (interface == PHY_INTERFACE_MODE_TRGMII &&
382 	    val & SYSCFG_DRAM_TYPE_DDR2) {
383 		dev_err(eth->dev,
384 			"TRGMII mode with DDR2 memory is not supported!\n");
385 		return -EOPNOTSUPP;
386 	}
387 
388 	val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
389 		ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
390 
391 	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
392 			   ETHSYS_TRGMII_MT7621_MASK, val);
393 
394 	return 0;
395 }
396 
397 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
398 				   phy_interface_t interface, int speed)
399 {
400 	u32 val;
401 	int ret;
402 
403 	if (interface == PHY_INTERFACE_MODE_TRGMII) {
404 		mtk_w32(eth, TRGMII_MODE, INTF_MODE);
405 		val = 500000000;
406 		ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
407 		if (ret)
408 			dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
409 		return;
410 	}
411 
412 	val = (speed == SPEED_1000) ?
413 		INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
414 	mtk_w32(eth, val, INTF_MODE);
415 
416 	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
417 			   ETHSYS_TRGMII_CLK_SEL362_5,
418 			   ETHSYS_TRGMII_CLK_SEL362_5);
419 
420 	val = (speed == SPEED_1000) ? 250000000 : 500000000;
421 	ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
422 	if (ret)
423 		dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
424 
425 	val = (speed == SPEED_1000) ?
426 		RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
427 	mtk_w32(eth, val, TRGMII_RCK_CTRL);
428 
429 	val = (speed == SPEED_1000) ?
430 		TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
431 	mtk_w32(eth, val, TRGMII_TCK_CTRL);
432 }
433 
434 static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
435 					      phy_interface_t interface)
436 {
437 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
438 					   phylink_config);
439 	struct mtk_eth *eth = mac->hw;
440 	unsigned int sid;
441 
442 	if (interface == PHY_INTERFACE_MODE_SGMII ||
443 	    phy_interface_mode_is_8023z(interface)) {
444 		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
445 		       0 : mac->id;
446 
447 		return mtk_sgmii_select_pcs(eth->sgmii, sid);
448 	}
449 
450 	return NULL;
451 }
452 
453 static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
454 			   const struct phylink_link_state *state)
455 {
456 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
457 					   phylink_config);
458 	struct mtk_eth *eth = mac->hw;
459 	int val, ge_mode, err = 0;
460 	u32 i;
461 
462 	/* MT76x8 has no hardware settings for the MAC */
463 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
464 	    mac->interface != state->interface) {
465 		/* Setup soc pin functions */
466 		switch (state->interface) {
467 		case PHY_INTERFACE_MODE_TRGMII:
468 			if (mac->id)
469 				goto err_phy;
470 			if (!MTK_HAS_CAPS(mac->hw->soc->caps,
471 					  MTK_GMAC1_TRGMII))
472 				goto err_phy;
473 			fallthrough;
474 		case PHY_INTERFACE_MODE_RGMII_TXID:
475 		case PHY_INTERFACE_MODE_RGMII_RXID:
476 		case PHY_INTERFACE_MODE_RGMII_ID:
477 		case PHY_INTERFACE_MODE_RGMII:
478 		case PHY_INTERFACE_MODE_MII:
479 		case PHY_INTERFACE_MODE_REVMII:
480 		case PHY_INTERFACE_MODE_RMII:
481 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
482 				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
483 				if (err)
484 					goto init_err;
485 			}
486 			break;
487 		case PHY_INTERFACE_MODE_1000BASEX:
488 		case PHY_INTERFACE_MODE_2500BASEX:
489 		case PHY_INTERFACE_MODE_SGMII:
490 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
491 				err = mtk_gmac_sgmii_path_setup(eth, mac->id);
492 				if (err)
493 					goto init_err;
494 			}
495 			break;
496 		case PHY_INTERFACE_MODE_GMII:
497 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
498 				err = mtk_gmac_gephy_path_setup(eth, mac->id);
499 				if (err)
500 					goto init_err;
501 			}
502 			break;
503 		default:
504 			goto err_phy;
505 		}
506 
507 		/* Setup clock for 1st gmac */
508 		if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
509 		    !phy_interface_mode_is_8023z(state->interface) &&
510 		    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
511 			if (MTK_HAS_CAPS(mac->hw->soc->caps,
512 					 MTK_TRGMII_MT7621_CLK)) {
513 				if (mt7621_gmac0_rgmii_adjust(mac->hw,
514 							      state->interface))
515 					goto err_phy;
516 			} else {
517 				/* FIXME: this is incorrect. Not only does it
518 				 * use state->speed (which is not guaranteed
519 				 * to be correct) but it also makes use of it
520 				 * in a code path that will only be reachable
521 				 * when the PHY interface mode changes, not
522 				 * when the speed changes. Consequently, RGMII
523 				 * is probably broken.
524 				 */
525 				mtk_gmac0_rgmii_adjust(mac->hw,
526 						       state->interface,
527 						       state->speed);
528 
529 				/* mt7623_pad_clk_setup */
530 				for (i = 0; i < NUM_TRGMII_CTRL; i++)
531 					mtk_w32(mac->hw,
532 						TD_DM_DRVP(8) | TD_DM_DRVN(8),
533 						TRGMII_TD_ODT(i));
534 
535 				/* Assert/release MT7623 RXC reset */
536 				mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
537 					TRGMII_RCK_CTRL);
538 				mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
539 			}
540 		}
541 
542 		ge_mode = 0;
543 		switch (state->interface) {
544 		case PHY_INTERFACE_MODE_MII:
545 		case PHY_INTERFACE_MODE_GMII:
546 			ge_mode = 1;
547 			break;
548 		case PHY_INTERFACE_MODE_REVMII:
549 			ge_mode = 2;
550 			break;
551 		case PHY_INTERFACE_MODE_RMII:
552 			if (mac->id)
553 				goto err_phy;
554 			ge_mode = 3;
555 			break;
556 		default:
557 			break;
558 		}
559 
560 		/* put the gmac into the right mode */
561 		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
562 		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
563 		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
564 		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
565 
566 		mac->interface = state->interface;
567 	}
568 
569 	/* SGMII */
570 	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
571 	    phy_interface_mode_is_8023z(state->interface)) {
572 		/* The path from GMAC to SGMII will be enabled once the
573 		 * SGMIISYS setup is done.
574 		 */
575 		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
576 
577 		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
578 				   SYSCFG0_SGMII_MASK,
579 				   ~(u32)SYSCFG0_SGMII_MASK);
580 
581 		/* Save the syscfg0 value for mac_finish */
582 		mac->syscfg0 = val;
583 	} else if (phylink_autoneg_inband(mode)) {
584 		dev_err(eth->dev,
585 			"In-band mode not supported in non SGMII mode!\n");
586 		return;
587 	}
588 
589 	return;
590 
591 err_phy:
592 	dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
593 		mac->id, phy_modes(state->interface));
594 	return;
595 
596 init_err:
597 	dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
598 		mac->id, phy_modes(state->interface), err);
599 }
600 
601 static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
602 			  phy_interface_t interface)
603 {
604 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
605 					   phylink_config);
606 	struct mtk_eth *eth = mac->hw;
607 	u32 mcr_cur, mcr_new;
608 
609 	/* Enable SGMII */
610 	if (interface == PHY_INTERFACE_MODE_SGMII ||
611 	    phy_interface_mode_is_8023z(interface))
612 		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
613 				   SYSCFG0_SGMII_MASK, mac->syscfg0);
614 
615 	/* Setup gmac */
616 	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
617 	mcr_new = mcr_cur;
618 	mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
619 		   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
620 
621 	/* Only update control register when needed! */
622 	if (mcr_new != mcr_cur)
623 		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
624 
625 	return 0;
626 }
627 
628 static void mtk_mac_pcs_get_state(struct phylink_config *config,
629 				  struct phylink_link_state *state)
630 {
631 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
632 					   phylink_config);
633 	u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
634 
635 	state->link = (pmsr & MAC_MSR_LINK);
636 	state->duplex = (pmsr & MAC_MSR_DPX) >> 1;
637 
638 	switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
639 	case 0:
640 		state->speed = SPEED_10;
641 		break;
642 	case MAC_MSR_SPEED_100:
643 		state->speed = SPEED_100;
644 		break;
645 	case MAC_MSR_SPEED_1000:
646 		state->speed = SPEED_1000;
647 		break;
648 	default:
649 		state->speed = SPEED_UNKNOWN;
650 		break;
651 	}
652 
653 	state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
654 	if (pmsr & MAC_MSR_RX_FC)
655 		state->pause |= MLO_PAUSE_RX;
656 	if (pmsr & MAC_MSR_TX_FC)
657 		state->pause |= MLO_PAUSE_TX;
658 }
659 
660 static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
661 			      phy_interface_t interface)
662 {
663 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
664 					   phylink_config);
665 	u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
666 
667 	mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
668 	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
669 }
670 
671 static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
672 				int speed)
673 {
674 	const struct mtk_soc_data *soc = eth->soc;
675 	u32 ofs, val;
676 
677 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
678 		return;
679 
680 	val = MTK_QTX_SCH_MIN_RATE_EN |
681 	      /* minimum: 10 Mbps */
682 	      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
683 	      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
684 	      MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
685 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
686 		val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
687 
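	/* The shaper appears to encode rates as mantissa * 10^exponent:
	 * the minimum-rate fields above (MAN=1, EXP=4, documented as
	 * 10 Mbps) suggest the unit is kbit/s, e.g. for MT7621 at
	 * SPEED_1000 below:
	 *   105 * 10^4 kbit/s ~= 1.05 Gbit/s
	 */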
688 	if (IS_ENABLED(CONFIG_SOC_MT7621)) {
689 		switch (speed) {
690 		case SPEED_10:
691 			val |= MTK_QTX_SCH_MAX_RATE_EN |
692 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
693 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 2) |
694 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
695 			break;
696 		case SPEED_100:
697 			val |= MTK_QTX_SCH_MAX_RATE_EN |
698 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
699 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) |
700 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
701 			break;
702 		case SPEED_1000:
703 			val |= MTK_QTX_SCH_MAX_RATE_EN |
704 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 105) |
705 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
706 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
707 			break;
708 		default:
709 			break;
710 		}
711 	} else {
712 		switch (speed) {
713 		case SPEED_10:
714 			val |= MTK_QTX_SCH_MAX_RATE_EN |
715 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
716 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
717 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
718 			break;
719 		case SPEED_100:
720 			val |= MTK_QTX_SCH_MAX_RATE_EN |
721 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
722 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
723 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
724 			break;
725 		case SPEED_1000:
726 			val |= MTK_QTX_SCH_MAX_RATE_EN |
727 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 10) |
728 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
729 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
730 			break;
731 		default:
732 			break;
733 		}
734 	}
735 
736 	ofs = MTK_QTX_OFFSET * idx;
737 	mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
738 }
739 
740 static void mtk_mac_link_up(struct phylink_config *config,
741 			    struct phy_device *phy,
742 			    unsigned int mode, phy_interface_t interface,
743 			    int speed, int duplex, bool tx_pause, bool rx_pause)
744 {
745 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
746 					   phylink_config);
747 	u32 mcr;
748 
749 	mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
750 	mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
751 		 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
752 		 MAC_MCR_FORCE_RX_FC);
753 
754 	/* Configure speed */
755 	switch (speed) {
756 	case SPEED_2500:
757 	case SPEED_1000:
758 		mcr |= MAC_MCR_SPEED_1000;
759 		break;
760 	case SPEED_100:
761 		mcr |= MAC_MCR_SPEED_100;
762 		break;
763 	}
764 
765 	mtk_set_queue_speed(mac->hw, mac->id, speed);
766 
767 	/* Configure duplex */
768 	if (duplex == DUPLEX_FULL)
769 		mcr |= MAC_MCR_FORCE_DPX;
770 
771 	/* Configure pause modes - phylink will avoid these for half duplex */
772 	if (tx_pause)
773 		mcr |= MAC_MCR_FORCE_TX_FC;
774 	if (rx_pause)
775 		mcr |= MAC_MCR_FORCE_RX_FC;
776 
777 	mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
778 	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
779 }
780 
781 static const struct phylink_mac_ops mtk_phylink_ops = {
782 	.mac_select_pcs = mtk_mac_select_pcs,
783 	.mac_pcs_get_state = mtk_mac_pcs_get_state,
784 	.mac_config = mtk_mac_config,
785 	.mac_finish = mtk_mac_finish,
786 	.mac_link_down = mtk_mac_link_down,
787 	.mac_link_up = mtk_mac_link_up,
788 };
789 
790 static int mtk_mdio_init(struct mtk_eth *eth)
791 {
792 	struct device_node *mii_np;
793 	int ret;
794 
795 	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
796 	if (!mii_np) {
797 		dev_err(eth->dev, "no %s child node found\n", "mdio-bus");
798 		return -ENODEV;
799 	}
800 
801 	if (!of_device_is_available(mii_np)) {
802 		ret = -ENODEV;
803 		goto err_put_node;
804 	}
805 
806 	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
807 	if (!eth->mii_bus) {
808 		ret = -ENOMEM;
809 		goto err_put_node;
810 	}
811 
812 	eth->mii_bus->name = "mdio";
813 	eth->mii_bus->read = mtk_mdio_read_c22;
814 	eth->mii_bus->write = mtk_mdio_write_c22;
815 	eth->mii_bus->read_c45 = mtk_mdio_read_c45;
816 	eth->mii_bus->write_c45 = mtk_mdio_write_c45;
817 	eth->mii_bus->priv = eth;
818 	eth->mii_bus->parent = eth->dev;
819 
820 	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
821 	ret = of_mdiobus_register(eth->mii_bus, mii_np);
822 
823 err_put_node:
824 	of_node_put(mii_np);
825 	return ret;
826 }
827 
828 static void mtk_mdio_cleanup(struct mtk_eth *eth)
829 {
830 	if (!eth->mii_bus)
831 		return;
832 
833 	mdiobus_unregister(eth->mii_bus);
834 }
835 
836 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
837 {
838 	unsigned long flags;
839 	u32 val;
840 
841 	spin_lock_irqsave(&eth->tx_irq_lock, flags);
842 	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
843 	mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
844 	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
845 }
846 
847 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
848 {
849 	unsigned long flags;
850 	u32 val;
851 
852 	spin_lock_irqsave(&eth->tx_irq_lock, flags);
853 	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
854 	mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
855 	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
856 }
857 
858 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
859 {
860 	unsigned long flags;
861 	u32 val;
862 
863 	spin_lock_irqsave(&eth->rx_irq_lock, flags);
864 	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
865 	mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
866 	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
867 }
868 
869 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
870 {
871 	unsigned long flags;
872 	u32 val;
873 
874 	spin_lock_irqsave(&eth->rx_irq_lock, flags);
875 	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
876 	mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
877 	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
878 }
879 
880 static int mtk_set_mac_address(struct net_device *dev, void *p)
881 {
882 	int ret = eth_mac_addr(dev, p);
883 	struct mtk_mac *mac = netdev_priv(dev);
884 	struct mtk_eth *eth = mac->hw;
885 	const char *macaddr = dev->dev_addr;
886 
887 	if (ret)
888 		return ret;
889 
890 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
891 		return -EBUSY;
892 
893 	spin_lock_bh(&mac->hw->page_lock);
894 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
895 		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
896 			MT7628_SDM_MAC_ADRH);
897 		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
898 			(macaddr[4] << 8) | macaddr[5],
899 			MT7628_SDM_MAC_ADRL);
900 	} else {
901 		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
902 			MTK_GDMA_MAC_ADRH(mac->id));
903 		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
904 			(macaddr[4] << 8) | macaddr[5],
905 			MTK_GDMA_MAC_ADRL(mac->id));
906 	}
907 	spin_unlock_bh(&mac->hw->page_lock);
908 
909 	return 0;
910 }
911 
912 void mtk_stats_update_mac(struct mtk_mac *mac)
913 {
914 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
915 	struct mtk_eth *eth = mac->hw;
916 
917 	u64_stats_update_begin(&hw_stats->syncp);
918 
919 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
920 		hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
921 		hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
922 		hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
923 		hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
924 		hw_stats->rx_checksum_errors +=
925 			mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
926 	} else {
927 		const struct mtk_reg_map *reg_map = eth->soc->reg_map;
928 		unsigned int offs = hw_stats->reg_offset;
929 		u64 stats;
930 
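		/* Byte counters are 64 bits wide in hardware and are read
		 * as a low/high register pair; the high word is folded in
		 * only when it is non-zero.
		 */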
931 		hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
932 		stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
933 		if (stats)
934 			hw_stats->rx_bytes += (stats << 32);
935 		hw_stats->rx_packets +=
936 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
937 		hw_stats->rx_overflow +=
938 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
939 		hw_stats->rx_fcs_errors +=
940 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
941 		hw_stats->rx_short_errors +=
942 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
943 		hw_stats->rx_long_errors +=
944 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
945 		hw_stats->rx_checksum_errors +=
946 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
947 		hw_stats->rx_flow_control_packets +=
948 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
949 		hw_stats->tx_skip +=
950 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
951 		hw_stats->tx_collisions +=
952 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
953 		hw_stats->tx_bytes +=
954 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
955 		stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
956 		if (stats)
957 			hw_stats->tx_bytes += (stats << 32);
958 		hw_stats->tx_packets +=
959 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
960 	}
961 
962 	u64_stats_update_end(&hw_stats->syncp);
963 }
964 
965 static void mtk_stats_update(struct mtk_eth *eth)
966 {
967 	int i;
968 
969 	for (i = 0; i < MTK_MAC_COUNT; i++) {
970 		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
971 			continue;
972 		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
973 			mtk_stats_update_mac(eth->mac[i]);
974 			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
975 		}
976 	}
977 }
978 
979 static void mtk_get_stats64(struct net_device *dev,
980 			    struct rtnl_link_stats64 *storage)
981 {
982 	struct mtk_mac *mac = netdev_priv(dev);
983 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
984 	unsigned int start;
985 
986 	if (netif_running(dev) && netif_device_present(dev)) {
987 		if (spin_trylock_bh(&hw_stats->stats_lock)) {
988 			mtk_stats_update_mac(mac);
989 			spin_unlock_bh(&hw_stats->stats_lock);
990 		}
991 	}
992 
993 	do {
994 		start = u64_stats_fetch_begin(&hw_stats->syncp);
995 		storage->rx_packets = hw_stats->rx_packets;
996 		storage->tx_packets = hw_stats->tx_packets;
997 		storage->rx_bytes = hw_stats->rx_bytes;
998 		storage->tx_bytes = hw_stats->tx_bytes;
999 		storage->collisions = hw_stats->tx_collisions;
1000 		storage->rx_length_errors = hw_stats->rx_short_errors +
1001 			hw_stats->rx_long_errors;
1002 		storage->rx_over_errors = hw_stats->rx_overflow;
1003 		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
1004 		storage->rx_errors = hw_stats->rx_checksum_errors;
1005 		storage->tx_aborted_errors = hw_stats->tx_skip;
1006 	} while (u64_stats_fetch_retry(&hw_stats->syncp, start));
1007 
1008 	storage->tx_errors = dev->stats.tx_errors;
1009 	storage->rx_dropped = dev->stats.rx_dropped;
1010 	storage->tx_dropped = dev->stats.tx_dropped;
1011 }
1012 
1013 static inline int mtk_max_frag_size(int mtu)
1014 {
1015 	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH_2K */
1016 	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K)
1017 		mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
1018 
1019 	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
1020 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1021 }
1022 
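/* Inverse of mtk_max_frag_size(): strip the skb headroom and the trailing
 * struct skb_shared_info back off a fragment to recover the usable buffer
 * size; the WARN_ON checks that a 2K frame still fits.
 */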
1023 static inline int mtk_max_buf_size(int frag_size)
1024 {
1025 	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
1026 		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1027 
1028 	WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K);
1029 
1030 	return buf_size;
1031 }
1032 
1033 static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
1034 			    struct mtk_rx_dma_v2 *dma_rxd)
1035 {
1036 	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
1037 	if (!(rxd->rxd2 & RX_DMA_DONE))
1038 		return false;
1039 
1040 	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
1041 	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
1042 	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
1043 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
1044 		rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
1045 		rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
1046 	}
1047 
1048 	return true;
1049 }
1050 
1051 static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
1052 {
1053 	unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
1054 	unsigned long data;
1055 
1056 	data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
1057 				get_order(size));
1058 
1059 	return (void *)data;
1060 }
1061 
1062 /* the qdma core needs scratch memory to be set up */
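/* The scratch (forward-queue) ring is built as a singly linked list:
 * each descriptor's txd2 holds the DMA address of the next descriptor,
 * and txd1 points at one MTK_QDMA_PAGE_SIZE slice of scratch_head.
 * Head, tail, count and buffer length are then programmed into the QDMA
 * FQ registers at the end of the function.
 */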
1063 static int mtk_init_fq_dma(struct mtk_eth *eth)
1064 {
1065 	const struct mtk_soc_data *soc = eth->soc;
1066 	dma_addr_t phy_ring_tail;
1067 	int cnt = MTK_QDMA_RING_SIZE;
1068 	dma_addr_t dma_addr;
1069 	int i;
1070 
1071 	eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
1072 					       cnt * soc->txrx.txd_size,
1073 					       &eth->phy_scratch_ring,
1074 					       GFP_KERNEL);
1075 	if (unlikely(!eth->scratch_ring))
1076 		return -ENOMEM;
1077 
1078 	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
1079 	if (unlikely(!eth->scratch_head))
1080 		return -ENOMEM;
1081 
1082 	dma_addr = dma_map_single(eth->dma_dev,
1083 				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
1084 				  DMA_FROM_DEVICE);
1085 	if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
1086 		return -ENOMEM;
1087 
1088 	phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
1089 
1090 	for (i = 0; i < cnt; i++) {
1091 		struct mtk_tx_dma_v2 *txd;
1092 
1093 		txd = eth->scratch_ring + i * soc->txrx.txd_size;
1094 		txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
1095 		if (i < cnt - 1)
1096 			txd->txd2 = eth->phy_scratch_ring +
1097 				    (i + 1) * soc->txrx.txd_size;
1098 
1099 		txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
1100 		txd->txd4 = 0;
1101 		if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
1102 			txd->txd5 = 0;
1103 			txd->txd6 = 0;
1104 			txd->txd7 = 0;
1105 			txd->txd8 = 0;
1106 		}
1107 	}
1108 
1109 	mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
1110 	mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
1111 	mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
1112 	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
1113 
1114 	return 0;
1115 }
1116 
1117 static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
1118 {
1119 	return ring->dma + (desc - ring->phys);
1120 }
1121 
1122 static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
1123 					     void *txd, u32 txd_size)
1124 {
1125 	int idx = (txd - ring->dma) / txd_size;
1126 
1127 	return &ring->buf[idx];
1128 }
1129 
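/* The PDMA ring shadows the QDMA ring element-for-element; the pointer
 * arithmetic below resolves a QDMA descriptor to its PDMA twin by adding
 * the same element offset to the PDMA base.
 */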
1130 static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
1131 				       struct mtk_tx_dma *dma)
1132 {
1133 	return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
1134 }
1135 
1136 static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
1137 {
1138 	return (dma - ring->dma) / txd_size;
1139 }
1140 
1141 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1142 			 struct xdp_frame_bulk *bq, bool napi)
1143 {
1144 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1145 		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
1146 			dma_unmap_single(eth->dma_dev,
1147 					 dma_unmap_addr(tx_buf, dma_addr0),
1148 					 dma_unmap_len(tx_buf, dma_len0),
1149 					 DMA_TO_DEVICE);
1150 		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
1151 			dma_unmap_page(eth->dma_dev,
1152 				       dma_unmap_addr(tx_buf, dma_addr0),
1153 				       dma_unmap_len(tx_buf, dma_len0),
1154 				       DMA_TO_DEVICE);
1155 		}
1156 	} else {
1157 		if (dma_unmap_len(tx_buf, dma_len0)) {
1158 			dma_unmap_page(eth->dma_dev,
1159 				       dma_unmap_addr(tx_buf, dma_addr0),
1160 				       dma_unmap_len(tx_buf, dma_len0),
1161 				       DMA_TO_DEVICE);
1162 		}
1163 
1164 		if (dma_unmap_len(tx_buf, dma_len1)) {
1165 			dma_unmap_page(eth->dma_dev,
1166 				       dma_unmap_addr(tx_buf, dma_addr1),
1167 				       dma_unmap_len(tx_buf, dma_len1),
1168 				       DMA_TO_DEVICE);
1169 		}
1170 	}
1171 
1172 	if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
1173 		if (tx_buf->type == MTK_TYPE_SKB) {
1174 			struct sk_buff *skb = tx_buf->data;
1175 
1176 			if (napi)
1177 				napi_consume_skb(skb, napi);
1178 			else
1179 				dev_kfree_skb_any(skb);
1180 		} else {
1181 			struct xdp_frame *xdpf = tx_buf->data;
1182 
1183 			if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
1184 				xdp_return_frame_rx_napi(xdpf);
1185 			else if (bq)
1186 				xdp_return_frame_bulk(xdpf, bq);
1187 			else
1188 				xdp_return_frame(xdpf);
1189 		}
1190 	}
1191 	tx_buf->flags = 0;
1192 	tx_buf->data = NULL;
1193 }
1194 
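/* On PDMA each descriptor carries up to two buffers: slot 0 goes in txd1
 * with its length in TX_DMA_PLEN0, slot 1 in txd3 with its length in
 * TX_DMA_PLEN1. The low bit of idx selects the slot, which is why the
 * unmap state tracks dma_addr0/dma_len0 and dma_addr1/dma_len1 separately.
 */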
1195 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1196 			 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
1197 			 size_t size, int idx)
1198 {
1199 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1200 		dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1201 		dma_unmap_len_set(tx_buf, dma_len0, size);
1202 	} else {
1203 		if (idx & 1) {
1204 			txd->txd3 = mapped_addr;
1205 			txd->txd2 |= TX_DMA_PLEN1(size);
1206 			dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
1207 			dma_unmap_len_set(tx_buf, dma_len1, size);
1208 		} else {
1209 			tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1210 			txd->txd1 = mapped_addr;
1211 			txd->txd2 = TX_DMA_PLEN0(size);
1212 			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1213 			dma_unmap_len_set(tx_buf, dma_len0, size);
1214 		}
1215 	}
1216 }
1217 
1218 static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
1219 				   struct mtk_tx_dma_desc_info *info)
1220 {
1221 	struct mtk_mac *mac = netdev_priv(dev);
1222 	struct mtk_eth *eth = mac->hw;
1223 	struct mtk_tx_dma *desc = txd;
1224 	u32 data;
1225 
1226 	WRITE_ONCE(desc->txd1, info->addr);
1227 
1228 	data = TX_DMA_SWC | TX_DMA_PLEN0(info->size) |
1229 	       FIELD_PREP(TX_DMA_PQID, info->qid);
1230 	if (info->last)
1231 		data |= TX_DMA_LS0;
1232 	WRITE_ONCE(desc->txd3, data);
1233 
1234 	data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
1235 	if (info->first) {
1236 		if (info->gso)
1237 			data |= TX_DMA_TSO;
1238 		/* tx checksum offload */
1239 		if (info->csum)
1240 			data |= TX_DMA_CHKSUM;
1241 		/* vlan header offload */
1242 		if (info->vlan)
1243 			data |= TX_DMA_INS_VLAN | info->vlan_tci;
1244 	}
1245 	WRITE_ONCE(desc->txd4, data);
1246 }
1247 
1248 static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
1249 				   struct mtk_tx_dma_desc_info *info)
1250 {
1251 	struct mtk_mac *mac = netdev_priv(dev);
1252 	struct mtk_tx_dma_v2 *desc = txd;
1253 	struct mtk_eth *eth = mac->hw;
1254 	u32 data;
1255 
1256 	WRITE_ONCE(desc->txd1, info->addr);
1257 
1258 	data = TX_DMA_PLEN0(info->size);
1259 	if (info->last)
1260 		data |= TX_DMA_LS0;
1261 	WRITE_ONCE(desc->txd3, data);
1262 
1263 	data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
1264 	data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
1265 	WRITE_ONCE(desc->txd4, data);
1266 
1267 	data = 0;
1268 	if (info->first) {
1269 		if (info->gso)
1270 			data |= TX_DMA_TSO_V2;
1271 		/* tx checksum offload */
1272 		if (info->csum)
1273 			data |= TX_DMA_CHKSUM_V2;
1274 	}
1275 	WRITE_ONCE(desc->txd5, data);
1276 
1277 	data = 0;
1278 	if (info->first && info->vlan)
1279 		data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
1280 	WRITE_ONCE(desc->txd6, data);
1281 
1282 	WRITE_ONCE(desc->txd7, 0);
1283 	WRITE_ONCE(desc->txd8, 0);
1284 }
1285 
1286 static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
1287 				struct mtk_tx_dma_desc_info *info)
1288 {
1289 	struct mtk_mac *mac = netdev_priv(dev);
1290 	struct mtk_eth *eth = mac->hw;
1291 
1292 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
1293 		mtk_tx_set_dma_desc_v2(dev, txd, info);
1294 	else
1295 		mtk_tx_set_dma_desc_v1(dev, txd, info);
1296 }
1297 
1298 static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
1299 		      int tx_num, struct mtk_tx_ring *ring, bool gso)
1300 {
1301 	struct mtk_tx_dma_desc_info txd_info = {
1302 		.size = skb_headlen(skb),
1303 		.gso = gso,
1304 		.csum = skb->ip_summed == CHECKSUM_PARTIAL,
1305 		.vlan = skb_vlan_tag_present(skb),
1306 		.qid = skb_get_queue_mapping(skb),
1307 		.vlan_tci = skb_vlan_tag_get(skb),
1308 		.first = true,
1309 		.last = !skb_is_nonlinear(skb),
1310 	};
1311 	struct netdev_queue *txq;
1312 	struct mtk_mac *mac = netdev_priv(dev);
1313 	struct mtk_eth *eth = mac->hw;
1314 	const struct mtk_soc_data *soc = eth->soc;
1315 	struct mtk_tx_dma *itxd, *txd;
1316 	struct mtk_tx_dma *itxd_pdma, *txd_pdma;
1317 	struct mtk_tx_buf *itx_buf, *tx_buf;
1318 	int i, n_desc = 1;
1319 	int queue = skb_get_queue_mapping(skb);
1320 	int k = 0;
1321 
1322 	txq = netdev_get_tx_queue(dev, queue);
1323 	itxd = ring->next_free;
1324 	itxd_pdma = qdma_to_pdma(ring, itxd);
1325 	if (itxd == ring->last_free)
1326 		return -ENOMEM;
1327 
1328 	itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
1329 	memset(itx_buf, 0, sizeof(*itx_buf));
1330 
1331 	txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
1332 				       DMA_TO_DEVICE);
1333 	if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1334 		return -ENOMEM;
1335 
1336 	mtk_tx_set_dma_desc(dev, itxd, &txd_info);
1337 
1338 	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1339 	itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
1340 			  MTK_TX_FLAGS_FPORT1;
1341 	setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
1342 		     k++);
1343 
1344 	/* TX SG offload */
1345 	txd = itxd;
1346 	txd_pdma = qdma_to_pdma(ring, txd);
1347 
1348 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1349 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1350 		unsigned int offset = 0;
1351 		int frag_size = skb_frag_size(frag);
1352 
1353 		while (frag_size) {
1354 			bool new_desc = true;
1355 
1356 			if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
1357 			    (i & 0x1)) {
1358 				txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1359 				txd_pdma = qdma_to_pdma(ring, txd);
1360 				if (txd == ring->last_free)
1361 					goto err_dma;
1362 
1363 				n_desc++;
1364 			} else {
1365 				new_desc = false;
1366 			}
1367 
1368 			memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1369 			txd_info.size = min_t(unsigned int, frag_size,
1370 					      soc->txrx.dma_max_len);
1371 			txd_info.qid = queue;
1372 			txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
1373 					!(frag_size - txd_info.size);
1374 			txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
1375 							 offset, txd_info.size,
1376 							 DMA_TO_DEVICE);
1377 			if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1378 				goto err_dma;
1379 
1380 			mtk_tx_set_dma_desc(dev, txd, &txd_info);
1381 
1382 			tx_buf = mtk_desc_to_tx_buf(ring, txd,
1383 						    soc->txrx.txd_size);
1384 			if (new_desc)
1385 				memset(tx_buf, 0, sizeof(*tx_buf));
1386 			tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1387 			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
1388 			tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
1389 					 MTK_TX_FLAGS_FPORT1;
1390 
1391 			setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
1392 				     txd_info.size, k++);
1393 
1394 			frag_size -= txd_info.size;
1395 			offset += txd_info.size;
1396 		}
1397 	}
1398 
1399 	/* store skb to cleanup */
1400 	itx_buf->type = MTK_TYPE_SKB;
1401 	itx_buf->data = skb;
1402 
1403 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1404 		if (k & 0x1)
1405 			txd_pdma->txd2 |= TX_DMA_LS0;
1406 		else
1407 			txd_pdma->txd2 |= TX_DMA_LS1;
1408 	}
1409 
1410 	netdev_tx_sent_queue(txq, skb->len);
1411 	skb_tx_timestamp(skb);
1412 
1413 	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1414 	atomic_sub(n_desc, &ring->free_count);
1415 
1416 	/* make sure that all changes to the dma ring are flushed before we
1417 	 * continue
1418 	 */
1419 	wmb();
1420 
1421 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1422 		if (netif_xmit_stopped(txq) || !netdev_xmit_more())
1423 			mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1424 	} else {
1425 		int next_idx;
1426 
1427 		next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
1428 					 ring->dma_size);
1429 		mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
1430 	}
1431 
1432 	return 0;
1433 
1434 err_dma:
1435 	do {
1436 		tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
1437 
1438 		/* unmap dma */
1439 		mtk_tx_unmap(eth, tx_buf, NULL, false);
1440 
1441 		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1442 		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
1443 			itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
1444 
1445 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
1446 		itxd_pdma = qdma_to_pdma(ring, itxd);
1447 	} while (itxd != txd);
1448 
1449 	return -ENOMEM;
1450 }
1451 
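/* Worst-case descriptor count for a skb: one for the linear area plus,
 * for GSO, one per dma_max_len chunk of every fragment (a descriptor
 * buffer can cover at most dma_max_len bytes). Non-GSO fragments are
 * assumed to fit a single descriptor each.
 */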
1452 static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
1453 {
1454 	int i, nfrags = 1;
1455 	skb_frag_t *frag;
1456 
1457 	if (skb_is_gso(skb)) {
1458 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1459 			frag = &skb_shinfo(skb)->frags[i];
1460 			nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1461 					       eth->soc->txrx.dma_max_len);
1462 		}
1463 	} else {
1464 		nfrags += skb_shinfo(skb)->nr_frags;
1465 	}
1466 
1467 	return nfrags;
1468 }
1469 
1470 static int mtk_queue_stopped(struct mtk_eth *eth)
1471 {
1472 	int i;
1473 
1474 	for (i = 0; i < MTK_MAC_COUNT; i++) {
1475 		if (!eth->netdev[i])
1476 			continue;
1477 		if (netif_queue_stopped(eth->netdev[i]))
1478 			return 1;
1479 	}
1480 
1481 	return 0;
1482 }
1483 
1484 static void mtk_wake_queue(struct mtk_eth *eth)
1485 {
1486 	int i;
1487 
1488 	for (i = 0; i < MTK_MAC_COUNT; i++) {
1489 		if (!eth->netdev[i])
1490 			continue;
1491 		netif_tx_wake_all_queues(eth->netdev[i]);
1492 	}
1493 }
1494 
1495 static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1496 {
1497 	struct mtk_mac *mac = netdev_priv(dev);
1498 	struct mtk_eth *eth = mac->hw;
1499 	struct mtk_tx_ring *ring = &eth->tx_ring;
1500 	struct net_device_stats *stats = &dev->stats;
1501 	bool gso = false;
1502 	int tx_num;
1503 
1504 	/* normally we can rely on the stack not calling this more than once,
1505 	 * however we have 2 queues running on the same ring so we need to lock
1506 	 * the ring access
1507 	 */
1508 	spin_lock(&eth->page_lock);
1509 
1510 	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1511 		goto drop;
1512 
1513 	tx_num = mtk_cal_txd_req(eth, skb);
1514 	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1515 		netif_tx_stop_all_queues(dev);
1516 		netif_err(eth, tx_queued, dev,
1517 			  "Tx Ring full when queue awake!\n");
1518 		spin_unlock(&eth->page_lock);
1519 		return NETDEV_TX_BUSY;
1520 	}
1521 
1522 	/* TSO: fill MSS info in tcp checksum field */
1523 	if (skb_is_gso(skb)) {
1524 		if (skb_cow_head(skb, 0)) {
1525 			netif_warn(eth, tx_err, dev,
1526 				   "GSO expand head fail.\n");
1527 			goto drop;
1528 		}
1529 
1530 		if (skb_shinfo(skb)->gso_type &
1531 				(SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1532 			gso = true;
1533 			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1534 		}
1535 	}
1536 
1537 	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1538 		goto drop;
1539 
1540 	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1541 		netif_tx_stop_all_queues(dev);
1542 
1543 	spin_unlock(&eth->page_lock);
1544 
1545 	return NETDEV_TX_OK;
1546 
1547 drop:
1548 	spin_unlock(&eth->page_lock);
1549 	stats->tx_dropped++;
1550 	dev_kfree_skb_any(skb);
1551 	return NETDEV_TX_OK;
1552 }
1553 
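/* With hardware LRO several RX rings are active: peek at each ring's next
 * descriptor and service the first one that has RX_DMA_DONE set. Without
 * LRO only ring 0 is used.
 */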
1554 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1555 {
1556 	int i;
1557 	struct mtk_rx_ring *ring;
1558 	int idx;
1559 
1560 	if (!eth->hwlro)
1561 		return &eth->rx_ring[0];
1562 
1563 	for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1564 		struct mtk_rx_dma *rxd;
1565 
1566 		ring = &eth->rx_ring[i];
1567 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1568 		rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
1569 		if (rxd->rxd2 & RX_DMA_DONE) {
1570 			ring->calc_idx_update = true;
1571 			return ring;
1572 		}
1573 	}
1574 
1575 	return NULL;
1576 }
1577 
1578 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
1579 {
1580 	struct mtk_rx_ring *ring;
1581 	int i;
1582 
1583 	if (!eth->hwlro) {
1584 		ring = &eth->rx_ring[0];
1585 		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1586 	} else {
1587 		for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1588 			ring = &eth->rx_ring[i];
1589 			if (ring->calc_idx_update) {
1590 				ring->calc_idx_update = false;
1591 				mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1592 			}
1593 		}
1594 	}
1595 }
1596 
1597 static bool mtk_page_pool_enabled(struct mtk_eth *eth)
1598 {
1599 	return MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2);
1600 }
1601 
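/* Pool buffers are mapped DMA_BIDIRECTIONAL when an XDP program is
 * attached so that XDP_TX can transmit an RX page in place without
 * remapping; otherwise DMA_FROM_DEVICE is sufficient.
 */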
1602 static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
1603 					      struct xdp_rxq_info *xdp_q,
1604 					      int id, int size)
1605 {
1606 	struct page_pool_params pp_params = {
1607 		.order = 0,
1608 		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
1609 		.pool_size = size,
1610 		.nid = NUMA_NO_NODE,
1611 		.dev = eth->dma_dev,
1612 		.offset = MTK_PP_HEADROOM,
1613 		.max_len = MTK_PP_MAX_BUF_SIZE,
1614 	};
1615 	struct page_pool *pp;
1616 	int err;
1617 
1618 	pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
1619 							  : DMA_FROM_DEVICE;
1620 	pp = page_pool_create(&pp_params);
1621 	if (IS_ERR(pp))
1622 		return pp;
1623 
1624 	err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, eth->rx_napi.napi_id,
1625 				 id, PAGE_SIZE);
1626 	if (err < 0)
1627 		goto err_free_pp;
1628 
1629 	err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
1630 	if (err)
1631 		goto err_unregister_rxq;
1632 
1633 	return pp;
1634 
1635 err_unregister_rxq:
1636 	xdp_rxq_info_unreg(xdp_q);
1637 err_free_pp:
1638 	page_pool_destroy(pp);
1639 
1640 	return ERR_PTR(err);
1641 }
1642 
1643 static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
1644 				    gfp_t gfp_mask)
1645 {
1646 	struct page *page;
1647 
1648 	page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
1649 	if (!page)
1650 		return NULL;
1651 
1652 	*dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
1653 	return page_address(page);
1654 }
1655 
1656 static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
1657 {
1658 	if (ring->page_pool)
1659 		page_pool_put_full_page(ring->page_pool,
1660 					virt_to_head_page(data), napi);
1661 	else
1662 		skb_free_frag(data);
1663 }
1664 
1665 static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
1666 			     struct mtk_tx_dma_desc_info *txd_info,
1667 			     struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
1668 			     void *data, u16 headroom, int index, bool dma_map)
1669 {
1670 	struct mtk_tx_ring *ring = &eth->tx_ring;
1671 	struct mtk_mac *mac = netdev_priv(dev);
1672 	struct mtk_tx_dma *txd_pdma;
1673 
1674 	if (dma_map) {  /* ndo_xdp_xmit */
1675 		txd_info->addr = dma_map_single(eth->dma_dev, data,
1676 						txd_info->size, DMA_TO_DEVICE);
1677 		if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
1678 			return -ENOMEM;
1679 
1680 		tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1681 	} else {
1682 		struct page *page = virt_to_head_page(data);
1683 
1684 		txd_info->addr = page_pool_get_dma_addr(page) +
1685 				 sizeof(struct xdp_frame) + headroom;
1686 		dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
1687 					   txd_info->size, DMA_BIDIRECTIONAL);
1688 	}
1689 	mtk_tx_set_dma_desc(dev, txd, txd_info);
1690 
1691 	tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
1692 	tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
1693 	tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1694 
1695 	txd_pdma = qdma_to_pdma(ring, txd);
1696 	setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
1697 		     index);
1698 
1699 	return 0;
1700 }
1701 
1702 static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
1703 				struct net_device *dev, bool dma_map)
1704 {
1705 	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
1706 	const struct mtk_soc_data *soc = eth->soc;
1707 	struct mtk_tx_ring *ring = &eth->tx_ring;
1708 	struct mtk_mac *mac = netdev_priv(dev);
1709 	struct mtk_tx_dma_desc_info txd_info = {
1710 		.size	= xdpf->len,
1711 		.first	= true,
1712 		.last	= !xdp_frame_has_frags(xdpf),
1713 		.qid	= mac->id,
1714 	};
1715 	int err, index = 0, n_desc = 1, nr_frags;
1716 	struct mtk_tx_buf *htx_buf, *tx_buf;
1717 	struct mtk_tx_dma *htxd, *txd;
1718 	void *data = xdpf->data;
1719 
1720 	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1721 		return -EBUSY;
1722 
1723 	nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
1724 	if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
1725 		return -EBUSY;
1726 
1727 	spin_lock(&eth->page_lock);
1728 
1729 	txd = ring->next_free;
1730 	if (txd == ring->last_free) {
1731 		spin_unlock(&eth->page_lock);
1732 		return -ENOMEM;
1733 	}
1734 	htxd = txd;
1735 
1736 	tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
1737 	memset(tx_buf, 0, sizeof(*tx_buf));
1738 	htx_buf = tx_buf;
1739 
1740 	for (;;) {
1741 		err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
1742 					data, xdpf->headroom, index, dma_map);
1743 		if (err < 0)
1744 			goto unmap;
1745 
1746 		if (txd_info.last)
1747 			break;
1748 
1749 		if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
1750 			txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1751 			if (txd == ring->last_free)
1752 				goto unmap;
1753 
1754 			tx_buf = mtk_desc_to_tx_buf(ring, txd,
1755 						    soc->txrx.txd_size);
1756 			memset(tx_buf, 0, sizeof(*tx_buf));
1757 			n_desc++;
1758 		}
1759 
1760 		memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1761 		txd_info.size = skb_frag_size(&sinfo->frags[index]);
1762 		txd_info.last = index + 1 == nr_frags;
1763 		txd_info.qid = mac->id;
1764 		data = skb_frag_address(&sinfo->frags[index]);
1765 
1766 		index++;
1767 	}
1768 	/* store xdpf for cleanup */
1769 	htx_buf->data = xdpf;
1770 
1771 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1772 		struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, txd);
1773 
1774 		if (index & 1)
1775 			txd_pdma->txd2 |= TX_DMA_LS0;
1776 		else
1777 			txd_pdma->txd2 |= TX_DMA_LS1;
1778 	}
1779 
1780 	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1781 	atomic_sub(n_desc, &ring->free_count);
1782 
1783 	/* make sure that all changes to the dma ring are flushed before we
1784 	 * continue
1785 	 */
1786 	wmb();
1787 
1788 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1789 		mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1790 	} else {
1791 		int idx;
1792 
1793 		idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
1794 		mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
1795 			MT7628_TX_CTX_IDX0);
1796 	}
1797 
1798 	spin_unlock(&eth->page_lock);
1799 
1800 	return 0;
1801 
1802 unmap:
1803 	while (htxd != txd) {
1804 		tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
1805 		mtk_tx_unmap(eth, tx_buf, NULL, false);
1806 
1807 		htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1808 		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1809 			struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, htxd);
1810 
1811 			txd_pdma->txd2 = TX_DMA_DESP2_DEF;
1812 		}
1813 
1814 		htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
1815 	}
1816 
1817 	spin_unlock(&eth->page_lock);
1818 
1819 	return err;
1820 }
1821 
1822 static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
1823 			struct xdp_frame **frames, u32 flags)
1824 {
1825 	struct mtk_mac *mac = netdev_priv(dev);
1826 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
1827 	struct mtk_eth *eth = mac->hw;
1828 	int i, nxmit = 0;
1829 
1830 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1831 		return -EINVAL;
1832 
1833 	for (i = 0; i < num_frame; i++) {
1834 		if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
1835 			break;
1836 		nxmit++;
1837 	}
1838 
1839 	u64_stats_update_begin(&hw_stats->syncp);
1840 	hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
1841 	hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
1842 	u64_stats_update_end(&hw_stats->syncp);
1843 
1844 	return nxmit;
1845 }
1846 
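/* Run the XDP program against one RX buffer. Page ownership: XDP_PASS
 * leaves the page with the caller (it is turned into an skb and recycled
 * via skb_mark_for_recycle()), XDP_TX and XDP_REDIRECT hand it off, and
 * any other verdict returns it to the page pool here.
 */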
1847 static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
1848 		       struct xdp_buff *xdp, struct net_device *dev)
1849 {
1850 	struct mtk_mac *mac = netdev_priv(dev);
1851 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
1852 	u64 *count = &hw_stats->xdp_stats.rx_xdp_drop;
1853 	struct bpf_prog *prog;
1854 	u32 act = XDP_PASS;
1855 
1856 	rcu_read_lock();
1857 
1858 	prog = rcu_dereference(eth->prog);
1859 	if (!prog)
1860 		goto out;
1861 
1862 	act = bpf_prog_run_xdp(prog, xdp);
1863 	switch (act) {
1864 	case XDP_PASS:
1865 		count = &hw_stats->xdp_stats.rx_xdp_pass;
1866 		goto update_stats;
1867 	case XDP_REDIRECT:
1868 		if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
1869 			act = XDP_DROP;
1870 			break;
1871 		}
1872 
1873 		count = &hw_stats->xdp_stats.rx_xdp_redirect;
1874 		goto update_stats;
1875 	case XDP_TX: {
1876 		struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
1877 
1878 		if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
1879 			count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
1880 			act = XDP_DROP;
1881 			break;
1882 		}
1883 
1884 		count = &hw_stats->xdp_stats.rx_xdp_tx;
1885 		goto update_stats;
1886 	}
1887 	default:
1888 		bpf_warn_invalid_xdp_action(dev, prog, act);
1889 		fallthrough;
1890 	case XDP_ABORTED:
1891 		trace_xdp_exception(dev, prog, act);
1892 		fallthrough;
1893 	case XDP_DROP:
1894 		break;
1895 	}
1896 
1897 	page_pool_put_full_page(ring->page_pool,
1898 				virt_to_head_page(xdp->data), true);
1899 
1900 update_stats:
1901 	u64_stats_update_begin(&hw_stats->syncp);
1902 	*count = *count + 1;
1903 	u64_stats_update_end(&hw_stats->syncp);
1904 out:
1905 	rcu_read_unlock();
1906 
1907 	return act;
1908 }
1909 
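/* NAPI RX poll: reap up to @budget descriptors from the active RX ring.
 * A replacement buffer is allocated before a packet is handed up the
 * stack, so a descriptor is only released back to the hardware once its
 * data pointer has been swapped; if the allocation fails the packet is
 * dropped and the old buffer is kept. All ring updates are flushed with
 * wmb() before the CPU index register is advanced.
 */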
1910 static int mtk_poll_rx(struct napi_struct *napi, int budget,
1911 		       struct mtk_eth *eth)
1912 {
1913 	struct dim_sample dim_sample = {};
1914 	struct mtk_rx_ring *ring;
1915 	bool xdp_flush = false;
1916 	int idx;
1917 	struct sk_buff *skb;
1918 	u8 *data, *new_data;
1919 	struct mtk_rx_dma_v2 *rxd, trxd;
1920 	int done = 0, bytes = 0;
1921 
1922 	while (done < budget) {
1923 		unsigned int pktlen, *rxdcsum;
1924 		struct net_device *netdev;
1925 		dma_addr_t dma_addr;
1926 		u32 hash, reason;
1927 		int mac = 0;
1928 
1929 		ring = mtk_get_rx_ring(eth);
1930 		if (unlikely(!ring))
1931 			goto rx_done;
1932 
1933 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1934 		rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
1935 		data = ring->data[idx];
1936 
1937 		if (!mtk_rx_get_desc(eth, &trxd, rxd))
1938 			break;
1939 
1940 		/* find out which MAC the packet came from; values start at 1 */
1941 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
1942 			mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
1943 		else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
1944 			 !(trxd.rxd4 & RX_DMA_SPECIAL_TAG))
1945 			mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
1946 
1947 		if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
1948 			     !eth->netdev[mac]))
1949 			goto release_desc;
1950 
1951 		netdev = eth->netdev[mac];
1952 
1953 		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1954 			goto release_desc;
1955 
1956 		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
1957 
1958 		/* alloc new buffer */
1959 		if (ring->page_pool) {
1960 			struct page *page = virt_to_head_page(data);
1961 			struct xdp_buff xdp;
1962 			u32 ret;
1963 
1964 			new_data = mtk_page_pool_get_buff(ring->page_pool,
1965 							  &dma_addr,
1966 							  GFP_ATOMIC);
1967 			if (unlikely(!new_data)) {
1968 				netdev->stats.rx_dropped++;
1969 				goto release_desc;
1970 			}
1971 
1972 			dma_sync_single_for_cpu(eth->dma_dev,
1973 				page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
1974 				pktlen, page_pool_get_dma_dir(ring->page_pool));
1975 
1976 			xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
1977 			xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
1978 					 false);
1979 			xdp_buff_clear_frags_flag(&xdp);
1980 
1981 			ret = mtk_xdp_run(eth, ring, &xdp, netdev);
1982 			if (ret == XDP_REDIRECT)
1983 				xdp_flush = true;
1984 
1985 			if (ret != XDP_PASS)
1986 				goto skip_rx;
1987 
1988 			skb = build_skb(data, PAGE_SIZE);
1989 			if (unlikely(!skb)) {
1990 				page_pool_put_full_page(ring->page_pool,
1991 							page, true);
1992 				netdev->stats.rx_dropped++;
1993 				goto skip_rx;
1994 			}
1995 
1996 			skb_reserve(skb, xdp.data - xdp.data_hard_start);
1997 			skb_put(skb, xdp.data_end - xdp.data);
1998 			skb_mark_for_recycle(skb);
1999 		} else {
2000 			if (ring->frag_size <= PAGE_SIZE)
2001 				new_data = napi_alloc_frag(ring->frag_size);
2002 			else
2003 				new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
2004 
2005 			if (unlikely(!new_data)) {
2006 				netdev->stats.rx_dropped++;
2007 				goto release_desc;
2008 			}
2009 
2010 			dma_addr = dma_map_single(eth->dma_dev,
2011 				new_data + NET_SKB_PAD + eth->ip_align,
2012 				ring->buf_size, DMA_FROM_DEVICE);
2013 			if (unlikely(dma_mapping_error(eth->dma_dev,
2014 						       dma_addr))) {
2015 				skb_free_frag(new_data);
2016 				netdev->stats.rx_dropped++;
2017 				goto release_desc;
2018 			}
2019 
2020 			dma_unmap_single(eth->dma_dev, trxd.rxd1,
2021 					 ring->buf_size, DMA_FROM_DEVICE);
2022 
2023 			skb = build_skb(data, ring->frag_size);
2024 			if (unlikely(!skb)) {
2025 				netdev->stats.rx_dropped++;
2026 				skb_free_frag(data);
2027 				goto skip_rx;
2028 			}
2029 
2030 			skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
2031 			skb_put(skb, pktlen);
2032 		}
2033 
2034 		skb->dev = netdev;
2035 		bytes += skb->len;
2036 
2037 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2038 			reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
2039 			hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
2040 			if (hash != MTK_RXD5_FOE_ENTRY)
2041 				skb_set_hash(skb, jhash_1word(hash, 0),
2042 					     PKT_HASH_TYPE_L4);
2043 			rxdcsum = &trxd.rxd3;
2044 		} else {
2045 			reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
2046 			hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
2047 			if (hash != MTK_RXD4_FOE_ENTRY)
2048 				skb_set_hash(skb, jhash_1word(hash, 0),
2049 					     PKT_HASH_TYPE_L4);
2050 			rxdcsum = &trxd.rxd4;
2051 		}
2052 
2053 		if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
2054 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2055 		else
2056 			skb_checksum_none_assert(skb);
2057 		skb->protocol = eth_type_trans(skb, netdev);
2058 
2059 		if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
2060 			mtk_ppe_check_skb(eth->ppe[0], skb, hash);
2061 
2062 		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
2063 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2064 				if (trxd.rxd3 & RX_DMA_VTAG_V2)
2065 					__vlan_hwaccel_put_tag(skb,
2066 						htons(RX_DMA_VPID(trxd.rxd4)),
2067 						RX_DMA_VID(trxd.rxd4));
2068 			} else if (trxd.rxd2 & RX_DMA_VTAG) {
2069 				__vlan_hwaccel_put_tag(skb, htons(RX_DMA_VPID(trxd.rxd3)),
2070 						       RX_DMA_VID(trxd.rxd3));
2071 			}
2072 		}
2073 
2074 		/* When using VLAN untagging in combination with DSA, the
2075 		 * hardware treats the MTK special tag as a VLAN and untags it.
2076 		 */
2077 		if (skb_vlan_tag_present(skb) && netdev_uses_dsa(netdev)) {
2078 			unsigned int port = ntohs(skb->vlan_proto) & GENMASK(2, 0);
2079 
2080 			if (port < ARRAY_SIZE(eth->dsa_meta) &&
2081 			    eth->dsa_meta[port])
2082 				skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
2083 
2084 			__vlan_hwaccel_clear_tag(skb);
2085 		}
2086 
2087 		skb_record_rx_queue(skb, 0);
2088 		napi_gro_receive(napi, skb);
2089 
2090 skip_rx:
2091 		ring->data[idx] = new_data;
2092 		rxd->rxd1 = (unsigned int)dma_addr;
2093 release_desc:
2094 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2095 			rxd->rxd2 = RX_DMA_LSO;
2096 		else
2097 			rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2098 
2099 		ring->calc_idx = idx;
2100 		done++;
2101 	}
2102 
2103 rx_done:
2104 	if (done) {
2105 		/* make sure that all changes to the dma ring are flushed before
2106 		 * we continue
2107 		 */
2108 		wmb();
2109 		mtk_update_rx_cpu_idx(eth);
2110 	}
2111 
2112 	eth->rx_packets += done;
2113 	eth->rx_bytes += bytes;
2114 	dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
2115 			  &dim_sample);
2116 	net_dim(&eth->rx_dim, dim_sample);
2117 
2118 	if (xdp_flush)
2119 		xdp_do_flush_map();
2120 
2121 	return done;
2122 }
2123 
2124 struct mtk_poll_state {
2125     struct netdev_queue *txq;
2126     unsigned int total;
2127     unsigned int done;
2128     unsigned int bytes;
2129 };
2130 
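/* Per-packet TX completion bookkeeping. Completions are batched per
 * netdev queue: the (done, bytes) counters are only flushed to
 * netdev_tx_completed_queue() when the queue changes, and mtk_poll_tx()
 * flushes the final batch, keeping BQL updates to roughly one call per
 * queue per poll.
 */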
2131 static void
2132 mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac,
2133 		 struct sk_buff *skb)
2134 {
2135 	struct netdev_queue *txq;
2136 	struct net_device *dev;
2137 	unsigned int bytes = skb->len;
2138 
2139 	state->total++;
2140 	eth->tx_packets++;
2141 	eth->tx_bytes += bytes;
2142 
2143 	dev = eth->netdev[mac];
2144 	if (!dev)
2145 		return;
2146 
2147 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2148 	if (state->txq == txq) {
2149 		state->done++;
2150 		state->bytes += bytes;
2151 		return;
2152 	}
2153 
2154 	if (state->txq)
2155 		netdev_tx_completed_queue(state->txq, state->done, state->bytes);
2156 
2157 	state->txq = txq;
2158 	state->done = 1;
2159 	state->bytes = bytes;
2160 }
2161 
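/* QDMA TX reclaim: walk the descriptor chain from the driver's last
 * release pointer (cpu) towards the hardware's current release pointer
 * (dma), unmapping buffers as we go. XDP frames are freed in bulk via
 * the xdp_frame_bulk helper. Returns the unused part of @budget.
 */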
2162 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
2163 			    struct mtk_poll_state *state)
2164 {
2165 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2166 	struct mtk_tx_ring *ring = &eth->tx_ring;
2167 	struct mtk_tx_buf *tx_buf;
2168 	struct xdp_frame_bulk bq;
2169 	struct mtk_tx_dma *desc;
2170 	u32 cpu, dma;
2171 
2172 	cpu = ring->last_free_ptr;
2173 	dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
2174 
2175 	desc = mtk_qdma_phys_to_virt(ring, cpu);
2176 	xdp_frame_bulk_init(&bq);
2177 
2178 	while ((cpu != dma) && budget) {
2179 		u32 next_cpu = desc->txd2;
2180 		int mac = 0;
2181 
2182 		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
2183 		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
2184 			break;
2185 
2186 		tx_buf = mtk_desc_to_tx_buf(ring, desc,
2187 					    eth->soc->txrx.txd_size);
2188 		if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
2189 			mac = 1;
2190 
2191 		if (!tx_buf->data)
2192 			break;
2193 
2194 		if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2195 			if (tx_buf->type == MTK_TYPE_SKB)
2196 				mtk_poll_tx_done(eth, state, mac, tx_buf->data);
2197 
2198 			budget--;
2199 		}
2200 		mtk_tx_unmap(eth, tx_buf, &bq, true);
2201 
2202 		ring->last_free = desc;
2203 		atomic_inc(&ring->free_count);
2204 
2205 		cpu = next_cpu;
2206 	}
2207 	xdp_flush_frame_bulk(&bq);
2208 
2209 	ring->last_free_ptr = cpu;
2210 	mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
2211 
2212 	return budget;
2213 }
2214 
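/* PDMA variant of the TX reclaim above: identical idea, but the ring is
 * walked by descriptor index (cpu_idx vs. MT7628_TX_DTX_IDX0) rather
 * than by physical descriptor address.
 */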
2215 static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
2216 			    struct mtk_poll_state *state)
2217 {
2218 	struct mtk_tx_ring *ring = &eth->tx_ring;
2219 	struct mtk_tx_buf *tx_buf;
2220 	struct xdp_frame_bulk bq;
2221 	struct mtk_tx_dma *desc;
2222 	u32 cpu, dma;
2223 
2224 	cpu = ring->cpu_idx;
2225 	dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
2226 	xdp_frame_bulk_init(&bq);
2227 
2228 	while ((cpu != dma) && budget) {
2229 		tx_buf = &ring->buf[cpu];
2230 		if (!tx_buf->data)
2231 			break;
2232 
2233 		if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2234 			if (tx_buf->type == MTK_TYPE_SKB)
2235 				mtk_poll_tx_done(eth, state, 0, tx_buf->data);
2236 			budget--;
2237 		}
2238 		mtk_tx_unmap(eth, tx_buf, &bq, true);
2239 
2240 		desc = ring->dma + cpu * eth->soc->txrx.txd_size;
2241 		ring->last_free = desc;
2242 		atomic_inc(&ring->free_count);
2243 
2244 		cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
2245 	}
2246 	xdp_flush_frame_bulk(&bq);
2247 
2248 	ring->cpu_idx = cpu;
2249 
2250 	return budget;
2251 }
2252 
2253 static int mtk_poll_tx(struct mtk_eth *eth, int budget)
2254 {
2255 	struct mtk_tx_ring *ring = &eth->tx_ring;
2256 	struct dim_sample dim_sample = {};
2257 	struct mtk_poll_state state = {};
2258 
2259 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2260 		budget = mtk_poll_tx_qdma(eth, budget, &state);
2261 	else
2262 		budget = mtk_poll_tx_pdma(eth, budget, &state);
2263 
2264 	if (state.txq)
2265 		netdev_tx_completed_queue(state.txq, state.done, state.bytes);
2266 
2267 	dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
2268 			  &dim_sample);
2269 	net_dim(&eth->tx_dim, dim_sample);
2270 
2271 	if (mtk_queue_stopped(eth) &&
2272 	    (atomic_read(&ring->free_count) > ring->thresh))
2273 		mtk_wake_queue(eth);
2274 
2275 	return state.total;
2276 }
2277 
2278 static void mtk_handle_status_irq(struct mtk_eth *eth)
2279 {
2280 	u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
2281 
2282 	if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
2283 		mtk_stats_update(eth);
2284 		mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
2285 			MTK_INT_STATUS2);
2286 	}
2287 }
2288 
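/* TX NAPI handler: acknowledge the TX-done interrupt status before
 * reclaiming descriptors, and only re-enable the interrupt once a poll
 * used less than the full budget and no new TX-done status is pending.
 */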
2289 static int mtk_napi_tx(struct napi_struct *napi, int budget)
2290 {
2291 	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
2292 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2293 	int tx_done = 0;
2294 
2295 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2296 		mtk_handle_status_irq(eth);
2297 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
2298 	tx_done = mtk_poll_tx(eth, budget);
2299 
2300 	if (unlikely(netif_msg_intr(eth))) {
2301 		dev_info(eth->dev,
2302 			 "done tx %d, intr 0x%08x/0x%x\n", tx_done,
2303 			 mtk_r32(eth, reg_map->tx_irq_status),
2304 			 mtk_r32(eth, reg_map->tx_irq_mask));
2305 	}
2306 
2307 	if (tx_done == budget)
2308 		return budget;
2309 
2310 	if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
2311 		return budget;
2312 
2313 	if (napi_complete_done(napi, tx_done))
2314 		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2315 
2316 	return tx_done;
2317 }
2318 
2319 static int mtk_napi_rx(struct napi_struct *napi, int budget)
2320 {
2321 	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
2322 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2323 	int rx_done_total = 0;
2324 
2325 	mtk_handle_status_irq(eth);
2326 
2327 	do {
2328 		int rx_done;
2329 
2330 		mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
2331 			reg_map->pdma.irq_status);
2332 		rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
2333 		rx_done_total += rx_done;
2334 
2335 		if (unlikely(netif_msg_intr(eth))) {
2336 			dev_info(eth->dev,
2337 				 "done rx %d, intr 0x%08x/0x%x\n", rx_done,
2338 				 mtk_r32(eth, reg_map->pdma.irq_status),
2339 				 mtk_r32(eth, reg_map->pdma.irq_mask));
2340 		}
2341 
2342 		if (rx_done_total == budget)
2343 			return budget;
2344 
2345 	} while (mtk_r32(eth, reg_map->pdma.irq_status) &
2346 		 eth->soc->txrx.rx_irq_done_mask);
2347 
2348 	if (napi_complete_done(napi, rx_done_total))
2349 		mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
2350 
2351 	return rx_done_total;
2352 }
2353 
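/* Allocate the TX ring and link the descriptors into a ring through
 * their txd2 next pointers. free_count starts at ring_size - 2,
 * presumably so the next_free and last_free positions can never
 * collide. QDMA parts also get their per-queue scheduler/shaper
 * registers programmed here (10 Mbps minimum rate).
 */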
2354 static int mtk_tx_alloc(struct mtk_eth *eth)
2355 {
2356 	const struct mtk_soc_data *soc = eth->soc;
2357 	struct mtk_tx_ring *ring = &eth->tx_ring;
2358 	int i, sz = soc->txrx.txd_size;
2359 	struct mtk_tx_dma_v2 *txd;
2360 	int ring_size;
2361 	u32 ofs, val;
2362 
2363 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
2364 		ring_size = MTK_QDMA_RING_SIZE;
2365 	else
2366 		ring_size = MTK_DMA_SIZE;
2367 
2368 	ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
2369 			       GFP_KERNEL);
2370 	if (!ring->buf)
2371 		goto no_tx_mem;
2372 
2373 	ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
2374 				       &ring->phys, GFP_KERNEL);
2375 	if (!ring->dma)
2376 		goto no_tx_mem;
2377 
2378 	for (i = 0; i < ring_size; i++) {
2379 		int next = (i + 1) % ring_size;
2380 		u32 next_ptr = ring->phys + next * sz;
2381 
2382 		txd = ring->dma + i * sz;
2383 		txd->txd2 = next_ptr;
2384 		txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
2385 		txd->txd4 = 0;
2386 		if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
2387 			txd->txd5 = 0;
2388 			txd->txd6 = 0;
2389 			txd->txd7 = 0;
2390 			txd->txd8 = 0;
2391 		}
2392 	}
2393 
2394 	/* On MT7688 (PDMA only) this driver uses the ring->dma structs only
2395 	 * as a software bookkeeping framework; the real HW descriptors are
2396 	 * the PDMA descriptors in ring->dma_pdma.
2397 	 */
2398 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2399 		ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
2400 						    &ring->phys_pdma, GFP_KERNEL);
2401 		if (!ring->dma_pdma)
2402 			goto no_tx_mem;
2403 
2404 		for (i = 0; i < ring_size; i++) {
2405 			ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
2406 			ring->dma_pdma[i].txd4 = 0;
2407 		}
2408 	}
2409 
2410 	ring->dma_size = ring_size;
2411 	atomic_set(&ring->free_count, ring_size - 2);
2412 	ring->next_free = ring->dma;
2413 	ring->last_free = (void *)txd;
2414 	ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz));
2415 	ring->thresh = MAX_SKB_FRAGS;
2416 
2417 	/* make sure that all changes to the dma ring are flushed before we
2418 	 * continue
2419 	 */
2420 	wmb();
2421 
2422 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2423 		mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
2424 		mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
2425 		mtk_w32(eth,
2426 			ring->phys + ((ring_size - 1) * sz),
2427 			soc->reg_map->qdma.crx_ptr);
2428 		mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
2429 
2430 		for (i = 0, ofs = 0; i < MTK_QDMA_NUM_QUEUES; i++) {
2431 			val = (QDMA_RES_THRES << 8) | QDMA_RES_THRES;
2432 			mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs);
2433 
2434 			val = MTK_QTX_SCH_MIN_RATE_EN |
2435 			      /* minimum: 10 Mbps */
2436 			      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
2437 			      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
2438 			      MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
2439 			if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
2440 				val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
2441 			mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
2442 			ofs += MTK_QTX_OFFSET;
2443 		}
2444 		val = MTK_QDMA_TX_SCH_MAX_WFQ | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
2445 		mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate);
2446 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
2447 			mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4);
2448 	} else {
2449 		mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
2450 		mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0);
2451 		mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
2452 		mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
2453 	}
2454 
2455 	return 0;
2456 
2457 no_tx_mem:
2458 	return -ENOMEM;
2459 }
2460 
2461 static void mtk_tx_clean(struct mtk_eth *eth)
2462 {
2463 	const struct mtk_soc_data *soc = eth->soc;
2464 	struct mtk_tx_ring *ring = &eth->tx_ring;
2465 	int i;
2466 
2467 	if (ring->buf) {
2468 		for (i = 0; i < ring->dma_size; i++)
2469 			mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
2470 		kfree(ring->buf);
2471 		ring->buf = NULL;
2472 	}
2473 
2474 	if (ring->dma) {
2475 		dma_free_coherent(eth->dma_dev,
2476 				  ring->dma_size * soc->txrx.txd_size,
2477 				  ring->dma, ring->phys);
2478 		ring->dma = NULL;
2479 	}
2480 
2481 	if (ring->dma_pdma) {
2482 		dma_free_coherent(eth->dma_dev,
2483 				  ring->dma_size * soc->txrx.txd_size,
2484 				  ring->dma_pdma, ring->phys_pdma);
2485 		ring->dma_pdma = NULL;
2486 	}
2487 }
2488 
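/* Allocate one RX ring. @rx_flag selects the flavour: the QDMA ring
 * (ring 0 only) lives in eth->rx_ring_qdma, HWLRO rings use larger
 * buffers, everything else uses ETH_DATA_LEN sized fragments. Buffers
 * come from a page_pool when XDP is usable, otherwise from netdev
 * fragments that are DMA-mapped by hand.
 */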
2489 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
2490 {
2491 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2492 	struct mtk_rx_ring *ring;
2493 	int rx_data_len, rx_dma_size;
2494 	int i;
2495 
2496 	if (rx_flag == MTK_RX_FLAGS_QDMA) {
2497 		if (ring_no)
2498 			return -EINVAL;
2499 		ring = &eth->rx_ring_qdma;
2500 	} else {
2501 		ring = &eth->rx_ring[ring_no];
2502 	}
2503 
2504 	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
2505 		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
2506 		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
2507 	} else {
2508 		rx_data_len = ETH_DATA_LEN;
2509 		rx_dma_size = MTK_DMA_SIZE;
2510 	}
2511 
2512 	ring->frag_size = mtk_max_frag_size(rx_data_len);
2513 	ring->buf_size = mtk_max_buf_size(ring->frag_size);
2514 	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
2515 			     GFP_KERNEL);
2516 	if (!ring->data)
2517 		return -ENOMEM;
2518 
2519 	if (mtk_page_pool_enabled(eth)) {
2520 		struct page_pool *pp;
2521 
2522 		pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
2523 					  rx_dma_size);
2524 		if (IS_ERR(pp))
2525 			return PTR_ERR(pp);
2526 
2527 		ring->page_pool = pp;
2528 	}
2529 
2530 	ring->dma = dma_alloc_coherent(eth->dma_dev,
2531 				       rx_dma_size * eth->soc->txrx.rxd_size,
2532 				       &ring->phys, GFP_KERNEL);
2533 	if (!ring->dma)
2534 		return -ENOMEM;
2535 
2536 	for (i = 0; i < rx_dma_size; i++) {
2537 		struct mtk_rx_dma_v2 *rxd;
2538 		dma_addr_t dma_addr;
2539 		void *data;
2540 
2541 		rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2542 		if (ring->page_pool) {
2543 			data = mtk_page_pool_get_buff(ring->page_pool,
2544 						      &dma_addr, GFP_KERNEL);
2545 			if (!data)
2546 				return -ENOMEM;
2547 		} else {
2548 			if (ring->frag_size <= PAGE_SIZE)
2549 				data = netdev_alloc_frag(ring->frag_size);
2550 			else
2551 				data = mtk_max_lro_buf_alloc(GFP_KERNEL);
2552 
2553 			if (!data)
2554 				return -ENOMEM;
2555 
2556 			dma_addr = dma_map_single(eth->dma_dev,
2557 				data + NET_SKB_PAD + eth->ip_align,
2558 				ring->buf_size, DMA_FROM_DEVICE);
2559 			if (unlikely(dma_mapping_error(eth->dma_dev,
2560 						       dma_addr))) {
2561 				skb_free_frag(data);
2562 				return -ENOMEM;
2563 			}
2564 		}
2565 		rxd->rxd1 = (unsigned int)dma_addr;
2566 		ring->data[i] = data;
2567 
2568 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2569 			rxd->rxd2 = RX_DMA_LSO;
2570 		else
2571 			rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2572 
2573 		rxd->rxd3 = 0;
2574 		rxd->rxd4 = 0;
2575 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2576 			rxd->rxd5 = 0;
2577 			rxd->rxd6 = 0;
2578 			rxd->rxd7 = 0;
2579 			rxd->rxd8 = 0;
2580 		}
2581 	}
2582 
2583 	ring->dma_size = rx_dma_size;
2584 	ring->calc_idx_update = false;
2585 	ring->calc_idx = rx_dma_size - 1;
2586 	if (rx_flag == MTK_RX_FLAGS_QDMA)
2587 		ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
2588 				    ring_no * MTK_QRX_OFFSET;
2589 	else
2590 		ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
2591 				    ring_no * MTK_QRX_OFFSET;
2592 	/* make sure that all changes to the dma ring are flushed before we
2593 	 * continue
2594 	 */
2595 	wmb();
2596 
2597 	if (rx_flag == MTK_RX_FLAGS_QDMA) {
2598 		mtk_w32(eth, ring->phys,
2599 			reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2600 		mtk_w32(eth, rx_dma_size,
2601 			reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2602 		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2603 			reg_map->qdma.rst_idx);
2604 	} else {
2605 		mtk_w32(eth, ring->phys,
2606 			reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2607 		mtk_w32(eth, rx_dma_size,
2608 			reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2609 		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2610 			reg_map->pdma.rst_idx);
2611 	}
2612 	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
2613 
2614 	return 0;
2615 }
2616 
2617 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
2618 {
2619 	int i;
2620 
2621 	if (ring->data && ring->dma) {
2622 		for (i = 0; i < ring->dma_size; i++) {
2623 			struct mtk_rx_dma *rxd;
2624 
2625 			if (!ring->data[i])
2626 				continue;
2627 
2628 			rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2629 			if (!rxd->rxd1)
2630 				continue;
2631 
2632 			dma_unmap_single(eth->dma_dev, rxd->rxd1,
2633 					 ring->buf_size, DMA_FROM_DEVICE);
2634 			mtk_rx_put_buff(ring, ring->data[i], false);
2635 		}
2636 		kfree(ring->data);
2637 		ring->data = NULL;
2638 	}
2639 
2640 	if (ring->dma) {
2641 		dma_free_coherent(eth->dma_dev,
2642 				  ring->dma_size * eth->soc->txrx.rxd_size,
2643 				  ring->dma, ring->phys);
2644 		ring->dma = NULL;
2645 	}
2646 
2647 	if (ring->page_pool) {
2648 		if (xdp_rxq_info_is_reg(&ring->xdp_q))
2649 			xdp_rxq_info_unreg(&ring->xdp_q);
2650 		page_pool_destroy(ring->page_pool);
2651 		ring->page_pool = NULL;
2652 	}
2653 }
2654 
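/* Program the HW LRO engine: rings 1..MTK_MAX_RX_RING_NUM-1 are put
 * into auto-learn mode with their age/aggregation timers expressed in
 * 20us units, while ring 0 stays a plain RX ring. The global LRO enable
 * bit is written last, after all ring and threshold setup.
 */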
2655 static int mtk_hwlro_rx_init(struct mtk_eth *eth)
2656 {
2657 	int i;
2658 	u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
2659 	u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
2660 
2661 	/* set LRO rings to auto-learn mode */
2662 	ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
2663 
2664 	/* validate LRO ring */
2665 	ring_ctrl_dw2 |= MTK_RING_VLD;
2666 
2667 	/* set AGE timer (unit: 20us) */
2668 	ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
2669 	ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
2670 
2671 	/* set max AGG timer (unit: 20us) */
2672 	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
2673 
2674 	/* set max LRO AGG count */
2675 	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
2676 	ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
2677 
2678 	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2679 		mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
2680 		mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
2681 		mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
2682 	}
2683 
2684 	/* IPv4 checksum update enable */
2685 	lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
2686 
2687 	/* switch priority comparison to packet count mode */
2688 	lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
2689 
2690 	/* bandwidth threshold setting */
2691 	mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
2692 
2693 	/* auto-learn score delta setting */
2694 	mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
2695 
2696 	/* set refresh timer for altering flows to 1 sec. (unit: 20us) */
2697 	mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
2698 		MTK_PDMA_LRO_ALT_REFRESH_TIMER);
2699 
2700 	/* set HW LRO mode & the max aggregation count for rx packets */
2701 	lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
2702 
2703 	/* the minimal remaining room of SDL0 in RXD for LRO aggregation */
2704 	lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
2705 
2706 	/* enable HW LRO */
2707 	lro_ctrl_dw0 |= MTK_LRO_EN;
2708 
2709 	mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
2710 	mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
2711 
2712 	return 0;
2713 }
2714 
2715 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
2716 {
2717 	int i;
2718 	u32 val;
2719 
2720 	/* relinquish lro rings, flush aggregated packets */
2721 	mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
2722 
2723 	/* wait for the relinquish requests to complete */
2724 	for (i = 0; i < 10; i++) {
2725 		val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
2726 		if (val & MTK_LRO_RING_RELINQUISH_DONE) {
2727 			msleep(20);
2728 			continue;
2729 		}
2730 		break;
2731 	}
2732 
2733 	/* invalidate lro rings */
2734 	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
2735 		mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
2736 
2737 	/* disable HW LRO */
2738 	mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
2739 }
2740 
2741 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
2742 {
2743 	u32 reg_val;
2744 
2745 	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2746 
2747 	/* invalidate the IP setting */
2748 	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2749 
2750 	mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
2751 
2752 	/* validate the IP setting */
2753 	mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2754 }
2755 
2756 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
2757 {
2758 	u32 reg_val;
2759 
2760 	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2761 
2762 	/* invalidate the IP setting */
2763 	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2764 
2765 	mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
2766 }
2767 
2768 static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
2769 {
2770 	int cnt = 0;
2771 	int i;
2772 
2773 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2774 		if (mac->hwlro_ip[i])
2775 			cnt++;
2776 	}
2777 
2778 	return cnt;
2779 }
2780 
2781 static int mtk_hwlro_add_ipaddr(struct net_device *dev,
2782 				struct ethtool_rxnfc *cmd)
2783 {
2784 	struct ethtool_rx_flow_spec *fsp =
2785 		(struct ethtool_rx_flow_spec *)&cmd->fs;
2786 	struct mtk_mac *mac = netdev_priv(dev);
2787 	struct mtk_eth *eth = mac->hw;
2788 	int hwlro_idx;
2789 
2790 	if ((fsp->flow_type != TCP_V4_FLOW) ||
2791 	    (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
2792 	    (fsp->location > 1))
2793 		return -EINVAL;
2794 
2795 	mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
2796 	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2797 
2798 	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2799 
2800 	mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
2801 
2802 	return 0;
2803 }
2804 
2805 static int mtk_hwlro_del_ipaddr(struct net_device *dev,
2806 				struct ethtool_rxnfc *cmd)
2807 {
2808 	struct ethtool_rx_flow_spec *fsp =
2809 		(struct ethtool_rx_flow_spec *)&cmd->fs;
2810 	struct mtk_mac *mac = netdev_priv(dev);
2811 	struct mtk_eth *eth = mac->hw;
2812 	int hwlro_idx;
2813 
2814 	if (fsp->location > 1)
2815 		return -EINVAL;
2816 
2817 	mac->hwlro_ip[fsp->location] = 0;
2818 	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2819 
2820 	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2821 
2822 	mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2823 
2824 	return 0;
2825 }
2826 
2827 static void mtk_hwlro_netdev_disable(struct net_device *dev)
2828 {
2829 	struct mtk_mac *mac = netdev_priv(dev);
2830 	struct mtk_eth *eth = mac->hw;
2831 	int i, hwlro_idx;
2832 
2833 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2834 		mac->hwlro_ip[i] = 0;
2835 		hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
2836 
2837 		mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2838 	}
2839 
2840 	mac->hwlro_ip_cnt = 0;
2841 }
2842 
2843 static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
2844 				    struct ethtool_rxnfc *cmd)
2845 {
2846 	struct mtk_mac *mac = netdev_priv(dev);
2847 	struct ethtool_rx_flow_spec *fsp =
2848 		(struct ethtool_rx_flow_spec *)&cmd->fs;
2849 
2850 	if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
2851 		return -EINVAL;
2852 
2853 	/* only the TCP IPv4 destination address is meaningful */
2854 	fsp->flow_type = TCP_V4_FLOW;
2855 	fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
2856 	fsp->m_u.tcp_ip4_spec.ip4dst = 0;
2857 
2858 	fsp->h_u.tcp_ip4_spec.ip4src = 0;
2859 	fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
2860 	fsp->h_u.tcp_ip4_spec.psrc = 0;
2861 	fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
2862 	fsp->h_u.tcp_ip4_spec.pdst = 0;
2863 	fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
2864 	fsp->h_u.tcp_ip4_spec.tos = 0;
2865 	fsp->m_u.tcp_ip4_spec.tos = 0xff;
2866 
2867 	return 0;
2868 }
2869 
2870 static int mtk_hwlro_get_fdir_all(struct net_device *dev,
2871 				  struct ethtool_rxnfc *cmd,
2872 				  u32 *rule_locs)
2873 {
2874 	struct mtk_mac *mac = netdev_priv(dev);
2875 	int cnt = 0;
2876 	int i;
2877 
2878 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2879 		if (mac->hwlro_ip[i]) {
2880 			rule_locs[cnt] = i;
2881 			cnt++;
2882 		}
2883 	}
2884 
2885 	cmd->rule_cnt = cnt;
2886 
2887 	return 0;
2888 }
2889 
2890 static netdev_features_t mtk_fix_features(struct net_device *dev,
2891 					  netdev_features_t features)
2892 {
2893 	if (!(features & NETIF_F_LRO)) {
2894 		struct mtk_mac *mac = netdev_priv(dev);
2895 		int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2896 
2897 		if (ip_cnt) {
2898 			netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
2899 
2900 			features |= NETIF_F_LRO;
2901 		}
2902 	}
2903 
2904 	return features;
2905 }
2906 
2907 static int mtk_set_features(struct net_device *dev, netdev_features_t features)
2908 {
2909 	struct mtk_mac *mac = netdev_priv(dev);
2910 	struct mtk_eth *eth = mac->hw;
2911 	netdev_features_t diff = dev->features ^ features;
2912 	int i;
2913 
2914 	if ((diff & NETIF_F_LRO) && !(features & NETIF_F_LRO))
2915 		mtk_hwlro_netdev_disable(dev);
2916 
2917 	/* Set RX VLAN offloading */
2918 	if (!(diff & NETIF_F_HW_VLAN_CTAG_RX))
2919 		return 0;
2920 
2921 	mtk_w32(eth, !!(features & NETIF_F_HW_VLAN_CTAG_RX),
2922 		MTK_CDMP_EG_CTRL);
2923 
2924 	/* sync features with other MAC */
2925 	/* sync features with the other MACs */
2926 		if (!eth->netdev[i] || eth->netdev[i] == dev)
2927 			continue;
2928 		eth->netdev[i]->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
2929 		eth->netdev[i]->features |= features & NETIF_F_HW_VLAN_CTAG_RX;
2930 	}
2931 
2932 	return 0;
2933 }
2934 
2935 /* wait for DMA to finish whatever it is doing before we start using it again */
2936 static int mtk_dma_busy_wait(struct mtk_eth *eth)
2937 {
2938 	unsigned int reg;
2939 	int ret;
2940 	u32 val;
2941 
2942 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2943 		reg = eth->soc->reg_map->qdma.glo_cfg;
2944 	else
2945 		reg = eth->soc->reg_map->pdma.glo_cfg;
2946 
2947 	ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
2948 					!(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
2949 					5, MTK_DMA_BUSY_TIMEOUT_US);
2950 	if (ret)
2951 		dev_err(eth->dev, "DMA init timeout\n");
2952 
2953 	return ret;
2954 }
2955 
2956 static int mtk_dma_init(struct mtk_eth *eth)
2957 {
2958 	int err;
2959 	u32 i;
2960 
2961 	if (mtk_dma_busy_wait(eth))
2962 		return -EBUSY;
2963 
2964 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2965 		/* QDMA needs scratch memory for internal reordering of the
2966 		 * descriptors
2967 		 */
2968 		err = mtk_init_fq_dma(eth);
2969 		if (err)
2970 			return err;
2971 	}
2972 
2973 	err = mtk_tx_alloc(eth);
2974 	if (err)
2975 		return err;
2976 
2977 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2978 		err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
2979 		if (err)
2980 			return err;
2981 	}
2982 
2983 	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
2984 	if (err)
2985 		return err;
2986 
2987 	if (eth->hwlro) {
2988 		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2989 			err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
2990 			if (err)
2991 				return err;
2992 		}
2993 		err = mtk_hwlro_rx_init(eth);
2994 		if (err)
2995 			return err;
2996 	}
2997 
2998 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2999 		/* Enable random early drop and set drop threshold
3000 		 * automatically
3001 		 */
3002 		mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
3003 			FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
3004 		mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
3005 	}
3006 
3007 	return 0;
3008 }
3009 
3010 static void mtk_dma_free(struct mtk_eth *eth)
3011 {
3012 	const struct mtk_soc_data *soc = eth->soc;
3013 	int i;
3014 
3015 	for (i = 0; i < MTK_MAC_COUNT; i++)
3016 		if (eth->netdev[i])
3017 			netdev_reset_queue(eth->netdev[i]);
3018 	if (eth->scratch_ring) {
3019 		dma_free_coherent(eth->dma_dev,
3020 				  MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
3021 				  eth->scratch_ring, eth->phy_scratch_ring);
3022 		eth->scratch_ring = NULL;
3023 		eth->phy_scratch_ring = 0;
3024 	}
3025 	mtk_tx_clean(eth);
3026 	mtk_rx_clean(eth, &eth->rx_ring[0]);
3027 	mtk_rx_clean(eth, &eth->rx_ring_qdma);
3028 
3029 	if (eth->hwlro) {
3030 		mtk_hwlro_rx_uninit(eth);
3031 		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
3032 			mtk_rx_clean(eth, &eth->rx_ring[i]);
3033 	}
3034 
3035 	kfree(eth->scratch_head);
3036 }
3037 
3038 static bool mtk_hw_reset_check(struct mtk_eth *eth)
3039 {
3040 	u32 val = mtk_r32(eth, MTK_INT_STATUS2);
3041 
3042 	return (val & MTK_FE_INT_FQ_EMPTY) || (val & MTK_FE_INT_RFIFO_UF) ||
3043 	       (val & MTK_FE_INT_RFIFO_OV) || (val & MTK_FE_INT_TSO_FAIL) ||
3044 	       (val & MTK_FE_INT_TSO_ALIGN) || (val & MTK_FE_INT_TSO_ILLEGAL);
3045 }
3046 
3047 static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
3048 {
3049 	struct mtk_mac *mac = netdev_priv(dev);
3050 	struct mtk_eth *eth = mac->hw;
3051 
3052 	if (test_bit(MTK_RESETTING, &eth->state))
3053 		return;
3054 
3055 	if (!mtk_hw_reset_check(eth))
3056 		return;
3057 
3058 	eth->netdev[mac->id]->stats.tx_errors++;
3059 	netif_err(eth, tx_err, dev, "transmit timed out\n");
3060 
3061 	schedule_work(&eth->pending_work);
3062 }
3063 
3064 static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
3065 {
3066 	struct mtk_eth *eth = _eth;
3067 
3068 	eth->rx_events++;
3069 	if (likely(napi_schedule_prep(&eth->rx_napi))) {
3070 		__napi_schedule(&eth->rx_napi);
3071 		mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
3072 	}
3073 
3074 	return IRQ_HANDLED;
3075 }
3076 
3077 static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
3078 {
3079 	struct mtk_eth *eth = _eth;
3080 
3081 	eth->tx_events++;
3082 	if (likely(napi_schedule_prep(&eth->tx_napi))) {
3083 		__napi_schedule(&eth->tx_napi);
3084 		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3085 	}
3086 
3087 	return IRQ_HANDLED;
3088 }
3089 
3090 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
3091 {
3092 	struct mtk_eth *eth = _eth;
3093 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3094 
3095 	if (mtk_r32(eth, reg_map->pdma.irq_mask) &
3096 	    eth->soc->txrx.rx_irq_done_mask) {
3097 		if (mtk_r32(eth, reg_map->pdma.irq_status) &
3098 		    eth->soc->txrx.rx_irq_done_mask)
3099 			mtk_handle_irq_rx(irq, _eth);
3100 	}
3101 	if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
3102 		if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
3103 			mtk_handle_irq_tx(irq, _eth);
3104 	}
3105 
3106 	return IRQ_HANDLED;
3107 }
3108 
3109 #ifdef CONFIG_NET_POLL_CONTROLLER
3110 static void mtk_poll_controller(struct net_device *dev)
3111 {
3112 	struct mtk_mac *mac = netdev_priv(dev);
3113 	struct mtk_eth *eth = mac->hw;
3114 
3115 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3116 	mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
3117 	mtk_handle_irq_rx(eth->irq[2], dev);
3118 	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3119 	mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
3120 }
3121 #endif
3122 
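/* Bring up the DMA engines: allocate all rings via mtk_dma_init(), then
 * set the enable bits. QDMA-capable SoCs enable both the QDMA and PDMA
 * engines; MT7628-class parts drive TX and RX through PDMA alone.
 */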
3123 static int mtk_start_dma(struct mtk_eth *eth)
3124 {
3125 	u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
3126 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3127 	int err;
3128 
3129 	err = mtk_dma_init(eth);
3130 	if (err) {
3131 		mtk_dma_free(eth);
3132 		return err;
3133 	}
3134 
3135 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3136 		val = mtk_r32(eth, reg_map->qdma.glo_cfg);
3137 		val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3138 		       MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
3139 		       MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
3140 
3141 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
3142 			val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
3143 			       MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
3144 			       MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN;
3145 		else
3146 			val |= MTK_RX_BT_32DWORDS;
3147 		mtk_w32(eth, val, reg_map->qdma.glo_cfg);
3148 
3149 		mtk_w32(eth,
3150 			MTK_RX_DMA_EN | rx_2b_offset |
3151 			MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
3152 			reg_map->pdma.glo_cfg);
3153 	} else {
3154 		mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3155 			MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
3156 			reg_map->pdma.glo_cfg);
3157 	}
3158 
3159 	return 0;
3160 }
3161 
3162 static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
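/* Apply @config to every GDMA forwarding register: the low forwarding
 * bits are cleared first, RX checksum offload (IP/TCP/UDP) is enabled,
 * and special-tag parsing is switched on when GDM1 faces a DSA master.
 * Finally the PSE is pulsed through a reset.
 */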
3163 {
3164 	int i;
3165 
3166 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3167 		return;
3168 
3169 	for (i = 0; i < MTK_MAC_COUNT; i++) {
3170 		u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
3171 
3172 		/* by default, set up the forward port to send frames to PDMA */
3173 		val &= ~0xffff;
3174 
3175 		/* Enable RX checksum */
3176 		val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
3177 
3178 		val |= config;
3179 
3180 		if (!i && eth->netdev[0] && netdev_uses_dsa(eth->netdev[0]))
3181 			val |= MTK_GDMA_SPECIAL_TAG;
3182 
3183 		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
3184 	}
3185 	/* Reset and enable PSE */
3186 	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
3187 	mtk_w32(eth, 0, MTK_RST_GL);
3188 }
3189 
3190
3192 {
3193 #if IS_ENABLED(CONFIG_NET_DSA)
3194 	return netdev_uses_dsa(dev) &&
3195 	       dev->dsa_ptr->tag_ops->proto == DSA_TAG_PROTO_MTK;
3196 #else
3197 	return false;
3198 #endif
3199 }
3200 
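/* Netdevice notifier: when the link speed of a DSA switch port stacked
 * on top of one of our MACs changes, propagate the new speed to the
 * matching QDMA TX queue shaper (DSA port n maps to queue n + 3).
 */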
3201 static int mtk_device_event(struct notifier_block *n, unsigned long event, void *ptr)
3202 {
3203 	struct mtk_mac *mac = container_of(n, struct mtk_mac, device_notifier);
3204 	struct mtk_eth *eth = mac->hw;
3205 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3206 	struct ethtool_link_ksettings s;
3207 	struct net_device *ldev;
3208 	struct list_head *iter;
3209 	struct dsa_port *dp;
3210 
3211 	if (event != NETDEV_CHANGE)
3212 		return NOTIFY_DONE;
3213 
3214 	netdev_for_each_lower_dev(dev, ldev, iter) {
3215 		if (netdev_priv(ldev) == mac)
3216 			goto found;
3217 	}
3218 
3219 	return NOTIFY_DONE;
3220 
3221 found:
3222 	if (!dsa_slave_dev_check(dev))
3223 		return NOTIFY_DONE;
3224 
3225 	if (__ethtool_get_link_ksettings(dev, &s))
3226 		return NOTIFY_DONE;
3227 
3228 	if (s.base.speed == 0 || s.base.speed == ((__u32)-1))
3229 		return NOTIFY_DONE;
3230 
3231 	dp = dsa_port_from_netdev(dev);
3232 	if (dp->index >= MTK_QDMA_NUM_QUEUES)
3233 		return NOTIFY_DONE;
3234 
3235 	mtk_set_queue_speed(eth, dp->index + 3, s.base.speed);
3236 
3237 	return NOTIFY_DONE;
3238 }
3239 
3240 static int mtk_open(struct net_device *dev)
3241 {
3242 	struct mtk_mac *mac = netdev_priv(dev);
3243 	struct mtk_eth *eth = mac->hw;
3244 	int i, err;
3245 
3246 	if (mtk_uses_dsa(dev) && !eth->prog) {
3247 		for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
3248 			struct metadata_dst *md_dst = eth->dsa_meta[i];
3249 
3250 			if (md_dst)
3251 				continue;
3252 
3253 			md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
3254 						    GFP_KERNEL);
3255 			if (!md_dst)
3256 				return -ENOMEM;
3257 
3258 			md_dst->u.port_info.port_id = i;
3259 			eth->dsa_meta[i] = md_dst;
3260 		}
3261 	} else {
3262 		/* Hardware special tag parsing needs to be disabled if at least
3263 		 * one MAC does not use DSA.
3264 		 */
3265 		u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3266 		val &= ~MTK_CDMP_STAG_EN;
3267 		mtk_w32(eth, val, MTK_CDMP_IG_CTRL);
3268 	}
3269 
3270 	err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
3271 	if (err) {
3272 		netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
3273 			   err);
3274 		return err;
3275 	}
3276 
3277 	/* we run 2 netdevs on the same dma ring so we only bring it up once */
3278 	if (!refcount_read(&eth->dma_refcnt)) {
3279 		const struct mtk_soc_data *soc = eth->soc;
3280 		u32 gdm_config;
3281 		int i;
3282 
3283 		err = mtk_start_dma(eth);
3284 		if (err) {
3285 			phylink_disconnect_phy(mac->phylink);
3286 			return err;
3287 		}
3288 
3289 		for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3290 			mtk_ppe_start(eth->ppe[i]);
3291 
3292 		gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe
3293 						  : MTK_GDMA_TO_PDMA;
3294 		mtk_gdm_config(eth, gdm_config);
3295 
3296 		napi_enable(&eth->tx_napi);
3297 		napi_enable(&eth->rx_napi);
3298 		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3299 		mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
3300 		refcount_set(&eth->dma_refcnt, 1);
3301 	}
3302 	else
3303 		refcount_inc(&eth->dma_refcnt);
3304 
3305 	phylink_start(mac->phylink);
3306 	netif_tx_start_all_queues(dev);
3307 
3308 	return 0;
3309 }
3310 
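/* Clear the TX/RX enable bits in @glo_cfg under the page_lock, then
 * poll the busy flags for up to ~200ms (10 x 20ms) so in-flight DMA can
 * drain before the rings are torn down.
 */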
3311 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
3312 {
3313 	u32 val;
3314 	int i;
3315 
3316 	/* stop the dma engine */
3317 	spin_lock_bh(&eth->page_lock);
3318 	val = mtk_r32(eth, glo_cfg);
3319 	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
3320 		glo_cfg);
3321 	spin_unlock_bh(&eth->page_lock);
3322 
3323 	/* wait for dma stop */
3324 	for (i = 0; i < 10; i++) {
3325 		val = mtk_r32(eth, glo_cfg);
3326 		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
3327 			msleep(20);
3328 			continue;
3329 		}
3330 		break;
3331 	}
3332 }
3333 
3334 static int mtk_stop(struct net_device *dev)
3335 {
3336 	struct mtk_mac *mac = netdev_priv(dev);
3337 	struct mtk_eth *eth = mac->hw;
3338 	int i;
3339 
3340 	phylink_stop(mac->phylink);
3341 
3342 	netif_tx_disable(dev);
3343 
3344 	phylink_disconnect_phy(mac->phylink);
3345 
3346 	/* only shut down DMA if this is the last user */
3347 	if (!refcount_dec_and_test(&eth->dma_refcnt))
3348 		return 0;
3349 
3350 	mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
3351 
3352 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3353 	mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
3354 	napi_disable(&eth->tx_napi);
3355 	napi_disable(&eth->rx_napi);
3356 
3357 	cancel_work_sync(&eth->rx_dim.work);
3358 	cancel_work_sync(&eth->tx_dim.work);
3359 
3360 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3361 		mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
3362 	mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
3363 
3364 	mtk_dma_free(eth);
3365 
3366 	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3367 		mtk_ppe_stop(eth->ppe[i]);
3368 
3369 	return 0;
3370 }
3371 
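/* Attach or detach an XDP program. HWLRO and MTUs larger than the
 * page_pool buffer size are incompatible with XDP. Toggling between
 * "no program" and "program" restarts the device, presumably so the RX
 * buffer scheme can be switched; swapping one program for another is
 * done without a restart.
 */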
3372 static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
3373 			 struct netlink_ext_ack *extack)
3374 {
3375 	struct mtk_mac *mac = netdev_priv(dev);
3376 	struct mtk_eth *eth = mac->hw;
3377 	struct bpf_prog *old_prog;
3378 	bool need_update;
3379 
3380 	if (eth->hwlro) {
3381 		NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
3382 		return -EOPNOTSUPP;
3383 	}
3384 
3385 	if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
3386 		NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
3387 		return -EOPNOTSUPP;
3388 	}
3389 
3390 	need_update = !!eth->prog != !!prog;
3391 	if (netif_running(dev) && need_update)
3392 		mtk_stop(dev);
3393 
3394 	old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
3395 	if (old_prog)
3396 		bpf_prog_put(old_prog);
3397 
3398 	if (netif_running(dev) && need_update)
3399 		return mtk_open(dev);
3400 
3401 	return 0;
3402 }
3403 
3404 static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3405 {
3406 	switch (xdp->command) {
3407 	case XDP_SETUP_PROG:
3408 		return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
3409 	default:
3410 		return -EINVAL;
3411 	}
3412 }
3413 
3414 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
3415 {
3416 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3417 			   reset_bits,
3418 			   reset_bits);
3419 
3420 	usleep_range(1000, 1100);
3421 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3422 			   reset_bits,
3423 			   ~reset_bits);
3424 	mdelay(10);
3425 }
3426 
3427 static void mtk_clk_disable(struct mtk_eth *eth)
3428 {
3429 	int clk;
3430 
3431 	for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
3432 		clk_disable_unprepare(eth->clks[clk]);
3433 }
3434 
3435 static int mtk_clk_enable(struct mtk_eth *eth)
3436 {
3437 	int clk, ret;
3438 
3439 	for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
3440 		ret = clk_prepare_enable(eth->clks[clk]);
3441 		if (ret)
3442 			goto err_disable_clks;
3443 	}
3444 
3445 	return 0;
3446 
3447 err_disable_clks:
3448 	while (--clk >= 0)
3449 		clk_disable_unprepare(eth->clks[clk]);
3450 
3451 	return ret;
3452 }
3453 
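/* Dynamic interrupt moderation worker for the RX path (mtk_dim_tx() is
 * the TX analogue): translate the current net_dim profile into the
 * delay-interrupt register. The hardware counts time in 20us units, so
 * e.g. a 50us profile becomes DIV_ROUND_UP(50, 20) = 3 ticks; both the
 * time and packet fields are clamped to their register masks.
 */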
3454 static void mtk_dim_rx(struct work_struct *work)
3455 {
3456 	struct dim *dim = container_of(work, struct dim, work);
3457 	struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
3458 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3459 	struct dim_cq_moder cur_profile;
3460 	u32 val, cur;
3461 
3462 	cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
3463 						dim->profile_ix);
3464 	spin_lock_bh(&eth->dim_lock);
3465 
3466 	val = mtk_r32(eth, reg_map->pdma.delay_irq);
3467 	val &= MTK_PDMA_DELAY_TX_MASK;
3468 	val |= MTK_PDMA_DELAY_RX_EN;
3469 
3470 	cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3471 	val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
3472 
3473 	cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3474 	val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
3475 
3476 	mtk_w32(eth, val, reg_map->pdma.delay_irq);
3477 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3478 		mtk_w32(eth, val, reg_map->qdma.delay_irq);
3479 
3480 	spin_unlock_bh(&eth->dim_lock);
3481 
3482 	dim->state = DIM_START_MEASURE;
3483 }
3484 
3485 static void mtk_dim_tx(struct work_struct *work)
3486 {
3487 	struct dim *dim = container_of(work, struct dim, work);
3488 	struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
3489 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3490 	struct dim_cq_moder cur_profile;
3491 	u32 val, cur;
3492 
3493 	cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
3494 						dim->profile_ix);
3495 	spin_lock_bh(&eth->dim_lock);
3496 
3497 	val = mtk_r32(eth, reg_map->pdma.delay_irq);
3498 	val &= MTK_PDMA_DELAY_RX_MASK;
3499 	val |= MTK_PDMA_DELAY_TX_EN;
3500 
3501 	cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3502 	val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;
3503 
3504 	cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3505 	val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
3506 
3507 	mtk_w32(eth, val, reg_map->pdma.delay_irq);
3508 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3509 		mtk_w32(eth, val, reg_map->qdma.delay_irq);
3510 
3511 	spin_unlock_bh(&eth->dim_lock);
3512 
3513 	dim->state = DIM_START_MEASURE;
3514 }
3515 
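/* Pick the smallest MAC_MCR_MAX_RX bucket (1518/1536/1552/2048 bytes)
 * that still fits @val and write it back only if the MCR value actually
 * changes.
 */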
3516 static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val)
3517 {
3518 	struct mtk_eth *eth = mac->hw;
3519 	u32 mcr_cur, mcr_new;
3520 
3521 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3522 		return;
3523 
3524 	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
3525 	mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
3526 
3527 	if (val <= 1518)
3528 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
3529 	else if (val <= 1536)
3530 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
3531 	else if (val <= 1552)
3532 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
3533 	else
3534 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
3535 
3536 	if (mcr_new != mcr_cur)
3537 		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
3538 }
3539 
3540 static void mtk_hw_reset(struct mtk_eth *eth)
3541 {
3542 	u32 val;
3543 
3544 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3545 		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
3546 		val = RSTCTRL_PPE0_V2;
3547 	} else {
3548 		val = RSTCTRL_PPE0;
3549 	}
3550 
3551 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3552 		val |= RSTCTRL_PPE1;
3553 
3554 	ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
3555 
3556 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
3557 		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
3558 			     0x3ffffff);
3559 }
3560 
3561 static u32 mtk_hw_reset_read(struct mtk_eth *eth)
3562 {
3563 	u32 val;
3564 
3565 	regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
3566 	return val;
3567 }
3568 
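/* Warm reset sequence: assert RSTCTRL_FE on its own, then additionally
 * assert the ETH/PPE reset bits, then deassert everything, verifying
 * the register state after each stage. If the initial FE assertion
 * never sticks, fall back to a full cold mtk_hw_reset().
 */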
3569 static void mtk_hw_warm_reset(struct mtk_eth *eth)
3570 {
3571 	u32 rst_mask, val;
3572 
3573 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE,
3574 			   RSTCTRL_FE);
3575 	if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val,
3576 				      val & RSTCTRL_FE, 1, 1000)) {
3577 		dev_err(eth->dev, "warm reset failed\n");
3578 		mtk_hw_reset(eth);
3579 		return;
3580 	}
3581 
3582 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
3583 		rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2;
3584 	else
3585 		rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0;
3586 
3587 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3588 		rst_mask |= RSTCTRL_PPE1;
3589 
3590 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask);
3591 
3592 	udelay(1);
3593 	val = mtk_hw_reset_read(eth);
3594 	if (!(val & rst_mask))
3595 		dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n",
3596 			val, rst_mask);
3597 
3598 	rst_mask |= RSTCTRL_FE;
3599 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask);
3600 
3601 	udelay(1);
3602 	val = mtk_hw_reset_read(eth);
3603 	if (val & rst_mask)
3604 		dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n",
3605 			val, rst_mask);
3606 }
3607 
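/* Heuristic DMA hang detection used by the reset monitor. A subsystem
 * (WDMA, QDMA or ADMA) must look stuck on three consecutive samples
 * before a reset is requested; a sample in which nothing looks stuck
 * clears all three counters.
 */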
3608 static bool mtk_hw_check_dma_hang(struct mtk_eth *eth)
3609 {
3610 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3611 	bool gmac1_tx, gmac2_tx, gdm1_tx, gdm2_tx;
3612 	bool oq_hang, cdm1_busy, adma_busy;
3613 	bool wtx_busy, cdm_full, oq_free;
3614 	u32 wdidx, val, gdm1_fc, gdm2_fc;
3615 	bool qfsm_hang, qfwd_hang;
3616 	bool ret = false;
3617 
3618 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3619 		return false;
3620 
3621 	/* WDMA sanity checks */
3622 	wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc);
3623 
3624 	val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204);
3625 	wtx_busy = FIELD_GET(MTK_TX_DMA_BUSY, val);
3626 
3627 	val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230);
3628 	cdm_full = !FIELD_GET(MTK_CDM_TXFIFO_RDY, val);
3629 
3630 	oq_free  = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) &&
3631 		    !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) &&
3632 		    !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16)));
3633 
3634 	if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) {
3635 		if (++eth->reset.wdma_hang_count > 2) {
3636 			eth->reset.wdma_hang_count = 0;
3637 			ret = true;
3638 		}
3639 		goto out;
3640 	}
3641 
3642 	/* QDMA sanity checks */
3643 	qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234);
3644 	qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308);
3645 
3646 	gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0;
3647 	gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0;
3648 	gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1;
3649 	gmac2_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1;
3650 	gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24);
3651 	gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64);
3652 
3653 	if (qfsm_hang && qfwd_hang &&
3654 	    ((gdm1_tx && gmac1_tx && gdm1_fc < 1) ||
3655 	     (gdm2_tx && gmac2_tx && gdm2_fc < 1))) {
3656 		if (++eth->reset.qdma_hang_count > 2) {
3657 			eth->reset.qdma_hang_count = 0;
3658 			ret = true;
3659 		}
3660 		goto out;
3661 	}
3662 
3663 	/* ADMA sanity checks */
3664 	oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0));
3665 	cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16));
3666 	adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) &&
3667 		    !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6));
3668 
3669 	if (oq_hang && cdm1_busy && adma_busy) {
3670 		if (++eth->reset.adma_hang_count > 2) {
3671 			eth->reset.adma_hang_count = 0;
3672 			ret = true;
3673 		}
3674 		goto out;
3675 	}
3676 
3677 	eth->reset.wdma_hang_count = 0;
3678 	eth->reset.qdma_hang_count = 0;
3679 	eth->reset.adma_hang_count = 0;
3680 out:
3681 	eth->reset.wdidx = wdidx;
3682 
3683 	return ret;
3684 }
3685 
3686 static void mtk_hw_reset_monitor_work(struct work_struct *work)
3687 {
3688 	struct delayed_work *del_work = to_delayed_work(work);
3689 	struct mtk_eth *eth = container_of(del_work, struct mtk_eth,
3690 					   reset.monitor_work);
3691 
3692 	if (test_bit(MTK_RESETTING, &eth->state))
3693 		goto out;
3694 
3695 	/* DMA stuck checks */
3696 	if (mtk_hw_check_dma_hang(eth))
3697 		schedule_work(&eth->pending_work);
3698 
3699 out:
3700 	schedule_delayed_work(&eth->reset.monitor_work,
3701 			      MTK_DMA_MONITOR_TIMEOUT);
3702 }
3703 
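/* One-time (or post-reset) hardware bring-up: clocks and runtime PM on
 * first use, a warm or cold reset depending on @reset, then GMAC
 * defaults, CDM/GDM tag handling, interrupt grouping and, on NETSYS v2,
 * the PSE input/output queue thresholds.
 */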
3704 static int mtk_hw_init(struct mtk_eth *eth, bool reset)
3705 {
3706 	u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
3707 		       ETHSYS_DMA_AG_MAP_PPE;
3708 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3709 	int i, val, ret;
3710 
3711 	if (!reset && test_and_set_bit(MTK_HW_INIT, &eth->state))
3712 		return 0;
3713 
3714 	if (!reset) {
3715 		pm_runtime_enable(eth->dev);
3716 		pm_runtime_get_sync(eth->dev);
3717 
3718 		ret = mtk_clk_enable(eth);
3719 		if (ret)
3720 			goto err_disable_pm;
3721 	}
3722 
3723 	if (eth->ethsys)
3724 		regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
3725 				   of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
3726 
3727 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3728 		ret = device_reset(eth->dev);
3729 		if (ret) {
3730 			dev_err(eth->dev, "MAC reset failed!\n");
3731 			goto err_disable_pm;
3732 		}
3733 
3734 		/* set interrupt delays based on current Net DIM sample */
3735 		mtk_dim_rx(&eth->rx_dim.work);
3736 		mtk_dim_tx(&eth->tx_dim.work);
3737 
3738 		/* disable delay and normal interrupts */
3739 		mtk_tx_irq_disable(eth, ~0);
3740 		mtk_rx_irq_disable(eth, ~0);
3741 
3742 		return 0;
3743 	}
3744 
3745 	msleep(100);
3746 
3747 	if (reset)
3748 		mtk_hw_warm_reset(eth);
3749 	else
3750 		mtk_hw_reset(eth);
3751 
3752 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3753 		/* Set FE to PDMAv2 if necessary */
3754 		val = mtk_r32(eth, MTK_FE_GLO_MISC);
3755 		mtk_w32(eth,  val | BIT(4), MTK_FE_GLO_MISC);
3756 	}
3757 
3758 	if (eth->pctl) {
3759 		/* Set GE2 driving and slew rate */
3760 		regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
3761 
3762 		/* set GE2 TDSEL */
3763 		regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
3764 
3765 		/* set GE2 TUNE */
3766 		regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
3767 	}
3768 
3769 	/* Set link down as the default for each GMAC. Each GMAC's own MCR is
3770 	 * set up with a more appropriate value once the mtk_mac_config()
3771 	 * callback is invoked.
3772 	 */
3773 	for (i = 0; i < MTK_MAC_COUNT; i++) {
3774 		struct net_device *dev = eth->netdev[i];
3775 
3776 		mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
3777 		if (dev) {
3778 			struct mtk_mac *mac = netdev_priv(dev);
3779 
3780 			mtk_set_mcr_max_rx(mac, dev->mtu + MTK_RX_ETH_HLEN);
3781 		}
3782 	}
3783 
3784 	/* Instruct the CDM to parse the MTK special tag on packets coming
3785 	 * from the CPU; this also works for untagged packets.
3786 	 */
3787 	val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
3788 	mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
3789 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3790 		val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3791 		mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
3792 	}
3793 
3794 	/* Enable RX VLAN offloading */
3795 	mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
3796 
3797 	/* set interrupt delays based on current Net DIM sample */
3798 	mtk_dim_rx(&eth->rx_dim.work);
3799 	mtk_dim_tx(&eth->tx_dim.work);
3800 
3801 	/* disable delay and normal interrupt */
3802 	mtk_tx_irq_disable(eth, ~0);
3803 	mtk_rx_irq_disable(eth, ~0);
3804 
3805 	/* FE int grouping */
3806 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
3807 	mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
3808 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
3809 	mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
3810 	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
3811 
3812 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3813 		/* PSE should not drop port8 and port9 packets from WDMA Tx */
3814 		mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
3815 
3816 		/* PSE should drop packets to port 8/9 on WDMA Rx ring full */
3817 		mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
3818 
3819 		/* PSE Free Queue Flow Control  */
3820 		mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
3821 
3822 		/* PSE config input queue threshold */
3823 		mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
3824 		mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
3825 		mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
3826 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
3827 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
3828 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
3829 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
3830 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
3831 
3832 		/* PSE config output queue threshold */
3833 		mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
3834 		mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
3835 		mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
3836 		mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
3837 		mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
3838 		mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
3839 		mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
3840 		mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
3841 
3842 		/* GDM and CDM Threshold */
3843 		mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
3844 		mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
3845 		mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
3846 		mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
3847 		mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
3848 		mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
3849 	}
3850 
3851 	return 0;
3852 
3853 err_disable_pm:
3854 	if (!reset) {
3855 		pm_runtime_put_sync(eth->dev);
3856 		pm_runtime_disable(eth->dev);
3857 	}
3858 
3859 	return ret;
3860 }
3861 
3862 static int mtk_hw_deinit(struct mtk_eth *eth)
3863 {
3864 	if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
3865 		return 0;
3866 
3867 	mtk_clk_disable(eth);
3868 
3869 	pm_runtime_put_sync(eth->dev);
3870 	pm_runtime_disable(eth->dev);
3871 
3872 	return 0;
3873 }
3874 
3875 static int mtk_init(struct net_device *dev)
3876 {
3877 	struct mtk_mac *mac = netdev_priv(dev);
3878 	struct mtk_eth *eth = mac->hw;
3879 	int ret;
3880 
3881 	ret = of_get_ethdev_address(mac->of_node, dev);
3882 	if (ret) {
3883 		/* If the MAC address is invalid, fall back to a random one */
3884 		eth_hw_addr_random(dev);
3885 		dev_err(eth->dev, "generated random MAC address %pM\n",
3886 			dev->dev_addr);
3887 	}
3888 
3889 	return 0;
3890 }
3891 
3892 static void mtk_uninit(struct net_device *dev)
3893 {
3894 	struct mtk_mac *mac = netdev_priv(dev);
3895 	struct mtk_eth *eth = mac->hw;
3896 
3897 	phylink_disconnect_phy(mac->phylink);
3898 	mtk_tx_irq_disable(eth, ~0);
3899 	mtk_rx_irq_disable(eth, ~0);
3900 }
3901 
3902 static int mtk_change_mtu(struct net_device *dev, int new_mtu)
3903 {
3904 	int length = new_mtu + MTK_RX_ETH_HLEN;
3905 	struct mtk_mac *mac = netdev_priv(dev);
3906 	struct mtk_eth *eth = mac->hw;
3907 
3908 	if (rcu_access_pointer(eth->prog) &&
3909 	    length > MTK_PP_MAX_BUF_SIZE) {
3910 		netdev_err(dev, "Invalid MTU for XDP mode\n");
3911 		return -EINVAL;
3912 	}
3913 
3914 	mtk_set_mcr_max_rx(mac, length);
3915 	dev->mtu = new_mtu;
3916 
3917 	return 0;
3918 }
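
/* With an XDP program attached, RX buffers come from the page_pool and are
 * capped at MTK_PP_MAX_BUF_SIZE, so an MTU whose full frame length would
 * exceed that cap is rejected up front rather than silently truncated.
 */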
3919 
3920 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3921 {
3922 	struct mtk_mac *mac = netdev_priv(dev);
3923 
3924 	switch (cmd) {
3925 	case SIOCGMIIPHY:
3926 	case SIOCGMIIREG:
3927 	case SIOCSMIIREG:
3928 		return phylink_mii_ioctl(mac->phylink, ifr, cmd);
3929 	default:
3930 		break;
3931 	}
3932 
3933 	return -EOPNOTSUPP;
3934 }
3935 
3936 static void mtk_prepare_for_reset(struct mtk_eth *eth)
3937 {
3938 	u32 val;
3939 	int i;
3940 
3941 	/* disable FE P3 and P4 */
3942 	val = mtk_r32(eth, MTK_FE_GLO_CFG) | MTK_FE_LINK_DOWN_P3;
3943 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3944 		val |= MTK_FE_LINK_DOWN_P4;
3945 	mtk_w32(eth, val, MTK_FE_GLO_CFG);
3946 
3947 	/* adjust PPE configurations to prepare for reset */
3948 	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3949 		mtk_ppe_prepare_reset(eth->ppe[i]);
3950 
3951 	/* disable NETSYS interrupts */
3952 	mtk_w32(eth, 0, MTK_FE_INT_ENABLE);
3953 
3954 	/* force link down GMAC */
3955 	for (i = 0; i < 2; i++) {
3956 		val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK;
3957 		mtk_w32(eth, val, MTK_MAC_MCR(i));
3958 	}
3959 }
3960 
3961 static void mtk_pending_work(struct work_struct *work)
3962 {
3963 	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
3964 	unsigned long restart = 0;
3965 	u32 val;
3966 	int i;
3967 
3968 	rtnl_lock();
3969 	set_bit(MTK_RESETTING, &eth->state);
3970 
3971 	mtk_prepare_for_reset(eth);
3972 	mtk_wed_fe_reset();
3973 	/* Run the preliminary reset configuration again to close any race
3974 	 * window: the FE reset above may run with the RTNL lock released.
3975 	 */
3976 	mtk_prepare_for_reset(eth);
3977 
3978 	/* stop all devices to make sure that dma is properly shut down */
3979 	for (i = 0; i < MTK_MAC_COUNT; i++) {
3980 		if (!eth->netdev[i] || !netif_running(eth->netdev[i]))
3981 			continue;
3982 
3983 		mtk_stop(eth->netdev[i]);
3984 		__set_bit(i, &restart);
3985 	}
3986 
3987 	usleep_range(15000, 16000);
3988 
3989 	if (eth->dev->pins)
3990 		pinctrl_select_state(eth->dev->pins->p,
3991 				     eth->dev->pins->default_state);
3992 	mtk_hw_init(eth, true);
3993 
3994 	/* restart DMA and enable IRQs */
3995 	for (i = 0; i < MTK_MAC_COUNT; i++) {
3996 		if (!test_bit(i, &restart))
3997 			continue;
3998 
3999 		if (mtk_open(eth->netdev[i])) {
4000 			netif_alert(eth, ifup, eth->netdev[i],
4001 				    "Driver up/down cycle failed\n");
4002 			dev_close(eth->netdev[i]);
4003 		}
4004 	}
4005 
4006 	/* enable FE P3 and P4 */
4007 	val = mtk_r32(eth, MTK_FE_GLO_CFG) & ~MTK_FE_LINK_DOWN_P3;
4008 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
4009 		val &= ~MTK_FE_LINK_DOWN_P4;
4010 	mtk_w32(eth, val, MTK_FE_GLO_CFG);
4011 
4012 	clear_bit(MTK_RESETTING, &eth->state);
4013 
4014 	mtk_wed_fe_reset_complete();
4015 
4016 	rtnl_unlock();
4017 }
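
/*
 * mtk_pending_work() is the FE recovery path. It is scheduled, e.g. by the
 * DMA-hang monitor above, and runs entirely under RTNL: force the FE ports
 * down and prepare the PPE, let WED perform its reset, stop every running
 * netdev, warm-reset the hardware via mtk_hw_init(eth, true), then bring
 * the previously running netdevs back up.
 */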
4018 
4019 static int mtk_free_dev(struct mtk_eth *eth)
4020 {
4021 	int i;
4022 
4023 	for (i = 0; i < MTK_MAC_COUNT; i++) {
4024 		if (!eth->netdev[i])
4025 			continue;
4026 		free_netdev(eth->netdev[i]);
4027 	}
4028 
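	/* eth->dsa_meta is filled contiguously from index 0, so the first
	 * NULL slot marks the end of the allocated entries and the loop can
	 * stop early.
	 */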
4029 	for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
4030 		if (!eth->dsa_meta[i])
4031 			break;
4032 		metadata_dst_free(eth->dsa_meta[i]);
4033 	}
4034 
4035 	return 0;
4036 }
4037 
4038 static int mtk_unreg_dev(struct mtk_eth *eth)
4039 {
4040 	int i;
4041 
4042 	for (i = 0; i < MTK_MAC_COUNT; i++) {
4043 		struct mtk_mac *mac;

4044 		if (!eth->netdev[i])
4045 			continue;
4046 		mac = netdev_priv(eth->netdev[i]);
4047 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4048 			unregister_netdevice_notifier(&mac->device_notifier);
4049 		unregister_netdev(eth->netdev[i]);
4050 	}
4051 
4052 	return 0;
4053 }
4054 
4055 static int mtk_cleanup(struct mtk_eth *eth)
4056 {
4057 	mtk_unreg_dev(eth);
4058 	mtk_free_dev(eth);
4059 	cancel_work_sync(&eth->pending_work);
4060 	cancel_delayed_work_sync(&eth->reset.monitor_work);
4061 
4062 	return 0;
4063 }
4064 
4065 static int mtk_get_link_ksettings(struct net_device *ndev,
4066 				  struct ethtool_link_ksettings *cmd)
4067 {
4068 	struct mtk_mac *mac = netdev_priv(ndev);
4069 
4070 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4071 		return -EBUSY;
4072 
4073 	return phylink_ethtool_ksettings_get(mac->phylink, cmd);
4074 }
4075 
4076 static int mtk_set_link_ksettings(struct net_device *ndev,
4077 				  const struct ethtool_link_ksettings *cmd)
4078 {
4079 	struct mtk_mac *mac = netdev_priv(ndev);
4080 
4081 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4082 		return -EBUSY;
4083 
4084 	return phylink_ethtool_ksettings_set(mac->phylink, cmd);
4085 }
4086 
4087 static void mtk_get_drvinfo(struct net_device *dev,
4088 			    struct ethtool_drvinfo *info)
4089 {
4090 	struct mtk_mac *mac = netdev_priv(dev);
4091 
4092 	strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
4093 	strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
4094 	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
4095 }
4096 
4097 static u32 mtk_get_msglevel(struct net_device *dev)
4098 {
4099 	struct mtk_mac *mac = netdev_priv(dev);
4100 
4101 	return mac->hw->msg_enable;
4102 }
4103 
4104 static void mtk_set_msglevel(struct net_device *dev, u32 value)
4105 {
4106 	struct mtk_mac *mac = netdev_priv(dev);
4107 
4108 	mac->hw->msg_enable = value;
4109 }
4110 
4111 static int mtk_nway_reset(struct net_device *dev)
4112 {
4113 	struct mtk_mac *mac = netdev_priv(dev);
4114 
4115 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4116 		return -EBUSY;
4117 
4118 	if (!mac->phylink)
4119 		return -EOPNOTSUPP;
4120 
4121 	return phylink_ethtool_nway_reset(mac->phylink);
4122 }
4123 
4124 static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4125 {
4126 	int i;
4127 
4128 	switch (stringset) {
4129 	case ETH_SS_STATS: {
4130 		struct mtk_mac *mac = netdev_priv(dev);
4131 
4132 		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
4133 			memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
4134 			data += ETH_GSTRING_LEN;
4135 		}
4136 		if (mtk_page_pool_enabled(mac->hw))
4137 			page_pool_ethtool_stats_get_strings(data);
4138 		break;
4139 	}
4140 	default:
4141 		break;
4142 	}
4143 }
4144 
4145 static int mtk_get_sset_count(struct net_device *dev, int sset)
4146 {
4147 	switch (sset) {
4148 	case ETH_SS_STATS: {
4149 		int count = ARRAY_SIZE(mtk_ethtool_stats);
4150 		struct mtk_mac *mac = netdev_priv(dev);
4151 
4152 		if (mtk_page_pool_enabled(mac->hw))
4153 			count += page_pool_ethtool_stats_get_count();
4154 		return count;
4155 	}
4156 	default:
4157 		return -EOPNOTSUPP;
4158 	}
4159 }
4160 
4161 static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data)
4162 {
4163 	struct page_pool_stats stats = {};
4164 	int i;
4165 
4166 	for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) {
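	/* page_pool_get_stats() accumulates into @stats rather than
	 * overwriting it, so this loop sums the counters of every RX ring
	 * backed by a page_pool before translating them for ethtool.
	 */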
4167 		struct mtk_rx_ring *ring = &eth->rx_ring[i];
4168 
4169 		if (!ring->page_pool)
4170 			continue;
4171 
4172 		page_pool_get_stats(ring->page_pool, &stats);
4173 	}
4174 	page_pool_ethtool_stats_get(data, &stats);
4175 }
4176 
4177 static void mtk_get_ethtool_stats(struct net_device *dev,
4178 				  struct ethtool_stats *stats, u64 *data)
4179 {
4180 	struct mtk_mac *mac = netdev_priv(dev);
4181 	struct mtk_hw_stats *hwstats = mac->hw_stats;
4182 	u64 *data_src, *data_dst;
4183 	unsigned int start;
4184 	int i;
4185 
4186 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4187 		return;
4188 
4189 	if (netif_running(dev) && netif_device_present(dev)) {
4190 		if (spin_trylock_bh(&hwstats->stats_lock)) {
4191 			mtk_stats_update_mac(mac);
4192 			spin_unlock_bh(&hwstats->stats_lock);
4193 		}
4194 	}
4195 
4196 	data_src = (u64 *)hwstats;
4197 
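	/* Snapshot the counters under the u64_stats seqcount: if the writer
	 * side updates the statistics while they are being copied, the
	 * fetch/retry pair detects it and the copy is redone. This matters
	 * on 32-bit systems, where 64-bit reads are not atomic.
	 */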
4198 	do {
4199 		data_dst = data;
4200 		start = u64_stats_fetch_begin(&hwstats->syncp);
4201 
4202 		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
4203 			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
4204 		if (mtk_page_pool_enabled(mac->hw))
4205 			mtk_ethtool_pp_stats(mac->hw, data_dst);
4206 	} while (u64_stats_fetch_retry(&hwstats->syncp, start));
4207 }
4208 
4209 static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
4210 			 u32 *rule_locs)
4211 {
4212 	int ret = -EOPNOTSUPP;
4213 
4214 	switch (cmd->cmd) {
4215 	case ETHTOOL_GRXRINGS:
4216 		if (dev->hw_features & NETIF_F_LRO) {
4217 			cmd->data = MTK_MAX_RX_RING_NUM;
4218 			ret = 0;
4219 		}
4220 		break;
4221 	case ETHTOOL_GRXCLSRLCNT:
4222 		if (dev->hw_features & NETIF_F_LRO) {
4223 			struct mtk_mac *mac = netdev_priv(dev);
4224 
4225 			cmd->rule_cnt = mac->hwlro_ip_cnt;
4226 			ret = 0;
4227 		}
4228 		break;
4229 	case ETHTOOL_GRXCLSRULE:
4230 		if (dev->hw_features & NETIF_F_LRO)
4231 			ret = mtk_hwlro_get_fdir_entry(dev, cmd);
4232 		break;
4233 	case ETHTOOL_GRXCLSRLALL:
4234 		if (dev->hw_features & NETIF_F_LRO)
4235 			ret = mtk_hwlro_get_fdir_all(dev, cmd,
4236 						     rule_locs);
4237 		break;
4238 	default:
4239 		break;
4240 	}
4241 
4242 	return ret;
4243 }
4244 
4245 static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
4246 {
4247 	int ret = -EOPNOTSUPP;
4248 
4249 	switch (cmd->cmd) {
4250 	case ETHTOOL_SRXCLSRLINS:
4251 		if (dev->hw_features & NETIF_F_LRO)
4252 			ret = mtk_hwlro_add_ipaddr(dev, cmd);
4253 		break;
4254 	case ETHTOOL_SRXCLSRLDEL:
4255 		if (dev->hw_features & NETIF_F_LRO)
4256 			ret = mtk_hwlro_del_ipaddr(dev, cmd);
4257 		break;
4258 	default:
4259 		break;
4260 	}
4261 
4262 	return ret;
4263 }
4264 
4265 static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
4266 			    struct net_device *sb_dev)
4267 {
4268 	struct mtk_mac *mac = netdev_priv(dev);
4269 	unsigned int queue = 0;
4270 
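	/* The low TX queues belong to the GMACs themselves (queue == mac->id),
	 * while DSA-tagged traffic is steered to a per-port queue starting at
	 * offset 3; presumably the offset keeps the DSA queues clear of the
	 * per-GMAC ones. Out-of-range values fall back to queue 0.
	 */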
4271 	if (netdev_uses_dsa(dev))
4272 		queue = skb_get_queue_mapping(skb) + 3;
4273 	else
4274 		queue = mac->id;
4275 
4276 	if (queue >= dev->num_tx_queues)
4277 		queue = 0;
4278 
4279 	return queue;
4280 }
4281 
4282 static const struct ethtool_ops mtk_ethtool_ops = {
4283 	.get_link_ksettings	= mtk_get_link_ksettings,
4284 	.set_link_ksettings	= mtk_set_link_ksettings,
4285 	.get_drvinfo		= mtk_get_drvinfo,
4286 	.get_msglevel		= mtk_get_msglevel,
4287 	.set_msglevel		= mtk_set_msglevel,
4288 	.nway_reset		= mtk_nway_reset,
4289 	.get_link		= ethtool_op_get_link,
4290 	.get_strings		= mtk_get_strings,
4291 	.get_sset_count		= mtk_get_sset_count,
4292 	.get_ethtool_stats	= mtk_get_ethtool_stats,
4293 	.get_rxnfc		= mtk_get_rxnfc,
4294 	.set_rxnfc              = mtk_set_rxnfc,
4295 };
4296 
4297 static const struct net_device_ops mtk_netdev_ops = {
4298 	.ndo_init		= mtk_init,
4299 	.ndo_uninit		= mtk_uninit,
4300 	.ndo_open		= mtk_open,
4301 	.ndo_stop		= mtk_stop,
4302 	.ndo_start_xmit		= mtk_start_xmit,
4303 	.ndo_set_mac_address	= mtk_set_mac_address,
4304 	.ndo_validate_addr	= eth_validate_addr,
4305 	.ndo_eth_ioctl		= mtk_do_ioctl,
4306 	.ndo_change_mtu		= mtk_change_mtu,
4307 	.ndo_tx_timeout		= mtk_tx_timeout,
4308 	.ndo_get_stats64        = mtk_get_stats64,
4309 	.ndo_fix_features	= mtk_fix_features,
4310 	.ndo_set_features	= mtk_set_features,
4311 #ifdef CONFIG_NET_POLL_CONTROLLER
4312 	.ndo_poll_controller	= mtk_poll_controller,
4313 #endif
4314 	.ndo_setup_tc		= mtk_eth_setup_tc,
4315 	.ndo_bpf		= mtk_xdp,
4316 	.ndo_xdp_xmit		= mtk_xdp_xmit,
4317 	.ndo_select_queue	= mtk_select_queue,
4318 };
4319 
4320 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
4321 {
4322 	const __be32 *_id = of_get_property(np, "reg", NULL);
4323 	phy_interface_t phy_mode;
4324 	struct phylink *phylink;
4325 	struct mtk_mac *mac;
4326 	int id, err;
4327 	int txqs = 1;
4328 
4329 	if (!_id) {
4330 		dev_err(eth->dev, "missing mac id\n");
4331 		return -EINVAL;
4332 	}
4333 
4334 	id = be32_to_cpup(_id);
4335 	if (id >= MTK_MAC_COUNT) {
4336 		dev_err(eth->dev, "%d is not a valid mac id\n", id);
4337 		return -EINVAL;
4338 	}
4339 
4340 	if (eth->netdev[id]) {
4341 		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
4342 		return -EINVAL;
4343 	}
4344 
4345 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4346 		txqs = MTK_QDMA_NUM_QUEUES;
4347 
4348 	eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1);
4349 	if (!eth->netdev[id]) {
4350 		dev_err(eth->dev, "alloc_etherdev failed\n");
4351 		return -ENOMEM;
4352 	}
4353 	mac = netdev_priv(eth->netdev[id]);
4354 	eth->mac[id] = mac;
4355 	mac->id = id;
4356 	mac->hw = eth;
4357 	mac->of_node = np;
4358 
4359 	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
4360 	mac->hwlro_ip_cnt = 0;
4361 
4362 	mac->hw_stats = devm_kzalloc(eth->dev,
4363 				     sizeof(*mac->hw_stats),
4364 				     GFP_KERNEL);
4365 	if (!mac->hw_stats) {
4366 		dev_err(eth->dev, "failed to allocate counter memory\n");
4367 		err = -ENOMEM;
4368 		goto free_netdev;
4369 	}
4370 	spin_lock_init(&mac->hw_stats->stats_lock);
4371 	u64_stats_init(&mac->hw_stats->syncp);
4372 	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
4373 
4374 	/* phylink create */
4375 	err = of_get_phy_mode(np, &phy_mode);
4376 	if (err) {
4377 		dev_err(eth->dev, "incorrect phy-mode\n");
4378 		goto free_netdev;
4379 	}
4380 
4381 	/* mac config is not set */
4382 	mac->interface = PHY_INTERFACE_MODE_NA;
4383 	mac->speed = SPEED_UNKNOWN;
4384 
4385 	mac->phylink_config.dev = &eth->netdev[id]->dev;
4386 	mac->phylink_config.type = PHYLINK_NETDEV;
4387 	/* This driver makes use of state->speed in mac_config */
4388 	mac->phylink_config.legacy_pre_march2020 = true;
4389 	mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
4390 		MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
4391 
4392 	__set_bit(PHY_INTERFACE_MODE_MII,
4393 		  mac->phylink_config.supported_interfaces);
4394 	__set_bit(PHY_INTERFACE_MODE_GMII,
4395 		  mac->phylink_config.supported_interfaces);
4396 
4397 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
4398 		phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
4399 
4400 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
4401 		__set_bit(PHY_INTERFACE_MODE_TRGMII,
4402 			  mac->phylink_config.supported_interfaces);
4403 
4404 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
4405 		__set_bit(PHY_INTERFACE_MODE_SGMII,
4406 			  mac->phylink_config.supported_interfaces);
4407 		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
4408 			  mac->phylink_config.supported_interfaces);
4409 		__set_bit(PHY_INTERFACE_MODE_2500BASEX,
4410 			  mac->phylink_config.supported_interfaces);
4411 	}
4412 
4413 	phylink = phylink_create(&mac->phylink_config,
4414 				 of_fwnode_handle(mac->of_node),
4415 				 phy_mode, &mtk_phylink_ops);
4416 	if (IS_ERR(phylink)) {
4417 		err = PTR_ERR(phylink);
4418 		goto free_netdev;
4419 	}
4420 
4421 	mac->phylink = phylink;
4422 
4423 	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
4424 	eth->netdev[id]->watchdog_timeo = 5 * HZ;
4425 	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
4426 	eth->netdev[id]->base_addr = (unsigned long)eth->base;
4427 
4428 	eth->netdev[id]->hw_features = eth->soc->hw_features;
4429 	if (eth->hwlro)
4430 		eth->netdev[id]->hw_features |= NETIF_F_LRO;
4431 
4432 	eth->netdev[id]->vlan_features = eth->soc->hw_features &
4433 		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
4434 	eth->netdev[id]->features |= eth->soc->hw_features;
4435 	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
4436 
4437 	eth->netdev[id]->irq = eth->irq[0];
4438 	eth->netdev[id]->dev.of_node = np;
4439 
4440 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4441 		eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
4442 	else
4443 		eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
4444 
4445 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
4446 		mac->device_notifier.notifier_call = mtk_device_event;
4447 		register_netdevice_notifier(&mac->device_notifier);
4448 	}
4449 
4450 	return 0;
4451 
4452 free_netdev:
4453 	free_netdev(eth->netdev[id]);
4454 	return err;
4455 }
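
/*
 * mtk_add_mac() consumes the "mediatek,eth-mac" child nodes of the
 * controller node (see the matching loop in mtk_probe() below). An
 * illustrative, non-authoritative device-tree fragment of the shape it
 * expects:
 *
 *	&eth {
 *		gmac0: mac@0 {
 *			compatible = "mediatek,eth-mac";
 *			reg = <0>;
 *			phy-mode = "2500base-x";
 *		};
 *	};
 *
 * "reg" supplies the MAC id validated against MTK_MAC_COUNT, and
 * "phy-mode" feeds of_get_phy_mode() for the phylink setup above.
 */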
4456 
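/* Used by the WED code to retarget DMA: every running netdev is closed,
 * the DMA device pointer is swapped, and the netdevs are reopened so that
 * rings and buffers are reallocated against the new struct device. The
 * whole sequence runs under RTNL, keeping the close/open cycle atomic with
 * respect to other configuration paths.
 */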
4457 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
4458 {
4459 	struct net_device *dev, *tmp;
4460 	LIST_HEAD(dev_list);
4461 	int i;
4462 
4463 	rtnl_lock();
4464 
4465 	for (i = 0; i < MTK_MAC_COUNT; i++) {
4466 		dev = eth->netdev[i];
4467 
4468 		if (!dev || !(dev->flags & IFF_UP))
4469 			continue;
4470 
4471 		list_add_tail(&dev->close_list, &dev_list);
4472 	}
4473 
4474 	dev_close_many(&dev_list, false);
4475 
4476 	eth->dma_dev = dma_dev;
4477 
4478 	list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
4479 		list_del_init(&dev->close_list);
4480 		dev_open(dev, NULL);
4481 	}
4482 
4483 	rtnl_unlock();
4484 }
4485 
4486 static int mtk_probe(struct platform_device *pdev)
4487 {
4488 	struct resource *res = NULL;
4489 	struct device_node *mac_np;
4490 	struct mtk_eth *eth;
4491 	int err, i;
4492 
4493 	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
4494 	if (!eth)
4495 		return -ENOMEM;
4496 
4497 	eth->soc = of_device_get_match_data(&pdev->dev);
4498 
4499 	eth->dev = &pdev->dev;
4500 	eth->dma_dev = &pdev->dev;
4501 	eth->base = devm_platform_ioremap_resource(pdev, 0);
4502 	if (IS_ERR(eth->base))
4503 		return PTR_ERR(eth->base);
4504 
4505 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4506 		eth->ip_align = NET_IP_ALIGN;
4507 
4508 	spin_lock_init(&eth->page_lock);
4509 	spin_lock_init(&eth->tx_irq_lock);
4510 	spin_lock_init(&eth->rx_irq_lock);
4511 	spin_lock_init(&eth->dim_lock);
4512 
4513 	eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4514 	INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
4515 	INIT_DELAYED_WORK(&eth->reset.monitor_work, mtk_hw_reset_monitor_work);
4516 
4517 	eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4518 	INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);
4519 
4520 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4521 		eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4522 							      "mediatek,ethsys");
4523 		if (IS_ERR(eth->ethsys)) {
4524 			dev_err(&pdev->dev, "no ethsys regmap found\n");
4525 			return PTR_ERR(eth->ethsys);
4526 		}
4527 	}
4528 
4529 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
4530 		eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4531 							     "mediatek,infracfg");
4532 		if (IS_ERR(eth->infra)) {
4533 			dev_err(&pdev->dev, "no infracfg regmap found\n");
4534 			return PTR_ERR(eth->infra);
4535 		}
4536 	}
4537 
4538 	if (of_dma_is_coherent(pdev->dev.of_node)) {
4539 		struct regmap *cci;
4540 
4541 		cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4542 						      "cci-control-port");
4543 		/* enable CPU/bus coherency */
4544 		if (!IS_ERR(cci))
4545 			regmap_write(cci, 0, 3);
4546 	}
4547 
4548 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
4549 		eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
4550 					  GFP_KERNEL);
4551 		if (!eth->sgmii)
4552 			return -ENOMEM;
4553 
4554 		err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
4555 				     eth->soc->ana_rgc3);
4556 
4557 		if (err)
4558 			return err;
4559 	}
4560 
4561 	if (eth->soc->required_pctl) {
4562 		eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4563 							    "mediatek,pctl");
4564 		if (IS_ERR(eth->pctl)) {
4565 			dev_err(&pdev->dev, "no pctl regmap found\n");
4566 			return PTR_ERR(eth->pctl);
4567 		}
4568 	}
4569 
4570 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
4571 		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4572 		if (!res)
4573 			return -EINVAL;
4574 	}
4575 
4576 	if (eth->soc->offload_version) {
4577 		for (i = 0;; i++) {
4578 			struct device_node *np;
4579 			phys_addr_t wdma_phy;
4580 			u32 wdma_base;
4581 
4582 			if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
4583 				break;
4584 
4585 			np = of_parse_phandle(pdev->dev.of_node,
4586 					      "mediatek,wed", i);
4587 			if (!np)
4588 				break;
4589 
4590 			wdma_base = eth->soc->reg_map->wdma_base[i];
4591 			wdma_phy = res ? res->start + wdma_base : 0;
4592 			mtk_wed_add_hw(np, eth, eth->base + wdma_base,
4593 				       wdma_phy, i);
4594 		}
4595 	}
4596 
4597 	for (i = 0; i < 3; i++) {
4598 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
4599 			eth->irq[i] = eth->irq[0];
4600 		else
4601 			eth->irq[i] = platform_get_irq(pdev, i);
4602 		if (eth->irq[i] < 0) {
4603 			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
4604 			err = -ENXIO;
4605 			goto err_wed_exit;
4606 		}
4607 	}
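	/* Clocks are looked up by name; a missing clock is fatal only when
	 * the SoC marks it in required_clks, otherwise the slot is left NULL
	 * (a NULL clk is a no-op for the clk API, so mtk_clk_enable() can
	 * still run over the full array). -EPROBE_DEFER is always propagated
	 * so that a late clock provider retriggers the probe.
	 */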
4608 	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
4609 		eth->clks[i] = devm_clk_get(eth->dev,
4610 					    mtk_clks_source_name[i]);
4611 		if (IS_ERR(eth->clks[i])) {
4612 			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) {
4613 				err = -EPROBE_DEFER;
4614 				goto err_wed_exit;
4615 			}
4616 			if (eth->soc->required_clks & BIT(i)) {
4617 				dev_err(&pdev->dev, "clock %s not found\n",
4618 					mtk_clks_source_name[i]);
4619 				err = -EINVAL;
4620 				goto err_wed_exit;
4621 			}
4622 			eth->clks[i] = NULL;
4623 		}
4624 	}
4625 
4626 	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
4627 	INIT_WORK(&eth->pending_work, mtk_pending_work);
4628 
4629 	err = mtk_hw_init(eth, false);
4630 	if (err)
4631 		goto err_wed_exit;
4632 
4633 	eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
4634 
4635 	for_each_child_of_node(pdev->dev.of_node, mac_np) {
4636 		if (!of_device_is_compatible(mac_np,
4637 					     "mediatek,eth-mac"))
4638 			continue;
4639 
4640 		if (!of_device_is_available(mac_np))
4641 			continue;
4642 
4643 		err = mtk_add_mac(eth, mac_np);
4644 		if (err) {
4645 			of_node_put(mac_np);
4646 			goto err_deinit_hw;
4647 		}
4648 	}
4649 
4650 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
4651 		err = devm_request_irq(eth->dev, eth->irq[0],
4652 				       mtk_handle_irq, 0,
4653 				       dev_name(eth->dev), eth);
4654 	} else {
4655 		err = devm_request_irq(eth->dev, eth->irq[1],
4656 				       mtk_handle_irq_tx, 0,
4657 				       dev_name(eth->dev), eth);
4658 		if (err)
4659 			goto err_free_dev;
4660 
4661 		err = devm_request_irq(eth->dev, eth->irq[2],
4662 				       mtk_handle_irq_rx, 0,
4663 				       dev_name(eth->dev), eth);
4664 	}
4665 	if (err)
4666 		goto err_free_dev;
4667 
4668 	/* No MT7628/88 support yet */
4669 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4670 		err = mtk_mdio_init(eth);
4671 		if (err)
4672 			goto err_free_dev;
4673 	}
4674 
4675 	if (eth->soc->offload_version) {
4676 		u32 num_ppe;
4677 
4678 		num_ppe = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
4679 		num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
4680 		for (i = 0; i < num_ppe; i++) {
4681 			u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
4682 
4683 			eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr,
4684 						   eth->soc->offload_version, i);
4685 			if (!eth->ppe[i]) {
4686 				err = -ENOMEM;
4687 				goto err_deinit_ppe;
4688 			}
4689 		}
4690 
4691 		err = mtk_eth_offload_init(eth);
4692 		if (err)
4693 			goto err_deinit_ppe;
4694 	}
4695 
4696 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4697 		if (!eth->netdev[i])
4698 			continue;
4699 
4700 		err = register_netdev(eth->netdev[i]);
4701 		if (err) {
4702 			dev_err(eth->dev, "error bringing up device\n");
4703 			goto err_deinit_ppe;
4704 		}
4705 		netif_info(eth, probe, eth->netdev[i],
4706 			   "mediatek frame engine at 0x%08lx, irq %d\n",
4707 			   eth->netdev[i]->base_addr, eth->irq[0]);
4708 	}
4709 
4710 	/* Both netdevs share the same DMA rings, so a dummy netdev is needed
4711 	 * for NAPI to have something to attach to.
4712 	 */
4713 	init_dummy_netdev(&eth->dummy_dev);
4714 	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
4715 	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
4716 
4717 	platform_set_drvdata(pdev, eth);
4718 	schedule_delayed_work(&eth->reset.monitor_work,
4719 			      MTK_DMA_MONITOR_TIMEOUT);
4720 
4721 	return 0;
4722 
4723 err_deinit_ppe:
4724 	mtk_ppe_deinit(eth);
4725 	mtk_mdio_cleanup(eth);
4726 err_free_dev:
4727 	mtk_free_dev(eth);
4728 err_deinit_hw:
4729 	mtk_hw_deinit(eth);
4730 err_wed_exit:
4731 	mtk_wed_exit();
4732 
4733 	return err;
4734 }
4735 
4736 static int mtk_remove(struct platform_device *pdev)
4737 {
4738 	struct mtk_eth *eth = platform_get_drvdata(pdev);
4739 	struct mtk_mac *mac;
4740 	int i;
4741 
4742 	/* stop all devices to make sure that dma is properly shut down */
4743 	for (i = 0; i < MTK_MAC_COUNT; i++) {
4744 		if (!eth->netdev[i])
4745 			continue;
4746 		mtk_stop(eth->netdev[i]);
4747 		mac = netdev_priv(eth->netdev[i]);
4748 		phylink_disconnect_phy(mac->phylink);
4749 	}
4750 
4751 	mtk_wed_exit();
4752 	mtk_hw_deinit(eth);
4753 
4754 	netif_napi_del(&eth->tx_napi);
4755 	netif_napi_del(&eth->rx_napi);
4756 	mtk_cleanup(eth);
4757 	mtk_mdio_cleanup(eth);
4758 
4759 	return 0;
4760 }
4761 
4762 static const struct mtk_soc_data mt2701_data = {
4763 	.reg_map = &mtk_reg_map,
4764 	.caps = MT7623_CAPS | MTK_HWLRO,
4765 	.hw_features = MTK_HW_FEATURES,
4766 	.required_clks = MT7623_CLKS_BITMAP,
4767 	.required_pctl = true,
4768 	.txrx = {
4769 		.txd_size = sizeof(struct mtk_tx_dma),
4770 		.rxd_size = sizeof(struct mtk_rx_dma),
4771 		.rx_irq_done_mask = MTK_RX_DONE_INT,
4772 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
4773 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
4774 		.dma_len_offset = 16,
4775 	},
4776 };
4777 
4778 static const struct mtk_soc_data mt7621_data = {
4779 	.reg_map = &mtk_reg_map,
4780 	.caps = MT7621_CAPS,
4781 	.hw_features = MTK_HW_FEATURES,
4782 	.required_clks = MT7621_CLKS_BITMAP,
4783 	.required_pctl = false,
4784 	.offload_version = 1,
4785 	.hash_offset = 2,
4786 	.foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
4787 	.txrx = {
4788 		.txd_size = sizeof(struct mtk_tx_dma),
4789 		.rxd_size = sizeof(struct mtk_rx_dma),
4790 		.rx_irq_done_mask = MTK_RX_DONE_INT,
4791 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
4792 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
4793 		.dma_len_offset = 16,
4794 	},
4795 };
4796 
4797 static const struct mtk_soc_data mt7622_data = {
4798 	.reg_map = &mtk_reg_map,
4799 	.ana_rgc3 = 0x2028,
4800 	.caps = MT7622_CAPS | MTK_HWLRO,
4801 	.hw_features = MTK_HW_FEATURES,
4802 	.required_clks = MT7622_CLKS_BITMAP,
4803 	.required_pctl = false,
4804 	.offload_version = 2,
4805 	.hash_offset = 2,
4806 	.foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
4807 	.txrx = {
4808 		.txd_size = sizeof(struct mtk_tx_dma),
4809 		.rxd_size = sizeof(struct mtk_rx_dma),
4810 		.rx_irq_done_mask = MTK_RX_DONE_INT,
4811 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
4812 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
4813 		.dma_len_offset = 16,
4814 	},
4815 };
4816 
4817 static const struct mtk_soc_data mt7623_data = {
4818 	.reg_map = &mtk_reg_map,
4819 	.caps = MT7623_CAPS | MTK_HWLRO,
4820 	.hw_features = MTK_HW_FEATURES,
4821 	.required_clks = MT7623_CLKS_BITMAP,
4822 	.required_pctl = true,
4823 	.offload_version = 1,
4824 	.hash_offset = 2,
4825 	.foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
4826 	.txrx = {
4827 		.txd_size = sizeof(struct mtk_tx_dma),
4828 		.rxd_size = sizeof(struct mtk_rx_dma),
4829 		.rx_irq_done_mask = MTK_RX_DONE_INT,
4830 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
4831 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
4832 		.dma_len_offset = 16,
4833 	},
4834 };
4835 
4836 static const struct mtk_soc_data mt7629_data = {
4837 	.reg_map = &mtk_reg_map,
4838 	.ana_rgc3 = 0x128,
4839 	.caps = MT7629_CAPS | MTK_HWLRO,
4840 	.hw_features = MTK_HW_FEATURES,
4841 	.required_clks = MT7629_CLKS_BITMAP,
4842 	.required_pctl = false,
4843 	.txrx = {
4844 		.txd_size = sizeof(struct mtk_tx_dma),
4845 		.rxd_size = sizeof(struct mtk_rx_dma),
4846 		.rx_irq_done_mask = MTK_RX_DONE_INT,
4847 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
4848 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
4849 		.dma_len_offset = 16,
4850 	},
4851 };
4852 
4853 static const struct mtk_soc_data mt7986_data = {
4854 	.reg_map = &mt7986_reg_map,
4855 	.ana_rgc3 = 0x128,
4856 	.caps = MT7986_CAPS,
4857 	.hw_features = MTK_HW_FEATURES,
4858 	.required_clks = MT7986_CLKS_BITMAP,
4859 	.required_pctl = false,
4860 	.offload_version = 2,
4861 	.hash_offset = 4,
4862 	.foe_entry_size = sizeof(struct mtk_foe_entry),
4863 	.txrx = {
4864 		.txd_size = sizeof(struct mtk_tx_dma_v2),
4865 		.rxd_size = sizeof(struct mtk_rx_dma_v2),
4866 		.rx_irq_done_mask = MTK_RX_DONE_INT_V2,
4867 		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
4868 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
4869 		.dma_len_offset = 8,
4870 	},
4871 };
4872 
4873 static const struct mtk_soc_data rt5350_data = {
4874 	.reg_map = &mt7628_reg_map,
4875 	.caps = MT7628_CAPS,
4876 	.hw_features = MTK_HW_FEATURES_MT7628,
4877 	.required_clks = MT7628_CLKS_BITMAP,
4878 	.required_pctl = false,
4879 	.txrx = {
4880 		.txd_size = sizeof(struct mtk_tx_dma),
4881 		.rxd_size = sizeof(struct mtk_rx_dma),
4882 		.rx_irq_done_mask = MTK_RX_DONE_INT,
4883 		.rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
4884 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
4885 		.dma_len_offset = 16,
4886 	},
4887 };
4888 
4889 const struct of_device_id of_mtk_match[] = {
4890 	{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
4891 	{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
4892 	{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
4893 	{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
4894 	{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
4895 	{ .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
4896 	{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
4897 	{},
4898 };
4899 MODULE_DEVICE_TABLE(of, of_mtk_match);
4900 
4901 static struct platform_driver mtk_driver = {
4902 	.probe = mtk_probe,
4903 	.remove = mtk_remove,
4904 	.driver = {
4905 		.name = "mtk_soc_eth",
4906 		.of_match_table = of_mtk_match,
4907 	},
4908 };
4909 
4910 module_platform_driver(mtk_driver);
4911 
4912 MODULE_LICENSE("GPL");
4913 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
4914 MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
4915