1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *
4  *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
5  *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
6  *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
7  */
8 
9 #include <linux/of_device.h>
10 #include <linux/of_mdio.h>
11 #include <linux/of_net.h>
12 #include <linux/of_address.h>
13 #include <linux/mfd/syscon.h>
14 #include <linux/regmap.h>
15 #include <linux/clk.h>
16 #include <linux/pm_runtime.h>
17 #include <linux/if_vlan.h>
18 #include <linux/reset.h>
19 #include <linux/tcp.h>
20 #include <linux/interrupt.h>
21 #include <linux/pinctrl/devinfo.h>
22 #include <linux/phylink.h>
23 #include <linux/jhash.h>
24 #include <linux/bitfield.h>
25 #include <net/dsa.h>
26 #include <net/dst_metadata.h>
27 
28 #include "mtk_eth_soc.h"
29 #include "mtk_wed.h"
30 
31 static int mtk_msg_level = -1;
32 module_param_named(msg_level, mtk_msg_level, int, 0);
33 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
34 
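/* Each MTK_ETHTOOL_STAT()/MTK_ETHTOOL_XDP_STAT() entry pairs an ethtool
 * string with the index of the matching u64 counter inside struct
 * mtk_hw_stats; for example MTK_ETHTOOL_STAT(tx_bytes) expands to
 * { "tx_bytes", offsetof(struct mtk_hw_stats, tx_bytes) / sizeof(u64) }.
 */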
35 #define MTK_ETHTOOL_STAT(x) { #x, \
36 			      offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
37 
38 #define MTK_ETHTOOL_XDP_STAT(x) { #x, \
39 				  offsetof(struct mtk_hw_stats, xdp_stats.x) / \
40 				  sizeof(u64) }
41 
42 static const struct mtk_reg_map mtk_reg_map = {
43 	.tx_irq_mask		= 0x1a1c,
44 	.tx_irq_status		= 0x1a18,
45 	.pdma = {
46 		.rx_ptr		= 0x0900,
47 		.rx_cnt_cfg	= 0x0904,
48 		.pcrx_ptr	= 0x0908,
49 		.glo_cfg	= 0x0a04,
50 		.rst_idx	= 0x0a08,
51 		.delay_irq	= 0x0a0c,
52 		.irq_status	= 0x0a20,
53 		.irq_mask	= 0x0a28,
54 		.adma_rx_dbg0	= 0x0a38,
55 		.int_grp	= 0x0a50,
56 	},
57 	.qdma = {
58 		.qtx_cfg	= 0x1800,
59 		.qtx_sch	= 0x1804,
60 		.rx_ptr		= 0x1900,
61 		.rx_cnt_cfg	= 0x1904,
62 		.qcrx_ptr	= 0x1908,
63 		.glo_cfg	= 0x1a04,
64 		.rst_idx	= 0x1a08,
65 		.delay_irq	= 0x1a0c,
66 		.fc_th		= 0x1a10,
67 		.tx_sch_rate	= 0x1a14,
68 		.int_grp	= 0x1a20,
69 		.hred		= 0x1a44,
70 		.ctx_ptr	= 0x1b00,
71 		.dtx_ptr	= 0x1b04,
72 		.crx_ptr	= 0x1b10,
73 		.drx_ptr	= 0x1b14,
74 		.fq_head	= 0x1b20,
75 		.fq_tail	= 0x1b24,
76 		.fq_count	= 0x1b28,
77 		.fq_blen	= 0x1b2c,
78 	},
79 	.gdm1_cnt		= 0x2400,
80 	.gdma_to_ppe		= 0x4444,
81 	.ppe_base		= 0x0c00,
82 	.wdma_base = {
83 		[0]		= 0x2800,
84 		[1]		= 0x2c00,
85 	},
86 	.pse_iq_sta		= 0x0110,
87 	.pse_oq_sta		= 0x0118,
88 };
89 
90 static const struct mtk_reg_map mt7628_reg_map = {
91 	.tx_irq_mask		= 0x0a28,
92 	.tx_irq_status		= 0x0a20,
93 	.pdma = {
94 		.rx_ptr		= 0x0900,
95 		.rx_cnt_cfg	= 0x0904,
96 		.pcrx_ptr	= 0x0908,
97 		.glo_cfg	= 0x0a04,
98 		.rst_idx	= 0x0a08,
99 		.delay_irq	= 0x0a0c,
100 		.irq_status	= 0x0a20,
101 		.irq_mask	= 0x0a28,
102 		.int_grp	= 0x0a50,
103 	},
104 };
105 
106 static const struct mtk_reg_map mt7986_reg_map = {
107 	.tx_irq_mask		= 0x461c,
108 	.tx_irq_status		= 0x4618,
109 	.pdma = {
110 		.rx_ptr		= 0x6100,
111 		.rx_cnt_cfg	= 0x6104,
112 		.pcrx_ptr	= 0x6108,
113 		.glo_cfg	= 0x6204,
114 		.rst_idx	= 0x6208,
115 		.delay_irq	= 0x620c,
116 		.irq_status	= 0x6220,
117 		.irq_mask	= 0x6228,
118 		.adma_rx_dbg0	= 0x6238,
119 		.int_grp	= 0x6250,
120 	},
121 	.qdma = {
122 		.qtx_cfg	= 0x4400,
123 		.qtx_sch	= 0x4404,
124 		.rx_ptr		= 0x4500,
125 		.rx_cnt_cfg	= 0x4504,
126 		.qcrx_ptr	= 0x4508,
127 		.glo_cfg	= 0x4604,
128 		.rst_idx	= 0x4608,
129 		.delay_irq	= 0x460c,
130 		.fc_th		= 0x4610,
131 		.int_grp	= 0x4620,
132 		.hred		= 0x4644,
133 		.ctx_ptr	= 0x4700,
134 		.dtx_ptr	= 0x4704,
135 		.crx_ptr	= 0x4710,
136 		.drx_ptr	= 0x4714,
137 		.fq_head	= 0x4720,
138 		.fq_tail	= 0x4724,
139 		.fq_count	= 0x4728,
140 		.fq_blen	= 0x472c,
141 		.tx_sch_rate	= 0x4798,
142 	},
143 	.gdm1_cnt		= 0x1c00,
144 	.gdma_to_ppe		= 0x3333,
145 	.ppe_base		= 0x2000,
146 	.wdma_base = {
147 		[0]		= 0x4800,
148 		[1]		= 0x4c00,
149 	},
150 	.pse_iq_sta		= 0x0180,
151 	.pse_oq_sta		= 0x01a0,
152 };
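
/* The mtk_reg_map/mt7628_reg_map/mt7986_reg_map tables above capture the
 * per-SoC-generation layout of the PDMA/QDMA blocks.  Code paths shared
 * between generations reach these offsets through eth->soc->reg_map
 * (e.g. mtk_r32(eth, eth->soc->reg_map->tx_irq_mask)); MT7628 has no QDMA
 * engine, so only the PDMA part of its map is filled in.
 */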
153 
154 /* strings used by ethtool */
155 static const struct mtk_ethtool_stats {
156 	char str[ETH_GSTRING_LEN];
157 	u32 offset;
158 } mtk_ethtool_stats[] = {
159 	MTK_ETHTOOL_STAT(tx_bytes),
160 	MTK_ETHTOOL_STAT(tx_packets),
161 	MTK_ETHTOOL_STAT(tx_skip),
162 	MTK_ETHTOOL_STAT(tx_collisions),
163 	MTK_ETHTOOL_STAT(rx_bytes),
164 	MTK_ETHTOOL_STAT(rx_packets),
165 	MTK_ETHTOOL_STAT(rx_overflow),
166 	MTK_ETHTOOL_STAT(rx_fcs_errors),
167 	MTK_ETHTOOL_STAT(rx_short_errors),
168 	MTK_ETHTOOL_STAT(rx_long_errors),
169 	MTK_ETHTOOL_STAT(rx_checksum_errors),
170 	MTK_ETHTOOL_STAT(rx_flow_control_packets),
171 	MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect),
172 	MTK_ETHTOOL_XDP_STAT(rx_xdp_pass),
173 	MTK_ETHTOOL_XDP_STAT(rx_xdp_drop),
174 	MTK_ETHTOOL_XDP_STAT(rx_xdp_tx),
175 	MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors),
176 	MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit),
177 	MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors),
178 };
179 
180 static const char * const mtk_clks_source_name[] = {
181 	"ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
182 	"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
183 	"sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
184 	"sgmii_ck", "eth2pll", "wocpu0", "wocpu1", "netsys0", "netsys1"
185 };
186 
187 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned int reg)
188 {
189 	__raw_writel(val, eth->base + reg);
190 }
191 
192 u32 mtk_r32(struct mtk_eth *eth, unsigned int reg)
193 {
194 	return __raw_readl(eth->base + reg);
195 }
196 
197 static u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg)
198 {
199 	u32 val;
200 
201 	val = mtk_r32(eth, reg);
202 	val &= ~mask;
203 	val |= set;
204 	mtk_w32(eth, val, reg);
205 	return reg;
206 }
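
/* mtk_w32()/mtk_r32() are raw MMIO accessors into the frame engine register
 * window (no byte swapping, no implied barriers), and mtk_m32() is a
 * read-modify-write helper on top of them.  For example,
 *
 *	mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
 *
 * clears RXC_RST while leaving the other TRGMII_RCK_CTRL bits untouched.
 */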
207 
208 static int mtk_mdio_busy_wait(struct mtk_eth *eth)
209 {
210 	unsigned long t_start = jiffies;
211 
212 	while (1) {
213 		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
214 			return 0;
215 		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
216 			break;
217 		cond_resched();
218 	}
219 
220 	dev_err(eth->dev, "mdio: MDIO timeout\n");
221 	return -ETIMEDOUT;
222 }
223 
224 static int _mtk_mdio_write_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
225 			       u32 write_data)
226 {
227 	int ret;
228 
229 	ret = mtk_mdio_busy_wait(eth);
230 	if (ret < 0)
231 		return ret;
232 
233 	mtk_w32(eth, PHY_IAC_ACCESS |
234 		PHY_IAC_START_C22 |
235 		PHY_IAC_CMD_WRITE |
236 		PHY_IAC_REG(phy_reg) |
237 		PHY_IAC_ADDR(phy_addr) |
238 		PHY_IAC_DATA(write_data),
239 		MTK_PHY_IAC);
240 
241 	ret = mtk_mdio_busy_wait(eth);
242 	if (ret < 0)
243 		return ret;
244 
245 	return 0;
246 }
247 
248 static int _mtk_mdio_write_c45(struct mtk_eth *eth, u32 phy_addr,
249 			       u32 devad, u32 phy_reg, u32 write_data)
250 {
251 	int ret;
252 
253 	ret = mtk_mdio_busy_wait(eth);
254 	if (ret < 0)
255 		return ret;
256 
257 	mtk_w32(eth, PHY_IAC_ACCESS |
258 		PHY_IAC_START_C45 |
259 		PHY_IAC_CMD_C45_ADDR |
260 		PHY_IAC_REG(devad) |
261 		PHY_IAC_ADDR(phy_addr) |
262 		PHY_IAC_DATA(phy_reg),
263 		MTK_PHY_IAC);
264 
265 	ret = mtk_mdio_busy_wait(eth);
266 	if (ret < 0)
267 		return ret;
268 
269 	mtk_w32(eth, PHY_IAC_ACCESS |
270 		PHY_IAC_START_C45 |
271 		PHY_IAC_CMD_WRITE |
272 		PHY_IAC_REG(devad) |
273 		PHY_IAC_ADDR(phy_addr) |
274 		PHY_IAC_DATA(write_data),
275 		MTK_PHY_IAC);
276 
277 	ret = mtk_mdio_busy_wait(eth);
278 	if (ret < 0)
279 		return ret;
280 
281 	return 0;
282 }
283 
284 static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
285 {
286 	int ret;
287 
288 	ret = mtk_mdio_busy_wait(eth);
289 	if (ret < 0)
290 		return ret;
291 
292 	mtk_w32(eth, PHY_IAC_ACCESS |
293 		PHY_IAC_START_C22 |
294 		PHY_IAC_CMD_C22_READ |
295 		PHY_IAC_REG(phy_reg) |
296 		PHY_IAC_ADDR(phy_addr),
297 		MTK_PHY_IAC);
298 
299 	ret = mtk_mdio_busy_wait(eth);
300 	if (ret < 0)
301 		return ret;
302 
303 	return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
304 }
305 
306 static int _mtk_mdio_read_c45(struct mtk_eth *eth, u32 phy_addr,
307 			      u32 devad, u32 phy_reg)
308 {
309 	int ret;
310 
311 	ret = mtk_mdio_busy_wait(eth);
312 	if (ret < 0)
313 		return ret;
314 
315 	mtk_w32(eth, PHY_IAC_ACCESS |
316 		PHY_IAC_START_C45 |
317 		PHY_IAC_CMD_C45_ADDR |
318 		PHY_IAC_REG(devad) |
319 		PHY_IAC_ADDR(phy_addr) |
320 		PHY_IAC_DATA(phy_reg),
321 		MTK_PHY_IAC);
322 
323 	ret = mtk_mdio_busy_wait(eth);
324 	if (ret < 0)
325 		return ret;
326 
327 	mtk_w32(eth, PHY_IAC_ACCESS |
328 		PHY_IAC_START_C45 |
329 		PHY_IAC_CMD_C45_READ |
330 		PHY_IAC_REG(devad) |
331 		PHY_IAC_ADDR(phy_addr),
332 		MTK_PHY_IAC);
333 
334 	ret = mtk_mdio_busy_wait(eth);
335 	if (ret < 0)
336 		return ret;
337 
338 	return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
339 }
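
/* A Clause 45 access is two IAC transactions: an address cycle
 * (PHY_IAC_CMD_C45_ADDR, with the target register number carried in the
 * data field) followed by the actual read or write cycle.  That is why the
 * _c45 helpers above wait for the IAC to go idle and issue the command
 * twice, whereas the _c22 helpers need a single transaction.
 */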
340 
341 static int mtk_mdio_write_c22(struct mii_bus *bus, int phy_addr,
342 			      int phy_reg, u16 val)
343 {
344 	struct mtk_eth *eth = bus->priv;
345 
346 	return _mtk_mdio_write_c22(eth, phy_addr, phy_reg, val);
347 }
348 
349 static int mtk_mdio_write_c45(struct mii_bus *bus, int phy_addr,
350 			      int devad, int phy_reg, u16 val)
351 {
352 	struct mtk_eth *eth = bus->priv;
353 
354 	return _mtk_mdio_write_c45(eth, phy_addr, devad, phy_reg, val);
355 }
356 
357 static int mtk_mdio_read_c22(struct mii_bus *bus, int phy_addr, int phy_reg)
358 {
359 	struct mtk_eth *eth = bus->priv;
360 
361 	return _mtk_mdio_read_c22(eth, phy_addr, phy_reg);
362 }
363 
364 static int mtk_mdio_read_c45(struct mii_bus *bus, int phy_addr, int devad,
365 			     int phy_reg)
366 {
367 	struct mtk_eth *eth = bus->priv;
368 
369 	return _mtk_mdio_read_c45(eth, phy_addr, devad, phy_reg);
370 }
371 
372 static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
373 				     phy_interface_t interface)
374 {
375 	u32 val;
376 
377 	val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
378 		ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
379 
380 	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
381 			   ETHSYS_TRGMII_MT7621_MASK, val);
382 
383 	return 0;
384 }
385 
386 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
387 				   phy_interface_t interface, int speed)
388 {
389 	unsigned long rate;
390 	u32 tck, rck, intf;
391 	int ret;
392 
393 	if (interface == PHY_INTERFACE_MODE_TRGMII) {
394 		mtk_w32(eth, TRGMII_MODE, INTF_MODE);
395 		ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], 500000000);
396 		if (ret)
397 			dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
398 		return;
399 	}
400 
401 	if (speed == SPEED_1000) {
402 		intf = INTF_MODE_RGMII_1000;
403 		rate = 250000000;
404 		rck = RCK_CTRL_RGMII_1000;
405 		tck = TCK_CTRL_RGMII_1000;
406 	} else {
407 		intf = INTF_MODE_RGMII_10_100;
408 		rate = 500000000;
409 		rck = RCK_CTRL_RGMII_10_100;
410 		tck = TCK_CTRL_RGMII_10_100;
411 	}
412 
413 	mtk_w32(eth, intf, INTF_MODE);
414 
415 	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
416 			   ETHSYS_TRGMII_CLK_SEL362_5,
417 			   ETHSYS_TRGMII_CLK_SEL362_5);
418 
419 	ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], rate);
420 	if (ret)
421 		dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
422 
423 	mtk_w32(eth, rck, TRGMII_RCK_CTRL);
424 	mtk_w32(eth, tck, TRGMII_TCK_CTRL);
425 }
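
/* For GMAC0, TRGMII mode forces the TRGPLL clock to 500 MHz and selects
 * TRGMII_MODE, while plain RGMII programs the interface mode, the RX/TX
 * clock controls and a 250 MHz (1000M) or 500 MHz (10/100M) TRGPLL rate
 * depending on the link speed passed in.
 */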
426 
427 static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
428 					      phy_interface_t interface)
429 {
430 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
431 					   phylink_config);
432 	struct mtk_eth *eth = mac->hw;
433 	unsigned int sid;
434 
435 	if (interface == PHY_INTERFACE_MODE_SGMII ||
436 	    phy_interface_mode_is_8023z(interface)) {
437 		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
438 		       0 : mac->id;
439 
440 		return mtk_sgmii_select_pcs(eth->sgmii, sid);
441 	}
442 
443 	return NULL;
444 }
445 
446 static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
447 			   const struct phylink_link_state *state)
448 {
449 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
450 					   phylink_config);
451 	struct mtk_eth *eth = mac->hw;
452 	int val, ge_mode, err = 0;
453 	u32 i;
454 
455 	/* MT76x8 has no hardware settings to configure for the MAC */
456 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
457 	    mac->interface != state->interface) {
458 		/* Setup soc pin functions */
459 		switch (state->interface) {
460 		case PHY_INTERFACE_MODE_TRGMII:
461 		case PHY_INTERFACE_MODE_RGMII_TXID:
462 		case PHY_INTERFACE_MODE_RGMII_RXID:
463 		case PHY_INTERFACE_MODE_RGMII_ID:
464 		case PHY_INTERFACE_MODE_RGMII:
465 		case PHY_INTERFACE_MODE_MII:
466 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
467 				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
468 				if (err)
469 					goto init_err;
470 			}
471 			break;
472 		case PHY_INTERFACE_MODE_1000BASEX:
473 		case PHY_INTERFACE_MODE_2500BASEX:
474 		case PHY_INTERFACE_MODE_SGMII:
475 			err = mtk_gmac_sgmii_path_setup(eth, mac->id);
476 			if (err)
477 				goto init_err;
478 			break;
479 		case PHY_INTERFACE_MODE_GMII:
480 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
481 				err = mtk_gmac_gephy_path_setup(eth, mac->id);
482 				if (err)
483 					goto init_err;
484 			}
485 			break;
486 		default:
487 			goto err_phy;
488 		}
489 
490 		/* Setup clock for 1st gmac */
491 		if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
492 		    !phy_interface_mode_is_8023z(state->interface) &&
493 		    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
494 			if (MTK_HAS_CAPS(mac->hw->soc->caps,
495 					 MTK_TRGMII_MT7621_CLK)) {
496 				if (mt7621_gmac0_rgmii_adjust(mac->hw,
497 							      state->interface))
498 					goto err_phy;
499 			} else {
500 				/* FIXME: this is incorrect. Not only does it
501 				 * use state->speed (which is not guaranteed
502 				 * to be correct) but it also makes use of it
503 				 * in a code path that will only be reachable
504 				 * when the PHY interface mode changes, not
505 				 * when the speed changes. Consequently, RGMII
506 				 * is probably broken.
507 				 */
508 				mtk_gmac0_rgmii_adjust(mac->hw,
509 						       state->interface,
510 						       state->speed);
511 
512 				/* mt7623_pad_clk_setup */
513 				for (i = 0; i < NUM_TRGMII_CTRL; i++)
514 					mtk_w32(mac->hw,
515 						TD_DM_DRVP(8) | TD_DM_DRVN(8),
516 						TRGMII_TD_ODT(i));
517 
518 				/* Assert/release MT7623 RXC reset */
519 				mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
520 					TRGMII_RCK_CTRL);
521 				mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
522 			}
523 		}
524 
525 		switch (state->interface) {
526 		case PHY_INTERFACE_MODE_MII:
527 		case PHY_INTERFACE_MODE_GMII:
528 			ge_mode = 1;
529 			break;
530 		default:
531 			ge_mode = 0;
532 			break;
533 		}
534 
535 		/* put the gmac into the right mode */
536 		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
537 		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
538 		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
539 		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
540 
541 		mac->interface = state->interface;
542 	}
543 
544 	/* SGMII */
545 	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
546 	    phy_interface_mode_is_8023z(state->interface)) {
547 		/* The path from GMAC to SGMII will be enabled once the SGMIISYS
548 		 * setup is done.
549 		 */
550 		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
551 
552 		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
553 				   SYSCFG0_SGMII_MASK,
554 				   ~(u32)SYSCFG0_SGMII_MASK);
555 
556 		/* Save the syscfg0 value for mac_finish */
557 		mac->syscfg0 = val;
558 	} else if (phylink_autoneg_inband(mode)) {
559 		dev_err(eth->dev,
560 			"In-band mode not supported in non SGMII mode!\n");
561 		return;
562 	}
563 
564 	return;
565 
566 err_phy:
567 	dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
568 		mac->id, phy_modes(state->interface));
569 	return;
570 
571 init_err:
572 	dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
573 		mac->id, phy_modes(state->interface), err);
574 }
575 
576 static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
577 			  phy_interface_t interface)
578 {
579 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
580 					   phylink_config);
581 	struct mtk_eth *eth = mac->hw;
582 	u32 mcr_cur, mcr_new;
583 
584 	/* Enable SGMII */
585 	if (interface == PHY_INTERFACE_MODE_SGMII ||
586 	    phy_interface_mode_is_8023z(interface))
587 		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
588 				   SYSCFG0_SGMII_MASK, mac->syscfg0);
589 
590 	/* Setup gmac */
591 	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
592 	mcr_new = mcr_cur;
593 	mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
594 		   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK |
595 		   MAC_MCR_RX_FIFO_CLR_DIS;
596 
597 	/* Only update control register when needed! */
598 	if (mcr_new != mcr_cur)
599 		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
600 
601 	return 0;
602 }
603 
604 static void mtk_mac_pcs_get_state(struct phylink_config *config,
605 				  struct phylink_link_state *state)
606 {
607 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
608 					   phylink_config);
609 	u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
610 
611 	state->link = (pmsr & MAC_MSR_LINK);
612 	state->duplex = (pmsr & MAC_MSR_DPX) >> 1;
613 
614 	switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
615 	case 0:
616 		state->speed = SPEED_10;
617 		break;
618 	case MAC_MSR_SPEED_100:
619 		state->speed = SPEED_100;
620 		break;
621 	case MAC_MSR_SPEED_1000:
622 		state->speed = SPEED_1000;
623 		break;
624 	default:
625 		state->speed = SPEED_UNKNOWN;
626 		break;
627 	}
628 
629 	state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
630 	if (pmsr & MAC_MSR_RX_FC)
631 		state->pause |= MLO_PAUSE_RX;
632 	if (pmsr & MAC_MSR_TX_FC)
633 		state->pause |= MLO_PAUSE_TX;
634 }
635 
636 static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
637 			      phy_interface_t interface)
638 {
639 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
640 					   phylink_config);
641 	u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
642 
643 	mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
644 	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
645 }
646 
647 static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
648 				int speed)
649 {
650 	const struct mtk_soc_data *soc = eth->soc;
651 	u32 ofs, val;
652 
653 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
654 		return;
655 
656 	val = MTK_QTX_SCH_MIN_RATE_EN |
657 	      /* minimum: 10 Mbps */
658 	      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
659 	      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
660 	      MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
661 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
662 		val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
663 
664 	if (IS_ENABLED(CONFIG_SOC_MT7621)) {
665 		switch (speed) {
666 		case SPEED_10:
667 			val |= MTK_QTX_SCH_MAX_RATE_EN |
668 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
669 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 2) |
670 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
671 			break;
672 		case SPEED_100:
673 			val |= MTK_QTX_SCH_MAX_RATE_EN |
674 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
675 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) |
676 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
677 			break;
678 		case SPEED_1000:
679 			val |= MTK_QTX_SCH_MAX_RATE_EN |
680 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 105) |
681 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
682 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
683 			break;
684 		default:
685 			break;
686 		}
687 	} else {
688 		switch (speed) {
689 		case SPEED_10:
690 			val |= MTK_QTX_SCH_MAX_RATE_EN |
691 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
692 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
693 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
694 			break;
695 		case SPEED_100:
696 			val |= MTK_QTX_SCH_MAX_RATE_EN |
697 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
698 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
699 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
700 			break;
701 		case SPEED_1000:
702 			val |= MTK_QTX_SCH_MAX_RATE_EN |
703 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 10) |
704 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
705 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
706 			break;
707 		default:
708 			break;
709 		}
710 	}
711 
712 	ofs = MTK_QTX_OFFSET * idx;
713 	mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
714 }
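
/* The QTX_SCH MIN/MAX rate fields appear to encode a rate of
 * MAN * 10^EXP kbit/s: the "minimum: 10 Mbps" setting above uses MAN=1,
 * EXP=4, and the 1 Gbit/s cap on non-MT7621 SoCs uses MAN=10, EXP=5.
 * The MT7621 values (e.g. 103 * 10^3 kbit/s for a 100 Mbit/s link) leave
 * a little headroom above nominal line rate.
 */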
715 
716 static void mtk_mac_link_up(struct phylink_config *config,
717 			    struct phy_device *phy,
718 			    unsigned int mode, phy_interface_t interface,
719 			    int speed, int duplex, bool tx_pause, bool rx_pause)
720 {
721 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
722 					   phylink_config);
723 	u32 mcr;
724 
725 	mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
726 	mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
727 		 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
728 		 MAC_MCR_FORCE_RX_FC);
729 
730 	/* Configure speed */
731 	switch (speed) {
732 	case SPEED_2500:
733 	case SPEED_1000:
734 		mcr |= MAC_MCR_SPEED_1000;
735 		break;
736 	case SPEED_100:
737 		mcr |= MAC_MCR_SPEED_100;
738 		break;
739 	}
740 
741 	mtk_set_queue_speed(mac->hw, mac->id, speed);
742 
743 	/* Configure duplex */
744 	if (duplex == DUPLEX_FULL)
745 		mcr |= MAC_MCR_FORCE_DPX;
746 
747 	/* Configure pause modes - phylink will avoid these for half duplex */
748 	if (tx_pause)
749 		mcr |= MAC_MCR_FORCE_TX_FC;
750 	if (rx_pause)
751 		mcr |= MAC_MCR_FORCE_RX_FC;
752 
753 	mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
754 	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
755 }
756 
757 static const struct phylink_mac_ops mtk_phylink_ops = {
758 	.mac_select_pcs = mtk_mac_select_pcs,
759 	.mac_pcs_get_state = mtk_mac_pcs_get_state,
760 	.mac_config = mtk_mac_config,
761 	.mac_finish = mtk_mac_finish,
762 	.mac_link_down = mtk_mac_link_down,
763 	.mac_link_up = mtk_mac_link_up,
764 };
765 
766 static int mtk_mdio_init(struct mtk_eth *eth)
767 {
768 	struct device_node *mii_np;
769 	int ret;
770 
771 	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
772 	if (!mii_np) {
773 		dev_err(eth->dev, "no %s child node found\n", "mdio-bus");
774 		return -ENODEV;
775 	}
776 
777 	if (!of_device_is_available(mii_np)) {
778 		ret = -ENODEV;
779 		goto err_put_node;
780 	}
781 
782 	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
783 	if (!eth->mii_bus) {
784 		ret = -ENOMEM;
785 		goto err_put_node;
786 	}
787 
788 	eth->mii_bus->name = "mdio";
789 	eth->mii_bus->read = mtk_mdio_read_c22;
790 	eth->mii_bus->write = mtk_mdio_write_c22;
791 	eth->mii_bus->read_c45 = mtk_mdio_read_c45;
792 	eth->mii_bus->write_c45 = mtk_mdio_write_c45;
793 	eth->mii_bus->priv = eth;
794 	eth->mii_bus->parent = eth->dev;
795 
796 	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
797 	ret = of_mdiobus_register(eth->mii_bus, mii_np);
798 
799 err_put_node:
800 	of_node_put(mii_np);
801 	return ret;
802 }
803 
804 static void mtk_mdio_cleanup(struct mtk_eth *eth)
805 {
806 	if (!eth->mii_bus)
807 		return;
808 
809 	mdiobus_unregister(eth->mii_bus);
810 }
811 
812 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
813 {
814 	unsigned long flags;
815 	u32 val;
816 
817 	spin_lock_irqsave(&eth->tx_irq_lock, flags);
818 	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
819 	mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
820 	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
821 }
822 
823 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
824 {
825 	unsigned long flags;
826 	u32 val;
827 
828 	spin_lock_irqsave(&eth->tx_irq_lock, flags);
829 	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
830 	mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
831 	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
832 }
833 
834 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
835 {
836 	unsigned long flags;
837 	u32 val;
838 
839 	spin_lock_irqsave(&eth->rx_irq_lock, flags);
840 	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
841 	mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
842 	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
843 }
844 
845 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
846 {
847 	unsigned long flags;
848 	u32 val;
849 
850 	spin_lock_irqsave(&eth->rx_irq_lock, flags);
851 	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
852 	mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
853 	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
854 }
855 
856 static int mtk_set_mac_address(struct net_device *dev, void *p)
857 {
858 	int ret = eth_mac_addr(dev, p);
859 	struct mtk_mac *mac = netdev_priv(dev);
860 	struct mtk_eth *eth = mac->hw;
861 	const char *macaddr = dev->dev_addr;
862 
863 	if (ret)
864 		return ret;
865 
866 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
867 		return -EBUSY;
868 
869 	spin_lock_bh(&mac->hw->page_lock);
870 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
871 		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
872 			MT7628_SDM_MAC_ADRH);
873 		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
874 			(macaddr[4] << 8) | macaddr[5],
875 			MT7628_SDM_MAC_ADRL);
876 	} else {
877 		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
878 			MTK_GDMA_MAC_ADRH(mac->id));
879 		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
880 			(macaddr[4] << 8) | macaddr[5],
881 			MTK_GDMA_MAC_ADRL(mac->id));
882 	}
883 	spin_unlock_bh(&mac->hw->page_lock);
884 
885 	return 0;
886 }
887 
888 void mtk_stats_update_mac(struct mtk_mac *mac)
889 {
890 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
891 	struct mtk_eth *eth = mac->hw;
892 
893 	u64_stats_update_begin(&hw_stats->syncp);
894 
895 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
896 		hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
897 		hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
898 		hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
899 		hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
900 		hw_stats->rx_checksum_errors +=
901 			mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
902 	} else {
903 		const struct mtk_reg_map *reg_map = eth->soc->reg_map;
904 		unsigned int offs = hw_stats->reg_offset;
905 		u64 stats;
906 
907 		hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
908 		stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
909 		if (stats)
910 			hw_stats->rx_bytes += (stats << 32);
911 		hw_stats->rx_packets +=
912 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
913 		hw_stats->rx_overflow +=
914 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
915 		hw_stats->rx_fcs_errors +=
916 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
917 		hw_stats->rx_short_errors +=
918 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
919 		hw_stats->rx_long_errors +=
920 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
921 		hw_stats->rx_checksum_errors +=
922 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
923 		hw_stats->rx_flow_control_packets +=
924 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
925 		hw_stats->tx_skip +=
926 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
927 		hw_stats->tx_collisions +=
928 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
929 		hw_stats->tx_bytes +=
930 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
931 		stats =  mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
932 		if (stats)
933 			hw_stats->tx_bytes += (stats << 32);
934 		hw_stats->tx_packets +=
935 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
936 	}
937 
938 	u64_stats_update_end(&hw_stats->syncp);
939 }
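
/* On everything except MT7628 the per-MAC MIB counters sit in a contiguous
 * block at gdm1_cnt + reg_offset: the byte counters are 64 bit (low word
 * followed by a high word at +0x4 / +0x34), the rest are 32 bit.  The
 * unconditional "+=" accumulation under the u64_stats sequence counter
 * suggests the hardware counters are clear-on-read.
 */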
940 
941 static void mtk_stats_update(struct mtk_eth *eth)
942 {
943 	int i;
944 
945 	for (i = 0; i < MTK_MAC_COUNT; i++) {
946 		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
947 			continue;
948 		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
949 			mtk_stats_update_mac(eth->mac[i]);
950 			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
951 		}
952 	}
953 }
954 
955 static void mtk_get_stats64(struct net_device *dev,
956 			    struct rtnl_link_stats64 *storage)
957 {
958 	struct mtk_mac *mac = netdev_priv(dev);
959 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
960 	unsigned int start;
961 
962 	if (netif_running(dev) && netif_device_present(dev)) {
963 		if (spin_trylock_bh(&hw_stats->stats_lock)) {
964 			mtk_stats_update_mac(mac);
965 			spin_unlock_bh(&hw_stats->stats_lock);
966 		}
967 	}
968 
969 	do {
970 		start = u64_stats_fetch_begin(&hw_stats->syncp);
971 		storage->rx_packets = hw_stats->rx_packets;
972 		storage->tx_packets = hw_stats->tx_packets;
973 		storage->rx_bytes = hw_stats->rx_bytes;
974 		storage->tx_bytes = hw_stats->tx_bytes;
975 		storage->collisions = hw_stats->tx_collisions;
976 		storage->rx_length_errors = hw_stats->rx_short_errors +
977 			hw_stats->rx_long_errors;
978 		storage->rx_over_errors = hw_stats->rx_overflow;
979 		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
980 		storage->rx_errors = hw_stats->rx_checksum_errors;
981 		storage->tx_aborted_errors = hw_stats->tx_skip;
982 	} while (u64_stats_fetch_retry(&hw_stats->syncp, start));
983 
984 	storage->tx_errors = dev->stats.tx_errors;
985 	storage->rx_dropped = dev->stats.rx_dropped;
986 	storage->tx_dropped = dev->stats.tx_dropped;
987 }
988 
989 static inline int mtk_max_frag_size(int mtu)
990 {
991 	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
992 	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K)
993 		mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
994 
995 	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
996 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
997 }
998 
999 static inline int mtk_max_buf_size(int frag_size)
1000 {
1001 	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
1002 		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1003 
1004 	WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K);
1005 
1006 	return buf_size;
1007 }
1008 
1009 static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
1010 			    struct mtk_rx_dma_v2 *dma_rxd)
1011 {
1012 	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
1013 	if (!(rxd->rxd2 & RX_DMA_DONE))
1014 		return false;
1015 
1016 	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
1017 	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
1018 	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
1019 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
1020 		rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
1021 		rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
1022 	}
1023 
1024 	return true;
1025 }
1026 
1027 static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
1028 {
1029 	unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
1030 	unsigned long data;
1031 
1032 	data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
1033 				get_order(size));
1034 
1035 	return (void *)data;
1036 }
1037 
1038 /* the qdma core needs scratch memory to be set up */
1039 static int mtk_init_fq_dma(struct mtk_eth *eth)
1040 {
1041 	const struct mtk_soc_data *soc = eth->soc;
1042 	dma_addr_t phy_ring_tail;
1043 	int cnt = MTK_QDMA_RING_SIZE;
1044 	dma_addr_t dma_addr;
1045 	int i;
1046 
1047 	eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
1048 					       cnt * soc->txrx.txd_size,
1049 					       &eth->phy_scratch_ring,
1050 					       GFP_KERNEL);
1051 	if (unlikely(!eth->scratch_ring))
1052 		return -ENOMEM;
1053 
1054 	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
1055 	if (unlikely(!eth->scratch_head))
1056 		return -ENOMEM;
1057 
1058 	dma_addr = dma_map_single(eth->dma_dev,
1059 				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
1060 				  DMA_FROM_DEVICE);
1061 	if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
1062 		return -ENOMEM;
1063 
1064 	phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
1065 
1066 	for (i = 0; i < cnt; i++) {
1067 		struct mtk_tx_dma_v2 *txd;
1068 
1069 		txd = eth->scratch_ring + i * soc->txrx.txd_size;
1070 		txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
1071 		if (i < cnt - 1)
1072 			txd->txd2 = eth->phy_scratch_ring +
1073 				    (i + 1) * soc->txrx.txd_size;
1074 
1075 		txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
1076 		txd->txd4 = 0;
1077 		if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
1078 			txd->txd5 = 0;
1079 			txd->txd6 = 0;
1080 			txd->txd7 = 0;
1081 			txd->txd8 = 0;
1082 		}
1083 	}
1084 
1085 	mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
1086 	mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
1087 	mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
1088 	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
1089 
1090 	return 0;
1091 }
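
/* The scratch ("free queue") ring built above is MTK_QDMA_RING_SIZE TX
 * descriptors chained through txd2, each owning one MTK_QDMA_PAGE_SIZE
 * slice of scratch_head.  Programming its head/tail/count/buffer length
 * into the QDMA FQ registers hands this scratch memory to the QDMA core,
 * as the comment above notes.
 */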
1092 
1093 static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
1094 {
1095 	return ring->dma + (desc - ring->phys);
1096 }
1097 
1098 static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
1099 					     void *txd, u32 txd_size)
1100 {
1101 	int idx = (txd - ring->dma) / txd_size;
1102 
1103 	return &ring->buf[idx];
1104 }
1105 
1106 static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
1107 				       struct mtk_tx_dma *dma)
1108 {
1109 	return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
1110 }
1111 
1112 static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
1113 {
1114 	return (dma - ring->dma) / txd_size;
1115 }
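
/* Small helpers for navigating the TX ring: a descriptor can be looked up
 * by DMA address (mtk_qdma_phys_to_virt), turned into a ring index
 * (txd_to_idx) or its software state (mtk_desc_to_tx_buf), or mapped to
 * the descriptor at the same position in the companion PDMA ring
 * (qdma_to_pdma).  The latter only matters on SoCs without MTK_QDMA,
 * where both rings use the 16-byte v1 descriptor format.
 */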
1116 
1117 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1118 			 struct xdp_frame_bulk *bq, bool napi)
1119 {
1120 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1121 		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
1122 			dma_unmap_single(eth->dma_dev,
1123 					 dma_unmap_addr(tx_buf, dma_addr0),
1124 					 dma_unmap_len(tx_buf, dma_len0),
1125 					 DMA_TO_DEVICE);
1126 		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
1127 			dma_unmap_page(eth->dma_dev,
1128 				       dma_unmap_addr(tx_buf, dma_addr0),
1129 				       dma_unmap_len(tx_buf, dma_len0),
1130 				       DMA_TO_DEVICE);
1131 		}
1132 	} else {
1133 		if (dma_unmap_len(tx_buf, dma_len0)) {
1134 			dma_unmap_page(eth->dma_dev,
1135 				       dma_unmap_addr(tx_buf, dma_addr0),
1136 				       dma_unmap_len(tx_buf, dma_len0),
1137 				       DMA_TO_DEVICE);
1138 		}
1139 
1140 		if (dma_unmap_len(tx_buf, dma_len1)) {
1141 			dma_unmap_page(eth->dma_dev,
1142 				       dma_unmap_addr(tx_buf, dma_addr1),
1143 				       dma_unmap_len(tx_buf, dma_len1),
1144 				       DMA_TO_DEVICE);
1145 		}
1146 	}
1147 
1148 	if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
1149 		if (tx_buf->type == MTK_TYPE_SKB) {
1150 			struct sk_buff *skb = tx_buf->data;
1151 
1152 			if (napi)
1153 				napi_consume_skb(skb, napi);
1154 			else
1155 				dev_kfree_skb_any(skb);
1156 		} else {
1157 			struct xdp_frame *xdpf = tx_buf->data;
1158 
1159 			if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
1160 				xdp_return_frame_rx_napi(xdpf);
1161 			else if (bq)
1162 				xdp_return_frame_bulk(xdpf, bq);
1163 			else
1164 				xdp_return_frame(xdpf);
1165 		}
1166 	}
1167 	tx_buf->flags = 0;
1168 	tx_buf->data = NULL;
1169 }
1170 
1171 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1172 			 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
1173 			 size_t size, int idx)
1174 {
1175 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1176 		dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1177 		dma_unmap_len_set(tx_buf, dma_len0, size);
1178 	} else {
1179 		if (idx & 1) {
1180 			txd->txd3 = mapped_addr;
1181 			txd->txd2 |= TX_DMA_PLEN1(size);
1182 			dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
1183 			dma_unmap_len_set(tx_buf, dma_len1, size);
1184 		} else {
1185 			tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1186 			txd->txd1 = mapped_addr;
1187 			txd->txd2 = TX_DMA_PLEN0(size);
1188 			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1189 			dma_unmap_len_set(tx_buf, dma_len0, size);
1190 		}
1191 	}
1192 }
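
/* On QDMA SoCs setup_tx_buf() only records the unmap address/length; the
 * descriptor itself is written by mtk_tx_set_dma_desc().  On PDMA SoCs
 * each descriptor carries up to two buffers (txd1/PLEN0 for even buffer
 * indices, txd3/PLEN1 for odd ones), which is what the "idx & 1" test
 * selects.
 */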
1193 
1194 static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
1195 				   struct mtk_tx_dma_desc_info *info)
1196 {
1197 	struct mtk_mac *mac = netdev_priv(dev);
1198 	struct mtk_eth *eth = mac->hw;
1199 	struct mtk_tx_dma *desc = txd;
1200 	u32 data;
1201 
1202 	WRITE_ONCE(desc->txd1, info->addr);
1203 
1204 	data = TX_DMA_SWC | TX_DMA_PLEN0(info->size) |
1205 	       FIELD_PREP(TX_DMA_PQID, info->qid);
1206 	if (info->last)
1207 		data |= TX_DMA_LS0;
1208 	WRITE_ONCE(desc->txd3, data);
1209 
1210 	data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
1211 	if (info->first) {
1212 		if (info->gso)
1213 			data |= TX_DMA_TSO;
1214 		/* tx checksum offload */
1215 		if (info->csum)
1216 			data |= TX_DMA_CHKSUM;
1217 		/* vlan header offload */
1218 		if (info->vlan)
1219 			data |= TX_DMA_INS_VLAN | info->vlan_tci;
1220 	}
1221 	WRITE_ONCE(desc->txd4, data);
1222 }
1223 
1224 static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
1225 				   struct mtk_tx_dma_desc_info *info)
1226 {
1227 	struct mtk_mac *mac = netdev_priv(dev);
1228 	struct mtk_tx_dma_v2 *desc = txd;
1229 	struct mtk_eth *eth = mac->hw;
1230 	u32 data;
1231 
1232 	WRITE_ONCE(desc->txd1, info->addr);
1233 
1234 	data = TX_DMA_PLEN0(info->size);
1235 	if (info->last)
1236 		data |= TX_DMA_LS0;
1237 	WRITE_ONCE(desc->txd3, data);
1238 
1239 	data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
1240 	data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
1241 	WRITE_ONCE(desc->txd4, data);
1242 
1243 	data = 0;
1244 	if (info->first) {
1245 		if (info->gso)
1246 			data |= TX_DMA_TSO_V2;
1247 		/* tx checksum offload */
1248 		if (info->csum)
1249 			data |= TX_DMA_CHKSUM_V2;
1250 	}
1251 	WRITE_ONCE(desc->txd5, data);
1252 
1253 	data = 0;
1254 	if (info->first && info->vlan)
1255 		data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
1256 	WRITE_ONCE(desc->txd6, data);
1257 
1258 	WRITE_ONCE(desc->txd7, 0);
1259 	WRITE_ONCE(desc->txd8, 0);
1260 }
1261 
1262 static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
1263 				struct mtk_tx_dma_desc_info *info)
1264 {
1265 	struct mtk_mac *mac = netdev_priv(dev);
1266 	struct mtk_eth *eth = mac->hw;
1267 
1268 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
1269 		mtk_tx_set_dma_desc_v2(dev, txd, info);
1270 	else
1271 		mtk_tx_set_dma_desc_v1(dev, txd, info);
1272 }
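
/* The 4-word v1 descriptor packs the forward port, TSO, checksum and VLAN
 * insertion flags into txd4, while the 8-word v2 layout used by NETSYS_V2
 * SoCs spreads them over txd4..txd6 and zeroes the remaining words.
 * mtk_tx_set_dma_desc() simply picks the encoder matching the SoC caps.
 */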
1273 
1274 static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
1275 		      int tx_num, struct mtk_tx_ring *ring, bool gso)
1276 {
1277 	struct mtk_tx_dma_desc_info txd_info = {
1278 		.size = skb_headlen(skb),
1279 		.gso = gso,
1280 		.csum = skb->ip_summed == CHECKSUM_PARTIAL,
1281 		.vlan = skb_vlan_tag_present(skb),
1282 		.qid = skb_get_queue_mapping(skb),
1283 		.vlan_tci = skb_vlan_tag_get(skb),
1284 		.first = true,
1285 		.last = !skb_is_nonlinear(skb),
1286 	};
1287 	struct netdev_queue *txq;
1288 	struct mtk_mac *mac = netdev_priv(dev);
1289 	struct mtk_eth *eth = mac->hw;
1290 	const struct mtk_soc_data *soc = eth->soc;
1291 	struct mtk_tx_dma *itxd, *txd;
1292 	struct mtk_tx_dma *itxd_pdma, *txd_pdma;
1293 	struct mtk_tx_buf *itx_buf, *tx_buf;
1294 	int i, n_desc = 1;
1295 	int queue = skb_get_queue_mapping(skb);
1296 	int k = 0;
1297 
1298 	txq = netdev_get_tx_queue(dev, queue);
1299 	itxd = ring->next_free;
1300 	itxd_pdma = qdma_to_pdma(ring, itxd);
1301 	if (itxd == ring->last_free)
1302 		return -ENOMEM;
1303 
1304 	itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
1305 	memset(itx_buf, 0, sizeof(*itx_buf));
1306 
1307 	txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
1308 				       DMA_TO_DEVICE);
1309 	if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1310 		return -ENOMEM;
1311 
1312 	mtk_tx_set_dma_desc(dev, itxd, &txd_info);
1313 
1314 	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1315 	itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
1316 			  MTK_TX_FLAGS_FPORT1;
1317 	setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
1318 		     k++);
1319 
1320 	/* TX SG offload */
1321 	txd = itxd;
1322 	txd_pdma = qdma_to_pdma(ring, txd);
1323 
1324 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1325 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1326 		unsigned int offset = 0;
1327 		int frag_size = skb_frag_size(frag);
1328 
1329 		while (frag_size) {
1330 			bool new_desc = true;
1331 
1332 			if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
1333 			    (i & 0x1)) {
1334 				txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1335 				txd_pdma = qdma_to_pdma(ring, txd);
1336 				if (txd == ring->last_free)
1337 					goto err_dma;
1338 
1339 				n_desc++;
1340 			} else {
1341 				new_desc = false;
1342 			}
1343 
1344 			memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1345 			txd_info.size = min_t(unsigned int, frag_size,
1346 					      soc->txrx.dma_max_len);
1347 			txd_info.qid = queue;
1348 			txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
1349 					!(frag_size - txd_info.size);
1350 			txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
1351 							 offset, txd_info.size,
1352 							 DMA_TO_DEVICE);
1353 			if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1354 				goto err_dma;
1355 
1356 			mtk_tx_set_dma_desc(dev, txd, &txd_info);
1357 
1358 			tx_buf = mtk_desc_to_tx_buf(ring, txd,
1359 						    soc->txrx.txd_size);
1360 			if (new_desc)
1361 				memset(tx_buf, 0, sizeof(*tx_buf));
1362 			tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1363 			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
1364 			tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
1365 					 MTK_TX_FLAGS_FPORT1;
1366 
1367 			setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
1368 				     txd_info.size, k++);
1369 
1370 			frag_size -= txd_info.size;
1371 			offset += txd_info.size;
1372 		}
1373 	}
1374 
1375 	/* store skb for cleanup */
1376 	itx_buf->type = MTK_TYPE_SKB;
1377 	itx_buf->data = skb;
1378 
1379 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1380 		if (k & 0x1)
1381 			txd_pdma->txd2 |= TX_DMA_LS0;
1382 		else
1383 			txd_pdma->txd2 |= TX_DMA_LS1;
1384 	}
1385 
1386 	netdev_tx_sent_queue(txq, skb->len);
1387 	skb_tx_timestamp(skb);
1388 
1389 	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1390 	atomic_sub(n_desc, &ring->free_count);
1391 
1392 	/* make sure that all changes to the dma ring are flushed before we
1393 	 * continue
1394 	 */
1395 	wmb();
1396 
1397 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1398 		if (netif_xmit_stopped(txq) || !netdev_xmit_more())
1399 			mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1400 	} else {
1401 		int next_idx;
1402 
1403 		next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
1404 					 ring->dma_size);
1405 		mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
1406 	}
1407 
1408 	return 0;
1409 
1410 err_dma:
1411 	do {
1412 		tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
1413 
1414 		/* unmap dma */
1415 		mtk_tx_unmap(eth, tx_buf, NULL, false);
1416 
1417 		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1418 		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
1419 			itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
1420 
1421 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
1422 		itxd_pdma = qdma_to_pdma(ring, itxd);
1423 	} while (itxd != txd);
1424 
1425 	return -ENOMEM;
1426 }
1427 
1428 static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
1429 {
1430 	int i, nfrags = 1;
1431 	skb_frag_t *frag;
1432 
1433 	if (skb_is_gso(skb)) {
1434 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1435 			frag = &skb_shinfo(skb)->frags[i];
1436 			nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1437 					       eth->soc->txrx.dma_max_len);
1438 		}
1439 	} else {
1440 		nfrags += skb_shinfo(skb)->nr_frags;
1441 	}
1442 
1443 	return nfrags;
1444 }
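
/* mtk_cal_txd_req() returns the worst-case number of TX descriptors an skb
 * may consume: one for the linear area plus, for GSO frames, enough
 * descriptors to carve every fragment into dma_max_len sized pieces;
 * non-GSO frames take one descriptor per fragment.
 */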
1445 
1446 static int mtk_queue_stopped(struct mtk_eth *eth)
1447 {
1448 	int i;
1449 
1450 	for (i = 0; i < MTK_MAC_COUNT; i++) {
1451 		if (!eth->netdev[i])
1452 			continue;
1453 		if (netif_queue_stopped(eth->netdev[i]))
1454 			return 1;
1455 	}
1456 
1457 	return 0;
1458 }
1459 
1460 static void mtk_wake_queue(struct mtk_eth *eth)
1461 {
1462 	int i;
1463 
1464 	for (i = 0; i < MTK_MAC_COUNT; i++) {
1465 		if (!eth->netdev[i])
1466 			continue;
1467 		netif_tx_wake_all_queues(eth->netdev[i]);
1468 	}
1469 }
1470 
1471 static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1472 {
1473 	struct mtk_mac *mac = netdev_priv(dev);
1474 	struct mtk_eth *eth = mac->hw;
1475 	struct mtk_tx_ring *ring = &eth->tx_ring;
1476 	struct net_device_stats *stats = &dev->stats;
1477 	bool gso = false;
1478 	int tx_num;
1479 
1480 	/* normally we can rely on the stack not calling this more than once,
1481 	 * however we have 2 queues running on the same ring so we need to lock
1482 	 * the ring access
1483 	 */
1484 	spin_lock(&eth->page_lock);
1485 
1486 	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1487 		goto drop;
1488 
1489 	tx_num = mtk_cal_txd_req(eth, skb);
1490 	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1491 		netif_tx_stop_all_queues(dev);
1492 		netif_err(eth, tx_queued, dev,
1493 			  "Tx Ring full when queue awake!\n");
1494 		spin_unlock(&eth->page_lock);
1495 		return NETDEV_TX_BUSY;
1496 	}
1497 
1498 	/* TSO: fill MSS info in tcp checksum field */
1499 	if (skb_is_gso(skb)) {
1500 		if (skb_cow_head(skb, 0)) {
1501 			netif_warn(eth, tx_err, dev,
1502 				   "GSO expand head fail.\n");
1503 			goto drop;
1504 		}
1505 
1506 		if (skb_shinfo(skb)->gso_type &
1507 				(SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1508 			gso = true;
1509 			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1510 		}
1511 	}
1512 
1513 	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1514 		goto drop;
1515 
1516 	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1517 		netif_tx_stop_all_queues(dev);
1518 
1519 	spin_unlock(&eth->page_lock);
1520 
1521 	return NETDEV_TX_OK;
1522 
1523 drop:
1524 	spin_unlock(&eth->page_lock);
1525 	stats->tx_dropped++;
1526 	dev_kfree_skb_any(skb);
1527 	return NETDEV_TX_OK;
1528 }
1529 
1530 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1531 {
1532 	int i;
1533 	struct mtk_rx_ring *ring;
1534 	int idx;
1535 
1536 	if (!eth->hwlro)
1537 		return &eth->rx_ring[0];
1538 
1539 	for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1540 		struct mtk_rx_dma *rxd;
1541 
1542 		ring = &eth->rx_ring[i];
1543 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1544 		rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
1545 		if (rxd->rxd2 & RX_DMA_DONE) {
1546 			ring->calc_idx_update = true;
1547 			return ring;
1548 		}
1549 	}
1550 
1551 	return NULL;
1552 }
1553 
1554 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
1555 {
1556 	struct mtk_rx_ring *ring;
1557 	int i;
1558 
1559 	if (!eth->hwlro) {
1560 		ring = &eth->rx_ring[0];
1561 		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1562 	} else {
1563 		for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1564 			ring = &eth->rx_ring[i];
1565 			if (ring->calc_idx_update) {
1566 				ring->calc_idx_update = false;
1567 				mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1568 			}
1569 		}
1570 	}
1571 }
1572 
1573 static bool mtk_page_pool_enabled(struct mtk_eth *eth)
1574 {
1575 	return MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2);
1576 }
1577 
1578 static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
1579 					      struct xdp_rxq_info *xdp_q,
1580 					      int id, int size)
1581 {
1582 	struct page_pool_params pp_params = {
1583 		.order = 0,
1584 		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
1585 		.pool_size = size,
1586 		.nid = NUMA_NO_NODE,
1587 		.dev = eth->dma_dev,
1588 		.offset = MTK_PP_HEADROOM,
1589 		.max_len = MTK_PP_MAX_BUF_SIZE,
1590 	};
1591 	struct page_pool *pp;
1592 	int err;
1593 
1594 	pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
1595 							  : DMA_FROM_DEVICE;
1596 	pp = page_pool_create(&pp_params);
1597 	if (IS_ERR(pp))
1598 		return pp;
1599 
1600 	err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, id,
1601 				 eth->rx_napi.napi_id, PAGE_SIZE);
1602 	if (err < 0)
1603 		goto err_free_pp;
1604 
1605 	err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
1606 	if (err)
1607 		goto err_unregister_rxq;
1608 
1609 	return pp;
1610 
1611 err_unregister_rxq:
1612 	xdp_rxq_info_unreg(xdp_q);
1613 err_free_pp:
1614 	page_pool_destroy(pp);
1615 
1616 	return ERR_PTR(err);
1617 }
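
/* On NETSYS_V2 SoCs RX buffers come from a page_pool: the pool takes care
 * of DMA mapping and sync-for-device, reserves MTK_PP_HEADROOM of headroom
 * in every page and is registered as the memory model of the ring's
 * xdp_rxq.  When an XDP program is attached the pages are mapped
 * DMA_BIDIRECTIONAL so XDP_TX can transmit straight out of them.
 */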
1618 
1619 static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
1620 				    gfp_t gfp_mask)
1621 {
1622 	struct page *page;
1623 
1624 	page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
1625 	if (!page)
1626 		return NULL;
1627 
1628 	*dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
1629 	return page_address(page);
1630 }
1631 
1632 static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
1633 {
1634 	if (ring->page_pool)
1635 		page_pool_put_full_page(ring->page_pool,
1636 					virt_to_head_page(data), napi);
1637 	else
1638 		skb_free_frag(data);
1639 }
1640 
1641 static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
1642 			     struct mtk_tx_dma_desc_info *txd_info,
1643 			     struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
1644 			     void *data, u16 headroom, int index, bool dma_map)
1645 {
1646 	struct mtk_tx_ring *ring = &eth->tx_ring;
1647 	struct mtk_mac *mac = netdev_priv(dev);
1648 	struct mtk_tx_dma *txd_pdma;
1649 
1650 	if (dma_map) {  /* ndo_xdp_xmit */
1651 		txd_info->addr = dma_map_single(eth->dma_dev, data,
1652 						txd_info->size, DMA_TO_DEVICE);
1653 		if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
1654 			return -ENOMEM;
1655 
1656 		tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1657 	} else {
1658 		struct page *page = virt_to_head_page(data);
1659 
1660 		txd_info->addr = page_pool_get_dma_addr(page) +
1661 				 sizeof(struct xdp_frame) + headroom;
1662 		dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
1663 					   txd_info->size, DMA_BIDIRECTIONAL);
1664 	}
1665 	mtk_tx_set_dma_desc(dev, txd, txd_info);
1666 
1667 	tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
1668 	tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
1669 	tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1670 
1671 	txd_pdma = qdma_to_pdma(ring, txd);
1672 	setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
1673 		     index);
1674 
1675 	return 0;
1676 }
1677 
1678 static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
1679 				struct net_device *dev, bool dma_map)
1680 {
1681 	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
1682 	const struct mtk_soc_data *soc = eth->soc;
1683 	struct mtk_tx_ring *ring = &eth->tx_ring;
1684 	struct mtk_mac *mac = netdev_priv(dev);
1685 	struct mtk_tx_dma_desc_info txd_info = {
1686 		.size	= xdpf->len,
1687 		.first	= true,
1688 		.last	= !xdp_frame_has_frags(xdpf),
1689 		.qid	= mac->id,
1690 	};
1691 	int err, index = 0, n_desc = 1, nr_frags;
1692 	struct mtk_tx_buf *htx_buf, *tx_buf;
1693 	struct mtk_tx_dma *htxd, *txd;
1694 	void *data = xdpf->data;
1695 
1696 	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1697 		return -EBUSY;
1698 
1699 	nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
1700 	if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
1701 		return -EBUSY;
1702 
1703 	spin_lock(&eth->page_lock);
1704 
1705 	txd = ring->next_free;
1706 	if (txd == ring->last_free) {
1707 		spin_unlock(&eth->page_lock);
1708 		return -ENOMEM;
1709 	}
1710 	htxd = txd;
1711 
1712 	tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
1713 	memset(tx_buf, 0, sizeof(*tx_buf));
1714 	htx_buf = tx_buf;
1715 
1716 	for (;;) {
1717 		err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
1718 					data, xdpf->headroom, index, dma_map);
1719 		if (err < 0)
1720 			goto unmap;
1721 
1722 		if (txd_info.last)
1723 			break;
1724 
1725 		if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
1726 			txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1727 			if (txd == ring->last_free)
1728 				goto unmap;
1729 
1730 			tx_buf = mtk_desc_to_tx_buf(ring, txd,
1731 						    soc->txrx.txd_size);
1732 			memset(tx_buf, 0, sizeof(*tx_buf));
1733 			n_desc++;
1734 		}
1735 
1736 		memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1737 		txd_info.size = skb_frag_size(&sinfo->frags[index]);
1738 		txd_info.last = index + 1 == nr_frags;
1739 		txd_info.qid = mac->id;
1740 		data = skb_frag_address(&sinfo->frags[index]);
1741 
1742 		index++;
1743 	}
1744 	/* store xdpf for cleanup */
1745 	htx_buf->data = xdpf;
1746 
1747 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1748 		struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, txd);
1749 
1750 		if (index & 1)
1751 			txd_pdma->txd2 |= TX_DMA_LS0;
1752 		else
1753 			txd_pdma->txd2 |= TX_DMA_LS1;
1754 	}
1755 
1756 	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1757 	atomic_sub(n_desc, &ring->free_count);
1758 
1759 	/* make sure that all changes to the dma ring are flushed before we
1760 	 * continue
1761 	 */
1762 	wmb();
1763 
1764 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1765 		mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1766 	} else {
1767 		int idx;
1768 
1769 		idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
1770 		mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
1771 			MT7628_TX_CTX_IDX0);
1772 	}
1773 
1774 	spin_unlock(&eth->page_lock);
1775 
1776 	return 0;
1777 
1778 unmap:
1779 	while (htxd != txd) {
1780 		tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
1781 		mtk_tx_unmap(eth, tx_buf, NULL, false);
1782 
1783 		htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1784 		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1785 			struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, htxd);
1786 
1787 			txd_pdma->txd2 = TX_DMA_DESP2_DEF;
1788 		}
1789 
1790 		htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
1791 	}
1792 
1793 	spin_unlock(&eth->page_lock);
1794 
1795 	return err;
1796 }
1797 
1798 static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
1799 			struct xdp_frame **frames, u32 flags)
1800 {
1801 	struct mtk_mac *mac = netdev_priv(dev);
1802 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
1803 	struct mtk_eth *eth = mac->hw;
1804 	int i, nxmit = 0;
1805 
1806 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1807 		return -EINVAL;
1808 
1809 	for (i = 0; i < num_frame; i++) {
1810 		if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
1811 			break;
1812 		nxmit++;
1813 	}
1814 
1815 	u64_stats_update_begin(&hw_stats->syncp);
1816 	hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
1817 	hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
1818 	u64_stats_update_end(&hw_stats->syncp);
1819 
1820 	return nxmit;
1821 }
1822 
1823 static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
1824 		       struct xdp_buff *xdp, struct net_device *dev)
1825 {
1826 	struct mtk_mac *mac = netdev_priv(dev);
1827 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
1828 	u64 *count = &hw_stats->xdp_stats.rx_xdp_drop;
1829 	struct bpf_prog *prog;
1830 	u32 act = XDP_PASS;
1831 
1832 	rcu_read_lock();
1833 
1834 	prog = rcu_dereference(eth->prog);
1835 	if (!prog)
1836 		goto out;
1837 
1838 	act = bpf_prog_run_xdp(prog, xdp);
1839 	switch (act) {
1840 	case XDP_PASS:
1841 		count = &hw_stats->xdp_stats.rx_xdp_pass;
1842 		goto update_stats;
1843 	case XDP_REDIRECT:
1844 		if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
1845 			act = XDP_DROP;
1846 			break;
1847 		}
1848 
1849 		count = &hw_stats->xdp_stats.rx_xdp_redirect;
1850 		goto update_stats;
1851 	case XDP_TX: {
1852 		struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
1853 
1854 		if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
1855 			count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
1856 			act = XDP_DROP;
1857 			break;
1858 		}
1859 
1860 		count = &hw_stats->xdp_stats.rx_xdp_tx;
1861 		goto update_stats;
1862 	}
1863 	default:
1864 		bpf_warn_invalid_xdp_action(dev, prog, act);
1865 		fallthrough;
1866 	case XDP_ABORTED:
1867 		trace_xdp_exception(dev, prog, act);
1868 		fallthrough;
1869 	case XDP_DROP:
1870 		break;
1871 	}
1872 
1873 	page_pool_put_full_page(ring->page_pool,
1874 				virt_to_head_page(xdp->data), true);
1875 
1876 update_stats:
1877 	u64_stats_update_begin(&hw_stats->syncp);
1878 	*count = *count + 1;
1879 	u64_stats_update_end(&hw_stats->syncp);
1880 out:
1881 	rcu_read_unlock();
1882 
1883 	return act;
1884 }
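
/* mtk_xdp_run() returns the XDP verdict so the RX poll loop can decide what
 * to do with the buffer: XDP_PASS falls through to normal skb delivery,
 * XDP_TX/XDP_REDIRECT keep the page (the caller latches redirects in
 * xdp_flush and is expected to flush them once polling ends), and any
 * other verdict recycles the page back into the pool here.
 */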
1885 
1886 static int mtk_poll_rx(struct napi_struct *napi, int budget,
1887 		       struct mtk_eth *eth)
1888 {
1889 	struct dim_sample dim_sample = {};
1890 	struct mtk_rx_ring *ring;
1891 	bool xdp_flush = false;
1892 	int idx;
1893 	struct sk_buff *skb;
1894 	u8 *data, *new_data;
1895 	struct mtk_rx_dma_v2 *rxd, trxd;
1896 	int done = 0, bytes = 0;
1897 
1898 	while (done < budget) {
1899 		unsigned int pktlen, *rxdcsum;
1900 		bool has_hwaccel_tag = false;
1901 		struct net_device *netdev;
1902 		u16 vlan_proto, vlan_tci;
1903 		dma_addr_t dma_addr;
1904 		u32 hash, reason;
1905 		int mac = 0;
1906 
1907 		ring = mtk_get_rx_ring(eth);
1908 		if (unlikely(!ring))
1909 			goto rx_done;
1910 
1911 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1912 		rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
1913 		data = ring->data[idx];
1914 
1915 		if (!mtk_rx_get_desc(eth, &trxd, rxd))
1916 			break;
1917 
1918 		/* find out which mac the packet comes from. values start at 1 */
1919 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
1920 			mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
1921 		else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
1922 			 !(trxd.rxd4 & RX_DMA_SPECIAL_TAG))
1923 			mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
1924 
1925 		if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
1926 			     !eth->netdev[mac]))
1927 			goto release_desc;
1928 
1929 		netdev = eth->netdev[mac];
1930 
1931 		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1932 			goto release_desc;
1933 
1934 		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
1935 
1936 		/* alloc new buffer */
1937 		if (ring->page_pool) {
1938 			struct page *page = virt_to_head_page(data);
1939 			struct xdp_buff xdp;
1940 			u32 ret;
1941 
1942 			new_data = mtk_page_pool_get_buff(ring->page_pool,
1943 							  &dma_addr,
1944 							  GFP_ATOMIC);
1945 			if (unlikely(!new_data)) {
1946 				netdev->stats.rx_dropped++;
1947 				goto release_desc;
1948 			}
1949 
1950 			dma_sync_single_for_cpu(eth->dma_dev,
1951 				page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
1952 				pktlen, page_pool_get_dma_dir(ring->page_pool));
1953 
1954 			xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
1955 			xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
1956 					 false);
1957 			xdp_buff_clear_frags_flag(&xdp);
1958 
1959 			ret = mtk_xdp_run(eth, ring, &xdp, netdev);
1960 			if (ret == XDP_REDIRECT)
1961 				xdp_flush = true;
1962 
1963 			if (ret != XDP_PASS)
1964 				goto skip_rx;
1965 
1966 			skb = build_skb(data, PAGE_SIZE);
1967 			if (unlikely(!skb)) {
1968 				page_pool_put_full_page(ring->page_pool,
1969 							page, true);
1970 				netdev->stats.rx_dropped++;
1971 				goto skip_rx;
1972 			}
1973 
1974 			skb_reserve(skb, xdp.data - xdp.data_hard_start);
1975 			skb_put(skb, xdp.data_end - xdp.data);
1976 			skb_mark_for_recycle(skb);
1977 		} else {
1978 			if (ring->frag_size <= PAGE_SIZE)
1979 				new_data = napi_alloc_frag(ring->frag_size);
1980 			else
1981 				new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
1982 
1983 			if (unlikely(!new_data)) {
1984 				netdev->stats.rx_dropped++;
1985 				goto release_desc;
1986 			}
1987 
1988 			dma_addr = dma_map_single(eth->dma_dev,
1989 				new_data + NET_SKB_PAD + eth->ip_align,
1990 				ring->buf_size, DMA_FROM_DEVICE);
1991 			if (unlikely(dma_mapping_error(eth->dma_dev,
1992 						       dma_addr))) {
1993 				skb_free_frag(new_data);
1994 				netdev->stats.rx_dropped++;
1995 				goto release_desc;
1996 			}
1997 
1998 			dma_unmap_single(eth->dma_dev, trxd.rxd1,
1999 					 ring->buf_size, DMA_FROM_DEVICE);
2000 
2001 			skb = build_skb(data, ring->frag_size);
2002 			if (unlikely(!skb)) {
2003 				netdev->stats.rx_dropped++;
2004 				skb_free_frag(data);
2005 				goto skip_rx;
2006 			}
2007 
2008 			skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
2009 			skb_put(skb, pktlen);
2010 		}
2011 
2012 		skb->dev = netdev;
2013 		bytes += skb->len;
2014 
2015 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2016 			reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
2017 			hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
2018 			if (hash != MTK_RXD5_FOE_ENTRY)
2019 				skb_set_hash(skb, jhash_1word(hash, 0),
2020 					     PKT_HASH_TYPE_L4);
2021 			rxdcsum = &trxd.rxd3;
2022 		} else {
2023 			reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
2024 			hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
2025 			if (hash != MTK_RXD4_FOE_ENTRY)
2026 				skb_set_hash(skb, jhash_1word(hash, 0),
2027 					     PKT_HASH_TYPE_L4);
2028 			rxdcsum = &trxd.rxd4;
2029 		}
2030 
2031 		if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
2032 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2033 		else
2034 			skb_checksum_none_assert(skb);
2035 		skb->protocol = eth_type_trans(skb, netdev);
2036 
2037 		if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
2038 			mtk_ppe_check_skb(eth->ppe[0], skb, hash);
2039 
2040 		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
2041 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2042 				if (trxd.rxd3 & RX_DMA_VTAG_V2) {
2043 					vlan_proto = RX_DMA_VPID(trxd.rxd4);
2044 					vlan_tci = RX_DMA_VID(trxd.rxd4);
2045 					has_hwaccel_tag = true;
2046 				}
2047 			} else if (trxd.rxd2 & RX_DMA_VTAG) {
2048 				vlan_proto = RX_DMA_VPID(trxd.rxd3);
2049 				vlan_tci = RX_DMA_VID(trxd.rxd3);
2050 				has_hwaccel_tag = true;
2051 			}
2052 		}
2053 
2054 		/* When using VLAN untagging in combination with DSA, the
2055 		 * hardware treats the MTK special tag as a VLAN and untags it.
2056 		 */
2057 		if (has_hwaccel_tag && netdev_uses_dsa(netdev)) {
2058 			unsigned int port = vlan_proto & GENMASK(2, 0);
2059 
2060 			if (port < ARRAY_SIZE(eth->dsa_meta) &&
2061 			    eth->dsa_meta[port])
2062 				skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
2063 		} else if (has_hwaccel_tag) {
2064 			__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan_tci);
2065 		}
2066 
2067 		skb_record_rx_queue(skb, 0);
2068 		napi_gro_receive(napi, skb);
2069 
2070 skip_rx:
2071 		ring->data[idx] = new_data;
2072 		rxd->rxd1 = (unsigned int)dma_addr;
2073 release_desc:
2074 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2075 			rxd->rxd2 = RX_DMA_LSO;
2076 		else
2077 			rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2078 
2079 		ring->calc_idx = idx;
2080 		done++;
2081 	}
2082 
2083 rx_done:
2084 	if (done) {
2085 		/* make sure that all changes to the dma ring are flushed before
2086 		 * we continue
2087 		 */
2088 		wmb();
2089 		mtk_update_rx_cpu_idx(eth);
2090 	}
2091 
2092 	eth->rx_packets += done;
2093 	eth->rx_bytes += bytes;
2094 	dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
2095 			  &dim_sample);
2096 	net_dim(&eth->rx_dim, dim_sample);
2097 
2098 	if (xdp_flush)
2099 		xdp_do_flush_map();
2100 
2101 	return done;
2102 }
2103 
2104 struct mtk_poll_state {
2105 	struct netdev_queue *txq;
2106 	unsigned int total;
2107 	unsigned int done;
2108 	unsigned int bytes;
2109 };
2110 
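/* Account one completed TX skb: update the global TX counters and batch
 * the netdev_tx_completed_queue() (BQL) updates per TX queue via @state.
 */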
2111 static void
2112 mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac,
2113 		 struct sk_buff *skb)
2114 {
2115 	struct netdev_queue *txq;
2116 	struct net_device *dev;
2117 	unsigned int bytes = skb->len;
2118 
2119 	state->total++;
2120 	eth->tx_packets++;
2121 	eth->tx_bytes += bytes;
2122 
2123 	dev = eth->netdev[mac];
2124 	if (!dev)
2125 		return;
2126 
2127 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2128 	if (state->txq == txq) {
2129 		state->done++;
2130 		state->bytes += bytes;
2131 		return;
2132 	}
2133 
2134 	if (state->txq)
2135 		netdev_tx_completed_queue(state->txq, state->done, state->bytes);
2136 
2137 	state->txq = txq;
2138 	state->done = 1;
2139 	state->bytes = bytes;
2140 }
2141 
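/* Reclaim completed QDMA TX descriptors: walk the ring from the last CPU
 * release pointer to the hardware DMA pointer, unmap the buffers, complete
 * the skbs and write the new CPU release pointer back to the hardware.
 */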
2142 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
2143 			    struct mtk_poll_state *state)
2144 {
2145 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2146 	struct mtk_tx_ring *ring = &eth->tx_ring;
2147 	struct mtk_tx_buf *tx_buf;
2148 	struct xdp_frame_bulk bq;
2149 	struct mtk_tx_dma *desc;
2150 	u32 cpu, dma;
2151 
2152 	cpu = ring->last_free_ptr;
2153 	dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
2154 
2155 	desc = mtk_qdma_phys_to_virt(ring, cpu);
2156 	xdp_frame_bulk_init(&bq);
2157 
2158 	while ((cpu != dma) && budget) {
2159 		u32 next_cpu = desc->txd2;
2160 		int mac = 0;
2161 
2162 		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
2163 		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
2164 			break;
2165 
2166 		tx_buf = mtk_desc_to_tx_buf(ring, desc,
2167 					    eth->soc->txrx.txd_size);
2168 		if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
2169 			mac = 1;
2170 
2171 		if (!tx_buf->data)
2172 			break;
2173 
2174 		if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2175 			if (tx_buf->type == MTK_TYPE_SKB)
2176 				mtk_poll_tx_done(eth, state, mac, tx_buf->data);
2177 
2178 			budget--;
2179 		}
2180 		mtk_tx_unmap(eth, tx_buf, &bq, true);
2181 
2182 		ring->last_free = desc;
2183 		atomic_inc(&ring->free_count);
2184 
2185 		cpu = next_cpu;
2186 	}
2187 	xdp_flush_frame_bulk(&bq);
2188 
2189 	ring->last_free_ptr = cpu;
2190 	mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
2191 
2192 	return budget;
2193 }
2194 
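/* PDMA variant of TX reclaim: walk ring->buf from the software CPU index
 * to the hardware DTX index, unmapping buffers and completing skbs.
 */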
2195 static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
2196 			    struct mtk_poll_state *state)
2197 {
2198 	struct mtk_tx_ring *ring = &eth->tx_ring;
2199 	struct mtk_tx_buf *tx_buf;
2200 	struct xdp_frame_bulk bq;
2201 	struct mtk_tx_dma *desc;
2202 	u32 cpu, dma;
2203 
2204 	cpu = ring->cpu_idx;
2205 	dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
2206 	xdp_frame_bulk_init(&bq);
2207 
2208 	while ((cpu != dma) && budget) {
2209 		tx_buf = &ring->buf[cpu];
2210 		if (!tx_buf->data)
2211 			break;
2212 
2213 		if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2214 			if (tx_buf->type == MTK_TYPE_SKB)
2215 				mtk_poll_tx_done(eth, state, 0, tx_buf->data);
2216 			budget--;
2217 		}
2218 		mtk_tx_unmap(eth, tx_buf, &bq, true);
2219 
2220 		desc = ring->dma + cpu * eth->soc->txrx.txd_size;
2221 		ring->last_free = desc;
2222 		atomic_inc(&ring->free_count);
2223 
2224 		cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
2225 	}
2226 	xdp_flush_frame_bulk(&bq);
2227 
2228 	ring->cpu_idx = cpu;
2229 
2230 	return budget;
2231 }
2232 
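/* TX reclaim entry point: dispatch to the QDMA or PDMA variant, flush the
 * BQL accounting, feed the TX Net DIM sample and wake stopped queues once
 * enough descriptors are free again.
 */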
2233 static int mtk_poll_tx(struct mtk_eth *eth, int budget)
2234 {
2235 	struct mtk_tx_ring *ring = &eth->tx_ring;
2236 	struct dim_sample dim_sample = {};
2237 	struct mtk_poll_state state = {};
2238 
2239 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2240 		budget = mtk_poll_tx_qdma(eth, budget, &state);
2241 	else
2242 		budget = mtk_poll_tx_pdma(eth, budget, &state);
2243 
2244 	if (state.txq)
2245 		netdev_tx_completed_queue(state.txq, state.done, state.bytes);
2246 
2247 	dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
2248 			  &dim_sample);
2249 	net_dim(&eth->tx_dim, dim_sample);
2250 
2251 	if (mtk_queue_stopped(eth) &&
2252 	    (atomic_read(&ring->free_count) > ring->thresh))
2253 		mtk_wake_queue(eth);
2254 
2255 	return state.total;
2256 }
2257 
2258 static void mtk_handle_status_irq(struct mtk_eth *eth)
2259 {
2260 	u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
2261 
2262 	if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
2263 		mtk_stats_update(eth);
2264 		mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
2265 			MTK_INT_STATUS2);
2266 	}
2267 }
2268 
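/* TX NAPI handler: ack the TX-done interrupt, reclaim descriptors and only
 * re-enable the interrupt once the pending work is fully drained.
 */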
2269 static int mtk_napi_tx(struct napi_struct *napi, int budget)
2270 {
2271 	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
2272 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2273 	int tx_done = 0;
2274 
2275 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2276 		mtk_handle_status_irq(eth);
2277 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
2278 	tx_done = mtk_poll_tx(eth, budget);
2279 
2280 	if (unlikely(netif_msg_intr(eth))) {
2281 		dev_info(eth->dev,
2282 			 "done tx %d, intr 0x%08x/0x%x\n", tx_done,
2283 			 mtk_r32(eth, reg_map->tx_irq_status),
2284 			 mtk_r32(eth, reg_map->tx_irq_mask));
2285 	}
2286 
2287 	if (tx_done == budget)
2288 		return budget;
2289 
2290 	if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
2291 		return budget;
2292 
2293 	if (napi_complete_done(napi, tx_done))
2294 		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2295 
2296 	return tx_done;
2297 }
2298 
2299 static int mtk_napi_rx(struct napi_struct *napi, int budget)
2300 {
2301 	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
2302 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2303 	int rx_done_total = 0;
2304 
2305 	mtk_handle_status_irq(eth);
2306 
2307 	do {
2308 		int rx_done;
2309 
2310 		mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
2311 			reg_map->pdma.irq_status);
2312 		rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
2313 		rx_done_total += rx_done;
2314 
2315 		if (unlikely(netif_msg_intr(eth))) {
2316 			dev_info(eth->dev,
2317 				 "done rx %d, intr 0x%08x/0x%x\n", rx_done,
2318 				 mtk_r32(eth, reg_map->pdma.irq_status),
2319 				 mtk_r32(eth, reg_map->pdma.irq_mask));
2320 		}
2321 
2322 		if (rx_done_total == budget)
2323 			return budget;
2324 
2325 	} while (mtk_r32(eth, reg_map->pdma.irq_status) &
2326 		 eth->soc->txrx.rx_irq_done_mask);
2327 
2328 	if (napi_complete_done(napi, rx_done_total))
2329 		mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
2330 
2331 	return rx_done_total;
2332 }
2333 
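/* Allocate and initialize the TX descriptor ring. The descriptors are
 * chained via txd2; on PDMA-only SoCs a second ring (ring->dma_pdma)
 * holding the real hardware descriptors is allocated as well. Finally the
 * ring pointers and, for QDMA, the per-queue scheduler defaults are
 * programmed into the hardware.
 */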
2334 static int mtk_tx_alloc(struct mtk_eth *eth)
2335 {
2336 	const struct mtk_soc_data *soc = eth->soc;
2337 	struct mtk_tx_ring *ring = &eth->tx_ring;
2338 	int i, sz = soc->txrx.txd_size;
2339 	struct mtk_tx_dma_v2 *txd;
2340 	int ring_size;
2341 	u32 ofs, val;
2342 
2343 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
2344 		ring_size = MTK_QDMA_RING_SIZE;
2345 	else
2346 		ring_size = MTK_DMA_SIZE;
2347 
2348 	ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
2349 			       GFP_KERNEL);
2350 	if (!ring->buf)
2351 		goto no_tx_mem;
2352 
2353 	ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
2354 				       &ring->phys, GFP_KERNEL);
2355 	if (!ring->dma)
2356 		goto no_tx_mem;
2357 
2358 	for (i = 0; i < ring_size; i++) {
2359 		int next = (i + 1) % ring_size;
2360 		u32 next_ptr = ring->phys + next * sz;
2361 
2362 		txd = ring->dma + i * sz;
2363 		txd->txd2 = next_ptr;
2364 		txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
2365 		txd->txd4 = 0;
2366 		if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
2367 			txd->txd5 = 0;
2368 			txd->txd6 = 0;
2369 			txd->txd7 = 0;
2370 			txd->txd8 = 0;
2371 		}
2372 	}
2373 
2374 	/* On MT7688 (PDMA only) this driver uses the ring->dma structs
2375 	 * only as the framework. The real HW descriptors are the PDMA
2376 	 * descriptors in ring->dma_pdma.
2377 	 */
2378 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2379 		ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
2380 						    &ring->phys_pdma, GFP_KERNEL);
2381 		if (!ring->dma_pdma)
2382 			goto no_tx_mem;
2383 
2384 		for (i = 0; i < ring_size; i++) {
2385 			ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
2386 			ring->dma_pdma[i].txd4 = 0;
2387 		}
2388 	}
2389 
2390 	ring->dma_size = ring_size;
2391 	atomic_set(&ring->free_count, ring_size - 2);
2392 	ring->next_free = ring->dma;
2393 	ring->last_free = (void *)txd;
2394 	ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz));
2395 	ring->thresh = MAX_SKB_FRAGS;
2396 
2397 	/* make sure that all changes to the dma ring are flushed before we
2398 	 * continue
2399 	 */
2400 	wmb();
2401 
2402 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2403 		mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
2404 		mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
2405 		mtk_w32(eth,
2406 			ring->phys + ((ring_size - 1) * sz),
2407 			soc->reg_map->qdma.crx_ptr);
2408 		mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
2409 
2410 		for (i = 0, ofs = 0; i < MTK_QDMA_NUM_QUEUES; i++) {
2411 			val = (QDMA_RES_THRES << 8) | QDMA_RES_THRES;
2412 			mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs);
2413 
2414 			val = MTK_QTX_SCH_MIN_RATE_EN |
2415 			      /* minimum: 10 Mbps */
2416 			      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
2417 			      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
2418 			      MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
2419 			if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
2420 				val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
2421 			mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
2422 			ofs += MTK_QTX_OFFSET;
2423 		}
2424 		val = MTK_QDMA_TX_SCH_MAX_WFQ | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
2425 		mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate);
2426 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
2427 			mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4);
2428 	} else {
2429 		mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
2430 		mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0);
2431 		mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
2432 		mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
2433 	}
2434 
2435 	return 0;
2436 
2437 no_tx_mem:
2438 	return -ENOMEM;
2439 }
2440 
2441 static void mtk_tx_clean(struct mtk_eth *eth)
2442 {
2443 	const struct mtk_soc_data *soc = eth->soc;
2444 	struct mtk_tx_ring *ring = &eth->tx_ring;
2445 	int i;
2446 
2447 	if (ring->buf) {
2448 		for (i = 0; i < ring->dma_size; i++)
2449 			mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
2450 		kfree(ring->buf);
2451 		ring->buf = NULL;
2452 	}
2453 
2454 	if (ring->dma) {
2455 		dma_free_coherent(eth->dma_dev,
2456 				  ring->dma_size * soc->txrx.txd_size,
2457 				  ring->dma, ring->phys);
2458 		ring->dma = NULL;
2459 	}
2460 
2461 	if (ring->dma_pdma) {
2462 		dma_free_coherent(eth->dma_dev,
2463 				  ring->dma_size * soc->txrx.txd_size,
2464 				  ring->dma_pdma, ring->phys_pdma);
2465 		ring->dma_pdma = NULL;
2466 	}
2467 }
2468 
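/* Allocate one RX ring (normal, HW LRO or QDMA), pre-fill every descriptor
 * with a DMA-mapped buffer (taken from the page pool when it is enabled)
 * and program the ring base, size and CPU index into the DMA registers.
 */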
2469 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
2470 {
2471 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2472 	struct mtk_rx_ring *ring;
2473 	int rx_data_len, rx_dma_size;
2474 	int i;
2475 
2476 	if (rx_flag == MTK_RX_FLAGS_QDMA) {
2477 		if (ring_no)
2478 			return -EINVAL;
2479 		ring = &eth->rx_ring_qdma;
2480 	} else {
2481 		ring = &eth->rx_ring[ring_no];
2482 	}
2483 
2484 	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
2485 		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
2486 		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
2487 	} else {
2488 		rx_data_len = ETH_DATA_LEN;
2489 		rx_dma_size = MTK_DMA_SIZE;
2490 	}
2491 
2492 	ring->frag_size = mtk_max_frag_size(rx_data_len);
2493 	ring->buf_size = mtk_max_buf_size(ring->frag_size);
2494 	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
2495 			     GFP_KERNEL);
2496 	if (!ring->data)
2497 		return -ENOMEM;
2498 
2499 	if (mtk_page_pool_enabled(eth)) {
2500 		struct page_pool *pp;
2501 
2502 		pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
2503 					  rx_dma_size);
2504 		if (IS_ERR(pp))
2505 			return PTR_ERR(pp);
2506 
2507 		ring->page_pool = pp;
2508 	}
2509 
2510 	ring->dma = dma_alloc_coherent(eth->dma_dev,
2511 				       rx_dma_size * eth->soc->txrx.rxd_size,
2512 				       &ring->phys, GFP_KERNEL);
2513 	if (!ring->dma)
2514 		return -ENOMEM;
2515 
2516 	for (i = 0; i < rx_dma_size; i++) {
2517 		struct mtk_rx_dma_v2 *rxd;
2518 		dma_addr_t dma_addr;
2519 		void *data;
2520 
2521 		rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2522 		if (ring->page_pool) {
2523 			data = mtk_page_pool_get_buff(ring->page_pool,
2524 						      &dma_addr, GFP_KERNEL);
2525 			if (!data)
2526 				return -ENOMEM;
2527 		} else {
2528 			if (ring->frag_size <= PAGE_SIZE)
2529 				data = netdev_alloc_frag(ring->frag_size);
2530 			else
2531 				data = mtk_max_lro_buf_alloc(GFP_KERNEL);
2532 
2533 			if (!data)
2534 				return -ENOMEM;
2535 
2536 			dma_addr = dma_map_single(eth->dma_dev,
2537 				data + NET_SKB_PAD + eth->ip_align,
2538 				ring->buf_size, DMA_FROM_DEVICE);
2539 			if (unlikely(dma_mapping_error(eth->dma_dev,
2540 						       dma_addr))) {
2541 				skb_free_frag(data);
2542 				return -ENOMEM;
2543 			}
2544 		}
2545 		rxd->rxd1 = (unsigned int)dma_addr;
2546 		ring->data[i] = data;
2547 
2548 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2549 			rxd->rxd2 = RX_DMA_LSO;
2550 		else
2551 			rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2552 
2553 		rxd->rxd3 = 0;
2554 		rxd->rxd4 = 0;
2555 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2556 			rxd->rxd5 = 0;
2557 			rxd->rxd6 = 0;
2558 			rxd->rxd7 = 0;
2559 			rxd->rxd8 = 0;
2560 		}
2561 	}
2562 
2563 	ring->dma_size = rx_dma_size;
2564 	ring->calc_idx_update = false;
2565 	ring->calc_idx = rx_dma_size - 1;
2566 	if (rx_flag == MTK_RX_FLAGS_QDMA)
2567 		ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
2568 				    ring_no * MTK_QRX_OFFSET;
2569 	else
2570 		ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
2571 				    ring_no * MTK_QRX_OFFSET;
2572 	/* make sure that all changes to the dma ring are flushed before we
2573 	 * continue
2574 	 */
2575 	wmb();
2576 
2577 	if (rx_flag == MTK_RX_FLAGS_QDMA) {
2578 		mtk_w32(eth, ring->phys,
2579 			reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2580 		mtk_w32(eth, rx_dma_size,
2581 			reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2582 		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2583 			reg_map->qdma.rst_idx);
2584 	} else {
2585 		mtk_w32(eth, ring->phys,
2586 			reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2587 		mtk_w32(eth, rx_dma_size,
2588 			reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2589 		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2590 			reg_map->pdma.rst_idx);
2591 	}
2592 	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
2593 
2594 	return 0;
2595 }
2596 
2597 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
2598 {
2599 	int i;
2600 
2601 	if (ring->data && ring->dma) {
2602 		for (i = 0; i < ring->dma_size; i++) {
2603 			struct mtk_rx_dma *rxd;
2604 
2605 			if (!ring->data[i])
2606 				continue;
2607 
2608 			rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2609 			if (!rxd->rxd1)
2610 				continue;
2611 
2612 			dma_unmap_single(eth->dma_dev, rxd->rxd1,
2613 					 ring->buf_size, DMA_FROM_DEVICE);
2614 			mtk_rx_put_buff(ring, ring->data[i], false);
2615 		}
2616 		kfree(ring->data);
2617 		ring->data = NULL;
2618 	}
2619 
2620 	if (ring->dma) {
2621 		dma_free_coherent(eth->dma_dev,
2622 				  ring->dma_size * eth->soc->txrx.rxd_size,
2623 				  ring->dma, ring->phys);
2624 		ring->dma = NULL;
2625 	}
2626 
2627 	if (ring->page_pool) {
2628 		if (xdp_rxq_info_is_reg(&ring->xdp_q))
2629 			xdp_rxq_info_unreg(&ring->xdp_q);
2630 		page_pool_destroy(ring->page_pool);
2631 		ring->page_pool = NULL;
2632 	}
2633 }
2634 
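/* Program the HW LRO engine: ring auto-learn mode, age/aggregation timers,
 * bandwidth threshold and auto-learn score delta, then enable LRO.
 */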
2635 static int mtk_hwlro_rx_init(struct mtk_eth *eth)
2636 {
2637 	int i;
2638 	u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
2639 	u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
2640 
2641 	/* set LRO rings to auto-learn mode */
2642 	ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
2643 
2644 	/* validate LRO ring */
2645 	ring_ctrl_dw2 |= MTK_RING_VLD;
2646 
2647 	/* set AGE timer (unit: 20us) */
2648 	ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
2649 	ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
2650 
2651 	/* set max AGG timer (unit: 20us) */
2652 	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
2653 
2654 	/* set max LRO AGG count */
2655 	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
2656 	ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
2657 
2658 	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2659 		mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
2660 		mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
2661 		mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
2662 	}
2663 
2664 	/* IPv4 checksum update enable */
2665 	lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
2666 
2667 	/* switch priority comparison to packet count mode */
2668 	lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
2669 
2670 	/* bandwidth threshold setting */
2671 	mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
2672 
2673 	/* auto-learn score delta setting */
2674 	mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
2675 
2676 	/* set refresh timer for altering flows to 1 sec. (unit: 20us) */
2677 	mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
2678 		MTK_PDMA_LRO_ALT_REFRESH_TIMER);
2679 
2680 	/* set HW LRO mode & the max aggregation count for rx packets */
2681 	lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
2682 
2683 	/* the minimum remaining room of SDL0 in the RXD for LRO aggregation */
2684 	lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
2685 
2686 	/* enable HW LRO */
2687 	lro_ctrl_dw0 |= MTK_LRO_EN;
2688 
2689 	mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
2690 	mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
2691 
2692 	return 0;
2693 }
2694 
2695 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
2696 {
2697 	int i;
2698 	u32 val;
2699 
2700 	/* relinquish lro rings, flush aggregated packets */
2701 	mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
2702 
2703 	/* wait for the relinquish requests to complete */
2704 	for (i = 0; i < 10; i++) {
2705 		val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
2706 		if (val & MTK_LRO_RING_RELINQUISH_DONE) {
2707 			msleep(20);
2708 			continue;
2709 		}
2710 		break;
2711 	}
2712 
2713 	/* invalidate lro rings */
2714 	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
2715 		mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
2716 
2717 	/* disable HW LRO */
2718 	mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
2719 }
2720 
2721 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
2722 {
2723 	u32 reg_val;
2724 
2725 	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2726 
2727 	/* invalidate the IP setting */
2728 	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2729 
2730 	mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
2731 
2732 	/* validate the IP setting */
2733 	mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2734 }
2735 
2736 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
2737 {
2738 	u32 reg_val;
2739 
2740 	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2741 
2742 	/* invalidate the IP setting */
2743 	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2744 
2745 	mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
2746 }
2747 
2748 static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
2749 {
2750 	int cnt = 0;
2751 	int i;
2752 
2753 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2754 		if (mac->hwlro_ip[i])
2755 			cnt++;
2756 	}
2757 
2758 	return cnt;
2759 }
2760 
2761 static int mtk_hwlro_add_ipaddr(struct net_device *dev,
2762 				struct ethtool_rxnfc *cmd)
2763 {
2764 	struct ethtool_rx_flow_spec *fsp =
2765 		(struct ethtool_rx_flow_spec *)&cmd->fs;
2766 	struct mtk_mac *mac = netdev_priv(dev);
2767 	struct mtk_eth *eth = mac->hw;
2768 	int hwlro_idx;
2769 
2770 	if ((fsp->flow_type != TCP_V4_FLOW) ||
2771 	    (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
2772 	    (fsp->location > 1))
2773 		return -EINVAL;
2774 
2775 	mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
2776 	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2777 
2778 	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2779 
2780 	mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
2781 
2782 	return 0;
2783 }
2784 
2785 static int mtk_hwlro_del_ipaddr(struct net_device *dev,
2786 				struct ethtool_rxnfc *cmd)
2787 {
2788 	struct ethtool_rx_flow_spec *fsp =
2789 		(struct ethtool_rx_flow_spec *)&cmd->fs;
2790 	struct mtk_mac *mac = netdev_priv(dev);
2791 	struct mtk_eth *eth = mac->hw;
2792 	int hwlro_idx;
2793 
2794 	if (fsp->location > 1)
2795 		return -EINVAL;
2796 
2797 	mac->hwlro_ip[fsp->location] = 0;
2798 	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2799 
2800 	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2801 
2802 	mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2803 
2804 	return 0;
2805 }
2806 
2807 static void mtk_hwlro_netdev_disable(struct net_device *dev)
2808 {
2809 	struct mtk_mac *mac = netdev_priv(dev);
2810 	struct mtk_eth *eth = mac->hw;
2811 	int i, hwlro_idx;
2812 
2813 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2814 		mac->hwlro_ip[i] = 0;
2815 		hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
2816 
2817 		mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2818 	}
2819 
2820 	mac->hwlro_ip_cnt = 0;
2821 }
2822 
2823 static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
2824 				    struct ethtool_rxnfc *cmd)
2825 {
2826 	struct mtk_mac *mac = netdev_priv(dev);
2827 	struct ethtool_rx_flow_spec *fsp =
2828 		(struct ethtool_rx_flow_spec *)&cmd->fs;
2829 
2830 	if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
2831 		return -EINVAL;
2832 
2833 	/* only the TCP destination IPv4 address matters, other fields are ignored */
2834 	fsp->flow_type = TCP_V4_FLOW;
2835 	fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
2836 	fsp->m_u.tcp_ip4_spec.ip4dst = 0;
2837 
2838 	fsp->h_u.tcp_ip4_spec.ip4src = 0;
2839 	fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
2840 	fsp->h_u.tcp_ip4_spec.psrc = 0;
2841 	fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
2842 	fsp->h_u.tcp_ip4_spec.pdst = 0;
2843 	fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
2844 	fsp->h_u.tcp_ip4_spec.tos = 0;
2845 	fsp->m_u.tcp_ip4_spec.tos = 0xff;
2846 
2847 	return 0;
2848 }
2849 
2850 static int mtk_hwlro_get_fdir_all(struct net_device *dev,
2851 				  struct ethtool_rxnfc *cmd,
2852 				  u32 *rule_locs)
2853 {
2854 	struct mtk_mac *mac = netdev_priv(dev);
2855 	int cnt = 0;
2856 	int i;
2857 
2858 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2859 		if (mac->hwlro_ip[i]) {
2860 			rule_locs[cnt] = i;
2861 			cnt++;
2862 		}
2863 	}
2864 
2865 	cmd->rule_cnt = cnt;
2866 
2867 	return 0;
2868 }
2869 
2870 static netdev_features_t mtk_fix_features(struct net_device *dev,
2871 					  netdev_features_t features)
2872 {
2873 	if (!(features & NETIF_F_LRO)) {
2874 		struct mtk_mac *mac = netdev_priv(dev);
2875 		int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2876 
2877 		if (ip_cnt) {
2878 			netdev_info(dev, "RX flow is programmed, LRO should be kept on\n");
2879 
2880 			features |= NETIF_F_LRO;
2881 		}
2882 	}
2883 
2884 	return features;
2885 }
2886 
2887 static int mtk_set_features(struct net_device *dev, netdev_features_t features)
2888 {
2889 	struct mtk_mac *mac = netdev_priv(dev);
2890 	struct mtk_eth *eth = mac->hw;
2891 	netdev_features_t diff = dev->features ^ features;
2892 	int i;
2893 
2894 	if ((diff & NETIF_F_LRO) && !(features & NETIF_F_LRO))
2895 		mtk_hwlro_netdev_disable(dev);
2896 
2897 	/* Set RX VLAN offloading */
2898 	if (!(diff & NETIF_F_HW_VLAN_CTAG_RX))
2899 		return 0;
2900 
2901 	mtk_w32(eth, !!(features & NETIF_F_HW_VLAN_CTAG_RX),
2902 		MTK_CDMP_EG_CTRL);
2903 
2904 	/* sync features with other MAC */
2905 	for (i = 0; i < MTK_MAC_COUNT; i++) {
2906 		if (!eth->netdev[i] || eth->netdev[i] == dev)
2907 			continue;
2908 		eth->netdev[i]->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
2909 		eth->netdev[i]->features |= features & NETIF_F_HW_VLAN_CTAG_RX;
2910 	}
2911 
2912 	return 0;
2913 }
2914 
2915 /* wait for DMA to finish whatever it is doing before we start using it again */
2916 static int mtk_dma_busy_wait(struct mtk_eth *eth)
2917 {
2918 	unsigned int reg;
2919 	int ret;
2920 	u32 val;
2921 
2922 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2923 		reg = eth->soc->reg_map->qdma.glo_cfg;
2924 	else
2925 		reg = eth->soc->reg_map->pdma.glo_cfg;
2926 
2927 	ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
2928 					!(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
2929 					5, MTK_DMA_BUSY_TIMEOUT_US);
2930 	if (ret)
2931 		dev_err(eth->dev, "DMA init timeout\n");
2932 
2933 	return ret;
2934 }
2935 
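/* Bring up all DMA resources: the QDMA free-queue scratch memory, the TX
 * ring and the RX rings (including the HW LRO rings when supported), then
 * set up the QDMA flow control thresholds.
 */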
2936 static int mtk_dma_init(struct mtk_eth *eth)
2937 {
2938 	int err;
2939 	u32 i;
2940 
2941 	if (mtk_dma_busy_wait(eth))
2942 		return -EBUSY;
2943 
2944 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2945 		/* QDMA needs scratch memory for internal reordering of the
2946 		 * descriptors
2947 		 */
2948 		err = mtk_init_fq_dma(eth);
2949 		if (err)
2950 			return err;
2951 	}
2952 
2953 	err = mtk_tx_alloc(eth);
2954 	if (err)
2955 		return err;
2956 
2957 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2958 		err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
2959 		if (err)
2960 			return err;
2961 	}
2962 
2963 	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
2964 	if (err)
2965 		return err;
2966 
2967 	if (eth->hwlro) {
2968 		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2969 			err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
2970 			if (err)
2971 				return err;
2972 		}
2973 		err = mtk_hwlro_rx_init(eth);
2974 		if (err)
2975 			return err;
2976 	}
2977 
2978 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2979 		/* Enable random early drop and set drop threshold
2980 		 * automatically
2981 		 */
2982 		mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
2983 			FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
2984 		mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
2985 	}
2986 
2987 	return 0;
2988 }
2989 
2990 static void mtk_dma_free(struct mtk_eth *eth)
2991 {
2992 	const struct mtk_soc_data *soc = eth->soc;
2993 	int i;
2994 
2995 	for (i = 0; i < MTK_MAC_COUNT; i++)
2996 		if (eth->netdev[i])
2997 			netdev_reset_queue(eth->netdev[i]);
2998 	if (eth->scratch_ring) {
2999 		dma_free_coherent(eth->dma_dev,
3000 				  MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
3001 				  eth->scratch_ring, eth->phy_scratch_ring);
3002 		eth->scratch_ring = NULL;
3003 		eth->phy_scratch_ring = 0;
3004 	}
3005 	mtk_tx_clean(eth);
3006 	mtk_rx_clean(eth, &eth->rx_ring[0]);
3007 	mtk_rx_clean(eth, &eth->rx_ring_qdma);
3008 
3009 	if (eth->hwlro) {
3010 		mtk_hwlro_rx_uninit(eth);
3011 		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
3012 			mtk_rx_clean(eth, &eth->rx_ring[i]);
3013 	}
3014 
3015 	kfree(eth->scratch_head);
3016 }
3017 
3018 static bool mtk_hw_reset_check(struct mtk_eth *eth)
3019 {
3020 	u32 val = mtk_r32(eth, MTK_INT_STATUS2);
3021 
3022 	return (val & MTK_FE_INT_FQ_EMPTY) || (val & MTK_FE_INT_RFIFO_UF) ||
3023 	       (val & MTK_FE_INT_RFIFO_OV) || (val & MTK_FE_INT_TSO_FAIL) ||
3024 	       (val & MTK_FE_INT_TSO_ALIGN) || (val & MTK_FE_INT_TSO_ILLEGAL);
3025 }
3026 
3027 static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
3028 {
3029 	struct mtk_mac *mac = netdev_priv(dev);
3030 	struct mtk_eth *eth = mac->hw;
3031 
3032 	if (test_bit(MTK_RESETTING, &eth->state))
3033 		return;
3034 
3035 	if (!mtk_hw_reset_check(eth))
3036 		return;
3037 
3038 	eth->netdev[mac->id]->stats.tx_errors++;
3039 	netif_err(eth, tx_err, dev, "transmit timed out\n");
3040 
3041 	schedule_work(&eth->pending_work);
3042 }
3043 
3044 static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
3045 {
3046 	struct mtk_eth *eth = _eth;
3047 
3048 	eth->rx_events++;
3049 	if (likely(napi_schedule_prep(&eth->rx_napi))) {
3050 		__napi_schedule(&eth->rx_napi);
3051 		mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
3052 	}
3053 
3054 	return IRQ_HANDLED;
3055 }
3056 
3057 static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
3058 {
3059 	struct mtk_eth *eth = _eth;
3060 
3061 	eth->tx_events++;
3062 	if (likely(napi_schedule_prep(&eth->tx_napi))) {
3063 		__napi_schedule(&eth->tx_napi);
3064 		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3065 	}
3066 
3067 	return IRQ_HANDLED;
3068 }
3069 
3070 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
3071 {
3072 	struct mtk_eth *eth = _eth;
3073 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3074 
3075 	if (mtk_r32(eth, reg_map->pdma.irq_mask) &
3076 	    eth->soc->txrx.rx_irq_done_mask) {
3077 		if (mtk_r32(eth, reg_map->pdma.irq_status) &
3078 		    eth->soc->txrx.rx_irq_done_mask)
3079 			mtk_handle_irq_rx(irq, _eth);
3080 	}
3081 	if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
3082 		if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
3083 			mtk_handle_irq_tx(irq, _eth);
3084 	}
3085 
3086 	return IRQ_HANDLED;
3087 }
3088 
3089 #ifdef CONFIG_NET_POLL_CONTROLLER
3090 static void mtk_poll_controller(struct net_device *dev)
3091 {
3092 	struct mtk_mac *mac = netdev_priv(dev);
3093 	struct mtk_eth *eth = mac->hw;
3094 
3095 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3096 	mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
3097 	mtk_handle_irq_rx(eth->irq[2], dev);
3098 	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3099 	mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
3100 }
3101 #endif
3102 
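/* Initialize the DMA rings and enable the TX/RX DMA engines with the
 * per-SoC global configuration bits.
 */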
3103 static int mtk_start_dma(struct mtk_eth *eth)
3104 {
3105 	u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
3106 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3107 	int err;
3108 
3109 	err = mtk_dma_init(eth);
3110 	if (err) {
3111 		mtk_dma_free(eth);
3112 		return err;
3113 	}
3114 
3115 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3116 		val = mtk_r32(eth, reg_map->qdma.glo_cfg);
3117 		val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3118 		       MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
3119 		       MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
3120 
3121 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
3122 			val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
3123 			       MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
3124 			       MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN;
3125 		else
3126 			val |= MTK_RX_BT_32DWORDS;
3127 		mtk_w32(eth, val, reg_map->qdma.glo_cfg);
3128 
3129 		mtk_w32(eth,
3130 			MTK_RX_DMA_EN | rx_2b_offset |
3131 			MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
3132 			reg_map->pdma.glo_cfg);
3133 	} else {
3134 		mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3135 			MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
3136 			reg_map->pdma.glo_cfg);
3137 	}
3138 
3139 	return 0;
3140 }
3141 
3142 static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
3143 {
3144 	int i;
3145 
3146 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3147 		return;
3148 
3149 	for (i = 0; i < MTK_MAC_COUNT; i++) {
3150 		u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
3151 
3152 		/* by default, set up the forward port to send frames to the PDMA */
3153 		val &= ~0xffff;
3154 
3155 		/* Enable RX checksum */
3156 		val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
3157 
3158 		val |= config;
3159 
3160 		if (eth->netdev[i] && netdev_uses_dsa(eth->netdev[i]))
3161 			val |= MTK_GDMA_SPECIAL_TAG;
3162 
3163 		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
3164 	}
3165 	/* Reset and enable PSE */
3166 	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
3167 	mtk_w32(eth, 0, MTK_RST_GL);
3168 }
3169 
3170 
3171 static bool mtk_uses_dsa(struct net_device *dev)
3172 {
3173 #if IS_ENABLED(CONFIG_NET_DSA)
3174 	return netdev_uses_dsa(dev) &&
3175 	       dev->dsa_ptr->tag_ops->proto == DSA_TAG_PROTO_MTK;
3176 #else
3177 	return false;
3178 #endif
3179 }
3180 
3181 static int mtk_device_event(struct notifier_block *n, unsigned long event, void *ptr)
3182 {
3183 	struct mtk_mac *mac = container_of(n, struct mtk_mac, device_notifier);
3184 	struct mtk_eth *eth = mac->hw;
3185 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3186 	struct ethtool_link_ksettings s;
3187 	struct net_device *ldev;
3188 	struct list_head *iter;
3189 	struct dsa_port *dp;
3190 
3191 	if (event != NETDEV_CHANGE)
3192 		return NOTIFY_DONE;
3193 
3194 	netdev_for_each_lower_dev(dev, ldev, iter) {
3195 		if (netdev_priv(ldev) == mac)
3196 			goto found;
3197 	}
3198 
3199 	return NOTIFY_DONE;
3200 
3201 found:
3202 	if (!dsa_slave_dev_check(dev))
3203 		return NOTIFY_DONE;
3204 
3205 	if (__ethtool_get_link_ksettings(dev, &s))
3206 		return NOTIFY_DONE;
3207 
3208 	if (s.base.speed == 0 || s.base.speed == ((__u32)-1))
3209 		return NOTIFY_DONE;
3210 
3211 	dp = dsa_port_from_netdev(dev);
3212 	if (dp->index >= MTK_QDMA_NUM_QUEUES)
3213 		return NOTIFY_DONE;
3214 
3215 	mtk_set_queue_speed(eth, dp->index + 3, s.base.speed);
3216 
3217 	return NOTIFY_DONE;
3218 }
3219 
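/* .ndo_open: set up the DSA metadata destinations when needed, connect the
 * PHY via phylink and, on first use, bring up the shared DMA rings, PPE
 * units and NAPI contexts (both MACs share one set of DMA rings, hence the
 * refcount).
 */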
3220 static int mtk_open(struct net_device *dev)
3221 {
3222 	struct mtk_mac *mac = netdev_priv(dev);
3223 	struct mtk_eth *eth = mac->hw;
3224 	int i, err;
3225 
3226 	if (mtk_uses_dsa(dev) && !eth->prog) {
3227 		for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
3228 			struct metadata_dst *md_dst = eth->dsa_meta[i];
3229 
3230 			if (md_dst)
3231 				continue;
3232 
3233 			md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
3234 						    GFP_KERNEL);
3235 			if (!md_dst)
3236 				return -ENOMEM;
3237 
3238 			md_dst->u.port_info.port_id = i;
3239 			eth->dsa_meta[i] = md_dst;
3240 		}
3241 	} else {
3242 		/* Hardware special tag parsing needs to be disabled if at least
3243 		 * one MAC does not use DSA.
3244 		 */
3245 		u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3246 		val &= ~MTK_CDMP_STAG_EN;
3247 		mtk_w32(eth, val, MTK_CDMP_IG_CTRL);
3248 	}
3249 
3250 	err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
3251 	if (err) {
3252 		netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
3253 			   err);
3254 		return err;
3255 	}
3256 
3257 	/* we run 2 netdevs on the same dma ring so we only bring it up once */
3258 	if (!refcount_read(&eth->dma_refcnt)) {
3259 		const struct mtk_soc_data *soc = eth->soc;
3260 		u32 gdm_config;
3261 		int i;
3262 
3263 		err = mtk_start_dma(eth);
3264 		if (err) {
3265 			phylink_disconnect_phy(mac->phylink);
3266 			return err;
3267 		}
3268 
3269 		for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3270 			mtk_ppe_start(eth->ppe[i]);
3271 
3272 		gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe
3273 						  : MTK_GDMA_TO_PDMA;
3274 		mtk_gdm_config(eth, gdm_config);
3275 
3276 		napi_enable(&eth->tx_napi);
3277 		napi_enable(&eth->rx_napi);
3278 		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3279 		mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
3280 		refcount_set(&eth->dma_refcnt, 1);
3281 	} else {
3282 		refcount_inc(&eth->dma_refcnt);
3283 	}
3284 
3285 	phylink_start(mac->phylink);
3286 	netif_tx_start_all_queues(dev);
3287 
3288 	return 0;
3289 }
3290 
3291 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
3292 {
3293 	u32 val;
3294 	int i;
3295 
3296 	/* stop the dma engine */
3297 	spin_lock_bh(&eth->page_lock);
3298 	val = mtk_r32(eth, glo_cfg);
3299 	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
3300 		glo_cfg);
3301 	spin_unlock_bh(&eth->page_lock);
3302 
3303 	/* wait for dma stop */
3304 	for (i = 0; i < 10; i++) {
3305 		val = mtk_r32(eth, glo_cfg);
3306 		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
3307 			msleep(20);
3308 			continue;
3309 		}
3310 		break;
3311 	}
3312 }
3313 
3314 static int mtk_stop(struct net_device *dev)
3315 {
3316 	struct mtk_mac *mac = netdev_priv(dev);
3317 	struct mtk_eth *eth = mac->hw;
3318 	int i;
3319 
3320 	phylink_stop(mac->phylink);
3321 
3322 	netif_tx_disable(dev);
3323 
3324 	phylink_disconnect_phy(mac->phylink);
3325 
3326 	/* only shutdown DMA if this is the last user */
3327 	if (!refcount_dec_and_test(&eth->dma_refcnt))
3328 		return 0;
3329 
3330 	mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
3331 
3332 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3333 	mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
3334 	napi_disable(&eth->tx_napi);
3335 	napi_disable(&eth->rx_napi);
3336 
3337 	cancel_work_sync(&eth->rx_dim.work);
3338 	cancel_work_sync(&eth->tx_dim.work);
3339 
3340 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3341 		mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
3342 	mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
3343 
3344 	mtk_dma_free(eth);
3345 
3346 	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3347 		mtk_ppe_stop(eth->ppe[i]);
3348 
3349 	return 0;
3350 }
3351 
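/* Install or remove an XDP program. XDP is refused when HW LRO is enabled
 * or the MTU exceeds the maximum buffer size usable with XDP. If the
 * presence of a program changes on a running device, the device is stopped
 * and re-opened so the DMA rings are rebuilt for the new mode.
 */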
3352 static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
3353 			 struct netlink_ext_ack *extack)
3354 {
3355 	struct mtk_mac *mac = netdev_priv(dev);
3356 	struct mtk_eth *eth = mac->hw;
3357 	struct bpf_prog *old_prog;
3358 	bool need_update;
3359 
3360 	if (eth->hwlro) {
3361 		NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
3362 		return -EOPNOTSUPP;
3363 	}
3364 
3365 	if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
3366 		NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
3367 		return -EOPNOTSUPP;
3368 	}
3369 
3370 	need_update = !!eth->prog != !!prog;
3371 	if (netif_running(dev) && need_update)
3372 		mtk_stop(dev);
3373 
3374 	old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
3375 	if (old_prog)
3376 		bpf_prog_put(old_prog);
3377 
3378 	if (netif_running(dev) && need_update)
3379 		return mtk_open(dev);
3380 
3381 	return 0;
3382 }
3383 
3384 static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3385 {
3386 	switch (xdp->command) {
3387 	case XDP_SETUP_PROG:
3388 		return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
3389 	default:
3390 		return -EINVAL;
3391 	}
3392 }
3393 
3394 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
3395 {
3396 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3397 			   reset_bits,
3398 			   reset_bits);
3399 
3400 	usleep_range(1000, 1100);
3401 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3402 			   reset_bits,
3403 			   ~reset_bits);
3404 	mdelay(10);
3405 }
3406 
3407 static void mtk_clk_disable(struct mtk_eth *eth)
3408 {
3409 	int clk;
3410 
3411 	for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
3412 		clk_disable_unprepare(eth->clks[clk]);
3413 }
3414 
3415 static int mtk_clk_enable(struct mtk_eth *eth)
3416 {
3417 	int clk, ret;
3418 
3419 	for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
3420 		ret = clk_prepare_enable(eth->clks[clk]);
3421 		if (ret)
3422 			goto err_disable_clks;
3423 	}
3424 
3425 	return 0;
3426 
3427 err_disable_clks:
3428 	while (--clk >= 0)
3429 		clk_disable_unprepare(eth->clks[clk]);
3430 
3431 	return ret;
3432 }
3433 
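/* Net DIM worker for the RX path: translate the selected moderation
 * profile into the PDMA/QDMA delay-interrupt registers (20us time units).
 */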
3434 static void mtk_dim_rx(struct work_struct *work)
3435 {
3436 	struct dim *dim = container_of(work, struct dim, work);
3437 	struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
3438 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3439 	struct dim_cq_moder cur_profile;
3440 	u32 val, cur;
3441 
3442 	cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
3443 						dim->profile_ix);
3444 	spin_lock_bh(&eth->dim_lock);
3445 
3446 	val = mtk_r32(eth, reg_map->pdma.delay_irq);
3447 	val &= MTK_PDMA_DELAY_TX_MASK;
3448 	val |= MTK_PDMA_DELAY_RX_EN;
3449 
3450 	cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3451 	val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
3452 
3453 	cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3454 	val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
3455 
3456 	mtk_w32(eth, val, reg_map->pdma.delay_irq);
3457 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3458 		mtk_w32(eth, val, reg_map->qdma.delay_irq);
3459 
3460 	spin_unlock_bh(&eth->dim_lock);
3461 
3462 	dim->state = DIM_START_MEASURE;
3463 }
3464 
3465 static void mtk_dim_tx(struct work_struct *work)
3466 {
3467 	struct dim *dim = container_of(work, struct dim, work);
3468 	struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
3469 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3470 	struct dim_cq_moder cur_profile;
3471 	u32 val, cur;
3472 
3473 	cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
3474 						dim->profile_ix);
3475 	spin_lock_bh(&eth->dim_lock);
3476 
3477 	val = mtk_r32(eth, reg_map->pdma.delay_irq);
3478 	val &= MTK_PDMA_DELAY_RX_MASK;
3479 	val |= MTK_PDMA_DELAY_TX_EN;
3480 
3481 	cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3482 	val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;
3483 
3484 	cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3485 	val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
3486 
3487 	mtk_w32(eth, val, reg_map->pdma.delay_irq);
3488 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3489 		mtk_w32(eth, val, reg_map->qdma.delay_irq);
3490 
3491 	spin_unlock_bh(&eth->dim_lock);
3492 
3493 	dim->state = DIM_START_MEASURE;
3494 }
3495 
3496 static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val)
3497 {
3498 	struct mtk_eth *eth = mac->hw;
3499 	u32 mcr_cur, mcr_new;
3500 
3501 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3502 		return;
3503 
3504 	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
3505 	mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
3506 
3507 	if (val <= 1518)
3508 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
3509 	else if (val <= 1536)
3510 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
3511 	else if (val <= 1552)
3512 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
3513 	else
3514 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
3515 
3516 	if (mcr_new != mcr_cur)
3517 		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
3518 }
3519 
3520 static void mtk_hw_reset(struct mtk_eth *eth)
3521 {
3522 	u32 val;
3523 
3524 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3525 		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
3526 		val = RSTCTRL_PPE0_V2;
3527 	} else {
3528 		val = RSTCTRL_PPE0;
3529 	}
3530 
3531 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3532 		val |= RSTCTRL_PPE1;
3533 
3534 	ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
3535 
3536 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
3537 		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
3538 			     0x3ffffff);
3539 }
3540 
3541 static u32 mtk_hw_reset_read(struct mtk_eth *eth)
3542 {
3543 	u32 val;
3544 
3545 	regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
3546 	return val;
3547 }
3548 
3549 static void mtk_hw_warm_reset(struct mtk_eth *eth)
3550 {
3551 	u32 rst_mask, val;
3552 
3553 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE,
3554 			   RSTCTRL_FE);
3555 	if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val,
3556 				      val & RSTCTRL_FE, 1, 1000)) {
3557 		dev_err(eth->dev, "warm reset failed\n");
3558 		mtk_hw_reset(eth);
3559 		return;
3560 	}
3561 
3562 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
3563 		rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2;
3564 	else
3565 		rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0;
3566 
3567 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3568 		rst_mask |= RSTCTRL_PPE1;
3569 
3570 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask);
3571 
3572 	udelay(1);
3573 	val = mtk_hw_reset_read(eth);
3574 	if (!(val & rst_mask))
3575 		dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n",
3576 			val, rst_mask);
3577 
3578 	rst_mask |= RSTCTRL_FE;
3579 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask);
3580 
3581 	udelay(1);
3582 	val = mtk_hw_reset_read(eth);
3583 	if (val & rst_mask)
3584 		dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n",
3585 			val, rst_mask);
3586 }
3587 
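/* Heuristic DMA hang detection used by the reset monitor: sample WDMA,
 * QDMA and ADMA state and report a hang only after the same condition has
 * been seen on three consecutive polls.
 */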
3588 static bool mtk_hw_check_dma_hang(struct mtk_eth *eth)
3589 {
3590 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3591 	bool gmac1_tx, gmac2_tx, gdm1_tx, gdm2_tx;
3592 	bool oq_hang, cdm1_busy, adma_busy;
3593 	bool wtx_busy, cdm_full, oq_free;
3594 	u32 wdidx, val, gdm1_fc, gdm2_fc;
3595 	bool qfsm_hang, qfwd_hang;
3596 	bool ret = false;
3597 
3598 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3599 		return false;
3600 
3601 	/* WDMA sanity checks */
3602 	wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc);
3603 
3604 	val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204);
3605 	wtx_busy = FIELD_GET(MTK_TX_DMA_BUSY, val);
3606 
3607 	val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230);
3608 	cdm_full = !FIELD_GET(MTK_CDM_TXFIFO_RDY, val);
3609 
3610 	oq_free  = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) &&
3611 		    !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) &&
3612 		    !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16)));
3613 
3614 	if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) {
3615 		if (++eth->reset.wdma_hang_count > 2) {
3616 			eth->reset.wdma_hang_count = 0;
3617 			ret = true;
3618 		}
3619 		goto out;
3620 	}
3621 
3622 	/* QDMA sanity checks */
3623 	qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234);
3624 	qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308);
3625 
3626 	gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0;
3627 	gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0;
3628 	gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1;
3629 	gmac2_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1;
3630 	gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24);
3631 	gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64);
3632 
3633 	if (qfsm_hang && qfwd_hang &&
3634 	    ((gdm1_tx && gmac1_tx && gdm1_fc < 1) ||
3635 	     (gdm2_tx && gmac2_tx && gdm2_fc < 1))) {
3636 		if (++eth->reset.qdma_hang_count > 2) {
3637 			eth->reset.qdma_hang_count = 0;
3638 			ret = true;
3639 		}
3640 		goto out;
3641 	}
3642 
3643 	/* ADMA sanity checks */
3644 	oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0));
3645 	cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16));
3646 	adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) &&
3647 		    !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6));
3648 
3649 	if (oq_hang && cdm1_busy && adma_busy) {
3650 		if (++eth->reset.adma_hang_count > 2) {
3651 			eth->reset.adma_hang_count = 0;
3652 			ret = true;
3653 		}
3654 		goto out;
3655 	}
3656 
3657 	eth->reset.wdma_hang_count = 0;
3658 	eth->reset.qdma_hang_count = 0;
3659 	eth->reset.adma_hang_count = 0;
3660 out:
3661 	eth->reset.wdidx = wdidx;
3662 
3663 	return ret;
3664 }
3665 
3666 static void mtk_hw_reset_monitor_work(struct work_struct *work)
3667 {
3668 	struct delayed_work *del_work = to_delayed_work(work);
3669 	struct mtk_eth *eth = container_of(del_work, struct mtk_eth,
3670 					   reset.monitor_work);
3671 
3672 	if (test_bit(MTK_RESETTING, &eth->state))
3673 		goto out;
3674 
3675 	/* DMA stuck checks */
3676 	if (mtk_hw_check_dma_hang(eth))
3677 		schedule_work(&eth->pending_work);
3678 
3679 out:
3680 	schedule_delayed_work(&eth->reset.monitor_work,
3681 			      MTK_DMA_MONITOR_TIMEOUT);
3682 }
3683 
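/* One-time (or post-reset) hardware initialization: clocks and runtime PM,
 * ethsys reset, MAC link-down defaults, MTK special-tag parsing, interrupt
 * delays and grouping, and the PSE/GDM thresholds used on NETSYS v2 SoCs.
 */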
3684 static int mtk_hw_init(struct mtk_eth *eth, bool reset)
3685 {
3686 	u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
3687 		       ETHSYS_DMA_AG_MAP_PPE;
3688 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3689 	int i, val, ret;
3690 
3691 	if (!reset && test_and_set_bit(MTK_HW_INIT, &eth->state))
3692 		return 0;
3693 
3694 	if (!reset) {
3695 		pm_runtime_enable(eth->dev);
3696 		pm_runtime_get_sync(eth->dev);
3697 
3698 		ret = mtk_clk_enable(eth);
3699 		if (ret)
3700 			goto err_disable_pm;
3701 	}
3702 
3703 	if (eth->ethsys)
3704 		regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
3705 				   of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
3706 
3707 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3708 		ret = device_reset(eth->dev);
3709 		if (ret) {
3710 			dev_err(eth->dev, "MAC reset failed!\n");
3711 			goto err_disable_pm;
3712 		}
3713 
3714 		/* set interrupt delays based on current Net DIM sample */
3715 		mtk_dim_rx(&eth->rx_dim.work);
3716 		mtk_dim_tx(&eth->tx_dim.work);
3717 
3718 		/* disable delay and normal interrupt */
3719 		mtk_tx_irq_disable(eth, ~0);
3720 		mtk_rx_irq_disable(eth, ~0);
3721 
3722 		return 0;
3723 	}
3724 
3725 	msleep(100);
3726 
3727 	if (reset)
3728 		mtk_hw_warm_reset(eth);
3729 	else
3730 		mtk_hw_reset(eth);
3731 
3732 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3733 		/* Set FE to PDMAv2 if necessary */
3734 		val = mtk_r32(eth, MTK_FE_GLO_MISC);
3735 		mtk_w32(eth,  val | BIT(4), MTK_FE_GLO_MISC);
3736 	}
3737 
3738 	if (eth->pctl) {
3739 		/* Set GE2 driving and slew rate */
3740 		regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
3741 
3742 		/* set GE2 TDSEL */
3743 		regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
3744 
3745 		/* set GE2 TUNE */
3746 		regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
3747 	}
3748 
3749 	/* Set link down as the default for each GMAC. Its own MCR is set
3750 	 * up with the appropriate value when the mtk_mac_config callback
3751 	 * is invoked.
3752 	 */
3753 	for (i = 0; i < MTK_MAC_COUNT; i++) {
3754 		struct net_device *dev = eth->netdev[i];
3755 
3756 		mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
3757 		if (dev) {
3758 			struct mtk_mac *mac = netdev_priv(dev);
3759 
3760 			mtk_set_mcr_max_rx(mac, dev->mtu + MTK_RX_ETH_HLEN);
3761 		}
3762 	}
3763 
3764 	/* Instruct the CDM to parse the MTK special tag on packets coming
3765 	 * from the CPU; this also works for untagged packets.
3766 	 */
3767 	val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
3768 	mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
3769 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3770 		val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3771 		mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
3772 	}
3773 
3774 	/* Enable RX VLAN offloading */
3775 	mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
3776 
3777 	/* set interrupt delays based on current Net DIM sample */
3778 	mtk_dim_rx(&eth->rx_dim.work);
3779 	mtk_dim_tx(&eth->tx_dim.work);
3780 
3781 	/* disable delay and normal interrupt */
3782 	mtk_tx_irq_disable(eth, ~0);
3783 	mtk_rx_irq_disable(eth, ~0);
3784 
3785 	/* FE int grouping */
3786 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
3787 	mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
3788 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
3789 	mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
3790 	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
3791 
3792 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3793 		/* PSE should not drop port 8 and port 9 packets from WDMA Tx */
3794 		mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
3795 
3796 		/* PSE should drop packets to port 8/9 on WDMA Rx ring full */
3797 		mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
3798 
3799 		/* PSE Free Queue Flow Control */
3800 		mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
3801 
3802 		/* PSE config input queue threshold */
3803 		mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
3804 		mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
3805 		mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
3806 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
3807 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
3808 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
3809 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
3810 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
3811 
3812 		/* PSE config output queue threshold */
3813 		mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
3814 		mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
3815 		mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
3816 		mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
3817 		mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
3818 		mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
3819 		mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
3820 		mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
3821 
3822 		/* GDM and CDM Threshold */
3823 		mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
3824 		mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
3825 		mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
3826 		mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
3827 		mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
3828 		mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
3829 	}
3830 
3831 	return 0;
3832 
3833 err_disable_pm:
3834 	if (!reset) {
3835 		pm_runtime_put_sync(eth->dev);
3836 		pm_runtime_disable(eth->dev);
3837 	}
3838 
3839 	return ret;
3840 }
3841 
3842 static int mtk_hw_deinit(struct mtk_eth *eth)
3843 {
3844 	if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
3845 		return 0;
3846 
3847 	mtk_clk_disable(eth);
3848 
3849 	pm_runtime_put_sync(eth->dev);
3850 	pm_runtime_disable(eth->dev);
3851 
3852 	return 0;
3853 }
3854 
3855 static int __init mtk_init(struct net_device *dev)
3856 {
3857 	struct mtk_mac *mac = netdev_priv(dev);
3858 	struct mtk_eth *eth = mac->hw;
3859 	int ret;
3860 
3861 	ret = of_get_ethdev_address(mac->of_node, dev);
3862 	if (ret) {
3863 		/* If the MAC address is invalid, fall back to a random one */
3864 		eth_hw_addr_random(dev);
3865 		dev_err(eth->dev, "generated random MAC address %pM\n",
3866 			dev->dev_addr);
3867 	}
3868 
3869 	return 0;
3870 }
3871 
3872 static void mtk_uninit(struct net_device *dev)
3873 {
3874 	struct mtk_mac *mac = netdev_priv(dev);
3875 	struct mtk_eth *eth = mac->hw;
3876 
3877 	phylink_disconnect_phy(mac->phylink);
3878 	mtk_tx_irq_disable(eth, ~0);
3879 	mtk_rx_irq_disable(eth, ~0);
3880 }
3881 
3882 static int mtk_change_mtu(struct net_device *dev, int new_mtu)
3883 {
3884 	int length = new_mtu + MTK_RX_ETH_HLEN;
3885 	struct mtk_mac *mac = netdev_priv(dev);
3886 	struct mtk_eth *eth = mac->hw;
3887 
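	/* With an XDP program attached, frames must fit within a single
	 * page_pool buffer, so reject MTUs that would exceed it.
	 */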
3888 	if (rcu_access_pointer(eth->prog) &&
3889 	    length > MTK_PP_MAX_BUF_SIZE) {
3890 		netdev_err(dev, "Invalid MTU for XDP mode\n");
3891 		return -EINVAL;
3892 	}
3893 
3894 	mtk_set_mcr_max_rx(mac, length);
3895 	dev->mtu = new_mtu;
3896 
3897 	return 0;
3898 }
3899 
3900 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3901 {
3902 	struct mtk_mac *mac = netdev_priv(dev);
3903 
3904 	switch (cmd) {
3905 	case SIOCGMIIPHY:
3906 	case SIOCGMIIREG:
3907 	case SIOCSMIIREG:
3908 		return phylink_mii_ioctl(mac->phylink, ifr, cmd);
3909 	default:
3910 		break;
3911 	}
3912 
3913 	return -EOPNOTSUPP;
3914 }
3915 
3916 static void mtk_prepare_for_reset(struct mtk_eth *eth)
3917 {
3918 	u32 val;
3919 	int i;
3920 
3921 	/* disable FE P3 and P4 */
3922 	val = mtk_r32(eth, MTK_FE_GLO_CFG) | MTK_FE_LINK_DOWN_P3;
3923 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3924 		val |= MTK_FE_LINK_DOWN_P4;
3925 	mtk_w32(eth, val, MTK_FE_GLO_CFG);
3926 
3927 	/* adjust PPE configurations to prepare for reset */
3928 	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3929 		mtk_ppe_prepare_reset(eth->ppe[i]);
3930 
3931 	/* disable NETSYS interrupts */
3932 	mtk_w32(eth, 0, MTK_FE_INT_ENABLE);
3933 
3934 	/* force the link down on both GMACs */
3935 	for (i = 0; i < 2; i++) {
3936 		val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK;
3937 		mtk_w32(eth, val, MTK_MAC_MCR(i));
3938 	}
3939 }
3940 
3941 static void mtk_pending_work(struct work_struct *work)
3942 {
3943 	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
3944 	unsigned long restart = 0;
3945 	u32 val;
3946 	int i;
3947 
3948 	rtnl_lock();
3949 	set_bit(MTK_RESETTING, &eth->state);
3950 
3951 	mtk_prepare_for_reset(eth);
3952 	mtk_wed_fe_reset();
3953 	/* Run the reset preliminary configuration again to avoid any possible
3954 	 * race during the FE reset, since the reset may release the RTNL lock.
3955 	 */
3956 	mtk_prepare_for_reset(eth);
3957 
3958 	/* stop all devices to make sure that DMA is properly shut down */
3959 	for (i = 0; i < MTK_MAC_COUNT; i++) {
3960 		if (!eth->netdev[i] || !netif_running(eth->netdev[i]))
3961 			continue;
3962 
3963 		mtk_stop(eth->netdev[i]);
3964 		__set_bit(i, &restart);
3965 	}
3966 
3967 	usleep_range(15000, 16000);
3968 
3969 	if (eth->dev->pins)
3970 		pinctrl_select_state(eth->dev->pins->p,
3971 				     eth->dev->pins->default_state);
3972 	mtk_hw_init(eth, true);
3973 
3974 	/* restart DMA and enable IRQs */
3975 	for (i = 0; i < MTK_MAC_COUNT; i++) {
3976 		if (!test_bit(i, &restart))
3977 			continue;
3978 
3979 		if (mtk_open(eth->netdev[i])) {
3980 			netif_alert(eth, ifup, eth->netdev[i],
3981 				    "Driver up/down cycle failed\n");
3982 			dev_close(eth->netdev[i]);
3983 		}
3984 	}
3985 
3986 	/* re-enable FE P3 and P4 */
3987 	val = mtk_r32(eth, MTK_FE_GLO_CFG) & ~MTK_FE_LINK_DOWN_P3;
3988 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3989 		val &= ~MTK_FE_LINK_DOWN_P4;
3990 	mtk_w32(eth, val, MTK_FE_GLO_CFG);
3991 
3992 	clear_bit(MTK_RESETTING, &eth->state);
3993 
3994 	mtk_wed_fe_reset_complete();
3995 
3996 	rtnl_unlock();
3997 }
3998 
3999 static int mtk_free_dev(struct mtk_eth *eth)
4000 {
4001 	int i;
4002 
4003 	for (i = 0; i < MTK_MAC_COUNT; i++) {
4004 		if (!eth->netdev[i])
4005 			continue;
4006 		free_netdev(eth->netdev[i]);
4007 	}
4008 
4009 	for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
4010 		if (!eth->dsa_meta[i])
4011 			break;
4012 		metadata_dst_free(eth->dsa_meta[i]);
4013 	}
4014 
4015 	return 0;
4016 }
4017 
4018 static int mtk_unreg_dev(struct mtk_eth *eth)
4019 {
4020 	int i;
4021 
4022 	for (i = 0; i < MTK_MAC_COUNT; i++) {
4023 		struct mtk_mac *mac;
4024 		if (!eth->netdev[i])
4025 			continue;
4026 		mac = netdev_priv(eth->netdev[i]);
4027 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4028 			unregister_netdevice_notifier(&mac->device_notifier);
4029 		unregister_netdev(eth->netdev[i]);
4030 	}
4031 
4032 	return 0;
4033 }
4034 
4035 static int mtk_cleanup(struct mtk_eth *eth)
4036 {
4037 	mtk_unreg_dev(eth);
4038 	mtk_free_dev(eth);
4039 	cancel_work_sync(&eth->pending_work);
4040 	cancel_delayed_work_sync(&eth->reset.monitor_work);
4041 
4042 	return 0;
4043 }
4044 
4045 static int mtk_get_link_ksettings(struct net_device *ndev,
4046 				  struct ethtool_link_ksettings *cmd)
4047 {
4048 	struct mtk_mac *mac = netdev_priv(ndev);
4049 
4050 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4051 		return -EBUSY;
4052 
4053 	return phylink_ethtool_ksettings_get(mac->phylink, cmd);
4054 }
4055 
4056 static int mtk_set_link_ksettings(struct net_device *ndev,
4057 				  const struct ethtool_link_ksettings *cmd)
4058 {
4059 	struct mtk_mac *mac = netdev_priv(ndev);
4060 
4061 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4062 		return -EBUSY;
4063 
4064 	return phylink_ethtool_ksettings_set(mac->phylink, cmd);
4065 }
4066 
4067 static void mtk_get_drvinfo(struct net_device *dev,
4068 			    struct ethtool_drvinfo *info)
4069 {
4070 	struct mtk_mac *mac = netdev_priv(dev);
4071 
4072 	strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
4073 	strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
4074 	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
4075 }
4076 
4077 static u32 mtk_get_msglevel(struct net_device *dev)
4078 {
4079 	struct mtk_mac *mac = netdev_priv(dev);
4080 
4081 	return mac->hw->msg_enable;
4082 }
4083 
4084 static void mtk_set_msglevel(struct net_device *dev, u32 value)
4085 {
4086 	struct mtk_mac *mac = netdev_priv(dev);
4087 
4088 	mac->hw->msg_enable = value;
4089 }
4090 
4091 static int mtk_nway_reset(struct net_device *dev)
4092 {
4093 	struct mtk_mac *mac = netdev_priv(dev);
4094 
4095 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4096 		return -EBUSY;
4097 
4098 	if (!mac->phylink)
4099 		return -ENOTSUPP;
4100 
4101 	return phylink_ethtool_nway_reset(mac->phylink);
4102 }
4103 
4104 static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4105 {
4106 	int i;
4107 
4108 	switch (stringset) {
4109 	case ETH_SS_STATS: {
4110 		struct mtk_mac *mac = netdev_priv(dev);
4111 
4112 		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
4113 			memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
4114 			data += ETH_GSTRING_LEN;
4115 		}
4116 		if (mtk_page_pool_enabled(mac->hw))
4117 			page_pool_ethtool_stats_get_strings(data);
4118 		break;
4119 	}
4120 	default:
4121 		break;
4122 	}
4123 }
4124 
4125 static int mtk_get_sset_count(struct net_device *dev, int sset)
4126 {
4127 	switch (sset) {
4128 	case ETH_SS_STATS: {
4129 		int count = ARRAY_SIZE(mtk_ethtool_stats);
4130 		struct mtk_mac *mac = netdev_priv(dev);
4131 
4132 		if (mtk_page_pool_enabled(mac->hw))
4133 			count += page_pool_ethtool_stats_get_count();
4134 		return count;
4135 	}
4136 	default:
4137 		return -EOPNOTSUPP;
4138 	}
4139 }
4140 
4141 static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data)
4142 {
4143 	struct page_pool_stats stats = {};
4144 	int i;
4145 
4146 	for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) {
4147 		struct mtk_rx_ring *ring = &eth->rx_ring[i];
4148 
4149 		if (!ring->page_pool)
4150 			continue;
4151 
4152 		page_pool_get_stats(ring->page_pool, &stats);
4153 	}
4154 	page_pool_ethtool_stats_get(data, &stats);
4155 }
4156 
4157 static void mtk_get_ethtool_stats(struct net_device *dev,
4158 				  struct ethtool_stats *stats, u64 *data)
4159 {
4160 	struct mtk_mac *mac = netdev_priv(dev);
4161 	struct mtk_hw_stats *hwstats = mac->hw_stats;
4162 	u64 *data_src, *data_dst;
4163 	unsigned int start;
4164 	int i;
4165 
4166 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4167 		return;
4168 
4169 	if (netif_running(dev) && netif_device_present(dev)) {
4170 		if (spin_trylock_bh(&hwstats->stats_lock)) {
4171 			mtk_stats_update_mac(mac);
4172 			spin_unlock_bh(&hwstats->stats_lock);
4173 		}
4174 	}
4175 
4176 	data_src = (u64 *)hwstats;
4177 
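	/* Snapshot the counters under the u64_stats seqcount so readers on
	 * 32-bit systems never observe torn 64-bit values; retry the copy if
	 * an update raced with it.
	 */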
4178 	do {
4179 		data_dst = data;
4180 		start = u64_stats_fetch_begin(&hwstats->syncp);
4181 
4182 		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
4183 			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
4184 		if (mtk_page_pool_enabled(mac->hw))
4185 			mtk_ethtool_pp_stats(mac->hw, data_dst);
4186 	} while (u64_stats_fetch_retry(&hwstats->syncp, start));
4187 }
4188 
4189 static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
4190 			 u32 *rule_locs)
4191 {
4192 	int ret = -EOPNOTSUPP;
4193 
4194 	switch (cmd->cmd) {
4195 	case ETHTOOL_GRXRINGS:
4196 		if (dev->hw_features & NETIF_F_LRO) {
4197 			cmd->data = MTK_MAX_RX_RING_NUM;
4198 			ret = 0;
4199 		}
4200 		break;
4201 	case ETHTOOL_GRXCLSRLCNT:
4202 		if (dev->hw_features & NETIF_F_LRO) {
4203 			struct mtk_mac *mac = netdev_priv(dev);
4204 
4205 			cmd->rule_cnt = mac->hwlro_ip_cnt;
4206 			ret = 0;
4207 		}
4208 		break;
4209 	case ETHTOOL_GRXCLSRULE:
4210 		if (dev->hw_features & NETIF_F_LRO)
4211 			ret = mtk_hwlro_get_fdir_entry(dev, cmd);
4212 		break;
4213 	case ETHTOOL_GRXCLSRLALL:
4214 		if (dev->hw_features & NETIF_F_LRO)
4215 			ret = mtk_hwlro_get_fdir_all(dev, cmd,
4216 						     rule_locs);
4217 		break;
4218 	default:
4219 		break;
4220 	}
4221 
4222 	return ret;
4223 }
4224 
4225 static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
4226 {
4227 	int ret = -EOPNOTSUPP;
4228 
4229 	switch (cmd->cmd) {
4230 	case ETHTOOL_SRXCLSRLINS:
4231 		if (dev->hw_features & NETIF_F_LRO)
4232 			ret = mtk_hwlro_add_ipaddr(dev, cmd);
4233 		break;
4234 	case ETHTOOL_SRXCLSRLDEL:
4235 		if (dev->hw_features & NETIF_F_LRO)
4236 			ret = mtk_hwlro_del_ipaddr(dev, cmd);
4237 		break;
4238 	default:
4239 		break;
4240 	}
4241 
4242 	return ret;
4243 }
4244 
4245 static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
4246 			    struct net_device *sb_dev)
4247 {
4248 	struct mtk_mac *mac = netdev_priv(dev);
4249 	unsigned int queue = 0;
4250 
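	/* DSA-tagged devices carry a per-port queue mapping set by the DSA
	 * core; offset it by 3 (presumably keeping the lowest queues for the
	 * GMACs' own traffic). Other devices use their MAC id, and anything
	 * out of range falls back to queue 0.
	 */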
4251 	if (netdev_uses_dsa(dev))
4252 		queue = skb_get_queue_mapping(skb) + 3;
4253 	else
4254 		queue = mac->id;
4255 
4256 	if (queue >= dev->num_tx_queues)
4257 		queue = 0;
4258 
4259 	return queue;
4260 }
4261 
4262 static const struct ethtool_ops mtk_ethtool_ops = {
4263 	.get_link_ksettings	= mtk_get_link_ksettings,
4264 	.set_link_ksettings	= mtk_set_link_ksettings,
4265 	.get_drvinfo		= mtk_get_drvinfo,
4266 	.get_msglevel		= mtk_get_msglevel,
4267 	.set_msglevel		= mtk_set_msglevel,
4268 	.nway_reset		= mtk_nway_reset,
4269 	.get_link		= ethtool_op_get_link,
4270 	.get_strings		= mtk_get_strings,
4271 	.get_sset_count		= mtk_get_sset_count,
4272 	.get_ethtool_stats	= mtk_get_ethtool_stats,
4273 	.get_rxnfc		= mtk_get_rxnfc,
4274 	.set_rxnfc              = mtk_set_rxnfc,
4275 };
4276 
4277 static const struct net_device_ops mtk_netdev_ops = {
4278 	.ndo_init		= mtk_init,
4279 	.ndo_uninit		= mtk_uninit,
4280 	.ndo_open		= mtk_open,
4281 	.ndo_stop		= mtk_stop,
4282 	.ndo_start_xmit		= mtk_start_xmit,
4283 	.ndo_set_mac_address	= mtk_set_mac_address,
4284 	.ndo_validate_addr	= eth_validate_addr,
4285 	.ndo_eth_ioctl		= mtk_do_ioctl,
4286 	.ndo_change_mtu		= mtk_change_mtu,
4287 	.ndo_tx_timeout		= mtk_tx_timeout,
4288 	.ndo_get_stats64        = mtk_get_stats64,
4289 	.ndo_fix_features	= mtk_fix_features,
4290 	.ndo_set_features	= mtk_set_features,
4291 #ifdef CONFIG_NET_POLL_CONTROLLER
4292 	.ndo_poll_controller	= mtk_poll_controller,
4293 #endif
4294 	.ndo_setup_tc		= mtk_eth_setup_tc,
4295 	.ndo_bpf		= mtk_xdp,
4296 	.ndo_xdp_xmit		= mtk_xdp_xmit,
4297 	.ndo_select_queue	= mtk_select_queue,
4298 };
4299 
4300 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
4301 {
4302 	const __be32 *_id = of_get_property(np, "reg", NULL);
4303 	phy_interface_t phy_mode;
4304 	struct phylink *phylink;
4305 	struct mtk_mac *mac;
4306 	int id, err;
4307 	int txqs = 1;
4308 	u32 val;
4309 
4310 	if (!_id) {
4311 		dev_err(eth->dev, "missing mac id\n");
4312 		return -EINVAL;
4313 	}
4314 
4315 	id = be32_to_cpup(_id);
4316 	if (id >= MTK_MAC_COUNT) {
4317 		dev_err(eth->dev, "%d is not a valid mac id\n", id);
4318 		return -EINVAL;
4319 	}
4320 
4321 	if (eth->netdev[id]) {
4322 		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
4323 		return -EINVAL;
4324 	}
4325 
4326 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4327 		txqs = MTK_QDMA_NUM_QUEUES;
4328 
4329 	eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1);
4330 	if (!eth->netdev[id]) {
4331 		dev_err(eth->dev, "alloc_etherdev failed\n");
4332 		return -ENOMEM;
4333 	}
4334 	mac = netdev_priv(eth->netdev[id]);
4335 	eth->mac[id] = mac;
4336 	mac->id = id;
4337 	mac->hw = eth;
4338 	mac->of_node = np;
4339 
4340 	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
4341 	mac->hwlro_ip_cnt = 0;
4342 
4343 	mac->hw_stats = devm_kzalloc(eth->dev,
4344 				     sizeof(*mac->hw_stats),
4345 				     GFP_KERNEL);
4346 	if (!mac->hw_stats) {
4347 		dev_err(eth->dev, "failed to allocate counter memory\n");
4348 		err = -ENOMEM;
4349 		goto free_netdev;
4350 	}
4351 	spin_lock_init(&mac->hw_stats->stats_lock);
4352 	u64_stats_init(&mac->hw_stats->syncp);
4353 	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
4354 
4355 	/* phylink create */
4356 	err = of_get_phy_mode(np, &phy_mode);
4357 	if (err) {
4358 		dev_err(eth->dev, "incorrect phy-mode\n");
4359 		goto free_netdev;
4360 	}
4361 
4362 	/* MAC config is not set yet */
4363 	mac->interface = PHY_INTERFACE_MODE_NA;
4364 	mac->speed = SPEED_UNKNOWN;
4365 
4366 	mac->phylink_config.dev = &eth->netdev[id]->dev;
4367 	mac->phylink_config.type = PHYLINK_NETDEV;
4368 	/* This driver makes use of state->speed in mac_config */
4369 	mac->phylink_config.legacy_pre_march2020 = true;
4370 	mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
4371 		MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
4372 
4373 	__set_bit(PHY_INTERFACE_MODE_MII,
4374 		  mac->phylink_config.supported_interfaces);
4375 	__set_bit(PHY_INTERFACE_MODE_GMII,
4376 		  mac->phylink_config.supported_interfaces);
4377 
4378 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
4379 		phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
4380 
4381 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
4382 		__set_bit(PHY_INTERFACE_MODE_TRGMII,
4383 			  mac->phylink_config.supported_interfaces);
4384 
4385 	/* TRGMII is not permitted on MT7621 if using DDR2 */
4386 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) &&
4387 	    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII_MT7621_CLK)) {
4388 		regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
4389 		if (val & SYSCFG_DRAM_TYPE_DDR2)
4390 			__clear_bit(PHY_INTERFACE_MODE_TRGMII,
4391 				    mac->phylink_config.supported_interfaces);
4392 	}
4393 
4394 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
4395 		__set_bit(PHY_INTERFACE_MODE_SGMII,
4396 			  mac->phylink_config.supported_interfaces);
4397 		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
4398 			  mac->phylink_config.supported_interfaces);
4399 		__set_bit(PHY_INTERFACE_MODE_2500BASEX,
4400 			  mac->phylink_config.supported_interfaces);
4401 	}
4402 
4403 	phylink = phylink_create(&mac->phylink_config,
4404 				 of_fwnode_handle(mac->of_node),
4405 				 phy_mode, &mtk_phylink_ops);
4406 	if (IS_ERR(phylink)) {
4407 		err = PTR_ERR(phylink);
4408 		goto free_netdev;
4409 	}
4410 
4411 	mac->phylink = phylink;
4412 
4413 	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
4414 	eth->netdev[id]->watchdog_timeo = 5 * HZ;
4415 	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
4416 	eth->netdev[id]->base_addr = (unsigned long)eth->base;
4417 
4418 	eth->netdev[id]->hw_features = eth->soc->hw_features;
4419 	if (eth->hwlro)
4420 		eth->netdev[id]->hw_features |= NETIF_F_LRO;
4421 
4422 	eth->netdev[id]->vlan_features = eth->soc->hw_features &
4423 		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
4424 	eth->netdev[id]->features |= eth->soc->hw_features;
4425 	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
4426 
4427 	eth->netdev[id]->irq = eth->irq[0];
4428 	eth->netdev[id]->dev.of_node = np;
4429 
4430 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4431 		eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
4432 	else
4433 		eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
4434 
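	/* QDMA-capable SoCs register a netdevice notifier, which is assumed
	 * here to track DSA port events so per-queue TX shaping can follow
	 * the attached port's link speed.
	 */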
4435 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
4436 		mac->device_notifier.notifier_call = mtk_device_event;
4437 		register_netdevice_notifier(&mac->device_notifier);
4438 	}
4439 
4440 	if (mtk_page_pool_enabled(eth))
4441 		eth->netdev[id]->xdp_features = NETDEV_XDP_ACT_BASIC |
4442 						NETDEV_XDP_ACT_REDIRECT |
4443 						NETDEV_XDP_ACT_NDO_XMIT |
4444 						NETDEV_XDP_ACT_NDO_XMIT_SG;
4445 
4446 	return 0;
4447 
4448 free_netdev:
4449 	free_netdev(eth->netdev[id]);
4450 	return err;
4451 }
4452 
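/* Switch the device used for DMA mappings (e.g. when MTK WED takes over the
 * datapath): close every running netdev, swap eth->dma_dev and bring the
 * interfaces back up so their DMA rings are re-allocated against the new
 * device.
 */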
4453 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
4454 {
4455 	struct net_device *dev, *tmp;
4456 	LIST_HEAD(dev_list);
4457 	int i;
4458 
4459 	rtnl_lock();
4460 
4461 	for (i = 0; i < MTK_MAC_COUNT; i++) {
4462 		dev = eth->netdev[i];
4463 
4464 		if (!dev || !(dev->flags & IFF_UP))
4465 			continue;
4466 
4467 		list_add_tail(&dev->close_list, &dev_list);
4468 	}
4469 
4470 	dev_close_many(&dev_list, false);
4471 
4472 	eth->dma_dev = dma_dev;
4473 
4474 	list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
4475 		list_del_init(&dev->close_list);
4476 		dev_open(dev, NULL);
4477 	}
4478 
4479 	rtnl_unlock();
4480 }
4481 
4482 static int mtk_probe(struct platform_device *pdev)
4483 {
4484 	struct resource *res = NULL;
4485 	struct device_node *mac_np;
4486 	struct mtk_eth *eth;
4487 	int err, i;
4488 
4489 	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
4490 	if (!eth)
4491 		return -ENOMEM;
4492 
4493 	eth->soc = of_device_get_match_data(&pdev->dev);
4494 
4495 	eth->dev = &pdev->dev;
4496 	eth->dma_dev = &pdev->dev;
4497 	eth->base = devm_platform_ioremap_resource(pdev, 0);
4498 	if (IS_ERR(eth->base))
4499 		return PTR_ERR(eth->base);
4500 
4501 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4502 		eth->ip_align = NET_IP_ALIGN;
4503 
4504 	spin_lock_init(&eth->page_lock);
4505 	spin_lock_init(&eth->tx_irq_lock);
4506 	spin_lock_init(&eth->rx_irq_lock);
4507 	spin_lock_init(&eth->dim_lock);
4508 
4509 	eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4510 	INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
4511 	INIT_DELAYED_WORK(&eth->reset.monitor_work, mtk_hw_reset_monitor_work);
4512 
4513 	eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4514 	INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);
4515 
4516 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4517 		eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4518 							      "mediatek,ethsys");
4519 		if (IS_ERR(eth->ethsys)) {
4520 			dev_err(&pdev->dev, "no ethsys regmap found\n");
4521 			return PTR_ERR(eth->ethsys);
4522 		}
4523 	}
4524 
4525 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
4526 		eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4527 							     "mediatek,infracfg");
4528 		if (IS_ERR(eth->infra)) {
4529 			dev_err(&pdev->dev, "no infracfg regmap found\n");
4530 			return PTR_ERR(eth->infra);
4531 		}
4532 	}
4533 
4534 	if (of_dma_is_coherent(pdev->dev.of_node)) {
4535 		struct regmap *cci;
4536 
4537 		cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4538 						      "cci-control-port");
4539 		/* enable CPU/bus coherency */
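		/* The value 3 is assumed to set the snoop- and DVM-enable
		 * bits in the CCI snoop control register (offset 0).
		 */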
4540 		if (!IS_ERR(cci))
4541 			regmap_write(cci, 0, 3);
4542 	}
4543 
4544 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
4545 		eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
4546 					  GFP_KERNEL);
4547 		if (!eth->sgmii)
4548 			return -ENOMEM;
4549 
4550 		err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
4551 				     eth->soc->ana_rgc3);
4552 
4553 		if (err)
4554 			return err;
4555 	}
4556 
4557 	if (eth->soc->required_pctl) {
4558 		eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4559 							    "mediatek,pctl");
4560 		if (IS_ERR(eth->pctl)) {
4561 			dev_err(&pdev->dev, "no pctl regmap found\n");
4562 			return PTR_ERR(eth->pctl);
4563 		}
4564 	}
4565 
4566 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
4567 		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4568 		if (!res)
4569 			return -EINVAL;
4570 	}
4571 
4572 	if (eth->soc->offload_version) {
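		/* Register every Wireless Ethernet Dispatch (WED) block
		 * referenced by the "mediatek,wed" phandles along with its
		 * WDMA register base, so Wi-Fi offload can be hooked up.
		 */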
4573 		for (i = 0;; i++) {
4574 			struct device_node *np;
4575 			phys_addr_t wdma_phy;
4576 			u32 wdma_base;
4577 
4578 			if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
4579 				break;
4580 
4581 			np = of_parse_phandle(pdev->dev.of_node,
4582 					      "mediatek,wed", i);
4583 			if (!np)
4584 				break;
4585 
4586 			wdma_base = eth->soc->reg_map->wdma_base[i];
4587 			wdma_phy = res ? res->start + wdma_base : 0;
4588 			mtk_wed_add_hw(np, eth, eth->base + wdma_base,
4589 				       wdma_phy, i);
4590 		}
4591 	}
4592 
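	/* SoCs with a shared interrupt line reuse irq[0] for the TX and RX
	 * vectors; the others provide one IRQ resource per vector.
	 */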
4593 	for (i = 0; i < 3; i++) {
4594 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
4595 			eth->irq[i] = eth->irq[0];
4596 		else
4597 			eth->irq[i] = platform_get_irq(pdev, i);
4598 		if (eth->irq[i] < 0) {
4599 			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
4600 			err = -ENXIO;
4601 			goto err_wed_exit;
4602 		}
4603 	}
4604 	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
4605 		eth->clks[i] = devm_clk_get(eth->dev,
4606 					    mtk_clks_source_name[i]);
4607 		if (IS_ERR(eth->clks[i])) {
4608 			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) {
4609 				err = -EPROBE_DEFER;
4610 				goto err_wed_exit;
4611 			}
4612 			if (eth->soc->required_clks & BIT(i)) {
4613 				dev_err(&pdev->dev, "clock %s not found\n",
4614 					mtk_clks_source_name[i]);
4615 				err = -EINVAL;
4616 				goto err_wed_exit;
4617 			}
4618 			eth->clks[i] = NULL;
4619 		}
4620 	}
4621 
4622 	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
4623 	INIT_WORK(&eth->pending_work, mtk_pending_work);
4624 
4625 	err = mtk_hw_init(eth, false);
4626 	if (err)
4627 		goto err_wed_exit;
4628 
4629 	eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
4630 
4631 	for_each_child_of_node(pdev->dev.of_node, mac_np) {
4632 		if (!of_device_is_compatible(mac_np,
4633 					     "mediatek,eth-mac"))
4634 			continue;
4635 
4636 		if (!of_device_is_available(mac_np))
4637 			continue;
4638 
4639 		err = mtk_add_mac(eth, mac_np);
4640 		if (err) {
4641 			of_node_put(mac_np);
4642 			goto err_deinit_hw;
4643 		}
4644 	}
4645 
4646 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
4647 		err = devm_request_irq(eth->dev, eth->irq[0],
4648 				       mtk_handle_irq, 0,
4649 				       dev_name(eth->dev), eth);
4650 	} else {
4651 		err = devm_request_irq(eth->dev, eth->irq[1],
4652 				       mtk_handle_irq_tx, 0,
4653 				       dev_name(eth->dev), eth);
4654 		if (err)
4655 			goto err_free_dev;
4656 
4657 		err = devm_request_irq(eth->dev, eth->irq[2],
4658 				       mtk_handle_irq_rx, 0,
4659 				       dev_name(eth->dev), eth);
4660 	}
4661 	if (err)
4662 		goto err_free_dev;
4663 
4664 	/* No MT7628/88 support yet */
4665 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4666 		err = mtk_mdio_init(eth);
4667 		if (err)
4668 			goto err_free_dev;
4669 	}
4670 
4671 	if (eth->soc->offload_version) {
4672 		u32 num_ppe;
4673 
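		/* NETSYS v2 SoCs provide two PPE instances, spaced 0x400
		 * apart from ppe_base; earlier SoCs have a single PPE.
		 */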
4674 		num_ppe = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
4675 		num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
4676 		for (i = 0; i < num_ppe; i++) {
4677 			u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
4678 
4679 			eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr,
4680 						   eth->soc->offload_version, i);
4681 			if (!eth->ppe[i]) {
4682 				err = -ENOMEM;
4683 				goto err_deinit_ppe;
4684 			}
4685 		}
4686 
4687 		err = mtk_eth_offload_init(eth);
4688 		if (err)
4689 			goto err_deinit_ppe;
4690 	}
4691 
4692 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4693 		if (!eth->netdev[i])
4694 			continue;
4695 
4696 		err = register_netdev(eth->netdev[i]);
4697 		if (err) {
4698 			dev_err(eth->dev, "error bringing up device\n");
4699 			goto err_deinit_ppe;
4700 		} else
4701 			netif_info(eth, probe, eth->netdev[i],
4702 				   "mediatek frame engine at 0x%08lx, irq %d\n",
4703 				   eth->netdev[i]->base_addr, eth->irq[0]);
4704 	}
4705 
4706 	/* We run two devices on the same DMA ring, so we need a dummy device
4707 	 * for NAPI to work.
4708 	 */
4709 	init_dummy_netdev(&eth->dummy_dev);
4710 	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
4711 	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
4712 
4713 	platform_set_drvdata(pdev, eth);
4714 	schedule_delayed_work(&eth->reset.monitor_work,
4715 			      MTK_DMA_MONITOR_TIMEOUT);
4716 
4717 	return 0;
4718 
4719 err_deinit_ppe:
4720 	mtk_ppe_deinit(eth);
4721 	mtk_mdio_cleanup(eth);
4722 err_free_dev:
4723 	mtk_free_dev(eth);
4724 err_deinit_hw:
4725 	mtk_hw_deinit(eth);
4726 err_wed_exit:
4727 	mtk_wed_exit();
4728 
4729 	return err;
4730 }
4731 
4732 static int mtk_remove(struct platform_device *pdev)
4733 {
4734 	struct mtk_eth *eth = platform_get_drvdata(pdev);
4735 	struct mtk_mac *mac;
4736 	int i;
4737 
4738 	/* stop all devices to make sure that DMA is properly shut down */
4739 	for (i = 0; i < MTK_MAC_COUNT; i++) {
4740 		if (!eth->netdev[i])
4741 			continue;
4742 		mtk_stop(eth->netdev[i]);
4743 		mac = netdev_priv(eth->netdev[i]);
4744 		phylink_disconnect_phy(mac->phylink);
4745 	}
4746 
4747 	mtk_wed_exit();
4748 	mtk_hw_deinit(eth);
4749 
4750 	netif_napi_del(&eth->tx_napi);
4751 	netif_napi_del(&eth->rx_napi);
4752 	mtk_cleanup(eth);
4753 	mtk_mdio_cleanup(eth);
4754 
4755 	return 0;
4756 }
4757 
4758 static const struct mtk_soc_data mt2701_data = {
4759 	.reg_map = &mtk_reg_map,
4760 	.caps = MT7623_CAPS | MTK_HWLRO,
4761 	.hw_features = MTK_HW_FEATURES,
4762 	.required_clks = MT7623_CLKS_BITMAP,
4763 	.required_pctl = true,
4764 	.txrx = {
4765 		.txd_size = sizeof(struct mtk_tx_dma),
4766 		.rxd_size = sizeof(struct mtk_rx_dma),
4767 		.rx_irq_done_mask = MTK_RX_DONE_INT,
4768 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
4769 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
4770 		.dma_len_offset = 16,
4771 	},
4772 };
4773 
4774 static const struct mtk_soc_data mt7621_data = {
4775 	.reg_map = &mtk_reg_map,
4776 	.caps = MT7621_CAPS,
4777 	.hw_features = MTK_HW_FEATURES,
4778 	.required_clks = MT7621_CLKS_BITMAP,
4779 	.required_pctl = false,
4780 	.offload_version = 1,
4781 	.hash_offset = 2,
4782 	.foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
4783 	.txrx = {
4784 		.txd_size = sizeof(struct mtk_tx_dma),
4785 		.rxd_size = sizeof(struct mtk_rx_dma),
4786 		.rx_irq_done_mask = MTK_RX_DONE_INT,
4787 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
4788 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
4789 		.dma_len_offset = 16,
4790 	},
4791 };
4792 
4793 static const struct mtk_soc_data mt7622_data = {
4794 	.reg_map = &mtk_reg_map,
4795 	.ana_rgc3 = 0x2028,
4796 	.caps = MT7622_CAPS | MTK_HWLRO,
4797 	.hw_features = MTK_HW_FEATURES,
4798 	.required_clks = MT7622_CLKS_BITMAP,
4799 	.required_pctl = false,
4800 	.offload_version = 2,
4801 	.hash_offset = 2,
4802 	.foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
4803 	.txrx = {
4804 		.txd_size = sizeof(struct mtk_tx_dma),
4805 		.rxd_size = sizeof(struct mtk_rx_dma),
4806 		.rx_irq_done_mask = MTK_RX_DONE_INT,
4807 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
4808 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
4809 		.dma_len_offset = 16,
4810 	},
4811 };
4812 
4813 static const struct mtk_soc_data mt7623_data = {
4814 	.reg_map = &mtk_reg_map,
4815 	.caps = MT7623_CAPS | MTK_HWLRO,
4816 	.hw_features = MTK_HW_FEATURES,
4817 	.required_clks = MT7623_CLKS_BITMAP,
4818 	.required_pctl = true,
4819 	.offload_version = 1,
4820 	.hash_offset = 2,
4821 	.foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
4822 	.txrx = {
4823 		.txd_size = sizeof(struct mtk_tx_dma),
4824 		.rxd_size = sizeof(struct mtk_rx_dma),
4825 		.rx_irq_done_mask = MTK_RX_DONE_INT,
4826 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
4827 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
4828 		.dma_len_offset = 16,
4829 	},
4830 };
4831 
4832 static const struct mtk_soc_data mt7629_data = {
4833 	.reg_map = &mtk_reg_map,
4834 	.ana_rgc3 = 0x128,
4835 	.caps = MT7629_CAPS | MTK_HWLRO,
4836 	.hw_features = MTK_HW_FEATURES,
4837 	.required_clks = MT7629_CLKS_BITMAP,
4838 	.required_pctl = false,
4839 	.txrx = {
4840 		.txd_size = sizeof(struct mtk_tx_dma),
4841 		.rxd_size = sizeof(struct mtk_rx_dma),
4842 		.rx_irq_done_mask = MTK_RX_DONE_INT,
4843 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
4844 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
4845 		.dma_len_offset = 16,
4846 	},
4847 };
4848 
4849 static const struct mtk_soc_data mt7986_data = {
4850 	.reg_map = &mt7986_reg_map,
4851 	.ana_rgc3 = 0x128,
4852 	.caps = MT7986_CAPS,
4853 	.hw_features = MTK_HW_FEATURES,
4854 	.required_clks = MT7986_CLKS_BITMAP,
4855 	.required_pctl = false,
4856 	.offload_version = 2,
4857 	.hash_offset = 4,
4858 	.foe_entry_size = sizeof(struct mtk_foe_entry),
4859 	.txrx = {
4860 		.txd_size = sizeof(struct mtk_tx_dma_v2),
4861 		.rxd_size = sizeof(struct mtk_rx_dma_v2),
4862 		.rx_irq_done_mask = MTK_RX_DONE_INT_V2,
4863 		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
4864 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
4865 		.dma_len_offset = 8,
4866 	},
4867 };
4868 
4869 static const struct mtk_soc_data rt5350_data = {
4870 	.reg_map = &mt7628_reg_map,
4871 	.caps = MT7628_CAPS,
4872 	.hw_features = MTK_HW_FEATURES_MT7628,
4873 	.required_clks = MT7628_CLKS_BITMAP,
4874 	.required_pctl = false,
4875 	.txrx = {
4876 		.txd_size = sizeof(struct mtk_tx_dma),
4877 		.rxd_size = sizeof(struct mtk_rx_dma),
4878 		.rx_irq_done_mask = MTK_RX_DONE_INT,
4879 		.rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
4880 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
4881 		.dma_len_offset = 16,
4882 	},
4883 };
4884 
4885 const struct of_device_id of_mtk_match[] = {
4886 	{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
4887 	{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
4888 	{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
4889 	{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
4890 	{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
4891 	{ .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
4892 	{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
4893 	{},
4894 };
4895 MODULE_DEVICE_TABLE(of, of_mtk_match);
4896 
4897 static struct platform_driver mtk_driver = {
4898 	.probe = mtk_probe,
4899 	.remove = mtk_remove,
4900 	.driver = {
4901 		.name = "mtk_soc_eth",
4902 		.of_match_table = of_mtk_match,
4903 	},
4904 };
4905 
4906 module_platform_driver(mtk_driver);
4907 
4908 MODULE_LICENSE("GPL");
4909 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
4910 MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
4911