xref: /linux/drivers/net/ethernet/mediatek/mtk_eth_soc.c (revision 41fb0cf1bced59c1fe178cf6cc9f716b5da9e40e)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *
4  *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
5  *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
6  *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
7  */
8 
9 #include <linux/of_device.h>
10 #include <linux/of_mdio.h>
11 #include <linux/of_net.h>
12 #include <linux/mfd/syscon.h>
13 #include <linux/regmap.h>
14 #include <linux/clk.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/if_vlan.h>
17 #include <linux/reset.h>
18 #include <linux/tcp.h>
19 #include <linux/interrupt.h>
20 #include <linux/pinctrl/devinfo.h>
21 #include <linux/phylink.h>
22 #include <linux/jhash.h>
23 #include <net/dsa.h>
24 
25 #include "mtk_eth_soc.h"
26 
27 static int mtk_msg_level = -1;
28 module_param_named(msg_level, mtk_msg_level, int, 0);
29 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
30 
31 #define MTK_ETHTOOL_STAT(x) { #x, \
32 			      offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
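/* e.g. MTK_ETHTOOL_STAT(tx_bytes) expands to
 * { "tx_bytes", offsetof(struct mtk_hw_stats, tx_bytes) / sizeof(u64) },
 * i.e. the counter's name plus its u64 index inside struct mtk_hw_stats.
 */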
33 
34 /* strings used by ethtool */
35 static const struct mtk_ethtool_stats {
36 	char str[ETH_GSTRING_LEN];
37 	u32 offset;
38 } mtk_ethtool_stats[] = {
39 	MTK_ETHTOOL_STAT(tx_bytes),
40 	MTK_ETHTOOL_STAT(tx_packets),
41 	MTK_ETHTOOL_STAT(tx_skip),
42 	MTK_ETHTOOL_STAT(tx_collisions),
43 	MTK_ETHTOOL_STAT(rx_bytes),
44 	MTK_ETHTOOL_STAT(rx_packets),
45 	MTK_ETHTOOL_STAT(rx_overflow),
46 	MTK_ETHTOOL_STAT(rx_fcs_errors),
47 	MTK_ETHTOOL_STAT(rx_short_errors),
48 	MTK_ETHTOOL_STAT(rx_long_errors),
49 	MTK_ETHTOOL_STAT(rx_checksum_errors),
50 	MTK_ETHTOOL_STAT(rx_flow_control_packets),
51 };
52 
53 static const char * const mtk_clks_source_name[] = {
54 	"ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
55 	"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
56 	"sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
57 	"sgmii_ck", "eth2pll",
58 };
59 
60 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
61 {
62 	__raw_writel(val, eth->base + reg);
63 }
64 
65 u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
66 {
67 	return __raw_readl(eth->base + reg);
68 }
69 
70 static u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
71 {
72 	u32 val;
73 
74 	val = mtk_r32(eth, reg);
75 	val &= ~mask;
76 	val |= set;
77 	mtk_w32(eth, val, reg);
78 	return reg;
79 }
80 
81 static int mtk_mdio_busy_wait(struct mtk_eth *eth)
82 {
83 	unsigned long t_start = jiffies;
84 
85 	while (1) {
86 		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
87 			return 0;
88 		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
89 			break;
90 		cond_resched();
91 	}
92 
93 	dev_err(eth->dev, "mdio: MDIO timeout\n");
94 	return -1;
95 }
96 
97 static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
98 			   u32 phy_register, u32 write_data)
99 {
100 	if (mtk_mdio_busy_wait(eth))
101 		return -1;
102 
103 	write_data &= 0xffff;
104 
105 	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
106 		(phy_register << PHY_IAC_REG_SHIFT) |
107 		(phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
108 		MTK_PHY_IAC);
109 
110 	if (mtk_mdio_busy_wait(eth))
111 		return -1;
112 
113 	return 0;
114 }
115 
116 static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
117 {
118 	u32 d;
119 
120 	if (mtk_mdio_busy_wait(eth))
121 		return 0xffff;
122 
123 	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
124 		(phy_reg << PHY_IAC_REG_SHIFT) |
125 		(phy_addr << PHY_IAC_ADDR_SHIFT),
126 		MTK_PHY_IAC);
127 
128 	if (mtk_mdio_busy_wait(eth))
129 		return 0xffff;
130 
131 	d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;
132 
133 	return d;
134 }
135 
136 static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
137 			  int phy_reg, u16 val)
138 {
139 	struct mtk_eth *eth = bus->priv;
140 
141 	return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
142 }
143 
144 static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
145 {
146 	struct mtk_eth *eth = bus->priv;
147 
148 	return _mtk_mdio_read(eth, phy_addr, phy_reg);
149 }
150 
151 static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
152 				     phy_interface_t interface)
153 {
154 	u32 val;
155 
156 	/* Check DDR memory type.
157 	 * Currently TRGMII mode with DDR2 memory is not supported.
158 	 */
159 	regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
160 	if (interface == PHY_INTERFACE_MODE_TRGMII &&
161 	    val & SYSCFG_DRAM_TYPE_DDR2) {
162 		dev_err(eth->dev,
163 			"TRGMII mode with DDR2 memory is not supported!\n");
164 		return -EOPNOTSUPP;
165 	}
166 
167 	val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
168 		ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
169 
170 	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
171 			   ETHSYS_TRGMII_MT7621_MASK, val);
172 
173 	return 0;
174 }
175 
176 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
177 				   phy_interface_t interface, int speed)
178 {
179 	u32 val;
180 	int ret;
181 
182 	if (interface == PHY_INTERFACE_MODE_TRGMII) {
183 		mtk_w32(eth, TRGMII_MODE, INTF_MODE);
184 		val = 500000000;
185 		ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
186 		if (ret)
187 			dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
188 		return;
189 	}
190 
191 	val = (speed == SPEED_1000) ?
192 		INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
193 	mtk_w32(eth, val, INTF_MODE);
194 
195 	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
196 			   ETHSYS_TRGMII_CLK_SEL362_5,
197 			   ETHSYS_TRGMII_CLK_SEL362_5);
198 
199 	val = (speed == SPEED_1000) ? 250000000 : 500000000;
200 	ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
201 	if (ret)
202 		dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
203 
204 	val = (speed == SPEED_1000) ?
205 		RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
206 	mtk_w32(eth, val, TRGMII_RCK_CTRL);
207 
208 	val = (speed == SPEED_1000) ?
209 		TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
210 	mtk_w32(eth, val, TRGMII_TCK_CTRL);
211 }
212 
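/* phylink .mac_config handler: select the SoC-level RGMII/SGMII/GEPHY path
 * for this GMAC, set up the TRGMII clocks on GMAC0 where needed, program the
 * GE mode bits in ETHSYS_SYSCFG0 and, for SGMII/802.3z links, configure the
 * SGMIISYS block before updating the MAC control register.
 */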
213 static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
214 			   const struct phylink_link_state *state)
215 {
216 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
217 					   phylink_config);
218 	struct mtk_eth *eth = mac->hw;
219 	u32 mcr_cur, mcr_new, sid, i;
220 	int val, ge_mode, err;
221 
222 	/* MT76x8 has no hardware settings between the MAC and the PHY */
223 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
224 	    mac->interface != state->interface) {
225 		/* Setup soc pin functions */
226 		switch (state->interface) {
227 		case PHY_INTERFACE_MODE_TRGMII:
228 			if (mac->id)
229 				goto err_phy;
230 			if (!MTK_HAS_CAPS(mac->hw->soc->caps,
231 					  MTK_GMAC1_TRGMII))
232 				goto err_phy;
233 			fallthrough;
234 		case PHY_INTERFACE_MODE_RGMII_TXID:
235 		case PHY_INTERFACE_MODE_RGMII_RXID:
236 		case PHY_INTERFACE_MODE_RGMII_ID:
237 		case PHY_INTERFACE_MODE_RGMII:
238 		case PHY_INTERFACE_MODE_MII:
239 		case PHY_INTERFACE_MODE_REVMII:
240 		case PHY_INTERFACE_MODE_RMII:
241 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
242 				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
243 				if (err)
244 					goto init_err;
245 			}
246 			break;
247 		case PHY_INTERFACE_MODE_1000BASEX:
248 		case PHY_INTERFACE_MODE_2500BASEX:
249 		case PHY_INTERFACE_MODE_SGMII:
250 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
251 				err = mtk_gmac_sgmii_path_setup(eth, mac->id);
252 				if (err)
253 					goto init_err;
254 			}
255 			break;
256 		case PHY_INTERFACE_MODE_GMII:
257 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
258 				err = mtk_gmac_gephy_path_setup(eth, mac->id);
259 				if (err)
260 					goto init_err;
261 			}
262 			break;
263 		default:
264 			goto err_phy;
265 		}
266 
267 		/* Setup clock for 1st gmac */
268 		if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
269 		    !phy_interface_mode_is_8023z(state->interface) &&
270 		    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
271 			if (MTK_HAS_CAPS(mac->hw->soc->caps,
272 					 MTK_TRGMII_MT7621_CLK)) {
273 				if (mt7621_gmac0_rgmii_adjust(mac->hw,
274 							      state->interface))
275 					goto err_phy;
276 			} else {
277 				mtk_gmac0_rgmii_adjust(mac->hw,
278 						       state->interface,
279 						       state->speed);
280 
281 				/* mt7623_pad_clk_setup */
282 				for (i = 0; i < NUM_TRGMII_CTRL; i++)
283 					mtk_w32(mac->hw,
284 						TD_DM_DRVP(8) | TD_DM_DRVN(8),
285 						TRGMII_TD_ODT(i));
286 
287 				/* Assert/release MT7623 RXC reset */
288 				mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
289 					TRGMII_RCK_CTRL);
290 				mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
291 			}
292 		}
293 
294 		ge_mode = 0;
295 		switch (state->interface) {
296 		case PHY_INTERFACE_MODE_MII:
297 		case PHY_INTERFACE_MODE_GMII:
298 			ge_mode = 1;
299 			break;
300 		case PHY_INTERFACE_MODE_REVMII:
301 			ge_mode = 2;
302 			break;
303 		case PHY_INTERFACE_MODE_RMII:
304 			if (mac->id)
305 				goto err_phy;
306 			ge_mode = 3;
307 			break;
308 		default:
309 			break;
310 		}
311 
312 		/* put the gmac into the right mode */
313 		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
314 		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
315 		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
316 		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
317 
318 		mac->interface = state->interface;
319 	}
320 
321 	/* SGMII */
322 	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
323 	    phy_interface_mode_is_8023z(state->interface)) {
324 		/* The path from GMAC to SGMII will be enabled once the
325 		 * SGMIISYS setup is done.
326 		 */
327 		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
328 
329 		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
330 				   SYSCFG0_SGMII_MASK,
331 				   ~(u32)SYSCFG0_SGMII_MASK);
332 
333 		/* Decide how GMAC and SGMIISYS are mapped */
334 		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
335 		       0 : mac->id;
336 
337 		/* Setup SGMIISYS with the determined property */
338 		if (state->interface != PHY_INTERFACE_MODE_SGMII)
339 			err = mtk_sgmii_setup_mode_force(eth->sgmii, sid,
340 							 state);
341 		else if (phylink_autoneg_inband(mode))
342 			err = mtk_sgmii_setup_mode_an(eth->sgmii, sid);
343 
344 		if (err)
345 			goto init_err;
346 
347 		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
348 				   SYSCFG0_SGMII_MASK, val);
349 	} else if (phylink_autoneg_inband(mode)) {
350 		dev_err(eth->dev,
351 			"In-band mode not supported in non SGMII mode!\n");
352 		return;
353 	}
354 
355 	/* Setup gmac */
356 	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
357 	mcr_new = mcr_cur;
358 	mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
359 		   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
360 
361 	/* Only update control register when needed! */
362 	if (mcr_new != mcr_cur)
363 		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
364 
365 	return;
366 
367 err_phy:
368 	dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
369 		mac->id, phy_modes(state->interface));
370 	return;
371 
372 init_err:
373 	dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
374 		mac->id, phy_modes(state->interface), err);
375 }
376 
377 static void mtk_mac_pcs_get_state(struct phylink_config *config,
378 				  struct phylink_link_state *state)
379 {
380 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
381 					   phylink_config);
382 	u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
383 
384 	state->link = (pmsr & MAC_MSR_LINK);
385 	state->duplex = (pmsr & MAC_MSR_DPX) >> 1;
386 
387 	switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
388 	case 0:
389 		state->speed = SPEED_10;
390 		break;
391 	case MAC_MSR_SPEED_100:
392 		state->speed = SPEED_100;
393 		break;
394 	case MAC_MSR_SPEED_1000:
395 		state->speed = SPEED_1000;
396 		break;
397 	default:
398 		state->speed = SPEED_UNKNOWN;
399 		break;
400 	}
401 
402 	state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
403 	if (pmsr & MAC_MSR_RX_FC)
404 		state->pause |= MLO_PAUSE_RX;
405 	if (pmsr & MAC_MSR_TX_FC)
406 		state->pause |= MLO_PAUSE_TX;
407 }
408 
409 static void mtk_mac_an_restart(struct phylink_config *config)
410 {
411 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
412 					   phylink_config);
413 
414 	mtk_sgmii_restart_an(mac->hw, mac->id);
415 }
416 
417 static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
418 			      phy_interface_t interface)
419 {
420 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
421 					   phylink_config);
422 	u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
423 
424 	mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
425 	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
426 }
427 
428 static void mtk_mac_link_up(struct phylink_config *config,
429 			    struct phy_device *phy,
430 			    unsigned int mode, phy_interface_t interface,
431 			    int speed, int duplex, bool tx_pause, bool rx_pause)
432 {
433 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
434 					   phylink_config);
435 	u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
436 
437 	mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
438 		 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
439 		 MAC_MCR_FORCE_RX_FC);
440 
441 	/* Configure speed */
442 	switch (speed) {
443 	case SPEED_2500:
444 	case SPEED_1000:
445 		mcr |= MAC_MCR_SPEED_1000;
446 		break;
447 	case SPEED_100:
448 		mcr |= MAC_MCR_SPEED_100;
449 		break;
450 	}
451 
452 	/* Configure duplex */
453 	if (duplex == DUPLEX_FULL)
454 		mcr |= MAC_MCR_FORCE_DPX;
455 
456 	/* Configure pause modes - phylink will avoid these for half duplex */
457 	if (tx_pause)
458 		mcr |= MAC_MCR_FORCE_TX_FC;
459 	if (rx_pause)
460 		mcr |= MAC_MCR_FORCE_RX_FC;
461 
462 	mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
463 	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
464 }
465 
466 static const struct phylink_mac_ops mtk_phylink_ops = {
467 	.validate = phylink_generic_validate,
468 	.mac_pcs_get_state = mtk_mac_pcs_get_state,
469 	.mac_an_restart = mtk_mac_an_restart,
470 	.mac_config = mtk_mac_config,
471 	.mac_link_down = mtk_mac_link_down,
472 	.mac_link_up = mtk_mac_link_up,
473 };
474 
475 static int mtk_mdio_init(struct mtk_eth *eth)
476 {
477 	struct device_node *mii_np;
478 	int ret;
479 
480 	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
481 	if (!mii_np) {
482 		dev_err(eth->dev, "no %s child node found", "mdio-bus");
483 		return -ENODEV;
484 	}
485 
486 	if (!of_device_is_available(mii_np)) {
487 		ret = -ENODEV;
488 		goto err_put_node;
489 	}
490 
491 	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
492 	if (!eth->mii_bus) {
493 		ret = -ENOMEM;
494 		goto err_put_node;
495 	}
496 
497 	eth->mii_bus->name = "mdio";
498 	eth->mii_bus->read = mtk_mdio_read;
499 	eth->mii_bus->write = mtk_mdio_write;
500 	eth->mii_bus->priv = eth;
501 	eth->mii_bus->parent = eth->dev;
502 
503 	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
504 	ret = of_mdiobus_register(eth->mii_bus, mii_np);
505 
506 err_put_node:
507 	of_node_put(mii_np);
508 	return ret;
509 }
510 
511 static void mtk_mdio_cleanup(struct mtk_eth *eth)
512 {
513 	if (!eth->mii_bus)
514 		return;
515 
516 	mdiobus_unregister(eth->mii_bus);
517 }
518 
519 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
520 {
521 	unsigned long flags;
522 	u32 val;
523 
524 	spin_lock_irqsave(&eth->tx_irq_lock, flags);
525 	val = mtk_r32(eth, eth->tx_int_mask_reg);
526 	mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
527 	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
528 }
529 
530 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
531 {
532 	unsigned long flags;
533 	u32 val;
534 
535 	spin_lock_irqsave(&eth->tx_irq_lock, flags);
536 	val = mtk_r32(eth, eth->tx_int_mask_reg);
537 	mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
538 	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
539 }
540 
541 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
542 {
543 	unsigned long flags;
544 	u32 val;
545 
546 	spin_lock_irqsave(&eth->rx_irq_lock, flags);
547 	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
548 	mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
549 	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
550 }
551 
552 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
553 {
554 	unsigned long flags;
555 	u32 val;
556 
557 	spin_lock_irqsave(&eth->rx_irq_lock, flags);
558 	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
559 	mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
560 	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
561 }
562 
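/* Program the unicast MAC address into the hardware: the first two bytes go
 * into the ADRH register and the remaining four bytes into ADRL, using the
 * SDM registers on MT7628 and the per-GDMA registers on other SoCs.
 */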
563 static int mtk_set_mac_address(struct net_device *dev, void *p)
564 {
565 	int ret = eth_mac_addr(dev, p);
566 	struct mtk_mac *mac = netdev_priv(dev);
567 	struct mtk_eth *eth = mac->hw;
568 	const char *macaddr = dev->dev_addr;
569 
570 	if (ret)
571 		return ret;
572 
573 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
574 		return -EBUSY;
575 
576 	spin_lock_bh(&mac->hw->page_lock);
577 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
578 		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
579 			MT7628_SDM_MAC_ADRH);
580 		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
581 			(macaddr[4] << 8) | macaddr[5],
582 			MT7628_SDM_MAC_ADRL);
583 	} else {
584 		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
585 			MTK_GDMA_MAC_ADRH(mac->id));
586 		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
587 			(macaddr[4] << 8) | macaddr[5],
588 			MTK_GDMA_MAC_ADRL(mac->id));
589 	}
590 	spin_unlock_bh(&mac->hw->page_lock);
591 
592 	return 0;
593 }
594 
595 void mtk_stats_update_mac(struct mtk_mac *mac)
596 {
597 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
598 	struct mtk_eth *eth = mac->hw;
599 
600 	u64_stats_update_begin(&hw_stats->syncp);
601 
602 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
603 		hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
604 		hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
605 		hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
606 		hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
607 		hw_stats->rx_checksum_errors +=
608 			mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
609 	} else {
610 		unsigned int offs = hw_stats->reg_offset;
611 		u64 stats;
612 
613 		hw_stats->rx_bytes += mtk_r32(mac->hw,
614 					      MTK_GDM1_RX_GBCNT_L + offs);
615 		stats = mtk_r32(mac->hw, MTK_GDM1_RX_GBCNT_H + offs);
616 		if (stats)
617 			hw_stats->rx_bytes += (stats << 32);
618 		hw_stats->rx_packets +=
619 			mtk_r32(mac->hw, MTK_GDM1_RX_GPCNT + offs);
620 		hw_stats->rx_overflow +=
621 			mtk_r32(mac->hw, MTK_GDM1_RX_OERCNT + offs);
622 		hw_stats->rx_fcs_errors +=
623 			mtk_r32(mac->hw, MTK_GDM1_RX_FERCNT + offs);
624 		hw_stats->rx_short_errors +=
625 			mtk_r32(mac->hw, MTK_GDM1_RX_SERCNT + offs);
626 		hw_stats->rx_long_errors +=
627 			mtk_r32(mac->hw, MTK_GDM1_RX_LENCNT + offs);
628 		hw_stats->rx_checksum_errors +=
629 			mtk_r32(mac->hw, MTK_GDM1_RX_CERCNT + offs);
630 		hw_stats->rx_flow_control_packets +=
631 			mtk_r32(mac->hw, MTK_GDM1_RX_FCCNT + offs);
632 		hw_stats->tx_skip +=
633 			mtk_r32(mac->hw, MTK_GDM1_TX_SKIPCNT + offs);
634 		hw_stats->tx_collisions +=
635 			mtk_r32(mac->hw, MTK_GDM1_TX_COLCNT + offs);
636 		hw_stats->tx_bytes +=
637 			mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_L + offs);
638 		stats = mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_H + offs);
639 		if (stats)
640 			hw_stats->tx_bytes += (stats << 32);
641 		hw_stats->tx_packets +=
642 			mtk_r32(mac->hw, MTK_GDM1_TX_GPCNT + offs);
643 	}
644 
645 	u64_stats_update_end(&hw_stats->syncp);
646 }
647 
648 static void mtk_stats_update(struct mtk_eth *eth)
649 {
650 	int i;
651 
652 	for (i = 0; i < MTK_MAC_COUNT; i++) {
653 		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
654 			continue;
655 		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
656 			mtk_stats_update_mac(eth->mac[i]);
657 			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
658 		}
659 	}
660 }
661 
662 static void mtk_get_stats64(struct net_device *dev,
663 			    struct rtnl_link_stats64 *storage)
664 {
665 	struct mtk_mac *mac = netdev_priv(dev);
666 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
667 	unsigned int start;
668 
669 	if (netif_running(dev) && netif_device_present(dev)) {
670 		if (spin_trylock_bh(&hw_stats->stats_lock)) {
671 			mtk_stats_update_mac(mac);
672 			spin_unlock_bh(&hw_stats->stats_lock);
673 		}
674 	}
675 
676 	do {
677 		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
678 		storage->rx_packets = hw_stats->rx_packets;
679 		storage->tx_packets = hw_stats->tx_packets;
680 		storage->rx_bytes = hw_stats->rx_bytes;
681 		storage->tx_bytes = hw_stats->tx_bytes;
682 		storage->collisions = hw_stats->tx_collisions;
683 		storage->rx_length_errors = hw_stats->rx_short_errors +
684 			hw_stats->rx_long_errors;
685 		storage->rx_over_errors = hw_stats->rx_overflow;
686 		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
687 		storage->rx_errors = hw_stats->rx_checksum_errors;
688 		storage->tx_aborted_errors = hw_stats->tx_skip;
689 	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
690 
691 	storage->tx_errors = dev->stats.tx_errors;
692 	storage->rx_dropped = dev->stats.rx_dropped;
693 	storage->tx_dropped = dev->stats.tx_dropped;
694 }
695 
696 static inline int mtk_max_frag_size(int mtu)
697 {
698 	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
699 	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K)
700 		mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
701 
702 	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
703 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
704 }
705 
706 static inline int mtk_max_buf_size(int frag_size)
707 {
708 	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
709 		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
710 
711 	WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K);
712 
713 	return buf_size;
714 }
715 
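/* Snapshot a hardware RX descriptor into rxd. rxd2 is read first so that the
 * RX_DMA_DONE bit is checked before the remaining descriptor words are
 * trusted.
 */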
716 static inline bool mtk_rx_get_desc(struct mtk_rx_dma *rxd,
717 				   struct mtk_rx_dma *dma_rxd)
718 {
719 	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
720 	if (!(rxd->rxd2 & RX_DMA_DONE))
721 		return false;
722 
723 	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
724 	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
725 	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
726 
727 	return true;
728 }
729 
730 /* the qdma core needs scratch memory to be set up */
731 static int mtk_init_fq_dma(struct mtk_eth *eth)
732 {
733 	dma_addr_t phy_ring_tail;
734 	int cnt = MTK_DMA_SIZE;
735 	dma_addr_t dma_addr;
736 	int i;
737 
738 	eth->scratch_ring = dma_alloc_coherent(eth->dev,
739 					       cnt * sizeof(struct mtk_tx_dma),
740 					       &eth->phy_scratch_ring,
741 					       GFP_ATOMIC);
742 	if (unlikely(!eth->scratch_ring))
743 		return -ENOMEM;
744 
745 	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
746 				    GFP_KERNEL);
747 	if (unlikely(!eth->scratch_head))
748 		return -ENOMEM;
749 
750 	dma_addr = dma_map_single(eth->dev,
751 				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
752 				  DMA_FROM_DEVICE);
753 	if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
754 		return -ENOMEM;
755 
756 	phy_ring_tail = eth->phy_scratch_ring +
757 			(sizeof(struct mtk_tx_dma) * (cnt - 1));
758 
759 	for (i = 0; i < cnt; i++) {
760 		eth->scratch_ring[i].txd1 =
761 					(dma_addr + (i * MTK_QDMA_PAGE_SIZE));
762 		if (i < cnt - 1)
763 			eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
764 				((i + 1) * sizeof(struct mtk_tx_dma)));
765 		eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
766 	}
767 
768 	mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
769 	mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
770 	mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
771 	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
772 
773 	return 0;
774 }
775 
776 static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
777 {
778 	void *ret = ring->dma;
779 
780 	return ret + (desc - ring->phys);
781 }
782 
783 static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
784 						    struct mtk_tx_dma *txd)
785 {
786 	int idx = txd - ring->dma;
787 
788 	return &ring->buf[idx];
789 }
790 
791 static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
792 				       struct mtk_tx_dma *dma)
793 {
794 	return ring->dma_pdma - ring->dma + dma;
795 }
796 
797 static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma)
798 {
799 	return ((void *)dma - (void *)ring->dma) / sizeof(*dma);
800 }
801 
802 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
803 			 bool napi)
804 {
805 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
806 		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
807 			dma_unmap_single(eth->dev,
808 					 dma_unmap_addr(tx_buf, dma_addr0),
809 					 dma_unmap_len(tx_buf, dma_len0),
810 					 DMA_TO_DEVICE);
811 		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
812 			dma_unmap_page(eth->dev,
813 				       dma_unmap_addr(tx_buf, dma_addr0),
814 				       dma_unmap_len(tx_buf, dma_len0),
815 				       DMA_TO_DEVICE);
816 		}
817 	} else {
818 		if (dma_unmap_len(tx_buf, dma_len0)) {
819 			dma_unmap_page(eth->dev,
820 				       dma_unmap_addr(tx_buf, dma_addr0),
821 				       dma_unmap_len(tx_buf, dma_len0),
822 				       DMA_TO_DEVICE);
823 		}
824 
825 		if (dma_unmap_len(tx_buf, dma_len1)) {
826 			dma_unmap_page(eth->dev,
827 				       dma_unmap_addr(tx_buf, dma_addr1),
828 				       dma_unmap_len(tx_buf, dma_len1),
829 				       DMA_TO_DEVICE);
830 		}
831 	}
832 
833 	tx_buf->flags = 0;
834 	if (tx_buf->skb &&
835 	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
836 		if (napi)
837 			napi_consume_skb(tx_buf->skb, napi);
838 		else
839 			dev_kfree_skb_any(tx_buf->skb);
840 	}
841 	tx_buf->skb = NULL;
842 }
843 
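/* Record the unmap information for a mapped TX buffer. On QDMA the descriptor
 * words are written by the caller, so only the unmap address and length are
 * stored here. On PDMA each descriptor carries two buffers: even buffers use
 * txd1/PLEN0, odd buffers use txd3/PLEN1.
 */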
844 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
845 			 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
846 			 size_t size, int idx)
847 {
848 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
849 		dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
850 		dma_unmap_len_set(tx_buf, dma_len0, size);
851 	} else {
852 		if (idx & 1) {
853 			txd->txd3 = mapped_addr;
854 			txd->txd2 |= TX_DMA_PLEN1(size);
855 			dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
856 			dma_unmap_len_set(tx_buf, dma_len1, size);
857 		} else {
858 			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
859 			txd->txd1 = mapped_addr;
860 			txd->txd2 = TX_DMA_PLEN0(size);
861 			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
862 			dma_unmap_len_set(tx_buf, dma_len0, size);
863 		}
864 	}
865 }
866 
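/* Map an skb onto the TX ring: the linear head goes into the first
 * descriptor, and each fragment is mapped in MTK_TX_DMA_BUF_LEN sized chunks
 * into the following descriptors (PDMA packs two buffers per descriptor).
 * Unmap information is stored in the mtk_tx_buf entries, and the hardware is
 * kicked via the QDMA CTX pointer or the PDMA CTX index at the end.
 */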
867 static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
868 		      int tx_num, struct mtk_tx_ring *ring, bool gso)
869 {
870 	struct mtk_mac *mac = netdev_priv(dev);
871 	struct mtk_eth *eth = mac->hw;
872 	struct mtk_tx_dma *itxd, *txd;
873 	struct mtk_tx_dma *itxd_pdma, *txd_pdma;
874 	struct mtk_tx_buf *itx_buf, *tx_buf;
875 	dma_addr_t mapped_addr;
876 	unsigned int nr_frags;
877 	int i, n_desc = 1;
878 	u32 txd4 = 0, fport;
879 	int k = 0;
880 
881 	itxd = ring->next_free;
882 	itxd_pdma = qdma_to_pdma(ring, itxd);
883 	if (itxd == ring->last_free)
884 		return -ENOMEM;
885 
886 	/* set the forward port */
887 	fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
888 	txd4 |= fport;
889 
890 	itx_buf = mtk_desc_to_tx_buf(ring, itxd);
891 	memset(itx_buf, 0, sizeof(*itx_buf));
892 
893 	if (gso)
894 		txd4 |= TX_DMA_TSO;
895 
896 	/* TX Checksum offload */
897 	if (skb->ip_summed == CHECKSUM_PARTIAL)
898 		txd4 |= TX_DMA_CHKSUM;
899 
900 	/* VLAN header offload */
901 	if (skb_vlan_tag_present(skb))
902 		txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
903 
904 	mapped_addr = dma_map_single(eth->dev, skb->data,
905 				     skb_headlen(skb), DMA_TO_DEVICE);
906 	if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
907 		return -ENOMEM;
908 
909 	WRITE_ONCE(itxd->txd1, mapped_addr);
910 	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
911 	itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
912 			  MTK_TX_FLAGS_FPORT1;
913 	setup_tx_buf(eth, itx_buf, itxd_pdma, mapped_addr, skb_headlen(skb),
914 		     k++);
915 
916 	/* TX SG offload */
917 	txd = itxd;
918 	txd_pdma = qdma_to_pdma(ring, txd);
919 	nr_frags = skb_shinfo(skb)->nr_frags;
920 
921 	for (i = 0; i < nr_frags; i++) {
922 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
923 		unsigned int offset = 0;
924 		int frag_size = skb_frag_size(frag);
925 
926 		while (frag_size) {
927 			bool last_frag = false;
928 			unsigned int frag_map_size;
929 			bool new_desc = true;
930 
931 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ||
932 			    (i & 0x1)) {
933 				txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
934 				txd_pdma = qdma_to_pdma(ring, txd);
935 				if (txd == ring->last_free)
936 					goto err_dma;
937 
938 				n_desc++;
939 			} else {
940 				new_desc = false;
941 			}
942 
943 
944 			frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
945 			mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
946 						       frag_map_size,
947 						       DMA_TO_DEVICE);
948 			if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
949 				goto err_dma;
950 
951 			if (i == nr_frags - 1 &&
952 			    (frag_size - frag_map_size) == 0)
953 				last_frag = true;
954 
955 			WRITE_ONCE(txd->txd1, mapped_addr);
956 			WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
957 					       TX_DMA_PLEN0(frag_map_size) |
958 					       last_frag * TX_DMA_LS0));
959 			WRITE_ONCE(txd->txd4, fport);
960 
961 			tx_buf = mtk_desc_to_tx_buf(ring, txd);
962 			if (new_desc)
963 				memset(tx_buf, 0, sizeof(*tx_buf));
964 			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
965 			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
966 			tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
967 					 MTK_TX_FLAGS_FPORT1;
968 
969 			setup_tx_buf(eth, tx_buf, txd_pdma, mapped_addr,
970 				     frag_map_size, k++);
971 
972 			frag_size -= frag_map_size;
973 			offset += frag_map_size;
974 		}
975 	}
976 
977 	/* store skb for cleanup */
978 	itx_buf->skb = skb;
979 
980 	WRITE_ONCE(itxd->txd4, txd4);
981 	WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
982 				(!nr_frags * TX_DMA_LS0)));
983 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
984 		if (k & 0x1)
985 			txd_pdma->txd2 |= TX_DMA_LS0;
986 		else
987 			txd_pdma->txd2 |= TX_DMA_LS1;
988 	}
989 
990 	netdev_sent_queue(dev, skb->len);
991 	skb_tx_timestamp(skb);
992 
993 	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
994 	atomic_sub(n_desc, &ring->free_count);
995 
996 	/* make sure that all changes to the dma ring are flushed before we
997 	 * continue
998 	 */
999 	wmb();
1000 
1001 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1002 		if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
1003 		    !netdev_xmit_more())
1004 			mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
1005 	} else {
1006 		int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd),
1007 					     ring->dma_size);
1008 		mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
1009 	}
1010 
1011 	return 0;
1012 
1013 err_dma:
1014 	do {
1015 		tx_buf = mtk_desc_to_tx_buf(ring, itxd);
1016 
1017 		/* unmap dma */
1018 		mtk_tx_unmap(eth, tx_buf, false);
1019 
1020 		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1021 		if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
1022 			itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
1023 
1024 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
1025 		itxd_pdma = qdma_to_pdma(ring, itxd);
1026 	} while (itxd != txd);
1027 
1028 	return -ENOMEM;
1029 }
1030 
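/* Worst-case number of TX descriptors needed for an skb: one for the linear
 * head plus one per fragment, where GSO fragments are counted in
 * MTK_TX_DMA_BUF_LEN sized chunks.
 */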
1031 static inline int mtk_cal_txd_req(struct sk_buff *skb)
1032 {
1033 	int i, nfrags;
1034 	skb_frag_t *frag;
1035 
1036 	nfrags = 1;
1037 	if (skb_is_gso(skb)) {
1038 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1039 			frag = &skb_shinfo(skb)->frags[i];
1040 			nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1041 						MTK_TX_DMA_BUF_LEN);
1042 		}
1043 	} else {
1044 		nfrags += skb_shinfo(skb)->nr_frags;
1045 	}
1046 
1047 	return nfrags;
1048 }
1049 
1050 static int mtk_queue_stopped(struct mtk_eth *eth)
1051 {
1052 	int i;
1053 
1054 	for (i = 0; i < MTK_MAC_COUNT; i++) {
1055 		if (!eth->netdev[i])
1056 			continue;
1057 		if (netif_queue_stopped(eth->netdev[i]))
1058 			return 1;
1059 	}
1060 
1061 	return 0;
1062 }
1063 
1064 static void mtk_wake_queue(struct mtk_eth *eth)
1065 {
1066 	int i;
1067 
1068 	for (i = 0; i < MTK_MAC_COUNT; i++) {
1069 		if (!eth->netdev[i])
1070 			continue;
1071 		netif_wake_queue(eth->netdev[i]);
1072 	}
1073 }
1074 
1075 static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1076 {
1077 	struct mtk_mac *mac = netdev_priv(dev);
1078 	struct mtk_eth *eth = mac->hw;
1079 	struct mtk_tx_ring *ring = &eth->tx_ring;
1080 	struct net_device_stats *stats = &dev->stats;
1081 	bool gso = false;
1082 	int tx_num;
1083 
1084 	/* normally we can rely on the stack not calling this more than once,
1085 	 * however we have 2 queues running on the same ring so we need to lock
1086 	 * the ring access
1087 	 */
1088 	spin_lock(&eth->page_lock);
1089 
1090 	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1091 		goto drop;
1092 
1093 	tx_num = mtk_cal_txd_req(skb);
1094 	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1095 		netif_stop_queue(dev);
1096 		netif_err(eth, tx_queued, dev,
1097 			  "Tx Ring full when queue awake!\n");
1098 		spin_unlock(&eth->page_lock);
1099 		return NETDEV_TX_BUSY;
1100 	}
1101 
1102 	/* TSO: fill MSS info in tcp checksum field */
1103 	if (skb_is_gso(skb)) {
1104 		if (skb_cow_head(skb, 0)) {
1105 			netif_warn(eth, tx_err, dev,
1106 				   "GSO expand head fail.\n");
1107 			goto drop;
1108 		}
1109 
1110 		if (skb_shinfo(skb)->gso_type &
1111 				(SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1112 			gso = true;
1113 			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1114 		}
1115 	}
1116 
1117 	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1118 		goto drop;
1119 
1120 	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1121 		netif_stop_queue(dev);
1122 
1123 	spin_unlock(&eth->page_lock);
1124 
1125 	return NETDEV_TX_OK;
1126 
1127 drop:
1128 	spin_unlock(&eth->page_lock);
1129 	stats->tx_dropped++;
1130 	dev_kfree_skb_any(skb);
1131 	return NETDEV_TX_OK;
1132 }
1133 
1134 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1135 {
1136 	int i;
1137 	struct mtk_rx_ring *ring;
1138 	int idx;
1139 
1140 	if (!eth->hwlro)
1141 		return &eth->rx_ring[0];
1142 
1143 	for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1144 		ring = &eth->rx_ring[i];
1145 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1146 		if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
1147 			ring->calc_idx_update = true;
1148 			return ring;
1149 		}
1150 	}
1151 
1152 	return NULL;
1153 }
1154 
1155 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
1156 {
1157 	struct mtk_rx_ring *ring;
1158 	int i;
1159 
1160 	if (!eth->hwlro) {
1161 		ring = &eth->rx_ring[0];
1162 		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1163 	} else {
1164 		for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1165 			ring = &eth->rx_ring[i];
1166 			if (ring->calc_idx_update) {
1167 				ring->calc_idx_update = false;
1168 				mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1169 			}
1170 		}
1171 	}
1172 }
1173 
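/* NAPI RX: for every completed descriptor, allocate a replacement page
 * fragment, unmap the old buffer and hand it to the stack via build_skb()
 * and napi_gro_receive(), then return the descriptor to the hardware and
 * advance the CPU (CRX) index.
 */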
1174 static int mtk_poll_rx(struct napi_struct *napi, int budget,
1175 		       struct mtk_eth *eth)
1176 {
1177 	struct dim_sample dim_sample = {};
1178 	struct mtk_rx_ring *ring;
1179 	int idx;
1180 	struct sk_buff *skb;
1181 	u8 *data, *new_data;
1182 	struct mtk_rx_dma *rxd, trxd;
1183 	int done = 0, bytes = 0;
1184 
1185 	while (done < budget) {
1186 		struct net_device *netdev;
1187 		unsigned int pktlen;
1188 		dma_addr_t dma_addr;
1189 		u32 hash;
1190 		int mac;
1191 
1192 		ring = mtk_get_rx_ring(eth);
1193 		if (unlikely(!ring))
1194 			goto rx_done;
1195 
1196 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1197 		rxd = &ring->dma[idx];
1198 		data = ring->data[idx];
1199 
1200 		if (!mtk_rx_get_desc(&trxd, rxd))
1201 			break;
1202 
1203 		/* find out which mac the packet comes from. values start at 1 */
1204 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) ||
1205 		    (trxd.rxd4 & RX_DMA_SPECIAL_TAG))
1206 			mac = 0;
1207 		else
1208 			mac = ((trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
1209 			       RX_DMA_FPORT_MASK) - 1;
1210 
1211 		if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
1212 			     !eth->netdev[mac]))
1213 			goto release_desc;
1214 
1215 		netdev = eth->netdev[mac];
1216 
1217 		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1218 			goto release_desc;
1219 
1220 		/* alloc new buffer */
1221 		new_data = napi_alloc_frag(ring->frag_size);
1222 		if (unlikely(!new_data)) {
1223 			netdev->stats.rx_dropped++;
1224 			goto release_desc;
1225 		}
1226 		dma_addr = dma_map_single(eth->dev,
1227 					  new_data + NET_SKB_PAD +
1228 					  eth->ip_align,
1229 					  ring->buf_size,
1230 					  DMA_FROM_DEVICE);
1231 		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
1232 			skb_free_frag(new_data);
1233 			netdev->stats.rx_dropped++;
1234 			goto release_desc;
1235 		}
1236 
1237 		dma_unmap_single(eth->dev, trxd.rxd1,
1238 				 ring->buf_size, DMA_FROM_DEVICE);
1239 
1240 		/* receive data */
1241 		skb = build_skb(data, ring->frag_size);
1242 		if (unlikely(!skb)) {
1243 			skb_free_frag(data);
1244 			netdev->stats.rx_dropped++;
1245 			goto skip_rx;
1246 		}
1247 		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1248 
1249 		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
1250 		skb->dev = netdev;
1251 		skb_put(skb, pktlen);
1252 		if (trxd.rxd4 & eth->rx_dma_l4_valid)
1253 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1254 		else
1255 			skb_checksum_none_assert(skb);
1256 		skb->protocol = eth_type_trans(skb, netdev);
1257 		bytes += pktlen;
1258 
1259 		hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
1260 		if (hash != MTK_RXD4_FOE_ENTRY) {
1261 			hash = jhash_1word(hash, 0);
1262 			skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
1263 		}
1264 
1265 		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
1266 		    (trxd.rxd2 & RX_DMA_VTAG))
1267 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1268 					       RX_DMA_VID(trxd.rxd3));
1269 		skb_record_rx_queue(skb, 0);
1270 		napi_gro_receive(napi, skb);
1271 
1272 skip_rx:
1273 		ring->data[idx] = new_data;
1274 		rxd->rxd1 = (unsigned int)dma_addr;
1275 
1276 release_desc:
1277 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
1278 			rxd->rxd2 = RX_DMA_LSO;
1279 		else
1280 			rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
1281 
1282 		ring->calc_idx = idx;
1283 
1284 		done++;
1285 	}
1286 
1287 rx_done:
1288 	if (done) {
1289 		/* make sure that all changes to the dma ring are flushed before
1290 		 * we continue
1291 		 */
1292 		wmb();
1293 		mtk_update_rx_cpu_idx(eth);
1294 	}
1295 
1296 	eth->rx_packets += done;
1297 	eth->rx_bytes += bytes;
1298 	dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
1299 			  &dim_sample);
1300 	net_dim(&eth->rx_dim, dim_sample);
1301 
1302 	return done;
1303 }
1304 
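/* Reclaim completed QDMA TX descriptors by walking from the last CPU release
 * pointer up to the hardware DRX pointer, unmapping buffers and accounting
 * completed skbs per MAC, then publish the new release pointer via
 * MTK_QTX_CRX_PTR.
 */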
1305 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
1306 			    unsigned int *done, unsigned int *bytes)
1307 {
1308 	struct mtk_tx_ring *ring = &eth->tx_ring;
1309 	struct mtk_tx_dma *desc;
1310 	struct sk_buff *skb;
1311 	struct mtk_tx_buf *tx_buf;
1312 	u32 cpu, dma;
1313 
1314 	cpu = ring->last_free_ptr;
1315 	dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
1316 
1317 	desc = mtk_qdma_phys_to_virt(ring, cpu);
1318 
1319 	while ((cpu != dma) && budget) {
1320 		u32 next_cpu = desc->txd2;
1321 		int mac = 0;
1322 
1323 		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
1324 		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
1325 			break;
1326 
1327 		tx_buf = mtk_desc_to_tx_buf(ring, desc);
1328 		if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
1329 			mac = 1;
1330 
1331 		skb = tx_buf->skb;
1332 		if (!skb)
1333 			break;
1334 
1335 		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
1336 			bytes[mac] += skb->len;
1337 			done[mac]++;
1338 			budget--;
1339 		}
1340 		mtk_tx_unmap(eth, tx_buf, true);
1341 
1342 		ring->last_free = desc;
1343 		atomic_inc(&ring->free_count);
1344 
1345 		cpu = next_cpu;
1346 	}
1347 
1348 	ring->last_free_ptr = cpu;
1349 	mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
1350 
1351 	return budget;
1352 }
1353 
1354 static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
1355 			    unsigned int *done, unsigned int *bytes)
1356 {
1357 	struct mtk_tx_ring *ring = &eth->tx_ring;
1358 	struct mtk_tx_dma *desc;
1359 	struct sk_buff *skb;
1360 	struct mtk_tx_buf *tx_buf;
1361 	u32 cpu, dma;
1362 
1363 	cpu = ring->cpu_idx;
1364 	dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
1365 
1366 	while ((cpu != dma) && budget) {
1367 		tx_buf = &ring->buf[cpu];
1368 		skb = tx_buf->skb;
1369 		if (!skb)
1370 			break;
1371 
1372 		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
1373 			bytes[0] += skb->len;
1374 			done[0]++;
1375 			budget--;
1376 		}
1377 
1378 		mtk_tx_unmap(eth, tx_buf, true);
1379 
1380 		desc = &ring->dma[cpu];
1381 		ring->last_free = desc;
1382 		atomic_inc(&ring->free_count);
1383 
1384 		cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
1385 	}
1386 
1387 	ring->cpu_idx = cpu;
1388 
1389 	return budget;
1390 }
1391 
1392 static int mtk_poll_tx(struct mtk_eth *eth, int budget)
1393 {
1394 	struct mtk_tx_ring *ring = &eth->tx_ring;
1395 	struct dim_sample dim_sample = {};
1396 	unsigned int done[MTK_MAX_DEVS];
1397 	unsigned int bytes[MTK_MAX_DEVS];
1398 	int total = 0, i;
1399 
1400 	memset(done, 0, sizeof(done));
1401 	memset(bytes, 0, sizeof(bytes));
1402 
1403 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
1404 		budget = mtk_poll_tx_qdma(eth, budget, done, bytes);
1405 	else
1406 		budget = mtk_poll_tx_pdma(eth, budget, done, bytes);
1407 
1408 	for (i = 0; i < MTK_MAC_COUNT; i++) {
1409 		if (!eth->netdev[i] || !done[i])
1410 			continue;
1411 		netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
1412 		total += done[i];
1413 		eth->tx_packets += done[i];
1414 		eth->tx_bytes += bytes[i];
1415 	}
1416 
1417 	dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
1418 			  &dim_sample);
1419 	net_dim(&eth->tx_dim, dim_sample);
1420 
1421 	if (mtk_queue_stopped(eth) &&
1422 	    (atomic_read(&ring->free_count) > ring->thresh))
1423 		mtk_wake_queue(eth);
1424 
1425 	return total;
1426 }
1427 
1428 static void mtk_handle_status_irq(struct mtk_eth *eth)
1429 {
1430 	u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
1431 
1432 	if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
1433 		mtk_stats_update(eth);
1434 		mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
1435 			MTK_INT_STATUS2);
1436 	}
1437 }
1438 
1439 static int mtk_napi_tx(struct napi_struct *napi, int budget)
1440 {
1441 	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
1442 	int tx_done = 0;
1443 
1444 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
1445 		mtk_handle_status_irq(eth);
1446 	mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
1447 	tx_done = mtk_poll_tx(eth, budget);
1448 
1449 	if (unlikely(netif_msg_intr(eth))) {
1450 		dev_info(eth->dev,
1451 			 "done tx %d, intr 0x%08x/0x%x\n", tx_done,
1452 			 mtk_r32(eth, eth->tx_int_status_reg),
1453 			 mtk_r32(eth, eth->tx_int_mask_reg));
1454 	}
1455 
1456 	if (tx_done == budget)
1457 		return budget;
1458 
1459 	if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
1460 		return budget;
1461 
1462 	if (napi_complete_done(napi, tx_done))
1463 		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
1464 
1465 	return tx_done;
1466 }
1467 
1468 static int mtk_napi_rx(struct napi_struct *napi, int budget)
1469 {
1470 	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
1471 	int rx_done_total = 0;
1472 
1473 	mtk_handle_status_irq(eth);
1474 
1475 	do {
1476 		int rx_done;
1477 
1478 		mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
1479 		rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
1480 		rx_done_total += rx_done;
1481 
1482 		if (unlikely(netif_msg_intr(eth))) {
1483 			dev_info(eth->dev,
1484 				 "done rx %d, intr 0x%08x/0x%x\n", rx_done,
1485 				 mtk_r32(eth, MTK_PDMA_INT_STATUS),
1486 				 mtk_r32(eth, MTK_PDMA_INT_MASK));
1487 		}
1488 
1489 		if (rx_done_total == budget)
1490 			return budget;
1491 
1492 	} while (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT);
1493 
1494 	if (napi_complete_done(napi, rx_done_total))
1495 		mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
1496 
1497 	return rx_done_total;
1498 }
1499 
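/* Allocate the TX bookkeeping array and the coherent descriptor ring. QDMA
 * descriptors are chained through their txd2 pointers; SoCs without QDMA
 * additionally get a parallel dma_pdma ring that holds the real hardware
 * descriptors. Finally the ring base, size and index registers are programmed.
 */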
1500 static int mtk_tx_alloc(struct mtk_eth *eth)
1501 {
1502 	struct mtk_tx_ring *ring = &eth->tx_ring;
1503 	int i, sz = sizeof(*ring->dma);
1504 
1505 	ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
1506 			       GFP_KERNEL);
1507 	if (!ring->buf)
1508 		goto no_tx_mem;
1509 
1510 	ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
1511 				       &ring->phys, GFP_ATOMIC);
1512 	if (!ring->dma)
1513 		goto no_tx_mem;
1514 
1515 	for (i = 0; i < MTK_DMA_SIZE; i++) {
1516 		int next = (i + 1) % MTK_DMA_SIZE;
1517 		u32 next_ptr = ring->phys + next * sz;
1518 
1519 		ring->dma[i].txd2 = next_ptr;
1520 		ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1521 	}
1522 
1523 	/* On MT7688 (PDMA only) this driver uses the ring->dma structs
1524 	 * only as the framework. The real HW descriptors are the PDMA
1525 	 * descriptors in ring->dma_pdma.
1526 	 */
1527 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1528 		ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
1529 						    &ring->phys_pdma,
1530 						    GFP_ATOMIC);
1531 		if (!ring->dma_pdma)
1532 			goto no_tx_mem;
1533 
1534 		for (i = 0; i < MTK_DMA_SIZE; i++) {
1535 			ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
1536 			ring->dma_pdma[i].txd4 = 0;
1537 		}
1538 	}
1539 
1540 	ring->dma_size = MTK_DMA_SIZE;
1541 	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
1542 	ring->next_free = &ring->dma[0];
1543 	ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
1544 	ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
1545 	ring->thresh = MAX_SKB_FRAGS;
1546 
1547 	/* make sure that all changes to the dma ring are flushed before we
1548 	 * continue
1549 	 */
1550 	wmb();
1551 
1552 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1553 		mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
1554 		mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
1555 		mtk_w32(eth,
1556 			ring->phys + ((MTK_DMA_SIZE - 1) * sz),
1557 			MTK_QTX_CRX_PTR);
1558 		mtk_w32(eth, ring->last_free_ptr, MTK_QTX_DRX_PTR);
1559 		mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
1560 			MTK_QTX_CFG(0));
1561 	} else {
1562 		mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
1563 		mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
1564 		mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
1565 		mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
1566 	}
1567 
1568 	return 0;
1569 
1570 no_tx_mem:
1571 	return -ENOMEM;
1572 }
1573 
1574 static void mtk_tx_clean(struct mtk_eth *eth)
1575 {
1576 	struct mtk_tx_ring *ring = &eth->tx_ring;
1577 	int i;
1578 
1579 	if (ring->buf) {
1580 		for (i = 0; i < MTK_DMA_SIZE; i++)
1581 			mtk_tx_unmap(eth, &ring->buf[i], false);
1582 		kfree(ring->buf);
1583 		ring->buf = NULL;
1584 	}
1585 
1586 	if (ring->dma) {
1587 		dma_free_coherent(eth->dev,
1588 				  MTK_DMA_SIZE * sizeof(*ring->dma),
1589 				  ring->dma,
1590 				  ring->phys);
1591 		ring->dma = NULL;
1592 	}
1593 
1594 	if (ring->dma_pdma) {
1595 		dma_free_coherent(eth->dev,
1596 				  MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
1597 				  ring->dma_pdma,
1598 				  ring->phys_pdma);
1599 		ring->dma_pdma = NULL;
1600 	}
1601 }
1602 
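/* Allocate one RX ring (normal, HWLRO, or the QDMA ingress ring at register
 * offset 0x1000), pre-allocate and map a page fragment for every slot, and
 * program the ring base, size and CPU index registers.
 */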
1603 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
1604 {
1605 	struct mtk_rx_ring *ring;
1606 	int rx_data_len, rx_dma_size;
1607 	int i;
1608 	u32 offset = 0;
1609 
1610 	if (rx_flag == MTK_RX_FLAGS_QDMA) {
1611 		if (ring_no)
1612 			return -EINVAL;
1613 		ring = &eth->rx_ring_qdma;
1614 		offset = 0x1000;
1615 	} else {
1616 		ring = &eth->rx_ring[ring_no];
1617 	}
1618 
1619 	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
1620 		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
1621 		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
1622 	} else {
1623 		rx_data_len = ETH_DATA_LEN;
1624 		rx_dma_size = MTK_DMA_SIZE;
1625 	}
1626 
1627 	ring->frag_size = mtk_max_frag_size(rx_data_len);
1628 	ring->buf_size = mtk_max_buf_size(ring->frag_size);
1629 	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
1630 			     GFP_KERNEL);
1631 	if (!ring->data)
1632 		return -ENOMEM;
1633 
1634 	for (i = 0; i < rx_dma_size; i++) {
1635 		ring->data[i] = netdev_alloc_frag(ring->frag_size);
1636 		if (!ring->data[i])
1637 			return -ENOMEM;
1638 	}
1639 
1640 	ring->dma = dma_alloc_coherent(eth->dev,
1641 				       rx_dma_size * sizeof(*ring->dma),
1642 				       &ring->phys, GFP_ATOMIC);
1643 	if (!ring->dma)
1644 		return -ENOMEM;
1645 
1646 	for (i = 0; i < rx_dma_size; i++) {
1647 		dma_addr_t dma_addr = dma_map_single(eth->dev,
1648 				ring->data[i] + NET_SKB_PAD + eth->ip_align,
1649 				ring->buf_size,
1650 				DMA_FROM_DEVICE);
1651 		if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
1652 			return -ENOMEM;
1653 		ring->dma[i].rxd1 = (unsigned int)dma_addr;
1654 
1655 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
1656 			ring->dma[i].rxd2 = RX_DMA_LSO;
1657 		else
1658 			ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
1659 	}
1660 	ring->dma_size = rx_dma_size;
1661 	ring->calc_idx_update = false;
1662 	ring->calc_idx = rx_dma_size - 1;
1663 	ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
1664 	/* make sure that all changes to the dma ring are flushed before we
1665 	 * continue
1666 	 */
1667 	wmb();
1668 
1669 	mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no) + offset);
1670 	mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no) + offset);
1671 	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset);
1672 	mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX + offset);
1673 
1674 	return 0;
1675 }
1676 
1677 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
1678 {
1679 	int i;
1680 
1681 	if (ring->data && ring->dma) {
1682 		for (i = 0; i < ring->dma_size; i++) {
1683 			if (!ring->data[i])
1684 				continue;
1685 			if (!ring->dma[i].rxd1)
1686 				continue;
1687 			dma_unmap_single(eth->dev,
1688 					 ring->dma[i].rxd1,
1689 					 ring->buf_size,
1690 					 DMA_FROM_DEVICE);
1691 			skb_free_frag(ring->data[i]);
1692 		}
1693 		kfree(ring->data);
1694 		ring->data = NULL;
1695 	}
1696 
1697 	if (ring->dma) {
1698 		dma_free_coherent(eth->dev,
1699 				  ring->dma_size * sizeof(*ring->dma),
1700 				  ring->dma,
1701 				  ring->phys);
1702 		ring->dma = NULL;
1703 	}
1704 }
1705 
1706 static int mtk_hwlro_rx_init(struct mtk_eth *eth)
1707 {
1708 	int i;
1709 	u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
1710 	u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
1711 
1712 	/* set LRO rings to auto-learn modes */
1713 	ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
1714 
1715 	/* validate LRO ring */
1716 	ring_ctrl_dw2 |= MTK_RING_VLD;
1717 
1718 	/* set AGE timer (unit: 20us) */
1719 	ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
1720 	ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
1721 
1722 	/* set max AGG timer (unit: 20us) */
1723 	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
1724 
1725 	/* set max LRO AGG count */
1726 	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
1727 	ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
1728 
1729 	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
1730 		mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
1731 		mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
1732 		mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
1733 	}
1734 
1735 	/* IPv4 checksum update enable */
1736 	lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
1737 
1738 	/* switch priority comparison to packet count mode */
1739 	lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
1740 
1741 	/* bandwidth threshold setting */
1742 	mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
1743 
1744 	/* auto-learn score delta setting */
1745 	mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
1746 
1747 	/* set refresh timer for altering flows to 1 sec. (unit: 20us) */
1748 	mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
1749 		MTK_PDMA_LRO_ALT_REFRESH_TIMER);
1750 
1751 	/* set HW LRO mode & the max aggregation count for rx packets */
1752 	lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
1753 
1754 	/* the minimal remaining room of SDL0 in RXD for lro aggregation */
1755 	lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
1756 
1757 	/* enable HW LRO */
1758 	lro_ctrl_dw0 |= MTK_LRO_EN;
1759 
1760 	mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
1761 	mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
1762 
1763 	return 0;
1764 }
1765 
1766 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
1767 {
1768 	int i;
1769 	u32 val;
1770 
1771 	/* relinquish lro rings, flush aggregated packets */
1772 	mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
1773 
1774 	/* wait for the relinquishments to be done */
1775 	for (i = 0; i < 10; i++) {
1776 		val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
1777 		if (val & MTK_LRO_RING_RELINQUISH_DONE) {
1778 			msleep(20);
1779 			continue;
1780 		}
1781 		break;
1782 	}
1783 
1784 	/* invalidate lro rings */
1785 	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
1786 		mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
1787 
1788 	/* disable HW LRO */
1789 	mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
1790 }
1791 
1792 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
1793 {
1794 	u32 reg_val;
1795 
1796 	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
1797 
1798 	/* invalidate the IP setting */
1799 	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
1800 
1801 	mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
1802 
1803 	/* validate the IP setting */
1804 	mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
1805 }
1806 
1807 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
1808 {
1809 	u32 reg_val;
1810 
1811 	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
1812 
1813 	/* invalidate the IP setting */
1814 	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
1815 
1816 	mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
1817 }
1818 
1819 static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
1820 {
1821 	int cnt = 0;
1822 	int i;
1823 
1824 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
1825 		if (mac->hwlro_ip[i])
1826 			cnt++;
1827 	}
1828 
1829 	return cnt;
1830 }
1831 
1832 static int mtk_hwlro_add_ipaddr(struct net_device *dev,
1833 				struct ethtool_rxnfc *cmd)
1834 {
1835 	struct ethtool_rx_flow_spec *fsp =
1836 		(struct ethtool_rx_flow_spec *)&cmd->fs;
1837 	struct mtk_mac *mac = netdev_priv(dev);
1838 	struct mtk_eth *eth = mac->hw;
1839 	int hwlro_idx;
1840 
1841 	if ((fsp->flow_type != TCP_V4_FLOW) ||
1842 	    (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
1843 	    (fsp->location > 1))
1844 		return -EINVAL;
1845 
1846 	mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
1847 	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
1848 
1849 	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
1850 
1851 	mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
1852 
1853 	return 0;
1854 }
1855 
1856 static int mtk_hwlro_del_ipaddr(struct net_device *dev,
1857 				struct ethtool_rxnfc *cmd)
1858 {
1859 	struct ethtool_rx_flow_spec *fsp =
1860 		(struct ethtool_rx_flow_spec *)&cmd->fs;
1861 	struct mtk_mac *mac = netdev_priv(dev);
1862 	struct mtk_eth *eth = mac->hw;
1863 	int hwlro_idx;
1864 
1865 	if (fsp->location > 1)
1866 		return -EINVAL;
1867 
1868 	mac->hwlro_ip[fsp->location] = 0;
1869 	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
1870 
1871 	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
1872 
1873 	mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
1874 
1875 	return 0;
1876 }
1877 
1878 static void mtk_hwlro_netdev_disable(struct net_device *dev)
1879 {
1880 	struct mtk_mac *mac = netdev_priv(dev);
1881 	struct mtk_eth *eth = mac->hw;
1882 	int i, hwlro_idx;
1883 
1884 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
1885 		mac->hwlro_ip[i] = 0;
1886 		hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
1887 
1888 		mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
1889 	}
1890 
1891 	mac->hwlro_ip_cnt = 0;
1892 }
1893 
1894 static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
1895 				    struct ethtool_rxnfc *cmd)
1896 {
1897 	struct mtk_mac *mac = netdev_priv(dev);
1898 	struct ethtool_rx_flow_spec *fsp =
1899 		(struct ethtool_rx_flow_spec *)&cmd->fs;
1900 
1901 	/* only tcp dst ipv4 is meaningful, others are meaningless */
1902 	fsp->flow_type = TCP_V4_FLOW;
1903 	fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
1904 	fsp->m_u.tcp_ip4_spec.ip4dst = 0;
1905 
1906 	fsp->h_u.tcp_ip4_spec.ip4src = 0;
1907 	fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
1908 	fsp->h_u.tcp_ip4_spec.psrc = 0;
1909 	fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
1910 	fsp->h_u.tcp_ip4_spec.pdst = 0;
1911 	fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
1912 	fsp->h_u.tcp_ip4_spec.tos = 0;
1913 	fsp->m_u.tcp_ip4_spec.tos = 0xff;
1914 
1915 	return 0;
1916 }
1917 
1918 static int mtk_hwlro_get_fdir_all(struct net_device *dev,
1919 				  struct ethtool_rxnfc *cmd,
1920 				  u32 *rule_locs)
1921 {
1922 	struct mtk_mac *mac = netdev_priv(dev);
1923 	int cnt = 0;
1924 	int i;
1925 
1926 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
1927 		if (mac->hwlro_ip[i]) {
1928 			rule_locs[cnt] = i;
1929 			cnt++;
1930 		}
1931 	}
1932 
1933 	cmd->rule_cnt = cnt;
1934 
1935 	return 0;
1936 }
1937 
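/* LRO cannot be switched off while RX flow rules are still programmed,
 * so force NETIF_F_LRO back on in that case.
 */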
1938 static netdev_features_t mtk_fix_features(struct net_device *dev,
1939 					  netdev_features_t features)
1940 {
1941 	if (!(features & NETIF_F_LRO)) {
1942 		struct mtk_mac *mac = netdev_priv(dev);
1943 		int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
1944 
1945 		if (ip_cnt) {
1946 			netdev_info(dev, "RX flow rules are programmed, keeping LRO enabled\n");
1947 
1948 			features |= NETIF_F_LRO;
1949 		}
1950 	}
1951 
1952 	return features;
1953 }
1954 
1955 static int mtk_set_features(struct net_device *dev, netdev_features_t features)
1956 {
1959 	if (!((dev->features ^ features) & NETIF_F_LRO))
1960 		return 0;
1961 
1962 	if (!(features & NETIF_F_LRO))
1963 		mtk_hwlro_netdev_disable(dev);
1964 
1965 	return 0;
1966 }
1967 
1968 /* wait for DMA to finish whatever it is doing before we start using it again */
1969 static int mtk_dma_busy_wait(struct mtk_eth *eth)
1970 {
1971 	unsigned int reg;
1972 	int ret;
1973 	u32 val;
1974 
1975 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
1976 		reg = MTK_QDMA_GLO_CFG;
1977 	else
1978 		reg = MTK_PDMA_GLO_CFG;
1979 
1980 	ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
1981 					!(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
1982 					5, MTK_DMA_BUSY_TIMEOUT_US);
1983 	if (ret)
1984 		dev_err(eth->dev, "DMA init timeout\n");
1985 
1986 	return ret;
1987 }
1988 
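/* Allocate and set up all DMA rings: the QDMA scratch/free queue where
 * needed, the TX ring, the QDMA and normal PDMA RX rings and, when HW
 * LRO is available, the additional LRO RX rings. Finally configure the
 * QDMA random early drop thresholds.
 */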
1989 static int mtk_dma_init(struct mtk_eth *eth)
1990 {
1991 	int err;
1992 	u32 i;
1993 
1994 	if (mtk_dma_busy_wait(eth))
1995 		return -EBUSY;
1996 
1997 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1998 		/* QDMA needs scratch memory for internal reordering of the
1999 		 * descriptors
2000 		 */
2001 		err = mtk_init_fq_dma(eth);
2002 		if (err)
2003 			return err;
2004 	}
2005 
2006 	err = mtk_tx_alloc(eth);
2007 	if (err)
2008 		return err;
2009 
2010 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2011 		err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
2012 		if (err)
2013 			return err;
2014 	}
2015 
2016 	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
2017 	if (err)
2018 		return err;
2019 
2020 	if (eth->hwlro) {
2021 		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2022 			err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
2023 			if (err)
2024 				return err;
2025 		}
2026 		err = mtk_hwlro_rx_init(eth);
2027 		if (err)
2028 			return err;
2029 	}
2030 
2031 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2032 		/* Enable random early drop and set drop threshold
2033 		 * automatically
2034 		 */
2035 		mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
2036 			FC_THRES_MIN, MTK_QDMA_FC_THRES);
2037 		mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
2038 	}
2039 
2040 	return 0;
2041 }
2042 
2043 static void mtk_dma_free(struct mtk_eth *eth)
2044 {
2045 	int i;
2046 
2047 	for (i = 0; i < MTK_MAC_COUNT; i++)
2048 		if (eth->netdev[i])
2049 			netdev_reset_queue(eth->netdev[i]);
2050 	if (eth->scratch_ring) {
2051 		dma_free_coherent(eth->dev,
2052 				  MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
2053 				  eth->scratch_ring,
2054 				  eth->phy_scratch_ring);
2055 		eth->scratch_ring = NULL;
2056 		eth->phy_scratch_ring = 0;
2057 	}
2058 	mtk_tx_clean(eth);
2059 	mtk_rx_clean(eth, &eth->rx_ring[0]);
2060 	mtk_rx_clean(eth, &eth->rx_ring_qdma);
2061 
2062 	if (eth->hwlro) {
2063 		mtk_hwlro_rx_uninit(eth);
2064 		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
2065 			mtk_rx_clean(eth, &eth->rx_ring[i]);
2066 	}
2067 
2068 	kfree(eth->scratch_head);
2069 }
2070 
2071 static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
2072 {
2073 	struct mtk_mac *mac = netdev_priv(dev);
2074 	struct mtk_eth *eth = mac->hw;
2075 
2076 	eth->netdev[mac->id]->stats.tx_errors++;
2077 	netif_err(eth, tx_err, dev,
2078 		  "transmit timed out\n");
2079 	schedule_work(&eth->pending_work);
2080 }
2081 
2082 static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
2083 {
2084 	struct mtk_eth *eth = _eth;
2085 
2086 	eth->rx_events++;
2087 	if (likely(napi_schedule_prep(&eth->rx_napi))) {
2088 		__napi_schedule(&eth->rx_napi);
2089 		mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
2090 	}
2091 
2092 	return IRQ_HANDLED;
2093 }
2094 
2095 static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
2096 {
2097 	struct mtk_eth *eth = _eth;
2098 
2099 	eth->tx_events++;
2100 	if (likely(napi_schedule_prep(&eth->tx_napi))) {
2101 		__napi_schedule(&eth->tx_napi);
2102 		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
2103 	}
2104 
2105 	return IRQ_HANDLED;
2106 }
2107 
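/* Combined handler used when the SoC exposes a single shared interrupt
 * line (MTK_SHARED_INT): check which of the RX/TX done interrupts are
 * both enabled and pending and dispatch to the corresponding handler.
 */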
2108 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
2109 {
2110 	struct mtk_eth *eth = _eth;
2111 
2112 	if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT) {
2113 		if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT)
2114 			mtk_handle_irq_rx(irq, _eth);
2115 	}
2116 	if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
2117 		if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
2118 			mtk_handle_irq_tx(irq, _eth);
2119 	}
2120 
2121 	return IRQ_HANDLED;
2122 }
2123 
2124 #ifdef CONFIG_NET_POLL_CONTROLLER
2125 static void mtk_poll_controller(struct net_device *dev)
2126 {
2127 	struct mtk_mac *mac = netdev_priv(dev);
2128 	struct mtk_eth *eth = mac->hw;
2129 
2130 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
2131 	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
2132 	mtk_handle_irq_rx(eth->irq[2], dev);
2133 	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2134 	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
2135 }
2136 #endif
2137 
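/* Initialize the DMA rings and start the engines: on QDMA-capable SoCs
 * both the QDMA and PDMA blocks are enabled, older SoCs use PDMA alone
 * for TX and RX.
 */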
2138 static int mtk_start_dma(struct mtk_eth *eth)
2139 {
2140 	u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
2141 	int err;
2142 
2143 	err = mtk_dma_init(eth);
2144 	if (err) {
2145 		mtk_dma_free(eth);
2146 		return err;
2147 	}
2148 
2149 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2150 		mtk_w32(eth,
2151 			MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
2152 			MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
2153 			MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
2154 			MTK_RX_BT_32DWORDS,
2155 			MTK_QDMA_GLO_CFG);
2156 
2157 		mtk_w32(eth,
2158 			MTK_RX_DMA_EN | rx_2b_offset |
2159 			MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
2160 			MTK_PDMA_GLO_CFG);
2161 	} else {
2162 		mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
2163 			MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
2164 			MTK_PDMA_GLO_CFG);
2165 	}
2166 
2167 	return 0;
2168 }
2169 
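/* Point every GDMA forwarding port at @config (PDMA, PPE or drop),
 * enable the RX checksum offload bits and, when port 0 is used by DSA,
 * the special tag, then pulse a PSE reset. No-op on MT7628.
 */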
2170 static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
2171 {
2172 	int i;
2173 
2174 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2175 		return;
2176 
2177 	for (i = 0; i < MTK_MAC_COUNT; i++) {
2178 		u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
2179 
2180 		/* by default, set up the forward port to send frames to PDMA */
2181 		val &= ~0xffff;
2182 
2183 		/* Enable RX checksum */
2184 		val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
2185 
2186 		val |= config;
2187 
2188 		if (!i && eth->netdev[0] && netdev_uses_dsa(eth->netdev[0]))
2189 			val |= MTK_GDMA_SPECIAL_TAG;
2190 
2191 		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
2192 	}
2193 	/* Reset and enable PSE */
2194 	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
2195 	mtk_w32(eth, 0, MTK_RST_GL);
2196 }
2197 
2198 static int mtk_open(struct net_device *dev)
2199 {
2200 	struct mtk_mac *mac = netdev_priv(dev);
2201 	struct mtk_eth *eth = mac->hw;
2202 	int err;
2203 
2204 	err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
2205 	if (err) {
2206 		netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
2207 			   err);
2208 		return err;
2209 	}
2210 
2211 	/* we run 2 netdevs on the same dma ring so we only bring it up once */
2212 	if (!refcount_read(&eth->dma_refcnt)) {
2213 		u32 gdm_config = MTK_GDMA_TO_PDMA;
2214 		int err;
2215 
2216 		err = mtk_start_dma(eth);
2217 		if (err)
2218 			return err;
2219 
2220 		if (eth->soc->offload_version && mtk_ppe_start(&eth->ppe) == 0)
2221 			gdm_config = MTK_GDMA_TO_PPE;
2222 
2223 		mtk_gdm_config(eth, gdm_config);
2224 
2225 		napi_enable(&eth->tx_napi);
2226 		napi_enable(&eth->rx_napi);
2227 		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2228 		mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
2229 		refcount_set(&eth->dma_refcnt, 1);
2230 	}
	} else {
		refcount_inc(&eth->dma_refcnt);
	}
2234 	phylink_start(mac->phylink);
2235 	netif_start_queue(dev);
2236 	return 0;
2237 }
2238 
2239 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
2240 {
2241 	u32 val;
2242 	int i;
2243 
2244 	/* stop the dma engine */
2245 	spin_lock_bh(&eth->page_lock);
2246 	val = mtk_r32(eth, glo_cfg);
2247 	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
2248 		glo_cfg);
2249 	spin_unlock_bh(&eth->page_lock);
2250 
2251 	/* wait for dma stop */
2252 	for (i = 0; i < 10; i++) {
2253 		val = mtk_r32(eth, glo_cfg);
2254 		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
2255 			msleep(20);
2256 			continue;
2257 		}
2258 		break;
2259 	}
2260 }
2261 
2262 static int mtk_stop(struct net_device *dev)
2263 {
2264 	struct mtk_mac *mac = netdev_priv(dev);
2265 	struct mtk_eth *eth = mac->hw;
2266 
2267 	phylink_stop(mac->phylink);
2268 
2269 	netif_tx_disable(dev);
2270 
2271 	phylink_disconnect_phy(mac->phylink);
2272 
2273 	/* only shut down DMA if this is the last user */
2274 	if (!refcount_dec_and_test(&eth->dma_refcnt))
2275 		return 0;
2276 
2277 	mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
2278 
2279 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
2280 	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
2281 	napi_disable(&eth->tx_napi);
2282 	napi_disable(&eth->rx_napi);
2283 
2284 	cancel_work_sync(&eth->rx_dim.work);
2285 	cancel_work_sync(&eth->tx_dim.work);
2286 
2287 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2288 		mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
2289 	mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
2290 
2291 	mtk_dma_free(eth);
2292 
2293 	if (eth->soc->offload_version)
2294 		mtk_ppe_stop(&eth->ppe);
2295 
2296 	return 0;
2297 }
2298 
2299 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
2300 {
2301 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
2302 			   reset_bits,
2303 			   reset_bits);
2304 
2305 	usleep_range(1000, 1100);
2306 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
2307 			   reset_bits,
2308 			   ~reset_bits);
2309 	mdelay(10);
2310 }
2311 
2312 static void mtk_clk_disable(struct mtk_eth *eth)
2313 {
2314 	int clk;
2315 
2316 	for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
2317 		clk_disable_unprepare(eth->clks[clk]);
2318 }
2319 
2320 static int mtk_clk_enable(struct mtk_eth *eth)
2321 {
2322 	int clk, ret;
2323 
2324 	for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
2325 		ret = clk_prepare_enable(eth->clks[clk]);
2326 		if (ret)
2327 			goto err_disable_clks;
2328 	}
2329 
2330 	return 0;
2331 
2332 err_disable_clks:
2333 	while (--clk >= 0)
2334 		clk_disable_unprepare(eth->clks[clk]);
2335 
2336 	return ret;
2337 }
2338 
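/* Net DIM worker for the RX path: translate the moderation profile
 * suggested by the DIM library into the RX fields of the PDMA (and, on
 * QDMA SoCs, QDMA) delay-interrupt register, leaving the TX fields
 * untouched. The delay time field appears to count in 20 us units,
 * hence the DIV_ROUND_UP by 20. mtk_dim_tx() below is the TX mirror.
 */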
2339 static void mtk_dim_rx(struct work_struct *work)
2340 {
2341 	struct dim *dim = container_of(work, struct dim, work);
2342 	struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
2343 	struct dim_cq_moder cur_profile;
2344 	u32 val, cur;
2345 
2346 	cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
2347 						dim->profile_ix);
2348 	spin_lock_bh(&eth->dim_lock);
2349 
2350 	val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
2351 	val &= MTK_PDMA_DELAY_TX_MASK;
2352 	val |= MTK_PDMA_DELAY_RX_EN;
2353 
2354 	cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
2355 	val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
2356 
2357 	cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
2358 	val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
2359 
2360 	mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
2361 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2362 		mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
2363 
2364 	spin_unlock_bh(&eth->dim_lock);
2365 
2366 	dim->state = DIM_START_MEASURE;
2367 }
2368 
2369 static void mtk_dim_tx(struct work_struct *work)
2370 {
2371 	struct dim *dim = container_of(work, struct dim, work);
2372 	struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
2373 	struct dim_cq_moder cur_profile;
2374 	u32 val, cur;
2375 
2376 	cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
2377 						dim->profile_ix);
2378 	spin_lock_bh(&eth->dim_lock);
2379 
2380 	val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
2381 	val &= MTK_PDMA_DELAY_RX_MASK;
2382 	val |= MTK_PDMA_DELAY_TX_EN;
2383 
2384 	cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
2385 	val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;
2386 
2387 	cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
2388 	val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
2389 
2390 	mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
2391 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2392 		mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
2393 
2394 	spin_unlock_bh(&eth->dim_lock);
2395 
2396 	dim->state = DIM_START_MEASURE;
2397 }
2398 
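/* One-time hardware bring-up, guarded by the MTK_HW_INIT state bit:
 * enable runtime PM and the clocks, reset the frame engine, program the
 * pad control and per-GMAC link-down defaults, set the initial
 * interrupt moderation and group the FE interrupts. MT7628 only needs
 * the device reset and the interrupt setup.
 */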
2399 static int mtk_hw_init(struct mtk_eth *eth)
2400 {
2401 	int i, val, ret;
2402 
2403 	if (test_and_set_bit(MTK_HW_INIT, &eth->state))
2404 		return 0;
2405 
2406 	pm_runtime_enable(eth->dev);
2407 	pm_runtime_get_sync(eth->dev);
2408 
2409 	ret = mtk_clk_enable(eth);
2410 	if (ret)
2411 		goto err_disable_pm;
2412 
2413 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
2414 		ret = device_reset(eth->dev);
2415 		if (ret) {
2416 			dev_err(eth->dev, "MAC reset failed!\n");
2417 			goto err_disable_pm;
2418 		}
2419 
2420 		/* set interrupt delays based on current Net DIM sample */
2421 		mtk_dim_rx(&eth->rx_dim.work);
2422 		mtk_dim_tx(&eth->tx_dim.work);
2423 
2424 		/* disable delay and normal interrupt */
2425 		mtk_tx_irq_disable(eth, ~0);
2426 		mtk_rx_irq_disable(eth, ~0);
2427 
2428 		return 0;
2429 	}
2430 
2431 	/* Non-MT7628 handling... */
2432 	ethsys_reset(eth, RSTCTRL_FE);
2433 	ethsys_reset(eth, RSTCTRL_PPE);
2434 
2435 	if (eth->pctl) {
2436 		/* Set GE2 driving and slew rate */
2437 		regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
2438 
2439 		/* set GE2 TDSEL */
2440 		regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
2441 
2442 		/* set GE2 TUNE */
2443 		regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
2444 	}
2445 
2446 	/* Set link-down as the default for each GMAC. Each GMAC's own MCR
2447 	 * is programmed with a more appropriate value once mtk_mac_config()
2448 	 * is invoked.
2449 	 */
2450 	for (i = 0; i < MTK_MAC_COUNT; i++)
2451 		mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
2452 
2453 	/* Tell the CDM to parse the MTK special tag on frames coming from
2454 	 * the CPU; this also works for untagged packets.
2455 	 */
2456 	val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
2457 	mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
2458 
2459 	/* Enable RX VLAN offloading */
2460 	mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
2461 
2462 	/* set interrupt delays based on current Net DIM sample */
2463 	mtk_dim_rx(&eth->rx_dim.work);
2464 	mtk_dim_tx(&eth->tx_dim.work);
2465 
2466 	/* disable delay and normal interrupt */
2467 	mtk_tx_irq_disable(eth, ~0);
2468 	mtk_rx_irq_disable(eth, ~0);
2469 
2470 	/* FE int grouping */
2471 	mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
2472 	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
2473 	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
2474 	mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
2475 	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
2476 
2477 	return 0;
2478 
2479 err_disable_pm:
2480 	pm_runtime_put_sync(eth->dev);
2481 	pm_runtime_disable(eth->dev);
2482 
2483 	return ret;
2484 }
2485 
2486 static int mtk_hw_deinit(struct mtk_eth *eth)
2487 {
2488 	if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
2489 		return 0;
2490 
2491 	mtk_clk_disable(eth);
2492 
2493 	pm_runtime_put_sync(eth->dev);
2494 	pm_runtime_disable(eth->dev);
2495 
2496 	return 0;
2497 }
2498 
2499 static int __init mtk_init(struct net_device *dev)
2500 {
2501 	struct mtk_mac *mac = netdev_priv(dev);
2502 	struct mtk_eth *eth = mac->hw;
2503 	int ret;
2504 
2505 	ret = of_get_ethdev_address(mac->of_node, dev);
2506 	if (ret) {
2507 		/* If the MAC address is invalid, use a random MAC address */
2508 		eth_hw_addr_random(dev);
2509 		dev_err(eth->dev, "generated random MAC address %pM\n",
2510 			dev->dev_addr);
2511 	}
2512 
2513 	return 0;
2514 }
2515 
2516 static void mtk_uninit(struct net_device *dev)
2517 {
2518 	struct mtk_mac *mac = netdev_priv(dev);
2519 	struct mtk_eth *eth = mac->hw;
2520 
2521 	phylink_disconnect_phy(mac->phylink);
2522 	mtk_tx_irq_disable(eth, ~0);
2523 	mtk_rx_irq_disable(eth, ~0);
2524 }
2525 
2526 static int mtk_change_mtu(struct net_device *dev, int new_mtu)
2527 {
2528 	int length = new_mtu + MTK_RX_ETH_HLEN;
2529 	struct mtk_mac *mac = netdev_priv(dev);
2530 	struct mtk_eth *eth = mac->hw;
2531 	u32 mcr_cur, mcr_new;
2532 
2533 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
2534 		mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
2535 		mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
2536 
2537 		if (length <= 1518)
2538 			mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
2539 		else if (length <= 1536)
2540 			mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
2541 		else if (length <= 1552)
2542 			mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
2543 		else
2544 			mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
2545 
2546 		if (mcr_new != mcr_cur)
2547 			mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
2548 	}
2549 
2550 	dev->mtu = new_mtu;
2551 
2552 	return 0;
2553 }
2554 
2555 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2556 {
2557 	struct mtk_mac *mac = netdev_priv(dev);
2558 
2559 	switch (cmd) {
2560 	case SIOCGMIIPHY:
2561 	case SIOCGMIIREG:
2562 	case SIOCSMIIREG:
2563 		return phylink_mii_ioctl(mac->phylink, ifr, cmd);
2564 	default:
2565 		break;
2566 	}
2567 
2568 	return -EOPNOTSUPP;
2569 }
2570 
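/* Reset worker scheduled from the TX timeout path: under the
 * MTK_RESETTING state bit, stop every netdev, re-initialize the
 * hardware and then reopen the devices that were running.
 */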
2571 static void mtk_pending_work(struct work_struct *work)
2572 {
2573 	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
2574 	int err, i;
2575 	unsigned long restart = 0;
2576 
2577 	rtnl_lock();
2578 
2579 	dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);
2580 
2581 	while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
2582 		cpu_relax();
2583 
2584 	dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
2585 	/* stop all devices to make sure that dma is properly shut down */
2586 	for (i = 0; i < MTK_MAC_COUNT; i++) {
2587 		if (!eth->netdev[i])
2588 			continue;
2589 		mtk_stop(eth->netdev[i]);
2590 		__set_bit(i, &restart);
2591 	}
2592 	dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);
2593 
2594 	/* restart underlying hardware such as power, clock, pin mux
2595 	 * and the connected phy
2596 	 */
2597 	mtk_hw_deinit(eth);
2598 
2599 	if (eth->dev->pins)
2600 		pinctrl_select_state(eth->dev->pins->p,
2601 				     eth->dev->pins->default_state);
2602 	mtk_hw_init(eth);
2603 
2604 	/* restart DMA and enable IRQs */
2605 	for (i = 0; i < MTK_MAC_COUNT; i++) {
2606 		if (!test_bit(i, &restart))
2607 			continue;
2608 		err = mtk_open(eth->netdev[i]);
2609 		if (err) {
2610 			netif_alert(eth, ifup, eth->netdev[i],
2611 			      "Driver up/down cycle failed, closing device.\n");
2612 			dev_close(eth->netdev[i]);
2613 		}
2614 	}
2615 
2616 	dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);
2617 
2618 	clear_bit_unlock(MTK_RESETTING, &eth->state);
2619 
2620 	rtnl_unlock();
2621 }
2622 
2623 static int mtk_free_dev(struct mtk_eth *eth)
2624 {
2625 	int i;
2626 
2627 	for (i = 0; i < MTK_MAC_COUNT; i++) {
2628 		if (!eth->netdev[i])
2629 			continue;
2630 		free_netdev(eth->netdev[i]);
2631 	}
2632 
2633 	return 0;
2634 }
2635 
2636 static int mtk_unreg_dev(struct mtk_eth *eth)
2637 {
2638 	int i;
2639 
2640 	for (i = 0; i < MTK_MAC_COUNT; i++) {
2641 		if (!eth->netdev[i])
2642 			continue;
2643 		unregister_netdev(eth->netdev[i]);
2644 	}
2645 
2646 	return 0;
2647 }
2648 
2649 static int mtk_cleanup(struct mtk_eth *eth)
2650 {
2651 	mtk_unreg_dev(eth);
2652 	mtk_free_dev(eth);
2653 	cancel_work_sync(&eth->pending_work);
2654 
2655 	return 0;
2656 }
2657 
2658 static int mtk_get_link_ksettings(struct net_device *ndev,
2659 				  struct ethtool_link_ksettings *cmd)
2660 {
2661 	struct mtk_mac *mac = netdev_priv(ndev);
2662 
2663 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
2664 		return -EBUSY;
2665 
2666 	return phylink_ethtool_ksettings_get(mac->phylink, cmd);
2667 }
2668 
2669 static int mtk_set_link_ksettings(struct net_device *ndev,
2670 				  const struct ethtool_link_ksettings *cmd)
2671 {
2672 	struct mtk_mac *mac = netdev_priv(ndev);
2673 
2674 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
2675 		return -EBUSY;
2676 
2677 	return phylink_ethtool_ksettings_set(mac->phylink, cmd);
2678 }
2679 
2680 static void mtk_get_drvinfo(struct net_device *dev,
2681 			    struct ethtool_drvinfo *info)
2682 {
2683 	struct mtk_mac *mac = netdev_priv(dev);
2684 
2685 	strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
2686 	strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
2687 	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
2688 }
2689 
2690 static u32 mtk_get_msglevel(struct net_device *dev)
2691 {
2692 	struct mtk_mac *mac = netdev_priv(dev);
2693 
2694 	return mac->hw->msg_enable;
2695 }
2696 
2697 static void mtk_set_msglevel(struct net_device *dev, u32 value)
2698 {
2699 	struct mtk_mac *mac = netdev_priv(dev);
2700 
2701 	mac->hw->msg_enable = value;
2702 }
2703 
2704 static int mtk_nway_reset(struct net_device *dev)
2705 {
2706 	struct mtk_mac *mac = netdev_priv(dev);
2707 
2708 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
2709 		return -EBUSY;
2710 
2711 	if (!mac->phylink)
2712 		return -EOPNOTSUPP;
2713 
2714 	return phylink_ethtool_nway_reset(mac->phylink);
2715 }
2716 
2717 static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2718 {
2719 	int i;
2720 
2721 	switch (stringset) {
2722 	case ETH_SS_STATS:
2723 		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
2724 			memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
2725 			data += ETH_GSTRING_LEN;
2726 		}
2727 		break;
2728 	}
2729 }
2730 
2731 static int mtk_get_sset_count(struct net_device *dev, int sset)
2732 {
2733 	switch (sset) {
2734 	case ETH_SS_STATS:
2735 		return ARRAY_SIZE(mtk_ethtool_stats);
2736 	default:
2737 		return -EOPNOTSUPP;
2738 	}
2739 }
2740 
2741 static void mtk_get_ethtool_stats(struct net_device *dev,
2742 				  struct ethtool_stats *stats, u64 *data)
2743 {
2744 	struct mtk_mac *mac = netdev_priv(dev);
2745 	struct mtk_hw_stats *hwstats = mac->hw_stats;
2746 	u64 *data_src, *data_dst;
2747 	unsigned int start;
2748 	int i;
2749 
2750 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
2751 		return;
2752 
2753 	if (netif_running(dev) && netif_device_present(dev)) {
2754 		if (spin_trylock_bh(&hwstats->stats_lock)) {
2755 			mtk_stats_update_mac(mac);
2756 			spin_unlock_bh(&hwstats->stats_lock);
2757 		}
2758 	}
2759 
2760 	data_src = (u64 *)hwstats;
2761 
2762 	do {
2763 		data_dst = data;
2764 		start = u64_stats_fetch_begin_irq(&hwstats->syncp);
2765 
2766 		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
2767 			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
2768 	} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
2769 }
2770 
2771 static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
2772 			 u32 *rule_locs)
2773 {
2774 	int ret = -EOPNOTSUPP;
2775 
2776 	switch (cmd->cmd) {
2777 	case ETHTOOL_GRXRINGS:
2778 		if (dev->hw_features & NETIF_F_LRO) {
2779 			cmd->data = MTK_MAX_RX_RING_NUM;
2780 			ret = 0;
2781 		}
2782 		break;
2783 	case ETHTOOL_GRXCLSRLCNT:
2784 		if (dev->hw_features & NETIF_F_LRO) {
2785 			struct mtk_mac *mac = netdev_priv(dev);
2786 
2787 			cmd->rule_cnt = mac->hwlro_ip_cnt;
2788 			ret = 0;
2789 		}
2790 		break;
2791 	case ETHTOOL_GRXCLSRULE:
2792 		if (dev->hw_features & NETIF_F_LRO)
2793 			ret = mtk_hwlro_get_fdir_entry(dev, cmd);
2794 		break;
2795 	case ETHTOOL_GRXCLSRLALL:
2796 		if (dev->hw_features & NETIF_F_LRO)
2797 			ret = mtk_hwlro_get_fdir_all(dev, cmd,
2798 						     rule_locs);
2799 		break;
2800 	default:
2801 		break;
2802 	}
2803 
2804 	return ret;
2805 }
2806 
2807 static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
2808 {
2809 	int ret = -EOPNOTSUPP;
2810 
2811 	switch (cmd->cmd) {
2812 	case ETHTOOL_SRXCLSRLINS:
2813 		if (dev->hw_features & NETIF_F_LRO)
2814 			ret = mtk_hwlro_add_ipaddr(dev, cmd);
2815 		break;
2816 	case ETHTOOL_SRXCLSRLDEL:
2817 		if (dev->hw_features & NETIF_F_LRO)
2818 			ret = mtk_hwlro_del_ipaddr(dev, cmd);
2819 		break;
2820 	default:
2821 		break;
2822 	}
2823 
2824 	return ret;
2825 }
2826 
2827 static const struct ethtool_ops mtk_ethtool_ops = {
2828 	.get_link_ksettings	= mtk_get_link_ksettings,
2829 	.set_link_ksettings	= mtk_set_link_ksettings,
2830 	.get_drvinfo		= mtk_get_drvinfo,
2831 	.get_msglevel		= mtk_get_msglevel,
2832 	.set_msglevel		= mtk_set_msglevel,
2833 	.nway_reset		= mtk_nway_reset,
2834 	.get_link		= ethtool_op_get_link,
2835 	.get_strings		= mtk_get_strings,
2836 	.get_sset_count		= mtk_get_sset_count,
2837 	.get_ethtool_stats	= mtk_get_ethtool_stats,
2838 	.get_rxnfc		= mtk_get_rxnfc,
2839 	.set_rxnfc              = mtk_set_rxnfc,
2840 };
2841 
2842 static const struct net_device_ops mtk_netdev_ops = {
2843 	.ndo_init		= mtk_init,
2844 	.ndo_uninit		= mtk_uninit,
2845 	.ndo_open		= mtk_open,
2846 	.ndo_stop		= mtk_stop,
2847 	.ndo_start_xmit		= mtk_start_xmit,
2848 	.ndo_set_mac_address	= mtk_set_mac_address,
2849 	.ndo_validate_addr	= eth_validate_addr,
2850 	.ndo_eth_ioctl		= mtk_do_ioctl,
2851 	.ndo_change_mtu		= mtk_change_mtu,
2852 	.ndo_tx_timeout		= mtk_tx_timeout,
2853 	.ndo_get_stats64        = mtk_get_stats64,
2854 	.ndo_fix_features	= mtk_fix_features,
2855 	.ndo_set_features	= mtk_set_features,
2856 #ifdef CONFIG_NET_POLL_CONTROLLER
2857 	.ndo_poll_controller	= mtk_poll_controller,
2858 #endif
2859 	.ndo_setup_tc		= mtk_eth_setup_tc,
2860 };
2861 
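/* Create one netdev for a "mediatek,eth-mac" DT child node: allocate
 * the net_device and per-MAC state, set up the hardware counters and
 * phylink, and advertise the SoC's feature set and MTU limit.
 */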
2862 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
2863 {
2864 	const __be32 *_id = of_get_property(np, "reg", NULL);
2865 	phy_interface_t phy_mode;
2866 	struct phylink *phylink;
2867 	struct mtk_mac *mac;
2868 	int id, err;
2869 
2870 	if (!_id) {
2871 		dev_err(eth->dev, "missing mac id\n");
2872 		return -EINVAL;
2873 	}
2874 
2875 	id = be32_to_cpup(_id);
2876 	if (id >= MTK_MAC_COUNT) {
2877 		dev_err(eth->dev, "%d is not a valid mac id\n", id);
2878 		return -EINVAL;
2879 	}
2880 
2881 	if (eth->netdev[id]) {
2882 		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
2883 		return -EINVAL;
2884 	}
2885 
2886 	eth->netdev[id] = alloc_etherdev(sizeof(*mac));
2887 	if (!eth->netdev[id]) {
2888 		dev_err(eth->dev, "alloc_etherdev failed\n");
2889 		return -ENOMEM;
2890 	}
2891 	mac = netdev_priv(eth->netdev[id]);
2892 	eth->mac[id] = mac;
2893 	mac->id = id;
2894 	mac->hw = eth;
2895 	mac->of_node = np;
2896 
2897 	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
2898 	mac->hwlro_ip_cnt = 0;
2899 
2900 	mac->hw_stats = devm_kzalloc(eth->dev,
2901 				     sizeof(*mac->hw_stats),
2902 				     GFP_KERNEL);
2903 	if (!mac->hw_stats) {
2904 		dev_err(eth->dev, "failed to allocate counter memory\n");
2905 		err = -ENOMEM;
2906 		goto free_netdev;
2907 	}
2908 	spin_lock_init(&mac->hw_stats->stats_lock);
2909 	u64_stats_init(&mac->hw_stats->syncp);
2910 	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
2911 
2912 	/* phylink create */
2913 	err = of_get_phy_mode(np, &phy_mode);
2914 	if (err) {
2915 		dev_err(eth->dev, "incorrect phy-mode\n");
2916 		goto free_netdev;
2917 	}
2918 
2919 	/* mac config is not set */
2920 	mac->interface = PHY_INTERFACE_MODE_NA;
2921 	mac->mode = MLO_AN_PHY;
2922 	mac->speed = SPEED_UNKNOWN;
2923 
2924 	mac->phylink_config.dev = &eth->netdev[id]->dev;
2925 	mac->phylink_config.type = PHYLINK_NETDEV;
2926 	mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
2927 		MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
2928 
2929 	__set_bit(PHY_INTERFACE_MODE_MII,
2930 		  mac->phylink_config.supported_interfaces);
2931 	__set_bit(PHY_INTERFACE_MODE_GMII,
2932 		  mac->phylink_config.supported_interfaces);
2933 
2934 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
2935 		phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
2936 
2937 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
2938 		__set_bit(PHY_INTERFACE_MODE_TRGMII,
2939 			  mac->phylink_config.supported_interfaces);
2940 
2941 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
2942 		__set_bit(PHY_INTERFACE_MODE_SGMII,
2943 			  mac->phylink_config.supported_interfaces);
2944 		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
2945 			  mac->phylink_config.supported_interfaces);
2946 		__set_bit(PHY_INTERFACE_MODE_2500BASEX,
2947 			  mac->phylink_config.supported_interfaces);
2948 	}
2949 
2950 	phylink = phylink_create(&mac->phylink_config,
2951 				 of_fwnode_handle(mac->of_node),
2952 				 phy_mode, &mtk_phylink_ops);
2953 	if (IS_ERR(phylink)) {
2954 		err = PTR_ERR(phylink);
2955 		goto free_netdev;
2956 	}
2957 
2958 	mac->phylink = phylink;
2959 
2960 	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
2961 	eth->netdev[id]->watchdog_timeo = 5 * HZ;
2962 	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
2963 	eth->netdev[id]->base_addr = (unsigned long)eth->base;
2964 
2965 	eth->netdev[id]->hw_features = eth->soc->hw_features;
2966 	if (eth->hwlro)
2967 		eth->netdev[id]->hw_features |= NETIF_F_LRO;
2968 
2969 	eth->netdev[id]->vlan_features = eth->soc->hw_features &
2970 		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
2971 	eth->netdev[id]->features |= eth->soc->hw_features;
2972 	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
2973 
2974 	eth->netdev[id]->irq = eth->irq[0];
2975 	eth->netdev[id]->dev.of_node = np;
2976 
2977 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2978 		eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
2979 	else
2980 		eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
2981 
2982 	return 0;
2983 
2984 free_netdev:
2985 	free_netdev(eth->netdev[id]);
2986 	return err;
2987 }
2988 
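/* Platform probe: map the frame engine registers, look up the syscon
 * regmaps, IRQs and clocks, bring up the hardware, create a netdev per
 * enabled MAC node and register them together with the TX/RX NAPI
 * contexts hosted on a dummy device.
 */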
2989 static int mtk_probe(struct platform_device *pdev)
2990 {
2991 	struct device_node *mac_np;
2992 	struct mtk_eth *eth;
2993 	int err, i;
2994 
2995 	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
2996 	if (!eth)
2997 		return -ENOMEM;
2998 
2999 	eth->soc = of_device_get_match_data(&pdev->dev);
3000 
3001 	eth->dev = &pdev->dev;
3002 	eth->base = devm_platform_ioremap_resource(pdev, 0);
3003 	if (IS_ERR(eth->base))
3004 		return PTR_ERR(eth->base);
3005 
3006 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3007 		eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
3008 		eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
3009 	} else {
3010 		eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
3011 		eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
3012 	}
3013 
3014 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3015 		eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
3016 		eth->ip_align = NET_IP_ALIGN;
3017 	} else {
3018 		eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
3019 	}
3020 
3021 	spin_lock_init(&eth->page_lock);
3022 	spin_lock_init(&eth->tx_irq_lock);
3023 	spin_lock_init(&eth->rx_irq_lock);
3024 	spin_lock_init(&eth->dim_lock);
3025 
3026 	eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
3027 	INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
3028 
3029 	eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
3030 	INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);
3031 
3032 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3033 		eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
3034 							      "mediatek,ethsys");
3035 		if (IS_ERR(eth->ethsys)) {
3036 			dev_err(&pdev->dev, "no ethsys regmap found\n");
3037 			return PTR_ERR(eth->ethsys);
3038 		}
3039 	}
3040 
3041 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
3042 		eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
3043 							     "mediatek,infracfg");
3044 		if (IS_ERR(eth->infra)) {
3045 			dev_err(&pdev->dev, "no infracfg regmap found\n");
3046 			return PTR_ERR(eth->infra);
3047 		}
3048 	}
3049 
3050 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
3051 		eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
3052 					  GFP_KERNEL);
3053 		if (!eth->sgmii)
3054 			return -ENOMEM;
3055 
3056 		err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
3057 				     eth->soc->ana_rgc3);
3058 
3059 		if (err)
3060 			return err;
3061 	}
3062 
3063 	if (eth->soc->required_pctl) {
3064 		eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
3065 							    "mediatek,pctl");
3066 		if (IS_ERR(eth->pctl)) {
3067 			dev_err(&pdev->dev, "no pctl regmap found\n");
3068 			return PTR_ERR(eth->pctl);
3069 		}
3070 	}
3071 
3072 	for (i = 0; i < 3; i++) {
3073 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
3074 			eth->irq[i] = eth->irq[0];
3075 		else
3076 			eth->irq[i] = platform_get_irq(pdev, i);
3077 		if (eth->irq[i] < 0) {
3078 			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
3079 			return -ENXIO;
3080 		}
3081 	}
3082 	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
3083 		eth->clks[i] = devm_clk_get(eth->dev,
3084 					    mtk_clks_source_name[i]);
3085 		if (IS_ERR(eth->clks[i])) {
3086 			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
3087 				return -EPROBE_DEFER;
3088 			if (eth->soc->required_clks & BIT(i)) {
3089 				dev_err(&pdev->dev, "clock %s not found\n",
3090 					mtk_clks_source_name[i]);
3091 				return -EINVAL;
3092 			}
3093 			eth->clks[i] = NULL;
3094 		}
3095 	}
3096 
3097 	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
3098 	INIT_WORK(&eth->pending_work, mtk_pending_work);
3099 
3100 	err = mtk_hw_init(eth);
3101 	if (err)
3102 		return err;
3103 
3104 	eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
3105 
3106 	for_each_child_of_node(pdev->dev.of_node, mac_np) {
3107 		if (!of_device_is_compatible(mac_np,
3108 					     "mediatek,eth-mac"))
3109 			continue;
3110 
3111 		if (!of_device_is_available(mac_np))
3112 			continue;
3113 
3114 		err = mtk_add_mac(eth, mac_np);
3115 		if (err) {
3116 			of_node_put(mac_np);
3117 			goto err_deinit_hw;
3118 		}
3119 	}
3120 
3121 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
3122 		err = devm_request_irq(eth->dev, eth->irq[0],
3123 				       mtk_handle_irq, 0,
3124 				       dev_name(eth->dev), eth);
3125 	} else {
3126 		err = devm_request_irq(eth->dev, eth->irq[1],
3127 				       mtk_handle_irq_tx, 0,
3128 				       dev_name(eth->dev), eth);
3129 		if (err)
3130 			goto err_free_dev;
3131 
3132 		err = devm_request_irq(eth->dev, eth->irq[2],
3133 				       mtk_handle_irq_rx, 0,
3134 				       dev_name(eth->dev), eth);
3135 	}
3136 	if (err)
3137 		goto err_free_dev;
3138 
3139 	/* No MT7628/88 support yet */
3140 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3141 		err = mtk_mdio_init(eth);
3142 		if (err)
3143 			goto err_free_dev;
3144 	}
3145 
3146 	if (eth->soc->offload_version) {
3147 		err = mtk_ppe_init(&eth->ppe, eth->dev,
3148 				   eth->base + MTK_ETH_PPE_BASE, 2);
3149 		if (err)
3150 			goto err_free_dev;
3151 
3152 		err = mtk_eth_offload_init(eth);
3153 		if (err)
3154 			goto err_free_dev;
3155 	}
3156 
3157 	for (i = 0; i < MTK_MAX_DEVS; i++) {
3158 		if (!eth->netdev[i])
3159 			continue;
3160 
3161 		err = register_netdev(eth->netdev[i]);
3162 		if (err) {
3163 			dev_err(eth->dev, "error bringing up device\n");
3164 			goto err_deinit_mdio;
3165 		}
3166 		netif_info(eth, probe, eth->netdev[i],
3167 			   "mediatek frame engine at 0x%08lx, irq %d\n",
3168 			   eth->netdev[i]->base_addr, eth->irq[0]);
3169 	}
3170 
3171 	/* we run 2 devices on the same DMA ring so we need a dummy device
3172 	 * for NAPI to work
3173 	 */
3174 	init_dummy_netdev(&eth->dummy_dev);
3175 	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
3176 		       MTK_NAPI_WEIGHT);
3177 	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
3178 		       MTK_NAPI_WEIGHT);
3179 
3180 	platform_set_drvdata(pdev, eth);
3181 
3182 	return 0;
3183 
3184 err_deinit_mdio:
3185 	mtk_mdio_cleanup(eth);
3186 err_free_dev:
3187 	mtk_free_dev(eth);
3188 err_deinit_hw:
3189 	mtk_hw_deinit(eth);
3190 
3191 	return err;
3192 }
3193 
3194 static int mtk_remove(struct platform_device *pdev)
3195 {
3196 	struct mtk_eth *eth = platform_get_drvdata(pdev);
3197 	struct mtk_mac *mac;
3198 	int i;
3199 
3200 	/* stop all devices to make sure that dma is properly shut down */
3201 	for (i = 0; i < MTK_MAC_COUNT; i++) {
3202 		if (!eth->netdev[i])
3203 			continue;
3204 		mtk_stop(eth->netdev[i]);
3205 		mac = netdev_priv(eth->netdev[i]);
3206 		phylink_disconnect_phy(mac->phylink);
3207 	}
3208 
3209 	mtk_hw_deinit(eth);
3210 
3211 	netif_napi_del(&eth->tx_napi);
3212 	netif_napi_del(&eth->rx_napi);
3213 	mtk_cleanup(eth);
3214 	mtk_mdio_cleanup(eth);
3215 
3216 	return 0;
3217 }
3218 
3219 static const struct mtk_soc_data mt2701_data = {
3220 	.caps = MT7623_CAPS | MTK_HWLRO,
3221 	.hw_features = MTK_HW_FEATURES,
3222 	.required_clks = MT7623_CLKS_BITMAP,
3223 	.required_pctl = true,
3224 };
3225 
3226 static const struct mtk_soc_data mt7621_data = {
3227 	.caps = MT7621_CAPS,
3228 	.hw_features = MTK_HW_FEATURES,
3229 	.required_clks = MT7621_CLKS_BITMAP,
3230 	.required_pctl = false,
3231 	.offload_version = 2,
3232 };
3233 
3234 static const struct mtk_soc_data mt7622_data = {
3235 	.ana_rgc3 = 0x2028,
3236 	.caps = MT7622_CAPS | MTK_HWLRO,
3237 	.hw_features = MTK_HW_FEATURES,
3238 	.required_clks = MT7622_CLKS_BITMAP,
3239 	.required_pctl = false,
3240 	.offload_version = 2,
3241 };
3242 
3243 static const struct mtk_soc_data mt7623_data = {
3244 	.caps = MT7623_CAPS | MTK_HWLRO,
3245 	.hw_features = MTK_HW_FEATURES,
3246 	.required_clks = MT7623_CLKS_BITMAP,
3247 	.required_pctl = true,
3248 	.offload_version = 2,
3249 };
3250 
3251 static const struct mtk_soc_data mt7629_data = {
3252 	.ana_rgc3 = 0x128,
3253 	.caps = MT7629_CAPS | MTK_HWLRO,
3254 	.hw_features = MTK_HW_FEATURES,
3255 	.required_clks = MT7629_CLKS_BITMAP,
3256 	.required_pctl = false,
3257 };
3258 
3259 static const struct mtk_soc_data rt5350_data = {
3260 	.caps = MT7628_CAPS,
3261 	.hw_features = MTK_HW_FEATURES_MT7628,
3262 	.required_clks = MT7628_CLKS_BITMAP,
3263 	.required_pctl = false,
3264 };
3265 
3266 const struct of_device_id of_mtk_match[] = {
3267 	{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
3268 	{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
3269 	{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
3270 	{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
3271 	{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
3272 	{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
3273 	{},
3274 };
3275 MODULE_DEVICE_TABLE(of, of_mtk_match);
3276 
3277 static struct platform_driver mtk_driver = {
3278 	.probe = mtk_probe,
3279 	.remove = mtk_remove,
3280 	.driver = {
3281 		.name = "mtk_soc_eth",
3282 		.of_match_table = of_mtk_match,
3283 	},
3284 };
3285 
3286 module_platform_driver(mtk_driver);
3287 
3288 MODULE_LICENSE("GPL");
3289 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
3290 MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
3291