xref: /linux/drivers/net/ethernet/mediatek/mtk_eth_soc.c (revision 8be4d31cb8aaeea27bde4b7ddb26e28a89062ebf)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *
4  *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
5  *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
6  *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
7  */
8 
9 #include <linux/of.h>
10 #include <linux/of_mdio.h>
11 #include <linux/of_net.h>
12 #include <linux/of_address.h>
13 #include <linux/mfd/syscon.h>
14 #include <linux/platform_device.h>
15 #include <linux/regmap.h>
16 #include <linux/clk.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/if_vlan.h>
19 #include <linux/reset.h>
20 #include <linux/tcp.h>
21 #include <linux/interrupt.h>
22 #include <linux/pinctrl/devinfo.h>
23 #include <linux/phylink.h>
24 #include <linux/pcs/pcs-mtk-lynxi.h>
25 #include <linux/jhash.h>
26 #include <linux/bitfield.h>
27 #include <net/dsa.h>
28 #include <net/dst_metadata.h>
29 #include <net/page_pool/helpers.h>
30 #include <linux/genalloc.h>
31 
32 #include "mtk_eth_soc.h"
33 #include "mtk_wed.h"
34 
35 static int mtk_msg_level = -1;
36 module_param_named(msg_level, mtk_msg_level, int, 0);
37 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
38 
39 #define MTK_ETHTOOL_STAT(x) { #x, \
40 			      offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
41 
42 #define MTK_ETHTOOL_XDP_STAT(x) { #x, \
43 				  offsetof(struct mtk_hw_stats, xdp_stats.x) / \
44 				  sizeof(u64) }
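/* Both helpers store the counter offset in units of u64 words, so the
 * ethtool code can (as far as this table is concerned) treat struct
 * mtk_hw_stats as a flat array of u64 counters and index it directly.
 */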
45 
46 static const struct mtk_reg_map mtk_reg_map = {
47 	.tx_irq_mask		= 0x1a1c,
48 	.tx_irq_status		= 0x1a18,
49 	.pdma = {
50 		.rx_ptr		= 0x0900,
51 		.rx_cnt_cfg	= 0x0904,
52 		.pcrx_ptr	= 0x0908,
53 		.glo_cfg	= 0x0a04,
54 		.rst_idx	= 0x0a08,
55 		.delay_irq	= 0x0a0c,
56 		.irq_status	= 0x0a20,
57 		.irq_mask	= 0x0a28,
58 		.adma_rx_dbg0	= 0x0a38,
59 		.int_grp	= 0x0a50,
60 	},
61 	.qdma = {
62 		.qtx_cfg	= 0x1800,
63 		.qtx_sch	= 0x1804,
64 		.rx_ptr		= 0x1900,
65 		.rx_cnt_cfg	= 0x1904,
66 		.qcrx_ptr	= 0x1908,
67 		.glo_cfg	= 0x1a04,
68 		.rst_idx	= 0x1a08,
69 		.delay_irq	= 0x1a0c,
70 		.fc_th		= 0x1a10,
71 		.tx_sch_rate	= 0x1a14,
72 		.int_grp	= 0x1a20,
73 		.hred		= 0x1a44,
74 		.ctx_ptr	= 0x1b00,
75 		.dtx_ptr	= 0x1b04,
76 		.crx_ptr	= 0x1b10,
77 		.drx_ptr	= 0x1b14,
78 		.fq_head	= 0x1b20,
79 		.fq_tail	= 0x1b24,
80 		.fq_count	= 0x1b28,
81 		.fq_blen	= 0x1b2c,
82 	},
83 	.gdm1_cnt		= 0x2400,
84 	.gdma_to_ppe	= {
85 		[0]		= 0x4444,
86 	},
87 	.ppe_base		= 0x0c00,
88 	.wdma_base = {
89 		[0]		= 0x2800,
90 		[1]		= 0x2c00,
91 	},
92 	.pse_iq_sta		= 0x0110,
93 	.pse_oq_sta		= 0x0118,
94 };
95 
96 static const struct mtk_reg_map mt7628_reg_map = {
97 	.tx_irq_mask		= 0x0a28,
98 	.tx_irq_status		= 0x0a20,
99 	.pdma = {
100 		.rx_ptr		= 0x0900,
101 		.rx_cnt_cfg	= 0x0904,
102 		.pcrx_ptr	= 0x0908,
103 		.glo_cfg	= 0x0a04,
104 		.rst_idx	= 0x0a08,
105 		.delay_irq	= 0x0a0c,
106 		.irq_status	= 0x0a20,
107 		.irq_mask	= 0x0a28,
108 		.int_grp	= 0x0a50,
109 	},
110 };
111 
112 static const struct mtk_reg_map mt7986_reg_map = {
113 	.tx_irq_mask		= 0x461c,
114 	.tx_irq_status		= 0x4618,
115 	.pdma = {
116 		.rx_ptr		= 0x4100,
117 		.rx_cnt_cfg	= 0x4104,
118 		.pcrx_ptr	= 0x4108,
119 		.glo_cfg	= 0x4204,
120 		.rst_idx	= 0x4208,
121 		.delay_irq	= 0x420c,
122 		.irq_status	= 0x4220,
123 		.irq_mask	= 0x4228,
124 		.adma_rx_dbg0	= 0x4238,
125 		.int_grp	= 0x4250,
126 	},
127 	.qdma = {
128 		.qtx_cfg	= 0x4400,
129 		.qtx_sch	= 0x4404,
130 		.rx_ptr		= 0x4500,
131 		.rx_cnt_cfg	= 0x4504,
132 		.qcrx_ptr	= 0x4508,
133 		.glo_cfg	= 0x4604,
134 		.rst_idx	= 0x4608,
135 		.delay_irq	= 0x460c,
136 		.fc_th		= 0x4610,
137 		.int_grp	= 0x4620,
138 		.hred		= 0x4644,
139 		.ctx_ptr	= 0x4700,
140 		.dtx_ptr	= 0x4704,
141 		.crx_ptr	= 0x4710,
142 		.drx_ptr	= 0x4714,
143 		.fq_head	= 0x4720,
144 		.fq_tail	= 0x4724,
145 		.fq_count	= 0x4728,
146 		.fq_blen	= 0x472c,
147 		.tx_sch_rate	= 0x4798,
148 	},
149 	.gdm1_cnt		= 0x1c00,
150 	.gdma_to_ppe	= {
151 		[0]		= 0x3333,
152 		[1]		= 0x4444,
153 	},
154 	.ppe_base		= 0x2000,
155 	.wdma_base = {
156 		[0]		= 0x4800,
157 		[1]		= 0x4c00,
158 	},
159 	.pse_iq_sta		= 0x0180,
160 	.pse_oq_sta		= 0x01a0,
161 };
162 
163 static const struct mtk_reg_map mt7988_reg_map = {
164 	.tx_irq_mask		= 0x461c,
165 	.tx_irq_status		= 0x4618,
166 	.pdma = {
167 		.rx_ptr		= 0x6900,
168 		.rx_cnt_cfg	= 0x6904,
169 		.pcrx_ptr	= 0x6908,
170 		.glo_cfg	= 0x6a04,
171 		.rst_idx	= 0x6a08,
172 		.delay_irq	= 0x6a0c,
173 		.irq_status	= 0x6a20,
174 		.irq_mask	= 0x6a28,
175 		.adma_rx_dbg0	= 0x6a38,
176 		.int_grp	= 0x6a50,
177 	},
178 	.qdma = {
179 		.qtx_cfg	= 0x4400,
180 		.qtx_sch	= 0x4404,
181 		.rx_ptr		= 0x4500,
182 		.rx_cnt_cfg	= 0x4504,
183 		.qcrx_ptr	= 0x4508,
184 		.glo_cfg	= 0x4604,
185 		.rst_idx	= 0x4608,
186 		.delay_irq	= 0x460c,
187 		.fc_th		= 0x4610,
188 		.int_grp	= 0x4620,
189 		.hred		= 0x4644,
190 		.ctx_ptr	= 0x4700,
191 		.dtx_ptr	= 0x4704,
192 		.crx_ptr	= 0x4710,
193 		.drx_ptr	= 0x4714,
194 		.fq_head	= 0x4720,
195 		.fq_tail	= 0x4724,
196 		.fq_count	= 0x4728,
197 		.fq_blen	= 0x472c,
198 		.tx_sch_rate	= 0x4798,
199 	},
200 	.gdm1_cnt		= 0x1c00,
201 	.gdma_to_ppe	= {
202 		[0]		= 0x3333,
203 		[1]		= 0x4444,
204 		[2]		= 0xcccc,
205 	},
206 	.ppe_base		= 0x2000,
207 	.wdma_base = {
208 		[0]		= 0x4800,
209 		[1]		= 0x4c00,
210 		[2]		= 0x5000,
211 	},
212 	.pse_iq_sta		= 0x0180,
213 	.pse_oq_sta		= 0x01a0,
214 };
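/* The mtk_reg_map tables above describe the per-SoC frame engine register
 * layout (PDMA/QDMA/GDM/PPE/WDMA offsets); the rest of the driver reaches
 * them through eth->soc->reg_map. MT7628 describes only PDMA, matching the
 * MTK_QDMA capability checks used elsewhere in the driver.
 */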
215 
216 /* strings used by ethtool */
217 static const struct mtk_ethtool_stats {
218 	char str[ETH_GSTRING_LEN];
219 	u32 offset;
220 } mtk_ethtool_stats[] = {
221 	MTK_ETHTOOL_STAT(tx_bytes),
222 	MTK_ETHTOOL_STAT(tx_packets),
223 	MTK_ETHTOOL_STAT(tx_skip),
224 	MTK_ETHTOOL_STAT(tx_collisions),
225 	MTK_ETHTOOL_STAT(rx_bytes),
226 	MTK_ETHTOOL_STAT(rx_packets),
227 	MTK_ETHTOOL_STAT(rx_overflow),
228 	MTK_ETHTOOL_STAT(rx_fcs_errors),
229 	MTK_ETHTOOL_STAT(rx_short_errors),
230 	MTK_ETHTOOL_STAT(rx_long_errors),
231 	MTK_ETHTOOL_STAT(rx_checksum_errors),
232 	MTK_ETHTOOL_STAT(rx_flow_control_packets),
233 	MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect),
234 	MTK_ETHTOOL_XDP_STAT(rx_xdp_pass),
235 	MTK_ETHTOOL_XDP_STAT(rx_xdp_drop),
236 	MTK_ETHTOOL_XDP_STAT(rx_xdp_tx),
237 	MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors),
238 	MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit),
239 	MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors),
240 };
241 
242 static const char * const mtk_clks_source_name[] = {
243 	"ethif",
244 	"sgmiitop",
245 	"esw",
246 	"gp0",
247 	"gp1",
248 	"gp2",
249 	"gp3",
250 	"xgp1",
251 	"xgp2",
252 	"xgp3",
253 	"crypto",
254 	"fe",
255 	"trgpll",
256 	"sgmii_tx250m",
257 	"sgmii_rx250m",
258 	"sgmii_cdr_ref",
259 	"sgmii_cdr_fb",
260 	"sgmii2_tx250m",
261 	"sgmii2_rx250m",
262 	"sgmii2_cdr_ref",
263 	"sgmii2_cdr_fb",
264 	"sgmii_ck",
265 	"eth2pll",
266 	"wocpu0",
267 	"wocpu1",
268 	"netsys0",
269 	"netsys1",
270 	"ethwarp_wocpu2",
271 	"ethwarp_wocpu1",
272 	"ethwarp_wocpu0",
273 	"top_sgm0_sel",
274 	"top_sgm1_sel",
275 	"top_eth_gmii_sel",
276 	"top_eth_refck_50m_sel",
277 	"top_eth_sys_200m_sel",
278 	"top_eth_sys_sel",
279 	"top_eth_xgmii_sel",
280 	"top_eth_mii_sel",
281 	"top_netsys_sel",
282 	"top_netsys_500m_sel",
283 	"top_netsys_pao_2x_sel",
284 	"top_netsys_sync_250m_sel",
285 	"top_netsys_ppefb_250m_sel",
286 	"top_netsys_warp_sel",
287 };
288 
289 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)

290 {
291 	__raw_writel(val, eth->base + reg);
292 }
293 
294 u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
295 {
296 	return __raw_readl(eth->base + reg);
297 }
298 
299 u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg)
300 {
301 	u32 val;
302 
303 	val = mtk_r32(eth, reg);
304 	val &= ~mask;
305 	val |= set;
306 	mtk_w32(eth, val, reg);
307 	return reg;
308 }
309 
310 static int mtk_mdio_busy_wait(struct mtk_eth *eth)
311 {
312 	unsigned long t_start = jiffies;
313 
314 	while (1) {
315 		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
316 			return 0;
317 		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
318 			break;
319 		cond_resched();
320 	}
321 
322 	dev_err(eth->dev, "mdio: MDIO timeout\n");
323 	return -ETIMEDOUT;
324 }
325 
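/* MDIO access helpers: every operation polls PHY_IAC_ACCESS via
 * mtk_mdio_busy_wait() before and after issuing a command. Clause 22
 * accesses are a single IAC command; Clause 45 accesses are two commands,
 * an address cycle (PHY_IAC_CMD_C45_ADDR with the target register in the
 * data field) followed by the actual read or write on the selected devad.
 */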
326 static int _mtk_mdio_write_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
327 			       u32 write_data)
328 {
329 	int ret;
330 
331 	ret = mtk_mdio_busy_wait(eth);
332 	if (ret < 0)
333 		return ret;
334 
335 	mtk_w32(eth, PHY_IAC_ACCESS |
336 		PHY_IAC_START_C22 |
337 		PHY_IAC_CMD_WRITE |
338 		PHY_IAC_REG(phy_reg) |
339 		PHY_IAC_ADDR(phy_addr) |
340 		PHY_IAC_DATA(write_data),
341 		MTK_PHY_IAC);
342 
343 	ret = mtk_mdio_busy_wait(eth);
344 	if (ret < 0)
345 		return ret;
346 
347 	return 0;
348 }
349 
350 static int _mtk_mdio_write_c45(struct mtk_eth *eth, u32 phy_addr,
351 			       u32 devad, u32 phy_reg, u32 write_data)
352 {
353 	int ret;
354 
355 	ret = mtk_mdio_busy_wait(eth);
356 	if (ret < 0)
357 		return ret;
358 
359 	mtk_w32(eth, PHY_IAC_ACCESS |
360 		PHY_IAC_START_C45 |
361 		PHY_IAC_CMD_C45_ADDR |
362 		PHY_IAC_REG(devad) |
363 		PHY_IAC_ADDR(phy_addr) |
364 		PHY_IAC_DATA(phy_reg),
365 		MTK_PHY_IAC);
366 
367 	ret = mtk_mdio_busy_wait(eth);
368 	if (ret < 0)
369 		return ret;
370 
371 	mtk_w32(eth, PHY_IAC_ACCESS |
372 		PHY_IAC_START_C45 |
373 		PHY_IAC_CMD_WRITE |
374 		PHY_IAC_REG(devad) |
375 		PHY_IAC_ADDR(phy_addr) |
376 		PHY_IAC_DATA(write_data),
377 		MTK_PHY_IAC);
378 
379 	ret = mtk_mdio_busy_wait(eth);
380 	if (ret < 0)
381 		return ret;
382 
383 	return 0;
384 }
385 
386 static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
387 {
388 	int ret;
389 
390 	ret = mtk_mdio_busy_wait(eth);
391 	if (ret < 0)
392 		return ret;
393 
394 	mtk_w32(eth, PHY_IAC_ACCESS |
395 		PHY_IAC_START_C22 |
396 		PHY_IAC_CMD_C22_READ |
397 		PHY_IAC_REG(phy_reg) |
398 		PHY_IAC_ADDR(phy_addr),
399 		MTK_PHY_IAC);
400 
401 	ret = mtk_mdio_busy_wait(eth);
402 	if (ret < 0)
403 		return ret;
404 
405 	return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
406 }
407 
408 static int _mtk_mdio_read_c45(struct mtk_eth *eth, u32 phy_addr,
409 			      u32 devad, u32 phy_reg)
410 {
411 	int ret;
412 
413 	ret = mtk_mdio_busy_wait(eth);
414 	if (ret < 0)
415 		return ret;
416 
417 	mtk_w32(eth, PHY_IAC_ACCESS |
418 		PHY_IAC_START_C45 |
419 		PHY_IAC_CMD_C45_ADDR |
420 		PHY_IAC_REG(devad) |
421 		PHY_IAC_ADDR(phy_addr) |
422 		PHY_IAC_DATA(phy_reg),
423 		MTK_PHY_IAC);
424 
425 	ret = mtk_mdio_busy_wait(eth);
426 	if (ret < 0)
427 		return ret;
428 
429 	mtk_w32(eth, PHY_IAC_ACCESS |
430 		PHY_IAC_START_C45 |
431 		PHY_IAC_CMD_C45_READ |
432 		PHY_IAC_REG(devad) |
433 		PHY_IAC_ADDR(phy_addr),
434 		MTK_PHY_IAC);
435 
436 	ret = mtk_mdio_busy_wait(eth);
437 	if (ret < 0)
438 		return ret;
439 
440 	return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
441 }
442 
443 static int mtk_mdio_write_c22(struct mii_bus *bus, int phy_addr,
444 			      int phy_reg, u16 val)
445 {
446 	struct mtk_eth *eth = bus->priv;
447 
448 	return _mtk_mdio_write_c22(eth, phy_addr, phy_reg, val);
449 }
450 
451 static int mtk_mdio_write_c45(struct mii_bus *bus, int phy_addr,
452 			      int devad, int phy_reg, u16 val)
453 {
454 	struct mtk_eth *eth = bus->priv;
455 
456 	return _mtk_mdio_write_c45(eth, phy_addr, devad, phy_reg, val);
457 }
458 
459 static int mtk_mdio_read_c22(struct mii_bus *bus, int phy_addr, int phy_reg)
460 {
461 	struct mtk_eth *eth = bus->priv;
462 
463 	return _mtk_mdio_read_c22(eth, phy_addr, phy_reg);
464 }
465 
466 static int mtk_mdio_read_c45(struct mii_bus *bus, int phy_addr, int devad,
467 			     int phy_reg)
468 {
469 	struct mtk_eth *eth = bus->priv;
470 
471 	return _mtk_mdio_read_c45(eth, phy_addr, devad, phy_reg);
472 }
473 
474 static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
475 				     phy_interface_t interface)
476 {
477 	u32 val;
478 
479 	val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
480 		ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
481 
482 	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
483 			   ETHSYS_TRGMII_MT7621_MASK, val);
484 
485 	return 0;
486 }
487 
488 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
489 				   phy_interface_t interface)
490 {
491 	int ret;
492 
493 	if (interface == PHY_INTERFACE_MODE_TRGMII) {
494 		mtk_w32(eth, TRGMII_MODE, INTF_MODE);
495 		ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], 500000000);
496 		if (ret)
497 			dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
498 		return;
499 	}
500 
501 	dev_err(eth->dev, "Missing PLL configuration, ethernet may not work\n");
502 }
503 
504 static void mtk_setup_bridge_switch(struct mtk_eth *eth)
505 {
506 	/* Force Port1 XGMAC Link Up */
507 	mtk_m32(eth, 0, MTK_XGMAC_FORCE_MODE(MTK_GMAC1_ID),
508 		MTK_XGMAC_STS(MTK_GMAC1_ID));
509 
510 	/* Adjust GSW bridge IPG to 11 */
511 	mtk_m32(eth, GSWTX_IPG_MASK | GSWRX_IPG_MASK,
512 		(GSW_IPG_11 << GSWTX_IPG_SHIFT) |
513 		(GSW_IPG_11 << GSWRX_IPG_SHIFT),
514 		MTK_GSW_CFG);
515 }
516 
517 static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
518 					      phy_interface_t interface)
519 {
520 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
521 					   phylink_config);
522 	struct mtk_eth *eth = mac->hw;
523 	unsigned int sid;
524 
525 	if (interface == PHY_INTERFACE_MODE_SGMII ||
526 	    phy_interface_mode_is_8023z(interface)) {
527 		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
528 		       0 : mac->id;
529 
530 		return eth->sgmii_pcs[sid];
531 	}
532 
533 	return NULL;
534 }
535 
536 static int mtk_mac_prepare(struct phylink_config *config, unsigned int mode,
537 			   phy_interface_t iface)
538 {
539 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
540 					   phylink_config);
541 	struct mtk_eth *eth = mac->hw;
542 
543 	if (mtk_interface_mode_is_xgmii(eth, iface) &&
544 	    mac->id != MTK_GMAC1_ID) {
545 		mtk_m32(mac->hw, XMAC_MCR_TRX_DISABLE,
546 			XMAC_MCR_TRX_DISABLE, MTK_XMAC_MCR(mac->id));
547 
548 		mtk_m32(mac->hw, MTK_XGMAC_FORCE_MODE(mac->id) |
549 				 MTK_XGMAC_FORCE_LINK(mac->id),
550 			MTK_XGMAC_FORCE_MODE(mac->id), MTK_XGMAC_STS(mac->id));
551 	}
552 
553 	return 0;
554 }
555 
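/* phylink .mac_config: on an interface change this selects the SoC-level
 * path/pin muxing for the new mode, tunes the TRGMII clocks for GMAC0
 * where applicable, and programs the GE_MODE field in ETHSYS_SYSCFG0.
 * For SGMII/802.3z the SGMII path is temporarily disabled here and the
 * saved syscfg0 value is restored later in mtk_mac_finish().
 */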
556 static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
557 			   const struct phylink_link_state *state)
558 {
559 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
560 					   phylink_config);
561 	struct mtk_eth *eth = mac->hw;
562 	int val, ge_mode, err = 0;
563 	u32 i;
564 
565 	/* MT76x8 has no hardware settings for the MAC */
566 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
567 	    mac->interface != state->interface) {
568 		/* Setup soc pin functions */
569 		switch (state->interface) {
570 		case PHY_INTERFACE_MODE_TRGMII:
571 		case PHY_INTERFACE_MODE_RGMII_TXID:
572 		case PHY_INTERFACE_MODE_RGMII_RXID:
573 		case PHY_INTERFACE_MODE_RGMII_ID:
574 		case PHY_INTERFACE_MODE_RGMII:
575 		case PHY_INTERFACE_MODE_MII:
576 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
577 				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
578 				if (err)
579 					goto init_err;
580 			}
581 			break;
582 		case PHY_INTERFACE_MODE_1000BASEX:
583 		case PHY_INTERFACE_MODE_2500BASEX:
584 		case PHY_INTERFACE_MODE_SGMII:
585 			err = mtk_gmac_sgmii_path_setup(eth, mac->id);
586 			if (err)
587 				goto init_err;
588 			break;
589 		case PHY_INTERFACE_MODE_GMII:
590 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
591 				err = mtk_gmac_gephy_path_setup(eth, mac->id);
592 				if (err)
593 					goto init_err;
594 			}
595 			break;
596 		case PHY_INTERFACE_MODE_INTERNAL:
597 			if (mac->id == MTK_GMAC2_ID &&
598 			    MTK_HAS_CAPS(eth->soc->caps, MTK_2P5GPHY)) {
599 				err = mtk_gmac_2p5gphy_path_setup(eth, mac->id);
600 				if (err)
601 					goto init_err;
602 			}
603 			break;
604 		default:
605 			goto err_phy;
606 		}
607 
608 		/* Setup clock for 1st gmac */
609 		if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
610 		    !phy_interface_mode_is_8023z(state->interface) &&
611 		    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
612 			if (MTK_HAS_CAPS(mac->hw->soc->caps,
613 					 MTK_TRGMII_MT7621_CLK)) {
614 				if (mt7621_gmac0_rgmii_adjust(mac->hw,
615 							      state->interface))
616 					goto err_phy;
617 			} else {
618 				mtk_gmac0_rgmii_adjust(mac->hw,
619 						       state->interface);
620 
621 				/* mt7623_pad_clk_setup */
622 				for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
623 					mtk_w32(mac->hw,
624 						TD_DM_DRVP(8) | TD_DM_DRVN(8),
625 						TRGMII_TD_ODT(i));
626 
627 				/* Assert/release MT7623 RXC reset */
628 				mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
629 					TRGMII_RCK_CTRL);
630 				mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
631 			}
632 		}
633 
634 		switch (state->interface) {
635 		case PHY_INTERFACE_MODE_MII:
636 		case PHY_INTERFACE_MODE_GMII:
637 			ge_mode = 1;
638 			break;
639 		default:
640 			ge_mode = 0;
641 			break;
642 		}
643 
644 		/* put the gmac into the right mode */
645 		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
646 		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
647 		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
648 		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
649 
650 		mac->interface = state->interface;
651 	}
652 
653 	/* SGMII */
654 	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
655 	    phy_interface_mode_is_8023z(state->interface)) {
656 		/* The path from GMAC to SGMII will be enabled once the
657 		 * SGMIISYS setup is done.
658 		 */
659 		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
660 
661 		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
662 				   SYSCFG0_SGMII_MASK,
663 				   ~(u32)SYSCFG0_SGMII_MASK);
664 
665 		/* Save the syscfg0 value for mac_finish */
666 		mac->syscfg0 = val;
667 	} else if (phylink_autoneg_inband(mode)) {
668 		dev_err(eth->dev,
669 			"In-band mode not supported in non SGMII mode!\n");
670 		return;
671 	}
672 
673 	/* Setup gmac */
674 	if (mtk_interface_mode_is_xgmii(eth, state->interface)) {
675 		mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id));
676 		mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id));
677 
678 		if (mac->id == MTK_GMAC1_ID)
679 			mtk_setup_bridge_switch(eth);
680 	}
681 
682 	return;
683 
684 err_phy:
685 	dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
686 		mac->id, phy_modes(state->interface));
687 	return;
688 
689 init_err:
690 	dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
691 		mac->id, phy_modes(state->interface), err);
692 }
693 
694 static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
695 			  phy_interface_t interface)
696 {
697 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
698 					   phylink_config);
699 	struct mtk_eth *eth = mac->hw;
700 	u32 mcr_cur, mcr_new;
701 
702 	/* Enable SGMII */
703 	if (interface == PHY_INTERFACE_MODE_SGMII ||
704 	    phy_interface_mode_is_8023z(interface))
705 		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
706 				   SYSCFG0_SGMII_MASK, mac->syscfg0);
707 
708 	/* Setup gmac */
709 	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
710 	mcr_new = mcr_cur;
711 	mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
712 		   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_RX_FIFO_CLR_DIS;
713 
714 	/* Only update control register when needed! */
715 	if (mcr_new != mcr_cur)
716 		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
717 
718 	return 0;
719 }
720 
721 static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
722 			      phy_interface_t interface)
723 {
724 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
725 					   phylink_config);
726 
727 	if (!mtk_interface_mode_is_xgmii(mac->hw, interface)) {
728 		/* GMAC modes */
729 		mtk_m32(mac->hw,
730 			MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK, 0,
731 			MTK_MAC_MCR(mac->id));
732 	} else if (mac->id != MTK_GMAC1_ID) {
733 		/* XGMAC except for built-in switch */
734 		mtk_m32(mac->hw, XMAC_MCR_TRX_DISABLE, XMAC_MCR_TRX_DISABLE,
735 			MTK_XMAC_MCR(mac->id));
736 		mtk_m32(mac->hw, MTK_XGMAC_FORCE_LINK(mac->id), 0,
737 			MTK_XGMAC_STS(mac->id));
738 	}
739 }
740 
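/* Per-queue shaper setup. The MAX/MIN rate fields appear to encode a rate
 * of mantissa * 10^exponent in kbit/s (e.g. the minimum MAN=1/EXP=4 matches
 * the 10 Mbit/s comment below, and MAN=1/EXP=6 gives 1 Gbit/s). The MT7621
 * values use slightly larger mantissas (e.g. 103 * 10^2, roughly 10.3
 * Mbit/s), presumably to keep the shaper just above line rate.
 */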
741 static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
742 				int speed)
743 {
744 	const struct mtk_soc_data *soc = eth->soc;
745 	u32 ofs, val;
746 
747 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
748 		return;
749 
750 	val = MTK_QTX_SCH_MIN_RATE_EN |
751 	      /* minimum: 10 Mbps */
752 	      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
753 	      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
754 	      MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
755 	if (mtk_is_netsys_v1(eth))
756 		val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
757 
758 	if (IS_ENABLED(CONFIG_SOC_MT7621)) {
759 		switch (speed) {
760 		case SPEED_10:
761 			val |= MTK_QTX_SCH_MAX_RATE_EN |
762 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
763 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 2) |
764 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
765 			break;
766 		case SPEED_100:
767 			val |= MTK_QTX_SCH_MAX_RATE_EN |
768 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
769 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) |
770 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
771 			break;
772 		case SPEED_1000:
773 			val |= MTK_QTX_SCH_MAX_RATE_EN |
774 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 105) |
775 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
776 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
777 			break;
778 		default:
779 			break;
780 		}
781 	} else {
782 		switch (speed) {
783 		case SPEED_10:
784 			val |= MTK_QTX_SCH_MAX_RATE_EN |
785 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
786 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
787 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
788 			break;
789 		case SPEED_100:
790 			val |= MTK_QTX_SCH_MAX_RATE_EN |
791 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
792 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
793 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
794 			break;
795 		case SPEED_1000:
796 			val |= MTK_QTX_SCH_MAX_RATE_EN |
797 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
798 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 6) |
799 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
800 			break;
801 		default:
802 			break;
803 		}
804 	}
805 
806 	ofs = MTK_QTX_OFFSET * idx;
807 	mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
808 }
809 
810 static void mtk_gdm_mac_link_up(struct mtk_mac *mac,
811 				struct phy_device *phy,
812 				unsigned int mode, phy_interface_t interface,
813 				int speed, int duplex, bool tx_pause,
814 				bool rx_pause)
815 {
816 	u32 mcr;
817 
818 	mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
819 	mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
820 		 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
821 		 MAC_MCR_FORCE_RX_FC);
822 
823 	/* Configure speed */
824 	mac->speed = speed;
825 	switch (speed) {
826 	case SPEED_2500:
827 	case SPEED_1000:
828 		mcr |= MAC_MCR_SPEED_1000;
829 		break;
830 	case SPEED_100:
831 		mcr |= MAC_MCR_SPEED_100;
832 		break;
833 	}
834 
835 	/* Configure duplex */
836 	if (duplex == DUPLEX_FULL)
837 		mcr |= MAC_MCR_FORCE_DPX;
838 
839 	/* Configure pause modes - phylink will avoid these for half duplex */
840 	if (tx_pause)
841 		mcr |= MAC_MCR_FORCE_TX_FC;
842 	if (rx_pause)
843 		mcr |= MAC_MCR_FORCE_RX_FC;
844 
845 	mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK;
846 	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
847 }
848 
849 static void mtk_xgdm_mac_link_up(struct mtk_mac *mac,
850 				 struct phy_device *phy,
851 				 unsigned int mode, phy_interface_t interface,
852 				 int speed, int duplex, bool tx_pause,
853 				 bool rx_pause)
854 {
855 	u32 mcr;
856 
857 	if (mac->id == MTK_GMAC1_ID)
858 		return;
859 
860 	/* Eliminate the interference (before link-up) caused by PHY noise */
861 	mtk_m32(mac->hw, XMAC_LOGIC_RST, 0, MTK_XMAC_LOGIC_RST(mac->id));
862 	mdelay(20);
863 	mtk_m32(mac->hw, XMAC_GLB_CNTCLR, XMAC_GLB_CNTCLR,
864 		MTK_XMAC_CNT_CTRL(mac->id));
865 
866 	mtk_m32(mac->hw, MTK_XGMAC_FORCE_LINK(mac->id),
867 		MTK_XGMAC_FORCE_LINK(mac->id), MTK_XGMAC_STS(mac->id));
868 
869 	mcr = mtk_r32(mac->hw, MTK_XMAC_MCR(mac->id));
870 	mcr &= ~(XMAC_MCR_FORCE_TX_FC | XMAC_MCR_FORCE_RX_FC |
871 		 XMAC_MCR_TRX_DISABLE);
872 	/* Configure pause modes -
873 	 * phylink will avoid these for half duplex
874 	 */
875 	if (tx_pause)
876 		mcr |= XMAC_MCR_FORCE_TX_FC;
877 	if (rx_pause)
878 		mcr |= XMAC_MCR_FORCE_RX_FC;
879 
880 	mtk_w32(mac->hw, mcr, MTK_XMAC_MCR(mac->id));
881 }
882 
883 static void mtk_mac_link_up(struct phylink_config *config,
884 			    struct phy_device *phy,
885 			    unsigned int mode, phy_interface_t interface,
886 			    int speed, int duplex, bool tx_pause, bool rx_pause)
887 {
888 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
889 					   phylink_config);
890 
891 	if (mtk_interface_mode_is_xgmii(mac->hw, interface))
892 		mtk_xgdm_mac_link_up(mac, phy, mode, interface, speed, duplex,
893 				     tx_pause, rx_pause);
894 	else
895 		mtk_gdm_mac_link_up(mac, phy, mode, interface, speed, duplex,
896 				    tx_pause, rx_pause);
897 }
898 
899 static void mtk_mac_disable_tx_lpi(struct phylink_config *config)
900 {
901 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
902 					   phylink_config);
903 	struct mtk_eth *eth = mac->hw;
904 
905 	mtk_m32(eth, MAC_MCR_EEE100M | MAC_MCR_EEE1G, 0, MTK_MAC_MCR(mac->id));
906 }
907 
908 static int mtk_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
909 				 bool tx_clk_stop)
910 {
911 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
912 					   phylink_config);
913 	struct mtk_eth *eth = mac->hw;
914 	u32 val;
915 
916 	if (mtk_interface_mode_is_xgmii(eth, mac->interface))
917 		return -EOPNOTSUPP;
918 
919 	/* Tx idle timer in ms */
920 	timer = DIV_ROUND_UP(timer, 1000);
921 
922 	/* If the timer is zero, then set LPI_MODE, which allows the
923 	 * system to enter LPI mode immediately rather than waiting for
924 	 * the LPI threshold.
925 	 */
926 	if (!timer)
927 		val = MAC_EEE_LPI_MODE;
928 	else if (FIELD_FIT(MAC_EEE_LPI_TXIDLE_THD, timer))
929 		val = FIELD_PREP(MAC_EEE_LPI_TXIDLE_THD, timer);
930 	else
931 		val = MAC_EEE_LPI_TXIDLE_THD;
932 
933 	if (tx_clk_stop)
934 		val |= MAC_EEE_CKG_TXIDLE;
935 
936 	/* PHY Wake-up time, this field does not have a reset value, so use the
937 	 * reset value from MT7531 (36us for 100M and 17us for 1000M).
938 	 */
939 	val |= FIELD_PREP(MAC_EEE_WAKEUP_TIME_1000, 17) |
940 	       FIELD_PREP(MAC_EEE_WAKEUP_TIME_100, 36);
941 
942 	mtk_w32(eth, val, MTK_MAC_EEECR(mac->id));
943 	mtk_m32(eth, 0, MAC_MCR_EEE100M | MAC_MCR_EEE1G, MTK_MAC_MCR(mac->id));
944 
945 	return 0;
946 }
947 
948 static const struct phylink_mac_ops mtk_phylink_ops = {
949 	.mac_prepare = mtk_mac_prepare,
950 	.mac_select_pcs = mtk_mac_select_pcs,
951 	.mac_config = mtk_mac_config,
952 	.mac_finish = mtk_mac_finish,
953 	.mac_link_down = mtk_mac_link_down,
954 	.mac_link_up = mtk_mac_link_up,
955 	.mac_disable_tx_lpi = mtk_mac_disable_tx_lpi,
956 	.mac_enable_tx_lpi = mtk_mac_enable_tx_lpi,
957 };
958 
959 static void mtk_mdio_config(struct mtk_eth *eth)
960 {
961 	u32 val;
962 
963 	/* Configure MDC Divider */
964 	val = FIELD_PREP(PPSC_MDC_CFG, eth->mdc_divider);
965 
966 	/* Configure MDC Turbo Mode */
967 	if (mtk_is_netsys_v3_or_greater(eth))
968 		mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3);
969 	else
970 		val |= PPSC_MDC_TURBO;
971 
972 	mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC);
973 }
974 
975 static int mtk_mdio_init(struct mtk_eth *eth)
976 {
977 	unsigned int max_clk = 2500000;
978 	struct device_node *mii_np;
979 	int ret;
980 	u32 val;
981 
982 	mii_np = of_get_available_child_by_name(eth->dev->of_node, "mdio-bus");
983 	if (!mii_np) {
984 		dev_err(eth->dev, "no %s child node found", "mdio-bus");
985 		return -ENODEV;
986 	}
987 
988 	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
989 	if (!eth->mii_bus) {
990 		ret = -ENOMEM;
991 		goto err_put_node;
992 	}
993 
994 	eth->mii_bus->name = "mdio";
995 	eth->mii_bus->read = mtk_mdio_read_c22;
996 	eth->mii_bus->write = mtk_mdio_write_c22;
997 	eth->mii_bus->read_c45 = mtk_mdio_read_c45;
998 	eth->mii_bus->write_c45 = mtk_mdio_write_c45;
999 	eth->mii_bus->priv = eth;
1000 	eth->mii_bus->parent = eth->dev;
1001 
1002 	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
1003 
1004 	if (!of_property_read_u32(mii_np, "clock-frequency", &val)) {
1005 		if (val > MDC_MAX_FREQ || val < MDC_MAX_FREQ / MDC_MAX_DIVIDER) {
1006 			dev_err(eth->dev, "MDIO clock frequency out of range");
1007 			ret = -EINVAL;
1008 			goto err_put_node;
1009 		}
1010 		max_clk = val;
1011 	}
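	/* Divider = ceil(MDC_MAX_FREQ / requested MDC rate), capped at 63.
	 * Assuming MDC_MAX_FREQ is 25 MHz, the default max_clk of 2.5 MHz
	 * yields a divider of 10; the dev_dbg() below reports the resulting
	 * MDC frequency.
	 */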
1012 	eth->mdc_divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
1013 	mtk_mdio_config(eth);
1014 	dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / eth->mdc_divider);
1015 	ret = of_mdiobus_register(eth->mii_bus, mii_np);
1016 
1017 err_put_node:
1018 	of_node_put(mii_np);
1019 	return ret;
1020 }
1021 
1022 static void mtk_mdio_cleanup(struct mtk_eth *eth)
1023 {
1024 	if (!eth->mii_bus)
1025 		return;
1026 
1027 	mdiobus_unregister(eth->mii_bus);
1028 }
1029 
1030 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
1031 {
1032 	unsigned long flags;
1033 	u32 val;
1034 
1035 	spin_lock_irqsave(&eth->tx_irq_lock, flags);
1036 	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
1037 	mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
1038 	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
1039 }
1040 
1041 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
1042 {
1043 	unsigned long flags;
1044 	u32 val;
1045 
1046 	spin_lock_irqsave(&eth->tx_irq_lock, flags);
1047 	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
1048 	mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
1049 	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
1050 }
1051 
1052 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
1053 {
1054 	unsigned long flags;
1055 	u32 val;
1056 
1057 	spin_lock_irqsave(&eth->rx_irq_lock, flags);
1058 	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
1059 	mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
1060 	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
1061 }
1062 
1063 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
1064 {
1065 	unsigned long flags;
1066 	u32 val;
1067 
1068 	spin_lock_irqsave(&eth->rx_irq_lock, flags);
1069 	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
1070 	mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
1071 	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
1072 }
1073 
1074 static int mtk_set_mac_address(struct net_device *dev, void *p)
1075 {
1076 	int ret = eth_mac_addr(dev, p);
1077 	struct mtk_mac *mac = netdev_priv(dev);
1078 	struct mtk_eth *eth = mac->hw;
1079 	const char *macaddr = dev->dev_addr;
1080 
1081 	if (ret)
1082 		return ret;
1083 
1084 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
1085 		return -EBUSY;
1086 
1087 	spin_lock_bh(&mac->hw->page_lock);
1088 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
1089 		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
1090 			MT7628_SDM_MAC_ADRH);
1091 		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
1092 			(macaddr[4] << 8) | macaddr[5],
1093 			MT7628_SDM_MAC_ADRL);
1094 	} else {
1095 		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
1096 			MTK_GDMA_MAC_ADRH(mac->id));
1097 		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
1098 			(macaddr[4] << 8) | macaddr[5],
1099 			MTK_GDMA_MAC_ADRL(mac->id));
1100 	}
1101 	spin_unlock_bh(&mac->hw->page_lock);
1102 
1103 	return 0;
1104 }
1105 
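/* Fold the hardware MIB counters into the per-MAC software stats. On the
 * GDM-based SoCs the counters sit at fixed offsets from gdm1_cnt plus the
 * per-MAC reg_offset; the RX/TX byte counters are 64 bits wide, split over
 * two registers, so the high word is added in when it is non-zero. NETSYS
 * v3 and later use a different block of offsets for the TX counters.
 */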
1106 void mtk_stats_update_mac(struct mtk_mac *mac)
1107 {
1108 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
1109 	struct mtk_eth *eth = mac->hw;
1110 
1111 	u64_stats_update_begin(&hw_stats->syncp);
1112 
1113 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
1114 		hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
1115 		hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
1116 		hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
1117 		hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
1118 		hw_stats->rx_checksum_errors +=
1119 			mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
1120 	} else {
1121 		const struct mtk_reg_map *reg_map = eth->soc->reg_map;
1122 		unsigned int offs = hw_stats->reg_offset;
1123 		u64 stats;
1124 
1125 		hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
1126 		stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
1127 		if (stats)
1128 			hw_stats->rx_bytes += (stats << 32);
1129 		hw_stats->rx_packets +=
1130 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
1131 		hw_stats->rx_overflow +=
1132 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
1133 		hw_stats->rx_fcs_errors +=
1134 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
1135 		hw_stats->rx_short_errors +=
1136 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
1137 		hw_stats->rx_long_errors +=
1138 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
1139 		hw_stats->rx_checksum_errors +=
1140 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
1141 		hw_stats->rx_flow_control_packets +=
1142 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
1143 
1144 		if (mtk_is_netsys_v3_or_greater(eth)) {
1145 			hw_stats->tx_skip +=
1146 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x50 + offs);
1147 			hw_stats->tx_collisions +=
1148 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x54 + offs);
1149 			hw_stats->tx_bytes +=
1150 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x40 + offs);
1151 			stats =  mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x44 + offs);
1152 			if (stats)
1153 				hw_stats->tx_bytes += (stats << 32);
1154 			hw_stats->tx_packets +=
1155 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x48 + offs);
1156 		} else {
1157 			hw_stats->tx_skip +=
1158 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
1159 			hw_stats->tx_collisions +=
1160 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
1161 			hw_stats->tx_bytes +=
1162 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
1163 			stats =  mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
1164 			if (stats)
1165 				hw_stats->tx_bytes += (stats << 32);
1166 			hw_stats->tx_packets +=
1167 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
1168 		}
1169 	}
1170 
1171 	u64_stats_update_end(&hw_stats->syncp);
1172 }
1173 
1174 static void mtk_stats_update(struct mtk_eth *eth)
1175 {
1176 	int i;
1177 
1178 	for (i = 0; i < MTK_MAX_DEVS; i++) {
1179 		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
1180 			continue;
1181 		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
1182 			mtk_stats_update_mac(eth->mac[i]);
1183 			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
1184 		}
1185 	}
1186 }
1187 
1188 static void mtk_get_stats64(struct net_device *dev,
1189 			    struct rtnl_link_stats64 *storage)
1190 {
1191 	struct mtk_mac *mac = netdev_priv(dev);
1192 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
1193 	unsigned int start;
1194 
1195 	if (netif_running(dev) && netif_device_present(dev)) {
1196 		if (spin_trylock_bh(&hw_stats->stats_lock)) {
1197 			mtk_stats_update_mac(mac);
1198 			spin_unlock_bh(&hw_stats->stats_lock);
1199 		}
1200 	}
1201 
1202 	do {
1203 		start = u64_stats_fetch_begin(&hw_stats->syncp);
1204 		storage->rx_packets = hw_stats->rx_packets;
1205 		storage->tx_packets = hw_stats->tx_packets;
1206 		storage->rx_bytes = hw_stats->rx_bytes;
1207 		storage->tx_bytes = hw_stats->tx_bytes;
1208 		storage->collisions = hw_stats->tx_collisions;
1209 		storage->rx_length_errors = hw_stats->rx_short_errors +
1210 			hw_stats->rx_long_errors;
1211 		storage->rx_over_errors = hw_stats->rx_overflow;
1212 		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
1213 		storage->rx_errors = hw_stats->rx_checksum_errors;
1214 		storage->tx_aborted_errors = hw_stats->tx_skip;
1215 	} while (u64_stats_fetch_retry(&hw_stats->syncp, start));
1216 
1217 	storage->tx_errors = dev->stats.tx_errors;
1218 	storage->rx_dropped = dev->stats.rx_dropped;
1219 	storage->tx_dropped = dev->stats.tx_dropped;
1220 }
1221 
1222 static inline int mtk_max_frag_size(int mtu)
1223 {
1224 	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
1225 	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K)
1226 		mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
1227 
1228 	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
1229 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1230 }
1231 
1232 static inline int mtk_max_buf_size(int frag_size)
1233 {
1234 	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
1235 		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1236 
1237 	WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K);
1238 
1239 	return buf_size;
1240 }
1241 
1242 static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
1243 			    struct mtk_rx_dma_v2 *dma_rxd)
1244 {
1245 	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
1246 	if (!(rxd->rxd2 & RX_DMA_DONE))
1247 		return false;
1248 
1249 	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
1250 	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
1251 	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
1252 	if (mtk_is_netsys_v3_or_greater(eth)) {
1253 		rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
1254 		rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
1255 	}
1256 
1257 	return true;
1258 }
1259 
1260 static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
1261 {
1262 	unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
1263 	unsigned long data;
1264 
1265 	data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
1266 				get_order(size));
1267 
1268 	return (void *)data;
1269 }
1270 
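/* DMA ring allocation helper: when the SoC exposes a dedicated SRAM pool
 * (eth->sram_pool) and the caller requests it, the ring is carved out of
 * SRAM through genalloc; otherwise it falls back to ordinary coherent DMA
 * memory from the DMA device.
 */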
1271 static void *mtk_dma_ring_alloc(struct mtk_eth *eth, size_t size,
1272 				dma_addr_t *dma_handle, bool use_sram)
1273 {
1274 	void *dma_ring;
1275 
1276 	if (use_sram && eth->sram_pool) {
1277 		dma_ring = (void *)gen_pool_alloc(eth->sram_pool, size);
1278 		if (!dma_ring)
1279 			return dma_ring;
1280 		*dma_handle = gen_pool_virt_to_phys(eth->sram_pool,
1281 						    (unsigned long)dma_ring);
1282 	} else {
1283 		dma_ring = dma_alloc_coherent(eth->dma_dev, size, dma_handle,
1284 					      GFP_KERNEL);
1285 	}
1286 
1287 	return dma_ring;
1288 }
1289 
1290 static void mtk_dma_ring_free(struct mtk_eth *eth, size_t size, void *dma_ring,
1291 			      dma_addr_t dma_handle, bool in_sram)
1292 {
1293 	if (in_sram && eth->sram_pool)
1294 		gen_pool_free(eth->sram_pool, (unsigned long)dma_ring, size);
1295 	else
1296 		dma_free_coherent(eth->dma_dev, size, dma_ring, dma_handle);
1297 }
1298 
1299 /* the qdma core needs scratch memory to be setup */
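/* The scratch ring is a chain of fq_dma_size TX descriptors, each pointing
 * (txd1) at one MTK_QDMA_PAGE_SIZE buffer and linking (txd2) to the next
 * descriptor; head, tail, count and buffer length are then programmed into
 * the qdma.fq_* registers so the engine can fetch buffers on its own.
 */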
1300 static int mtk_init_fq_dma(struct mtk_eth *eth)
1301 {
1302 	const struct mtk_soc_data *soc = eth->soc;
1303 	dma_addr_t phy_ring_tail;
1304 	int cnt = soc->tx.fq_dma_size;
1305 	dma_addr_t dma_addr;
1306 	int i, j, len;
1307 
1308 	eth->scratch_ring = mtk_dma_ring_alloc(eth, cnt * soc->tx.desc_size,
1309 					       &eth->phy_scratch_ring, true);
1310 
1311 	if (unlikely(!eth->scratch_ring))
1312 		return -ENOMEM;
1313 
1314 	phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
1315 
1316 	for (j = 0; j < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); j++) {
1317 		len = min_t(int, cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH);
1318 		eth->scratch_head[j] = kcalloc(len, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
1319 
1320 		if (unlikely(!eth->scratch_head[j]))
1321 			return -ENOMEM;
1322 
1323 		dma_addr = dma_map_single(eth->dma_dev,
1324 					  eth->scratch_head[j], len * MTK_QDMA_PAGE_SIZE,
1325 					  DMA_FROM_DEVICE);
1326 
1327 		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
1328 			return -ENOMEM;
1329 
1330 		for (i = 0; i < len; i++) {
1331 			struct mtk_tx_dma_v2 *txd;
1332 
1333 			txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size;
1334 			txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
1335 			if (j * MTK_FQ_DMA_LENGTH + i < cnt)
1336 				txd->txd2 = eth->phy_scratch_ring +
1337 					    (j * MTK_FQ_DMA_LENGTH + i + 1) * soc->tx.desc_size;
1338 
1339 			txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
1340 			if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
1341 				txd->txd3 |= TX_DMA_PREP_ADDR64(dma_addr + i * MTK_QDMA_PAGE_SIZE);
1342 
1343 			txd->txd4 = 0;
1344 			if (mtk_is_netsys_v2_or_greater(eth)) {
1345 				txd->txd5 = 0;
1346 				txd->txd6 = 0;
1347 				txd->txd7 = 0;
1348 				txd->txd8 = 0;
1349 			}
1350 		}
1351 	}
1352 
1353 	mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
1354 	mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
1355 	mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
1356 	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
1357 
1358 	return 0;
1359 }
1360 
1361 static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
1362 {
1363 	return ring->dma + (desc - ring->phys);
1364 }
1365 
1366 static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
1367 					     void *txd, u32 txd_size)
1368 {
1369 	int idx = (txd - ring->dma) / txd_size;
1370 
1371 	return &ring->buf[idx];
1372 }
1373 
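/* The TX ring keeps a second, PDMA-style descriptor array (ring->dma_pdma)
 * laid out in parallel with the QDMA descriptors; qdma_to_pdma() maps a
 * QDMA descriptor pointer to its PDMA counterpart by applying the constant
 * offset between the two arrays.
 */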
1374 static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
1375 				       struct mtk_tx_dma *dma)
1376 {
1377 	return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
1378 }
1379 
1380 static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
1381 {
1382 	return (dma - ring->dma) / txd_size;
1383 }
1384 
1385 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1386 			 struct xdp_frame_bulk *bq, bool napi)
1387 {
1388 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1389 		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
1390 			dma_unmap_single(eth->dma_dev,
1391 					 dma_unmap_addr(tx_buf, dma_addr0),
1392 					 dma_unmap_len(tx_buf, dma_len0),
1393 					 DMA_TO_DEVICE);
1394 		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
1395 			dma_unmap_page(eth->dma_dev,
1396 				       dma_unmap_addr(tx_buf, dma_addr0),
1397 				       dma_unmap_len(tx_buf, dma_len0),
1398 				       DMA_TO_DEVICE);
1399 		}
1400 	} else {
1401 		if (dma_unmap_len(tx_buf, dma_len0)) {
1402 			dma_unmap_page(eth->dma_dev,
1403 				       dma_unmap_addr(tx_buf, dma_addr0),
1404 				       dma_unmap_len(tx_buf, dma_len0),
1405 				       DMA_TO_DEVICE);
1406 		}
1407 
1408 		if (dma_unmap_len(tx_buf, dma_len1)) {
1409 			dma_unmap_page(eth->dma_dev,
1410 				       dma_unmap_addr(tx_buf, dma_addr1),
1411 				       dma_unmap_len(tx_buf, dma_len1),
1412 				       DMA_TO_DEVICE);
1413 		}
1414 	}
1415 
1416 	if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
1417 		if (tx_buf->type == MTK_TYPE_SKB) {
1418 			struct sk_buff *skb = tx_buf->data;
1419 
1420 			if (napi)
1421 				napi_consume_skb(skb, napi);
1422 			else
1423 				dev_kfree_skb_any(skb);
1424 		} else {
1425 			struct xdp_frame *xdpf = tx_buf->data;
1426 
1427 			if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
1428 				xdp_return_frame_rx_napi(xdpf);
1429 			else if (bq)
1430 				xdp_return_frame_bulk(xdpf, bq);
1431 			else
1432 				xdp_return_frame(xdpf);
1433 		}
1434 	}
1435 	tx_buf->flags = 0;
1436 	tx_buf->data = NULL;
1437 }
1438 
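/* Record unmap bookkeeping for a mapped TX buffer. With QDMA only the
 * address and length are stored for the later dma_unmap; in the PDMA-only
 * case each hardware descriptor carries two buffers, so even buffer
 * indices fill txd1/PLEN0 and odd indices fill txd3/PLEN1 of the same
 * descriptor.
 */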
1439 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1440 			 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
1441 			 size_t size, int idx)
1442 {
1443 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1444 		dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1445 		dma_unmap_len_set(tx_buf, dma_len0, size);
1446 	} else {
1447 		if (idx & 1) {
1448 			txd->txd3 = mapped_addr;
1449 			txd->txd2 |= TX_DMA_PLEN1(size);
1450 			dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
1451 			dma_unmap_len_set(tx_buf, dma_len1, size);
1452 		} else {
1453 			tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1454 			txd->txd1 = mapped_addr;
1455 			txd->txd2 = TX_DMA_PLEN0(size);
1456 			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1457 			dma_unmap_len_set(tx_buf, dma_len0, size);
1458 		}
1459 	}
1460 }
1461 
1462 static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
1463 				   struct mtk_tx_dma_desc_info *info)
1464 {
1465 	struct mtk_mac *mac = netdev_priv(dev);
1466 	struct mtk_eth *eth = mac->hw;
1467 	struct mtk_tx_dma *desc = txd;
1468 	u32 data;
1469 
1470 	WRITE_ONCE(desc->txd1, info->addr);
1471 
1472 	data = TX_DMA_SWC | TX_DMA_PLEN0(info->size) |
1473 	       FIELD_PREP(TX_DMA_PQID, info->qid);
1474 	if (info->last)
1475 		data |= TX_DMA_LS0;
1476 	WRITE_ONCE(desc->txd3, data);
1477 
1478 	data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
1479 	if (info->first) {
1480 		if (info->gso)
1481 			data |= TX_DMA_TSO;
1482 		/* tx checksum offload */
1483 		if (info->csum)
1484 			data |= TX_DMA_CHKSUM;
1485 		/* vlan header offload */
1486 		if (info->vlan)
1487 			data |= TX_DMA_INS_VLAN | info->vlan_tci;
1488 	}
1489 	WRITE_ONCE(desc->txd4, data);
1490 }
1491 
1492 static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
1493 				   struct mtk_tx_dma_desc_info *info)
1494 {
1495 	struct mtk_mac *mac = netdev_priv(dev);
1496 	struct mtk_tx_dma_v2 *desc = txd;
1497 	struct mtk_eth *eth = mac->hw;
1498 	u32 data;
1499 
1500 	WRITE_ONCE(desc->txd1, info->addr);
1501 
1502 	data = TX_DMA_PLEN0(info->size);
1503 	if (info->last)
1504 		data |= TX_DMA_LS0;
1505 
1506 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
1507 		data |= TX_DMA_PREP_ADDR64(info->addr);
1508 
1509 	WRITE_ONCE(desc->txd3, data);
1510 
1511 	 /* set forward port */
1512 	switch (mac->id) {
1513 	case MTK_GMAC1_ID:
1514 		data = PSE_GDM1_PORT << TX_DMA_FPORT_SHIFT_V2;
1515 		break;
1516 	case MTK_GMAC2_ID:
1517 		data = PSE_GDM2_PORT << TX_DMA_FPORT_SHIFT_V2;
1518 		break;
1519 	case MTK_GMAC3_ID:
1520 		data = PSE_GDM3_PORT << TX_DMA_FPORT_SHIFT_V2;
1521 		break;
1522 	}
1523 
1524 	data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
1525 	WRITE_ONCE(desc->txd4, data);
1526 
1527 	data = 0;
1528 	if (info->first) {
1529 		if (info->gso)
1530 			data |= TX_DMA_TSO_V2;
1531 		/* tx checksum offload */
1532 		if (info->csum)
1533 			data |= TX_DMA_CHKSUM_V2;
1534 		if (mtk_is_netsys_v3_or_greater(eth) && netdev_uses_dsa(dev))
1535 			data |= TX_DMA_SPTAG_V3;
1536 	}
1537 	WRITE_ONCE(desc->txd5, data);
1538 
1539 	data = 0;
1540 	if (info->first && info->vlan)
1541 		data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
1542 	WRITE_ONCE(desc->txd6, data);
1543 
1544 	WRITE_ONCE(desc->txd7, 0);
1545 	WRITE_ONCE(desc->txd8, 0);
1546 }
1547 
1548 static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
1549 				struct mtk_tx_dma_desc_info *info)
1550 {
1551 	struct mtk_mac *mac = netdev_priv(dev);
1552 	struct mtk_eth *eth = mac->hw;
1553 
1554 	if (mtk_is_netsys_v2_or_greater(eth))
1555 		mtk_tx_set_dma_desc_v2(dev, txd, info);
1556 	else
1557 		mtk_tx_set_dma_desc_v1(dev, txd, info);
1558 }
1559 
1560 static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
1561 		      int tx_num, struct mtk_tx_ring *ring, bool gso)
1562 {
1563 	struct mtk_tx_dma_desc_info txd_info = {
1564 		.size = skb_headlen(skb),
1565 		.gso = gso,
1566 		.csum = skb->ip_summed == CHECKSUM_PARTIAL,
1567 		.vlan = skb_vlan_tag_present(skb),
1568 		.qid = skb_get_queue_mapping(skb),
1569 		.vlan_tci = skb_vlan_tag_get(skb),
1570 		.first = true,
1571 		.last = !skb_is_nonlinear(skb),
1572 	};
1573 	struct netdev_queue *txq;
1574 	struct mtk_mac *mac = netdev_priv(dev);
1575 	struct mtk_eth *eth = mac->hw;
1576 	const struct mtk_soc_data *soc = eth->soc;
1577 	struct mtk_tx_dma *itxd, *txd;
1578 	struct mtk_tx_dma *itxd_pdma, *txd_pdma;
1579 	struct mtk_tx_buf *itx_buf, *tx_buf;
1580 	int i, n_desc = 1;
1581 	int queue = skb_get_queue_mapping(skb);
1582 	int k = 0;
1583 
1584 	txq = netdev_get_tx_queue(dev, queue);
1585 	itxd = ring->next_free;
1586 	itxd_pdma = qdma_to_pdma(ring, itxd);
1587 	if (itxd == ring->last_free)
1588 		return -ENOMEM;
1589 
1590 	itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
1591 	memset(itx_buf, 0, sizeof(*itx_buf));
1592 
1593 	txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
1594 				       DMA_TO_DEVICE);
1595 	if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1596 		return -ENOMEM;
1597 
1598 	mtk_tx_set_dma_desc(dev, itxd, &txd_info);
1599 
1600 	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1601 	itx_buf->mac_id = mac->id;
1602 	setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
1603 		     k++);
1604 
1605 	/* TX SG offload */
1606 	txd = itxd;
1607 	txd_pdma = qdma_to_pdma(ring, txd);
1608 
1609 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1610 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1611 		unsigned int offset = 0;
1612 		int frag_size = skb_frag_size(frag);
1613 
1614 		while (frag_size) {
1615 			bool new_desc = true;
1616 
1617 			if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
1618 			    (i & 0x1)) {
1619 				txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1620 				txd_pdma = qdma_to_pdma(ring, txd);
1621 				if (txd == ring->last_free)
1622 					goto err_dma;
1623 
1624 				n_desc++;
1625 			} else {
1626 				new_desc = false;
1627 			}
1628 
1629 			memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1630 			txd_info.size = min_t(unsigned int, frag_size,
1631 					      soc->tx.dma_max_len);
1632 			txd_info.qid = queue;
1633 			txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
1634 					!(frag_size - txd_info.size);
1635 			txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
1636 							 offset, txd_info.size,
1637 							 DMA_TO_DEVICE);
1638 			if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1639 				goto err_dma;
1640 
1641 			mtk_tx_set_dma_desc(dev, txd, &txd_info);
1642 
1643 			tx_buf = mtk_desc_to_tx_buf(ring, txd,
1644 						    soc->tx.desc_size);
1645 			if (new_desc)
1646 				memset(tx_buf, 0, sizeof(*tx_buf));
1647 			tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1648 			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
1649 			tx_buf->mac_id = mac->id;
1650 
1651 			setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
1652 				     txd_info.size, k++);
1653 
1654 			frag_size -= txd_info.size;
1655 			offset += txd_info.size;
1656 		}
1657 	}
1658 
1659 	/* store skb to cleanup */
1660 	itx_buf->type = MTK_TYPE_SKB;
1661 	itx_buf->data = skb;
1662 
1663 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1664 		if (k & 0x1)
1665 			txd_pdma->txd2 |= TX_DMA_LS0;
1666 		else
1667 			txd_pdma->txd2 |= TX_DMA_LS1;
1668 	}
1669 
1670 	netdev_tx_sent_queue(txq, skb->len);
1671 	skb_tx_timestamp(skb);
1672 
1673 	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1674 	atomic_sub(n_desc, &ring->free_count);
1675 
1676 	/* make sure that all changes to the dma ring are flushed before we
1677 	 * continue
1678 	 */
1679 	wmb();
1680 
1681 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1682 		if (netif_xmit_stopped(txq) || !netdev_xmit_more())
1683 			mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1684 	} else {
1685 		int next_idx;
1686 
1687 		next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_size),
1688 					 ring->dma_size);
1689 		mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
1690 	}
1691 
1692 	return 0;
1693 
1694 err_dma:
1695 	do {
1696 		tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
1697 
1698 		/* unmap dma */
1699 		mtk_tx_unmap(eth, tx_buf, NULL, false);
1700 
1701 		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1702 		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
1703 			itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
1704 
1705 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
1706 		itxd_pdma = qdma_to_pdma(ring, itxd);
1707 	} while (itxd != txd);
1708 
1709 	return -ENOMEM;
1710 }
1711 
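/* Worst-case descriptor count for an skb: one for the linear head plus,
 * for GSO frames, one descriptor per tx.dma_max_len chunk of every
 * fragment; non-GSO fragments need a single descriptor each.
 */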
1712 static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
1713 {
1714 	int i, nfrags = 1;
1715 	skb_frag_t *frag;
1716 
1717 	if (skb_is_gso(skb)) {
1718 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1719 			frag = &skb_shinfo(skb)->frags[i];
1720 			nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1721 					       eth->soc->tx.dma_max_len);
1722 		}
1723 	} else {
1724 		nfrags += skb_shinfo(skb)->nr_frags;
1725 	}
1726 
1727 	return nfrags;
1728 }
1729 
1730 static int mtk_queue_stopped(struct mtk_eth *eth)
1731 {
1732 	int i;
1733 
1734 	for (i = 0; i < MTK_MAX_DEVS; i++) {
1735 		if (!eth->netdev[i])
1736 			continue;
1737 		if (netif_queue_stopped(eth->netdev[i]))
1738 			return 1;
1739 	}
1740 
1741 	return 0;
1742 }
1743 
1744 static void mtk_wake_queue(struct mtk_eth *eth)
1745 {
1746 	int i;
1747 
1748 	for (i = 0; i < MTK_MAX_DEVS; i++) {
1749 		if (!eth->netdev[i])
1750 			continue;
1751 		netif_tx_wake_all_queues(eth->netdev[i]);
1752 	}
1753 }
1754 
1755 static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1756 {
1757 	struct mtk_mac *mac = netdev_priv(dev);
1758 	struct mtk_eth *eth = mac->hw;
1759 	struct mtk_tx_ring *ring = &eth->tx_ring;
1760 	struct net_device_stats *stats = &dev->stats;
1761 	bool gso = false;
1762 	int tx_num;
1763 
1764 	/* normally we can rely on the stack not calling this more than once,
1765 	 * however we have 2 queues running on the same ring so we need to lock
1766 	 * the ring access
1767 	 */
1768 	spin_lock(&eth->page_lock);
1769 
1770 	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1771 		goto drop;
1772 
1773 	tx_num = mtk_cal_txd_req(eth, skb);
1774 	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1775 		netif_tx_stop_all_queues(dev);
1776 		netif_err(eth, tx_queued, dev,
1777 			  "Tx Ring full when queue awake!\n");
1778 		spin_unlock(&eth->page_lock);
1779 		return NETDEV_TX_BUSY;
1780 	}
1781 
1782 	/* TSO: fill MSS info in tcp checksum field */
1783 	if (skb_is_gso(skb)) {
1784 		if (skb_cow_head(skb, 0)) {
1785 			netif_warn(eth, tx_err, dev,
1786 				   "GSO expand head fail.\n");
1787 			goto drop;
1788 		}
1789 
1790 		if (skb_shinfo(skb)->gso_type &
1791 				(SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1792 			gso = true;
1793 			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1794 		}
1795 	}
1796 
1797 	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1798 		goto drop;
1799 
1800 	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1801 		netif_tx_stop_all_queues(dev);
1802 
1803 	spin_unlock(&eth->page_lock);
1804 
1805 	return NETDEV_TX_OK;
1806 
1807 drop:
1808 	spin_unlock(&eth->page_lock);
1809 	stats->tx_dropped++;
1810 	dev_kfree_skb_any(skb);
1811 	return NETDEV_TX_OK;
1812 }
1813 
1814 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1815 {
1816 	int i;
1817 	struct mtk_rx_ring *ring;
1818 	int idx;
1819 
1820 	if (!eth->hwlro)
1821 		return &eth->rx_ring[0];
1822 
1823 	for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1824 		struct mtk_rx_dma *rxd;
1825 
1826 		ring = &eth->rx_ring[i];
1827 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1828 		rxd = ring->dma + idx * eth->soc->rx.desc_size;
1829 		if (rxd->rxd2 & RX_DMA_DONE) {
1830 			ring->calc_idx_update = true;
1831 			return ring;
1832 		}
1833 	}
1834 
1835 	return NULL;
1836 }
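/* Sketch of the ring-index arithmetic used above, assuming NEXT_DESP_IDX()
 * simply advances by one and wraps at the ring size (the real macro may
 * use a power-of-two mask instead of a modulo): for a 512-entry ring,
 * calc_idx 510 -> 511 -> 0.
 */
#if 0
static int example_next_desp_idx(int idx, int dma_size)
{
	return (idx + 1) % dma_size;
}
#endif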
1837 
1838 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
1839 {
1840 	struct mtk_rx_ring *ring;
1841 	int i;
1842 
1843 	if (!eth->hwlro) {
1844 		ring = &eth->rx_ring[0];
1845 		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1846 	} else {
1847 		for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1848 			ring = &eth->rx_ring[i];
1849 			if (ring->calc_idx_update) {
1850 				ring->calc_idx_update = false;
1851 				mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1852 			}
1853 		}
1854 	}
1855 }
1856 
1857 static bool mtk_page_pool_enabled(struct mtk_eth *eth)
1858 {
1859 	return mtk_is_netsys_v2_or_greater(eth);
1860 }
1861 
1862 static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
1863 					      struct xdp_rxq_info *xdp_q,
1864 					      int id, int size)
1865 {
1866 	struct page_pool_params pp_params = {
1867 		.order = 0,
1868 		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
1869 		.pool_size = size,
1870 		.nid = NUMA_NO_NODE,
1871 		.dev = eth->dma_dev,
1872 		.offset = MTK_PP_HEADROOM,
1873 		.max_len = MTK_PP_MAX_BUF_SIZE,
1874 	};
1875 	struct page_pool *pp;
1876 	int err;
1877 
1878 	pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
1879 							  : DMA_FROM_DEVICE;
1880 	pp = page_pool_create(&pp_params);
1881 	if (IS_ERR(pp))
1882 		return pp;
1883 
1884 	err = __xdp_rxq_info_reg(xdp_q, eth->dummy_dev, id,
1885 				 eth->rx_napi.napi_id, PAGE_SIZE);
1886 	if (err < 0)
1887 		goto err_free_pp;
1888 
1889 	err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
1890 	if (err)
1891 		goto err_unregister_rxq;
1892 
1893 	return pp;
1894 
1895 err_unregister_rxq:
1896 	xdp_rxq_info_unreg(xdp_q);
1897 err_free_pp:
1898 	page_pool_destroy(pp);
1899 
1900 	return ERR_PTR(err);
1901 }
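/* Note on the dma_dir choice above: when an XDP program is attached,
 * XDP_TX hands RX pages straight back to the device, so the pool must be
 * mapped DMA_BIDIRECTIONAL; without a program, plain RX only needs
 * DMA_FROM_DEVICE.
 */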
1902 
1903 static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
1904 				    gfp_t gfp_mask)
1905 {
1906 	struct page *page;
1907 
1908 	page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
1909 	if (!page)
1910 		return NULL;
1911 
1912 	*dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
1913 	return page_address(page);
1914 }
1915 
1916 static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
1917 {
1918 	if (ring->page_pool)
1919 		page_pool_put_full_page(ring->page_pool,
1920 					virt_to_head_page(data), napi);
1921 	else
1922 		skb_free_frag(data);
1923 }
1924 
1925 static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
1926 			     struct mtk_tx_dma_desc_info *txd_info,
1927 			     struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
1928 			     void *data, u16 headroom, int index, bool dma_map)
1929 {
1930 	struct mtk_tx_ring *ring = &eth->tx_ring;
1931 	struct mtk_mac *mac = netdev_priv(dev);
1932 	struct mtk_tx_dma *txd_pdma;
1933 
1934 	if (dma_map) {  /* ndo_xdp_xmit */
1935 		txd_info->addr = dma_map_single(eth->dma_dev, data,
1936 						txd_info->size, DMA_TO_DEVICE);
1937 		if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
1938 			return -ENOMEM;
1939 
1940 		tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1941 	} else {
1942 		struct page *page = virt_to_head_page(data);
1943 
1944 		txd_info->addr = page_pool_get_dma_addr(page) +
1945 				 sizeof(struct xdp_frame) + headroom;
1946 		dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
1947 					   txd_info->size, DMA_BIDIRECTIONAL);
1948 	}
1949 	mtk_tx_set_dma_desc(dev, txd, txd_info);
1950 
1951 	tx_buf->mac_id = mac->id;
1952 	tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
1953 	tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1954 
1955 	txd_pdma = qdma_to_pdma(ring, txd);
1956 	setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
1957 		     index);
1958 
1959 	return 0;
1960 }
1961 
1962 static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
1963 				struct net_device *dev, bool dma_map)
1964 {
1965 	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
1966 	const struct mtk_soc_data *soc = eth->soc;
1967 	struct mtk_tx_ring *ring = &eth->tx_ring;
1968 	struct mtk_mac *mac = netdev_priv(dev);
1969 	struct mtk_tx_dma_desc_info txd_info = {
1970 		.size	= xdpf->len,
1971 		.first	= true,
1972 		.last	= !xdp_frame_has_frags(xdpf),
1973 		.qid	= mac->id,
1974 	};
1975 	int err, index = 0, n_desc = 1, nr_frags;
1976 	struct mtk_tx_buf *htx_buf, *tx_buf;
1977 	struct mtk_tx_dma *htxd, *txd;
1978 	void *data = xdpf->data;
1979 
1980 	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1981 		return -EBUSY;
1982 
1983 	nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
1984 	if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
1985 		return -EBUSY;
1986 
1987 	spin_lock(&eth->page_lock);
1988 
1989 	txd = ring->next_free;
1990 	if (txd == ring->last_free) {
1991 		spin_unlock(&eth->page_lock);
1992 		return -ENOMEM;
1993 	}
1994 	htxd = txd;
1995 
1996 	tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size);
1997 	memset(tx_buf, 0, sizeof(*tx_buf));
1998 	htx_buf = tx_buf;
1999 
2000 	for (;;) {
2001 		err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
2002 					data, xdpf->headroom, index, dma_map);
2003 		if (err < 0)
2004 			goto unmap;
2005 
2006 		if (txd_info.last)
2007 			break;
2008 
2009 		if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
2010 			txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
2011 			if (txd == ring->last_free)
2012 				goto unmap;
2013 
2014 			tx_buf = mtk_desc_to_tx_buf(ring, txd,
2015 						    soc->tx.desc_size);
2016 			memset(tx_buf, 0, sizeof(*tx_buf));
2017 			n_desc++;
2018 		}
2019 
2020 		memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
2021 		txd_info.size = skb_frag_size(&sinfo->frags[index]);
2022 		txd_info.last = index + 1 == nr_frags;
2023 		txd_info.qid = mac->id;
2024 		data = skb_frag_address(&sinfo->frags[index]);
2025 
2026 		index++;
2027 	}
2028 	/* store xdpf for cleanup */
2029 	htx_buf->data = xdpf;
2030 
2031 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2032 		struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, txd);
2033 
2034 		if (index & 1)
2035 			txd_pdma->txd2 |= TX_DMA_LS0;
2036 		else
2037 			txd_pdma->txd2 |= TX_DMA_LS1;
2038 	}
2039 
2040 	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
2041 	atomic_sub(n_desc, &ring->free_count);
2042 
2043 	/* make sure that all changes to the dma ring are flushed before we
2044 	 * continue
2045 	 */
2046 	wmb();
2047 
2048 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2049 		mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
2050 	} else {
2051 		int idx;
2052 
2053 		idx = txd_to_idx(ring, txd, soc->tx.desc_size);
2054 		mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
2055 			MT7628_TX_CTX_IDX0);
2056 	}
2057 
2058 	spin_unlock(&eth->page_lock);
2059 
2060 	return 0;
2061 
2062 unmap:
2063 	while (htxd != txd) {
2064 		tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size);
2065 		mtk_tx_unmap(eth, tx_buf, NULL, false);
2066 
2067 		htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
2068 		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2069 			struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, htxd);
2070 
2071 			txd_pdma->txd2 = TX_DMA_DESP2_DEF;
2072 		}
2073 
2074 		htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
2075 	}
2076 
2077 	spin_unlock(&eth->page_lock);
2078 
2079 	return err;
2080 }
2081 
2082 static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
2083 			struct xdp_frame **frames, u32 flags)
2084 {
2085 	struct mtk_mac *mac = netdev_priv(dev);
2086 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
2087 	struct mtk_eth *eth = mac->hw;
2088 	int i, nxmit = 0;
2089 
2090 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2091 		return -EINVAL;
2092 
2093 	for (i = 0; i < num_frame; i++) {
2094 		if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
2095 			break;
2096 		nxmit++;
2097 	}
2098 
2099 	u64_stats_update_begin(&hw_stats->syncp);
2100 	hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
2101 	hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
2102 	u64_stats_update_end(&hw_stats->syncp);
2103 
2104 	return nxmit;
2105 }
2106 
2107 static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
2108 		       struct xdp_buff *xdp, struct net_device *dev)
2109 {
2110 	struct mtk_mac *mac = netdev_priv(dev);
2111 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
2112 	u64 *count = &hw_stats->xdp_stats.rx_xdp_drop;
2113 	struct bpf_prog *prog;
2114 	u32 act = XDP_PASS;
2115 
2116 	rcu_read_lock();
2117 
2118 	prog = rcu_dereference(eth->prog);
2119 	if (!prog)
2120 		goto out;
2121 
2122 	act = bpf_prog_run_xdp(prog, xdp);
2123 	switch (act) {
2124 	case XDP_PASS:
2125 		count = &hw_stats->xdp_stats.rx_xdp_pass;
2126 		goto update_stats;
2127 	case XDP_REDIRECT:
2128 		if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
2129 			act = XDP_DROP;
2130 			break;
2131 		}
2132 
2133 		count = &hw_stats->xdp_stats.rx_xdp_redirect;
2134 		goto update_stats;
2135 	case XDP_TX: {
2136 		struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
2137 
2138 		if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
2139 			count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
2140 			act = XDP_DROP;
2141 			break;
2142 		}
2143 
2144 		count = &hw_stats->xdp_stats.rx_xdp_tx;
2145 		goto update_stats;
2146 	}
2147 	default:
2148 		bpf_warn_invalid_xdp_action(dev, prog, act);
2149 		fallthrough;
2150 	case XDP_ABORTED:
2151 		trace_xdp_exception(dev, prog, act);
2152 		fallthrough;
2153 	case XDP_DROP:
2154 		break;
2155 	}
2156 
2157 	page_pool_put_full_page(ring->page_pool,
2158 				virt_to_head_page(xdp->data), true);
2159 
2160 update_stats:
2161 	u64_stats_update_begin(&hw_stats->syncp);
2162 	*count = *count + 1;
2163 	u64_stats_update_end(&hw_stats->syncp);
2164 out:
2165 	rcu_read_unlock();
2166 
2167 	return act;
2168 }
2169 
2170 static int mtk_poll_rx(struct napi_struct *napi, int budget,
2171 		       struct mtk_eth *eth)
2172 {
2173 	struct dim_sample dim_sample = {};
2174 	struct mtk_rx_ring *ring;
2175 	bool xdp_flush = false;
2176 	int idx;
2177 	struct sk_buff *skb;
2178 	u64 addr64 = 0;
2179 	u8 *data, *new_data;
2180 	struct mtk_rx_dma_v2 *rxd, trxd;
2181 	int done = 0, bytes = 0;
2182 	dma_addr_t dma_addr = DMA_MAPPING_ERROR;
2183 	int ppe_idx = 0;
2184 
2185 	while (done < budget) {
2186 		unsigned int pktlen, *rxdcsum;
2187 		struct net_device *netdev;
2188 		u32 hash, reason;
2189 		int mac = 0;
2190 
2191 		ring = mtk_get_rx_ring(eth);
2192 		if (unlikely(!ring))
2193 			goto rx_done;
2194 
2195 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
2196 		rxd = ring->dma + idx * eth->soc->rx.desc_size;
2197 		data = ring->data[idx];
2198 
2199 		if (!mtk_rx_get_desc(eth, &trxd, rxd))
2200 			break;
2201 
2202 		/* find out which mac the packet came from. values start at 1 */
2203 		if (mtk_is_netsys_v3_or_greater(eth)) {
2204 			u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
2205 
2206 			switch (val) {
2207 			case PSE_GDM1_PORT:
2208 			case PSE_GDM2_PORT:
2209 				mac = val - 1;
2210 				break;
2211 			case PSE_GDM3_PORT:
2212 				mac = MTK_GMAC3_ID;
2213 				break;
2214 			default:
2215 				break;
2216 			}
2217 		} else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
2218 			   !(trxd.rxd4 & RX_DMA_SPECIAL_TAG)) {
2219 			mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
2220 		}
2221 
2222 		if (unlikely(mac < 0 || mac >= MTK_MAX_DEVS ||
2223 			     !eth->netdev[mac]))
2224 			goto release_desc;
2225 
2226 		netdev = eth->netdev[mac];
2227 		ppe_idx = eth->mac[mac]->ppe_idx;
2228 
2229 		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
2230 			goto release_desc;
2231 
2232 		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
2233 
2234 		/* alloc new buffer */
2235 		if (ring->page_pool) {
2236 			struct page *page = virt_to_head_page(data);
2237 			struct xdp_buff xdp;
2238 			u32 ret, metasize;
2239 
2240 			new_data = mtk_page_pool_get_buff(ring->page_pool,
2241 							  &dma_addr,
2242 							  GFP_ATOMIC);
2243 			if (unlikely(!new_data)) {
2244 				netdev->stats.rx_dropped++;
2245 				goto release_desc;
2246 			}
2247 
2248 			dma_sync_single_for_cpu(eth->dma_dev,
2249 				page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
2250 				pktlen, page_pool_get_dma_dir(ring->page_pool));
2251 
2252 			xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
2253 			xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
2254 					 true);
2255 			xdp_buff_clear_frags_flag(&xdp);
2256 
2257 			ret = mtk_xdp_run(eth, ring, &xdp, netdev);
2258 			if (ret == XDP_REDIRECT)
2259 				xdp_flush = true;
2260 
2261 			if (ret != XDP_PASS)
2262 				goto skip_rx;
2263 
2264 			skb = build_skb(data, PAGE_SIZE);
2265 			if (unlikely(!skb)) {
2266 				page_pool_put_full_page(ring->page_pool,
2267 							page, true);
2268 				netdev->stats.rx_dropped++;
2269 				goto skip_rx;
2270 			}
2271 
2272 			skb_reserve(skb, xdp.data - xdp.data_hard_start);
2273 			skb_put(skb, xdp.data_end - xdp.data);
2274 			metasize = xdp.data - xdp.data_meta;
2275 			if (metasize)
2276 				skb_metadata_set(skb, metasize);
2277 			skb_mark_for_recycle(skb);
2278 		} else {
2279 			if (ring->frag_size <= PAGE_SIZE)
2280 				new_data = napi_alloc_frag(ring->frag_size);
2281 			else
2282 				new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
2283 
2284 			if (unlikely(!new_data)) {
2285 				netdev->stats.rx_dropped++;
2286 				goto release_desc;
2287 			}
2288 
2289 			dma_addr = dma_map_single(eth->dma_dev,
2290 				new_data + NET_SKB_PAD + eth->ip_align,
2291 				ring->buf_size, DMA_FROM_DEVICE);
2292 			if (unlikely(dma_mapping_error(eth->dma_dev,
2293 						       dma_addr))) {
2294 				skb_free_frag(new_data);
2295 				netdev->stats.rx_dropped++;
2296 				goto release_desc;
2297 			}
2298 
2299 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
2300 				addr64 = RX_DMA_GET_ADDR64(trxd.rxd2);
2301 
2302 			dma_unmap_single(eth->dma_dev, ((u64)trxd.rxd1 | addr64),
2303 					 ring->buf_size, DMA_FROM_DEVICE);
2304 
2305 			skb = build_skb(data, ring->frag_size);
2306 			if (unlikely(!skb)) {
2307 				netdev->stats.rx_dropped++;
2308 				skb_free_frag(data);
2309 				goto skip_rx;
2310 			}
2311 
2312 			skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
2313 			skb_put(skb, pktlen);
2314 		}
2315 
2316 		skb->dev = netdev;
2317 		bytes += skb->len;
2318 
2319 		if (mtk_is_netsys_v3_or_greater(eth)) {
2320 			reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
2321 			hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
2322 			if (hash != MTK_RXD5_FOE_ENTRY)
2323 				skb_set_hash(skb, jhash_1word(hash, 0),
2324 					     PKT_HASH_TYPE_L4);
2325 			rxdcsum = &trxd.rxd3;
2326 		} else {
2327 			reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
2328 			hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
2329 			if (hash != MTK_RXD4_FOE_ENTRY)
2330 				skb_set_hash(skb, jhash_1word(hash, 0),
2331 					     PKT_HASH_TYPE_L4);
2332 			rxdcsum = &trxd.rxd4;
2333 		}
2334 
2335 		if (*rxdcsum & eth->soc->rx.dma_l4_valid)
2336 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2337 		else
2338 			skb_checksum_none_assert(skb);
2339 		skb->protocol = eth_type_trans(skb, netdev);
2340 
2341 		/* When using VLAN untagging in combination with DSA, the
2342 		 * hardware treats the MTK special tag as a VLAN and untags it.
2343 		 */
2344 		if (mtk_is_netsys_v1(eth) && (trxd.rxd2 & RX_DMA_VTAG) &&
2345 		    netdev_uses_dsa(netdev)) {
2346 			unsigned int port = RX_DMA_VPID(trxd.rxd3) & GENMASK(2, 0);
2347 
2348 			if (port < ARRAY_SIZE(eth->dsa_meta) &&
2349 			    eth->dsa_meta[port])
2350 				skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
2351 		}
2352 
2353 		if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
2354 			mtk_ppe_check_skb(eth->ppe[ppe_idx], skb, hash);
2355 
2356 		skb_record_rx_queue(skb, 0);
2357 		napi_gro_receive(napi, skb);
2358 
2359 skip_rx:
2360 		ring->data[idx] = new_data;
2361 		rxd->rxd1 = (unsigned int)dma_addr;
2362 release_desc:
2363 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
2364 			if (unlikely(dma_addr == DMA_MAPPING_ERROR))
2365 				addr64 = FIELD_GET(RX_DMA_ADDR64_MASK,
2366 						   rxd->rxd2);
2367 			else
2368 				addr64 = RX_DMA_PREP_ADDR64(dma_addr);
2369 		}
2370 
2371 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2372 			rxd->rxd2 = RX_DMA_LSO;
2373 		else
2374 			rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size) | addr64;
2375 
2376 		ring->calc_idx = idx;
2377 		done++;
2378 	}
2379 
2380 rx_done:
2381 	if (done) {
2382 		/* make sure that all changes to the dma ring are flushed before
2383 		 * we continue
2384 		 */
2385 		wmb();
2386 		mtk_update_rx_cpu_idx(eth);
2387 	}
2388 
2389 	eth->rx_packets += done;
2390 	eth->rx_bytes += bytes;
2391 	dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
2392 			  &dim_sample);
2393 	net_dim(&eth->rx_dim, &dim_sample);
2394 
2395 	if (xdp_flush)
2396 		xdp_do_flush();
2397 
2398 	return done;
2399 }
2400 
2401 struct mtk_poll_state {
2402     struct netdev_queue *txq;
2403     unsigned int total;
2404     unsigned int done;
2405     unsigned int bytes;
2406 };
2407 
2408 static void
2409 mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac,
2410 		 struct sk_buff *skb)
2411 {
2412 	struct netdev_queue *txq;
2413 	struct net_device *dev;
2414 	unsigned int bytes = skb->len;
2415 
2416 	state->total++;
2417 	eth->tx_packets++;
2418 	eth->tx_bytes += bytes;
2419 
2420 	dev = eth->netdev[mac];
2421 	if (!dev)
2422 		return;
2423 
2424 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2425 	if (state->txq == txq) {
2426 		state->done++;
2427 		state->bytes += bytes;
2428 		return;
2429 	}
2430 
2431 	if (state->txq)
2432 		netdev_tx_completed_queue(state->txq, state->done, state->bytes);
2433 
2434 	state->txq = txq;
2435 	state->done = 1;
2436 	state->bytes = bytes;
2437 }
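/* Completion accounting above is batched per TX queue: packets and bytes
 * accumulate while consecutive completions hit the same netdev_queue and
 * are flushed to netdev_tx_completed_queue() (BQL) whenever the queue
 * changes; the caller, mtk_poll_tx(), flushes the final batch.
 */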
2438 
2439 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
2440 			    struct mtk_poll_state *state)
2441 {
2442 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2443 	struct mtk_tx_ring *ring = &eth->tx_ring;
2444 	struct mtk_tx_buf *tx_buf;
2445 	struct xdp_frame_bulk bq;
2446 	struct mtk_tx_dma *desc;
2447 	u32 cpu, dma;
2448 
2449 	cpu = ring->last_free_ptr;
2450 	dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
2451 
2452 	desc = mtk_qdma_phys_to_virt(ring, cpu);
2453 	xdp_frame_bulk_init(&bq);
2454 
2455 	while ((cpu != dma) && budget) {
2456 		u32 next_cpu = desc->txd2;
2457 
2458 		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
2459 		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
2460 			break;
2461 
2462 		tx_buf = mtk_desc_to_tx_buf(ring, desc,
2463 					    eth->soc->tx.desc_size);
2464 		if (!tx_buf->data)
2465 			break;
2466 
2467 		if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2468 			if (tx_buf->type == MTK_TYPE_SKB)
2469 				mtk_poll_tx_done(eth, state, tx_buf->mac_id,
2470 						 tx_buf->data);
2471 
2472 			budget--;
2473 		}
2474 		mtk_tx_unmap(eth, tx_buf, &bq, true);
2475 
2476 		ring->last_free = desc;
2477 		atomic_inc(&ring->free_count);
2478 
2479 		cpu = next_cpu;
2480 	}
2481 	xdp_flush_frame_bulk(&bq);
2482 
2483 	ring->last_free_ptr = cpu;
2484 	mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
2485 
2486 	return budget;
2487 }
2488 
2489 static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
2490 			    struct mtk_poll_state *state)
2491 {
2492 	struct mtk_tx_ring *ring = &eth->tx_ring;
2493 	struct mtk_tx_buf *tx_buf;
2494 	struct xdp_frame_bulk bq;
2495 	struct mtk_tx_dma *desc;
2496 	u32 cpu, dma;
2497 
2498 	cpu = ring->cpu_idx;
2499 	dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
2500 	xdp_frame_bulk_init(&bq);
2501 
2502 	while ((cpu != dma) && budget) {
2503 		tx_buf = &ring->buf[cpu];
2504 		if (!tx_buf->data)
2505 			break;
2506 
2507 		if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2508 			if (tx_buf->type == MTK_TYPE_SKB)
2509 				mtk_poll_tx_done(eth, state, 0, tx_buf->data);
2510 			budget--;
2511 		}
2512 		mtk_tx_unmap(eth, tx_buf, &bq, true);
2513 
2514 		desc = ring->dma + cpu * eth->soc->tx.desc_size;
2515 		ring->last_free = desc;
2516 		atomic_inc(&ring->free_count);
2517 
2518 		cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
2519 	}
2520 	xdp_flush_frame_bulk(&bq);
2521 
2522 	ring->cpu_idx = cpu;
2523 
2524 	return budget;
2525 }
2526 
2527 static int mtk_poll_tx(struct mtk_eth *eth, int budget)
2528 {
2529 	struct mtk_tx_ring *ring = &eth->tx_ring;
2530 	struct dim_sample dim_sample = {};
2531 	struct mtk_poll_state state = {};
2532 
2533 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2534 		budget = mtk_poll_tx_qdma(eth, budget, &state);
2535 	else
2536 		budget = mtk_poll_tx_pdma(eth, budget, &state);
2537 
2538 	if (state.txq)
2539 		netdev_tx_completed_queue(state.txq, state.done, state.bytes);
2540 
2541 	dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
2542 			  &dim_sample);
2543 	net_dim(&eth->tx_dim, &dim_sample);
2544 
2545 	if (mtk_queue_stopped(eth) &&
2546 	    (atomic_read(&ring->free_count) > ring->thresh))
2547 		mtk_wake_queue(eth);
2548 
2549 	return state.total;
2550 }
2551 
2552 static void mtk_handle_status_irq(struct mtk_eth *eth)
2553 {
2554 	u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
2555 
2556 	if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
2557 		mtk_stats_update(eth);
2558 		mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
2559 			MTK_INT_STATUS2);
2560 	}
2561 }
2562 
2563 static int mtk_napi_tx(struct napi_struct *napi, int budget)
2564 {
2565 	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
2566 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2567 	int tx_done = 0;
2568 
2569 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2570 		mtk_handle_status_irq(eth);
2571 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
2572 	tx_done = mtk_poll_tx(eth, budget);
2573 
2574 	if (unlikely(netif_msg_intr(eth))) {
2575 		dev_info(eth->dev,
2576 			 "done tx %d, intr 0x%08x/0x%x\n", tx_done,
2577 			 mtk_r32(eth, reg_map->tx_irq_status),
2578 			 mtk_r32(eth, reg_map->tx_irq_mask));
2579 	}
2580 
2581 	if (tx_done == budget)
2582 		return budget;
2583 
2584 	if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
2585 		return budget;
2586 
2587 	if (napi_complete_done(napi, tx_done))
2588 		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2589 
2590 	return tx_done;
2591 }
2592 
2593 static int mtk_napi_rx(struct napi_struct *napi, int budget)
2594 {
2595 	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
2596 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2597 	int rx_done_total = 0;
2598 
2599 	mtk_handle_status_irq(eth);
2600 
2601 	do {
2602 		int rx_done;
2603 
2604 		mtk_w32(eth, eth->soc->rx.irq_done_mask,
2605 			reg_map->pdma.irq_status);
2606 		rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
2607 		rx_done_total += rx_done;
2608 
2609 		if (unlikely(netif_msg_intr(eth))) {
2610 			dev_info(eth->dev,
2611 				 "done rx %d, intr 0x%08x/0x%x\n", rx_done,
2612 				 mtk_r32(eth, reg_map->pdma.irq_status),
2613 				 mtk_r32(eth, reg_map->pdma.irq_mask));
2614 		}
2615 
2616 		if (rx_done_total == budget)
2617 			return budget;
2618 
2619 	} while (mtk_r32(eth, reg_map->pdma.irq_status) &
2620 		 eth->soc->rx.irq_done_mask);
2621 
2622 	if (napi_complete_done(napi, rx_done_total))
2623 		mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
2624 
2625 	return rx_done_total;
2626 }
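/* Both NAPI handlers above follow the usual polling contract: returning
 * the full budget (or seeing the status bit still set) keeps the poller
 * scheduled with the interrupt masked, and the interrupt is only unmasked
 * again via mtk_tx_irq_enable()/mtk_rx_irq_enable() once
 * napi_complete_done() accepts the completion.
 */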
2627 
2628 static int mtk_tx_alloc(struct mtk_eth *eth)
2629 {
2630 	const struct mtk_soc_data *soc = eth->soc;
2631 	struct mtk_tx_ring *ring = &eth->tx_ring;
2632 	int i, sz = soc->tx.desc_size;
2633 	struct mtk_tx_dma_v2 *txd;
2634 	int ring_size;
2635 	u32 ofs, val;
2636 
2637 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
2638 		ring_size = MTK_QDMA_RING_SIZE;
2639 	else
2640 		ring_size = soc->tx.dma_size;
2641 
2642 	ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
2643 			       GFP_KERNEL);
2644 	if (!ring->buf)
2645 		goto no_tx_mem;
2646 
2647 	ring->dma = mtk_dma_ring_alloc(eth, ring_size * sz, &ring->phys, true);
2648 	if (!ring->dma)
2649 		goto no_tx_mem;
2650 
2651 	for (i = 0; i < ring_size; i++) {
2652 		int next = (i + 1) % ring_size;
2653 		u32 next_ptr = ring->phys + next * sz;
2654 
2655 		txd = ring->dma + i * sz;
2656 		txd->txd2 = next_ptr;
2657 		txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
2658 		txd->txd4 = 0;
2659 		if (mtk_is_netsys_v2_or_greater(eth)) {
2660 			txd->txd5 = 0;
2661 			txd->txd6 = 0;
2662 			txd->txd7 = 0;
2663 			txd->txd8 = 0;
2664 		}
2665 	}
2666 
2667 	/* On MT7688 (PDMA only) this driver uses the ring->dma structs
2668 	 * only as the framework. The real HW descriptors are the PDMA
2669 	 * descriptors in ring->dma_pdma.
2670 	 */
2671 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2672 		ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
2673 						    &ring->phys_pdma, GFP_KERNEL);
2674 		if (!ring->dma_pdma)
2675 			goto no_tx_mem;
2676 
2677 		for (i = 0; i < ring_size; i++) {
2678 			ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
2679 			ring->dma_pdma[i].txd4 = 0;
2680 		}
2681 	}
2682 
2683 	ring->dma_size = ring_size;
2684 	atomic_set(&ring->free_count, ring_size - 2);
2685 	ring->next_free = ring->dma;
2686 	ring->last_free = (void *)txd;
2687 	ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz));
2688 	ring->thresh = MAX_SKB_FRAGS;
2689 
2690 	/* make sure that all changes to the dma ring are flushed before we
2691 	 * continue
2692 	 */
2693 	wmb();
2694 
2695 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2696 		mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
2697 		mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
2698 		mtk_w32(eth,
2699 			ring->phys + ((ring_size - 1) * sz),
2700 			soc->reg_map->qdma.crx_ptr);
2701 		mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
2702 
2703 		for (i = 0, ofs = 0; i < MTK_QDMA_NUM_QUEUES; i++) {
2704 			val = (QDMA_RES_THRES << 8) | QDMA_RES_THRES;
2705 			mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs);
2706 
2707 			val = MTK_QTX_SCH_MIN_RATE_EN |
2708 			      /* minimum: 10 Mbps */
2709 			      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
2710 			      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
2711 			      MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
2712 			if (mtk_is_netsys_v1(eth))
2713 				val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
2714 			mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
2715 			ofs += MTK_QTX_OFFSET;
2716 		}
2717 		val = MTK_QDMA_TX_SCH_MAX_WFQ | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
2718 		mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate);
2719 		if (mtk_is_netsys_v2_or_greater(eth))
2720 			mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4);
2721 	} else {
2722 		mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
2723 		mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0);
2724 		mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
2725 		mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
2726 	}
2727 
2728 	return 0;
2729 
2730 no_tx_mem:
2731 	return -ENOMEM;
2732 }
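/* A worked sketch, not used by the driver, of the per-queue minimum-rate
 * encoding programmed in mtk_tx_alloc() above: the rate appears to be
 * MAN * 10^EXP, so MAN=1/EXP=4 gives 10000, i.e. 10 Mbps if the hardware
 * unit is kbit/s, which matches the "minimum: 10 Mbps" note in the code.
 */
#if 0
static u64 example_qtx_sch_min_rate(u32 man, u32 exp)
{
	u64 rate = man;

	while (exp--)
		rate *= 10;

	return rate;	/* MAN=1, EXP=4 -> 10000 (kbit/s) */
}
#endif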
2733 
2734 static void mtk_tx_clean(struct mtk_eth *eth)
2735 {
2736 	const struct mtk_soc_data *soc = eth->soc;
2737 	struct mtk_tx_ring *ring = &eth->tx_ring;
2738 	int i;
2739 
2740 	if (ring->buf) {
2741 		for (i = 0; i < ring->dma_size; i++)
2742 			mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
2743 		kfree(ring->buf);
2744 		ring->buf = NULL;
2745 	}
2746 
2747 	if (ring->dma) {
2748 		mtk_dma_ring_free(eth, ring->dma_size * soc->tx.desc_size,
2749 				  ring->dma, ring->phys, true);
2750 		ring->dma = NULL;
2751 	}
2752 
2753 	if (ring->dma_pdma) {
2754 		dma_free_coherent(eth->dma_dev,
2755 				  ring->dma_size * soc->tx.desc_size,
2756 				  ring->dma_pdma, ring->phys_pdma);
2757 		ring->dma_pdma = NULL;
2758 	}
2759 }
2760 
2761 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
2762 {
2763 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2764 	const struct mtk_soc_data *soc = eth->soc;
2765 	struct mtk_rx_ring *ring;
2766 	int rx_data_len, rx_dma_size;
2767 	int i;
2768 
2769 	if (rx_flag == MTK_RX_FLAGS_QDMA) {
2770 		if (ring_no)
2771 			return -EINVAL;
2772 		ring = &eth->rx_ring_qdma;
2773 	} else {
2774 		ring = &eth->rx_ring[ring_no];
2775 	}
2776 
2777 	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
2778 		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
2779 		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
2780 	} else {
2781 		rx_data_len = ETH_DATA_LEN;
2782 		rx_dma_size = soc->rx.dma_size;
2783 	}
2784 
2785 	ring->frag_size = mtk_max_frag_size(rx_data_len);
2786 	ring->buf_size = mtk_max_buf_size(ring->frag_size);
2787 	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
2788 			     GFP_KERNEL);
2789 	if (!ring->data)
2790 		return -ENOMEM;
2791 
2792 	if (mtk_page_pool_enabled(eth)) {
2793 		struct page_pool *pp;
2794 
2795 		pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
2796 					  rx_dma_size);
2797 		if (IS_ERR(pp))
2798 			return PTR_ERR(pp);
2799 
2800 		ring->page_pool = pp;
2801 	}
2802 
2803 	ring->dma = mtk_dma_ring_alloc(eth,
2804 				       rx_dma_size * eth->soc->rx.desc_size,
2805 				       &ring->phys,
2806 				       rx_flag == MTK_RX_FLAGS_NORMAL);
2807 	if (!ring->dma)
2808 		return -ENOMEM;
2809 
2810 	for (i = 0; i < rx_dma_size; i++) {
2811 		struct mtk_rx_dma_v2 *rxd;
2812 		dma_addr_t dma_addr;
2813 		void *data;
2814 
2815 		rxd = ring->dma + i * eth->soc->rx.desc_size;
2816 		if (ring->page_pool) {
2817 			data = mtk_page_pool_get_buff(ring->page_pool,
2818 						      &dma_addr, GFP_KERNEL);
2819 			if (!data)
2820 				return -ENOMEM;
2821 		} else {
2822 			if (ring->frag_size <= PAGE_SIZE)
2823 				data = netdev_alloc_frag(ring->frag_size);
2824 			else
2825 				data = mtk_max_lro_buf_alloc(GFP_KERNEL);
2826 
2827 			if (!data)
2828 				return -ENOMEM;
2829 
2830 			dma_addr = dma_map_single(eth->dma_dev,
2831 				data + NET_SKB_PAD + eth->ip_align,
2832 				ring->buf_size, DMA_FROM_DEVICE);
2833 			if (unlikely(dma_mapping_error(eth->dma_dev,
2834 						       dma_addr))) {
2835 				skb_free_frag(data);
2836 				return -ENOMEM;
2837 			}
2838 		}
2839 		rxd->rxd1 = (unsigned int)dma_addr;
2840 		ring->data[i] = data;
2841 
2842 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2843 			rxd->rxd2 = RX_DMA_LSO;
2844 		else
2845 			rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2846 
2847 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
2848 			rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
2849 
2850 		rxd->rxd3 = 0;
2851 		rxd->rxd4 = 0;
2852 		if (mtk_is_netsys_v3_or_greater(eth)) {
2853 			rxd->rxd5 = 0;
2854 			rxd->rxd6 = 0;
2855 			rxd->rxd7 = 0;
2856 			rxd->rxd8 = 0;
2857 		}
2858 	}
2859 
2860 	ring->dma_size = rx_dma_size;
2861 	ring->calc_idx_update = false;
2862 	ring->calc_idx = rx_dma_size - 1;
2863 	if (rx_flag == MTK_RX_FLAGS_QDMA)
2864 		ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
2865 				    ring_no * MTK_QRX_OFFSET;
2866 	else
2867 		ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
2868 				    ring_no * MTK_QRX_OFFSET;
2869 	/* make sure that all changes to the dma ring are flushed before we
2870 	 * continue
2871 	 */
2872 	wmb();
2873 
2874 	if (rx_flag == MTK_RX_FLAGS_QDMA) {
2875 		mtk_w32(eth, ring->phys,
2876 			reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2877 		mtk_w32(eth, rx_dma_size,
2878 			reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2879 		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2880 			reg_map->qdma.rst_idx);
2881 	} else {
2882 		mtk_w32(eth, ring->phys,
2883 			reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2884 		mtk_w32(eth, rx_dma_size,
2885 			reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2886 		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2887 			reg_map->pdma.rst_idx);
2888 	}
2889 	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
2890 
2891 	return 0;
2892 }
2893 
2894 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_sram)
2895 {
2896 	u64 addr64 = 0;
2897 	int i;
2898 
2899 	if (ring->data && ring->dma) {
2900 		for (i = 0; i < ring->dma_size; i++) {
2901 			struct mtk_rx_dma *rxd;
2902 
2903 			if (!ring->data[i])
2904 				continue;
2905 
2906 			rxd = ring->dma + i * eth->soc->rx.desc_size;
2907 			if (!rxd->rxd1)
2908 				continue;
2909 
2910 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
2911 				addr64 = RX_DMA_GET_ADDR64(rxd->rxd2);
2912 
2913 			dma_unmap_single(eth->dma_dev, ((u64)rxd->rxd1 | addr64),
2914 					 ring->buf_size, DMA_FROM_DEVICE);
2915 			mtk_rx_put_buff(ring, ring->data[i], false);
2916 		}
2917 		kfree(ring->data);
2918 		ring->data = NULL;
2919 	}
2920 
2921 	if (ring->dma) {
2922 		mtk_dma_ring_free(eth, ring->dma_size * eth->soc->rx.desc_size,
2923 				  ring->dma, ring->phys, in_sram);
2924 		ring->dma = NULL;
2925 	}
2926 
2927 	if (ring->page_pool) {
2928 		if (xdp_rxq_info_is_reg(&ring->xdp_q))
2929 			xdp_rxq_info_unreg(&ring->xdp_q);
2930 		page_pool_destroy(ring->page_pool);
2931 		ring->page_pool = NULL;
2932 	}
2933 }
2934 
2935 static int mtk_hwlro_rx_init(struct mtk_eth *eth)
2936 {
2937 	int i;
2938 	u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
2939 	u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
2940 
2941 	/* set LRO rings to auto-learn modes */
2942 	ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
2943 
2944 	/* validate LRO ring */
2945 	ring_ctrl_dw2 |= MTK_RING_VLD;
2946 
2947 	/* set AGE timer (unit: 20us) */
2948 	ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
2949 	ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
2950 
2951 	/* set max AGG timer (unit: 20us) */
2952 	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
2953 
2954 	/* set max LRO AGG count */
2955 	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
2956 	ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
2957 
2958 	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2959 		mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
2960 		mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
2961 		mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
2962 	}
2963 
2964 	/* IPv4 checksum update enable */
2965 	lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
2966 
2967 	/* switch priority comparison to packet count mode */
2968 	lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
2969 
2970 	/* bandwidth threshold setting */
2971 	mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
2972 
2973 	/* auto-learn score delta setting */
2974 	mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
2975 
2976 	/* set refresh timer for altering flows to 1 sec. (unit: 20us) */
2977 	mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
2978 		MTK_PDMA_LRO_ALT_REFRESH_TIMER);
2979 
2980 	/* set HW LRO mode & the max aggregation count for rx packets */
2981 	lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
2982 
2983 	/* the minimal remaining room of SDL0 in RXD for lro aggregation */
2984 	lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
2985 
2986 	/* enable HW LRO */
2987 	lro_ctrl_dw0 |= MTK_LRO_EN;
2988 
2989 	mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
2990 	mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
2991 
2992 	return 0;
2993 }
2994 
2995 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
2996 {
2997 	int i;
2998 	u32 val;
2999 
3000 	/* relinquish lro rings, flush aggregated packets */
3001 	mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
3002 
3003 	/* wait for relinquishments done */
3004 	for (i = 0; i < 10; i++) {
3005 		val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
3006 		if (val & MTK_LRO_RING_RELINQUISH_DONE) {
3007 			msleep(20);
3008 			continue;
3009 		}
3010 		break;
3011 	}
3012 
3013 	/* invalidate lro rings */
3014 	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
3015 		mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
3016 
3017 	/* disable HW LRO */
3018 	mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
3019 }
3020 
3021 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
3022 {
3023 	u32 reg_val;
3024 
3025 	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
3026 
3027 	/* invalidate the IP setting */
3028 	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
3029 
3030 	mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
3031 
3032 	/* validate the IP setting */
3033 	mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
3034 }
3035 
3036 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
3037 {
3038 	u32 reg_val;
3039 
3040 	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
3041 
3042 	/* invalidate the IP setting */
3043 	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
3044 
3045 	mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
3046 }
3047 
3048 static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
3049 {
3050 	int cnt = 0;
3051 	int i;
3052 
3053 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
3054 		if (mac->hwlro_ip[i])
3055 			cnt++;
3056 	}
3057 
3058 	return cnt;
3059 }
3060 
3061 static int mtk_hwlro_add_ipaddr(struct net_device *dev,
3062 				struct ethtool_rxnfc *cmd)
3063 {
3064 	struct ethtool_rx_flow_spec *fsp =
3065 		(struct ethtool_rx_flow_spec *)&cmd->fs;
3066 	struct mtk_mac *mac = netdev_priv(dev);
3067 	struct mtk_eth *eth = mac->hw;
3068 	int hwlro_idx;
3069 
3070 	if ((fsp->flow_type != TCP_V4_FLOW) ||
3071 	    (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
3072 	    (fsp->location > 1))
3073 		return -EINVAL;
3074 
3075 	mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
3076 	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
3077 
3078 	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
3079 
3080 	mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
3081 
3082 	return 0;
3083 }
3084 
3085 static int mtk_hwlro_del_ipaddr(struct net_device *dev,
3086 				struct ethtool_rxnfc *cmd)
3087 {
3088 	struct ethtool_rx_flow_spec *fsp =
3089 		(struct ethtool_rx_flow_spec *)&cmd->fs;
3090 	struct mtk_mac *mac = netdev_priv(dev);
3091 	struct mtk_eth *eth = mac->hw;
3092 	int hwlro_idx;
3093 
3094 	if (fsp->location > 1)
3095 		return -EINVAL;
3096 
3097 	mac->hwlro_ip[fsp->location] = 0;
3098 	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
3099 
3100 	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
3101 
3102 	mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
3103 
3104 	return 0;
3105 }
3106 
3107 static void mtk_hwlro_netdev_disable(struct net_device *dev)
3108 {
3109 	struct mtk_mac *mac = netdev_priv(dev);
3110 	struct mtk_eth *eth = mac->hw;
3111 	int i, hwlro_idx;
3112 
3113 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
3114 		mac->hwlro_ip[i] = 0;
3115 		hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
3116 
3117 		mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
3118 	}
3119 
3120 	mac->hwlro_ip_cnt = 0;
3121 }
3122 
3123 static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
3124 				    struct ethtool_rxnfc *cmd)
3125 {
3126 	struct mtk_mac *mac = netdev_priv(dev);
3127 	struct ethtool_rx_flow_spec *fsp =
3128 		(struct ethtool_rx_flow_spec *)&cmd->fs;
3129 
3130 	if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
3131 		return -EINVAL;
3132 
3133 	/* only tcp dst ipv4 is meaningful, others are meaningless */
3134 	fsp->flow_type = TCP_V4_FLOW;
3135 	fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
3136 	fsp->m_u.tcp_ip4_spec.ip4dst = 0;
3137 
3138 	fsp->h_u.tcp_ip4_spec.ip4src = 0;
3139 	fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
3140 	fsp->h_u.tcp_ip4_spec.psrc = 0;
3141 	fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
3142 	fsp->h_u.tcp_ip4_spec.pdst = 0;
3143 	fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
3144 	fsp->h_u.tcp_ip4_spec.tos = 0;
3145 	fsp->m_u.tcp_ip4_spec.tos = 0xff;
3146 
3147 	return 0;
3148 }
3149 
3150 static int mtk_hwlro_get_fdir_all(struct net_device *dev,
3151 				  struct ethtool_rxnfc *cmd,
3152 				  u32 *rule_locs)
3153 {
3154 	struct mtk_mac *mac = netdev_priv(dev);
3155 	int cnt = 0;
3156 	int i;
3157 
3158 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
3159 		if (cnt == cmd->rule_cnt)
3160 			return -EMSGSIZE;
3161 
3162 		if (mac->hwlro_ip[i]) {
3163 			rule_locs[cnt] = i;
3164 			cnt++;
3165 		}
3166 	}
3167 
3168 	cmd->rule_cnt = cnt;
3169 
3170 	return 0;
3171 }
3172 
3173 static netdev_features_t mtk_fix_features(struct net_device *dev,
3174 					  netdev_features_t features)
3175 {
3176 	if (!(features & NETIF_F_LRO)) {
3177 		struct mtk_mac *mac = netdev_priv(dev);
3178 		int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
3179 
3180 		if (ip_cnt) {
3181 			netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
3182 
3183 			features |= NETIF_F_LRO;
3184 		}
3185 	}
3186 
3187 	return features;
3188 }
3189 
3190 static int mtk_set_features(struct net_device *dev, netdev_features_t features)
3191 {
3192 	netdev_features_t diff = dev->features ^ features;
3193 
3194 	if ((diff & NETIF_F_LRO) && !(features & NETIF_F_LRO))
3195 		mtk_hwlro_netdev_disable(dev);
3196 
3197 	return 0;
3198 }
3199 
3200 /* wait for DMA to finish whatever it is doing before we start using it again */
3201 static int mtk_dma_busy_wait(struct mtk_eth *eth)
3202 {
3203 	unsigned int reg;
3204 	int ret;
3205 	u32 val;
3206 
3207 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3208 		reg = eth->soc->reg_map->qdma.glo_cfg;
3209 	else
3210 		reg = eth->soc->reg_map->pdma.glo_cfg;
3211 
3212 	ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
3213 					!(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
3214 					5, MTK_DMA_BUSY_TIMEOUT_US);
3215 	if (ret)
3216 		dev_err(eth->dev, "DMA init timeout\n");
3217 
3218 	return ret;
3219 }
3220 
3221 static int mtk_dma_init(struct mtk_eth *eth)
3222 {
3223 	int err;
3224 	u32 i;
3225 
3226 	if (mtk_dma_busy_wait(eth))
3227 		return -EBUSY;
3228 
3229 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3230 		/* QDMA needs scratch memory for internal reordering of the
3231 		 * descriptors
3232 		 */
3233 		err = mtk_init_fq_dma(eth);
3234 		if (err)
3235 			return err;
3236 	}
3237 
3238 	err = mtk_tx_alloc(eth);
3239 	if (err)
3240 		return err;
3241 
3242 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3243 		err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
3244 		if (err)
3245 			return err;
3246 	}
3247 
3248 	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
3249 	if (err)
3250 		return err;
3251 
3252 	if (eth->hwlro) {
3253 		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
3254 			err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
3255 			if (err)
3256 				return err;
3257 		}
3258 		err = mtk_hwlro_rx_init(eth);
3259 		if (err)
3260 			return err;
3261 	}
3262 
3263 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3264 		/* Enable random early drop and set drop threshold
3265 		 * automatically
3266 		 */
3267 		mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
3268 			FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
3269 		mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
3270 	}
3271 
3272 	return 0;
3273 }
3274 
3275 static void mtk_dma_free(struct mtk_eth *eth)
3276 {
3277 	const struct mtk_soc_data *soc = eth->soc;
3278 	int i, j, txqs = 1;
3279 
3280 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3281 		txqs = MTK_QDMA_NUM_QUEUES;
3282 
3283 	for (i = 0; i < MTK_MAX_DEVS; i++) {
3284 		if (!eth->netdev[i])
3285 			continue;
3286 
3287 		for (j = 0; j < txqs; j++)
3288 			netdev_tx_reset_subqueue(eth->netdev[i], j);
3289 	}
3290 
3291 	if (eth->scratch_ring) {
3292 		mtk_dma_ring_free(eth, soc->tx.fq_dma_size * soc->tx.desc_size,
3293 				  eth->scratch_ring, eth->phy_scratch_ring,
3294 				  true);
3295 		eth->scratch_ring = NULL;
3296 		eth->phy_scratch_ring = 0;
3297 	}
3298 
3299 	mtk_tx_clean(eth);
3300 	mtk_rx_clean(eth, &eth->rx_ring[0], true);
3301 	mtk_rx_clean(eth, &eth->rx_ring_qdma, false);
3302 
3303 	if (eth->hwlro) {
3304 		mtk_hwlro_rx_uninit(eth);
3305 		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
3306 			mtk_rx_clean(eth, &eth->rx_ring[i], false);
3307 	}
3308 
3309 	for (i = 0; i < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); i++) {
3310 		kfree(eth->scratch_head[i]);
3311 		eth->scratch_head[i] = NULL;
3312 	}
3313 }
3314 
3315 static bool mtk_hw_reset_check(struct mtk_eth *eth)
3316 {
3317 	u32 val = mtk_r32(eth, MTK_INT_STATUS2);
3318 
3319 	return (val & MTK_FE_INT_FQ_EMPTY) || (val & MTK_FE_INT_RFIFO_UF) ||
3320 	       (val & MTK_FE_INT_RFIFO_OV) || (val & MTK_FE_INT_TSO_FAIL) ||
3321 	       (val & MTK_FE_INT_TSO_ALIGN) || (val & MTK_FE_INT_TSO_ILLEGAL);
3322 }
3323 
3324 static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
3325 {
3326 	struct mtk_mac *mac = netdev_priv(dev);
3327 	struct mtk_eth *eth = mac->hw;
3328 
3329 	if (test_bit(MTK_RESETTING, &eth->state))
3330 		return;
3331 
3332 	if (!mtk_hw_reset_check(eth))
3333 		return;
3334 
3335 	eth->netdev[mac->id]->stats.tx_errors++;
3336 	netif_err(eth, tx_err, dev, "transmit timed out\n");
3337 
3338 	schedule_work(&eth->pending_work);
3339 }
3340 
3341 static int mtk_get_irqs(struct platform_device *pdev, struct mtk_eth *eth)
3342 {
3343 	int i;
3344 
3345 	/* future SoCs beginning with MT7988 should use named IRQs in dts */
3346 	eth->irq[MTK_FE_IRQ_TX] = platform_get_irq_byname_optional(pdev, "fe1");
3347 	eth->irq[MTK_FE_IRQ_RX] = platform_get_irq_byname_optional(pdev, "fe2");
3348 	if (eth->irq[MTK_FE_IRQ_TX] >= 0 && eth->irq[MTK_FE_IRQ_RX] >= 0)
3349 		return 0;
3350 
3351 	/* only use legacy mode if platform_get_irq_byname_optional returned -ENXIO */
3352 	if (eth->irq[MTK_FE_IRQ_TX] != -ENXIO)
3353 		return dev_err_probe(&pdev->dev, eth->irq[MTK_FE_IRQ_TX],
3354 				     "Error requesting FE TX IRQ\n");
3355 
3356 	if (eth->irq[MTK_FE_IRQ_RX] != -ENXIO)
3357 		return dev_err_probe(&pdev->dev, eth->irq[MTK_FE_IRQ_RX],
3358 				     "Error requesting FE RX IRQ\n");
3359 
3360 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT))
3361 		dev_warn(&pdev->dev, "legacy DT: missing interrupt-names.");
3362 
3363 	/* legacy way:
3364 	 * On MTK_SHARED_INT SoCs (MT7621 + MT7628) the first IRQ is taken
3365 	 * from devicetree and used for both RX and TX - it is shared.
3366 	 * On SoCs with non-shared IRQs the first entry is not used,
3367 	 * the second is for TX, and the third is for RX.
3368 	 */
3369 	for (i = 0; i < MTK_FE_IRQ_NUM; i++) {
3370 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
3371 			if (i == MTK_FE_IRQ_SHARED)
3372 				eth->irq[MTK_FE_IRQ_SHARED] = platform_get_irq(pdev, i);
3373 			else
3374 				eth->irq[i] = eth->irq[MTK_FE_IRQ_SHARED];
3375 		} else {
3376 			eth->irq[i] = platform_get_irq(pdev, i + 1);
3377 		}
3378 
3379 		if (eth->irq[i] < 0) {
3380 			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
3381 			return -ENXIO;
3382 		}
3383 	}
3384 
3385 	return 0;
3386 }
3387 
3388 static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
3389 {
3390 	struct mtk_eth *eth = _eth;
3391 
3392 	eth->rx_events++;
3393 	if (likely(napi_schedule_prep(&eth->rx_napi))) {
3394 		mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
3395 		__napi_schedule(&eth->rx_napi);
3396 	}
3397 
3398 	return IRQ_HANDLED;
3399 }
3400 
3401 static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
3402 {
3403 	struct mtk_eth *eth = _eth;
3404 
3405 	eth->tx_events++;
3406 	if (likely(napi_schedule_prep(&eth->tx_napi))) {
3407 		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3408 		__napi_schedule(&eth->tx_napi);
3409 	}
3410 
3411 	return IRQ_HANDLED;
3412 }
3413 
3414 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
3415 {
3416 	struct mtk_eth *eth = _eth;
3417 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3418 
3419 	if (mtk_r32(eth, reg_map->pdma.irq_mask) &
3420 	    eth->soc->rx.irq_done_mask) {
3421 		if (mtk_r32(eth, reg_map->pdma.irq_status) &
3422 		    eth->soc->rx.irq_done_mask)
3423 			mtk_handle_irq_rx(irq, _eth);
3424 	}
3425 	if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
3426 		if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
3427 			mtk_handle_irq_tx(irq, _eth);
3428 	}
3429 
3430 	return IRQ_HANDLED;
3431 }
3432 
3433 #ifdef CONFIG_NET_POLL_CONTROLLER
3434 static void mtk_poll_controller(struct net_device *dev)
3435 {
3436 	struct mtk_mac *mac = netdev_priv(dev);
3437 	struct mtk_eth *eth = mac->hw;
3438 
3439 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3440 	mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
3441 	mtk_handle_irq_rx(eth->irq[MTK_FE_IRQ_RX], dev);
3442 	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3443 	mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
3444 }
3445 #endif
3446 
3447 static int mtk_start_dma(struct mtk_eth *eth)
3448 {
3449 	u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
3450 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3451 	int err;
3452 
3453 	err = mtk_dma_init(eth);
3454 	if (err) {
3455 		mtk_dma_free(eth);
3456 		return err;
3457 	}
3458 
3459 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3460 		val = mtk_r32(eth, reg_map->qdma.glo_cfg);
3461 		val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3462 		       MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
3463 		       MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
3464 
3465 		if (mtk_is_netsys_v2_or_greater(eth))
3466 			val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
3467 			       MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
3468 			       MTK_CHK_DDONE_EN;
3469 		else
3470 			val |= MTK_RX_BT_32DWORDS;
3471 		mtk_w32(eth, val, reg_map->qdma.glo_cfg);
3472 
3473 		mtk_w32(eth,
3474 			MTK_RX_DMA_EN | rx_2b_offset |
3475 			MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
3476 			reg_map->pdma.glo_cfg);
3477 	} else {
3478 		mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3479 			MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
3480 			reg_map->pdma.glo_cfg);
3481 	}
3482 
3483 	return 0;
3484 }
3485 
3486 static void mtk_gdm_config(struct mtk_eth *eth, u32 id, u32 config)
3487 {
3488 	u32 val;
3489 
3490 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3491 		return;
3492 
3493 	val = mtk_r32(eth, MTK_GDMA_FWD_CFG(id));
3494 
3495 	/* default setup the forward port to send frame to PDMA */
3496 	val &= ~0xffff;
3497 
3498 	/* Enable RX checksum */
3499 	val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
3500 
3501 	val |= config;
3502 
3503 	if (eth->netdev[id] && netdev_uses_dsa(eth->netdev[id]))
3504 		val |= MTK_GDMA_SPECIAL_TAG;
3505 
3506 	mtk_w32(eth, val, MTK_GDMA_FWD_CFG(id));
3507 }
3508 
3509 
3510 static bool mtk_uses_dsa(struct net_device *dev)
3511 {
3512 #if IS_ENABLED(CONFIG_NET_DSA)
3513 	return netdev_uses_dsa(dev) &&
3514 	       dev->dsa_ptr->tag_ops->proto == DSA_TAG_PROTO_MTK;
3515 #else
3516 	return false;
3517 #endif
3518 }
3519 
3520 static int mtk_device_event(struct notifier_block *n, unsigned long event, void *ptr)
3521 {
3522 	struct mtk_mac *mac = container_of(n, struct mtk_mac, device_notifier);
3523 	struct mtk_eth *eth = mac->hw;
3524 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3525 	struct ethtool_link_ksettings s;
3526 	struct net_device *ldev;
3527 	struct list_head *iter;
3528 	struct dsa_port *dp;
3529 
3530 	if (event != NETDEV_CHANGE)
3531 		return NOTIFY_DONE;
3532 
3533 	netdev_for_each_lower_dev(dev, ldev, iter) {
3534 		if (netdev_priv(ldev) == mac)
3535 			goto found;
3536 	}
3537 
3538 	return NOTIFY_DONE;
3539 
3540 found:
3541 	if (!dsa_user_dev_check(dev))
3542 		return NOTIFY_DONE;
3543 
3544 	if (__ethtool_get_link_ksettings(dev, &s))
3545 		return NOTIFY_DONE;
3546 
3547 	if (s.base.speed == 0 || s.base.speed == ((__u32)-1))
3548 		return NOTIFY_DONE;
3549 
3550 	dp = dsa_port_from_netdev(dev);
3551 	if (dp->index >= MTK_QDMA_NUM_QUEUES)
3552 		return NOTIFY_DONE;
3553 
3554 	if (mac->speed > 0 && mac->speed <= s.base.speed)
3555 		s.base.speed = 0;
3556 
3557 	mtk_set_queue_speed(eth, dp->index + 3, s.base.speed);
3558 
3559 	return NOTIFY_DONE;
3560 }
3561 
3562 static int mtk_open(struct net_device *dev)
3563 {
3564 	struct mtk_mac *mac = netdev_priv(dev);
3565 	struct mtk_eth *eth = mac->hw;
3566 	struct mtk_mac *target_mac;
3567 	int i, err, ppe_num;
3568 
3569 	ppe_num = eth->soc->ppe_num;
3570 
3571 	err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
3572 	if (err) {
3573 		netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
3574 			   err);
3575 		return err;
3576 	}
3577 
3578 	/* we run 2 netdevs on the same dma ring so we only bring it up once */
3579 	if (!refcount_read(&eth->dma_refcnt)) {
3580 		const struct mtk_soc_data *soc = eth->soc;
3581 		u32 gdm_config;
3582 		int i;
3583 
3584 		err = mtk_start_dma(eth);
3585 		if (err) {
3586 			phylink_disconnect_phy(mac->phylink);
3587 			return err;
3588 		}
3589 
3590 		for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3591 			mtk_ppe_start(eth->ppe[i]);
3592 
3593 		for (i = 0; i < MTK_MAX_DEVS; i++) {
3594 			if (!eth->netdev[i])
3595 				continue;
3596 
3597 			target_mac = netdev_priv(eth->netdev[i]);
3598 			if (!soc->offload_version) {
3599 				target_mac->ppe_idx = 0;
3600 				gdm_config = MTK_GDMA_TO_PDMA;
3601 			} else if (ppe_num >= 3 && target_mac->id == 2) {
3602 				target_mac->ppe_idx = 2;
3603 				gdm_config = soc->reg_map->gdma_to_ppe[2];
3604 			} else if (ppe_num >= 2 && target_mac->id == 1) {
3605 				target_mac->ppe_idx = 1;
3606 				gdm_config = soc->reg_map->gdma_to_ppe[1];
3607 			} else {
3608 				target_mac->ppe_idx = 0;
3609 				gdm_config = soc->reg_map->gdma_to_ppe[0];
3610 			}
3611 			mtk_gdm_config(eth, target_mac->id, gdm_config);
3612 		}
3613 
3614 		napi_enable(&eth->tx_napi);
3615 		napi_enable(&eth->rx_napi);
3616 		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3617 		mtk_rx_irq_enable(eth, soc->rx.irq_done_mask);
3618 		refcount_set(&eth->dma_refcnt, 1);
3619 	} else {
3620 		refcount_inc(&eth->dma_refcnt);
3621 	}
3622 
3623 	phylink_start(mac->phylink);
3624 	netif_tx_start_all_queues(dev);
3625 
3626 	if (mtk_is_netsys_v2_or_greater(eth))
3627 		return 0;
3628 
3629 	if (mtk_uses_dsa(dev) && !eth->prog) {
3630 		for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
3631 			struct metadata_dst *md_dst = eth->dsa_meta[i];
3632 
3633 			if (md_dst)
3634 				continue;
3635 
3636 			md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
3637 						    GFP_KERNEL);
3638 			if (!md_dst)
3639 				return -ENOMEM;
3640 
3641 			md_dst->u.port_info.port_id = i;
3642 			eth->dsa_meta[i] = md_dst;
3643 		}
3644 	} else {
3645 		/* Hardware DSA untagging and VLAN RX offloading need to be
3646 		 * disabled if at least one MAC does not use DSA.
3647 		 */
3648 		u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3649 
3650 		val &= ~MTK_CDMP_STAG_EN;
3651 		mtk_w32(eth, val, MTK_CDMP_IG_CTRL);
3652 
3653 		mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
3654 	}
3655 
3656 	return 0;
3657 }
3658 
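/* Clear the TX/RX enable bits in the given GLO_CFG register and poll
 * the busy flags for up to 10 x 20 ms so in-flight DMA can drain.
 */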
3659 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
3660 {
3661 	u32 val;
3662 	int i;
3663 
3664 	/* stop the dma engine */
3665 	spin_lock_bh(&eth->page_lock);
3666 	val = mtk_r32(eth, glo_cfg);
3667 	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
3668 		glo_cfg);
3669 	spin_unlock_bh(&eth->page_lock);
3670 
3671 	/* wait for dma stop */
3672 	for (i = 0; i < 10; i++) {
3673 		val = mtk_r32(eth, glo_cfg);
3674 		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
3675 			msleep(20);
3676 			continue;
3677 		}
3678 		break;
3679 	}
3680 }
3681 
3682 static int mtk_stop(struct net_device *dev)
3683 {
3684 	struct mtk_mac *mac = netdev_priv(dev);
3685 	struct mtk_eth *eth = mac->hw;
3686 	int i;
3687 
3688 	phylink_stop(mac->phylink);
3689 
3690 	netif_tx_disable(dev);
3691 
3692 	phylink_disconnect_phy(mac->phylink);
3693 
3694 	/* only shutdown DMA if this is the last user */
3695 	if (!refcount_dec_and_test(&eth->dma_refcnt))
3696 		return 0;
3697 
3698 	for (i = 0; i < MTK_MAX_DEVS; i++)
3699 		mtk_gdm_config(eth, i, MTK_GDMA_DROP_ALL);
3700 
3701 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3702 	mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
3703 	napi_disable(&eth->tx_napi);
3704 	napi_disable(&eth->rx_napi);
3705 
3706 	cancel_work_sync(&eth->rx_dim.work);
3707 	cancel_work_sync(&eth->tx_dim.work);
3708 
3709 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3710 		mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
3711 	mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
3712 
3713 	mtk_dma_free(eth);
3714 
3715 	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3716 		mtk_ppe_stop(eth->ppe[i]);
3717 
3718 	return 0;
3719 }
3720 
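/* Attach or detach an XDP program. XDP is refused while HWLRO is active
 * or the MTU exceeds the page-pool buffer size; switching between
 * "program" and "no program" goes through a stop/open cycle so the RX
 * rings can be rebuilt for the new mode.
 */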
3721 static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
3722 			 struct netlink_ext_ack *extack)
3723 {
3724 	struct mtk_mac *mac = netdev_priv(dev);
3725 	struct mtk_eth *eth = mac->hw;
3726 	struct bpf_prog *old_prog;
3727 	bool need_update;
3728 
3729 	if (eth->hwlro) {
3730 		NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
3731 		return -EOPNOTSUPP;
3732 	}
3733 
3734 	if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
3735 		NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
3736 		return -EOPNOTSUPP;
3737 	}
3738 
3739 	need_update = !!eth->prog != !!prog;
3740 	if (netif_running(dev) && need_update)
3741 		mtk_stop(dev);
3742 
3743 	old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
3744 	if (old_prog)
3745 		bpf_prog_put(old_prog);
3746 
3747 	if (netif_running(dev) && need_update)
3748 		return mtk_open(dev);
3749 
3750 	return 0;
3751 }
3752 
3753 static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3754 {
3755 	switch (xdp->command) {
3756 	case XDP_SETUP_PROG:
3757 		return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
3758 	default:
3759 		return -EINVAL;
3760 	}
3761 }
3762 
3763 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
3764 {
3765 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3766 			   reset_bits,
3767 			   reset_bits);
3768 
3769 	usleep_range(1000, 1100);
3770 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3771 			   reset_bits,
3772 			   ~reset_bits);
3773 	mdelay(10);
3774 }
3775 
3776 static void mtk_clk_disable(struct mtk_eth *eth)
3777 {
3778 	int clk;
3779 
3780 	for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
3781 		clk_disable_unprepare(eth->clks[clk]);
3782 }
3783 
3784 static int mtk_clk_enable(struct mtk_eth *eth)
3785 {
3786 	int clk, ret;
3787 
3788 	for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
3789 		ret = clk_prepare_enable(eth->clks[clk]);
3790 		if (ret)
3791 			goto err_disable_clks;
3792 	}
3793 
3794 	return 0;
3795 
3796 err_disable_clks:
3797 	while (--clk >= 0)
3798 		clk_disable_unprepare(eth->clks[clk]);
3799 
3800 	return ret;
3801 }
3802 
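/* Net DIM worker for the RX path: translate the suggested moderation
 * profile into the PDMA (and, with QDMA, also the QDMA) delay-interrupt
 * register, encoding the time budget in 20 us units and clamping both
 * time and packet count to the register field widths. mtk_dim_tx()
 * below does the same for the TX direction.
 */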
3803 static void mtk_dim_rx(struct work_struct *work)
3804 {
3805 	struct dim *dim = container_of(work, struct dim, work);
3806 	struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
3807 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3808 	struct dim_cq_moder cur_profile;
3809 	u32 val, cur;
3810 
3811 	cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
3812 						dim->profile_ix);
3813 	spin_lock_bh(&eth->dim_lock);
3814 
3815 	val = mtk_r32(eth, reg_map->pdma.delay_irq);
3816 	val &= MTK_PDMA_DELAY_TX_MASK;
3817 	val |= MTK_PDMA_DELAY_RX_EN;
3818 
3819 	cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3820 	val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
3821 
3822 	cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3823 	val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
3824 
3825 	mtk_w32(eth, val, reg_map->pdma.delay_irq);
3826 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3827 		mtk_w32(eth, val, reg_map->qdma.delay_irq);
3828 
3829 	spin_unlock_bh(&eth->dim_lock);
3830 
3831 	dim->state = DIM_START_MEASURE;
3832 }
3833 
3834 static void mtk_dim_tx(struct work_struct *work)
3835 {
3836 	struct dim *dim = container_of(work, struct dim, work);
3837 	struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
3838 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3839 	struct dim_cq_moder cur_profile;
3840 	u32 val, cur;
3841 
3842 	cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
3843 						dim->profile_ix);
3844 	spin_lock_bh(&eth->dim_lock);
3845 
3846 	val = mtk_r32(eth, reg_map->pdma.delay_irq);
3847 	val &= MTK_PDMA_DELAY_RX_MASK;
3848 	val |= MTK_PDMA_DELAY_TX_EN;
3849 
3850 	cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3851 	val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;
3852 
3853 	cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3854 	val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
3855 
3856 	mtk_w32(eth, val, reg_map->pdma.delay_irq);
3857 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3858 		mtk_w32(eth, val, reg_map->qdma.delay_irq);
3859 
3860 	spin_unlock_bh(&eth->dim_lock);
3861 
3862 	dim->state = DIM_START_MEASURE;
3863 }
3864 
3865 static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val)
3866 {
3867 	struct mtk_eth *eth = mac->hw;
3868 	u32 mcr_cur, mcr_new;
3869 
3870 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3871 		return;
3872 
3873 	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
3874 	mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
3875 
3876 	if (val <= 1518)
3877 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
3878 	else if (val <= 1536)
3879 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
3880 	else if (val <= 1552)
3881 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
3882 	else
3883 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
3884 
3885 	if (mcr_new != mcr_cur)
3886 		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
3887 }
3888 
3889 static void mtk_hw_reset(struct mtk_eth *eth)
3890 {
3891 	u32 val;
3892 
3893 	if (mtk_is_netsys_v2_or_greater(eth))
3894 		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
3895 
3896 	if (mtk_is_netsys_v3_or_greater(eth)) {
3897 		val = RSTCTRL_PPE0_V3;
3898 
3899 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3900 			val |= RSTCTRL_PPE1_V3;
3901 
3902 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
3903 			val |= RSTCTRL_PPE2;
3904 
3905 		val |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
3906 	} else if (mtk_is_netsys_v2_or_greater(eth)) {
3907 		val = RSTCTRL_PPE0_V2;
3908 
3909 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3910 			val |= RSTCTRL_PPE1;
3911 	} else {
3912 		val = RSTCTRL_PPE0;
3913 	}
3914 
3915 	ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
3916 
3917 	if (mtk_is_netsys_v3_or_greater(eth))
3918 		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
3919 			     0x6f8ff);
3920 	else if (mtk_is_netsys_v2_or_greater(eth))
3921 		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
3922 			     0x3ffffff);
3923 }
3924 
3925 static u32 mtk_hw_reset_read(struct mtk_eth *eth)
3926 {
3927 	u32 val;
3928 
3929 	regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
3930 	return val;
3931 }
3932 
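/* Warm reset: assert RSTCTRL_FE and wait for it to latch, then assert
 * and de-assert the per-block reset bits (PPEs, WDMA, ETH) matching the
 * NETSYS generation, checking each stage via ETHSYS_RSTCTRL read-back.
 * Falls back to a full mtk_hw_reset() if the FE bit never latches.
 */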
3933 static void mtk_hw_warm_reset(struct mtk_eth *eth)
3934 {
3935 	u32 rst_mask, val;
3936 
3937 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE,
3938 			   RSTCTRL_FE);
3939 	if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val,
3940 				      val & RSTCTRL_FE, 1, 1000)) {
3941 		dev_err(eth->dev, "warm reset failed\n");
3942 		mtk_hw_reset(eth);
3943 		return;
3944 	}
3945 
3946 	if (mtk_is_netsys_v3_or_greater(eth)) {
3947 		rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V3;
3948 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3949 			rst_mask |= RSTCTRL_PPE1_V3;
3950 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
3951 			rst_mask |= RSTCTRL_PPE2;
3952 
3953 		rst_mask |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
3954 	} else if (mtk_is_netsys_v2_or_greater(eth)) {
3955 		rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2;
3956 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3957 			rst_mask |= RSTCTRL_PPE1;
3958 	} else {
3959 		rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0;
3960 	}
3961 
3962 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask);
3963 
3964 	udelay(1);
3965 	val = mtk_hw_reset_read(eth);
3966 	if (!(val & rst_mask))
3967 		dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n",
3968 			val, rst_mask);
3969 
3970 	rst_mask |= RSTCTRL_FE;
3971 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask);
3972 
3973 	udelay(1);
3974 	val = mtk_hw_reset_read(eth);
3975 	if (val & rst_mask)
3976 		dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n",
3977 			val, rst_mask);
3978 }
3979 
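/* Heuristic DMA-hang detector polled by the reset monitor work: sample
 * WDMA, QDMA and ADMA state and only report a hang once the same engine
 * has looked stuck for three consecutive samples, so that transient
 * congestion does not trigger a frame engine reset.
 */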
3980 static bool mtk_hw_check_dma_hang(struct mtk_eth *eth)
3981 {
3982 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3983 	bool gmac1_tx, gmac2_tx, gdm1_tx, gdm2_tx;
3984 	bool oq_hang, cdm1_busy, adma_busy;
3985 	bool wtx_busy, cdm_full, oq_free;
3986 	u32 wdidx, val, gdm1_fc, gdm2_fc;
3987 	bool qfsm_hang, qfwd_hang;
3988 	bool ret = false;
3989 
3990 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3991 		return false;
3992 
3993 	/* WDMA sanity checks */
3994 	wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc);
3995 
3996 	val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204);
3997 	wtx_busy = FIELD_GET(MTK_TX_DMA_BUSY, val);
3998 
3999 	val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230);
4000 	cdm_full = !FIELD_GET(MTK_CDM_TXFIFO_RDY, val);
4001 
4002 	oq_free  = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) &&
4003 		    !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) &&
4004 		    !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16)));
4005 
4006 	if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) {
4007 		if (++eth->reset.wdma_hang_count > 2) {
4008 			eth->reset.wdma_hang_count = 0;
4009 			ret = true;
4010 		}
4011 		goto out;
4012 	}
4013 
4014 	/* QDMA sanity checks */
4015 	qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234);
4016 	qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308);
4017 
4018 	gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0;
4019 	gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0;
4020 	gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1;
4021 	gmac2_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1;
4022 	gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24);
4023 	gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64);
4024 
4025 	if (qfsm_hang && qfwd_hang &&
4026 	    ((gdm1_tx && gmac1_tx && gdm1_fc < 1) ||
4027 	     (gdm2_tx && gmac2_tx && gdm2_fc < 1))) {
4028 		if (++eth->reset.qdma_hang_count > 2) {
4029 			eth->reset.qdma_hang_count = 0;
4030 			ret = true;
4031 		}
4032 		goto out;
4033 	}
4034 
4035 	/* ADMA sanity checks */
4036 	oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0));
4037 	cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16));
4038 	adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) &&
4039 		    !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6));
4040 
4041 	if (oq_hang && cdm1_busy && adma_busy) {
4042 		if (++eth->reset.adma_hang_count > 2) {
4043 			eth->reset.adma_hang_count = 0;
4044 			ret = true;
4045 		}
4046 		goto out;
4047 	}
4048 
4049 	eth->reset.wdma_hang_count = 0;
4050 	eth->reset.qdma_hang_count = 0;
4051 	eth->reset.adma_hang_count = 0;
4052 out:
4053 	eth->reset.wdidx = wdidx;
4054 
4055 	return ret;
4056 }
4057 
4058 static void mtk_hw_reset_monitor_work(struct work_struct *work)
4059 {
4060 	struct delayed_work *del_work = to_delayed_work(work);
4061 	struct mtk_eth *eth = container_of(del_work, struct mtk_eth,
4062 					   reset.monitor_work);
4063 
4064 	if (test_bit(MTK_RESETTING, &eth->state))
4065 		goto out;
4066 
4067 	/* DMA stuck checks */
4068 	if (mtk_hw_check_dma_hang(eth))
4069 		schedule_work(&eth->pending_work);
4070 
4071 out:
4072 	schedule_delayed_work(&eth->reset.monitor_work,
4073 			      MTK_DMA_MONITOR_TIMEOUT);
4074 }
4075 
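/* One-time (or post-reset) hardware bring-up: enable clocks and runtime
 * PM on cold init, run a warm or cold FE reset, then program forced
 * link-down on each MAC, CDM special-tag parsing, interrupt grouping
 * and the PSE/GDM/CDM thresholds for the NETSYS generation. MT7628 only
 * needs a device reset plus DIM/interrupt setup.
 */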
4076 static int mtk_hw_init(struct mtk_eth *eth, bool reset)
4077 {
4078 	u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
4079 		       ETHSYS_DMA_AG_MAP_PPE;
4080 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
4081 	int i, val, ret;
4082 
4083 	if (!reset && test_and_set_bit(MTK_HW_INIT, &eth->state))
4084 		return 0;
4085 
4086 	if (!reset) {
4087 		pm_runtime_enable(eth->dev);
4088 		pm_runtime_get_sync(eth->dev);
4089 
4090 		ret = mtk_clk_enable(eth);
4091 		if (ret)
4092 			goto err_disable_pm;
4093 	}
4094 
4095 	if (eth->ethsys)
4096 		regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
4097 				   of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
4098 
4099 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4100 		ret = device_reset(eth->dev);
4101 		if (ret) {
4102 			dev_err(eth->dev, "MAC reset failed!\n");
4103 			goto err_disable_pm;
4104 		}
4105 
4106 		/* set interrupt delays based on current Net DIM sample */
4107 		mtk_dim_rx(&eth->rx_dim.work);
4108 		mtk_dim_tx(&eth->tx_dim.work);
4109 
4110 		/* disable delay and normal interrupt */
4111 		mtk_tx_irq_disable(eth, ~0);
4112 		mtk_rx_irq_disable(eth, ~0);
4113 
4114 		return 0;
4115 	}
4116 
4117 	msleep(100);
4118 
4119 	if (reset)
4120 		mtk_hw_warm_reset(eth);
4121 	else
4122 		mtk_hw_reset(eth);
4123 
4124 	/* No MT7628/88 support yet */
4125 	if (reset && !MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4126 		mtk_mdio_config(eth);
4127 
4128 	if (mtk_is_netsys_v3_or_greater(eth)) {
4129 		/* Set FE to PDMAv2 if necessary */
4130 		val = mtk_r32(eth, MTK_FE_GLO_MISC);
4131 		mtk_w32(eth,  val | BIT(4), MTK_FE_GLO_MISC);
4132 	}
4133 
4134 	if (eth->pctl) {
4135 		/* Set GE2 driving and slew rate */
4136 		regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
4137 
4138 		/* set GE2 TDSEL */
4139 		regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
4140 
4141 		/* set GE2 TUNE */
4142 		regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
4143 	}
4144 
4145 	/* Set link down as the default for each GMAC. Each MAC's MCR will be
4146 	 * set up with the appropriate value when the mtk_mac_config callback
4147 	 * is invoked.
4148 	 */
4149 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4150 		struct net_device *dev = eth->netdev[i];
4151 
4152 		if (!dev)
4153 			continue;
4154 
4155 		mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
4156 		mtk_set_mcr_max_rx(netdev_priv(dev),
4157 				   dev->mtu + MTK_RX_ETH_HLEN);
4158 	}
4159 
4160 	/* Instruct the CDM to parse the MTK special tag on frames coming
4161 	 * from the CPU; this also works for untagged packets.
4162 	 */
4163 	val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
4164 	mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
4165 	if (mtk_is_netsys_v1(eth)) {
4166 		val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
4167 		mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
4168 
4169 		mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
4170 	}
4171 
4172 	/* set interrupt delays based on current Net DIM sample */
4173 	mtk_dim_rx(&eth->rx_dim.work);
4174 	mtk_dim_tx(&eth->tx_dim.work);
4175 
4176 	/* disable delay and normal interrupt */
4177 	mtk_tx_irq_disable(eth, ~0);
4178 	mtk_rx_irq_disable(eth, ~0);
4179 
4180 	/* FE int grouping */
4181 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
4182 	mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.int_grp + 4);
4183 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
4184 	mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->qdma.int_grp + 4);
4185 	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
4186 
4187 	if (mtk_is_netsys_v3_or_greater(eth)) {
4188 		/* PSE dummy page mechanism */
4189 		mtk_w32(eth, PSE_DUMMY_WORK_GDM(1) | PSE_DUMMY_WORK_GDM(2) |
4190 			PSE_DUMMY_WORK_GDM(3) | DUMMY_PAGE_THR, PSE_DUMY_REQ);
4191 
4192 		/* PSE free buffer drop threshold */
4193 		mtk_w32(eth, 0x00600009, PSE_IQ_REV(8));
4194 
4195 		/* PSE should not drop port8, port9 and port13 packets from
4196 		 * WDMA Tx
4197 		 */
4198 		mtk_w32(eth, 0x00002300, PSE_DROP_CFG);
4199 
4200 		/* PSE should drop packets to port8, port9 and port13 on WDMA Rx
4201 		 * ring full
4202 		 */
4203 		mtk_w32(eth, 0x00002300, PSE_PPE_DROP(0));
4204 		mtk_w32(eth, 0x00002300, PSE_PPE_DROP(1));
4205 		mtk_w32(eth, 0x00002300, PSE_PPE_DROP(2));
4206 
4207 		/* GDM and CDM Threshold */
4208 		mtk_w32(eth, 0x08000707, MTK_CDMW0_THRES);
4209 		mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);
4210 
4211 		/* Disable GDM1 RX CRC stripping */
4212 		mtk_m32(eth, MTK_GDMA_STRP_CRC, 0, MTK_GDMA_FWD_CFG(0));
4213 
4214 		/* The PSE GDM3 MIB counters have incorrect hardware default
4215 		 * values, so read-clear them here to keep ethtool from
4216 		 * reporting wrong MIB values.
4217 		 */
4218 		for (i = 0; i < 0x80; i += 0x4)
4219 			mtk_r32(eth, reg_map->gdm1_cnt + 0x100 + i);
4220 	} else if (!mtk_is_netsys_v1(eth)) {
4221 		/* PSE should not drop port8 and port9 packets from WDMA Tx */
4222 		mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
4223 
4224 		/* PSE should drop packets to port 8/9 on WDMA Rx ring full */
4225 		mtk_w32(eth, 0x00000300, PSE_PPE_DROP(0));
4226 
4227 		/* PSE Free Queue Flow Control  */
4228 		mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
4229 
4230 		/* PSE config input queue threshold */
4231 		mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
4232 		mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
4233 		mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
4234 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
4235 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
4236 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
4237 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
4238 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
4239 
4240 		/* PSE config output queue threshold */
4241 		mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
4242 		mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
4243 		mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
4244 		mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
4245 		mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
4246 		mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
4247 		mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
4248 		mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
4249 
4250 		/* GDM and CDM Threshold */
4251 		mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
4252 		mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
4253 		mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
4254 		mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
4255 		mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
4256 		mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
4257 	}
4258 
4259 	return 0;
4260 
4261 err_disable_pm:
4262 	if (!reset) {
4263 		pm_runtime_put_sync(eth->dev);
4264 		pm_runtime_disable(eth->dev);
4265 	}
4266 
4267 	return ret;
4268 }
4269 
4270 static int mtk_hw_deinit(struct mtk_eth *eth)
4271 {
4272 	if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
4273 		return 0;
4274 
4275 	mtk_clk_disable(eth);
4276 
4277 	pm_runtime_put_sync(eth->dev);
4278 	pm_runtime_disable(eth->dev);
4279 
4280 	return 0;
4281 }
4282 
4283 static void mtk_uninit(struct net_device *dev)
4284 {
4285 	struct mtk_mac *mac = netdev_priv(dev);
4286 	struct mtk_eth *eth = mac->hw;
4287 
4288 	phylink_disconnect_phy(mac->phylink);
4289 	mtk_tx_irq_disable(eth, ~0);
4290 	mtk_rx_irq_disable(eth, ~0);
4291 }
4292 
4293 static int mtk_change_mtu(struct net_device *dev, int new_mtu)
4294 {
4295 	int length = new_mtu + MTK_RX_ETH_HLEN;
4296 	struct mtk_mac *mac = netdev_priv(dev);
4297 	struct mtk_eth *eth = mac->hw;
4298 
4299 	if (rcu_access_pointer(eth->prog) &&
4300 	    length > MTK_PP_MAX_BUF_SIZE) {
4301 		netdev_err(dev, "Invalid MTU for XDP mode\n");
4302 		return -EINVAL;
4303 	}
4304 
4305 	mtk_set_mcr_max_rx(mac, length);
4306 	WRITE_ONCE(dev->mtu, new_mtu);
4307 
4308 	return 0;
4309 }
4310 
4311 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4312 {
4313 	struct mtk_mac *mac = netdev_priv(dev);
4314 
4315 	switch (cmd) {
4316 	case SIOCGMIIPHY:
4317 	case SIOCGMIIREG:
4318 	case SIOCSMIIREG:
4319 		return phylink_mii_ioctl(mac->phylink, ifr, cmd);
4320 	default:
4321 		break;
4322 	}
4323 
4324 	return -EOPNOTSUPP;
4325 }
4326 
4327 static void mtk_prepare_for_reset(struct mtk_eth *eth)
4328 {
4329 	u32 val;
4330 	int i;
4331 
4332 	/* set FE PPE ports link down */
4333 	for (i = MTK_GMAC1_ID;
4334 	     i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID);
4335 	     i += 2) {
4336 		val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) | MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT);
4337 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
4338 			val |= MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT);
4339 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
4340 			val |= MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT);
4341 		mtk_w32(eth, val, MTK_FE_GLO_CFG(i));
4342 	}
4343 
4344 	/* adjust PPE configurations to prepare for reset */
4345 	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
4346 		mtk_ppe_prepare_reset(eth->ppe[i]);
4347 
4348 	/* disable NETSYS interrupts */
4349 	mtk_w32(eth, 0, MTK_FE_INT_ENABLE);
4350 
4351 	/* force link down GMAC */
4352 	for (i = 0; i < 2; i++) {
4353 		val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK;
4354 		mtk_w32(eth, val, MTK_MAC_MCR(i));
4355 	}
4356 }
4357 
4358 static void mtk_pending_work(struct work_struct *work)
4359 {
4360 	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
4361 	unsigned long restart = 0;
4362 	u32 val;
4363 	int i;
4364 
4365 	rtnl_lock();
4366 	set_bit(MTK_RESETTING, &eth->state);
4367 
4368 	mtk_prepare_for_reset(eth);
4369 	mtk_wed_fe_reset();
4370 	/* Run again reset preliminary configuration in order to avoid any
4371 	 * possible race during FE reset since it can run releasing RTNL lock.
4372 	 */
4373 	mtk_prepare_for_reset(eth);
4374 
4375 	/* stop all devices to make sure that dma is properly shut down */
4376 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4377 		if (!eth->netdev[i] || !netif_running(eth->netdev[i]))
4378 			continue;
4379 
4380 		mtk_stop(eth->netdev[i]);
4381 		__set_bit(i, &restart);
4382 	}
4383 
4384 	usleep_range(15000, 16000);
4385 
4386 	if (eth->dev->pins)
4387 		pinctrl_select_state(eth->dev->pins->p,
4388 				     eth->dev->pins->default_state);
4389 	mtk_hw_init(eth, true);
4390 
4391 	/* restart DMA and enable IRQs */
4392 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4393 		if (!eth->netdev[i] || !test_bit(i, &restart))
4394 			continue;
4395 
4396 		if (mtk_open(eth->netdev[i])) {
4397 			netif_alert(eth, ifup, eth->netdev[i],
4398 				    "Driver up/down cycle failed\n");
4399 			dev_close(eth->netdev[i]);
4400 		}
4401 	}
4402 
4403 	/* set FE PPE ports link up */
4404 	for (i = MTK_GMAC1_ID;
4405 	     i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID);
4406 	     i += 2) {
4407 		val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) & ~MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT);
4408 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
4409 			val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT);
4410 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
4411 			val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT);
4412 
4413 		mtk_w32(eth, val, MTK_FE_GLO_CFG(i));
4414 	}
4415 
4416 	clear_bit(MTK_RESETTING, &eth->state);
4417 
4418 	mtk_wed_fe_reset_complete();
4419 
4420 	rtnl_unlock();
4421 }
4422 
4423 static int mtk_free_dev(struct mtk_eth *eth)
4424 {
4425 	int i;
4426 
4427 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4428 		if (!eth->netdev[i])
4429 			continue;
4430 		free_netdev(eth->netdev[i]);
4431 	}
4432 
4433 	for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
4434 		if (!eth->dsa_meta[i])
4435 			break;
4436 		metadata_dst_free(eth->dsa_meta[i]);
4437 	}
4438 
4439 	return 0;
4440 }
4441 
4442 static int mtk_unreg_dev(struct mtk_eth *eth)
4443 {
4444 	int i;
4445 
4446 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4447 		struct mtk_mac *mac;
4448 		if (!eth->netdev[i])
4449 			continue;
4450 		mac = netdev_priv(eth->netdev[i]);
4451 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4452 			unregister_netdevice_notifier(&mac->device_notifier);
4453 		unregister_netdev(eth->netdev[i]);
4454 	}
4455 
4456 	return 0;
4457 }
4458 
4459 static void mtk_sgmii_destroy(struct mtk_eth *eth)
4460 {
4461 	int i;
4462 
4463 	for (i = 0; i < MTK_MAX_DEVS; i++)
4464 		mtk_pcs_lynxi_destroy(eth->sgmii_pcs[i]);
4465 }
4466 
4467 static int mtk_cleanup(struct mtk_eth *eth)
4468 {
4469 	mtk_sgmii_destroy(eth);
4470 	mtk_unreg_dev(eth);
4471 	mtk_free_dev(eth);
4472 	cancel_work_sync(&eth->pending_work);
4473 	cancel_delayed_work_sync(&eth->reset.monitor_work);
4474 
4475 	return 0;
4476 }
4477 
4478 static int mtk_get_link_ksettings(struct net_device *ndev,
4479 				  struct ethtool_link_ksettings *cmd)
4480 {
4481 	struct mtk_mac *mac = netdev_priv(ndev);
4482 
4483 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4484 		return -EBUSY;
4485 
4486 	return phylink_ethtool_ksettings_get(mac->phylink, cmd);
4487 }
4488 
4489 static int mtk_set_link_ksettings(struct net_device *ndev,
4490 				  const struct ethtool_link_ksettings *cmd)
4491 {
4492 	struct mtk_mac *mac = netdev_priv(ndev);
4493 
4494 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4495 		return -EBUSY;
4496 
4497 	return phylink_ethtool_ksettings_set(mac->phylink, cmd);
4498 }
4499 
4500 static void mtk_get_drvinfo(struct net_device *dev,
4501 			    struct ethtool_drvinfo *info)
4502 {
4503 	struct mtk_mac *mac = netdev_priv(dev);
4504 
4505 	strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
4506 	strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
4507 	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
4508 }
4509 
4510 static u32 mtk_get_msglevel(struct net_device *dev)
4511 {
4512 	struct mtk_mac *mac = netdev_priv(dev);
4513 
4514 	return mac->hw->msg_enable;
4515 }
4516 
4517 static void mtk_set_msglevel(struct net_device *dev, u32 value)
4518 {
4519 	struct mtk_mac *mac = netdev_priv(dev);
4520 
4521 	mac->hw->msg_enable = value;
4522 }
4523 
4524 static int mtk_nway_reset(struct net_device *dev)
4525 {
4526 	struct mtk_mac *mac = netdev_priv(dev);
4527 
4528 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4529 		return -EBUSY;
4530 
4531 	if (!mac->phylink)
4532 		return -ENOTSUPP;
4533 
4534 	return phylink_ethtool_nway_reset(mac->phylink);
4535 }
4536 
4537 static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4538 {
4539 	int i;
4540 
4541 	switch (stringset) {
4542 	case ETH_SS_STATS: {
4543 		struct mtk_mac *mac = netdev_priv(dev);
4544 
4545 		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
4546 			ethtool_puts(&data, mtk_ethtool_stats[i].str);
4547 		if (mtk_page_pool_enabled(mac->hw))
4548 			page_pool_ethtool_stats_get_strings(data);
4549 		break;
4550 	}
4551 	default:
4552 		break;
4553 	}
4554 }
4555 
4556 static int mtk_get_sset_count(struct net_device *dev, int sset)
4557 {
4558 	switch (sset) {
4559 	case ETH_SS_STATS: {
4560 		int count = ARRAY_SIZE(mtk_ethtool_stats);
4561 		struct mtk_mac *mac = netdev_priv(dev);
4562 
4563 		if (mtk_page_pool_enabled(mac->hw))
4564 			count += page_pool_ethtool_stats_get_count();
4565 		return count;
4566 	}
4567 	default:
4568 		return -EOPNOTSUPP;
4569 	}
4570 }
4571 
4572 static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data)
4573 {
4574 	struct page_pool_stats stats = {};
4575 	int i;
4576 
4577 	for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) {
4578 		struct mtk_rx_ring *ring = &eth->rx_ring[i];
4579 
4580 		if (!ring->page_pool)
4581 			continue;
4582 
4583 		page_pool_get_stats(ring->page_pool, &stats);
4584 	}
4585 	page_pool_ethtool_stats_get(data, &stats);
4586 }
4587 
4588 static void mtk_get_ethtool_stats(struct net_device *dev,
4589 				  struct ethtool_stats *stats, u64 *data)
4590 {
4591 	struct mtk_mac *mac = netdev_priv(dev);
4592 	struct mtk_hw_stats *hwstats = mac->hw_stats;
4593 	u64 *data_src, *data_dst;
4594 	unsigned int start;
4595 	int i;
4596 
4597 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4598 		return;
4599 
4600 	if (netif_running(dev) && netif_device_present(dev)) {
4601 		if (spin_trylock_bh(&hwstats->stats_lock)) {
4602 			mtk_stats_update_mac(mac);
4603 			spin_unlock_bh(&hwstats->stats_lock);
4604 		}
4605 	}
4606 
4607 	data_src = (u64 *)hwstats;
4608 
4609 	do {
4610 		data_dst = data;
4611 		start = u64_stats_fetch_begin(&hwstats->syncp);
4612 
4613 		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
4614 			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
4615 		if (mtk_page_pool_enabled(mac->hw))
4616 			mtk_ethtool_pp_stats(mac->hw, data_dst);
4617 	} while (u64_stats_fetch_retry(&hwstats->syncp, start));
4618 }
4619 
4620 static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
4621 			 u32 *rule_locs)
4622 {
4623 	int ret = -EOPNOTSUPP;
4624 
4625 	switch (cmd->cmd) {
4626 	case ETHTOOL_GRXRINGS:
4627 		if (dev->hw_features & NETIF_F_LRO) {
4628 			cmd->data = MTK_MAX_RX_RING_NUM;
4629 			ret = 0;
4630 		}
4631 		break;
4632 	case ETHTOOL_GRXCLSRLCNT:
4633 		if (dev->hw_features & NETIF_F_LRO) {
4634 			struct mtk_mac *mac = netdev_priv(dev);
4635 
4636 			cmd->rule_cnt = mac->hwlro_ip_cnt;
4637 			ret = 0;
4638 		}
4639 		break;
4640 	case ETHTOOL_GRXCLSRULE:
4641 		if (dev->hw_features & NETIF_F_LRO)
4642 			ret = mtk_hwlro_get_fdir_entry(dev, cmd);
4643 		break;
4644 	case ETHTOOL_GRXCLSRLALL:
4645 		if (dev->hw_features & NETIF_F_LRO)
4646 			ret = mtk_hwlro_get_fdir_all(dev, cmd,
4647 						     rule_locs);
4648 		break;
4649 	default:
4650 		break;
4651 	}
4652 
4653 	return ret;
4654 }
4655 
4656 static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
4657 {
4658 	int ret = -EOPNOTSUPP;
4659 
4660 	switch (cmd->cmd) {
4661 	case ETHTOOL_SRXCLSRLINS:
4662 		if (dev->hw_features & NETIF_F_LRO)
4663 			ret = mtk_hwlro_add_ipaddr(dev, cmd);
4664 		break;
4665 	case ETHTOOL_SRXCLSRLDEL:
4666 		if (dev->hw_features & NETIF_F_LRO)
4667 			ret = mtk_hwlro_del_ipaddr(dev, cmd);
4668 		break;
4669 	default:
4670 		break;
4671 	}
4672 
4673 	return ret;
4674 }
4675 
4676 static void mtk_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4677 {
4678 	struct mtk_mac *mac = netdev_priv(dev);
4679 
4680 	phylink_ethtool_get_pauseparam(mac->phylink, pause);
4681 }
4682 
4683 static int mtk_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4684 {
4685 	struct mtk_mac *mac = netdev_priv(dev);
4686 
4687 	return phylink_ethtool_set_pauseparam(mac->phylink, pause);
4688 }
4689 
4690 static int mtk_get_eee(struct net_device *dev, struct ethtool_keee *eee)
4691 {
4692 	struct mtk_mac *mac = netdev_priv(dev);
4693 
4694 	return phylink_ethtool_get_eee(mac->phylink, eee);
4695 }
4696 
4697 static int mtk_set_eee(struct net_device *dev, struct ethtool_keee *eee)
4698 {
4699 	struct mtk_mac *mac = netdev_priv(dev);
4700 
4701 	return phylink_ethtool_set_eee(mac->phylink, eee);
4702 }
4703 
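/* TX queue selection: DSA user ports map to queues 3 and up (matching
 * the shaper setup in mtk_device_event()), plain MACs use their MAC id,
 * and anything out of range falls back to queue 0.
 */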
4704 static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
4705 			    struct net_device *sb_dev)
4706 {
4707 	struct mtk_mac *mac = netdev_priv(dev);
4708 	unsigned int queue = 0;
4709 
4710 	if (netdev_uses_dsa(dev))
4711 		queue = skb_get_queue_mapping(skb) + 3;
4712 	else
4713 		queue = mac->id;
4714 
4715 	if (queue >= dev->num_tx_queues)
4716 		queue = 0;
4717 
4718 	return queue;
4719 }
4720 
4721 static const struct ethtool_ops mtk_ethtool_ops = {
4722 	.get_link_ksettings	= mtk_get_link_ksettings,
4723 	.set_link_ksettings	= mtk_set_link_ksettings,
4724 	.get_drvinfo		= mtk_get_drvinfo,
4725 	.get_msglevel		= mtk_get_msglevel,
4726 	.set_msglevel		= mtk_set_msglevel,
4727 	.nway_reset		= mtk_nway_reset,
4728 	.get_link		= ethtool_op_get_link,
4729 	.get_strings		= mtk_get_strings,
4730 	.get_sset_count		= mtk_get_sset_count,
4731 	.get_ethtool_stats	= mtk_get_ethtool_stats,
4732 	.get_pauseparam		= mtk_get_pauseparam,
4733 	.set_pauseparam		= mtk_set_pauseparam,
4734 	.get_rxnfc		= mtk_get_rxnfc,
4735 	.set_rxnfc		= mtk_set_rxnfc,
4736 	.get_eee		= mtk_get_eee,
4737 	.set_eee		= mtk_set_eee,
4738 };
4739 
4740 static const struct net_device_ops mtk_netdev_ops = {
4741 	.ndo_uninit		= mtk_uninit,
4742 	.ndo_open		= mtk_open,
4743 	.ndo_stop		= mtk_stop,
4744 	.ndo_start_xmit		= mtk_start_xmit,
4745 	.ndo_set_mac_address	= mtk_set_mac_address,
4746 	.ndo_validate_addr	= eth_validate_addr,
4747 	.ndo_eth_ioctl		= mtk_do_ioctl,
4748 	.ndo_change_mtu		= mtk_change_mtu,
4749 	.ndo_tx_timeout		= mtk_tx_timeout,
4750 	.ndo_get_stats64        = mtk_get_stats64,
4751 	.ndo_fix_features	= mtk_fix_features,
4752 	.ndo_set_features	= mtk_set_features,
4753 #ifdef CONFIG_NET_POLL_CONTROLLER
4754 	.ndo_poll_controller	= mtk_poll_controller,
4755 #endif
4756 	.ndo_setup_tc		= mtk_eth_setup_tc,
4757 	.ndo_bpf		= mtk_xdp,
4758 	.ndo_xdp_xmit		= mtk_xdp_xmit,
4759 	.ndo_select_queue	= mtk_select_queue,
4760 };
4761 
4762 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
4763 {
4764 	const __be32 *_id = of_get_property(np, "reg", NULL);
4765 	phy_interface_t phy_mode;
4766 	struct phylink *phylink;
4767 	struct mtk_mac *mac;
4768 	int id, err;
4769 	int txqs = 1;
4770 	u32 val;
4771 
4772 	if (!_id) {
4773 		dev_err(eth->dev, "missing mac id\n");
4774 		return -EINVAL;
4775 	}
4776 
4777 	id = be32_to_cpup(_id);
4778 	if (id >= MTK_MAX_DEVS) {
4779 		dev_err(eth->dev, "%d is not a valid mac id\n", id);
4780 		return -EINVAL;
4781 	}
4782 
4783 	if (eth->netdev[id]) {
4784 		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
4785 		return -EINVAL;
4786 	}
4787 
4788 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4789 		txqs = MTK_QDMA_NUM_QUEUES;
4790 
4791 	eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1);
4792 	if (!eth->netdev[id]) {
4793 		dev_err(eth->dev, "alloc_etherdev failed\n");
4794 		return -ENOMEM;
4795 	}
4796 	mac = netdev_priv(eth->netdev[id]);
4797 	eth->mac[id] = mac;
4798 	mac->id = id;
4799 	mac->hw = eth;
4800 	mac->of_node = np;
4801 
4802 	err = of_get_ethdev_address(mac->of_node, eth->netdev[id]);
4803 	if (err == -EPROBE_DEFER)
4804 		return err;
4805 
4806 	if (err) {
4807 		/* If the mac address is invalid, use random mac address */
4808 		eth_hw_addr_random(eth->netdev[id]);
4809 		dev_err(eth->dev, "generated random MAC address %pM\n",
4810 			eth->netdev[id]->dev_addr);
4811 	}
4812 
4813 	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
4814 	mac->hwlro_ip_cnt = 0;
4815 
4816 	mac->hw_stats = devm_kzalloc(eth->dev,
4817 				     sizeof(*mac->hw_stats),
4818 				     GFP_KERNEL);
4819 	if (!mac->hw_stats) {
4820 		dev_err(eth->dev, "failed to allocate counter memory\n");
4821 		err = -ENOMEM;
4822 		goto free_netdev;
4823 	}
4824 	spin_lock_init(&mac->hw_stats->stats_lock);
4825 	u64_stats_init(&mac->hw_stats->syncp);
4826 
4827 	if (mtk_is_netsys_v3_or_greater(eth))
4828 		mac->hw_stats->reg_offset = id * 0x80;
4829 	else
4830 		mac->hw_stats->reg_offset = id * 0x40;
4831 
4832 	/* phylink create */
4833 	err = of_get_phy_mode(np, &phy_mode);
4834 	if (err) {
4835 		dev_err(eth->dev, "incorrect phy-mode\n");
4836 		goto free_netdev;
4837 	}
4838 
4839 	/* mac config is not set */
4840 	mac->interface = PHY_INTERFACE_MODE_NA;
4841 	mac->speed = SPEED_UNKNOWN;
4842 
4843 	mac->phylink_config.dev = &eth->netdev[id]->dev;
4844 	mac->phylink_config.type = PHYLINK_NETDEV;
4845 	mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
4846 		MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
4847 	mac->phylink_config.lpi_capabilities = MAC_100FD | MAC_1000FD |
4848 		MAC_2500FD;
4849 	mac->phylink_config.lpi_timer_default = 1000;
4850 
4851 	/* MT7623 gmac0 is now missing its speed-specific PLL configuration
4852 	 * in its .mac_config method (since state->speed is not valid there).
4853 	 * Disable support for MII, GMII and RGMII.
4854 	 */
4855 	if (!mac->hw->soc->disable_pll_modes || mac->id != 0) {
4856 		__set_bit(PHY_INTERFACE_MODE_MII,
4857 			  mac->phylink_config.supported_interfaces);
4858 		__set_bit(PHY_INTERFACE_MODE_GMII,
4859 			  mac->phylink_config.supported_interfaces);
4860 
4861 		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
4862 			phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
4863 	}
4864 
4865 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
4866 		__set_bit(PHY_INTERFACE_MODE_TRGMII,
4867 			  mac->phylink_config.supported_interfaces);
4868 
4869 	/* TRGMII is not permitted on MT7621 if using DDR2 */
4870 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) &&
4871 	    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII_MT7621_CLK)) {
4872 		regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
4873 		if (val & SYSCFG_DRAM_TYPE_DDR2)
4874 			__clear_bit(PHY_INTERFACE_MODE_TRGMII,
4875 				    mac->phylink_config.supported_interfaces);
4876 	}
4877 
4878 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
4879 		__set_bit(PHY_INTERFACE_MODE_SGMII,
4880 			  mac->phylink_config.supported_interfaces);
4881 		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
4882 			  mac->phylink_config.supported_interfaces);
4883 		__set_bit(PHY_INTERFACE_MODE_2500BASEX,
4884 			  mac->phylink_config.supported_interfaces);
4885 	}
4886 
4887 	if (mtk_is_netsys_v3_or_greater(mac->hw) &&
4888 	    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW) &&
4889 	    id == MTK_GMAC1_ID) {
4890 		mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
4891 						       MAC_SYM_PAUSE |
4892 						       MAC_10000FD;
4893 		phy_interface_zero(mac->phylink_config.supported_interfaces);
4894 		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
4895 			  mac->phylink_config.supported_interfaces);
4896 	}
4897 
4898 	phylink = phylink_create(&mac->phylink_config,
4899 				 of_fwnode_handle(mac->of_node),
4900 				 phy_mode, &mtk_phylink_ops);
4901 	if (IS_ERR(phylink)) {
4902 		err = PTR_ERR(phylink);
4903 		goto free_netdev;
4904 	}
4905 
4906 	mac->phylink = phylink;
4907 
4908 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_2P5GPHY) &&
4909 	    id == MTK_GMAC2_ID)
4910 		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
4911 			  mac->phylink_config.supported_interfaces);
4912 
4913 	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
4914 	eth->netdev[id]->watchdog_timeo = 5 * HZ;
4915 	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
4916 	eth->netdev[id]->base_addr = (unsigned long)eth->base;
4917 
4918 	eth->netdev[id]->hw_features = eth->soc->hw_features;
4919 	if (eth->hwlro)
4920 		eth->netdev[id]->hw_features |= NETIF_F_LRO;
4921 
4922 	eth->netdev[id]->vlan_features = eth->soc->hw_features &
4923 		~NETIF_F_HW_VLAN_CTAG_TX;
4924 	eth->netdev[id]->features |= eth->soc->hw_features;
4925 	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
4926 
4927 	eth->netdev[id]->irq = eth->irq[MTK_FE_IRQ_SHARED];
4928 	eth->netdev[id]->dev.of_node = np;
4929 
4930 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4931 		eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
4932 	else
4933 		eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
4934 
4935 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
4936 		mac->device_notifier.notifier_call = mtk_device_event;
4937 		register_netdevice_notifier(&mac->device_notifier);
4938 	}
4939 
4940 	if (mtk_page_pool_enabled(eth))
4941 		eth->netdev[id]->xdp_features = NETDEV_XDP_ACT_BASIC |
4942 						NETDEV_XDP_ACT_REDIRECT |
4943 						NETDEV_XDP_ACT_NDO_XMIT |
4944 						NETDEV_XDP_ACT_NDO_XMIT_SG;
4945 
4946 	return 0;
4947 
4948 free_netdev:
4949 	free_netdev(eth->netdev[id]);
4950 	return err;
4951 }
4952 
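/* Re-target the DMA device, e.g. when the WED driver takes over parts
 * of the data path: close every running netdev under RTNL, swap
 * eth->dma_dev, then reopen them so their rings are set up again
 * against the new device.
 */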
4953 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
4954 {
4955 	struct net_device *dev, *tmp;
4956 	LIST_HEAD(dev_list);
4957 	int i;
4958 
4959 	rtnl_lock();
4960 
4961 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4962 		dev = eth->netdev[i];
4963 
4964 		if (!dev || !(dev->flags & IFF_UP))
4965 			continue;
4966 
4967 		list_add_tail(&dev->close_list, &dev_list);
4968 	}
4969 
4970 	netif_close_many(&dev_list, false);
4971 
4972 	eth->dma_dev = dma_dev;
4973 
4974 	list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
4975 		list_del_init(&dev->close_list);
4976 		dev_open(dev, NULL);
4977 	}
4978 
4979 	rtnl_unlock();
4980 }
4981 
4982 static int mtk_sgmii_init(struct mtk_eth *eth)
4983 {
4984 	struct device_node *np;
4985 	struct regmap *regmap;
4986 	u32 flags;
4987 	int i;
4988 
4989 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4990 		np = of_parse_phandle(eth->dev->of_node, "mediatek,sgmiisys", i);
4991 		if (!np)
4992 			break;
4993 
4994 		regmap = syscon_node_to_regmap(np);
4995 		flags = 0;
4996 		if (of_property_read_bool(np, "mediatek,pnswap"))
4997 			flags |= MTK_SGMII_FLAG_PN_SWAP;
4998 
4999 		of_node_put(np);
5000 
5001 		if (IS_ERR(regmap))
5002 			return PTR_ERR(regmap);
5003 
5004 		eth->sgmii_pcs[i] = mtk_pcs_lynxi_create(eth->dev, regmap,
5005 							 eth->soc->ana_rgc3,
5006 							 flags);
5007 	}
5008 
5009 	return 0;
5010 }
5011 
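/* Legacy device trees do not describe the SRAM region separately, so
 * carve a gen_pool out of the already-mapped register resource at the
 * fixed MTK_ETH_SRAM_OFFSET, after checking that the resource is large
 * enough to contain it.
 */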
5012 static int mtk_setup_legacy_sram(struct mtk_eth *eth, struct resource *res)
5013 {
5014 	dev_warn(eth->dev, "legacy DT: using hard-coded SRAM offset.\n");
5015 
5016 	if (res->start + MTK_ETH_SRAM_OFFSET + MTK_ETH_NETSYS_V2_SRAM_SIZE - 1 >
5017 	    res->end)
5018 		return -EINVAL;
5019 
5020 	eth->sram_pool = devm_gen_pool_create(eth->dev,
5021 					      const_ilog2(MTK_ETH_SRAM_GRANULARITY),
5022 					      NUMA_NO_NODE, dev_name(eth->dev));
5023 
5024 	if (IS_ERR(eth->sram_pool))
5025 		return PTR_ERR(eth->sram_pool);
5026 
5027 	return gen_pool_add_virt(eth->sram_pool,
5028 				 (unsigned long)eth->base + MTK_ETH_SRAM_OFFSET,
5029 				 res->start + MTK_ETH_SRAM_OFFSET,
5030 				 MTK_ETH_NETSYS_V2_SRAM_SIZE, NUMA_NO_NODE);
5031 }
5032 
5033 static int mtk_probe(struct platform_device *pdev)
5034 {
5035 	struct resource *res = NULL;
5036 	struct device_node *mac_np;
5037 	struct mtk_eth *eth;
5038 	int err, i;
5039 
5040 	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
5041 	if (!eth)
5042 		return -ENOMEM;
5043 
5044 	eth->soc = of_device_get_match_data(&pdev->dev);
5045 
5046 	eth->dev = &pdev->dev;
5047 	eth->dma_dev = &pdev->dev;
5048 	eth->base = devm_platform_ioremap_resource(pdev, 0);
5049 	if (IS_ERR(eth->base))
5050 		return PTR_ERR(eth->base);
5051 
5052 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
5053 		eth->ip_align = NET_IP_ALIGN;
5054 
5055 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
5056 		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
5057 		if (!err)
5058 			err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
5059 
5060 		if (err) {
5061 			dev_err(&pdev->dev, "Wrong DMA config\n");
5062 			return -EINVAL;
5063 		}
5064 	}
5065 
5066 	spin_lock_init(&eth->page_lock);
5067 	spin_lock_init(&eth->tx_irq_lock);
5068 	spin_lock_init(&eth->rx_irq_lock);
5069 	spin_lock_init(&eth->dim_lock);
5070 
5071 	eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5072 	INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
5073 	INIT_DELAYED_WORK(&eth->reset.monitor_work, mtk_hw_reset_monitor_work);
5074 
5075 	eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5076 	INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);
5077 
5078 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
5079 		eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
5080 							      "mediatek,ethsys");
5081 		if (IS_ERR(eth->ethsys)) {
5082 			dev_err(&pdev->dev, "no ethsys regmap found\n");
5083 			return PTR_ERR(eth->ethsys);
5084 		}
5085 	}
5086 
5087 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
5088 		eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
5089 							     "mediatek,infracfg");
5090 		if (IS_ERR(eth->infra)) {
5091 			dev_err(&pdev->dev, "no infracfg regmap found\n");
5092 			return PTR_ERR(eth->infra);
5093 		}
5094 	}
5095 
5096 	if (of_dma_is_coherent(pdev->dev.of_node)) {
5097 		struct regmap *cci;
5098 
5099 		cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
5100 						      "cci-control-port");
5101 		/* enable CPU/bus coherency */
5102 		if (!IS_ERR(cci))
5103 			regmap_write(cci, 0, 3);
5104 	}
5105 
5106 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
5107 		err = mtk_sgmii_init(eth);
5108 
5109 		if (err)
5110 			return err;
5111 	}
5112 
5113 	if (eth->soc->required_pctl) {
5114 		eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
5115 							    "mediatek,pctl");
5116 		if (IS_ERR(eth->pctl)) {
5117 			dev_err(&pdev->dev, "no pctl regmap found\n");
5118 			err = PTR_ERR(eth->pctl);
5119 			goto err_destroy_sgmii;
5120 		}
5121 	}
5122 
5123 	if (mtk_is_netsys_v2_or_greater(eth)) {
5124 		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
5125 		if (!res) {
5126 			err = -EINVAL;
5127 			goto err_destroy_sgmii;
5128 		}
5129 
5130 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
5131 			eth->sram_pool = of_gen_pool_get(pdev->dev.of_node,
5132 							 "sram", 0);
5133 			if (!eth->sram_pool) {
5134 				if (!mtk_is_netsys_v3_or_greater(eth)) {
5135 					err = mtk_setup_legacy_sram(eth, res);
5136 					if (err)
5137 						goto err_destroy_sgmii;
5138 				} else {
5139 					dev_err(&pdev->dev,
5140 						"Could not get SRAM pool\n");
5141 					err = -EINVAL;
5142 					goto err_destroy_sgmii;
5143 				}
5144 			}
5145 		}
5146 	}
5147 
5148 	if (eth->soc->offload_version) {
5149 		for (i = 0;; i++) {
5150 			struct device_node *np;
5151 			phys_addr_t wdma_phy;
5152 			u32 wdma_base;
5153 
5154 			if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
5155 				break;
5156 
5157 			np = of_parse_phandle(pdev->dev.of_node,
5158 					      "mediatek,wed", i);
5159 			if (!np)
5160 				break;
5161 
5162 			wdma_base = eth->soc->reg_map->wdma_base[i];
5163 			wdma_phy = res ? res->start + wdma_base : 0;
5164 			mtk_wed_add_hw(np, eth, eth->base + wdma_base,
5165 				       wdma_phy, i);
5166 		}
5167 	}
5168 
5169 	err = mtk_get_irqs(pdev, eth);
5170 	if (err)
5171 		goto err_wed_exit;
5172 
5173 	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
5174 		eth->clks[i] = devm_clk_get(eth->dev,
5175 					    mtk_clks_source_name[i]);
5176 		if (IS_ERR(eth->clks[i])) {
5177 			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) {
5178 				err = -EPROBE_DEFER;
5179 				goto err_wed_exit;
5180 			}
5181 			if (eth->soc->required_clks & BIT(i)) {
5182 				dev_err(&pdev->dev, "clock %s not found\n",
5183 					mtk_clks_source_name[i]);
5184 				err = -EINVAL;
5185 				goto err_wed_exit;
5186 			}
5187 			eth->clks[i] = NULL;
5188 		}
5189 	}
5190 
5191 	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
5192 	INIT_WORK(&eth->pending_work, mtk_pending_work);
5193 
5194 	err = mtk_hw_init(eth, false);
5195 	if (err)
5196 		goto err_wed_exit;
5197 
5198 	eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
5199 
5200 	for_each_child_of_node(pdev->dev.of_node, mac_np) {
5201 		if (!of_device_is_compatible(mac_np,
5202 					     "mediatek,eth-mac"))
5203 			continue;
5204 
5205 		if (!of_device_is_available(mac_np))
5206 			continue;
5207 
5208 		err = mtk_add_mac(eth, mac_np);
5209 		if (err) {
5210 			of_node_put(mac_np);
5211 			goto err_deinit_hw;
5212 		}
5213 	}
5214 
5215 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
5216 		err = devm_request_irq(eth->dev, eth->irq[MTK_FE_IRQ_SHARED],
5217 				       mtk_handle_irq, 0,
5218 				       dev_name(eth->dev), eth);
5219 	} else {
5220 		err = devm_request_irq(eth->dev, eth->irq[MTK_FE_IRQ_TX],
5221 				       mtk_handle_irq_tx, 0,
5222 				       dev_name(eth->dev), eth);
5223 		if (err)
5224 			goto err_free_dev;
5225 
5226 		err = devm_request_irq(eth->dev, eth->irq[MTK_FE_IRQ_RX],
5227 				       mtk_handle_irq_rx, 0,
5228 				       dev_name(eth->dev), eth);
5229 	}
5230 	if (err)
5231 		goto err_free_dev;
5232 
5233 	/* No MT7628/88 support yet */
5234 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
5235 		err = mtk_mdio_init(eth);
5236 		if (err)
5237 			goto err_free_dev;
5238 	}
5239 
5240 	if (eth->soc->offload_version) {
5241 		u8 ppe_num = eth->soc->ppe_num;
5242 
5243 		ppe_num = min_t(u8, ARRAY_SIZE(eth->ppe), ppe_num);
5244 		for (i = 0; i < ppe_num; i++) {
5245 			u32 ppe_addr = eth->soc->reg_map->ppe_base;
5246 
5247 			ppe_addr += (i == 2 ? 0xc00 : i * 0x400);
5248 			eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i);
5249 
5250 			if (!eth->ppe[i]) {
5251 				err = -ENOMEM;
5252 				goto err_deinit_ppe;
5253 			}
5254 			err = mtk_eth_offload_init(eth, i);
5255 
5256 			if (err)
5257 				goto err_deinit_ppe;
5258 		}
5259 	}
5260 
5261 	for (i = 0; i < MTK_MAX_DEVS; i++) {
5262 		if (!eth->netdev[i])
5263 			continue;
5264 
5265 		err = register_netdev(eth->netdev[i]);
5266 		if (err) {
5267 			dev_err(eth->dev, "error bringing up device\n");
5268 			goto err_deinit_ppe;
5269 		} else
5270 			netif_info(eth, probe, eth->netdev[i],
5271 				   "mediatek frame engine at 0x%08lx, irq %d\n",
5272 				   eth->netdev[i]->base_addr, eth->irq[MTK_FE_IRQ_SHARED]);
5273 	}
5274 
5275 	/* we run 2 devices on the same DMA ring so we need a dummy device
5276 	 * for NAPI to work
5277 	 */
5278 	eth->dummy_dev = alloc_netdev_dummy(0);
5279 	if (!eth->dummy_dev) {
5280 		err = -ENOMEM;
5281 		dev_err(eth->dev, "failed to allocate dummy device\n");
5282 		goto err_unreg_netdev;
5283 	}
5284 	netif_napi_add(eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
5285 	netif_napi_add(eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
5286 
5287 	platform_set_drvdata(pdev, eth);
5288 	schedule_delayed_work(&eth->reset.monitor_work,
5289 			      MTK_DMA_MONITOR_TIMEOUT);
5290 
5291 	return 0;
5292 
5293 err_unreg_netdev:
5294 	mtk_unreg_dev(eth);
5295 err_deinit_ppe:
5296 	mtk_ppe_deinit(eth);
5297 	mtk_mdio_cleanup(eth);
5298 err_free_dev:
5299 	mtk_free_dev(eth);
5300 err_deinit_hw:
5301 	mtk_hw_deinit(eth);
5302 err_wed_exit:
5303 	mtk_wed_exit();
5304 err_destroy_sgmii:
5305 	mtk_sgmii_destroy(eth);
5306 
5307 	return err;
5308 }
5309 
5310 static void mtk_remove(struct platform_device *pdev)
5311 {
5312 	struct mtk_eth *eth = platform_get_drvdata(pdev);
5313 	struct mtk_mac *mac;
5314 	int i;
5315 
5316 	/* stop all devices to make sure that dma is properly shut down */
5317 	for (i = 0; i < MTK_MAX_DEVS; i++) {
5318 		if (!eth->netdev[i])
5319 			continue;
5320 		mtk_stop(eth->netdev[i]);
5321 		mac = netdev_priv(eth->netdev[i]);
5322 		phylink_disconnect_phy(mac->phylink);
5323 	}
5324 
5325 	mtk_wed_exit();
5326 	mtk_hw_deinit(eth);
5327 
5328 	netif_napi_del(&eth->tx_napi);
5329 	netif_napi_del(&eth->rx_napi);
5330 	mtk_cleanup(eth);
5331 	free_netdev(eth->dummy_dev);
5332 	mtk_mdio_cleanup(eth);
5333 }
5334 
5335 static const struct mtk_soc_data mt2701_data = {
5336 	.reg_map = &mtk_reg_map,
5337 	.caps = MT7623_CAPS | MTK_HWLRO,
5338 	.hw_features = MTK_HW_FEATURES,
5339 	.required_clks = MT7623_CLKS_BITMAP,
5340 	.required_pctl = true,
5341 	.version = 1,
5342 	.tx = {
5343 		.desc_size = sizeof(struct mtk_tx_dma),
5344 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5345 		.dma_len_offset = 16,
5346 		.dma_size = MTK_DMA_SIZE(2K),
5347 		.fq_dma_size = MTK_DMA_SIZE(2K),
5348 	},
5349 	.rx = {
5350 		.desc_size = sizeof(struct mtk_rx_dma),
5351 		.irq_done_mask = MTK_RX_DONE_INT,
5352 		.dma_l4_valid = RX_DMA_L4_VALID,
5353 		.dma_size = MTK_DMA_SIZE(2K),
5354 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5355 		.dma_len_offset = 16,
5356 	},
5357 };
5358 
5359 static const struct mtk_soc_data mt7621_data = {
5360 	.reg_map = &mtk_reg_map,
5361 	.caps = MT7621_CAPS,
5362 	.hw_features = MTK_HW_FEATURES,
5363 	.required_clks = MT7621_CLKS_BITMAP,
5364 	.required_pctl = false,
5365 	.version = 1,
5366 	.offload_version = 1,
5367 	.ppe_num = 1,
5368 	.hash_offset = 2,
5369 	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
5370 	.tx = {
5371 		.desc_size = sizeof(struct mtk_tx_dma),
5372 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5373 		.dma_len_offset = 16,
5374 		.dma_size = MTK_DMA_SIZE(2K),
5375 		.fq_dma_size = MTK_DMA_SIZE(2K),
5376 	},
5377 	.rx = {
5378 		.desc_size = sizeof(struct mtk_rx_dma),
5379 		.irq_done_mask = MTK_RX_DONE_INT,
5380 		.dma_l4_valid = RX_DMA_L4_VALID,
5381 		.dma_size = MTK_DMA_SIZE(2K),
5382 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5383 		.dma_len_offset = 16,
5384 	},
5385 };
5386 
5387 static const struct mtk_soc_data mt7622_data = {
5388 	.reg_map = &mtk_reg_map,
5389 	.ana_rgc3 = 0x2028,
5390 	.caps = MT7622_CAPS | MTK_HWLRO,
5391 	.hw_features = MTK_HW_FEATURES,
5392 	.required_clks = MT7622_CLKS_BITMAP,
5393 	.required_pctl = false,
5394 	.version = 1,
5395 	.offload_version = 2,
5396 	.ppe_num = 1,
5397 	.hash_offset = 2,
5398 	.has_accounting = true,
5399 	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
5400 	.tx = {
5401 		.desc_size = sizeof(struct mtk_tx_dma),
5402 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5403 		.dma_len_offset = 16,
5404 		.dma_size = MTK_DMA_SIZE(2K),
5405 		.fq_dma_size = MTK_DMA_SIZE(2K),
5406 	},
5407 	.rx = {
5408 		.desc_size = sizeof(struct mtk_rx_dma),
5409 		.irq_done_mask = MTK_RX_DONE_INT,
5410 		.dma_l4_valid = RX_DMA_L4_VALID,
5411 		.dma_size = MTK_DMA_SIZE(2K),
5412 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5413 		.dma_len_offset = 16,
5414 	},
5415 };
5416 
5417 static const struct mtk_soc_data mt7623_data = {
5418 	.reg_map = &mtk_reg_map,
5419 	.caps = MT7623_CAPS | MTK_HWLRO,
5420 	.hw_features = MTK_HW_FEATURES,
5421 	.required_clks = MT7623_CLKS_BITMAP,
5422 	.required_pctl = true,
5423 	.version = 1,
5424 	.offload_version = 1,
5425 	.ppe_num = 1,
5426 	.hash_offset = 2,
5427 	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
5428 	.disable_pll_modes = true,
5429 	.tx = {
5430 		.desc_size = sizeof(struct mtk_tx_dma),
5431 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5432 		.dma_len_offset = 16,
5433 		.dma_size = MTK_DMA_SIZE(2K),
5434 		.fq_dma_size = MTK_DMA_SIZE(2K),
5435 	},
5436 	.rx = {
5437 		.desc_size = sizeof(struct mtk_rx_dma),
5438 		.irq_done_mask = MTK_RX_DONE_INT,
5439 		.dma_l4_valid = RX_DMA_L4_VALID,
5440 		.dma_size = MTK_DMA_SIZE(2K),
5441 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5442 		.dma_len_offset = 16,
5443 	},
5444 };
5445 
5446 static const struct mtk_soc_data mt7629_data = {
5447 	.reg_map = &mtk_reg_map,
5448 	.ana_rgc3 = 0x128,
5449 	.caps = MT7629_CAPS | MTK_HWLRO,
5450 	.hw_features = MTK_HW_FEATURES,
5451 	.required_clks = MT7629_CLKS_BITMAP,
5452 	.required_pctl = false,
5453 	.has_accounting = true,
5454 	.version = 1,
5455 	.tx = {
5456 		.desc_size = sizeof(struct mtk_tx_dma),
5457 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5458 		.dma_len_offset = 16,
5459 		.dma_size = MTK_DMA_SIZE(2K),
5460 		.fq_dma_size = MTK_DMA_SIZE(2K),
5461 	},
5462 	.rx = {
5463 		.desc_size = sizeof(struct mtk_rx_dma),
5464 		.irq_done_mask = MTK_RX_DONE_INT,
5465 		.dma_l4_valid = RX_DMA_L4_VALID,
5466 		.dma_size = MTK_DMA_SIZE(2K),
5467 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5468 		.dma_len_offset = 16,
5469 	},
5470 };
5471 
5472 static const struct mtk_soc_data mt7981_data = {
5473 	.reg_map = &mt7986_reg_map,
5474 	.ana_rgc3 = 0x128,
5475 	.caps = MT7981_CAPS,
5476 	.hw_features = MTK_HW_FEATURES,
5477 	.required_clks = MT7981_CLKS_BITMAP,
5478 	.required_pctl = false,
5479 	.version = 2,
5480 	.offload_version = 2,
5481 	.ppe_num = 2,
5482 	.hash_offset = 4,
5483 	.has_accounting = true,
5484 	.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
5485 	.tx = {
5486 		.desc_size = sizeof(struct mtk_tx_dma_v2),
5487 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5488 		.dma_len_offset = 8,
5489 		.dma_size = MTK_DMA_SIZE(2K),
5490 		.fq_dma_size = MTK_DMA_SIZE(2K),
5491 	},
5492 	.rx = {
5493 		.desc_size = sizeof(struct mtk_rx_dma),
5494 		.irq_done_mask = MTK_RX_DONE_INT,
5495 		.dma_l4_valid = RX_DMA_L4_VALID_V2,
5496 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5497 		.dma_len_offset = 16,
5498 		.dma_size = MTK_DMA_SIZE(2K),
5499 	},
5500 };
5501 
5502 static const struct mtk_soc_data mt7986_data = {
5503 	.reg_map = &mt7986_reg_map,
5504 	.ana_rgc3 = 0x128,
5505 	.caps = MT7986_CAPS,
5506 	.hw_features = MTK_HW_FEATURES,
5507 	.required_clks = MT7986_CLKS_BITMAP,
5508 	.required_pctl = false,
5509 	.version = 2,
5510 	.offload_version = 2,
5511 	.ppe_num = 2,
5512 	.hash_offset = 4,
5513 	.has_accounting = true,
5514 	.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
5515 	.tx = {
5516 		.desc_size = sizeof(struct mtk_tx_dma_v2),
5517 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5518 		.dma_len_offset = 8,
5519 		.dma_size = MTK_DMA_SIZE(2K),
5520 		.fq_dma_size = MTK_DMA_SIZE(2K),
5521 	},
5522 	.rx = {
5523 		.desc_size = sizeof(struct mtk_rx_dma),
5524 		.irq_done_mask = MTK_RX_DONE_INT,
5525 		.dma_l4_valid = RX_DMA_L4_VALID_V2,
5526 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5527 		.dma_len_offset = 16,
5528 		.dma_size = MTK_DMA_SIZE(2K),
5529 	},
5530 };
5531 
5532 static const struct mtk_soc_data mt7988_data = {
5533 	.reg_map = &mt7988_reg_map,
5534 	.ana_rgc3 = 0x128,
5535 	.caps = MT7988_CAPS,
5536 	.hw_features = MTK_HW_FEATURES,
5537 	.required_clks = MT7988_CLKS_BITMAP,
5538 	.required_pctl = false,
5539 	.version = 3,
5540 	.offload_version = 2,
5541 	.ppe_num = 3,
5542 	.hash_offset = 4,
5543 	.has_accounting = true,
5544 	.foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
5545 	.tx = {
5546 		.desc_size = sizeof(struct mtk_tx_dma_v2),
5547 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5548 		.dma_len_offset = 8,
5549 		.dma_size = MTK_DMA_SIZE(2K),
5550 		.fq_dma_size = MTK_DMA_SIZE(4K),
5551 	},
5552 	.rx = {
5553 		.desc_size = sizeof(struct mtk_rx_dma_v2),
5554 		.irq_done_mask = MTK_RX_DONE_INT_V2,
5555 		.dma_l4_valid = RX_DMA_L4_VALID_V2,
5556 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5557 		.dma_len_offset = 8,
5558 		.dma_size = MTK_DMA_SIZE(2K),
5559 	},
5560 };
5561 
5562 static const struct mtk_soc_data rt5350_data = {
5563 	.reg_map = &mt7628_reg_map,
5564 	.caps = MT7628_CAPS,
5565 	.hw_features = MTK_HW_FEATURES_MT7628,
5566 	.required_clks = MT7628_CLKS_BITMAP,
5567 	.required_pctl = false,
5568 	.version = 1,
5569 	.tx = {
5570 		.desc_size = sizeof(struct mtk_tx_dma),
5571 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5572 		.dma_len_offset = 16,
5573 		.dma_size = MTK_DMA_SIZE(2K),
5574 	},
5575 	.rx = {
5576 		.desc_size = sizeof(struct mtk_rx_dma),
5577 		.irq_done_mask = MTK_RX_DONE_INT,
5578 		.dma_l4_valid = RX_DMA_L4_VALID_PDMA,
5579 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5580 		.dma_len_offset = 16,
5581 		.dma_size = MTK_DMA_SIZE(2K),
5582 	},
5583 };
5584 
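/* Bind each devicetree compatible string to its SoC configuration */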
5585 const struct of_device_id of_mtk_match[] = {
5586 	{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data },
5587 	{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
5588 	{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data },
5589 	{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data },
5590 	{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data },
5591 	{ .compatible = "mediatek,mt7981-eth", .data = &mt7981_data },
5592 	{ .compatible = "mediatek,mt7986-eth", .data = &mt7986_data },
5593 	{ .compatible = "mediatek,mt7988-eth", .data = &mt7988_data },
5594 	{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data },
5595 	{},
5596 };
5597 MODULE_DEVICE_TABLE(of, of_mtk_match);
5598 
5599 static struct platform_driver mtk_driver = {
5600 	.probe = mtk_probe,
5601 	.remove = mtk_remove,
5602 	.driver = {
5603 		.name = "mtk_soc_eth",
5604 		.of_match_table = of_mtk_match,
5605 	},
5606 };
5607 
5608 module_platform_driver(mtk_driver);
5609 
5610 MODULE_LICENSE("GPL");
5611 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
5612 MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
5613 MODULE_IMPORT_NS("NETDEV_INTERNAL");
5614