xref: /linux/drivers/net/ethernet/mediatek/mtk_eth_soc.c (revision abacaf559950eec0d99d37ff6b92049409af5943)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *
4  *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
5  *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
6  *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
7  */
8 
9 #include <linux/of.h>
10 #include <linux/of_mdio.h>
11 #include <linux/of_net.h>
12 #include <linux/of_address.h>
13 #include <linux/mfd/syscon.h>
14 #include <linux/platform_device.h>
15 #include <linux/regmap.h>
16 #include <linux/clk.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/if_vlan.h>
19 #include <linux/reset.h>
20 #include <linux/tcp.h>
21 #include <linux/interrupt.h>
22 #include <linux/pinctrl/devinfo.h>
23 #include <linux/phylink.h>
24 #include <linux/pcs/pcs-mtk-lynxi.h>
25 #include <linux/jhash.h>
26 #include <linux/bitfield.h>
27 #include <net/dsa.h>
28 #include <net/dst_metadata.h>
29 #include <net/page_pool/helpers.h>
30 #include <linux/genalloc.h>
31 
32 #include "mtk_eth_soc.h"
33 #include "mtk_wed.h"
34 
35 static int mtk_msg_level = -1;
36 module_param_named(msg_level, mtk_msg_level, int, 0);
37 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
38 
39 #define MTK_ETHTOOL_STAT(x) { #x, \
40 			      offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
41 
42 #define MTK_ETHTOOL_XDP_STAT(x) { #x, \
43 				  offsetof(struct mtk_hw_stats, xdp_stats.x) / \
44 				  sizeof(u64) }
45 
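/* Per-SoC register offset maps. The active map is reached through
 * eth->soc->reg_map, so the datapath code below stays layout-agnostic
 * across NETSYS generations.
 */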
46 static const struct mtk_reg_map mtk_reg_map = {
47 	.tx_irq_mask		= 0x1a1c,
48 	.tx_irq_status		= 0x1a18,
49 	.pdma = {
50 		.rx_ptr		= 0x0900,
51 		.rx_cnt_cfg	= 0x0904,
52 		.pcrx_ptr	= 0x0908,
53 		.glo_cfg	= 0x0a04,
54 		.rst_idx	= 0x0a08,
55 		.delay_irq	= 0x0a0c,
56 		.irq_status	= 0x0a20,
57 		.irq_mask	= 0x0a28,
58 		.adma_rx_dbg0	= 0x0a38,
59 		.int_grp	= 0x0a50,
60 	},
61 	.qdma = {
62 		.qtx_cfg	= 0x1800,
63 		.qtx_sch	= 0x1804,
64 		.rx_ptr		= 0x1900,
65 		.rx_cnt_cfg	= 0x1904,
66 		.qcrx_ptr	= 0x1908,
67 		.glo_cfg	= 0x1a04,
68 		.rst_idx	= 0x1a08,
69 		.delay_irq	= 0x1a0c,
70 		.fc_th		= 0x1a10,
71 		.tx_sch_rate	= 0x1a14,
72 		.int_grp	= 0x1a20,
73 		.hred		= 0x1a44,
74 		.ctx_ptr	= 0x1b00,
75 		.dtx_ptr	= 0x1b04,
76 		.crx_ptr	= 0x1b10,
77 		.drx_ptr	= 0x1b14,
78 		.fq_head	= 0x1b20,
79 		.fq_tail	= 0x1b24,
80 		.fq_count	= 0x1b28,
81 		.fq_blen	= 0x1b2c,
82 	},
83 	.gdm1_cnt		= 0x2400,
84 	.gdma_to_ppe	= {
85 		[0]		= 0x4444,
86 	},
87 	.ppe_base		= 0x0c00,
88 	.wdma_base = {
89 		[0]		= 0x2800,
90 		[1]		= 0x2c00,
91 	},
92 	.pse_iq_sta		= 0x0110,
93 	.pse_oq_sta		= 0x0118,
94 };
95 
96 static const struct mtk_reg_map mt7628_reg_map = {
97 	.tx_irq_mask		= 0x0a28,
98 	.tx_irq_status		= 0x0a20,
99 	.pdma = {
100 		.rx_ptr		= 0x0900,
101 		.rx_cnt_cfg	= 0x0904,
102 		.pcrx_ptr	= 0x0908,
103 		.glo_cfg	= 0x0a04,
104 		.rst_idx	= 0x0a08,
105 		.delay_irq	= 0x0a0c,
106 		.irq_status	= 0x0a20,
107 		.irq_mask	= 0x0a28,
108 		.int_grp	= 0x0a50,
109 	},
110 };
111 
112 static const struct mtk_reg_map mt7986_reg_map = {
113 	.tx_irq_mask		= 0x461c,
114 	.tx_irq_status		= 0x4618,
115 	.pdma = {
116 		.rx_ptr		= 0x4100,
117 		.rx_cnt_cfg	= 0x4104,
118 		.pcrx_ptr	= 0x4108,
119 		.glo_cfg	= 0x4204,
120 		.rst_idx	= 0x4208,
121 		.delay_irq	= 0x420c,
122 		.irq_status	= 0x4220,
123 		.irq_mask	= 0x4228,
124 		.adma_rx_dbg0	= 0x4238,
125 		.int_grp	= 0x4250,
126 	},
127 	.qdma = {
128 		.qtx_cfg	= 0x4400,
129 		.qtx_sch	= 0x4404,
130 		.rx_ptr		= 0x4500,
131 		.rx_cnt_cfg	= 0x4504,
132 		.qcrx_ptr	= 0x4508,
133 		.glo_cfg	= 0x4604,
134 		.rst_idx	= 0x4608,
135 		.delay_irq	= 0x460c,
136 		.fc_th		= 0x4610,
137 		.int_grp	= 0x4620,
138 		.hred		= 0x4644,
139 		.ctx_ptr	= 0x4700,
140 		.dtx_ptr	= 0x4704,
141 		.crx_ptr	= 0x4710,
142 		.drx_ptr	= 0x4714,
143 		.fq_head	= 0x4720,
144 		.fq_tail	= 0x4724,
145 		.fq_count	= 0x4728,
146 		.fq_blen	= 0x472c,
147 		.tx_sch_rate	= 0x4798,
148 	},
149 	.gdm1_cnt		= 0x1c00,
150 	.gdma_to_ppe	= {
151 		[0]		= 0x3333,
152 		[1]		= 0x4444,
153 	},
154 	.ppe_base		= 0x2000,
155 	.wdma_base = {
156 		[0]		= 0x4800,
157 		[1]		= 0x4c00,
158 	},
159 	.pse_iq_sta		= 0x0180,
160 	.pse_oq_sta		= 0x01a0,
161 };
162 
163 static const struct mtk_reg_map mt7988_reg_map = {
164 	.tx_irq_mask		= 0x461c,
165 	.tx_irq_status		= 0x4618,
166 	.pdma = {
167 		.rx_ptr		= 0x6900,
168 		.rx_cnt_cfg	= 0x6904,
169 		.pcrx_ptr	= 0x6908,
170 		.glo_cfg	= 0x6a04,
171 		.rst_idx	= 0x6a08,
172 		.delay_irq	= 0x6a0c,
173 		.irq_status	= 0x6a20,
174 		.irq_mask	= 0x6a28,
175 		.adma_rx_dbg0	= 0x6a38,
176 		.int_grp	= 0x6a50,
177 	},
178 	.qdma = {
179 		.qtx_cfg	= 0x4400,
180 		.qtx_sch	= 0x4404,
181 		.rx_ptr		= 0x4500,
182 		.rx_cnt_cfg	= 0x4504,
183 		.qcrx_ptr	= 0x4508,
184 		.glo_cfg	= 0x4604,
185 		.rst_idx	= 0x4608,
186 		.delay_irq	= 0x460c,
187 		.fc_th		= 0x4610,
188 		.int_grp	= 0x4620,
189 		.hred		= 0x4644,
190 		.ctx_ptr	= 0x4700,
191 		.dtx_ptr	= 0x4704,
192 		.crx_ptr	= 0x4710,
193 		.drx_ptr	= 0x4714,
194 		.fq_head	= 0x4720,
195 		.fq_tail	= 0x4724,
196 		.fq_count	= 0x4728,
197 		.fq_blen	= 0x472c,
198 		.tx_sch_rate	= 0x4798,
199 	},
200 	.gdm1_cnt		= 0x1c00,
201 	.gdma_to_ppe	= {
202 		[0]		= 0x3333,
203 		[1]		= 0x4444,
204 		[2]		= 0xcccc,
205 	},
206 	.ppe_base		= 0x2000,
207 	.wdma_base = {
208 		[0]		= 0x4800,
209 		[1]		= 0x4c00,
210 		[2]		= 0x5000,
211 	},
212 	.pse_iq_sta		= 0x0180,
213 	.pse_oq_sta		= 0x01a0,
214 };
215 
216 /* strings used by ethtool */
217 static const struct mtk_ethtool_stats {
218 	char str[ETH_GSTRING_LEN];
219 	u32 offset;
220 } mtk_ethtool_stats[] = {
221 	MTK_ETHTOOL_STAT(tx_bytes),
222 	MTK_ETHTOOL_STAT(tx_packets),
223 	MTK_ETHTOOL_STAT(tx_skip),
224 	MTK_ETHTOOL_STAT(tx_collisions),
225 	MTK_ETHTOOL_STAT(rx_bytes),
226 	MTK_ETHTOOL_STAT(rx_packets),
227 	MTK_ETHTOOL_STAT(rx_overflow),
228 	MTK_ETHTOOL_STAT(rx_fcs_errors),
229 	MTK_ETHTOOL_STAT(rx_short_errors),
230 	MTK_ETHTOOL_STAT(rx_long_errors),
231 	MTK_ETHTOOL_STAT(rx_checksum_errors),
232 	MTK_ETHTOOL_STAT(rx_flow_control_packets),
233 	MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect),
234 	MTK_ETHTOOL_XDP_STAT(rx_xdp_pass),
235 	MTK_ETHTOOL_XDP_STAT(rx_xdp_drop),
236 	MTK_ETHTOOL_XDP_STAT(rx_xdp_tx),
237 	MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors),
238 	MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit),
239 	MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors),
240 };
241 
242 static const char * const mtk_clks_source_name[] = {
243 	"ethif",
244 	"sgmiitop",
245 	"esw",
246 	"gp0",
247 	"gp1",
248 	"gp2",
249 	"gp3",
250 	"xgp1",
251 	"xgp2",
252 	"xgp3",
253 	"crypto",
254 	"fe",
255 	"trgpll",
256 	"sgmii_tx250m",
257 	"sgmii_rx250m",
258 	"sgmii_cdr_ref",
259 	"sgmii_cdr_fb",
260 	"sgmii2_tx250m",
261 	"sgmii2_rx250m",
262 	"sgmii2_cdr_ref",
263 	"sgmii2_cdr_fb",
264 	"sgmii_ck",
265 	"eth2pll",
266 	"wocpu0",
267 	"wocpu1",
268 	"netsys0",
269 	"netsys1",
270 	"ethwarp_wocpu2",
271 	"ethwarp_wocpu1",
272 	"ethwarp_wocpu0",
273 	"top_sgm0_sel",
274 	"top_sgm1_sel",
275 	"top_eth_gmii_sel",
276 	"top_eth_refck_50m_sel",
277 	"top_eth_sys_200m_sel",
278 	"top_eth_sys_sel",
279 	"top_eth_xgmii_sel",
280 	"top_eth_mii_sel",
281 	"top_netsys_sel",
282 	"top_netsys_500m_sel",
283 	"top_netsys_pao_2x_sel",
284 	"top_netsys_sync_250m_sel",
285 	"top_netsys_ppefb_250m_sel",
286 	"top_netsys_warp_sel",
287 };
288 
289 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
290 {
291 	__raw_writel(val, eth->base + reg);
292 }
293 
294 u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
295 {
296 	return __raw_readl(eth->base + reg);
297 }
298 
299 u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg)
300 {
301 	u32 val;
302 
303 	val = mtk_r32(eth, reg);
304 	val &= ~mask;
305 	val |= set;
306 	mtk_w32(eth, val, reg);
307 	return reg;
308 }
309 
310 static int mtk_mdio_busy_wait(struct mtk_eth *eth)
311 {
312 	unsigned long t_start = jiffies;
313 
314 	while (1) {
315 		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
316 			return 0;
317 		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
318 			break;
319 		cond_resched();
320 	}
321 
322 	dev_err(eth->dev, "mdio: MDIO timeout\n");
323 	return -ETIMEDOUT;
324 }
325 
326 static int _mtk_mdio_write_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
327 			       u32 write_data)
328 {
329 	int ret;
330 
331 	ret = mtk_mdio_busy_wait(eth);
332 	if (ret < 0)
333 		return ret;
334 
335 	mtk_w32(eth, PHY_IAC_ACCESS |
336 		PHY_IAC_START_C22 |
337 		PHY_IAC_CMD_WRITE |
338 		PHY_IAC_REG(phy_reg) |
339 		PHY_IAC_ADDR(phy_addr) |
340 		PHY_IAC_DATA(write_data),
341 		MTK_PHY_IAC);
342 
343 	ret = mtk_mdio_busy_wait(eth);
344 	if (ret < 0)
345 		return ret;
346 
347 	return 0;
348 }
349 
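/* Clause 45 accesses take two IAC operations: an address cycle that latches
 * the target register within the MMD (devad), followed by the data cycle
 * that performs the actual write or read.
 */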
350 static int _mtk_mdio_write_c45(struct mtk_eth *eth, u32 phy_addr,
351 			       u32 devad, u32 phy_reg, u32 write_data)
352 {
353 	int ret;
354 
355 	ret = mtk_mdio_busy_wait(eth);
356 	if (ret < 0)
357 		return ret;
358 
359 	mtk_w32(eth, PHY_IAC_ACCESS |
360 		PHY_IAC_START_C45 |
361 		PHY_IAC_CMD_C45_ADDR |
362 		PHY_IAC_REG(devad) |
363 		PHY_IAC_ADDR(phy_addr) |
364 		PHY_IAC_DATA(phy_reg),
365 		MTK_PHY_IAC);
366 
367 	ret = mtk_mdio_busy_wait(eth);
368 	if (ret < 0)
369 		return ret;
370 
371 	mtk_w32(eth, PHY_IAC_ACCESS |
372 		PHY_IAC_START_C45 |
373 		PHY_IAC_CMD_WRITE |
374 		PHY_IAC_REG(devad) |
375 		PHY_IAC_ADDR(phy_addr) |
376 		PHY_IAC_DATA(write_data),
377 		MTK_PHY_IAC);
378 
379 	ret = mtk_mdio_busy_wait(eth);
380 	if (ret < 0)
381 		return ret;
382 
383 	return 0;
384 }
385 
386 static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
387 {
388 	int ret;
389 
390 	ret = mtk_mdio_busy_wait(eth);
391 	if (ret < 0)
392 		return ret;
393 
394 	mtk_w32(eth, PHY_IAC_ACCESS |
395 		PHY_IAC_START_C22 |
396 		PHY_IAC_CMD_C22_READ |
397 		PHY_IAC_REG(phy_reg) |
398 		PHY_IAC_ADDR(phy_addr),
399 		MTK_PHY_IAC);
400 
401 	ret = mtk_mdio_busy_wait(eth);
402 	if (ret < 0)
403 		return ret;
404 
405 	return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
406 }
407 
408 static int _mtk_mdio_read_c45(struct mtk_eth *eth, u32 phy_addr,
409 			      u32 devad, u32 phy_reg)
410 {
411 	int ret;
412 
413 	ret = mtk_mdio_busy_wait(eth);
414 	if (ret < 0)
415 		return ret;
416 
417 	mtk_w32(eth, PHY_IAC_ACCESS |
418 		PHY_IAC_START_C45 |
419 		PHY_IAC_CMD_C45_ADDR |
420 		PHY_IAC_REG(devad) |
421 		PHY_IAC_ADDR(phy_addr) |
422 		PHY_IAC_DATA(phy_reg),
423 		MTK_PHY_IAC);
424 
425 	ret = mtk_mdio_busy_wait(eth);
426 	if (ret < 0)
427 		return ret;
428 
429 	mtk_w32(eth, PHY_IAC_ACCESS |
430 		PHY_IAC_START_C45 |
431 		PHY_IAC_CMD_C45_READ |
432 		PHY_IAC_REG(devad) |
433 		PHY_IAC_ADDR(phy_addr),
434 		MTK_PHY_IAC);
435 
436 	ret = mtk_mdio_busy_wait(eth);
437 	if (ret < 0)
438 		return ret;
439 
440 	return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
441 }
442 
443 static int mtk_mdio_write_c22(struct mii_bus *bus, int phy_addr,
444 			      int phy_reg, u16 val)
445 {
446 	struct mtk_eth *eth = bus->priv;
447 
448 	return _mtk_mdio_write_c22(eth, phy_addr, phy_reg, val);
449 }
450 
451 static int mtk_mdio_write_c45(struct mii_bus *bus, int phy_addr,
452 			      int devad, int phy_reg, u16 val)
453 {
454 	struct mtk_eth *eth = bus->priv;
455 
456 	return _mtk_mdio_write_c45(eth, phy_addr, devad, phy_reg, val);
457 }
458 
459 static int mtk_mdio_read_c22(struct mii_bus *bus, int phy_addr, int phy_reg)
460 {
461 	struct mtk_eth *eth = bus->priv;
462 
463 	return _mtk_mdio_read_c22(eth, phy_addr, phy_reg);
464 }
465 
466 static int mtk_mdio_read_c45(struct mii_bus *bus, int phy_addr, int devad,
467 			     int phy_reg)
468 {
469 	struct mtk_eth *eth = bus->priv;
470 
471 	return _mtk_mdio_read_c45(eth, phy_addr, devad, phy_reg);
472 }
473 
474 static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
475 				     phy_interface_t interface)
476 {
477 	u32 val;
478 
479 	val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
480 		ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
481 
482 	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
483 			   ETHSYS_TRGMII_MT7621_MASK, val);
484 
485 	return 0;
486 }
487 
488 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
489 				   phy_interface_t interface)
490 {
491 	int ret;
492 
493 	if (interface == PHY_INTERFACE_MODE_TRGMII) {
494 		mtk_w32(eth, TRGMII_MODE, INTF_MODE);
495 		ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], 500000000);
496 		if (ret)
497 			dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
498 		return;
499 	}
500 
501 	dev_err(eth->dev, "Missing PLL configuration, ethernet may not work\n");
502 }
503 
504 static void mtk_setup_bridge_switch(struct mtk_eth *eth)
505 {
506 	/* Force Port1 XGMAC Link Up */
507 	mtk_m32(eth, 0, MTK_XGMAC_FORCE_MODE(MTK_GMAC1_ID),
508 		MTK_XGMAC_STS(MTK_GMAC1_ID));
509 
510 	/* Adjust GSW bridge IPG to 11 */
511 	mtk_m32(eth, GSWTX_IPG_MASK | GSWRX_IPG_MASK,
512 		(GSW_IPG_11 << GSWTX_IPG_SHIFT) |
513 		(GSW_IPG_11 << GSWRX_IPG_SHIFT),
514 		MTK_GSW_CFG);
515 }
516 
517 static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
518 					      phy_interface_t interface)
519 {
520 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
521 					   phylink_config);
522 	struct mtk_eth *eth = mac->hw;
523 	unsigned int sid;
524 
525 	if (interface == PHY_INTERFACE_MODE_SGMII ||
526 	    phy_interface_mode_is_8023z(interface)) {
527 		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
528 		       0 : mac->id;
529 
530 		return eth->sgmii_pcs[sid];
531 	}
532 
533 	return NULL;
534 }
535 
536 static int mtk_mac_prepare(struct phylink_config *config, unsigned int mode,
537 			   phy_interface_t iface)
538 {
539 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
540 					   phylink_config);
541 	struct mtk_eth *eth = mac->hw;
542 
543 	if (mtk_interface_mode_is_xgmii(eth, iface) &&
544 	    mac->id != MTK_GMAC1_ID) {
545 		mtk_m32(mac->hw, XMAC_MCR_TRX_DISABLE,
546 			XMAC_MCR_TRX_DISABLE, MTK_XMAC_MCR(mac->id));
547 
548 		mtk_m32(mac->hw, MTK_XGMAC_FORCE_MODE(mac->id) |
549 				 MTK_XGMAC_FORCE_LINK(mac->id),
550 			MTK_XGMAC_FORCE_MODE(mac->id), MTK_XGMAC_STS(mac->id));
551 	}
552 
553 	return 0;
554 }
555 
556 static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
557 			   const struct phylink_link_state *state)
558 {
559 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
560 					   phylink_config);
561 	struct mtk_eth *eth = mac->hw;
562 	int val, ge_mode, err = 0;
563 	u32 i;
564 
565 	/* MT76x8 has no hardware settings for the MAC */
566 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
567 	    mac->interface != state->interface) {
568 		/* Setup soc pin functions */
569 		switch (state->interface) {
570 		case PHY_INTERFACE_MODE_TRGMII:
571 		case PHY_INTERFACE_MODE_RGMII_TXID:
572 		case PHY_INTERFACE_MODE_RGMII_RXID:
573 		case PHY_INTERFACE_MODE_RGMII_ID:
574 		case PHY_INTERFACE_MODE_RGMII:
575 		case PHY_INTERFACE_MODE_MII:
576 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
577 				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
578 				if (err)
579 					goto init_err;
580 			}
581 			break;
582 		case PHY_INTERFACE_MODE_1000BASEX:
583 		case PHY_INTERFACE_MODE_2500BASEX:
584 		case PHY_INTERFACE_MODE_SGMII:
585 			err = mtk_gmac_sgmii_path_setup(eth, mac->id);
586 			if (err)
587 				goto init_err;
588 			break;
589 		case PHY_INTERFACE_MODE_GMII:
590 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
591 				err = mtk_gmac_gephy_path_setup(eth, mac->id);
592 				if (err)
593 					goto init_err;
594 			}
595 			break;
596 		case PHY_INTERFACE_MODE_INTERNAL:
597 			if (mac->id == MTK_GMAC2_ID &&
598 			    MTK_HAS_CAPS(eth->soc->caps, MTK_2P5GPHY)) {
599 				err = mtk_gmac_2p5gphy_path_setup(eth, mac->id);
600 				if (err)
601 					goto init_err;
602 			}
603 			break;
604 		default:
605 			goto err_phy;
606 		}
607 
608 		/* Setup clock for 1st gmac */
609 		if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
610 		    !phy_interface_mode_is_8023z(state->interface) &&
611 		    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
612 			if (MTK_HAS_CAPS(mac->hw->soc->caps,
613 					 MTK_TRGMII_MT7621_CLK)) {
614 				if (mt7621_gmac0_rgmii_adjust(mac->hw,
615 							      state->interface))
616 					goto err_phy;
617 			} else {
618 				mtk_gmac0_rgmii_adjust(mac->hw,
619 						       state->interface);
620 
621 				/* mt7623_pad_clk_setup */
622 				for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
623 					mtk_w32(mac->hw,
624 						TD_DM_DRVP(8) | TD_DM_DRVN(8),
625 						TRGMII_TD_ODT(i));
626 
627 				/* Assert/release MT7623 RXC reset */
628 				mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
629 					TRGMII_RCK_CTRL);
630 				mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
631 			}
632 		}
633 
634 		switch (state->interface) {
635 		case PHY_INTERFACE_MODE_MII:
636 		case PHY_INTERFACE_MODE_GMII:
637 			ge_mode = 1;
638 			break;
639 		default:
640 			ge_mode = 0;
641 			break;
642 		}
643 
644 		/* put the gmac into the right mode */
645 		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
646 		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
647 		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
648 		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
649 
650 		mac->interface = state->interface;
651 	}
652 
653 	/* SGMII */
654 	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
655 	    phy_interface_mode_is_8023z(state->interface)) {
656 		/* The path from GMAC to SGMII will be enabled once the
657 		 * SGMIISYS setup is done.
658 		 */
659 		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
660 
661 		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
662 				   SYSCFG0_SGMII_MASK,
663 				   ~(u32)SYSCFG0_SGMII_MASK);
664 
665 		/* Save the syscfg0 value for mac_finish */
666 		mac->syscfg0 = val;
667 	} else if (phylink_autoneg_inband(mode)) {
668 		dev_err(eth->dev,
669 			"In-band mode not supported in non SGMII mode!\n");
670 		return;
671 	}
672 
673 	/* Setup gmac */
674 	if (mtk_interface_mode_is_xgmii(eth, state->interface)) {
675 		mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id));
676 		mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id));
677 
678 		if (mac->id == MTK_GMAC1_ID)
679 			mtk_setup_bridge_switch(eth);
680 	}
681 
682 	return;
683 
684 err_phy:
685 	dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
686 		mac->id, phy_modes(state->interface));
687 	return;
688 
689 init_err:
690 	dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
691 		mac->id, phy_modes(state->interface), err);
692 }
693 
694 static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
695 			  phy_interface_t interface)
696 {
697 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
698 					   phylink_config);
699 	struct mtk_eth *eth = mac->hw;
700 	u32 mcr_cur, mcr_new;
701 
702 	/* Enable SGMII */
703 	if (interface == PHY_INTERFACE_MODE_SGMII ||
704 	    phy_interface_mode_is_8023z(interface))
705 		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
706 				   SYSCFG0_SGMII_MASK, mac->syscfg0);
707 
708 	/* Setup gmac */
709 	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
710 	mcr_new = mcr_cur;
711 	mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
712 		   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_RX_FIFO_CLR_DIS;
713 
714 	/* Only update control register when needed! */
715 	if (mcr_new != mcr_cur)
716 		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
717 
718 	return 0;
719 }
720 
721 static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
722 			      phy_interface_t interface)
723 {
724 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
725 					   phylink_config);
726 
727 	if (!mtk_interface_mode_is_xgmii(mac->hw, interface)) {
728 		/* GMAC modes */
729 		mtk_m32(mac->hw,
730 			MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK, 0,
731 			MTK_MAC_MCR(mac->id));
732 	} else if (mac->id != MTK_GMAC1_ID) {
733 		/* XGMAC except for built-in switch */
734 		mtk_m32(mac->hw, XMAC_MCR_TRX_DISABLE, XMAC_MCR_TRX_DISABLE,
735 			MTK_XMAC_MCR(mac->id));
736 		mtk_m32(mac->hw, MTK_XGMAC_FORCE_LINK(mac->id), 0,
737 			MTK_XGMAC_STS(mac->id));
738 	}
739 }
740 
741 static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
742 				int speed)
743 {
744 	const struct mtk_soc_data *soc = eth->soc;
745 	u32 ofs, val;
746 
747 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
748 		return;
749 
750 	val = MTK_QTX_SCH_MIN_RATE_EN |
751 	      /* minimum: 10 Mbps */
752 	      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
753 	      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
754 	      MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
755 	if (mtk_is_netsys_v1(eth))
756 		val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
757 
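	/* The MIN/MAX rate fields encode the shaper rate as
	 * mantissa * 10^exponent in kbit/s; the minimum above is
	 * 1 * 10^4 kbit/s = 10 Mbit/s.
	 */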
758 	if (IS_ENABLED(CONFIG_SOC_MT7621)) {
759 		switch (speed) {
760 		case SPEED_10:
761 			val |= MTK_QTX_SCH_MAX_RATE_EN |
762 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
763 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 2) |
764 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
765 			break;
766 		case SPEED_100:
767 			val |= MTK_QTX_SCH_MAX_RATE_EN |
768 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
769 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) |
770 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
771 			break;
772 		case SPEED_1000:
773 			val |= MTK_QTX_SCH_MAX_RATE_EN |
774 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 105) |
775 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
776 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
777 			break;
778 		default:
779 			break;
780 		}
781 	} else {
782 		switch (speed) {
783 		case SPEED_10:
784 			val |= MTK_QTX_SCH_MAX_RATE_EN |
785 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
786 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
787 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
788 			break;
789 		case SPEED_100:
790 			val |= MTK_QTX_SCH_MAX_RATE_EN |
791 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
792 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
793 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
794 			break;
795 		case SPEED_1000:
796 			val |= MTK_QTX_SCH_MAX_RATE_EN |
797 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
798 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 6) |
799 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
800 			break;
801 		default:
802 			break;
803 		}
804 	}
805 
806 	ofs = MTK_QTX_OFFSET * idx;
807 	mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
808 }
809 
810 static void mtk_gdm_mac_link_up(struct mtk_mac *mac,
811 				struct phy_device *phy,
812 				unsigned int mode, phy_interface_t interface,
813 				int speed, int duplex, bool tx_pause,
814 				bool rx_pause)
815 {
816 	u32 mcr;
817 
818 	mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
819 	mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
820 		 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
821 		 MAC_MCR_FORCE_RX_FC);
822 
823 	/* Configure speed */
824 	mac->speed = speed;
825 	switch (speed) {
826 	case SPEED_2500:
827 	case SPEED_1000:
828 		mcr |= MAC_MCR_SPEED_1000;
829 		break;
830 	case SPEED_100:
831 		mcr |= MAC_MCR_SPEED_100;
832 		break;
833 	}
834 
835 	/* Configure duplex */
836 	if (duplex == DUPLEX_FULL)
837 		mcr |= MAC_MCR_FORCE_DPX;
838 
839 	/* Configure pause modes - phylink will avoid these for half duplex */
840 	if (tx_pause)
841 		mcr |= MAC_MCR_FORCE_TX_FC;
842 	if (rx_pause)
843 		mcr |= MAC_MCR_FORCE_RX_FC;
844 
845 	mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK;
846 	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
847 }
848 
849 static void mtk_xgdm_mac_link_up(struct mtk_mac *mac,
850 				 struct phy_device *phy,
851 				 unsigned int mode, phy_interface_t interface,
852 				 int speed, int duplex, bool tx_pause,
853 				 bool rx_pause)
854 {
855 	u32 mcr;
856 
857 	if (mac->id == MTK_GMAC1_ID)
858 		return;
859 
860 	/* Eliminate the interference (before link-up) caused by PHY noise */
861 	mtk_m32(mac->hw, XMAC_LOGIC_RST, 0, MTK_XMAC_LOGIC_RST(mac->id));
862 	mdelay(20);
863 	mtk_m32(mac->hw, XMAC_GLB_CNTCLR, XMAC_GLB_CNTCLR,
864 		MTK_XMAC_CNT_CTRL(mac->id));
865 
866 	mtk_m32(mac->hw, MTK_XGMAC_FORCE_LINK(mac->id),
867 		MTK_XGMAC_FORCE_LINK(mac->id), MTK_XGMAC_STS(mac->id));
868 
869 	mcr = mtk_r32(mac->hw, MTK_XMAC_MCR(mac->id));
870 	mcr &= ~(XMAC_MCR_FORCE_TX_FC | XMAC_MCR_FORCE_RX_FC |
871 		 XMAC_MCR_TRX_DISABLE);
872 	/* Configure pause modes -
873 	 * phylink will avoid these for half duplex
874 	 */
875 	if (tx_pause)
876 		mcr |= XMAC_MCR_FORCE_TX_FC;
877 	if (rx_pause)
878 		mcr |= XMAC_MCR_FORCE_RX_FC;
879 
880 	mtk_w32(mac->hw, mcr, MTK_XMAC_MCR(mac->id));
881 }
882 
883 static void mtk_mac_link_up(struct phylink_config *config,
884 			    struct phy_device *phy,
885 			    unsigned int mode, phy_interface_t interface,
886 			    int speed, int duplex, bool tx_pause, bool rx_pause)
887 {
888 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
889 					   phylink_config);
890 
891 	if (mtk_interface_mode_is_xgmii(mac->hw, interface))
892 		mtk_xgdm_mac_link_up(mac, phy, mode, interface, speed, duplex,
893 				     tx_pause, rx_pause);
894 	else
895 		mtk_gdm_mac_link_up(mac, phy, mode, interface, speed, duplex,
896 				    tx_pause, rx_pause);
897 }
898 
899 static void mtk_mac_disable_tx_lpi(struct phylink_config *config)
900 {
901 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
902 					   phylink_config);
903 	struct mtk_eth *eth = mac->hw;
904 
905 	mtk_m32(eth, MAC_MCR_EEE100M | MAC_MCR_EEE1G, 0, MTK_MAC_MCR(mac->id));
906 }
907 
908 static int mtk_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
909 				 bool tx_clk_stop)
910 {
911 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
912 					   phylink_config);
913 	struct mtk_eth *eth = mac->hw;
914 	u32 val;
915 
916 	if (mtk_interface_mode_is_xgmii(eth, mac->interface))
917 		return -EOPNOTSUPP;
918 
919 	/* Tx idle timer in ms */
920 	timer = DIV_ROUND_UP(timer, 1000);
921 
922 	/* If the timer is zero, then set LPI_MODE, which allows the
923 	 * system to enter LPI mode immediately rather than waiting for
924 	 * the LPI threshold.
925 	 */
926 	if (!timer)
927 		val = MAC_EEE_LPI_MODE;
928 	else if (FIELD_FIT(MAC_EEE_LPI_TXIDLE_THD, timer))
929 		val = FIELD_PREP(MAC_EEE_LPI_TXIDLE_THD, timer);
930 	else
931 		val = MAC_EEE_LPI_TXIDLE_THD;
932 
933 	if (tx_clk_stop)
934 		val |= MAC_EEE_CKG_TXIDLE;
935 
936 	/* PHY Wake-up time, this field does not have a reset value, so use the
937 	 * reset value from MT7531 (36us for 100M and 17us for 1000M).
938 	 */
939 	val |= FIELD_PREP(MAC_EEE_WAKEUP_TIME_1000, 17) |
940 	       FIELD_PREP(MAC_EEE_WAKEUP_TIME_100, 36);
941 
942 	mtk_w32(eth, val, MTK_MAC_EEECR(mac->id));
943 	mtk_m32(eth, 0, MAC_MCR_EEE100M | MAC_MCR_EEE1G, MTK_MAC_MCR(mac->id));
944 
945 	return 0;
946 }
947 
948 static const struct phylink_mac_ops mtk_phylink_ops = {
949 	.mac_prepare = mtk_mac_prepare,
950 	.mac_select_pcs = mtk_mac_select_pcs,
951 	.mac_config = mtk_mac_config,
952 	.mac_finish = mtk_mac_finish,
953 	.mac_link_down = mtk_mac_link_down,
954 	.mac_link_up = mtk_mac_link_up,
955 	.mac_disable_tx_lpi = mtk_mac_disable_tx_lpi,
956 	.mac_enable_tx_lpi = mtk_mac_enable_tx_lpi,
957 };
958 
959 static void mtk_mdio_config(struct mtk_eth *eth)
960 {
961 	u32 val;
962 
963 	/* Configure MDC Divider */
964 	val = FIELD_PREP(PPSC_MDC_CFG, eth->mdc_divider);
965 
966 	/* Configure MDC Turbo Mode */
967 	if (mtk_is_netsys_v3_or_greater(eth))
968 		mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3);
969 	else
970 		val |= PPSC_MDC_TURBO;
971 
972 	mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC);
973 }
974 
975 static int mtk_mdio_init(struct mtk_eth *eth)
976 {
977 	unsigned int max_clk = 2500000;
978 	struct device_node *mii_np;
979 	int ret;
980 	u32 val;
981 
982 	mii_np = of_get_available_child_by_name(eth->dev->of_node, "mdio-bus");
983 	if (!mii_np) {
984 		dev_err(eth->dev, "no %s child node found", "mdio-bus");
985 		return -ENODEV;
986 	}
987 
988 	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
989 	if (!eth->mii_bus) {
990 		ret = -ENOMEM;
991 		goto err_put_node;
992 	}
993 
994 	eth->mii_bus->name = "mdio";
995 	eth->mii_bus->read = mtk_mdio_read_c22;
996 	eth->mii_bus->write = mtk_mdio_write_c22;
997 	eth->mii_bus->read_c45 = mtk_mdio_read_c45;
998 	eth->mii_bus->write_c45 = mtk_mdio_write_c45;
999 	eth->mii_bus->priv = eth;
1000 	eth->mii_bus->parent = eth->dev;
1001 
1002 	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
1003 
1004 	if (!of_property_read_u32(mii_np, "clock-frequency", &val)) {
1005 		if (val > MDC_MAX_FREQ || val < MDC_MAX_FREQ / MDC_MAX_DIVIDER) {
1006 			dev_err(eth->dev, "MDIO clock frequency out of range");
1007 			ret = -EINVAL;
1008 			goto err_put_node;
1009 		}
1010 		max_clk = val;
1011 	}
1012 	eth->mdc_divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
1013 	mtk_mdio_config(eth);
1014 	dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / eth->mdc_divider);
1015 	ret = of_mdiobus_register(eth->mii_bus, mii_np);
1016 
1017 err_put_node:
1018 	of_node_put(mii_np);
1019 	return ret;
1020 }
1021 
1022 static void mtk_mdio_cleanup(struct mtk_eth *eth)
1023 {
1024 	if (!eth->mii_bus)
1025 		return;
1026 
1027 	mdiobus_unregister(eth->mii_bus);
1028 }
1029 
1030 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
1031 {
1032 	unsigned long flags;
1033 	u32 val;
1034 
1035 	spin_lock_irqsave(&eth->tx_irq_lock, flags);
1036 	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
1037 	mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
1038 	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
1039 }
1040 
1041 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
1042 {
1043 	unsigned long flags;
1044 	u32 val;
1045 
1046 	spin_lock_irqsave(&eth->tx_irq_lock, flags);
1047 	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
1048 	mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
1049 	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
1050 }
1051 
1052 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
1053 {
1054 	unsigned long flags;
1055 	u32 val;
1056 
1057 	spin_lock_irqsave(&eth->rx_irq_lock, flags);
1058 	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
1059 	mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
1060 	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
1061 }
1062 
1063 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
1064 {
1065 	unsigned long flags;
1066 	u32 val;
1067 
1068 	spin_lock_irqsave(&eth->rx_irq_lock, flags);
1069 	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
1070 	mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
1071 	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
1072 }
1073 
1074 static int mtk_set_mac_address(struct net_device *dev, void *p)
1075 {
1076 	int ret = eth_mac_addr(dev, p);
1077 	struct mtk_mac *mac = netdev_priv(dev);
1078 	struct mtk_eth *eth = mac->hw;
1079 	const char *macaddr = dev->dev_addr;
1080 
1081 	if (ret)
1082 		return ret;
1083 
1084 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
1085 		return -EBUSY;
1086 
1087 	spin_lock_bh(&mac->hw->page_lock);
1088 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
1089 		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
1090 			MT7628_SDM_MAC_ADRH);
1091 		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
1092 			(macaddr[4] << 8) | macaddr[5],
1093 			MT7628_SDM_MAC_ADRL);
1094 	} else {
1095 		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
1096 			MTK_GDMA_MAC_ADRH(mac->id));
1097 		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
1098 			(macaddr[4] << 8) | macaddr[5],
1099 			MTK_GDMA_MAC_ADRL(mac->id));
1100 	}
1101 	spin_unlock_bh(&mac->hw->page_lock);
1102 
1103 	return 0;
1104 }
1105 
1106 void mtk_stats_update_mac(struct mtk_mac *mac)
1107 {
1108 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
1109 	struct mtk_eth *eth = mac->hw;
1110 
1111 	u64_stats_update_begin(&hw_stats->syncp);
1112 
1113 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
1114 		hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
1115 		hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
1116 		hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
1117 		hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
1118 		hw_stats->rx_checksum_errors +=
1119 			mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
1120 	} else {
1121 		const struct mtk_reg_map *reg_map = eth->soc->reg_map;
1122 		unsigned int offs = hw_stats->reg_offset;
1123 		u64 stats;
1124 
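		/* Byte counters are 64 bits wide: read the low word, then
		 * fold in the high word from the adjacent register.
		 */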
1125 		hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
1126 		stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
1127 		if (stats)
1128 			hw_stats->rx_bytes += (stats << 32);
1129 		hw_stats->rx_packets +=
1130 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
1131 		hw_stats->rx_overflow +=
1132 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
1133 		hw_stats->rx_fcs_errors +=
1134 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
1135 		hw_stats->rx_short_errors +=
1136 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
1137 		hw_stats->rx_long_errors +=
1138 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
1139 		hw_stats->rx_checksum_errors +=
1140 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
1141 		hw_stats->rx_flow_control_packets +=
1142 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
1143 
1144 		if (mtk_is_netsys_v3_or_greater(eth)) {
1145 			hw_stats->tx_skip +=
1146 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x50 + offs);
1147 			hw_stats->tx_collisions +=
1148 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x54 + offs);
1149 			hw_stats->tx_bytes +=
1150 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x40 + offs);
1151 			stats =  mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x44 + offs);
1152 			if (stats)
1153 				hw_stats->tx_bytes += (stats << 32);
1154 			hw_stats->tx_packets +=
1155 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x48 + offs);
1156 		} else {
1157 			hw_stats->tx_skip +=
1158 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
1159 			hw_stats->tx_collisions +=
1160 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
1161 			hw_stats->tx_bytes +=
1162 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
1163 			stats =  mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
1164 			if (stats)
1165 				hw_stats->tx_bytes += (stats << 32);
1166 			hw_stats->tx_packets +=
1167 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
1168 		}
1169 	}
1170 
1171 	u64_stats_update_end(&hw_stats->syncp);
1172 }
1173 
1174 static void mtk_stats_update(struct mtk_eth *eth)
1175 {
1176 	int i;
1177 
1178 	for (i = 0; i < MTK_MAX_DEVS; i++) {
1179 		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
1180 			continue;
1181 		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
1182 			mtk_stats_update_mac(eth->mac[i]);
1183 			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
1184 		}
1185 	}
1186 }
1187 
1188 static void mtk_get_stats64(struct net_device *dev,
1189 			    struct rtnl_link_stats64 *storage)
1190 {
1191 	struct mtk_mac *mac = netdev_priv(dev);
1192 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
1193 	unsigned int start;
1194 
1195 	if (netif_running(dev) && netif_device_present(dev)) {
1196 		if (spin_trylock_bh(&hw_stats->stats_lock)) {
1197 			mtk_stats_update_mac(mac);
1198 			spin_unlock_bh(&hw_stats->stats_lock);
1199 		}
1200 	}
1201 
1202 	do {
1203 		start = u64_stats_fetch_begin(&hw_stats->syncp);
1204 		storage->rx_packets = hw_stats->rx_packets;
1205 		storage->tx_packets = hw_stats->tx_packets;
1206 		storage->rx_bytes = hw_stats->rx_bytes;
1207 		storage->tx_bytes = hw_stats->tx_bytes;
1208 		storage->collisions = hw_stats->tx_collisions;
1209 		storage->rx_length_errors = hw_stats->rx_short_errors +
1210 			hw_stats->rx_long_errors;
1211 		storage->rx_over_errors = hw_stats->rx_overflow;
1212 		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
1213 		storage->rx_errors = hw_stats->rx_checksum_errors;
1214 		storage->tx_aborted_errors = hw_stats->tx_skip;
1215 	} while (u64_stats_fetch_retry(&hw_stats->syncp, start));
1216 
1217 	storage->tx_errors = dev->stats.tx_errors;
1218 	storage->rx_dropped = dev->stats.rx_dropped;
1219 	storage->tx_dropped = dev->stats.tx_dropped;
1220 }
1221 
1222 static inline int mtk_max_frag_size(int mtu)
1223 {
1224 	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
1225 	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K)
1226 		mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
1227 
1228 	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
1229 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1230 }
1231 
1232 static inline int mtk_max_buf_size(int frag_size)
1233 {
1234 	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
1235 		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1236 
1237 	WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K);
1238 
1239 	return buf_size;
1240 }
1241 
1242 static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
1243 			    struct mtk_rx_dma_v2 *dma_rxd)
1244 {
1245 	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
1246 	if (!(rxd->rxd2 & RX_DMA_DONE))
1247 		return false;
1248 
1249 	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
1250 	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
1251 	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
1252 	if (mtk_is_netsys_v3_or_greater(eth)) {
1253 		rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
1254 		rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
1255 	}
1256 
1257 	return true;
1258 }
1259 
1260 static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
1261 {
1262 	unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
1263 	unsigned long data;
1264 
1265 	data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
1266 				get_order(size));
1267 
1268 	return (void *)data;
1269 }
1270 
1271 static void *mtk_dma_ring_alloc(struct mtk_eth *eth, size_t size,
1272 				dma_addr_t *dma_handle, bool use_sram)
1273 {
1274 	void *dma_ring;
1275 
1276 	if (use_sram && eth->sram_pool) {
1277 		dma_ring = (void *)gen_pool_alloc(eth->sram_pool, size);
1278 		if (!dma_ring)
1279 			return dma_ring;
1280 		*dma_handle = gen_pool_virt_to_phys(eth->sram_pool,
1281 						    (unsigned long)dma_ring);
1282 	} else {
1283 		dma_ring = dma_alloc_coherent(eth->dma_dev, size, dma_handle,
1284 					      GFP_KERNEL);
1285 	}
1286 
1287 	return dma_ring;
1288 }
1289 
1290 static void mtk_dma_ring_free(struct mtk_eth *eth, size_t size, void *dma_ring,
1291 			      dma_addr_t dma_handle, bool in_sram)
1292 {
1293 	if (in_sram && eth->sram_pool)
1294 		gen_pool_free(eth->sram_pool, (unsigned long)dma_ring, size);
1295 	else
1296 		dma_free_coherent(eth->dma_dev, size, dma_ring, dma_handle);
1297 }
1298 
1299 /* the qdma core needs scratch memory to be setup */
1300 static int mtk_init_fq_dma(struct mtk_eth *eth)
1301 {
1302 	const struct mtk_soc_data *soc = eth->soc;
1303 	dma_addr_t phy_ring_tail;
1304 	int cnt = soc->tx.fq_dma_size;
1305 	dma_addr_t dma_addr;
1306 	int i, j, len;
1307 
1308 	eth->scratch_ring = mtk_dma_ring_alloc(eth, cnt * soc->tx.desc_size,
1309 					       &eth->phy_scratch_ring, true);
1310 
1311 	if (unlikely(!eth->scratch_ring))
1312 		return -ENOMEM;
1313 
1314 	phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
1315 
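	/* Populate the scratch ring: each descriptor points at a page-sized
	 * buffer via txd1 and is chained to the next descriptor via txd2,
	 * forming the QDMA free queue.
	 */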
1316 	for (j = 0; j < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); j++) {
1317 		len = min_t(int, cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH);
1318 		eth->scratch_head[j] = kcalloc(len, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
1319 
1320 		if (unlikely(!eth->scratch_head[j]))
1321 			return -ENOMEM;
1322 
1323 		dma_addr = dma_map_single(eth->dma_dev,
1324 					  eth->scratch_head[j], len * MTK_QDMA_PAGE_SIZE,
1325 					  DMA_FROM_DEVICE);
1326 
1327 		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
1328 			return -ENOMEM;
1329 
1330 		for (i = 0; i < len; i++) {
1331 			struct mtk_tx_dma_v2 *txd;
1332 
1333 			txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size;
1334 			txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
1335 			if (j * MTK_FQ_DMA_LENGTH + i < cnt)
1336 				txd->txd2 = eth->phy_scratch_ring +
1337 					    (j * MTK_FQ_DMA_LENGTH + i + 1) * soc->tx.desc_size;
1338 
1339 			txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
1340 			if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
1341 				txd->txd3 |= TX_DMA_PREP_ADDR64(dma_addr + i * MTK_QDMA_PAGE_SIZE);
1342 
1343 			txd->txd4 = 0;
1344 			if (mtk_is_netsys_v2_or_greater(eth)) {
1345 				txd->txd5 = 0;
1346 				txd->txd6 = 0;
1347 				txd->txd7 = 0;
1348 				txd->txd8 = 0;
1349 			}
1350 		}
1351 	}
1352 
1353 	mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
1354 	mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
1355 	mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
1356 	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
1357 
1358 	return 0;
1359 }
1360 
1361 static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
1362 {
1363 	return ring->dma + (desc - ring->phys);
1364 }
1365 
1366 static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
1367 					     void *txd, u32 txd_size)
1368 {
1369 	int idx = (txd - ring->dma) / txd_size;
1370 
1371 	return &ring->buf[idx];
1372 }
1373 
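/* The PDMA descriptors shadow the QDMA ring one-to-one; map a QDMA
 * descriptor to the PDMA descriptor at the same ring index.
 */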
1374 static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
1375 				       struct mtk_tx_dma *dma)
1376 {
1377 	return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
1378 }
1379 
1380 static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
1381 {
1382 	return (dma - ring->dma) / txd_size;
1383 }
1384 
1385 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1386 			 struct xdp_frame_bulk *bq, bool napi)
1387 {
1388 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1389 		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
1390 			dma_unmap_single(eth->dma_dev,
1391 					 dma_unmap_addr(tx_buf, dma_addr0),
1392 					 dma_unmap_len(tx_buf, dma_len0),
1393 					 DMA_TO_DEVICE);
1394 		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
1395 			dma_unmap_page(eth->dma_dev,
1396 				       dma_unmap_addr(tx_buf, dma_addr0),
1397 				       dma_unmap_len(tx_buf, dma_len0),
1398 				       DMA_TO_DEVICE);
1399 		}
1400 	} else {
1401 		if (dma_unmap_len(tx_buf, dma_len0)) {
1402 			dma_unmap_page(eth->dma_dev,
1403 				       dma_unmap_addr(tx_buf, dma_addr0),
1404 				       dma_unmap_len(tx_buf, dma_len0),
1405 				       DMA_TO_DEVICE);
1406 		}
1407 
1408 		if (dma_unmap_len(tx_buf, dma_len1)) {
1409 			dma_unmap_page(eth->dma_dev,
1410 				       dma_unmap_addr(tx_buf, dma_addr1),
1411 				       dma_unmap_len(tx_buf, dma_len1),
1412 				       DMA_TO_DEVICE);
1413 		}
1414 	}
1415 
1416 	if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
1417 		if (tx_buf->type == MTK_TYPE_SKB) {
1418 			struct sk_buff *skb = tx_buf->data;
1419 
1420 			if (napi)
1421 				napi_consume_skb(skb, napi);
1422 			else
1423 				dev_kfree_skb_any(skb);
1424 		} else {
1425 			struct xdp_frame *xdpf = tx_buf->data;
1426 
1427 			if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
1428 				xdp_return_frame_rx_napi(xdpf);
1429 			else if (bq)
1430 				xdp_return_frame_bulk(xdpf, bq);
1431 			else
1432 				xdp_return_frame(xdpf);
1433 		}
1434 	}
1435 	tx_buf->flags = 0;
1436 	tx_buf->data = NULL;
1437 }
1438 
1439 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1440 			 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
1441 			 size_t size, int idx)
1442 {
1443 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1444 		dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1445 		dma_unmap_len_set(tx_buf, dma_len0, size);
1446 	} else {
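		/* PDMA packs two buffers into one descriptor: even buffers
		 * use txd1/PLEN0, odd buffers use txd3/PLEN1.
		 */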
1447 		if (idx & 1) {
1448 			txd->txd3 = mapped_addr;
1449 			txd->txd2 |= TX_DMA_PLEN1(size);
1450 			dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
1451 			dma_unmap_len_set(tx_buf, dma_len1, size);
1452 		} else {
1453 			tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1454 			txd->txd1 = mapped_addr;
1455 			txd->txd2 = TX_DMA_PLEN0(size);
1456 			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1457 			dma_unmap_len_set(tx_buf, dma_len0, size);
1458 		}
1459 	}
1460 }
1461 
1462 static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
1463 				   struct mtk_tx_dma_desc_info *info)
1464 {
1465 	struct mtk_mac *mac = netdev_priv(dev);
1466 	struct mtk_eth *eth = mac->hw;
1467 	struct mtk_tx_dma *desc = txd;
1468 	u32 data;
1469 
1470 	WRITE_ONCE(desc->txd1, info->addr);
1471 
1472 	data = TX_DMA_SWC | TX_DMA_PLEN0(info->size) |
1473 	       FIELD_PREP(TX_DMA_PQID, info->qid);
1474 	if (info->last)
1475 		data |= TX_DMA_LS0;
1476 	WRITE_ONCE(desc->txd3, data);
1477 
1478 	data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
1479 	if (info->first) {
1480 		if (info->gso)
1481 			data |= TX_DMA_TSO;
1482 		/* tx checksum offload */
1483 		if (info->csum)
1484 			data |= TX_DMA_CHKSUM;
1485 		/* vlan header offload */
1486 		if (info->vlan)
1487 			data |= TX_DMA_INS_VLAN | info->vlan_tci;
1488 	}
1489 	WRITE_ONCE(desc->txd4, data);
1490 }
1491 
1492 static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
1493 				   struct mtk_tx_dma_desc_info *info)
1494 {
1495 	struct mtk_mac *mac = netdev_priv(dev);
1496 	struct mtk_tx_dma_v2 *desc = txd;
1497 	struct mtk_eth *eth = mac->hw;
1498 	u32 data;
1499 
1500 	WRITE_ONCE(desc->txd1, info->addr);
1501 
1502 	data = TX_DMA_PLEN0(info->size);
1503 	if (info->last)
1504 		data |= TX_DMA_LS0;
1505 
1506 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
1507 		data |= TX_DMA_PREP_ADDR64(info->addr);
1508 
1509 	WRITE_ONCE(desc->txd3, data);
1510 
1511 	 /* set forward port */
1512 	switch (mac->id) {
1513 	case MTK_GMAC1_ID:
1514 		data = PSE_GDM1_PORT << TX_DMA_FPORT_SHIFT_V2;
1515 		break;
1516 	case MTK_GMAC2_ID:
1517 		data = PSE_GDM2_PORT << TX_DMA_FPORT_SHIFT_V2;
1518 		break;
1519 	case MTK_GMAC3_ID:
1520 		data = PSE_GDM3_PORT << TX_DMA_FPORT_SHIFT_V2;
1521 		break;
1522 	}
1523 
1524 	data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
1525 	WRITE_ONCE(desc->txd4, data);
1526 
1527 	data = 0;
1528 	if (info->first) {
1529 		if (info->gso)
1530 			data |= TX_DMA_TSO_V2;
1531 		/* tx checksum offload */
1532 		if (info->csum)
1533 			data |= TX_DMA_CHKSUM_V2;
1534 		if (mtk_is_netsys_v3_or_greater(eth) && netdev_uses_dsa(dev))
1535 			data |= TX_DMA_SPTAG_V3;
1536 	}
1537 	WRITE_ONCE(desc->txd5, data);
1538 
1539 	data = 0;
1540 	if (info->first && info->vlan)
1541 		data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
1542 	WRITE_ONCE(desc->txd6, data);
1543 
1544 	WRITE_ONCE(desc->txd7, 0);
1545 	WRITE_ONCE(desc->txd8, 0);
1546 }
1547 
1548 static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
1549 				struct mtk_tx_dma_desc_info *info)
1550 {
1551 	struct mtk_mac *mac = netdev_priv(dev);
1552 	struct mtk_eth *eth = mac->hw;
1553 
1554 	if (mtk_is_netsys_v2_or_greater(eth))
1555 		mtk_tx_set_dma_desc_v2(dev, txd, info);
1556 	else
1557 		mtk_tx_set_dma_desc_v1(dev, txd, info);
1558 }
1559 
1560 static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
1561 		      int tx_num, struct mtk_tx_ring *ring, bool gso)
1562 {
1563 	struct mtk_tx_dma_desc_info txd_info = {
1564 		.size = skb_headlen(skb),
1565 		.gso = gso,
1566 		.csum = skb->ip_summed == CHECKSUM_PARTIAL,
1567 		.vlan = skb_vlan_tag_present(skb),
1568 		.qid = skb_get_queue_mapping(skb),
1569 		.vlan_tci = skb_vlan_tag_get(skb),
1570 		.first = true,
1571 		.last = !skb_is_nonlinear(skb),
1572 	};
1573 	struct netdev_queue *txq;
1574 	struct mtk_mac *mac = netdev_priv(dev);
1575 	struct mtk_eth *eth = mac->hw;
1576 	const struct mtk_soc_data *soc = eth->soc;
1577 	struct mtk_tx_dma *itxd, *txd;
1578 	struct mtk_tx_dma *itxd_pdma, *txd_pdma;
1579 	struct mtk_tx_buf *itx_buf, *tx_buf;
1580 	int i, n_desc = 1;
1581 	int queue = skb_get_queue_mapping(skb);
1582 	int k = 0;
1583 
1584 	txq = netdev_get_tx_queue(dev, queue);
1585 	itxd = ring->next_free;
1586 	itxd_pdma = qdma_to_pdma(ring, itxd);
1587 	if (itxd == ring->last_free)
1588 		return -ENOMEM;
1589 
1590 	itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
1591 	memset(itx_buf, 0, sizeof(*itx_buf));
1592 
1593 	txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
1594 				       DMA_TO_DEVICE);
1595 	if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1596 		return -ENOMEM;
1597 
1598 	mtk_tx_set_dma_desc(dev, itxd, &txd_info);
1599 
1600 	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1601 	itx_buf->mac_id = mac->id;
1602 	setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
1603 		     k++);
1604 
1605 	/* TX SG offload */
1606 	txd = itxd;
1607 	txd_pdma = qdma_to_pdma(ring, txd);
1608 
1609 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1610 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1611 		unsigned int offset = 0;
1612 		int frag_size = skb_frag_size(frag);
1613 
1614 		while (frag_size) {
1615 			bool new_desc = true;
1616 
1617 			if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
1618 			    (i & 0x1)) {
1619 				txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1620 				txd_pdma = qdma_to_pdma(ring, txd);
1621 				if (txd == ring->last_free)
1622 					goto err_dma;
1623 
1624 				n_desc++;
1625 			} else {
1626 				new_desc = false;
1627 			}
1628 
1629 			memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1630 			txd_info.size = min_t(unsigned int, frag_size,
1631 					      soc->tx.dma_max_len);
1632 			txd_info.qid = queue;
1633 			txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
1634 					!(frag_size - txd_info.size);
1635 			txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
1636 							 offset, txd_info.size,
1637 							 DMA_TO_DEVICE);
1638 			if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1639 				goto err_dma;
1640 
1641 			mtk_tx_set_dma_desc(dev, txd, &txd_info);
1642 
1643 			tx_buf = mtk_desc_to_tx_buf(ring, txd,
1644 						    soc->tx.desc_size);
1645 			if (new_desc)
1646 				memset(tx_buf, 0, sizeof(*tx_buf));
1647 			tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1648 			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
1649 			tx_buf->mac_id = mac->id;
1650 
1651 			setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
1652 				     txd_info.size, k++);
1653 
1654 			frag_size -= txd_info.size;
1655 			offset += txd_info.size;
1656 		}
1657 	}
1658 
1659 	/* store skb to cleanup */
1660 	itx_buf->type = MTK_TYPE_SKB;
1661 	itx_buf->data = skb;
1662 
1663 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1664 		if (k & 0x1)
1665 			txd_pdma->txd2 |= TX_DMA_LS0;
1666 		else
1667 			txd_pdma->txd2 |= TX_DMA_LS1;
1668 	}
1669 
1670 	netdev_tx_sent_queue(txq, skb->len);
1671 	skb_tx_timestamp(skb);
1672 
1673 	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1674 	atomic_sub(n_desc, &ring->free_count);
1675 
1676 	/* make sure that all changes to the dma ring are flushed before we
1677 	 * continue
1678 	 */
1679 	wmb();
1680 
1681 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1682 		if (netif_xmit_stopped(txq) || !netdev_xmit_more())
1683 			mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1684 	} else {
1685 		int next_idx;
1686 
1687 		next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_size),
1688 					 ring->dma_size);
1689 		mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
1690 	}
1691 
1692 	return 0;
1693 
1694 err_dma:
1695 	do {
1696 		tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
1697 
1698 		/* unmap dma */
1699 		mtk_tx_unmap(eth, tx_buf, NULL, false);
1700 
1701 		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1702 		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
1703 			itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
1704 
1705 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
1706 		itxd_pdma = qdma_to_pdma(ring, itxd);
1707 	} while (itxd != txd);
1708 
1709 	return -ENOMEM;
1710 }
1711 
1712 static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
1713 {
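	/* Worst-case descriptor count for this skb: GSO fragments may need
	 * to be split into chunks of at most dma_max_len bytes each.
	 */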
1714 	int i, nfrags = 1;
1715 	skb_frag_t *frag;
1716 
1717 	if (skb_is_gso(skb)) {
1718 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1719 			frag = &skb_shinfo(skb)->frags[i];
1720 			nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1721 					       eth->soc->tx.dma_max_len);
1722 		}
1723 	} else {
1724 		nfrags += skb_shinfo(skb)->nr_frags;
1725 	}
1726 
1727 	return nfrags;
1728 }
1729 
1730 static int mtk_queue_stopped(struct mtk_eth *eth)
1731 {
1732 	int i;
1733 
1734 	for (i = 0; i < MTK_MAX_DEVS; i++) {
1735 		if (!eth->netdev[i])
1736 			continue;
1737 		if (netif_queue_stopped(eth->netdev[i]))
1738 			return 1;
1739 	}
1740 
1741 	return 0;
1742 }
1743 
1744 static void mtk_wake_queue(struct mtk_eth *eth)
1745 {
1746 	int i;
1747 
1748 	for (i = 0; i < MTK_MAX_DEVS; i++) {
1749 		if (!eth->netdev[i])
1750 			continue;
1751 		netif_tx_wake_all_queues(eth->netdev[i]);
1752 	}
1753 }
1754 
1755 static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1756 {
1757 	struct mtk_mac *mac = netdev_priv(dev);
1758 	struct mtk_eth *eth = mac->hw;
1759 	struct mtk_tx_ring *ring = &eth->tx_ring;
1760 	struct net_device_stats *stats = &dev->stats;
1761 	bool gso = false;
1762 	int tx_num;
1763 
1764 	if (skb_vlan_tag_present(skb) &&
1765 	    !eth_proto_is_802_3(eth_hdr(skb)->h_proto)) {
1766 		skb = __vlan_hwaccel_push_inside(skb);
1767 		if (!skb)
1768 			goto dropped;
1769 	}
1770 
1771 	/* normally we can rely on the stack not calling this more than once,
1772 	 * however we have 2 queues running on the same ring so we need to lock
1773 	 * the ring access
1774 	 */
1775 	spin_lock(&eth->page_lock);
1776 
1777 	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1778 		goto drop;
1779 
1780 	tx_num = mtk_cal_txd_req(eth, skb);
1781 	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1782 		netif_tx_stop_all_queues(dev);
1783 		netif_err(eth, tx_queued, dev,
1784 			  "Tx Ring full when queue awake!\n");
1785 		spin_unlock(&eth->page_lock);
1786 		return NETDEV_TX_BUSY;
1787 	}
1788 
1789 	/* TSO: fill MSS info in tcp checksum field */
1790 	if (skb_is_gso(skb)) {
1791 		if (skb_cow_head(skb, 0)) {
1792 			netif_warn(eth, tx_err, dev,
1793 				   "GSO expand head fail.\n");
1794 			goto drop;
1795 		}
1796 
1797 		if (skb_shinfo(skb)->gso_type &
1798 				(SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1799 			gso = true;
1800 			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1801 		}
1802 	}
1803 
1804 	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1805 		goto drop;
1806 
1807 	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1808 		netif_tx_stop_all_queues(dev);
1809 
1810 	spin_unlock(&eth->page_lock);
1811 
1812 	return NETDEV_TX_OK;
1813 
1814 drop:
1815 	spin_unlock(&eth->page_lock);
1816 	dev_kfree_skb_any(skb);
1817 dropped:
1818 	stats->tx_dropped++;
1819 	return NETDEV_TX_OK;
1820 }
1821 
1822 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1823 {
1824 	int i;
1825 	struct mtk_rx_ring *ring;
1826 	int idx;
1827 
1828 	if (!eth->hwlro)
1829 		return &eth->rx_ring[0];
1830 
1831 	for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1832 		struct mtk_rx_dma *rxd;
1833 
1834 		ring = &eth->rx_ring[i];
1835 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1836 		rxd = ring->dma + idx * eth->soc->rx.desc_size;
1837 		if (rxd->rxd2 & RX_DMA_DONE) {
1838 			ring->calc_idx_update = true;
1839 			return ring;
1840 		}
1841 	}
1842 
1843 	return NULL;
1844 }
1845 
1846 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
1847 {
1848 	struct mtk_rx_ring *ring;
1849 	int i;
1850 
1851 	if (!eth->hwlro) {
1852 		ring = &eth->rx_ring[0];
1853 		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1854 	} else {
1855 		for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1856 			ring = &eth->rx_ring[i];
1857 			if (ring->calc_idx_update) {
1858 				ring->calc_idx_update = false;
1859 				mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1860 			}
1861 		}
1862 	}
1863 }
1864 
1865 static bool mtk_page_pool_enabled(struct mtk_eth *eth)
1866 {
1867 	return mtk_is_netsys_v2_or_greater(eth);
1868 }
1869 
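/* Set up a page_pool backed RX ring and register it as an XDP rxq so that
 * XDP_TX/XDP_REDIRECT can recycle buffers through the pool. The DMA direction
 * becomes bidirectional once an XDP program is attached, since XDP_TX
 * transmits straight out of the RX pages.
 */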
1870 static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
1871 					      struct xdp_rxq_info *xdp_q,
1872 					      int id, int size)
1873 {
1874 	struct page_pool_params pp_params = {
1875 		.order = 0,
1876 		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
1877 		.pool_size = size,
1878 		.nid = NUMA_NO_NODE,
1879 		.dev = eth->dma_dev,
1880 		.offset = MTK_PP_HEADROOM,
1881 		.max_len = MTK_PP_MAX_BUF_SIZE,
1882 	};
1883 	struct page_pool *pp;
1884 	int err;
1885 
1886 	pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
1887 							  : DMA_FROM_DEVICE;
1888 	pp = page_pool_create(&pp_params);
1889 	if (IS_ERR(pp))
1890 		return pp;
1891 
1892 	err = __xdp_rxq_info_reg(xdp_q, eth->dummy_dev, id,
1893 				 eth->rx_napi.napi_id, PAGE_SIZE);
1894 	if (err < 0)
1895 		goto err_free_pp;
1896 
1897 	err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
1898 	if (err)
1899 		goto err_unregister_rxq;
1900 
1901 	return pp;
1902 
1903 err_unregister_rxq:
1904 	xdp_rxq_info_unreg(xdp_q);
1905 err_free_pp:
1906 	page_pool_destroy(pp);
1907 
1908 	return ERR_PTR(err);
1909 }
1910 
1911 static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
1912 				    gfp_t gfp_mask)
1913 {
1914 	struct page *page;
1915 
1916 	page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
1917 	if (!page)
1918 		return NULL;
1919 
1920 	*dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
1921 	return page_address(page);
1922 }
1923 
1924 static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
1925 {
1926 	if (ring->page_pool)
1927 		page_pool_put_full_page(ring->page_pool,
1928 					virt_to_head_page(data), napi);
1929 	else
1930 		skb_free_frag(data);
1931 }
1932 
1933 static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
1934 			     struct mtk_tx_dma_desc_info *txd_info,
1935 			     struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
1936 			     void *data, u16 headroom, int index, bool dma_map)
1937 {
1938 	struct mtk_tx_ring *ring = &eth->tx_ring;
1939 	struct mtk_mac *mac = netdev_priv(dev);
1940 	struct mtk_tx_dma *txd_pdma;
1941 
1942 	if (dma_map) {  /* ndo_xdp_xmit */
1943 		txd_info->addr = dma_map_single(eth->dma_dev, data,
1944 						txd_info->size, DMA_TO_DEVICE);
1945 		if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
1946 			return -ENOMEM;
1947 
1948 		tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1949 	} else {
1950 		struct page *page = virt_to_head_page(data);
1951 
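		/* XDP_TX path: the buffer still belongs to the RX page_pool
		 * and is already DMA mapped, so only the offset inside the
		 * page needs to be computed and synced for the device.
		 */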
1952 		txd_info->addr = page_pool_get_dma_addr(page) +
1953 				 sizeof(struct xdp_frame) + headroom;
1954 		dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
1955 					   txd_info->size, DMA_BIDIRECTIONAL);
1956 	}
1957 	mtk_tx_set_dma_desc(dev, txd, txd_info);
1958 
1959 	tx_buf->mac_id = mac->id;
1960 	tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
1961 	tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1962 
1963 	txd_pdma = qdma_to_pdma(ring, txd);
1964 	setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
1965 		     index);
1966 
1967 	return 0;
1968 }
1969 
1970 static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
1971 				struct net_device *dev, bool dma_map)
1972 {
1973 	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
1974 	const struct mtk_soc_data *soc = eth->soc;
1975 	struct mtk_tx_ring *ring = &eth->tx_ring;
1976 	struct mtk_mac *mac = netdev_priv(dev);
1977 	struct mtk_tx_dma_desc_info txd_info = {
1978 		.size	= xdpf->len,
1979 		.first	= true,
1980 		.last	= !xdp_frame_has_frags(xdpf),
1981 		.qid	= mac->id,
1982 	};
1983 	int err, index = 0, n_desc = 1, nr_frags;
1984 	struct mtk_tx_buf *htx_buf, *tx_buf;
1985 	struct mtk_tx_dma *htxd, *txd;
1986 	void *data = xdpf->data;
1987 
1988 	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1989 		return -EBUSY;
1990 
1991 	nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
1992 	if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
1993 		return -EBUSY;
1994 
1995 	spin_lock(&eth->page_lock);
1996 
1997 	txd = ring->next_free;
1998 	if (txd == ring->last_free) {
1999 		spin_unlock(&eth->page_lock);
2000 		return -ENOMEM;
2001 	}
2002 	htxd = txd;
2003 
2004 	tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size);
2005 	memset(tx_buf, 0, sizeof(*tx_buf));
2006 	htx_buf = tx_buf;
2007 
2008 	for (;;) {
2009 		err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
2010 					data, xdpf->headroom, index, dma_map);
2011 		if (err < 0)
2012 			goto unmap;
2013 
2014 		if (txd_info.last)
2015 			break;
2016 
2017 		if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
2018 			txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
2019 			if (txd == ring->last_free)
2020 				goto unmap;
2021 
2022 			tx_buf = mtk_desc_to_tx_buf(ring, txd,
2023 						    soc->tx.desc_size);
2024 			memset(tx_buf, 0, sizeof(*tx_buf));
2025 			n_desc++;
2026 		}
2027 
2028 		memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
2029 		txd_info.size = skb_frag_size(&sinfo->frags[index]);
2030 		txd_info.last = index + 1 == nr_frags;
2031 		txd_info.qid = mac->id;
2032 		data = skb_frag_address(&sinfo->frags[index]);
2033 
2034 		index++;
2035 	}
2036 	/* store xdpf for cleanup */
2037 	htx_buf->data = xdpf;
2038 
2039 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2040 		struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, txd);
2041 
2042 		if (index & 1)
2043 			txd_pdma->txd2 |= TX_DMA_LS0;
2044 		else
2045 			txd_pdma->txd2 |= TX_DMA_LS1;
2046 	}
2047 
2048 	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
2049 	atomic_sub(n_desc, &ring->free_count);
2050 
2051 	/* make sure that all changes to the dma ring are flushed before we
2052 	 * continue
2053 	 */
2054 	wmb();
2055 
2056 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2057 		mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
2058 	} else {
2059 		int idx;
2060 
2061 		idx = txd_to_idx(ring, txd, soc->tx.desc_size);
2062 		mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
2063 			MT7628_TX_CTX_IDX0);
2064 	}
2065 
2066 	spin_unlock(&eth->page_lock);
2067 
2068 	return 0;
2069 
2070 unmap:
2071 	while (htxd != txd) {
2072 		tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size);
2073 		mtk_tx_unmap(eth, tx_buf, NULL, false);
2074 
2075 		htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
2076 		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2077 			struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, htxd);
2078 
2079 			txd_pdma->txd2 = TX_DMA_DESP2_DEF;
2080 		}
2081 
2082 		htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
2083 	}
2084 
2085 	spin_unlock(&eth->page_lock);
2086 
2087 	return err;
2088 }
2089 
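/* .ndo_xdp_xmit implementation: try to queue each redirected frame and return
 * how many were accepted; the caller is expected to free any frames that were
 * not taken. XDP TX counters are updated under the stats syncp.
 */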
2090 static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
2091 			struct xdp_frame **frames, u32 flags)
2092 {
2093 	struct mtk_mac *mac = netdev_priv(dev);
2094 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
2095 	struct mtk_eth *eth = mac->hw;
2096 	int i, nxmit = 0;
2097 
2098 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2099 		return -EINVAL;
2100 
2101 	for (i = 0; i < num_frame; i++) {
2102 		if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
2103 			break;
2104 		nxmit++;
2105 	}
2106 
2107 	u64_stats_update_begin(&hw_stats->syncp);
2108 	hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
2109 	hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
2110 	u64_stats_update_end(&hw_stats->syncp);
2111 
2112 	return nxmit;
2113 }
2114 
2115 static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
2116 		       struct xdp_buff *xdp, struct net_device *dev)
2117 {
2118 	struct mtk_mac *mac = netdev_priv(dev);
2119 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
2120 	u64 *count = &hw_stats->xdp_stats.rx_xdp_drop;
2121 	struct bpf_prog *prog;
2122 	u32 act = XDP_PASS;
2123 
2124 	rcu_read_lock();
2125 
2126 	prog = rcu_dereference(eth->prog);
2127 	if (!prog)
2128 		goto out;
2129 
2130 	act = bpf_prog_run_xdp(prog, xdp);
2131 	switch (act) {
2132 	case XDP_PASS:
2133 		count = &hw_stats->xdp_stats.rx_xdp_pass;
2134 		goto update_stats;
2135 	case XDP_REDIRECT:
2136 		if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
2137 			act = XDP_DROP;
2138 			break;
2139 		}
2140 
2141 		count = &hw_stats->xdp_stats.rx_xdp_redirect;
2142 		goto update_stats;
2143 	case XDP_TX: {
2144 		struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
2145 
2146 		if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
2147 			count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
2148 			act = XDP_DROP;
2149 			break;
2150 		}
2151 
2152 		count = &hw_stats->xdp_stats.rx_xdp_tx;
2153 		goto update_stats;
2154 	}
2155 	default:
2156 		bpf_warn_invalid_xdp_action(dev, prog, act);
2157 		fallthrough;
2158 	case XDP_ABORTED:
2159 		trace_xdp_exception(dev, prog, act);
2160 		fallthrough;
2161 	case XDP_DROP:
2162 		break;
2163 	}
2164 
2165 	page_pool_put_full_page(ring->page_pool,
2166 				virt_to_head_page(xdp->data), true);
2167 
2168 update_stats:
2169 	u64_stats_update_begin(&hw_stats->syncp);
2170 	*count = *count + 1;
2171 	u64_stats_update_end(&hw_stats->syncp);
2172 out:
2173 	rcu_read_unlock();
2174 
2175 	return act;
2176 }
2177 
2178 static int mtk_poll_rx(struct napi_struct *napi, int budget,
2179 		       struct mtk_eth *eth)
2180 {
2181 	struct dim_sample dim_sample = {};
2182 	struct mtk_rx_ring *ring;
2183 	bool xdp_flush = false;
2184 	int idx;
2185 	struct sk_buff *skb;
2186 	u64 addr64 = 0;
2187 	u8 *data, *new_data;
2188 	struct mtk_rx_dma_v2 *rxd, trxd;
2189 	int done = 0, bytes = 0;
2190 	dma_addr_t dma_addr = DMA_MAPPING_ERROR;
2191 	int ppe_idx = 0;
2192 
2193 	while (done < budget) {
2194 		unsigned int pktlen, *rxdcsum;
2195 		struct net_device *netdev;
2196 		u32 hash, reason;
2197 		int mac = 0;
2198 
2199 		ring = mtk_get_rx_ring(eth);
2200 		if (unlikely(!ring))
2201 			goto rx_done;
2202 
2203 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
2204 		rxd = ring->dma + idx * eth->soc->rx.desc_size;
2205 		data = ring->data[idx];
2206 
2207 		if (!mtk_rx_get_desc(eth, &trxd, rxd))
2208 			break;
2209 
2210 		/* find out which MAC the packet came from; values start at 1 */
2211 		if (mtk_is_netsys_v3_or_greater(eth)) {
2212 			u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
2213 
2214 			switch (val) {
2215 			case PSE_GDM1_PORT:
2216 			case PSE_GDM2_PORT:
2217 				mac = val - 1;
2218 				break;
2219 			case PSE_GDM3_PORT:
2220 				mac = MTK_GMAC3_ID;
2221 				break;
2222 			default:
2223 				break;
2224 			}
2225 		} else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
2226 			   !(trxd.rxd4 & RX_DMA_SPECIAL_TAG)) {
2227 			mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
2228 		}
2229 
2230 		if (unlikely(mac < 0 || mac >= MTK_MAX_DEVS ||
2231 			     !eth->netdev[mac]))
2232 			goto release_desc;
2233 
2234 		netdev = eth->netdev[mac];
2235 		ppe_idx = eth->mac[mac]->ppe_idx;
2236 
2237 		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
2238 			goto release_desc;
2239 
2240 		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
2241 
2242 		/* alloc new buffer */
2243 		if (ring->page_pool) {
2244 			struct page *page = virt_to_head_page(data);
2245 			struct xdp_buff xdp;
2246 			u32 ret, metasize;
2247 
2248 			new_data = mtk_page_pool_get_buff(ring->page_pool,
2249 							  &dma_addr,
2250 							  GFP_ATOMIC);
2251 			if (unlikely(!new_data)) {
2252 				netdev->stats.rx_dropped++;
2253 				goto release_desc;
2254 			}
2255 
2256 			dma_sync_single_for_cpu(eth->dma_dev,
2257 				page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
2258 				pktlen, page_pool_get_dma_dir(ring->page_pool));
2259 
2260 			xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
2261 			xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
2262 					 true);
2263 			xdp_buff_clear_frags_flag(&xdp);
2264 
2265 			ret = mtk_xdp_run(eth, ring, &xdp, netdev);
2266 			if (ret == XDP_REDIRECT)
2267 				xdp_flush = true;
2268 
2269 			if (ret != XDP_PASS)
2270 				goto skip_rx;
2271 
2272 			skb = build_skb(data, PAGE_SIZE);
2273 			if (unlikely(!skb)) {
2274 				page_pool_put_full_page(ring->page_pool,
2275 							page, true);
2276 				netdev->stats.rx_dropped++;
2277 				goto skip_rx;
2278 			}
2279 
2280 			skb_reserve(skb, xdp.data - xdp.data_hard_start);
2281 			skb_put(skb, xdp.data_end - xdp.data);
2282 			metasize = xdp.data - xdp.data_meta;
2283 			if (metasize)
2284 				skb_metadata_set(skb, metasize);
2285 			skb_mark_for_recycle(skb);
2286 		} else {
2287 			if (ring->frag_size <= PAGE_SIZE)
2288 				new_data = napi_alloc_frag(ring->frag_size);
2289 			else
2290 				new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
2291 
2292 			if (unlikely(!new_data)) {
2293 				netdev->stats.rx_dropped++;
2294 				goto release_desc;
2295 			}
2296 
2297 			dma_addr = dma_map_single(eth->dma_dev,
2298 				new_data + NET_SKB_PAD + eth->ip_align,
2299 				ring->buf_size, DMA_FROM_DEVICE);
2300 			if (unlikely(dma_mapping_error(eth->dma_dev,
2301 						       dma_addr))) {
2302 				skb_free_frag(new_data);
2303 				netdev->stats.rx_dropped++;
2304 				goto release_desc;
2305 			}
2306 
2307 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
2308 				addr64 = RX_DMA_GET_ADDR64(trxd.rxd2);
2309 
2310 			dma_unmap_single(eth->dma_dev, ((u64)trxd.rxd1 | addr64),
2311 					 ring->buf_size, DMA_FROM_DEVICE);
2312 
2313 			skb = build_skb(data, ring->frag_size);
2314 			if (unlikely(!skb)) {
2315 				netdev->stats.rx_dropped++;
2316 				skb_free_frag(data);
2317 				goto skip_rx;
2318 			}
2319 
2320 			skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
2321 			skb_put(skb, pktlen);
2322 		}
2323 
2324 		skb->dev = netdev;
2325 		bytes += skb->len;
2326 
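		/* The PPE tags received packets with a flow-table (FOE) entry
		 * index and a CPU reason in the RX descriptor; the entry index
		 * doubles as an RX hash, and "unbind rate reached" packets are
		 * handed to the PPE code below so the flow can be offloaded.
		 */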
2327 		if (mtk_is_netsys_v3_or_greater(eth)) {
2328 			reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
2329 			hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
2330 			if (hash != MTK_RXD5_FOE_ENTRY)
2331 				skb_set_hash(skb, jhash_1word(hash, 0),
2332 					     PKT_HASH_TYPE_L4);
2333 			rxdcsum = &trxd.rxd3;
2334 		} else {
2335 			reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
2336 			hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
2337 			if (hash != MTK_RXD4_FOE_ENTRY)
2338 				skb_set_hash(skb, jhash_1word(hash, 0),
2339 					     PKT_HASH_TYPE_L4);
2340 			rxdcsum = &trxd.rxd4;
2341 		}
2342 
2343 		if (*rxdcsum & eth->soc->rx.dma_l4_valid)
2344 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2345 		else
2346 			skb_checksum_none_assert(skb);
2347 		skb->protocol = eth_type_trans(skb, netdev);
2348 
2349 		/* When using VLAN untagging in combination with DSA, the
2350 		 * hardware treats the MTK special tag as a VLAN and untags it.
2351 		 */
2352 		if (mtk_is_netsys_v1(eth) && (trxd.rxd2 & RX_DMA_VTAG) &&
2353 		    netdev_uses_dsa(netdev)) {
2354 			unsigned int port = RX_DMA_VPID(trxd.rxd3) & GENMASK(2, 0);
2355 
2356 			if (port < ARRAY_SIZE(eth->dsa_meta) &&
2357 			    eth->dsa_meta[port])
2358 				skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
2359 		}
2360 
2361 		if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
2362 			mtk_ppe_check_skb(eth->ppe[ppe_idx], skb, hash);
2363 
2364 		skb_record_rx_queue(skb, 0);
2365 		napi_gro_receive(napi, skb);
2366 
2367 skip_rx:
2368 		ring->data[idx] = new_data;
2369 		rxd->rxd1 = (unsigned int)dma_addr;
2370 release_desc:
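		/* When buffer refill failed, dma_addr still holds the
		 * DMA_MAPPING_ERROR sentinel, so the upper address bits for
		 * 36-bit DMA are taken back from the descriptor itself and the
		 * old buffer is handed back to the hardware unchanged.
		 */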
2371 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
2372 			if (unlikely(dma_addr == DMA_MAPPING_ERROR))
2373 				addr64 = FIELD_GET(RX_DMA_ADDR64_MASK,
2374 						   rxd->rxd2);
2375 			else
2376 				addr64 = RX_DMA_PREP_ADDR64(dma_addr);
2377 		}
2378 
2379 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2380 			rxd->rxd2 = RX_DMA_LSO;
2381 		else
2382 			rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size) | addr64;
2383 
2384 		ring->calc_idx = idx;
2385 		done++;
2386 	}
2387 
2388 rx_done:
2389 	if (done) {
2390 		/* make sure that all changes to the dma ring are flushed before
2391 		 * we continue
2392 		 */
2393 		wmb();
2394 		mtk_update_rx_cpu_idx(eth);
2395 	}
2396 
2397 	eth->rx_packets += done;
2398 	eth->rx_bytes += bytes;
2399 	dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
2400 			  &dim_sample);
2401 	net_dim(&eth->rx_dim, &dim_sample);
2402 
2403 	if (xdp_flush)
2404 		xdp_do_flush();
2405 
2406 	return done;
2407 }
2408 
2409 struct mtk_poll_state {
2410     struct netdev_queue *txq;
2411     unsigned int total;
2412     unsigned int done;
2413     unsigned int bytes;
2414 };
2415 
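/* Account one completed TX packet against its netdev queue. Completions are
 * batched per txq: the accumulated (done, bytes) counters are only pushed to
 * BQL via netdev_tx_completed_queue() when the queue changes or, finally,
 * from mtk_poll_tx().
 */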
2416 static void
2417 mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac,
2418 		 struct sk_buff *skb)
2419 {
2420 	struct netdev_queue *txq;
2421 	struct net_device *dev;
2422 	unsigned int bytes = skb->len;
2423 
2424 	state->total++;
2425 	eth->tx_packets++;
2426 	eth->tx_bytes += bytes;
2427 
2428 	dev = eth->netdev[mac];
2429 	if (!dev)
2430 		return;
2431 
2432 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2433 	if (state->txq == txq) {
2434 		state->done++;
2435 		state->bytes += bytes;
2436 		return;
2437 	}
2438 
2439 	if (state->txq)
2440 		netdev_tx_completed_queue(state->txq, state->done, state->bytes);
2441 
2442 	state->txq = txq;
2443 	state->done = 1;
2444 	state->bytes = bytes;
2445 }
2446 
2447 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
2448 			    struct mtk_poll_state *state)
2449 {
2450 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2451 	struct mtk_tx_ring *ring = &eth->tx_ring;
2452 	struct mtk_tx_buf *tx_buf;
2453 	struct xdp_frame_bulk bq;
2454 	struct mtk_tx_dma *desc;
2455 	u32 cpu, dma;
2456 
2457 	cpu = ring->last_free_ptr;
2458 	dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
2459 
2460 	desc = mtk_qdma_phys_to_virt(ring, cpu);
2461 	xdp_frame_bulk_init(&bq);
2462 
2463 	while ((cpu != dma) && budget) {
2464 		u32 next_cpu = desc->txd2;
2465 
2466 		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
2467 		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
2468 			break;
2469 
2470 		tx_buf = mtk_desc_to_tx_buf(ring, desc,
2471 					    eth->soc->tx.desc_size);
2472 		if (!tx_buf->data)
2473 			break;
2474 
2475 		if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2476 			if (tx_buf->type == MTK_TYPE_SKB)
2477 				mtk_poll_tx_done(eth, state, tx_buf->mac_id,
2478 						 tx_buf->data);
2479 
2480 			budget--;
2481 		}
2482 		mtk_tx_unmap(eth, tx_buf, &bq, true);
2483 
2484 		ring->last_free = desc;
2485 		atomic_inc(&ring->free_count);
2486 
2487 		cpu = next_cpu;
2488 	}
2489 	xdp_flush_frame_bulk(&bq);
2490 
2491 	ring->last_free_ptr = cpu;
2492 	mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
2493 
2494 	return budget;
2495 }
2496 
2497 static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
2498 			    struct mtk_poll_state *state)
2499 {
2500 	struct mtk_tx_ring *ring = &eth->tx_ring;
2501 	struct mtk_tx_buf *tx_buf;
2502 	struct xdp_frame_bulk bq;
2503 	struct mtk_tx_dma *desc;
2504 	u32 cpu, dma;
2505 
2506 	cpu = ring->cpu_idx;
2507 	dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
2508 	xdp_frame_bulk_init(&bq);
2509 
2510 	while ((cpu != dma) && budget) {
2511 		tx_buf = &ring->buf[cpu];
2512 		if (!tx_buf->data)
2513 			break;
2514 
2515 		if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2516 			if (tx_buf->type == MTK_TYPE_SKB)
2517 				mtk_poll_tx_done(eth, state, 0, tx_buf->data);
2518 			budget--;
2519 		}
2520 		mtk_tx_unmap(eth, tx_buf, &bq, true);
2521 
2522 		desc = ring->dma + cpu * eth->soc->tx.desc_size;
2523 		ring->last_free = desc;
2524 		atomic_inc(&ring->free_count);
2525 
2526 		cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
2527 	}
2528 	xdp_flush_frame_bulk(&bq);
2529 
2530 	ring->cpu_idx = cpu;
2531 
2532 	return budget;
2533 }
2534 
2535 static int mtk_poll_tx(struct mtk_eth *eth, int budget)
2536 {
2537 	struct mtk_tx_ring *ring = &eth->tx_ring;
2538 	struct dim_sample dim_sample = {};
2539 	struct mtk_poll_state state = {};
2540 
2541 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2542 		budget = mtk_poll_tx_qdma(eth, budget, &state);
2543 	else
2544 		budget = mtk_poll_tx_pdma(eth, budget, &state);
2545 
2546 	if (state.txq)
2547 		netdev_tx_completed_queue(state.txq, state.done, state.bytes);
2548 
2549 	dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
2550 			  &dim_sample);
2551 	net_dim(&eth->tx_dim, &dim_sample);
2552 
2553 	if (mtk_queue_stopped(eth) &&
2554 	    (atomic_read(&ring->free_count) > ring->thresh))
2555 		mtk_wake_queue(eth);
2556 
2557 	return state.total;
2558 }
2559 
2560 static void mtk_handle_status_irq(struct mtk_eth *eth)
2561 {
2562 	u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
2563 
2564 	if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
2565 		mtk_stats_update(eth);
2566 		mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
2567 			MTK_INT_STATUS2);
2568 	}
2569 }
2570 
2571 static int mtk_napi_tx(struct napi_struct *napi, int budget)
2572 {
2573 	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
2574 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2575 	int tx_done = 0;
2576 
2577 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2578 		mtk_handle_status_irq(eth);
2579 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
2580 	tx_done = mtk_poll_tx(eth, budget);
2581 
2582 	if (unlikely(netif_msg_intr(eth))) {
2583 		dev_info(eth->dev,
2584 			 "done tx %d, intr 0x%08x/0x%x\n", tx_done,
2585 			 mtk_r32(eth, reg_map->tx_irq_status),
2586 			 mtk_r32(eth, reg_map->tx_irq_mask));
2587 	}
2588 
2589 	if (tx_done == budget)
2590 		return budget;
2591 
2592 	if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
2593 		return budget;
2594 
2595 	if (napi_complete_done(napi, tx_done))
2596 		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2597 
2598 	return tx_done;
2599 }
2600 
2601 static int mtk_napi_rx(struct napi_struct *napi, int budget)
2602 {
2603 	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
2604 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2605 	int rx_done_total = 0;
2606 
2607 	mtk_handle_status_irq(eth);
2608 
2609 	do {
2610 		int rx_done;
2611 
2612 		mtk_w32(eth, eth->soc->rx.irq_done_mask,
2613 			reg_map->pdma.irq_status);
2614 		rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
2615 		rx_done_total += rx_done;
2616 
2617 		if (unlikely(netif_msg_intr(eth))) {
2618 			dev_info(eth->dev,
2619 				 "done rx %d, intr 0x%08x/0x%x\n", rx_done,
2620 				 mtk_r32(eth, reg_map->pdma.irq_status),
2621 				 mtk_r32(eth, reg_map->pdma.irq_mask));
2622 		}
2623 
2624 		if (rx_done_total == budget)
2625 			return budget;
2626 
2627 	} while (mtk_r32(eth, reg_map->pdma.irq_status) &
2628 		 eth->soc->rx.irq_done_mask);
2629 
2630 	if (napi_complete_done(napi, rx_done_total))
2631 		mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
2632 
2633 	return rx_done_total;
2634 }
2635 
2636 static int mtk_tx_alloc(struct mtk_eth *eth)
2637 {
2638 	const struct mtk_soc_data *soc = eth->soc;
2639 	struct mtk_tx_ring *ring = &eth->tx_ring;
2640 	int i, sz = soc->tx.desc_size;
2641 	struct mtk_tx_dma_v2 *txd;
2642 	int ring_size;
2643 	u32 ofs, val;
2644 
2645 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
2646 		ring_size = MTK_QDMA_RING_SIZE;
2647 	else
2648 		ring_size = soc->tx.dma_size;
2649 
2650 	ring->buf = kzalloc_objs(*ring->buf, ring_size);
2651 	if (!ring->buf)
2652 		goto no_tx_mem;
2653 
2654 	ring->dma = mtk_dma_ring_alloc(eth, ring_size * sz, &ring->phys, true);
2655 	if (!ring->dma)
2656 		goto no_tx_mem;
2657 
2658 	for (i = 0; i < ring_size; i++) {
2659 		int next = (i + 1) % ring_size;
2660 		u32 next_ptr = ring->phys + next * sz;
2661 
2662 		txd = ring->dma + i * sz;
2663 		txd->txd2 = next_ptr;
2664 		txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
2665 		txd->txd4 = 0;
2666 		if (mtk_is_netsys_v2_or_greater(eth)) {
2667 			txd->txd5 = 0;
2668 			txd->txd6 = 0;
2669 			txd->txd7 = 0;
2670 			txd->txd8 = 0;
2671 		}
2672 	}
2673 
2674 	/* On MT7688 (PDMA only) this driver uses the ring->dma structs
2675 	 * only as the framework. The real HW descriptors are the PDMA
2676 	 * descriptors in ring->dma_pdma.
2677 	 */
2678 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2679 		ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
2680 						    &ring->phys_pdma, GFP_KERNEL);
2681 		if (!ring->dma_pdma)
2682 			goto no_tx_mem;
2683 
2684 		for (i = 0; i < ring_size; i++) {
2685 			ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
2686 			ring->dma_pdma[i].txd4 = 0;
2687 		}
2688 	}
2689 
2690 	ring->dma_size = ring_size;
2691 	atomic_set(&ring->free_count, ring_size - 2);
2692 	ring->next_free = ring->dma;
2693 	ring->last_free = (void *)txd;
2694 	ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz));
2695 	ring->thresh = MAX_SKB_FRAGS;
2696 
2697 	/* make sure that all changes to the dma ring are flushed before we
2698 	 * continue
2699 	 */
2700 	wmb();
2701 
2702 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2703 		mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
2704 		mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
2705 		mtk_w32(eth,
2706 			ring->phys + ((ring_size - 1) * sz),
2707 			soc->reg_map->qdma.crx_ptr);
2708 		mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
2709 
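		/* Per-queue setup: give every QDMA TX queue its resource
		 * thresholds and a minimum-rate scheduler entry (10 Mbps
		 * floor, per the mantissa/exponent fields below).
		 */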
2710 		for (i = 0, ofs = 0; i < MTK_QDMA_NUM_QUEUES; i++) {
2711 			val = (QDMA_RES_THRES << 8) | QDMA_RES_THRES;
2712 			mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs);
2713 
2714 			val = MTK_QTX_SCH_MIN_RATE_EN |
2715 			      /* minimum: 10 Mbps */
2716 			      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
2717 			      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
2718 			      MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
2719 			if (mtk_is_netsys_v1(eth))
2720 				val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
2721 			mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
2722 			ofs += MTK_QTX_OFFSET;
2723 		}
2724 		val = MTK_QDMA_TX_SCH_MAX_WFQ | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
2725 		mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate);
2726 		if (mtk_is_netsys_v2_or_greater(eth))
2727 			mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4);
2728 	} else {
2729 		mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
2730 		mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0);
2731 		mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
2732 		mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
2733 	}
2734 
2735 	return 0;
2736 
2737 no_tx_mem:
2738 	return -ENOMEM;
2739 }
2740 
2741 static void mtk_tx_clean(struct mtk_eth *eth)
2742 {
2743 	const struct mtk_soc_data *soc = eth->soc;
2744 	struct mtk_tx_ring *ring = &eth->tx_ring;
2745 	int i;
2746 
2747 	if (ring->buf) {
2748 		for (i = 0; i < ring->dma_size; i++)
2749 			mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
2750 		kfree(ring->buf);
2751 		ring->buf = NULL;
2752 	}
2753 
2754 	if (ring->dma) {
2755 		mtk_dma_ring_free(eth, ring->dma_size * soc->tx.desc_size,
2756 				  ring->dma, ring->phys, true);
2757 		ring->dma = NULL;
2758 	}
2759 
2760 	if (ring->dma_pdma) {
2761 		dma_free_coherent(eth->dma_dev,
2762 				  ring->dma_size * soc->tx.desc_size,
2763 				  ring->dma_pdma, ring->phys_pdma);
2764 		ring->dma_pdma = NULL;
2765 	}
2766 }
2767 
2768 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
2769 {
2770 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2771 	const struct mtk_soc_data *soc = eth->soc;
2772 	struct mtk_rx_ring *ring;
2773 	int rx_data_len, rx_dma_size;
2774 	int i;
2775 
2776 	if (rx_flag == MTK_RX_FLAGS_QDMA) {
2777 		if (ring_no)
2778 			return -EINVAL;
2779 		ring = &eth->rx_ring_qdma;
2780 	} else {
2781 		ring = &eth->rx_ring[ring_no];
2782 	}
2783 
2784 	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
2785 		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
2786 		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
2787 	} else {
2788 		rx_data_len = ETH_DATA_LEN;
2789 		rx_dma_size = soc->rx.dma_size;
2790 	}
2791 
2792 	ring->frag_size = mtk_max_frag_size(rx_data_len);
2793 	ring->buf_size = mtk_max_buf_size(ring->frag_size);
2794 	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
2795 			     GFP_KERNEL);
2796 	if (!ring->data)
2797 		return -ENOMEM;
2798 
2799 	if (mtk_page_pool_enabled(eth)) {
2800 		struct page_pool *pp;
2801 
2802 		pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
2803 					  rx_dma_size);
2804 		if (IS_ERR(pp))
2805 			return PTR_ERR(pp);
2806 
2807 		ring->page_pool = pp;
2808 	}
2809 
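	/* The descriptor memory comes from mtk_dma_ring_alloc(); the last
	 * argument suggests that only the normal (first) RX ring is eligible
	 * for placement in on-SoC SRAM, while QDMA/HWLRO rings stay in
	 * ordinary coherent DRAM.
	 */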
2810 	ring->dma = mtk_dma_ring_alloc(eth,
2811 				       rx_dma_size * eth->soc->rx.desc_size,
2812 				       &ring->phys,
2813 				       rx_flag == MTK_RX_FLAGS_NORMAL);
2814 	if (!ring->dma)
2815 		return -ENOMEM;
2816 
2817 	for (i = 0; i < rx_dma_size; i++) {
2818 		struct mtk_rx_dma_v2 *rxd;
2819 		dma_addr_t dma_addr;
2820 		void *data;
2821 
2822 		rxd = ring->dma + i * eth->soc->rx.desc_size;
2823 		if (ring->page_pool) {
2824 			data = mtk_page_pool_get_buff(ring->page_pool,
2825 						      &dma_addr, GFP_KERNEL);
2826 			if (!data)
2827 				return -ENOMEM;
2828 		} else {
2829 			if (ring->frag_size <= PAGE_SIZE)
2830 				data = netdev_alloc_frag(ring->frag_size);
2831 			else
2832 				data = mtk_max_lro_buf_alloc(GFP_KERNEL);
2833 
2834 			if (!data)
2835 				return -ENOMEM;
2836 
2837 			dma_addr = dma_map_single(eth->dma_dev,
2838 				data + NET_SKB_PAD + eth->ip_align,
2839 				ring->buf_size, DMA_FROM_DEVICE);
2840 			if (unlikely(dma_mapping_error(eth->dma_dev,
2841 						       dma_addr))) {
2842 				skb_free_frag(data);
2843 				return -ENOMEM;
2844 			}
2845 		}
2846 		rxd->rxd1 = (unsigned int)dma_addr;
2847 		ring->data[i] = data;
2848 
2849 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2850 			rxd->rxd2 = RX_DMA_LSO;
2851 		else
2852 			rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2853 
2854 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
2855 			rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
2856 
2857 		rxd->rxd3 = 0;
2858 		rxd->rxd4 = 0;
2859 		if (mtk_is_netsys_v3_or_greater(eth)) {
2860 			rxd->rxd5 = 0;
2861 			rxd->rxd6 = 0;
2862 			rxd->rxd7 = 0;
2863 			rxd->rxd8 = 0;
2864 		}
2865 	}
2866 
2867 	ring->dma_size = rx_dma_size;
2868 	ring->calc_idx_update = false;
2869 	ring->calc_idx = rx_dma_size - 1;
2870 	if (rx_flag == MTK_RX_FLAGS_QDMA)
2871 		ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
2872 				    ring_no * MTK_QRX_OFFSET;
2873 	else
2874 		ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
2875 				    ring_no * MTK_QRX_OFFSET;
2876 	/* make sure that all changes to the dma ring are flushed before we
2877 	 * continue
2878 	 */
2879 	wmb();
2880 
2881 	if (rx_flag == MTK_RX_FLAGS_QDMA) {
2882 		mtk_w32(eth, ring->phys,
2883 			reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2884 		mtk_w32(eth, rx_dma_size,
2885 			reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2886 		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2887 			reg_map->qdma.rst_idx);
2888 	} else {
2889 		mtk_w32(eth, ring->phys,
2890 			reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2891 		mtk_w32(eth, rx_dma_size,
2892 			reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2893 		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2894 			reg_map->pdma.rst_idx);
2895 	}
2896 	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
2897 
2898 	return 0;
2899 }
2900 
2901 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_sram)
2902 {
2903 	u64 addr64 = 0;
2904 	int i;
2905 
2906 	if (ring->data && ring->dma) {
2907 		for (i = 0; i < ring->dma_size; i++) {
2908 			struct mtk_rx_dma *rxd;
2909 
2910 			if (!ring->data[i])
2911 				continue;
2912 
2913 			rxd = ring->dma + i * eth->soc->rx.desc_size;
2914 			if (!rxd->rxd1)
2915 				continue;
2916 
2917 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
2918 				addr64 = RX_DMA_GET_ADDR64(rxd->rxd2);
2919 
2920 			dma_unmap_single(eth->dma_dev, ((u64)rxd->rxd1 | addr64),
2921 					 ring->buf_size, DMA_FROM_DEVICE);
2922 			mtk_rx_put_buff(ring, ring->data[i], false);
2923 		}
2924 		kfree(ring->data);
2925 		ring->data = NULL;
2926 	}
2927 
2928 	if (ring->dma) {
2929 		mtk_dma_ring_free(eth, ring->dma_size * eth->soc->rx.desc_size,
2930 				  ring->dma, ring->phys, in_sram);
2931 		ring->dma = NULL;
2932 	}
2933 
2934 	if (ring->page_pool) {
2935 		if (xdp_rxq_info_is_reg(&ring->xdp_q))
2936 			xdp_rxq_info_unreg(&ring->xdp_q);
2937 		page_pool_destroy(ring->page_pool);
2938 		ring->page_pool = NULL;
2939 	}
2940 }
2941 
2942 static int mtk_hwlro_rx_init(struct mtk_eth *eth)
2943 {
2944 	int i;
2945 	u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
2946 	u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
2947 
2948 	/* set LRO rings to auto-learn modes */
2949 	ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
2950 
2951 	/* validate LRO ring */
2952 	ring_ctrl_dw2 |= MTK_RING_VLD;
2953 
2954 	/* set AGE timer (unit: 20us) */
2955 	ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
2956 	ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
2957 
2958 	/* set max AGG timer (unit: 20us) */
2959 	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
2960 
2961 	/* set max LRO AGG count */
2962 	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
2963 	ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
2964 
2965 	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2966 		mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
2967 		mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
2968 		mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
2969 	}
2970 
2971 	/* IPv4 checksum update enable */
2972 	lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
2973 
2974 	/* switch priority comparison to packet count mode */
2975 	lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
2976 
2977 	/* bandwidth threshold setting */
2978 	mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
2979 
2980 	/* auto-learn score delta setting */
2981 	mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
2982 
2983 	/* set refresh timer for altering flows to 1 sec. (unit: 20us) */
2984 	mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
2985 		MTK_PDMA_LRO_ALT_REFRESH_TIMER);
2986 
2987 	/* set HW LRO mode & the max aggregation count for rx packets */
2988 	lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
2989 
2990 	/* the minimal remaining room of SDL0 in RXD for lro aggregation */
2991 	lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
2992 
2993 	/* enable HW LRO */
2994 	lro_ctrl_dw0 |= MTK_LRO_EN;
2995 
2996 	mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
2997 	mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
2998 
2999 	return 0;
3000 }
3001 
3002 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
3003 {
3004 	int i;
3005 	u32 val;
3006 
3007 	/* relinquish lro rings, flush aggregated packets */
3008 	mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
3009 
3010 	/* wait for the relinquish requests to complete */
3011 	for (i = 0; i < 10; i++) {
3012 		val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
3013 		if (val & MTK_LRO_RING_RELINQUISH_DONE) {
3014 			msleep(20);
3015 			continue;
3016 		}
3017 		break;
3018 	}
3019 
3020 	/* invalidate lro rings */
3021 	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
3022 		mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
3023 
3024 	/* disable HW LRO */
3025 	mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
3026 }
3027 
3028 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
3029 {
3030 	u32 reg_val;
3031 
3032 	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
3033 
3034 	/* invalidate the IP setting */
3035 	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
3036 
3037 	mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
3038 
3039 	/* validate the IP setting */
3040 	mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
3041 }
3042 
3043 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
3044 {
3045 	u32 reg_val;
3046 
3047 	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
3048 
3049 	/* invalidate the IP setting */
3050 	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
3051 
3052 	mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
3053 }
3054 
3055 static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
3056 {
3057 	int cnt = 0;
3058 	int i;
3059 
3060 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
3061 		if (mac->hwlro_ip[i])
3062 			cnt++;
3063 	}
3064 
3065 	return cnt;
3066 }
3067 
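/* Program one of the per-MAC HW LRO destination-IP slots. This is reached
 * through the ethtool RX flow classification interface; an illustrative (not
 * driver-specific) invocation would be something like
 *   ethtool -N ethX flow-type tcp4 dst-ip 192.168.1.2 loc 0
 * where only locations 0 and 1 are accepted here.
 */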
3068 static int mtk_hwlro_add_ipaddr(struct net_device *dev,
3069 				struct ethtool_rxnfc *cmd)
3070 {
3071 	struct ethtool_rx_flow_spec *fsp =
3072 		(struct ethtool_rx_flow_spec *)&cmd->fs;
3073 	struct mtk_mac *mac = netdev_priv(dev);
3074 	struct mtk_eth *eth = mac->hw;
3075 	int hwlro_idx;
3076 
3077 	if ((fsp->flow_type != TCP_V4_FLOW) ||
3078 	    (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
3079 	    (fsp->location > 1))
3080 		return -EINVAL;
3081 
3082 	mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
3083 	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
3084 
3085 	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
3086 
3087 	mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
3088 
3089 	return 0;
3090 }
3091 
3092 static int mtk_hwlro_del_ipaddr(struct net_device *dev,
3093 				struct ethtool_rxnfc *cmd)
3094 {
3095 	struct ethtool_rx_flow_spec *fsp =
3096 		(struct ethtool_rx_flow_spec *)&cmd->fs;
3097 	struct mtk_mac *mac = netdev_priv(dev);
3098 	struct mtk_eth *eth = mac->hw;
3099 	int hwlro_idx;
3100 
3101 	if (fsp->location > 1)
3102 		return -EINVAL;
3103 
3104 	mac->hwlro_ip[fsp->location] = 0;
3105 	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
3106 
3107 	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
3108 
3109 	mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
3110 
3111 	return 0;
3112 }
3113 
3114 static void mtk_hwlro_netdev_disable(struct net_device *dev)
3115 {
3116 	struct mtk_mac *mac = netdev_priv(dev);
3117 	struct mtk_eth *eth = mac->hw;
3118 	int i, hwlro_idx;
3119 
3120 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
3121 		mac->hwlro_ip[i] = 0;
3122 		hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
3123 
3124 		mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
3125 	}
3126 
3127 	mac->hwlro_ip_cnt = 0;
3128 }
3129 
3130 static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
3131 				    struct ethtool_rxnfc *cmd)
3132 {
3133 	struct mtk_mac *mac = netdev_priv(dev);
3134 	struct ethtool_rx_flow_spec *fsp =
3135 		(struct ethtool_rx_flow_spec *)&cmd->fs;
3136 
3137 	if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
3138 		return -EINVAL;
3139 
3140 	/* only the TCP destination IPv4 address is meaningful; the other fields are ignored */
3141 	fsp->flow_type = TCP_V4_FLOW;
3142 	fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
3143 	fsp->m_u.tcp_ip4_spec.ip4dst = 0;
3144 
3145 	fsp->h_u.tcp_ip4_spec.ip4src = 0;
3146 	fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
3147 	fsp->h_u.tcp_ip4_spec.psrc = 0;
3148 	fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
3149 	fsp->h_u.tcp_ip4_spec.pdst = 0;
3150 	fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
3151 	fsp->h_u.tcp_ip4_spec.tos = 0;
3152 	fsp->m_u.tcp_ip4_spec.tos = 0xff;
3153 
3154 	return 0;
3155 }
3156 
3157 static int mtk_hwlro_get_fdir_all(struct net_device *dev,
3158 				  struct ethtool_rxnfc *cmd,
3159 				  u32 *rule_locs)
3160 {
3161 	struct mtk_mac *mac = netdev_priv(dev);
3162 	int cnt = 0;
3163 	int i;
3164 
3165 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
3166 		if (cnt == cmd->rule_cnt)
3167 			return -EMSGSIZE;
3168 
3169 		if (mac->hwlro_ip[i]) {
3170 			rule_locs[cnt] = i;
3171 			cnt++;
3172 		}
3173 	}
3174 
3175 	cmd->rule_cnt = cnt;
3176 
3177 	return 0;
3178 }
3179 
3180 static netdev_features_t mtk_fix_features(struct net_device *dev,
3181 					  netdev_features_t features)
3182 {
3183 	if (!(features & NETIF_F_LRO)) {
3184 		struct mtk_mac *mac = netdev_priv(dev);
3185 		int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
3186 
3187 		if (ip_cnt) {
3188 			netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
3189 
3190 			features |= NETIF_F_LRO;
3191 		}
3192 	}
3193 
3194 	return features;
3195 }
3196 
3197 static int mtk_set_features(struct net_device *dev, netdev_features_t features)
3198 {
3199 	netdev_features_t diff = dev->features ^ features;
3200 
3201 	if ((diff & NETIF_F_LRO) && !(features & NETIF_F_LRO))
3202 		mtk_hwlro_netdev_disable(dev);
3203 
3204 	return 0;
3205 }
3206 
3207 /* wait for DMA to finish whatever it is doing before we start using it again */
3208 static int mtk_dma_busy_wait(struct mtk_eth *eth)
3209 {
3210 	unsigned int reg;
3211 	int ret;
3212 	u32 val;
3213 
3214 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3215 		reg = eth->soc->reg_map->qdma.glo_cfg;
3216 	else
3217 		reg = eth->soc->reg_map->pdma.glo_cfg;
3218 
3219 	ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
3220 					!(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
3221 					5, MTK_DMA_BUSY_TIMEOUT_US);
3222 	if (ret)
3223 		dev_err(eth->dev, "DMA init timeout\n");
3224 
3225 	return ret;
3226 }
3227 
3228 static int mtk_dma_init(struct mtk_eth *eth)
3229 {
3230 	int err;
3231 	u32 i;
3232 
3233 	if (mtk_dma_busy_wait(eth))
3234 		return -EBUSY;
3235 
3236 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3237 		/* QDMA needs scratch memory for internal reordering of the
3238 		 * descriptors
3239 		 */
3240 		err = mtk_init_fq_dma(eth);
3241 		if (err)
3242 			return err;
3243 	}
3244 
3245 	err = mtk_tx_alloc(eth);
3246 	if (err)
3247 		return err;
3248 
3249 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3250 		err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
3251 		if (err)
3252 			return err;
3253 	}
3254 
3255 	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
3256 	if (err)
3257 		return err;
3258 
3259 	if (eth->hwlro) {
3260 		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
3261 			err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
3262 			if (err)
3263 				return err;
3264 		}
3265 		err = mtk_hwlro_rx_init(eth);
3266 		if (err)
3267 			return err;
3268 	}
3269 
3270 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3271 		/* Enable random early drop and set drop threshold
3272 		 * automatically
3273 		 */
3274 		mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
3275 			FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
3276 		mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
3277 	}
3278 
3279 	return 0;
3280 }
3281 
3282 static void mtk_dma_free(struct mtk_eth *eth)
3283 {
3284 	const struct mtk_soc_data *soc = eth->soc;
3285 	int i, j, txqs = 1;
3286 
3287 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3288 		txqs = MTK_QDMA_NUM_QUEUES;
3289 
3290 	for (i = 0; i < MTK_MAX_DEVS; i++) {
3291 		if (!eth->netdev[i])
3292 			continue;
3293 
3294 		for (j = 0; j < txqs; j++)
3295 			netdev_tx_reset_subqueue(eth->netdev[i], j);
3296 	}
3297 
3298 	if (eth->scratch_ring) {
3299 		mtk_dma_ring_free(eth, soc->tx.fq_dma_size * soc->tx.desc_size,
3300 				  eth->scratch_ring, eth->phy_scratch_ring,
3301 				  true);
3302 		eth->scratch_ring = NULL;
3303 		eth->phy_scratch_ring = 0;
3304 	}
3305 
3306 	mtk_tx_clean(eth);
3307 	mtk_rx_clean(eth, &eth->rx_ring[0], true);
3308 	mtk_rx_clean(eth, &eth->rx_ring_qdma, false);
3309 
3310 	if (eth->hwlro) {
3311 		mtk_hwlro_rx_uninit(eth);
3312 		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
3313 			mtk_rx_clean(eth, &eth->rx_ring[i], false);
3314 	}
3315 
3316 	for (i = 0; i < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); i++) {
3317 		kfree(eth->scratch_head[i]);
3318 		eth->scratch_head[i] = NULL;
3319 	}
3320 }
3321 
3322 static bool mtk_hw_reset_check(struct mtk_eth *eth)
3323 {
3324 	u32 val = mtk_r32(eth, MTK_INT_STATUS2);
3325 
3326 	return (val & MTK_FE_INT_FQ_EMPTY) || (val & MTK_FE_INT_RFIFO_UF) ||
3327 	       (val & MTK_FE_INT_RFIFO_OV) || (val & MTK_FE_INT_TSO_FAIL) ||
3328 	       (val & MTK_FE_INT_TSO_ALIGN) || (val & MTK_FE_INT_TSO_ILLEGAL);
3329 }
3330 
3331 static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
3332 {
3333 	struct mtk_mac *mac = netdev_priv(dev);
3334 	struct mtk_eth *eth = mac->hw;
3335 
3336 	if (test_bit(MTK_RESETTING, &eth->state))
3337 		return;
3338 
3339 	if (!mtk_hw_reset_check(eth))
3340 		return;
3341 
3342 	eth->netdev[mac->id]->stats.tx_errors++;
3343 	netif_err(eth, tx_err, dev, "transmit timed out\n");
3344 
3345 	schedule_work(&eth->pending_work);
3346 }
3347 
3348 static int mtk_get_irqs(struct platform_device *pdev, struct mtk_eth *eth)
3349 {
3350 	int i;
3351 
3352 	/* future SoCs beginning with MT7988 should use named IRQs in dts */
3353 	eth->irq[MTK_FE_IRQ_TX] = platform_get_irq_byname_optional(pdev, "fe1");
3354 	eth->irq[MTK_FE_IRQ_RX] = platform_get_irq_byname_optional(pdev, "fe2");
3355 	if (eth->irq[MTK_FE_IRQ_TX] >= 0 && eth->irq[MTK_FE_IRQ_RX] >= 0)
3356 		return 0;
3357 
3358 	/* only use legacy mode if platform_get_irq_byname_optional returned -ENXIO */
3359 	if (eth->irq[MTK_FE_IRQ_TX] != -ENXIO)
3360 		return dev_err_probe(&pdev->dev, eth->irq[MTK_FE_IRQ_TX],
3361 				     "Error requesting FE TX IRQ\n");
3362 
3363 	if (eth->irq[MTK_FE_IRQ_RX] != -ENXIO)
3364 		return dev_err_probe(&pdev->dev, eth->irq[MTK_FE_IRQ_RX],
3365 				     "Error requesting FE RX IRQ\n");
3366 
3367 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT))
3368 		dev_warn(&pdev->dev, "legacy DT: missing interrupt-names.");
3369 
3370 	/* legacy way:
3371 	 * On MTK_SHARED_INT SoCs (MT7621 + MT7628) the first IRQ is taken
3372 	 * from devicetree and used for both RX and TX - it is shared.
3373 	 * On SoCs with non-shared IRQs the first entry is not used,
3374 	 * the second is for TX, and the third is for RX.
3375 	 */
3376 	for (i = 0; i < MTK_FE_IRQ_NUM; i++) {
3377 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
3378 			if (i == MTK_FE_IRQ_SHARED)
3379 				eth->irq[MTK_FE_IRQ_SHARED] = platform_get_irq(pdev, i);
3380 			else
3381 				eth->irq[i] = eth->irq[MTK_FE_IRQ_SHARED];
3382 		} else {
3383 			eth->irq[i] = platform_get_irq(pdev, i + 1);
3384 		}
3385 
3386 		if (eth->irq[i] < 0) {
3387 			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
3388 			return -ENXIO;
3389 		}
3390 	}
3391 
3392 	return 0;
3393 }
3394 
3395 static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
3396 {
3397 	struct mtk_eth *eth = _eth;
3398 
3399 	eth->rx_events++;
3400 	if (likely(napi_schedule_prep(&eth->rx_napi))) {
3401 		mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
3402 		__napi_schedule(&eth->rx_napi);
3403 	}
3404 
3405 	return IRQ_HANDLED;
3406 }
3407 
3408 static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
3409 {
3410 	struct mtk_eth *eth = _eth;
3411 
3412 	eth->tx_events++;
3413 	if (likely(napi_schedule_prep(&eth->tx_napi))) {
3414 		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3415 		__napi_schedule(&eth->tx_napi);
3416 	}
3417 
3418 	return IRQ_HANDLED;
3419 }
3420 
3421 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
3422 {
3423 	struct mtk_eth *eth = _eth;
3424 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3425 
3426 	if (mtk_r32(eth, reg_map->pdma.irq_mask) &
3427 	    eth->soc->rx.irq_done_mask) {
3428 		if (mtk_r32(eth, reg_map->pdma.irq_status) &
3429 		    eth->soc->rx.irq_done_mask)
3430 			mtk_handle_irq_rx(irq, _eth);
3431 	}
3432 	if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
3433 		if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
3434 			mtk_handle_irq_tx(irq, _eth);
3435 	}
3436 
3437 	return IRQ_HANDLED;
3438 }
3439 
3440 #ifdef CONFIG_NET_POLL_CONTROLLER
3441 static void mtk_poll_controller(struct net_device *dev)
3442 {
3443 	struct mtk_mac *mac = netdev_priv(dev);
3444 	struct mtk_eth *eth = mac->hw;
3445 
3446 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3447 	mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
3448 	mtk_handle_irq_rx(eth->irq[MTK_FE_IRQ_RX], dev);
3449 	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3450 	mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
3451 }
3452 #endif
3453 
3454 static int mtk_start_dma(struct mtk_eth *eth)
3455 {
3456 	u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
3457 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3458 	int err;
3459 
3460 	err = mtk_dma_init(eth);
3461 	if (err) {
3462 		mtk_dma_free(eth);
3463 		return err;
3464 	}
3465 
3466 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3467 		val = mtk_r32(eth, reg_map->qdma.glo_cfg);
3468 		val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3469 		       MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
3470 		       MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
3471 
3472 		if (mtk_is_netsys_v2_or_greater(eth))
3473 			val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
3474 			       MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
3475 			       MTK_CHK_DDONE_EN;
3476 		else
3477 			val |= MTK_RX_BT_32DWORDS;
3478 		mtk_w32(eth, val, reg_map->qdma.glo_cfg);
3479 
3480 		mtk_w32(eth,
3481 			MTK_RX_DMA_EN | rx_2b_offset |
3482 			MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
3483 			reg_map->pdma.glo_cfg);
3484 	} else {
3485 		mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3486 			MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
3487 			reg_map->pdma.glo_cfg);
3488 	}
3489 
3490 	return 0;
3491 }
3492 
3493 static void mtk_gdm_config(struct mtk_eth *eth, u32 id, u32 config)
3494 {
3495 	u32 val;
3496 
3497 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3498 		return;
3499 
3500 	val = mtk_r32(eth, MTK_GDMA_FWD_CFG(id));
3501 
3502 	/* default setup the forward port to send frame to PDMA */
3503 	val &= ~0xffff;
3504 
3505 	/* Enable RX checksum */
3506 	val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
3507 
3508 	val |= config;
3509 
3510 	if (eth->netdev[id] && netdev_uses_dsa(eth->netdev[id]))
3511 		val |= MTK_GDMA_SPECIAL_TAG;
3512 
3513 	mtk_w32(eth, val, MTK_GDMA_FWD_CFG(id));
3514 }
3515 
3516 
3517 static bool mtk_uses_dsa(struct net_device *dev)
3518 {
3519 #if IS_ENABLED(CONFIG_NET_DSA)
3520 	return netdev_uses_dsa(dev) &&
3521 	       dev->dsa_ptr->tag_ops->proto == DSA_TAG_PROTO_MTK;
3522 #else
3523 	return false;
3524 #endif
3525 }
3526 
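/* Netdev notifier: when the link speed of a DSA user port changes, program
 * the matching QDMA TX queue (port index + 3) with that speed so the shaper
 * tracks the switch port; if the MAC link itself is the slower side, the
 * per-queue limit is cleared (speed = 0) instead.
 */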
3527 static int mtk_device_event(struct notifier_block *n, unsigned long event, void *ptr)
3528 {
3529 	struct mtk_mac *mac = container_of(n, struct mtk_mac, device_notifier);
3530 	struct mtk_eth *eth = mac->hw;
3531 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3532 	struct ethtool_link_ksettings s;
3533 	struct net_device *ldev;
3534 	struct list_head *iter;
3535 	struct dsa_port *dp;
3536 
3537 	if (event != NETDEV_CHANGE)
3538 		return NOTIFY_DONE;
3539 
3540 	netdev_for_each_lower_dev(dev, ldev, iter) {
3541 		if (netdev_priv(ldev) == mac)
3542 			goto found;
3543 	}
3544 
3545 	return NOTIFY_DONE;
3546 
3547 found:
3548 	if (!dsa_user_dev_check(dev))
3549 		return NOTIFY_DONE;
3550 
3551 	if (__ethtool_get_link_ksettings(dev, &s))
3552 		return NOTIFY_DONE;
3553 
3554 	if (s.base.speed == 0 || s.base.speed == ((__u32)-1))
3555 		return NOTIFY_DONE;
3556 
3557 	dp = dsa_port_from_netdev(dev);
3558 	if (dp->index >= MTK_QDMA_NUM_QUEUES)
3559 		return NOTIFY_DONE;
3560 
3561 	if (mac->speed > 0 && mac->speed <= s.base.speed)
3562 		s.base.speed = 0;
3563 
3564 	mtk_set_queue_speed(eth, dp->index + 3, s.base.speed);
3565 
3566 	return NOTIFY_DONE;
3567 }
3568 
3569 static int mtk_open(struct net_device *dev)
3570 {
3571 	struct mtk_mac *mac = netdev_priv(dev);
3572 	struct mtk_eth *eth = mac->hw;
3573 	struct mtk_mac *target_mac;
3574 	int i, err, ppe_num;
3575 
3576 	ppe_num = eth->soc->ppe_num;
3577 
3578 	err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
3579 	if (err) {
3580 		netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
3581 			   err);
3582 		return err;
3583 	}
3584 
3585 	/* we run 2 netdevs on the same dma ring so we only bring it up once */
3586 	if (!refcount_read(&eth->dma_refcnt)) {
3587 		const struct mtk_soc_data *soc = eth->soc;
3588 		u32 gdm_config;
3589 		int i;
3590 
3591 		err = mtk_start_dma(eth);
3592 		if (err) {
3593 			phylink_disconnect_phy(mac->phylink);
3594 			return err;
3595 		}
3596 
3597 		for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3598 			mtk_ppe_start(eth->ppe[i]);
3599 
3600 		for (i = 0; i < MTK_MAX_DEVS; i++) {
3601 			if (!eth->netdev[i])
3602 				continue;
3603 
3604 			target_mac = netdev_priv(eth->netdev[i]);
3605 			if (!soc->offload_version) {
3606 				target_mac->ppe_idx = 0;
3607 				gdm_config = MTK_GDMA_TO_PDMA;
3608 			} else if (ppe_num >= 3 && target_mac->id == 2) {
3609 				target_mac->ppe_idx = 2;
3610 				gdm_config = soc->reg_map->gdma_to_ppe[2];
3611 			} else if (ppe_num >= 2 && target_mac->id == 1) {
3612 				target_mac->ppe_idx = 1;
3613 				gdm_config = soc->reg_map->gdma_to_ppe[1];
3614 			} else {
3615 				target_mac->ppe_idx = 0;
3616 				gdm_config = soc->reg_map->gdma_to_ppe[0];
3617 			}
3618 			mtk_gdm_config(eth, target_mac->id, gdm_config);
3619 		}
3620 
3621 		napi_enable(&eth->tx_napi);
3622 		napi_enable(&eth->rx_napi);
3623 		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3624 		mtk_rx_irq_enable(eth, soc->rx.irq_done_mask);
3625 		refcount_set(&eth->dma_refcnt, 1);
3626 	} else {
3627 		refcount_inc(&eth->dma_refcnt);
3628 	}
3629 
3630 	phylink_start(mac->phylink);
3631 	netif_tx_start_all_queues(dev);
3632 
3633 	if (mtk_is_netsys_v2_or_greater(eth))
3634 		return 0;
3635 
3636 	if (mtk_uses_dsa(dev) && !eth->prog) {
3637 		for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
3638 			struct metadata_dst *md_dst = eth->dsa_meta[i];
3639 
3640 			if (md_dst)
3641 				continue;
3642 
3643 			md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
3644 						    GFP_KERNEL);
3645 			if (!md_dst)
3646 				return -ENOMEM;
3647 
3648 			md_dst->u.port_info.port_id = i;
3649 			eth->dsa_meta[i] = md_dst;
3650 		}
3651 	} else {
3652 		/* Hardware DSA untagging and VLAN RX offloading need to be
3653 		 * disabled if at least one MAC does not use DSA.
3654 		 */
3655 		u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3656 
3657 		val &= ~MTK_CDMP_STAG_EN;
3658 		mtk_w32(eth, val, MTK_CDMP_IG_CTRL);
3659 
3660 		mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
3661 	}
3662 
3663 	return 0;
3664 }
3665 
3666 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
3667 {
3668 	u32 val;
3669 	int i;
3670 
3671 	/* stop the dma engine */
3672 	spin_lock_bh(&eth->page_lock);
3673 	val = mtk_r32(eth, glo_cfg);
3674 	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
3675 		glo_cfg);
3676 	spin_unlock_bh(&eth->page_lock);
3677 
3678 	/* wait for dma stop */
3679 	for (i = 0; i < 10; i++) {
3680 		val = mtk_r32(eth, glo_cfg);
3681 		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
3682 			msleep(20);
3683 			continue;
3684 		}
3685 		break;
3686 	}
3687 }
3688 
3689 static int mtk_stop(struct net_device *dev)
3690 {
3691 	struct mtk_mac *mac = netdev_priv(dev);
3692 	struct mtk_eth *eth = mac->hw;
3693 	int i;
3694 
3695 	phylink_stop(mac->phylink);
3696 
3697 	netif_tx_disable(dev);
3698 
3699 	phylink_disconnect_phy(mac->phylink);
3700 
3701 	/* only shutdown DMA if this is the last user */
3702 	if (!refcount_dec_and_test(&eth->dma_refcnt))
3703 		return 0;
3704 
3705 	for (i = 0; i < MTK_MAX_DEVS; i++)
3706 		mtk_gdm_config(eth, i, MTK_GDMA_DROP_ALL);
3707 
3708 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3709 	mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
3710 	napi_disable(&eth->tx_napi);
3711 	napi_disable(&eth->rx_napi);
3712 
3713 	cancel_work_sync(&eth->rx_dim.work);
3714 	cancel_work_sync(&eth->tx_dim.work);
3715 
3716 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3717 		mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
3718 	mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
3719 
3720 	mtk_dma_free(eth);
3721 
3722 	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3723 		mtk_ppe_stop(eth->ppe[i]);
3724 
3725 	return 0;
3726 }
3727 
3728 static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
3729 			 struct netlink_ext_ack *extack)
3730 {
3731 	struct mtk_mac *mac = netdev_priv(dev);
3732 	struct mtk_eth *eth = mac->hw;
3733 	struct bpf_prog *old_prog;
3734 	bool need_update;
3735 
3736 	if (eth->hwlro) {
3737 		NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
3738 		return -EOPNOTSUPP;
3739 	}
3740 
3741 	if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
3742 		NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
3743 		return -EOPNOTSUPP;
3744 	}
3745 
3746 	need_update = !!eth->prog != !!prog;
3747 	if (netif_running(dev) && need_update)
3748 		mtk_stop(dev);
3749 
3750 	old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
3751 
3752 	if (netif_running(dev) && need_update) {
3753 		int err;
3754 
3755 		err = mtk_open(dev);
3756 		if (err) {
3757 			rcu_assign_pointer(eth->prog, old_prog);
3758 
3759 			return err;
3760 		}
3761 	}
3762 
3763 	if (old_prog)
3764 		bpf_prog_put(old_prog);
3765 
3766 	return 0;
3767 }
3768 
3769 static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3770 {
3771 	switch (xdp->command) {
3772 	case XDP_SETUP_PROG:
3773 		return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
3774 	default:
3775 		return -EINVAL;
3776 	}
3777 }
3778 
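/* Pulse the requested bits in ETHSYS_RSTCTRL: assert them, wait ~1ms,
 * deassert them, then give the blocks another 10ms to come back up.
 */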
3779 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
3780 {
3781 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3782 			   reset_bits,
3783 			   reset_bits);
3784 
3785 	usleep_range(1000, 1100);
3786 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3787 			   reset_bits,
3788 			   ~reset_bits);
3789 	mdelay(10);
3790 }
3791 
3792 static void mtk_clk_disable(struct mtk_eth *eth)
3793 {
3794 	int clk;
3795 
3796 	for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
3797 		clk_disable_unprepare(eth->clks[clk]);
3798 }
3799 
3800 static int mtk_clk_enable(struct mtk_eth *eth)
3801 {
3802 	int clk, ret;
3803 
3804 	for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
3805 		ret = clk_prepare_enable(eth->clks[clk]);
3806 		if (ret)
3807 			goto err_disable_clks;
3808 	}
3809 
3810 	return 0;
3811 
3812 err_disable_clks:
3813 	while (--clk >= 0)
3814 		clk_disable_unprepare(eth->clks[clk]);
3815 
3816 	return ret;
3817 }
3818 
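/* Net DIM RX worker: translate the currently selected moderation profile
 * into the PDMA (and, if present, QDMA) delay-interrupt fields, i.e. the
 * maximum packet count and delay (in 20us units) before an RX IRQ fires.
 */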
3819 static void mtk_dim_rx(struct work_struct *work)
3820 {
3821 	struct dim *dim = container_of(work, struct dim, work);
3822 	struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
3823 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3824 	struct dim_cq_moder cur_profile;
3825 	u32 val, cur;
3826 
3827 	cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
3828 						dim->profile_ix);
3829 	spin_lock_bh(&eth->dim_lock);
3830 
3831 	val = mtk_r32(eth, reg_map->pdma.delay_irq);
3832 	val &= MTK_PDMA_DELAY_TX_MASK;
3833 	val |= MTK_PDMA_DELAY_RX_EN;
3834 
3835 	cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3836 	val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
3837 
3838 	cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3839 	val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
3840 
3841 	mtk_w32(eth, val, reg_map->pdma.delay_irq);
3842 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3843 		mtk_w32(eth, val, reg_map->qdma.delay_irq);
3844 
3845 	spin_unlock_bh(&eth->dim_lock);
3846 
3847 	dim->state = DIM_START_MEASURE;
3848 }
3849 
3850 static void mtk_dim_tx(struct work_struct *work)
3851 {
3852 	struct dim *dim = container_of(work, struct dim, work);
3853 	struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
3854 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3855 	struct dim_cq_moder cur_profile;
3856 	u32 val, cur;
3857 
3858 	cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
3859 						dim->profile_ix);
3860 	spin_lock_bh(&eth->dim_lock);
3861 
3862 	val = mtk_r32(eth, reg_map->pdma.delay_irq);
3863 	val &= MTK_PDMA_DELAY_RX_MASK;
3864 	val |= MTK_PDMA_DELAY_TX_EN;
3865 
3866 	cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3867 	val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;
3868 
3869 	cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3870 	val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
3871 
3872 	mtk_w32(eth, val, reg_map->pdma.delay_irq);
3873 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3874 		mtk_w32(eth, val, reg_map->qdma.delay_irq);
3875 
3876 	spin_unlock_bh(&eth->dim_lock);
3877 
3878 	dim->state = DIM_START_MEASURE;
3879 }
3880 
3881 static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val)
3882 {
3883 	struct mtk_eth *eth = mac->hw;
3884 	u32 mcr_cur, mcr_new;
3885 
3886 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3887 		return;
3888 
3889 	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
3890 	mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
3891 
3892 	if (val <= 1518)
3893 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
3894 	else if (val <= 1536)
3895 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
3896 	else if (val <= 1552)
3897 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
3898 	else
3899 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
3900 
3901 	if (mcr_new != mcr_cur)
3902 		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
3903 }
3904 
3905 static void mtk_hw_reset(struct mtk_eth *eth)
3906 {
3907 	u32 val;
3908 
3909 	if (mtk_is_netsys_v2_or_greater(eth))
3910 		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
3911 
3912 	if (mtk_is_netsys_v3_or_greater(eth)) {
3913 		val = RSTCTRL_PPE0_V3;
3914 
3915 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3916 			val |= RSTCTRL_PPE1_V3;
3917 
3918 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
3919 			val |= RSTCTRL_PPE2;
3920 
3921 		val |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
3922 	} else if (mtk_is_netsys_v2_or_greater(eth)) {
3923 		val = RSTCTRL_PPE0_V2;
3924 
3925 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3926 			val |= RSTCTRL_PPE1;
3927 	} else {
3928 		val = RSTCTRL_PPE0;
3929 	}
3930 
3931 	ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
3932 
3933 	if (mtk_is_netsys_v3_or_greater(eth))
3934 		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
3935 			     0x6f8ff);
3936 	else if (mtk_is_netsys_v2_or_greater(eth))
3937 		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
3938 			     0x3ffffff);
3939 }
3940 
3941 static u32 mtk_hw_reset_read(struct mtk_eth *eth)
3942 {
3943 	u32 val;
3944 
3945 	regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
3946 	return val;
3947 }
3948 
3949 static void mtk_hw_warm_reset(struct mtk_eth *eth)
3950 {
3951 	u32 rst_mask, val;
3952 
3953 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE,
3954 			   RSTCTRL_FE);
3955 	if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val,
3956 				      val & RSTCTRL_FE, 1, 1000)) {
3957 		dev_err(eth->dev, "warm reset failed\n");
3958 		mtk_hw_reset(eth);
3959 		return;
3960 	}
3961 
3962 	if (mtk_is_netsys_v3_or_greater(eth)) {
3963 		rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V3;
3964 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3965 			rst_mask |= RSTCTRL_PPE1_V3;
3966 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
3967 			rst_mask |= RSTCTRL_PPE2;
3968 
3969 		rst_mask |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
3970 	} else if (mtk_is_netsys_v2_or_greater(eth)) {
3971 		rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2;
3972 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3973 			rst_mask |= RSTCTRL_PPE1;
3974 	} else {
3975 		rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0;
3976 	}
3977 
3978 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask);
3979 
3980 	udelay(1);
3981 	val = mtk_hw_reset_read(eth);
3982 	if (!(val & rst_mask))
3983 		dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n",
3984 			val, rst_mask);
3985 
3986 	rst_mask |= RSTCTRL_FE;
3987 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask);
3988 
3989 	udelay(1);
3990 	val = mtk_hw_reset_read(eth);
3991 	if (val & rst_mask)
3992 		dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n",
3993 			val, rst_mask);
3994 }
3995 
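/* Heuristic DMA stall detection used by the reset monitor work: a WDMA,
 * QDMA or ADMA hang condition must be seen on three consecutive polls
 * before a frame engine reset is requested.
 */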
3996 static bool mtk_hw_check_dma_hang(struct mtk_eth *eth)
3997 {
3998 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3999 	bool gmac1_tx, gmac2_tx, gdm1_tx, gdm2_tx;
4000 	bool oq_hang, cdm1_busy, adma_busy;
4001 	bool wtx_busy, cdm_full, oq_free;
4002 	u32 wdidx, val, gdm1_fc, gdm2_fc;
4003 	bool qfsm_hang, qfwd_hang;
4004 	bool ret = false;
4005 
4006 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4007 		return false;
4008 
4009 	/* WDMA sanity checks */
4010 	wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc);
4011 
4012 	val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204);
4013 	wtx_busy = FIELD_GET(MTK_TX_DMA_BUSY, val);
4014 
4015 	val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230);
4016 	cdm_full = !FIELD_GET(MTK_CDM_TXFIFO_RDY, val);
4017 
4018 	oq_free  = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) &&
4019 		    !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) &&
4020 		    !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16)));
4021 
4022 	if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) {
4023 		if (++eth->reset.wdma_hang_count > 2) {
4024 			eth->reset.wdma_hang_count = 0;
4025 			ret = true;
4026 		}
4027 		goto out;
4028 	}
4029 
4030 	/* QDMA sanity checks */
4031 	qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234);
4032 	qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308);
4033 
4034 	gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0;
4035 	gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0;
4036 	gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1;
4037 	gmac2_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1;
4038 	gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24);
4039 	gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64);
4040 
4041 	if (qfsm_hang && qfwd_hang &&
4042 	    ((gdm1_tx && gmac1_tx && gdm1_fc < 1) ||
4043 	     (gdm2_tx && gmac2_tx && gdm2_fc < 1))) {
4044 		if (++eth->reset.qdma_hang_count > 2) {
4045 			eth->reset.qdma_hang_count = 0;
4046 			ret = true;
4047 		}
4048 		goto out;
4049 	}
4050 
4051 	/* ADMA sanity checks */
4052 	oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0));
4053 	cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16));
4054 	adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) &&
4055 		    !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6));
4056 
4057 	if (oq_hang && cdm1_busy && adma_busy) {
4058 		if (++eth->reset.adma_hang_count > 2) {
4059 			eth->reset.adma_hang_count = 0;
4060 			ret = true;
4061 		}
4062 		goto out;
4063 	}
4064 
4065 	eth->reset.wdma_hang_count = 0;
4066 	eth->reset.qdma_hang_count = 0;
4067 	eth->reset.adma_hang_count = 0;
4068 out:
4069 	eth->reset.wdidx = wdidx;
4070 
4071 	return ret;
4072 }
4073 
4074 static void mtk_hw_reset_monitor_work(struct work_struct *work)
4075 {
4076 	struct delayed_work *del_work = to_delayed_work(work);
4077 	struct mtk_eth *eth = container_of(del_work, struct mtk_eth,
4078 					   reset.monitor_work);
4079 
4080 	if (test_bit(MTK_RESETTING, &eth->state))
4081 		goto out;
4082 
4083 	/* DMA stuck checks */
4084 	if (mtk_hw_check_dma_hang(eth))
4085 		schedule_work(&eth->pending_work);
4086 
4087 out:
4088 	schedule_delayed_work(&eth->reset.monitor_work,
4089 			      MTK_DMA_MONITOR_TIMEOUT);
4090 }
4091 
4092 static int mtk_hw_init(struct mtk_eth *eth, bool reset)
4093 {
4094 	u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
4095 		       ETHSYS_DMA_AG_MAP_PPE;
4096 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
4097 	int i, val, ret;
4098 
4099 	if (!reset && test_and_set_bit(MTK_HW_INIT, &eth->state))
4100 		return 0;
4101 
4102 	if (!reset) {
4103 		pm_runtime_enable(eth->dev);
4104 		pm_runtime_get_sync(eth->dev);
4105 
4106 		ret = mtk_clk_enable(eth);
4107 		if (ret)
4108 			goto err_disable_pm;
4109 	}
4110 
4111 	if (eth->ethsys)
4112 		regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
4113 				   of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
4114 
4115 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4116 		ret = device_reset(eth->dev);
4117 		if (ret) {
4118 			dev_err(eth->dev, "MAC reset failed!\n");
4119 			goto err_disable_pm;
4120 		}
4121 
4122 		/* set interrupt delays based on current Net DIM sample */
4123 		mtk_dim_rx(&eth->rx_dim.work);
4124 		mtk_dim_tx(&eth->tx_dim.work);
4125 
4126 		/* disable delay and normal interrupt */
4127 		mtk_tx_irq_disable(eth, ~0);
4128 		mtk_rx_irq_disable(eth, ~0);
4129 
4130 		return 0;
4131 	}
4132 
4133 	msleep(100);
4134 
4135 	if (reset)
4136 		mtk_hw_warm_reset(eth);
4137 	else
4138 		mtk_hw_reset(eth);
4139 
4140 	/* No MT7628/88 support yet */
4141 	if (reset && !MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4142 		mtk_mdio_config(eth);
4143 
4144 	if (mtk_is_netsys_v3_or_greater(eth)) {
4145 		/* Set FE to PDMAv2 if necessary */
4146 		val = mtk_r32(eth, MTK_FE_GLO_MISC);
4147 		mtk_w32(eth,  val | BIT(4), MTK_FE_GLO_MISC);
4148 	}
4149 
4150 	if (eth->pctl) {
4151 		/* Set GE2 driving and slew rate */
4152 		regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
4153 
4154 		/* set GE2 TDSEL */
4155 		regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
4156 
4157 		/* set GE2 TUNE */
4158 		regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
4159 	}
4160 
4161 	/* Set link down as the default for each GMAC. Each MAC's MCR will be
4162 	 * set up with the more appropriate value when mtk_mac_config() is
4163 	 * invoked.
4164 	 */
4165 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4166 		struct net_device *dev = eth->netdev[i];
4167 
4168 		if (!dev)
4169 			continue;
4170 
4171 		mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
4172 		mtk_set_mcr_max_rx(netdev_priv(dev),
4173 				   dev->mtu + MTK_RX_ETH_HLEN);
4174 	}
4175 
4176 	/* Tell the CDM to parse the MTK special tag on frames coming from
4177 	 * the CPU; this also works for untagged packets.
4178 	 */
4179 	val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
4180 	mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
4181 	if (mtk_is_netsys_v1(eth)) {
4182 		val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
4183 		mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
4184 
4185 		mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
4186 	}
4187 
4188 	/* set interrupt delays based on current Net DIM sample */
4189 	mtk_dim_rx(&eth->rx_dim.work);
4190 	mtk_dim_tx(&eth->tx_dim.work);
4191 
4192 	/* disable delay and normal interrupt */
4193 	mtk_tx_irq_disable(eth, ~0);
4194 	mtk_rx_irq_disable(eth, ~0);
4195 
4196 	/* FE int grouping */
4197 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
4198 	mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.int_grp + 4);
4199 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
4200 	mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->qdma.int_grp + 4);
4201 	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
4202 
4203 	if (mtk_is_netsys_v3_or_greater(eth)) {
4204 		/* PSE dummy page mechanism */
4205 		mtk_w32(eth, PSE_DUMMY_WORK_GDM(1) | PSE_DUMMY_WORK_GDM(2) |
4206 			PSE_DUMMY_WORK_GDM(3) | DUMMY_PAGE_THR, PSE_DUMY_REQ);
4207 
4208 		/* PSE free buffer drop threshold */
4209 		mtk_w32(eth, 0x00600009, PSE_IQ_REV(8));
4210 
4211 		/* PSE should not drop port8, port9 and port13 packets from
4212 		 * WDMA Tx
4213 		 */
4214 		mtk_w32(eth, 0x00002300, PSE_DROP_CFG);
4215 
4216 		/* PSE should drop packets to port8, port9 and port13 on WDMA Rx
4217 		 * ring full
4218 		 */
4219 		mtk_w32(eth, 0x00002300, PSE_PPE_DROP(0));
4220 		mtk_w32(eth, 0x00002300, PSE_PPE_DROP(1));
4221 		mtk_w32(eth, 0x00002300, PSE_PPE_DROP(2));
4222 
4223 		/* GDM and CDM Threshold */
4224 		mtk_w32(eth, 0x08000707, MTK_CDMW0_THRES);
4225 		mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);
4226 
4227 		/* Disable GDM1 RX CRC stripping */
4228 		mtk_m32(eth, MTK_GDMA_STRP_CRC, 0, MTK_GDMA_FWD_CFG(0));
4229 
4230 		/* The PSE GDM3 MIB counters have incorrect hw default values,
4231 		 * so the driver reads and clears them beforehand to keep
4232 		 * ethtool from retrieving wrong MIB values.
4233 		 */
4234 		for (i = 0; i < 0x80; i += 0x4)
4235 			mtk_r32(eth, reg_map->gdm1_cnt + 0x100 + i);
4236 	} else if (!mtk_is_netsys_v1(eth)) {
4237 		/* PSE should not drop port8 and port9 packets from WDMA Tx */
4238 		mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
4239 
4240 		/* PSE should drop packets to port 8/9 on WDMA Rx ring full */
4241 		mtk_w32(eth, 0x00000300, PSE_PPE_DROP(0));
4242 
4243 		/* PSE Free Queue Flow Control  */
4244 		mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
4245 
4246 		/* PSE config input queue threshold */
4247 		mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
4248 		mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
4249 		mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
4250 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
4251 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
4252 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
4253 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
4254 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
4255 
4256 		/* PSE config output queue threshold */
4257 		mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
4258 		mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
4259 		mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
4260 		mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
4261 		mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
4262 		mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
4263 		mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
4264 		mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
4265 
4266 		/* GDM and CDM Threshold */
4267 		mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
4268 		mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
4269 		mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
4270 		mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
4271 		mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
4272 		mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
4273 	}
4274 
4275 	return 0;
4276 
4277 err_disable_pm:
4278 	if (!reset) {
4279 		pm_runtime_put_sync(eth->dev);
4280 		pm_runtime_disable(eth->dev);
4281 	}
4282 
4283 	return ret;
4284 }
4285 
4286 static int mtk_hw_deinit(struct mtk_eth *eth)
4287 {
4288 	if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
4289 		return 0;
4290 
4291 	mtk_clk_disable(eth);
4292 
4293 	pm_runtime_put_sync(eth->dev);
4294 	pm_runtime_disable(eth->dev);
4295 
4296 	return 0;
4297 }
4298 
4299 static void mtk_uninit(struct net_device *dev)
4300 {
4301 	struct mtk_mac *mac = netdev_priv(dev);
4302 	struct mtk_eth *eth = mac->hw;
4303 
4304 	phylink_disconnect_phy(mac->phylink);
4305 	mtk_tx_irq_disable(eth, ~0);
4306 	mtk_rx_irq_disable(eth, ~0);
4307 }
4308 
4309 static int mtk_change_mtu(struct net_device *dev, int new_mtu)
4310 {
4311 	int length = new_mtu + MTK_RX_ETH_HLEN;
4312 	struct mtk_mac *mac = netdev_priv(dev);
4313 	struct mtk_eth *eth = mac->hw;
4314 
4315 	if (rcu_access_pointer(eth->prog) &&
4316 	    length > MTK_PP_MAX_BUF_SIZE) {
4317 		netdev_err(dev, "Invalid MTU for XDP mode\n");
4318 		return -EINVAL;
4319 	}
4320 
4321 	mtk_set_mcr_max_rx(mac, length);
4322 	WRITE_ONCE(dev->mtu, new_mtu);
4323 
4324 	return 0;
4325 }
4326 
4327 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4328 {
4329 	struct mtk_mac *mac = netdev_priv(dev);
4330 
4331 	switch (cmd) {
4332 	case SIOCGMIIPHY:
4333 	case SIOCGMIIREG:
4334 	case SIOCSMIIREG:
4335 		return phylink_mii_ioctl(mac->phylink, ifr, cmd);
4336 	default:
4337 		break;
4338 	}
4339 
4340 	return -EOPNOTSUPP;
4341 }
4342 
4343 static void mtk_prepare_for_reset(struct mtk_eth *eth)
4344 {
4345 	u32 val;
4346 	int i;
4347 
4348 	/* set FE PPE ports link down */
4349 	for (i = MTK_GMAC1_ID;
4350 	     i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID);
4351 	     i += 2) {
4352 		val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) | MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT);
4353 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
4354 			val |= MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT);
4355 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
4356 			val |= MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT);
4357 		mtk_w32(eth, val, MTK_FE_GLO_CFG(i));
4358 	}
4359 
4360 	/* adjust PPE configurations to prepare for reset */
4361 	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
4362 		mtk_ppe_prepare_reset(eth->ppe[i]);
4363 
4364 	/* disable NETSYS interrupts */
4365 	mtk_w32(eth, 0, MTK_FE_INT_ENABLE);
4366 
4367 	/* force link down GMAC */
4368 	for (i = 0; i < 2; i++) {
4369 		val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK;
4370 		mtk_w32(eth, val, MTK_MAC_MCR(i));
4371 	}
4372 }
4373 
4374 static void mtk_pending_work(struct work_struct *work)
4375 {
4376 	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
4377 	unsigned long restart = 0;
4378 	u32 val;
4379 	int i;
4380 
4381 	rtnl_lock();
4382 	set_bit(MTK_RESETTING, &eth->state);
4383 
4384 	mtk_prepare_for_reset(eth);
4385 	mtk_wed_fe_reset();
4386 	/* Run the reset preliminary configuration again to avoid any possible
4387 	 * race during the FE reset, since it may run with the RTNL lock released.
4388 	 */
4389 	mtk_prepare_for_reset(eth);
4390 
4391 	/* stop all devices to make sure that dma is properly shut down */
4392 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4393 		if (!eth->netdev[i] || !netif_running(eth->netdev[i]))
4394 			continue;
4395 
4396 		mtk_stop(eth->netdev[i]);
4397 		__set_bit(i, &restart);
4398 	}
4399 
4400 	usleep_range(15000, 16000);
4401 
4402 	if (eth->dev->pins)
4403 		pinctrl_select_state(eth->dev->pins->p,
4404 				     eth->dev->pins->default_state);
4405 	mtk_hw_init(eth, true);
4406 
4407 	/* restart DMA and enable IRQs */
4408 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4409 		if (!eth->netdev[i] || !test_bit(i, &restart))
4410 			continue;
4411 
4412 		if (mtk_open(eth->netdev[i])) {
4413 			netif_alert(eth, ifup, eth->netdev[i],
4414 				    "Driver up/down cycle failed\n");
4415 			dev_close(eth->netdev[i]);
4416 		}
4417 	}
4418 
4419 	/* set FE PPE ports link up */
4420 	for (i = MTK_GMAC1_ID;
4421 	     i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID);
4422 	     i += 2) {
4423 		val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) & ~MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT);
4424 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
4425 			val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT);
4426 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
4427 			val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT);
4428 
4429 		mtk_w32(eth, val, MTK_FE_GLO_CFG(i));
4430 	}
4431 
4432 	clear_bit(MTK_RESETTING, &eth->state);
4433 
4434 	mtk_wed_fe_reset_complete();
4435 
4436 	rtnl_unlock();
4437 }
4438 
4439 static int mtk_free_dev(struct mtk_eth *eth)
4440 {
4441 	int i;
4442 
4443 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4444 		if (!eth->netdev[i])
4445 			continue;
4446 		free_netdev(eth->netdev[i]);
4447 	}
4448 
4449 	for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
4450 		if (!eth->dsa_meta[i])
4451 			break;
4452 		metadata_dst_free(eth->dsa_meta[i]);
4453 	}
4454 
4455 	return 0;
4456 }
4457 
4458 static int mtk_unreg_dev(struct mtk_eth *eth)
4459 {
4460 	int i;
4461 
4462 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4463 		struct mtk_mac *mac;
4464 		if (!eth->netdev[i])
4465 			continue;
4466 		mac = netdev_priv(eth->netdev[i]);
4467 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4468 			unregister_netdevice_notifier(&mac->device_notifier);
4469 		unregister_netdev(eth->netdev[i]);
4470 	}
4471 
4472 	return 0;
4473 }
4474 
4475 static void mtk_sgmii_destroy(struct mtk_eth *eth)
4476 {
4477 	int i;
4478 
4479 	for (i = 0; i < MTK_MAX_DEVS; i++)
4480 		mtk_pcs_lynxi_destroy(eth->sgmii_pcs[i]);
4481 }
4482 
4483 static int mtk_cleanup(struct mtk_eth *eth)
4484 {
4485 	mtk_sgmii_destroy(eth);
4486 	mtk_unreg_dev(eth);
4487 	mtk_free_dev(eth);
4488 	cancel_work_sync(&eth->pending_work);
4489 	cancel_delayed_work_sync(&eth->reset.monitor_work);
4490 
4491 	return 0;
4492 }
4493 
4494 static int mtk_get_link_ksettings(struct net_device *ndev,
4495 				  struct ethtool_link_ksettings *cmd)
4496 {
4497 	struct mtk_mac *mac = netdev_priv(ndev);
4498 
4499 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4500 		return -EBUSY;
4501 
4502 	return phylink_ethtool_ksettings_get(mac->phylink, cmd);
4503 }
4504 
4505 static int mtk_set_link_ksettings(struct net_device *ndev,
4506 				  const struct ethtool_link_ksettings *cmd)
4507 {
4508 	struct mtk_mac *mac = netdev_priv(ndev);
4509 
4510 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4511 		return -EBUSY;
4512 
4513 	return phylink_ethtool_ksettings_set(mac->phylink, cmd);
4514 }
4515 
4516 static void mtk_get_drvinfo(struct net_device *dev,
4517 			    struct ethtool_drvinfo *info)
4518 {
4519 	struct mtk_mac *mac = netdev_priv(dev);
4520 
4521 	strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
4522 	strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
4523 	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
4524 }
4525 
4526 static u32 mtk_get_msglevel(struct net_device *dev)
4527 {
4528 	struct mtk_mac *mac = netdev_priv(dev);
4529 
4530 	return mac->hw->msg_enable;
4531 }
4532 
4533 static void mtk_set_msglevel(struct net_device *dev, u32 value)
4534 {
4535 	struct mtk_mac *mac = netdev_priv(dev);
4536 
4537 	mac->hw->msg_enable = value;
4538 }
4539 
4540 static int mtk_nway_reset(struct net_device *dev)
4541 {
4542 	struct mtk_mac *mac = netdev_priv(dev);
4543 
4544 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4545 		return -EBUSY;
4546 
4547 	if (!mac->phylink)
4548 		return -ENOTSUPP;
4549 
4550 	return phylink_ethtool_nway_reset(mac->phylink);
4551 }
4552 
4553 static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4554 {
4555 	int i;
4556 
4557 	switch (stringset) {
4558 	case ETH_SS_STATS: {
4559 		struct mtk_mac *mac = netdev_priv(dev);
4560 
4561 		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
4562 			ethtool_puts(&data, mtk_ethtool_stats[i].str);
4563 		if (mtk_page_pool_enabled(mac->hw))
4564 			page_pool_ethtool_stats_get_strings(data);
4565 		break;
4566 	}
4567 	default:
4568 		break;
4569 	}
4570 }
4571 
4572 static int mtk_get_sset_count(struct net_device *dev, int sset)
4573 {
4574 	switch (sset) {
4575 	case ETH_SS_STATS: {
4576 		int count = ARRAY_SIZE(mtk_ethtool_stats);
4577 		struct mtk_mac *mac = netdev_priv(dev);
4578 
4579 		if (mtk_page_pool_enabled(mac->hw))
4580 			count += page_pool_ethtool_stats_get_count();
4581 		return count;
4582 	}
4583 	default:
4584 		return -EOPNOTSUPP;
4585 	}
4586 }
4587 
4588 static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data)
4589 {
4590 	struct page_pool_stats stats = {};
4591 	int i;
4592 
4593 	for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) {
4594 		struct mtk_rx_ring *ring = &eth->rx_ring[i];
4595 
4596 		if (!ring->page_pool)
4597 			continue;
4598 
4599 		page_pool_get_stats(ring->page_pool, &stats);
4600 	}
4601 	page_pool_ethtool_stats_get(data, &stats);
4602 }
4603 
4604 static void mtk_get_ethtool_stats(struct net_device *dev,
4605 				  struct ethtool_stats *stats, u64 *data)
4606 {
4607 	struct mtk_mac *mac = netdev_priv(dev);
4608 	struct mtk_hw_stats *hwstats = mac->hw_stats;
4609 	u64 *data_src, *data_dst;
4610 	unsigned int start;
4611 	int i;
4612 
4613 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4614 		return;
4615 
4616 	if (netif_running(dev) && netif_device_present(dev)) {
4617 		if (spin_trylock_bh(&hwstats->stats_lock)) {
4618 			mtk_stats_update_mac(mac);
4619 			spin_unlock_bh(&hwstats->stats_lock);
4620 		}
4621 	}
4622 
4623 	data_src = (u64 *)hwstats;
4624 
4625 	do {
4626 		data_dst = data;
4627 		start = u64_stats_fetch_begin(&hwstats->syncp);
4628 
4629 		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
4630 			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
4631 		if (mtk_page_pool_enabled(mac->hw))
4632 			mtk_ethtool_pp_stats(mac->hw, data_dst);
4633 	} while (u64_stats_fetch_retry(&hwstats->syncp, start));
4634 }
4635 
4636 static u32 mtk_get_rx_ring_count(struct net_device *dev)
4637 {
4638 	if (dev->hw_features & NETIF_F_LRO)
4639 		return MTK_MAX_RX_RING_NUM;
4640 
4641 	return 0;
4642 }
4643 
4644 static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
4645 			 u32 *rule_locs)
4646 {
4647 	int ret = -EOPNOTSUPP;
4648 
4649 	switch (cmd->cmd) {
4650 	case ETHTOOL_GRXCLSRLCNT:
4651 		if (dev->hw_features & NETIF_F_LRO) {
4652 			struct mtk_mac *mac = netdev_priv(dev);
4653 
4654 			cmd->rule_cnt = mac->hwlro_ip_cnt;
4655 			ret = 0;
4656 		}
4657 		break;
4658 	case ETHTOOL_GRXCLSRULE:
4659 		if (dev->hw_features & NETIF_F_LRO)
4660 			ret = mtk_hwlro_get_fdir_entry(dev, cmd);
4661 		break;
4662 	case ETHTOOL_GRXCLSRLALL:
4663 		if (dev->hw_features & NETIF_F_LRO)
4664 			ret = mtk_hwlro_get_fdir_all(dev, cmd,
4665 						     rule_locs);
4666 		break;
4667 	default:
4668 		break;
4669 	}
4670 
4671 	return ret;
4672 }
4673 
4674 static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
4675 {
4676 	int ret = -EOPNOTSUPP;
4677 
4678 	switch (cmd->cmd) {
4679 	case ETHTOOL_SRXCLSRLINS:
4680 		if (dev->hw_features & NETIF_F_LRO)
4681 			ret = mtk_hwlro_add_ipaddr(dev, cmd);
4682 		break;
4683 	case ETHTOOL_SRXCLSRLDEL:
4684 		if (dev->hw_features & NETIF_F_LRO)
4685 			ret = mtk_hwlro_del_ipaddr(dev, cmd);
4686 		break;
4687 	default:
4688 		break;
4689 	}
4690 
4691 	return ret;
4692 }
4693 
4694 static void mtk_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4695 {
4696 	struct mtk_mac *mac = netdev_priv(dev);
4697 
4698 	phylink_ethtool_get_pauseparam(mac->phylink, pause);
4699 }
4700 
4701 static int mtk_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4702 {
4703 	struct mtk_mac *mac = netdev_priv(dev);
4704 
4705 	return phylink_ethtool_set_pauseparam(mac->phylink, pause);
4706 }
4707 
4708 static int mtk_get_eee(struct net_device *dev, struct ethtool_keee *eee)
4709 {
4710 	struct mtk_mac *mac = netdev_priv(dev);
4711 
4712 	return phylink_ethtool_get_eee(mac->phylink, eee);
4713 }
4714 
4715 static int mtk_set_eee(struct net_device *dev, struct ethtool_keee *eee)
4716 {
4717 	struct mtk_mac *mac = netdev_priv(dev);
4718 
4719 	return phylink_ethtool_set_eee(mac->phylink, eee);
4720 }
4721 
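/* TX queue selection: DSA-tagged traffic goes to the per-switch-port
 * queues starting at index 3 (matching mtk_device_event() above), all
 * other traffic uses the queue of the transmitting MAC.
 */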
4722 static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
4723 			    struct net_device *sb_dev)
4724 {
4725 	struct mtk_mac *mac = netdev_priv(dev);
4726 	unsigned int queue = 0;
4727 
4728 	if (netdev_uses_dsa(dev))
4729 		queue = skb_get_queue_mapping(skb) + 3;
4730 	else
4731 		queue = mac->id;
4732 
4733 	if (queue >= dev->num_tx_queues)
4734 		queue = 0;
4735 
4736 	return queue;
4737 }
4738 
4739 static const struct ethtool_ops mtk_ethtool_ops = {
4740 	.get_link_ksettings	= mtk_get_link_ksettings,
4741 	.set_link_ksettings	= mtk_set_link_ksettings,
4742 	.get_drvinfo		= mtk_get_drvinfo,
4743 	.get_msglevel		= mtk_get_msglevel,
4744 	.set_msglevel		= mtk_set_msglevel,
4745 	.nway_reset		= mtk_nway_reset,
4746 	.get_link		= ethtool_op_get_link,
4747 	.get_strings		= mtk_get_strings,
4748 	.get_sset_count		= mtk_get_sset_count,
4749 	.get_ethtool_stats	= mtk_get_ethtool_stats,
4750 	.get_pauseparam		= mtk_get_pauseparam,
4751 	.set_pauseparam		= mtk_set_pauseparam,
4752 	.get_rxnfc		= mtk_get_rxnfc,
4753 	.set_rxnfc		= mtk_set_rxnfc,
4754 	.get_rx_ring_count	= mtk_get_rx_ring_count,
4755 	.get_eee		= mtk_get_eee,
4756 	.set_eee		= mtk_set_eee,
4757 };
4758 
4759 static const struct net_device_ops mtk_netdev_ops = {
4760 	.ndo_uninit		= mtk_uninit,
4761 	.ndo_open		= mtk_open,
4762 	.ndo_stop		= mtk_stop,
4763 	.ndo_start_xmit		= mtk_start_xmit,
4764 	.ndo_set_mac_address	= mtk_set_mac_address,
4765 	.ndo_validate_addr	= eth_validate_addr,
4766 	.ndo_eth_ioctl		= mtk_do_ioctl,
4767 	.ndo_change_mtu		= mtk_change_mtu,
4768 	.ndo_tx_timeout		= mtk_tx_timeout,
4769 	.ndo_get_stats64        = mtk_get_stats64,
4770 	.ndo_fix_features	= mtk_fix_features,
4771 	.ndo_set_features	= mtk_set_features,
4772 #ifdef CONFIG_NET_POLL_CONTROLLER
4773 	.ndo_poll_controller	= mtk_poll_controller,
4774 #endif
4775 	.ndo_setup_tc		= mtk_eth_setup_tc,
4776 	.ndo_bpf		= mtk_xdp,
4777 	.ndo_xdp_xmit		= mtk_xdp_xmit,
4778 	.ndo_select_queue	= mtk_select_queue,
4779 };
4780 
4781 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
4782 {
4783 	const __be32 *_id = of_get_property(np, "reg", NULL);
4784 	phy_interface_t phy_mode;
4785 	struct phylink *phylink;
4786 	struct mtk_mac *mac;
4787 	int id, err;
4788 	int txqs = 1;
4789 	u32 val;
4790 
4791 	if (!_id) {
4792 		dev_err(eth->dev, "missing mac id\n");
4793 		return -EINVAL;
4794 	}
4795 
4796 	id = be32_to_cpup(_id);
4797 	if (id >= MTK_MAX_DEVS) {
4798 		dev_err(eth->dev, "%d is not a valid mac id\n", id);
4799 		return -EINVAL;
4800 	}
4801 
4802 	if (eth->netdev[id]) {
4803 		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
4804 		return -EINVAL;
4805 	}
4806 
4807 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4808 		txqs = MTK_QDMA_NUM_QUEUES;
4809 
4810 	eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1);
4811 	if (!eth->netdev[id]) {
4812 		dev_err(eth->dev, "alloc_etherdev failed\n");
4813 		return -ENOMEM;
4814 	}
4815 	mac = netdev_priv(eth->netdev[id]);
4816 	eth->mac[id] = mac;
4817 	mac->id = id;
4818 	mac->hw = eth;
4819 	mac->of_node = np;
4820 
4821 	err = of_get_ethdev_address(mac->of_node, eth->netdev[id]);
4822 	if (err == -EPROBE_DEFER)
4823 		return err;
4824 
4825 	if (err) {
4826 		/* If the mac address is invalid, use random mac address */
4827 		eth_hw_addr_random(eth->netdev[id]);
4828 		dev_err(eth->dev, "generated random MAC address %pM\n",
4829 			eth->netdev[id]->dev_addr);
4830 	}
4831 
4832 	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
4833 	mac->hwlro_ip_cnt = 0;
4834 
4835 	mac->hw_stats = devm_kzalloc(eth->dev,
4836 				     sizeof(*mac->hw_stats),
4837 				     GFP_KERNEL);
4838 	if (!mac->hw_stats) {
4839 		dev_err(eth->dev, "failed to allocate counter memory\n");
4840 		err = -ENOMEM;
4841 		goto free_netdev;
4842 	}
4843 	spin_lock_init(&mac->hw_stats->stats_lock);
4844 	u64_stats_init(&mac->hw_stats->syncp);
4845 
4846 	if (mtk_is_netsys_v3_or_greater(eth))
4847 		mac->hw_stats->reg_offset = id * 0x80;
4848 	else
4849 		mac->hw_stats->reg_offset = id * 0x40;
4850 
4851 	/* phylink create */
4852 	err = of_get_phy_mode(np, &phy_mode);
4853 	if (err) {
4854 		dev_err(eth->dev, "incorrect phy-mode\n");
4855 		goto free_netdev;
4856 	}
4857 
4858 	/* mac config is not set */
4859 	mac->interface = PHY_INTERFACE_MODE_NA;
4860 	mac->speed = SPEED_UNKNOWN;
4861 
4862 	mac->phylink_config.dev = &eth->netdev[id]->dev;
4863 	mac->phylink_config.type = PHYLINK_NETDEV;
4864 	mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
4865 		MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
4866 	mac->phylink_config.lpi_capabilities = MAC_100FD | MAC_1000FD |
4867 		MAC_2500FD;
4868 	mac->phylink_config.lpi_timer_default = 1000;
4869 
4870 	/* MT7623 gmac0 is now missing its speed-specific PLL configuration
4871 	 * in its .mac_config method (since state->speed is not valid there).
4872 	 * Disable support for MII, GMII and RGMII.
4873 	 */
4874 	if (!mac->hw->soc->disable_pll_modes || mac->id != 0) {
4875 		__set_bit(PHY_INTERFACE_MODE_MII,
4876 			  mac->phylink_config.supported_interfaces);
4877 		__set_bit(PHY_INTERFACE_MODE_GMII,
4878 			  mac->phylink_config.supported_interfaces);
4879 
4880 		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
4881 			phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
4882 	}
4883 
4884 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
4885 		__set_bit(PHY_INTERFACE_MODE_TRGMII,
4886 			  mac->phylink_config.supported_interfaces);
4887 
4888 	/* TRGMII is not permitted on MT7621 if using DDR2 */
4889 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) &&
4890 	    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII_MT7621_CLK)) {
4891 		regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
4892 		if (val & SYSCFG_DRAM_TYPE_DDR2)
4893 			__clear_bit(PHY_INTERFACE_MODE_TRGMII,
4894 				    mac->phylink_config.supported_interfaces);
4895 	}
4896 
4897 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
4898 		__set_bit(PHY_INTERFACE_MODE_SGMII,
4899 			  mac->phylink_config.supported_interfaces);
4900 		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
4901 			  mac->phylink_config.supported_interfaces);
4902 		__set_bit(PHY_INTERFACE_MODE_2500BASEX,
4903 			  mac->phylink_config.supported_interfaces);
4904 	}
4905 
4906 	if (mtk_is_netsys_v3_or_greater(mac->hw) &&
4907 	    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW) &&
4908 	    id == MTK_GMAC1_ID) {
4909 		mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
4910 						       MAC_SYM_PAUSE |
4911 						       MAC_10000FD;
4912 		phy_interface_zero(mac->phylink_config.supported_interfaces);
4913 		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
4914 			  mac->phylink_config.supported_interfaces);
4915 	}
4916 
4917 	phylink = phylink_create(&mac->phylink_config,
4918 				 of_fwnode_handle(mac->of_node),
4919 				 phy_mode, &mtk_phylink_ops);
4920 	if (IS_ERR(phylink)) {
4921 		err = PTR_ERR(phylink);
4922 		goto free_netdev;
4923 	}
4924 
4925 	mac->phylink = phylink;
4926 
4927 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_2P5GPHY) &&
4928 	    id == MTK_GMAC2_ID)
4929 		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
4930 			  mac->phylink_config.supported_interfaces);
4931 
4932 	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
4933 	eth->netdev[id]->watchdog_timeo = 5 * HZ;
4934 	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
4935 	eth->netdev[id]->base_addr = (unsigned long)eth->base;
4936 
4937 	eth->netdev[id]->hw_features = eth->soc->hw_features;
4938 	if (eth->hwlro)
4939 		eth->netdev[id]->hw_features |= NETIF_F_LRO;
4940 
4941 	eth->netdev[id]->vlan_features = eth->soc->hw_features &
4942 		~NETIF_F_HW_VLAN_CTAG_TX;
4943 	eth->netdev[id]->features |= eth->soc->hw_features;
4944 	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
4945 
4946 	eth->netdev[id]->irq = eth->irq[MTK_FE_IRQ_SHARED];
4947 	eth->netdev[id]->dev.of_node = np;
4948 
4949 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4950 		eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
4951 	else
4952 		eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
4953 
4954 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
4955 		mac->device_notifier.notifier_call = mtk_device_event;
4956 		register_netdevice_notifier(&mac->device_notifier);
4957 	}
4958 
4959 	if (mtk_page_pool_enabled(eth))
4960 		eth->netdev[id]->xdp_features = NETDEV_XDP_ACT_BASIC |
4961 						NETDEV_XDP_ACT_REDIRECT |
4962 						NETDEV_XDP_ACT_NDO_XMIT |
4963 						NETDEV_XDP_ACT_NDO_XMIT_SG;
4964 
4965 	return 0;
4966 
4967 free_netdev:
4968 	free_netdev(eth->netdev[id]);
4969 	return err;
4970 }
4971 
4972 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
4973 {
4974 	struct net_device *dev, *tmp;
4975 	LIST_HEAD(dev_list);
4976 	int i;
4977 
4978 	rtnl_lock();
4979 
4980 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4981 		dev = eth->netdev[i];
4982 
4983 		if (!dev || !(dev->flags & IFF_UP))
4984 			continue;
4985 
4986 		list_add_tail(&dev->close_list, &dev_list);
4987 	}
4988 
4989 	netif_close_many(&dev_list, false);
4990 
4991 	eth->dma_dev = dma_dev;
4992 
4993 	list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
4994 		list_del_init(&dev->close_list);
4995 		dev_open(dev, NULL);
4996 	}
4997 
4998 	rtnl_unlock();
4999 }
5000 
5001 static int mtk_sgmii_init(struct mtk_eth *eth)
5002 {
5003 	struct device_node *np;
5004 	struct regmap *regmap;
5005 	int i;
5006 
5007 	for (i = 0; i < MTK_MAX_DEVS; i++) {
5008 		np = of_parse_phandle(eth->dev->of_node, "mediatek,sgmiisys", i);
5009 		if (!np)
5010 			break;
5011 
5012 		regmap = syscon_node_to_regmap(np);
5013 		if (IS_ERR(regmap)) {
5014 			of_node_put(np);
5015 			return PTR_ERR(regmap);
5016 		}
5017 
5018 		eth->sgmii_pcs[i] = mtk_pcs_lynxi_create(eth->dev,
5019 							 of_fwnode_handle(np),
5020 							 regmap,
5021 							 eth->soc->ana_rgc3);
5022 		of_node_put(np);
5023 	}
5024 
5025 	return 0;
5026 }
5027 
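/* Fallback for device trees without an "sram" phandle: carve a gen_pool
 * out of the fixed SRAM window inside the frame engine register space.
 */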
5028 static int mtk_setup_legacy_sram(struct mtk_eth *eth, struct resource *res)
5029 {
5030 	dev_warn(eth->dev, "legacy DT: using hard-coded SRAM offset.\n");
5031 
5032 	if (res->start + MTK_ETH_SRAM_OFFSET + MTK_ETH_NETSYS_V2_SRAM_SIZE - 1 >
5033 	    res->end)
5034 		return -EINVAL;
5035 
5036 	eth->sram_pool = devm_gen_pool_create(eth->dev,
5037 					      const_ilog2(MTK_ETH_SRAM_GRANULARITY),
5038 					      NUMA_NO_NODE, dev_name(eth->dev));
5039 
5040 	if (IS_ERR(eth->sram_pool))
5041 		return PTR_ERR(eth->sram_pool);
5042 
5043 	return gen_pool_add_virt(eth->sram_pool,
5044 				 (unsigned long)eth->base + MTK_ETH_SRAM_OFFSET,
5045 				 res->start + MTK_ETH_SRAM_OFFSET,
5046 				 MTK_ETH_NETSYS_V2_SRAM_SIZE, NUMA_NO_NODE);
5047 }
5048 
5049 static int mtk_probe(struct platform_device *pdev)
5050 {
5051 	struct resource *res = NULL;
5052 	struct device_node *mac_np;
5053 	struct mtk_eth *eth;
5054 	int err, i;
5055 
5056 	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
5057 	if (!eth)
5058 		return -ENOMEM;
5059 
5060 	eth->soc = of_device_get_match_data(&pdev->dev);
5061 
5062 	eth->dev = &pdev->dev;
5063 	eth->dma_dev = &pdev->dev;
5064 	eth->base = devm_platform_ioremap_resource(pdev, 0);
5065 	if (IS_ERR(eth->base))
5066 		return PTR_ERR(eth->base);
5067 
5068 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
5069 		eth->ip_align = NET_IP_ALIGN;
5070 
5071 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
5072 		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
5073 		if (!err)
5074 			err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
5075 
5076 		if (err) {
5077 			dev_err(&pdev->dev, "Wrong DMA config\n");
5078 			return -EINVAL;
5079 		}
5080 	}
5081 
5082 	spin_lock_init(&eth->page_lock);
5083 	spin_lock_init(&eth->tx_irq_lock);
5084 	spin_lock_init(&eth->rx_irq_lock);
5085 	spin_lock_init(&eth->dim_lock);
5086 
5087 	eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5088 	INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
5089 	INIT_DELAYED_WORK(&eth->reset.monitor_work, mtk_hw_reset_monitor_work);
5090 
5091 	eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5092 	INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);
5093 
5094 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
5095 		eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
5096 							      "mediatek,ethsys");
5097 		if (IS_ERR(eth->ethsys)) {
5098 			dev_err(&pdev->dev, "no ethsys regmap found\n");
5099 			return PTR_ERR(eth->ethsys);
5100 		}
5101 	}
5102 
5103 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
5104 		eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
5105 							     "mediatek,infracfg");
5106 		if (IS_ERR(eth->infra)) {
5107 			dev_err(&pdev->dev, "no infracfg regmap found\n");
5108 			return PTR_ERR(eth->infra);
5109 		}
5110 	}
5111 
5112 	if (of_dma_is_coherent(pdev->dev.of_node)) {
5113 		struct regmap *cci;
5114 
5115 		cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
5116 						      "cci-control-port");
5117 		/* enable CPU/bus coherency */
5118 		if (!IS_ERR(cci))
5119 			regmap_write(cci, 0, 3);
5120 	}
5121 
5122 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
5123 		err = mtk_sgmii_init(eth);
5124 
5125 		if (err)
5126 			return err;
5127 	}
5128 
5129 	if (eth->soc->required_pctl) {
5130 		eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
5131 							    "mediatek,pctl");
5132 		if (IS_ERR(eth->pctl)) {
5133 			dev_err(&pdev->dev, "no pctl regmap found\n");
5134 			err = PTR_ERR(eth->pctl);
5135 			goto err_destroy_sgmii;
5136 		}
5137 	}
5138 
5139 	if (mtk_is_netsys_v2_or_greater(eth)) {
5140 		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
5141 		if (!res) {
5142 			err = -EINVAL;
5143 			goto err_destroy_sgmii;
5144 		}
5145 
5146 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
5147 			eth->sram_pool = of_gen_pool_get(pdev->dev.of_node,
5148 							 "sram", 0);
5149 			if (!eth->sram_pool) {
5150 				if (!mtk_is_netsys_v3_or_greater(eth)) {
5151 					err = mtk_setup_legacy_sram(eth, res);
5152 					if (err)
5153 						goto err_destroy_sgmii;
5154 				} else {
5155 					dev_err(&pdev->dev,
5156 						"Could not get SRAM pool\n");
5157 					err = -EINVAL;
5158 					goto err_destroy_sgmii;
5159 				}
5160 			}
5161 		}
5162 	}
5163 
5164 	if (eth->soc->offload_version) {
5165 		for (i = 0;; i++) {
5166 			struct device_node *np;
5167 			phys_addr_t wdma_phy;
5168 			u32 wdma_base;
5169 
5170 			if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
5171 				break;
5172 
5173 			np = of_parse_phandle(pdev->dev.of_node,
5174 					      "mediatek,wed", i);
5175 			if (!np)
5176 				break;
5177 
5178 			wdma_base = eth->soc->reg_map->wdma_base[i];
5179 			wdma_phy = res ? res->start + wdma_base : 0;
5180 			mtk_wed_add_hw(np, eth, eth->base + wdma_base,
5181 				       wdma_phy, i);
5182 		}
5183 	}
5184 
5185 	err = mtk_get_irqs(pdev, eth);
5186 	if (err)
5187 		goto err_wed_exit;
5188 
5189 	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
5190 		eth->clks[i] = devm_clk_get(eth->dev,
5191 					    mtk_clks_source_name[i]);
5192 		if (IS_ERR(eth->clks[i])) {
5193 			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) {
5194 				err = -EPROBE_DEFER;
5195 				goto err_wed_exit;
5196 			}
5197 			if (eth->soc->required_clks & BIT(i)) {
5198 				dev_err(&pdev->dev, "clock %s not found\n",
5199 					mtk_clks_source_name[i]);
5200 				err = -EINVAL;
5201 				goto err_wed_exit;
5202 			}
5203 			eth->clks[i] = NULL;
5204 		}
5205 	}
5206 
5207 	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
5208 	INIT_WORK(&eth->pending_work, mtk_pending_work);
5209 
5210 	err = mtk_hw_init(eth, false);
5211 	if (err)
5212 		goto err_wed_exit;
5213 
5214 	eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
5215 
5216 	for_each_child_of_node(pdev->dev.of_node, mac_np) {
5217 		if (!of_device_is_compatible(mac_np,
5218 					     "mediatek,eth-mac"))
5219 			continue;
5220 
5221 		if (!of_device_is_available(mac_np))
5222 			continue;
5223 
5224 		err = mtk_add_mac(eth, mac_np);
5225 		if (err) {
5226 			of_node_put(mac_np);
5227 			goto err_deinit_hw;
5228 		}
5229 	}
5230 
5231 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
5232 		err = devm_request_irq(eth->dev, eth->irq[MTK_FE_IRQ_SHARED],
5233 				       mtk_handle_irq, 0,
5234 				       dev_name(eth->dev), eth);
5235 	} else {
5236 		err = devm_request_irq(eth->dev, eth->irq[MTK_FE_IRQ_TX],
5237 				       mtk_handle_irq_tx, 0,
5238 				       dev_name(eth->dev), eth);
5239 		if (err)
5240 			goto err_free_dev;
5241 
5242 		err = devm_request_irq(eth->dev, eth->irq[MTK_FE_IRQ_RX],
5243 				       mtk_handle_irq_rx, 0,
5244 				       dev_name(eth->dev), eth);
5245 	}
5246 	if (err)
5247 		goto err_free_dev;
5248 
5249 	/* No MT7628/88 support yet */
5250 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
5251 		err = mtk_mdio_init(eth);
5252 		if (err)
5253 			goto err_free_dev;
5254 	}
5255 
5256 	if (eth->soc->offload_version) {
5257 		u8 ppe_num = eth->soc->ppe_num;
5258 
5259 		ppe_num = min_t(u8, ARRAY_SIZE(eth->ppe), ppe_num);
5260 		for (i = 0; i < ppe_num; i++) {
5261 			u32 ppe_addr = eth->soc->reg_map->ppe_base;
5262 
5263 			ppe_addr += (i == 2 ? 0xc00 : i * 0x400);
5264 			eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i);
5265 
5266 			if (!eth->ppe[i]) {
5267 				err = -ENOMEM;
5268 				goto err_deinit_ppe;
5269 			}
5270 			err = mtk_eth_offload_init(eth, i);
5271 
5272 			if (err)
5273 				goto err_deinit_ppe;
5274 		}
5275 	}
5276 
5277 	for (i = 0; i < MTK_MAX_DEVS; i++) {
5278 		if (!eth->netdev[i])
5279 			continue;
5280 
5281 		err = register_netdev(eth->netdev[i]);
5282 		if (err) {
5283 			dev_err(eth->dev, "error bringing up device\n");
5284 			goto err_deinit_ppe;
5285 		} else
5286 			netif_info(eth, probe, eth->netdev[i],
5287 				   "mediatek frame engine at 0x%08lx, irq %d\n",
5288 				   eth->netdev[i]->base_addr, eth->irq[MTK_FE_IRQ_SHARED]);
5289 	}
5290 
5291 	/* we run 2 devices on the same DMA ring so we need a dummy device
5292 	 * for NAPI to work
5293 	 */
5294 	eth->dummy_dev = alloc_netdev_dummy(0);
5295 	if (!eth->dummy_dev) {
5296 		err = -ENOMEM;
5297 		dev_err(eth->dev, "failed to allocate dummy device\n");
5298 		goto err_unreg_netdev;
5299 	}
5300 	netif_napi_add(eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
5301 	netif_napi_add(eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
5302 
5303 	platform_set_drvdata(pdev, eth);
5304 	schedule_delayed_work(&eth->reset.monitor_work,
5305 			      MTK_DMA_MONITOR_TIMEOUT);
5306 
5307 	return 0;
5308 
5309 err_unreg_netdev:
5310 	mtk_unreg_dev(eth);
5311 err_deinit_ppe:
5312 	mtk_ppe_deinit(eth);
5313 	mtk_mdio_cleanup(eth);
5314 err_free_dev:
5315 	mtk_free_dev(eth);
5316 err_deinit_hw:
5317 	mtk_hw_deinit(eth);
5318 err_wed_exit:
5319 	mtk_wed_exit();
5320 err_destroy_sgmii:
5321 	mtk_sgmii_destroy(eth);
5322 
5323 	return err;
5324 }
5325 
5326 static void mtk_remove(struct platform_device *pdev)
5327 {
5328 	struct mtk_eth *eth = platform_get_drvdata(pdev);
5329 	struct mtk_mac *mac;
5330 	int i;
5331 
5332 	/* stop all devices to make sure that dma is properly shut down */
5333 	for (i = 0; i < MTK_MAX_DEVS; i++) {
5334 		if (!eth->netdev[i])
5335 			continue;
5336 		mtk_stop(eth->netdev[i]);
5337 		mac = netdev_priv(eth->netdev[i]);
5338 		phylink_disconnect_phy(mac->phylink);
5339 	}
5340 
5341 	mtk_wed_exit();
5342 	mtk_hw_deinit(eth);
5343 
5344 	netif_napi_del(&eth->tx_napi);
5345 	netif_napi_del(&eth->rx_napi);
5346 	mtk_cleanup(eth);
5347 	free_netdev(eth->dummy_dev);
5348 	mtk_mdio_cleanup(eth);
5349 }
5350 
5351 static const struct mtk_soc_data mt2701_data = {
5352 	.reg_map = &mtk_reg_map,
5353 	.caps = MT7623_CAPS | MTK_HWLRO,
5354 	.hw_features = MTK_HW_FEATURES,
5355 	.required_clks = MT7623_CLKS_BITMAP,
5356 	.required_pctl = true,
5357 	.version = 1,
5358 	.tx = {
5359 		.desc_size = sizeof(struct mtk_tx_dma),
5360 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5361 		.dma_len_offset = 16,
5362 		.dma_size = MTK_DMA_SIZE(2K),
5363 		.fq_dma_size = MTK_DMA_SIZE(2K),
5364 	},
5365 	.rx = {
5366 		.desc_size = sizeof(struct mtk_rx_dma),
5367 		.irq_done_mask = MTK_RX_DONE_INT,
5368 		.dma_l4_valid = RX_DMA_L4_VALID,
5369 		.dma_size = MTK_DMA_SIZE(2K),
5370 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5371 		.dma_len_offset = 16,
5372 	},
5373 };
5374 
5375 static const struct mtk_soc_data mt7621_data = {
5376 	.reg_map = &mtk_reg_map,
5377 	.caps = MT7621_CAPS,
5378 	.hw_features = MTK_HW_FEATURES,
5379 	.required_clks = MT7621_CLKS_BITMAP,
5380 	.required_pctl = false,
5381 	.version = 1,
5382 	.offload_version = 1,
5383 	.ppe_num = 1,
5384 	.hash_offset = 2,
5385 	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
5386 	.tx = {
5387 		.desc_size = sizeof(struct mtk_tx_dma),
5388 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5389 		.dma_len_offset = 16,
5390 		.dma_size = MTK_DMA_SIZE(2K),
5391 		.fq_dma_size = MTK_DMA_SIZE(2K),
5392 	},
5393 	.rx = {
5394 		.desc_size = sizeof(struct mtk_rx_dma),
5395 		.irq_done_mask = MTK_RX_DONE_INT,
5396 		.dma_l4_valid = RX_DMA_L4_VALID,
5397 		.dma_size = MTK_DMA_SIZE(2K),
5398 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5399 		.dma_len_offset = 16,
5400 	},
5401 };
5402 
5403 static const struct mtk_soc_data mt7622_data = {
5404 	.reg_map = &mtk_reg_map,
5405 	.ana_rgc3 = 0x2028,
5406 	.caps = MT7622_CAPS | MTK_HWLRO,
5407 	.hw_features = MTK_HW_FEATURES,
5408 	.required_clks = MT7622_CLKS_BITMAP,
5409 	.required_pctl = false,
5410 	.version = 1,
5411 	.offload_version = 2,
5412 	.ppe_num = 1,
5413 	.hash_offset = 2,
5414 	.has_accounting = true,
5415 	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
5416 	.tx = {
5417 		.desc_size = sizeof(struct mtk_tx_dma),
5418 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5419 		.dma_len_offset = 16,
5420 		.dma_size = MTK_DMA_SIZE(2K),
5421 		.fq_dma_size = MTK_DMA_SIZE(2K),
5422 	},
5423 	.rx = {
5424 		.desc_size = sizeof(struct mtk_rx_dma),
5425 		.irq_done_mask = MTK_RX_DONE_INT,
5426 		.dma_l4_valid = RX_DMA_L4_VALID,
5427 		.dma_size = MTK_DMA_SIZE(2K),
5428 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5429 		.dma_len_offset = 16,
5430 	},
5431 };
5432 
5433 static const struct mtk_soc_data mt7623_data = {
5434 	.reg_map = &mtk_reg_map,
5435 	.caps = MT7623_CAPS | MTK_HWLRO,
5436 	.hw_features = MTK_HW_FEATURES,
5437 	.required_clks = MT7623_CLKS_BITMAP,
5438 	.required_pctl = true,
5439 	.version = 1,
5440 	.offload_version = 1,
5441 	.ppe_num = 1,
5442 	.hash_offset = 2,
5443 	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
5444 	.disable_pll_modes = true,
5445 	.tx = {
5446 		.desc_size = sizeof(struct mtk_tx_dma),
5447 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5448 		.dma_len_offset = 16,
5449 		.dma_size = MTK_DMA_SIZE(2K),
5450 		.fq_dma_size = MTK_DMA_SIZE(2K),
5451 	},
5452 	.rx = {
5453 		.desc_size = sizeof(struct mtk_rx_dma),
5454 		.irq_done_mask = MTK_RX_DONE_INT,
5455 		.dma_l4_valid = RX_DMA_L4_VALID,
5456 		.dma_size = MTK_DMA_SIZE(2K),
5457 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5458 		.dma_len_offset = 16,
5459 	},
5460 };
5461 
5462 static const struct mtk_soc_data mt7629_data = {
5463 	.reg_map = &mtk_reg_map,
5464 	.ana_rgc3 = 0x128,
5465 	.caps = MT7629_CAPS | MTK_HWLRO,
5466 	.hw_features = MTK_HW_FEATURES,
5467 	.required_clks = MT7629_CLKS_BITMAP,
5468 	.required_pctl = false,
5469 	.has_accounting = true,
5470 	.version = 1,
5471 	.tx = {
5472 		.desc_size = sizeof(struct mtk_tx_dma),
5473 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5474 		.dma_len_offset = 16,
5475 		.dma_size = MTK_DMA_SIZE(2K),
5476 		.fq_dma_size = MTK_DMA_SIZE(2K),
5477 	},
5478 	.rx = {
5479 		.desc_size = sizeof(struct mtk_rx_dma),
5480 		.irq_done_mask = MTK_RX_DONE_INT,
5481 		.dma_l4_valid = RX_DMA_L4_VALID,
5482 		.dma_size = MTK_DMA_SIZE(2K),
5483 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5484 		.dma_len_offset = 16,
5485 	},
5486 };
5487 
5488 static const struct mtk_soc_data mt7981_data = {
5489 	.reg_map = &mt7986_reg_map,
5490 	.ana_rgc3 = 0x128,
5491 	.caps = MT7981_CAPS,
5492 	.hw_features = MTK_HW_FEATURES,
5493 	.required_clks = MT7981_CLKS_BITMAP,
5494 	.required_pctl = false,
5495 	.version = 2,
5496 	.offload_version = 2,
5497 	.ppe_num = 2,
5498 	.hash_offset = 4,
5499 	.has_accounting = true,
5500 	.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
5501 	.tx = {
5502 		.desc_size = sizeof(struct mtk_tx_dma_v2),
5503 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5504 		.dma_len_offset = 8,
5505 		.dma_size = MTK_DMA_SIZE(2K),
5506 		.fq_dma_size = MTK_DMA_SIZE(2K),
5507 	},
5508 	.rx = {
5509 		.desc_size = sizeof(struct mtk_rx_dma),
5510 		.irq_done_mask = MTK_RX_DONE_INT,
5511 		.dma_l4_valid = RX_DMA_L4_VALID_V2,
5512 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5513 		.dma_len_offset = 16,
5514 		.dma_size = MTK_DMA_SIZE(2K),
5515 	},
5516 };
5517 
5518 static const struct mtk_soc_data mt7986_data = {
5519 	.reg_map = &mt7986_reg_map,
5520 	.ana_rgc3 = 0x128,
5521 	.caps = MT7986_CAPS,
5522 	.hw_features = MTK_HW_FEATURES,
5523 	.required_clks = MT7986_CLKS_BITMAP,
5524 	.required_pctl = false,
5525 	.version = 2,
5526 	.offload_version = 2,
5527 	.ppe_num = 2,
5528 	.hash_offset = 4,
5529 	.has_accounting = true,
5530 	.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
5531 	.tx = {
5532 		.desc_size = sizeof(struct mtk_tx_dma_v2),
5533 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5534 		.dma_len_offset = 8,
5535 		.dma_size = MTK_DMA_SIZE(2K),
5536 		.fq_dma_size = MTK_DMA_SIZE(2K),
5537 	},
5538 	.rx = {
5539 		.desc_size = sizeof(struct mtk_rx_dma),
5540 		.irq_done_mask = MTK_RX_DONE_INT,
5541 		.dma_l4_valid = RX_DMA_L4_VALID_V2,
5542 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5543 		.dma_len_offset = 16,
5544 		.dma_size = MTK_DMA_SIZE(2K),
5545 	},
5546 };
5547 
5548 static const struct mtk_soc_data mt7988_data = {
5549 	.reg_map = &mt7988_reg_map,
5550 	.ana_rgc3 = 0x128,
5551 	.caps = MT7988_CAPS,
5552 	.hw_features = MTK_HW_FEATURES,
5553 	.required_clks = MT7988_CLKS_BITMAP,
5554 	.required_pctl = false,
5555 	.version = 3,
5556 	.offload_version = 2,
5557 	.ppe_num = 3,
5558 	.hash_offset = 4,
5559 	.has_accounting = true,
5560 	.foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
5561 	.tx = {
5562 		.desc_size = sizeof(struct mtk_tx_dma_v2),
5563 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5564 		.dma_len_offset = 8,
5565 		.dma_size = MTK_DMA_SIZE(2K),
5566 		.fq_dma_size = MTK_DMA_SIZE(4K),
5567 	},
5568 	.rx = {
5569 		.desc_size = sizeof(struct mtk_rx_dma_v2),
5570 		.irq_done_mask = MTK_RX_DONE_INT_V2,
5571 		.dma_l4_valid = RX_DMA_L4_VALID_V2,
5572 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5573 		.dma_len_offset = 8,
5574 		.dma_size = MTK_DMA_SIZE(2K),
5575 	},
5576 };
5577 
5578 static const struct mtk_soc_data rt5350_data = {
5579 	.reg_map = &mt7628_reg_map,
5580 	.caps = MT7628_CAPS,
5581 	.hw_features = MTK_HW_FEATURES_MT7628,
5582 	.required_clks = MT7628_CLKS_BITMAP,
5583 	.required_pctl = false,
5584 	.version = 1,
5585 	.tx = {
5586 		.desc_size = sizeof(struct mtk_tx_dma),
5587 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5588 		.dma_len_offset = 16,
5589 		.dma_size = MTK_DMA_SIZE(2K),
5590 	},
5591 	.rx = {
5592 		.desc_size = sizeof(struct mtk_rx_dma),
5593 		.irq_done_mask = MTK_RX_DONE_INT,
5594 		.dma_l4_valid = RX_DMA_L4_VALID_PDMA,
5595 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5596 		.dma_len_offset = 16,
5597 		.dma_size = MTK_DMA_SIZE(2K),
5598 	},
5599 };
5600 
5601 const struct of_device_id of_mtk_match[] = {
5602 	{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data },
5603 	{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
5604 	{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data },
5605 	{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data },
5606 	{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data },
5607 	{ .compatible = "mediatek,mt7981-eth", .data = &mt7981_data },
5608 	{ .compatible = "mediatek,mt7986-eth", .data = &mt7986_data },
5609 	{ .compatible = "mediatek,mt7988-eth", .data = &mt7988_data },
5610 	{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data },
5611 	{},
5612 };
5613 MODULE_DEVICE_TABLE(of, of_mtk_match);
5614 
5615 static struct platform_driver mtk_driver = {
5616 	.probe = mtk_probe,
5617 	.remove = mtk_remove,
5618 	.driver = {
5619 		.name = "mtk_soc_eth",
5620 		.of_match_table = of_mtk_match,
5621 	},
5622 };
5623 
5624 module_platform_driver(mtk_driver);
5625 
5626 MODULE_LICENSE("GPL");
5627 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
5628 MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
5629 MODULE_IMPORT_NS("NETDEV_INTERNAL");
5630