1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 *
4 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
5 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
6 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
7 */
8
9 #include <linux/of.h>
10 #include <linux/of_mdio.h>
11 #include <linux/of_net.h>
12 #include <linux/of_address.h>
13 #include <linux/mfd/syscon.h>
14 #include <linux/platform_device.h>
15 #include <linux/regmap.h>
16 #include <linux/clk.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/if_vlan.h>
19 #include <linux/reset.h>
20 #include <linux/tcp.h>
21 #include <linux/interrupt.h>
22 #include <linux/pinctrl/devinfo.h>
23 #include <linux/phylink.h>
24 #include <linux/pcs/pcs-mtk-lynxi.h>
25 #include <linux/jhash.h>
26 #include <linux/bitfield.h>
27 #include <net/dsa.h>
28 #include <net/dst_metadata.h>
29 #include <net/page_pool/helpers.h>
30 #include <linux/genalloc.h>
31
32 #include "mtk_eth_soc.h"
33 #include "mtk_wed.h"
34
/* Module-wide message verbosity; -1 selects the driver defaults. */
static int mtk_msg_level = -1;
module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");

/* Describe one u64 counter in struct mtk_hw_stats: name plus u64 index. */
#define MTK_ETHTOOL_STAT(x) { #x, \
			      offsetof(struct mtk_hw_stats, x) / sizeof(u64) }

/* Same, for counters nested inside the xdp_stats sub-structure. */
#define MTK_ETHTOOL_XDP_STAT(x) { #x, \
				  offsetof(struct mtk_hw_stats, xdp_stats.x) / \
				  sizeof(u64) }
45
/* Default frame-engine register layout (QDMA at 0x1800/0x1a00/0x1b00,
 * PDMA at 0x0900/0x0a00) — presumably the NETSYS v1 generation; the
 * SoC-specific maps below override this where the layout differs.
 */
static const struct mtk_reg_map mtk_reg_map = {
	.tx_irq_mask		= 0x1a1c,
	.tx_irq_status		= 0x1a18,
	.pdma = {
		.rx_ptr		= 0x0900,
		.rx_cnt_cfg	= 0x0904,
		.pcrx_ptr	= 0x0908,
		.glo_cfg	= 0x0a04,
		.rst_idx	= 0x0a08,
		.delay_irq	= 0x0a0c,
		.irq_status	= 0x0a20,
		.irq_mask	= 0x0a28,
		.adma_rx_dbg0	= 0x0a38,
		.int_grp	= 0x0a50,
	},
	.qdma = {
		.qtx_cfg	= 0x1800,
		.qtx_sch	= 0x1804,
		.rx_ptr		= 0x1900,
		.rx_cnt_cfg	= 0x1904,
		.qcrx_ptr	= 0x1908,
		.glo_cfg	= 0x1a04,
		.rst_idx	= 0x1a08,
		.delay_irq	= 0x1a0c,
		.fc_th		= 0x1a10,
		.tx_sch_rate	= 0x1a14,
		.int_grp	= 0x1a20,
		.hred		= 0x1a44,
		.ctx_ptr	= 0x1b00,
		.dtx_ptr	= 0x1b04,
		.crx_ptr	= 0x1b10,
		.drx_ptr	= 0x1b14,
		.fq_head	= 0x1b20,
		.fq_tail	= 0x1b24,
		.fq_count	= 0x1b28,
		.fq_blen	= 0x1b2c,
	},
	.gdm1_cnt		= 0x2400,
	.gdma_to_ppe = {
		[0]		= 0x4444,
	},
	.ppe_base		= 0x0c00,
	.wdma_base = {
		[0]		= 0x2800,
		[1]		= 0x2c00,
	},
	.pse_iq_sta		= 0x0110,
	.pse_oq_sta		= 0x0118,
};
95
/* MT7628/MT76x8 register layout: PDMA only — this SoC has no QDMA
 * block, so only the PDMA sub-map and TX IRQ registers are filled in.
 */
static const struct mtk_reg_map mt7628_reg_map = {
	.tx_irq_mask		= 0x0a28,
	.tx_irq_status		= 0x0a20,
	.pdma = {
		.rx_ptr		= 0x0900,
		.rx_cnt_cfg	= 0x0904,
		.pcrx_ptr	= 0x0908,
		.glo_cfg	= 0x0a04,
		.rst_idx	= 0x0a08,
		.delay_irq	= 0x0a0c,
		.irq_status	= 0x0a20,
		.irq_mask	= 0x0a28,
		.int_grp	= 0x0a50,
	},
};
111
/* MT7986 register layout: PDMA relocated to 0x4100/0x4200, QDMA to
 * 0x4400-0x4700; two GDM-to-PPE paths and two WDMA instances.
 */
static const struct mtk_reg_map mt7986_reg_map = {
	.tx_irq_mask		= 0x461c,
	.tx_irq_status		= 0x4618,
	.pdma = {
		.rx_ptr		= 0x4100,
		.rx_cnt_cfg	= 0x4104,
		.pcrx_ptr	= 0x4108,
		.glo_cfg	= 0x4204,
		.rst_idx	= 0x4208,
		.delay_irq	= 0x420c,
		.irq_status	= 0x4220,
		.irq_mask	= 0x4228,
		.adma_rx_dbg0	= 0x4238,
		.int_grp	= 0x4250,
	},
	.qdma = {
		.qtx_cfg	= 0x4400,
		.qtx_sch	= 0x4404,
		.rx_ptr		= 0x4500,
		.rx_cnt_cfg	= 0x4504,
		.qcrx_ptr	= 0x4508,
		.glo_cfg	= 0x4604,
		.rst_idx	= 0x4608,
		.delay_irq	= 0x460c,
		.fc_th		= 0x4610,
		.int_grp	= 0x4620,
		.hred		= 0x4644,
		.ctx_ptr	= 0x4700,
		.dtx_ptr	= 0x4704,
		.crx_ptr	= 0x4710,
		.drx_ptr	= 0x4714,
		.fq_head	= 0x4720,
		.fq_tail	= 0x4724,
		.fq_count	= 0x4728,
		.fq_blen	= 0x472c,
		.tx_sch_rate	= 0x4798,
	},
	.gdm1_cnt		= 0x1c00,
	.gdma_to_ppe = {
		[0]		= 0x3333,
		[1]		= 0x4444,
	},
	.ppe_base		= 0x2000,
	.wdma_base = {
		[0]		= 0x4800,
		[1]		= 0x4c00,
	},
	.pse_iq_sta		= 0x0180,
	.pse_oq_sta		= 0x01a0,
};
162
/* MT7988 register layout: same QDMA window as MT7986 but PDMA moved up
 * to 0x6900/0x6a00; adds a third GDM-to-PPE path and third WDMA instance.
 */
static const struct mtk_reg_map mt7988_reg_map = {
	.tx_irq_mask		= 0x461c,
	.tx_irq_status		= 0x4618,
	.pdma = {
		.rx_ptr		= 0x6900,
		.rx_cnt_cfg	= 0x6904,
		.pcrx_ptr	= 0x6908,
		.glo_cfg	= 0x6a04,
		.rst_idx	= 0x6a08,
		.delay_irq	= 0x6a0c,
		.irq_status	= 0x6a20,
		.irq_mask	= 0x6a28,
		.adma_rx_dbg0	= 0x6a38,
		.int_grp	= 0x6a50,
	},
	.qdma = {
		.qtx_cfg	= 0x4400,
		.qtx_sch	= 0x4404,
		.rx_ptr		= 0x4500,
		.rx_cnt_cfg	= 0x4504,
		.qcrx_ptr	= 0x4508,
		.glo_cfg	= 0x4604,
		.rst_idx	= 0x4608,
		.delay_irq	= 0x460c,
		.fc_th		= 0x4610,
		.int_grp	= 0x4620,
		.hred		= 0x4644,
		.ctx_ptr	= 0x4700,
		.dtx_ptr	= 0x4704,
		.crx_ptr	= 0x4710,
		.drx_ptr	= 0x4714,
		.fq_head	= 0x4720,
		.fq_tail	= 0x4724,
		.fq_count	= 0x4728,
		.fq_blen	= 0x472c,
		.tx_sch_rate	= 0x4798,
	},
	.gdm1_cnt		= 0x1c00,
	.gdma_to_ppe = {
		[0]		= 0x3333,
		[1]		= 0x4444,
		[2]		= 0xcccc,
	},
	.ppe_base		= 0x2000,
	.wdma_base = {
		[0]		= 0x4800,
		[1]		= 0x4c00,
		[2]		= 0x5000,
	},
	.pse_iq_sta		= 0x0180,
	.pse_oq_sta		= 0x01a0,
};
215
/* strings used by ethtool: each entry pairs a counter name with its
 * u64 offset into struct mtk_hw_stats (see the STAT macros above).
 */
static const struct mtk_ethtool_stats {
	char str[ETH_GSTRING_LEN];
	u32 offset;
} mtk_ethtool_stats[] = {
	MTK_ETHTOOL_STAT(tx_bytes),
	MTK_ETHTOOL_STAT(tx_packets),
	MTK_ETHTOOL_STAT(tx_skip),
	MTK_ETHTOOL_STAT(tx_collisions),
	MTK_ETHTOOL_STAT(rx_bytes),
	MTK_ETHTOOL_STAT(rx_packets),
	MTK_ETHTOOL_STAT(rx_overflow),
	MTK_ETHTOOL_STAT(rx_fcs_errors),
	MTK_ETHTOOL_STAT(rx_short_errors),
	MTK_ETHTOOL_STAT(rx_long_errors),
	MTK_ETHTOOL_STAT(rx_checksum_errors),
	MTK_ETHTOOL_STAT(rx_flow_control_packets),
	MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect),
	MTK_ETHTOOL_XDP_STAT(rx_xdp_pass),
	MTK_ETHTOOL_XDP_STAT(rx_xdp_drop),
	MTK_ETHTOOL_XDP_STAT(rx_xdp_tx),
	MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors),
	MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit),
	MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors),
};
241
/* Clock names requested from the clock framework at probe time;
 * presumably indexed by the MTK_CLK_* enum — confirm against
 * mtk_eth_soc.h. Which entries exist depends on the SoC.
 */
static const char * const mtk_clks_source_name[] = {
	"ethif",
	"sgmiitop",
	"esw",
	"gp0",
	"gp1",
	"gp2",
	"gp3",
	"xgp1",
	"xgp2",
	"xgp3",
	"crypto",
	"fe",
	"trgpll",
	"sgmii_tx250m",
	"sgmii_rx250m",
	"sgmii_cdr_ref",
	"sgmii_cdr_fb",
	"sgmii2_tx250m",
	"sgmii2_rx250m",
	"sgmii2_cdr_ref",
	"sgmii2_cdr_fb",
	"sgmii_ck",
	"eth2pll",
	"wocpu0",
	"wocpu1",
	"netsys0",
	"netsys1",
	"ethwarp_wocpu2",
	"ethwarp_wocpu1",
	"ethwarp_wocpu0",
	"top_sgm0_sel",
	"top_sgm1_sel",
	"top_eth_gmii_sel",
	"top_eth_refck_50m_sel",
	"top_eth_sys_200m_sel",
	"top_eth_sys_sel",
	"top_eth_xgmii_sel",
	"top_eth_mii_sel",
	"top_netsys_sel",
	"top_netsys_500m_sel",
	"top_netsys_pao_2x_sel",
	"top_netsys_sync_250m_sel",
	"top_netsys_ppefb_250m_sel",
	"top_netsys_warp_sel",
};
288
/* Write @val to frame-engine register @reg (offset from eth->base). */
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	__raw_writel(val, eth->base + reg);
}
293
/* Read frame-engine register @reg (offset from eth->base). */
u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
{
	return __raw_readl(eth->base + reg);
}
298
/* Read-modify-write frame-engine register @reg: clear the bits in
 * @mask, then OR in @set.
 *
 * Returns the new register value that was written back (the previous
 * code returned the register offset instead, which is useless to
 * callers expecting the updated value).
 */
u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg)
{
	u32 val;

	val = mtk_r32(eth, reg);
	val &= ~mask;
	val |= set;
	mtk_w32(eth, val, reg);
	return val;
}
309
mtk_mdio_busy_wait(struct mtk_eth * eth)310 static int mtk_mdio_busy_wait(struct mtk_eth *eth)
311 {
312 unsigned long t_start = jiffies;
313
314 while (1) {
315 if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
316 return 0;
317 if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
318 break;
319 cond_resched();
320 }
321
322 dev_err(eth->dev, "mdio: MDIO timeout\n");
323 return -ETIMEDOUT;
324 }
325
/* Issue a Clause-22 MDIO write through the PHY indirect-access register.
 *
 * Waits for the engine to go idle, programs one combined command word
 * (start, write opcode, register, PHY address, data), then waits for
 * completion. Returns 0 on success or -ETIMEDOUT from the busy-wait.
 */
static int _mtk_mdio_write_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
			       u32 write_data)
{
	int ret;

	ret = mtk_mdio_busy_wait(eth);
	if (ret < 0)
		return ret;

	mtk_w32(eth, PHY_IAC_ACCESS |
		PHY_IAC_START_C22 |
		PHY_IAC_CMD_WRITE |
		PHY_IAC_REG(phy_reg) |
		PHY_IAC_ADDR(phy_addr) |
		PHY_IAC_DATA(write_data),
		MTK_PHY_IAC);

	ret = mtk_mdio_busy_wait(eth);
	if (ret < 0)
		return ret;

	return 0;
}
349
/* Issue a Clause-45 MDIO write: first an ADDR cycle carrying the
 * register number in the data field, then a WRITE cycle carrying the
 * payload. Each phase is bracketed by a busy-wait on the engine.
 * Returns 0 on success or -ETIMEDOUT.
 */
static int _mtk_mdio_write_c45(struct mtk_eth *eth, u32 phy_addr,
			       u32 devad, u32 phy_reg, u32 write_data)
{
	int ret;

	ret = mtk_mdio_busy_wait(eth);
	if (ret < 0)
		return ret;

	/* Address phase: select devad/register inside the PHY. */
	mtk_w32(eth, PHY_IAC_ACCESS |
		PHY_IAC_START_C45 |
		PHY_IAC_CMD_C45_ADDR |
		PHY_IAC_REG(devad) |
		PHY_IAC_ADDR(phy_addr) |
		PHY_IAC_DATA(phy_reg),
		MTK_PHY_IAC);

	ret = mtk_mdio_busy_wait(eth);
	if (ret < 0)
		return ret;

	/* Data phase: write the actual value. */
	mtk_w32(eth, PHY_IAC_ACCESS |
		PHY_IAC_START_C45 |
		PHY_IAC_CMD_WRITE |
		PHY_IAC_REG(devad) |
		PHY_IAC_ADDR(phy_addr) |
		PHY_IAC_DATA(write_data),
		MTK_PHY_IAC);

	ret = mtk_mdio_busy_wait(eth);
	if (ret < 0)
		return ret;

	return 0;
}
385
/* Issue a Clause-22 MDIO read through the PHY indirect-access register.
 *
 * Returns the 16-bit read data (masked with PHY_IAC_DATA_MASK) on
 * success, or -ETIMEDOUT from the busy-wait.
 */
static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
{
	int ret;

	ret = mtk_mdio_busy_wait(eth);
	if (ret < 0)
		return ret;

	mtk_w32(eth, PHY_IAC_ACCESS |
		PHY_IAC_START_C22 |
		PHY_IAC_CMD_C22_READ |
		PHY_IAC_REG(phy_reg) |
		PHY_IAC_ADDR(phy_addr),
		MTK_PHY_IAC);

	ret = mtk_mdio_busy_wait(eth);
	if (ret < 0)
		return ret;

	/* Result is latched in the data field of the IAC register. */
	return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
}
407
/* Issue a Clause-45 MDIO read: ADDR cycle to select devad/register,
 * then a READ cycle; the result is latched in the IAC data field.
 * Returns the 16-bit value on success or -ETIMEDOUT.
 */
static int _mtk_mdio_read_c45(struct mtk_eth *eth, u32 phy_addr,
			      u32 devad, u32 phy_reg)
{
	int ret;

	ret = mtk_mdio_busy_wait(eth);
	if (ret < 0)
		return ret;

	/* Address phase: select devad/register inside the PHY. */
	mtk_w32(eth, PHY_IAC_ACCESS |
		PHY_IAC_START_C45 |
		PHY_IAC_CMD_C45_ADDR |
		PHY_IAC_REG(devad) |
		PHY_IAC_ADDR(phy_addr) |
		PHY_IAC_DATA(phy_reg),
		MTK_PHY_IAC);

	ret = mtk_mdio_busy_wait(eth);
	if (ret < 0)
		return ret;

	/* Read phase. */
	mtk_w32(eth, PHY_IAC_ACCESS |
		PHY_IAC_START_C45 |
		PHY_IAC_CMD_C45_READ |
		PHY_IAC_REG(devad) |
		PHY_IAC_ADDR(phy_addr),
		MTK_PHY_IAC);

	ret = mtk_mdio_busy_wait(eth);
	if (ret < 0)
		return ret;

	return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
}
442
/* mii_bus Clause-22 write callback; bus->priv holds the mtk_eth. */
static int mtk_mdio_write_c22(struct mii_bus *bus, int phy_addr,
			      int phy_reg, u16 val)
{
	return _mtk_mdio_write_c22(bus->priv, phy_addr, phy_reg, val);
}
450
/* mii_bus Clause-45 write callback; bus->priv holds the mtk_eth. */
static int mtk_mdio_write_c45(struct mii_bus *bus, int phy_addr,
			      int devad, int phy_reg, u16 val)
{
	return _mtk_mdio_write_c45(bus->priv, phy_addr, devad, phy_reg, val);
}
458
mtk_mdio_read_c22(struct mii_bus * bus,int phy_addr,int phy_reg)459 static int mtk_mdio_read_c22(struct mii_bus *bus, int phy_addr, int phy_reg)
460 {
461 struct mtk_eth *eth = bus->priv;
462
463 return _mtk_mdio_read_c22(eth, phy_addr, phy_reg);
464 }
465
mtk_mdio_read_c45(struct mii_bus * bus,int phy_addr,int devad,int phy_reg)466 static int mtk_mdio_read_c45(struct mii_bus *bus, int phy_addr, int devad,
467 int phy_reg)
468 {
469 struct mtk_eth *eth = bus->priv;
470
471 return _mtk_mdio_read_c45(eth, phy_addr, devad, phy_reg);
472 }
473
/* Select the GMAC0 TRGMII clock source on MT7621 via the ethsys syscon:
 * the DDR PLL for TRGMII, cleared for any other interface mode.
 * Always returns 0.
 */
static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
				     phy_interface_t interface)
{
	u32 clk_sel = 0;

	if (interface == PHY_INTERFACE_MODE_TRGMII)
		clk_sel = ETHSYS_TRGMII_MT7621_DDR_PLL;

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_MT7621_MASK, clk_sel);

	return 0;
}
487
/* Configure GMAC0 for TRGMII: set the interface-mode register and run
 * the TRGPLL at 500 MHz. Any other interface mode only logs an error,
 * since no PLL configuration is implemented for it here.
 */
static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
				   phy_interface_t interface)
{
	int ret;

	if (interface == PHY_INTERFACE_MODE_TRGMII) {
		mtk_w32(eth, TRGMII_MODE, INTF_MODE);
		ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], 500000000);
		if (ret)
			dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
		return;
	}

	dev_err(eth->dev, "Missing PLL configuration, ethernet may not work\n");
}
503
/* One-time setup for the internal switch bridged behind GMAC1:
 * force the XGMAC link up and set the GSW inter-packet gap.
 */
static void mtk_setup_bridge_switch(struct mtk_eth *eth)
{
	/* Force Port1 XGMAC Link Up */
	mtk_m32(eth, 0, MTK_XGMAC_FORCE_MODE(MTK_GMAC1_ID),
		MTK_XGMAC_STS(MTK_GMAC1_ID));

	/* Adjust GSW bridge IPG to 11 */
	mtk_m32(eth, GSWTX_IPG_MASK | GSWRX_IPG_MASK,
		(GSW_IPG_11 << GSWTX_IPG_SHIFT) |
		(GSW_IPG_11 << GSWRX_IPG_SHIFT),
		MTK_GSW_CFG);
}
516
/* phylink .mac_select_pcs callback.
 *
 * SGMII and 802.3z (1000base-X/2500base-X) links use a LynxI SGMII
 * PCS instance; every other interface mode has no separate PCS.
 */
static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
					      phy_interface_t interface)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;
	unsigned int sid;

	if (interface != PHY_INTERFACE_MODE_SGMII &&
	    !phy_interface_mode_is_8023z(interface))
		return NULL;

	/* A shared SGMII unit always lives at index 0; otherwise each
	 * MAC uses the PCS matching its own id.
	 */
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII))
		sid = 0;
	else
		sid = mac->id;

	return eth->sgmii_pcs[sid];
}
535
/* phylink .mac_prepare callback.
 *
 * For XGMII-class interfaces on any MAC other than GMAC1, disable the
 * XGMAC transmitter/receiver and put the port into force mode (with
 * force-link cleared) before reconfiguration. Always returns 0.
 */
static int mtk_mac_prepare(struct phylink_config *config, unsigned int mode,
			   phy_interface_t iface)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;

	if (mtk_interface_mode_is_xgmii(eth, iface) &&
	    mac->id != MTK_GMAC1_ID) {
		mtk_m32(mac->hw, XMAC_MCR_TRX_DISABLE,
			XMAC_MCR_TRX_DISABLE, MTK_XMAC_MCR(mac->id));

		mtk_m32(mac->hw, MTK_XGMAC_FORCE_MODE(mac->id) |
			MTK_XGMAC_FORCE_LINK(mac->id),
			MTK_XGMAC_FORCE_MODE(mac->id), MTK_XGMAC_STS(mac->id));
	}

	return 0;
}
555
/* phylink .mac_config callback.
 *
 * On an interface-mode change: routes the SoC mux/pins for the new
 * mode, sets up the GMAC0 (T)RGMII clocking where applicable, and
 * programs the GE_MODE field in the ethsys syscon. For SGMII/802.3z
 * modes it additionally pre-disables the SGMII path (re-enabled in
 * mac_finish) and, for XGMII-class modes, selects the XGDM egress
 * path. Errors are logged; the function returns void per the phylink
 * contract.
 */
static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
			   const struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;
	int val, ge_mode, err = 0;
	u32 i;

	/* MT76x8 has no hardware settings for the MAC */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
	    mac->interface != state->interface) {
		/* Setup soc pin functions */
		switch (state->interface) {
		case PHY_INTERFACE_MODE_TRGMII:
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_MII:
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_1000BASEX:
		case PHY_INTERFACE_MODE_2500BASEX:
		case PHY_INTERFACE_MODE_SGMII:
			err = mtk_gmac_sgmii_path_setup(eth, mac->id);
			if (err)
				goto init_err;
			break;
		case PHY_INTERFACE_MODE_GMII:
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
				err = mtk_gmac_gephy_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_INTERNAL:
			/* Internal mode is only muxed for the built-in
			 * 2.5G PHY on GMAC2.
			 */
			if (mac->id == MTK_GMAC2_ID &&
			    MTK_HAS_CAPS(eth->soc->caps, MTK_2P5GPHY)) {
				err = mtk_gmac_2p5gphy_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		default:
			goto err_phy;
		}

		/* Setup clock for 1st gmac */
		if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
		    !phy_interface_mode_is_8023z(state->interface) &&
		    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
			if (MTK_HAS_CAPS(mac->hw->soc->caps,
					 MTK_TRGMII_MT7621_CLK)) {
				if (mt7621_gmac0_rgmii_adjust(mac->hw,
							      state->interface))
					goto err_phy;
			} else {
				mtk_gmac0_rgmii_adjust(mac->hw,
						       state->interface);

				/* mt7623_pad_clk_setup */
				for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
					mtk_w32(mac->hw,
						TD_DM_DRVP(8) | TD_DM_DRVN(8),
						TRGMII_TD_ODT(i));

				/* Assert/release MT7623 RXC reset */
				mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
					TRGMII_RCK_CTRL);
				mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
			}
		}

		switch (state->interface) {
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_GMII:
			ge_mode = 1;
			break;
		default:
			ge_mode = 0;
			break;
		}

		/* put the gmac into the right mode */
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);

		mac->interface = state->interface;
	}

	/* SGMII */
	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
	    phy_interface_mode_is_8023z(state->interface)) {
		/* The path GMAC to SGMII will be enabled once the SGMIISYS is
		 * being setup done.
		 */
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);

		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK,
				   ~(u32)SYSCFG0_SGMII_MASK);

		/* Save the syscfg0 value for mac_finish */
		mac->syscfg0 = val;
	} else if (phylink_autoneg_inband(mode)) {
		dev_err(eth->dev,
			"In-band mode not supported in non SGMII mode!\n");
		return;
	}

	/* Setup gmac */
	if (mtk_interface_mode_is_xgmii(eth, state->interface)) {
		mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id));
		mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id));

		if (mac->id == MTK_GMAC1_ID)
			mtk_setup_bridge_switch(eth);
	}

	return;

err_phy:
	dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
		mac->id, phy_modes(state->interface));
	return;

init_err:
	dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
		mac->id, phy_modes(state->interface), err);
}
693
/* phylink .mac_finish callback.
 *
 * Re-enables the SGMII path using the syscfg0 value saved by
 * mtk_mac_config(), then programs the baseline MAC control bits.
 * Always returns 0.
 */
static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
			  phy_interface_t interface)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;
	u32 mcr_cur, mcr_new;

	/* Enable SGMII */
	if (interface == PHY_INTERFACE_MODE_SGMII ||
	    phy_interface_mode_is_8023z(interface))
		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK, mac->syscfg0);

	/* Setup gmac */
	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
	mcr_new = mcr_cur;
	mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
		   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_RX_FIFO_CLR_DIS;

	/* Only update control register when needed! */
	if (mcr_new != mcr_cur)
		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));

	return 0;
}
720
/* phylink .mac_link_down callback.
 *
 * GMAC modes: stop TX/RX and drop force-link. XGMAC modes: disable
 * TRX and clear force-link, except on GMAC1 which carries the
 * built-in switch and is left untouched.
 */
static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
			      phy_interface_t interface)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);

	if (!mtk_interface_mode_is_xgmii(mac->hw, interface)) {
		/* GMAC modes */
		mtk_m32(mac->hw,
			MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK, 0,
			MTK_MAC_MCR(mac->id));
	} else if (mac->id != MTK_GMAC1_ID) {
		/* XGMAC except for built-in switch */
		mtk_m32(mac->hw, XMAC_MCR_TRX_DISABLE, XMAC_MCR_TRX_DISABLE,
			MTK_XMAC_MCR(mac->id));
		mtk_m32(mac->hw, MTK_XGMAC_FORCE_LINK(mac->id), 0,
			MTK_XGMAC_STS(mac->id));
	}
}
740
/* Program the QDMA TX scheduler shaper for queue @idx to match the
 * link @speed.
 *
 * Rates are encoded as mantissa * 10^exponent; a minimum rate of
 * 10 Mbps is always enabled. MT7621 uses different mantissa/exponent
 * pairs than later SoCs. No-op on SoCs without QDMA. Unlisted speeds
 * leave the max-rate shaper disabled.
 */
static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
				int speed)
{
	const struct mtk_soc_data *soc = eth->soc;
	u32 ofs, val;

	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
		return;

	val = MTK_QTX_SCH_MIN_RATE_EN |
	      /* minimum: 10 Mbps */
	      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
	      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
	      MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
	if (mtk_is_netsys_v1(eth))
		val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;

	if (IS_ENABLED(CONFIG_SOC_MT7621)) {
		switch (speed) {
		case SPEED_10:
			val |= MTK_QTX_SCH_MAX_RATE_EN |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 2) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
			break;
		case SPEED_100:
			val |= MTK_QTX_SCH_MAX_RATE_EN |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
			break;
		case SPEED_1000:
			val |= MTK_QTX_SCH_MAX_RATE_EN |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 105) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
			break;
		default:
			break;
		}
	} else {
		switch (speed) {
		case SPEED_10:
			val |= MTK_QTX_SCH_MAX_RATE_EN |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
			break;
		case SPEED_100:
			val |= MTK_QTX_SCH_MAX_RATE_EN |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
			break;
		case SPEED_1000:
			val |= MTK_QTX_SCH_MAX_RATE_EN |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 6) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
			break;
		default:
			break;
		}
	}

	/* Each queue owns a fixed-size register window. */
	ofs = MTK_QTX_OFFSET * idx;
	mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
}
809
/* Bring a GDM (1G-class) MAC link up: program speed, duplex and pause
 * into MAC_MCR, then enable TX/RX with force-link. Also caches the
 * negotiated speed in mac->speed.
 */
static void mtk_gdm_mac_link_up(struct mtk_mac *mac,
				struct phy_device *phy,
				unsigned int mode, phy_interface_t interface,
				int speed, int duplex, bool tx_pause,
				bool rx_pause)
{
	u32 mcr;

	mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
	mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
		 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
		 MAC_MCR_FORCE_RX_FC);

	/* Configure speed */
	mac->speed = speed;
	switch (speed) {
	case SPEED_2500:
	case SPEED_1000:
		/* 2.5G reuses the 1000M speed bit in this register. */
		mcr |= MAC_MCR_SPEED_1000;
		break;
	case SPEED_100:
		mcr |= MAC_MCR_SPEED_100;
		break;
	}

	/* Configure duplex */
	if (duplex == DUPLEX_FULL)
		mcr |= MAC_MCR_FORCE_DPX;

	/* Configure pause modes - phylink will avoid these for half duplex */
	if (tx_pause)
		mcr |= MAC_MCR_FORCE_TX_FC;
	if (rx_pause)
		mcr |= MAC_MCR_FORCE_RX_FC;

	mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK;
	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
848
/* Bring an XGDM (10G-class) MAC link up: pulse the XMAC logic reset,
 * clear its counters, force the link, then enable TRX with the
 * requested pause configuration. GMAC1 (the built-in switch port) is
 * skipped entirely.
 */
static void mtk_xgdm_mac_link_up(struct mtk_mac *mac,
				 struct phy_device *phy,
				 unsigned int mode, phy_interface_t interface,
				 int speed, int duplex, bool tx_pause,
				 bool rx_pause)
{
	u32 mcr;

	if (mac->id == MTK_GMAC1_ID)
		return;

	/* Eliminate the interference(before link-up) caused by PHY noise */
	mtk_m32(mac->hw, XMAC_LOGIC_RST, 0, MTK_XMAC_LOGIC_RST(mac->id));
	mdelay(20);
	mtk_m32(mac->hw, XMAC_GLB_CNTCLR, XMAC_GLB_CNTCLR,
		MTK_XMAC_CNT_CTRL(mac->id));

	mtk_m32(mac->hw, MTK_XGMAC_FORCE_LINK(mac->id),
		MTK_XGMAC_FORCE_LINK(mac->id), MTK_XGMAC_STS(mac->id));

	mcr = mtk_r32(mac->hw, MTK_XMAC_MCR(mac->id));
	mcr &= ~(XMAC_MCR_FORCE_TX_FC | XMAC_MCR_FORCE_RX_FC |
		 XMAC_MCR_TRX_DISABLE);
	/* Configure pause modes -
	 * phylink will avoid these for half duplex
	 */
	if (tx_pause)
		mcr |= XMAC_MCR_FORCE_TX_FC;
	if (rx_pause)
		mcr |= XMAC_MCR_FORCE_RX_FC;

	mtk_w32(mac->hw, mcr, MTK_XMAC_MCR(mac->id));
}
882
/* phylink .mac_link_up callback: dispatch to the GDM (1G-class) or
 * XGDM (10G-class) link-up helper based on the interface mode.
 */
static void mtk_mac_link_up(struct phylink_config *config,
			    struct phy_device *phy,
			    unsigned int mode, phy_interface_t interface,
			    int speed, int duplex, bool tx_pause, bool rx_pause)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);

	if (!mtk_interface_mode_is_xgmii(mac->hw, interface))
		mtk_gdm_mac_link_up(mac, phy, mode, interface, speed, duplex,
				    tx_pause, rx_pause);
	else
		mtk_xgdm_mac_link_up(mac, phy, mode, interface, speed, duplex,
				     tx_pause, rx_pause);
}
898
mtk_mac_disable_tx_lpi(struct phylink_config * config)899 static void mtk_mac_disable_tx_lpi(struct phylink_config *config)
900 {
901 struct mtk_mac *mac = container_of(config, struct mtk_mac,
902 phylink_config);
903 struct mtk_eth *eth = mac->hw;
904
905 mtk_m32(eth, MAC_MCR_EEE100M | MAC_MCR_EEE1G, 0, MTK_MAC_MCR(mac->id));
906 }
907
/* phylink .mac_enable_tx_lpi callback.
 *
 * @timer: LPI idle time in microseconds (converted to ms below).
 * @tx_clk_stop: stop the TX clock while idle in LPI.
 *
 * Programs the EEE control register and sets the EEE enable bits in
 * MAC_MCR. Returns -EOPNOTSUPP for XGMII-class interfaces, 0 otherwise.
 */
static int mtk_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
				 bool tx_clk_stop)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;
	u32 val;

	if (mtk_interface_mode_is_xgmii(eth, mac->interface))
		return -EOPNOTSUPP;

	/* Tx idle timer in ms */
	timer = DIV_ROUND_UP(timer, 1000);

	/* If the timer is zero, then set LPI_MODE, which allows the
	 * system to enter LPI mode immediately rather than waiting for
	 * the LPI threshold.
	 */
	if (!timer)
		val = MAC_EEE_LPI_MODE;
	else if (FIELD_FIT(MAC_EEE_LPI_TXIDLE_THD, timer))
		val = FIELD_PREP(MAC_EEE_LPI_TXIDLE_THD, timer);
	else
		/* Timer too large for the field: clamp to maximum. */
		val = MAC_EEE_LPI_TXIDLE_THD;

	if (tx_clk_stop)
		val |= MAC_EEE_CKG_TXIDLE;

	/* PHY Wake-up time, this field does not have a reset value, so use the
	 * reset value from MT7531 (36us for 100M and 17us for 1000M).
	 */
	val |= FIELD_PREP(MAC_EEE_WAKEUP_TIME_1000, 17) |
	       FIELD_PREP(MAC_EEE_WAKEUP_TIME_100, 36);

	mtk_w32(eth, val, MTK_MAC_EEECR(mac->id));
	mtk_m32(eth, 0, MAC_MCR_EEE100M | MAC_MCR_EEE1G, MTK_MAC_MCR(mac->id));

	return 0;
}
947
/* phylink MAC operations implemented by this driver. */
static const struct phylink_mac_ops mtk_phylink_ops = {
	.mac_prepare = mtk_mac_prepare,
	.mac_select_pcs = mtk_mac_select_pcs,
	.mac_config = mtk_mac_config,
	.mac_finish = mtk_mac_finish,
	.mac_link_down = mtk_mac_link_down,
	.mac_link_up = mtk_mac_link_up,
	.mac_disable_tx_lpi = mtk_mac_disable_tx_lpi,
	.mac_enable_tx_lpi = mtk_mac_enable_tx_lpi,
};
958
/* Apply the MDC divider (computed in mtk_mdio_init) and turbo-mode
 * setting to the hardware. NETSYS v3+ carries the turbo bit in a
 * separate MISC register instead of PPSC.
 */
static void mtk_mdio_config(struct mtk_eth *eth)
{
	u32 val;

	/* Configure MDC Divider */
	val = FIELD_PREP(PPSC_MDC_CFG, eth->mdc_divider);

	/* Configure MDC Turbo Mode */
	if (mtk_is_netsys_v3_or_greater(eth))
		mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3);
	else
		val |= PPSC_MDC_TURBO;

	mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC);
}
974
/* Allocate and register the MDIO bus described by the "mdio-bus" DT
 * child node.
 *
 * Validates the optional "clock-frequency" property against the
 * supported MDC range, derives the clock divider (capped at 63), and
 * registers the bus with the clause-22/45 accessors above.
 *
 * Returns 0 on success; -ENODEV if the node is missing, -ENOMEM on
 * allocation failure, -EINVAL for an out-of-range frequency, or the
 * of_mdiobus_register() error.
 */
static int mtk_mdio_init(struct mtk_eth *eth)
{
	unsigned int max_clk = 2500000;
	struct device_node *mii_np;
	int ret;
	u32 val;

	mii_np = of_get_available_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found", "mdio-bus");
		return -ENODEV;
	}

	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
	if (!eth->mii_bus) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	eth->mii_bus->name = "mdio";
	eth->mii_bus->read = mtk_mdio_read_c22;
	eth->mii_bus->write = mtk_mdio_write_c22;
	eth->mii_bus->read_c45 = mtk_mdio_read_c45;
	eth->mii_bus->write_c45 = mtk_mdio_write_c45;
	eth->mii_bus->priv = eth;
	eth->mii_bus->parent = eth->dev;

	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);

	if (!of_property_read_u32(mii_np, "clock-frequency", &val)) {
		if (val > MDC_MAX_FREQ || val < MDC_MAX_FREQ / MDC_MAX_DIVIDER) {
			dev_err(eth->dev, "MDIO clock frequency out of range");
			ret = -EINVAL;
			goto err_put_node;
		}
		max_clk = val;
	}
	eth->mdc_divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
	mtk_mdio_config(eth);
	dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / eth->mdc_divider);
	ret = of_mdiobus_register(eth->mii_bus, mii_np);

err_put_node:
	of_node_put(mii_np);
	return ret;
}
1021
mtk_mdio_cleanup(struct mtk_eth * eth)1022 static void mtk_mdio_cleanup(struct mtk_eth *eth)
1023 {
1024 if (!eth->mii_bus)
1025 return;
1026
1027 mdiobus_unregister(eth->mii_bus);
1028 }
1029
/* Clear @mask bits in the TX interrupt mask register under
 * tx_irq_lock. (The lock argument was corrupted to "ð" by an
 * HTML-entity mangling of "&eth"; restored here.)
 */
static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
	mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}
1040
/* Set @mask bits in the TX interrupt mask register under tx_irq_lock.
 * (Restores "&eth" from the HTML-entity-mangled "ð".)
 */
static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
	mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}
1051
/* Clear @mask bits in the PDMA RX interrupt mask register under
 * rx_irq_lock. (Restores "&eth" from the HTML-entity-mangled "ð".)
 */
static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
	mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}
1062
/* Set @mask bits in the PDMA RX interrupt mask register under
 * rx_irq_lock. (Restores "&eth" from the HTML-entity-mangled "ð".)
 */
static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
	mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}
1073
/* ndo_set_mac_address: validate and store the new address via
 * eth_mac_addr(), then program it into the hardware address
 * registers (the MT7628 SDM registers, or the per-GDMA registers on
 * everything else). Returns -EBUSY while a hardware reset is in
 * progress.
 */
static int mtk_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const char *macaddr = dev->dev_addr;

	if (ret)
		return ret;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	spin_lock_bh(&mac->hw->page_lock);
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		/* ADRH holds the top two bytes, ADRL the remaining four. */
		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
			MT7628_SDM_MAC_ADRH);
		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
			(macaddr[4] << 8) | macaddr[5],
			MT7628_SDM_MAC_ADRL);
	} else {
		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
			MTK_GDMA_MAC_ADRH(mac->id));
		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
			(macaddr[4] << 8) | macaddr[5],
			MTK_GDMA_MAC_ADRL(mac->id));
	}
	spin_unlock_bh(&mac->hw->page_lock);

	return 0;
}
1105
/* mtk_stats_update_mac - fold one MAC's hardware MIB counters into its
 * software mtk_hw_stats.
 *
 * Each counter read is accumulated ("+=") into the 64-bit software
 * counters inside a u64_stats write section, so readers on 32-bit
 * hosts see consistent values.  Callers serialize against each other
 * with hw_stats->stats_lock.
 * NOTE(review): the accumulate-on-read pattern suggests the MIB
 * registers are clear-on-read — confirm against the SoC datasheet.
 */
void mtk_stats_update_mac(struct mtk_mac *mac)
{
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	struct mtk_eth *eth = mac->hw;

	u64_stats_update_begin(&hw_stats->syncp);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		/* MT7628/88 exposes a small fixed set of SDM counters */
		hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
		hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
		hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
		hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
		hw_stats->rx_checksum_errors +=
			mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
	} else {
		const struct mtk_reg_map *reg_map = eth->soc->reg_map;
		/* per-MAC offset into the GDM counter block */
		unsigned int offs = hw_stats->reg_offset;
		u64 stats;

		/* byte counters are 64 bit: low word, then high word */
		hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
		stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
		if (stats)
			hw_stats->rx_bytes += (stats << 32);
		hw_stats->rx_packets +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
		hw_stats->rx_overflow +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
		hw_stats->rx_fcs_errors +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
		hw_stats->rx_short_errors +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
		hw_stats->rx_long_errors +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
		hw_stats->rx_checksum_errors +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
		hw_stats->rx_flow_control_packets +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);

		if (mtk_is_netsys_v3_or_greater(eth)) {
			/* NETSYS v3 moved the TX counters to higher offsets */
			hw_stats->tx_skip +=
				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x50 + offs);
			hw_stats->tx_collisions +=
				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x54 + offs);
			hw_stats->tx_bytes +=
				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x40 + offs);
			stats =	mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x44 + offs);
			if (stats)
				hw_stats->tx_bytes += (stats << 32);
			hw_stats->tx_packets +=
				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x48 + offs);
		} else {
			hw_stats->tx_skip +=
				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
			hw_stats->tx_collisions +=
				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
			hw_stats->tx_bytes +=
				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
			stats =	mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
			if (stats)
				hw_stats->tx_bytes += (stats << 32);
			hw_stats->tx_packets +=
				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
		}
	}

	u64_stats_update_end(&hw_stats->syncp);
}
1173
mtk_stats_update(struct mtk_eth * eth)1174 static void mtk_stats_update(struct mtk_eth *eth)
1175 {
1176 int i;
1177
1178 for (i = 0; i < MTK_MAX_DEVS; i++) {
1179 if (!eth->mac[i] || !eth->mac[i]->hw_stats)
1180 continue;
1181 if (spin_trylock(ð->mac[i]->hw_stats->stats_lock)) {
1182 mtk_stats_update_mac(eth->mac[i]);
1183 spin_unlock(ð->mac[i]->hw_stats->stats_lock);
1184 }
1185 }
1186 }
1187
/* ndo_get_stats64 callback: report accumulated counters to the stack.
 *
 * If the interface is up, opportunistically pull fresh deltas from the
 * hardware first (trylock: slightly stale values are acceptable when
 * another updater holds the lock).  The copy runs inside a u64_stats
 * retry loop so 64-bit counters read consistently on 32-bit hosts.
 */
static void mtk_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *storage)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int start;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock_bh(&hw_stats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock_bh(&hw_stats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin(&hw_stats->syncp);
		storage->rx_packets = hw_stats->rx_packets;
		storage->tx_packets = hw_stats->tx_packets;
		storage->rx_bytes = hw_stats->rx_bytes;
		storage->tx_bytes = hw_stats->tx_bytes;
		storage->collisions = hw_stats->tx_collisions;
		storage->rx_length_errors = hw_stats->rx_short_errors +
			hw_stats->rx_long_errors;
		storage->rx_over_errors = hw_stats->rx_overflow;
		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
		storage->rx_errors = hw_stats->rx_checksum_errors;
		storage->tx_aborted_errors = hw_stats->tx_skip;
	} while (u64_stats_fetch_retry(&hw_stats->syncp, start));

	/* drop/error counts tracked in software live in dev->stats */
	storage->tx_errors = dev->stats.tx_errors;
	storage->rx_dropped = dev->stats.rx_dropped;
	storage->tx_dropped = dev->stats.tx_dropped;
}
1221
mtk_max_frag_size(int mtu)1222 static inline int mtk_max_frag_size(int mtu)
1223 {
1224 /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
1225 if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K)
1226 mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
1227
1228 return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
1229 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1230 }
1231
mtk_max_buf_size(int frag_size)1232 static inline int mtk_max_buf_size(int frag_size)
1233 {
1234 int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
1235 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1236
1237 WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K);
1238
1239 return buf_size;
1240 }
1241
/* Snapshot a hardware RX descriptor into @rxd.
 *
 * rxd2 carries the DMA-done bit and is read first; only when the
 * hardware has marked the descriptor complete are the remaining words
 * copied.  READ_ONCE() keeps the compiler from tearing or reordering
 * loads from the shared descriptor memory.
 *
 * Returns true when a completed descriptor was copied out.
 */
static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
			    struct mtk_rx_dma_v2 *dma_rxd)
{
	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
	if (!(rxd->rxd2 & RX_DMA_DONE))
		return false;

	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
	if (mtk_is_netsys_v3_or_greater(eth)) {
		/* v3+ descriptors carry two extra info words */
		rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
		rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
	}

	return true;
}
1259
/* Allocate a buffer large enough for a maximum-size HW LRO frame.
 * Returns NULL on failure (__GFP_NOWARN: callers handle it quietly).
 */
static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
{
	gfp_t gfp = gfp_mask | __GFP_COMP | __GFP_NOWARN;
	unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);

	return (void *)__get_free_pages(gfp, get_order(size));
}
1270
/* Allocate a DMA descriptor ring, preferring the on-chip SRAM pool
 * when @use_sram is set and the SoC provides one; otherwise fall back
 * to coherent DMA memory.  *@dma_handle receives the device address.
 */
static void *mtk_dma_ring_alloc(struct mtk_eth *eth, size_t size,
				dma_addr_t *dma_handle, bool use_sram)
{
	void *ring;

	if (!use_sram || !eth->sram_pool)
		return dma_alloc_coherent(eth->dma_dev, size, dma_handle,
					  GFP_KERNEL);

	ring = (void *)gen_pool_alloc(eth->sram_pool, size);
	if (ring)
		*dma_handle = gen_pool_virt_to_phys(eth->sram_pool,
						    (unsigned long)ring);

	return ring;
}
1289
/* Release a ring obtained from mtk_dma_ring_alloc(); @in_sram must
 * match the use_sram flag the ring was allocated with.
 */
static void mtk_dma_ring_free(struct mtk_eth *eth, size_t size, void *dma_ring,
			      dma_addr_t dma_handle, bool in_sram)
{
	if (!in_sram || !eth->sram_pool) {
		dma_free_coherent(eth->dma_dev, size, dma_ring, dma_handle);
		return;
	}

	gen_pool_free(eth->sram_pool, (unsigned long)dma_ring, size);
}
1298
/* the qdma core needs scratch memory to be setup */
/* Allocate the QDMA free-queue (scratch) ring and its backing pages.
 *
 * Builds a linked list of fq_dma_size TX descriptors, each pointing at
 * one MTK_QDMA_PAGE_SIZE chunk of scratch memory, then programs the
 * head/tail/count/blen FQ registers.
 *
 * Returns 0 or -ENOMEM.  NOTE(review): on partial failure nothing is
 * freed here — presumably the caller's DMA teardown path reclaims
 * scratch_ring/scratch_head; verify against callers.
 */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
	const struct mtk_soc_data *soc = eth->soc;
	dma_addr_t phy_ring_tail;
	int cnt = soc->tx.fq_dma_size;
	dma_addr_t dma_addr;
	int i, j, len;

	/* the ring itself sits in SRAM when the SoC provides it */
	eth->scratch_ring = mtk_dma_ring_alloc(eth, cnt * soc->tx.desc_size,
					       &eth->phy_scratch_ring, true);

	if (unlikely(!eth->scratch_ring))
		return -ENOMEM;

	phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);

	/* scratch pages come in batches of MTK_FQ_DMA_LENGTH entries */
	for (j = 0; j < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); j++) {
		len = min_t(int, cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH);
		eth->scratch_head[j] = kcalloc(len, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);

		if (unlikely(!eth->scratch_head[j]))
			return -ENOMEM;

		dma_addr = dma_map_single(eth->dma_dev,
					  eth->scratch_head[j], len * MTK_QDMA_PAGE_SIZE,
					  DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
			return -ENOMEM;

		for (i = 0; i < len; i++) {
			struct mtk_tx_dma_v2 *txd;

			txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size;
			/* txd1 = buffer address, txd2 = link to next desc */
			txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
			if (j * MTK_FQ_DMA_LENGTH + i < cnt)
				txd->txd2 = eth->phy_scratch_ring +
					    (j * MTK_FQ_DMA_LENGTH + i + 1) * soc->tx.desc_size;

			txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
			if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
				/* upper bits of the 36-bit buffer address */
				txd->txd3 |= TX_DMA_PREP_ADDR64(dma_addr + i * MTK_QDMA_PAGE_SIZE);

			txd->txd4 = 0;
			if (mtk_is_netsys_v2_or_greater(eth)) {
				/* v2+ descriptors are wider: clear extra words */
				txd->txd5 = 0;
				txd->txd6 = 0;
				txd->txd7 = 0;
				txd->txd8 = 0;
			}
		}
	}

	mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
	mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
	mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);

	return 0;
}
1360
/* Translate a QDMA descriptor's device address to its CPU pointer
 * using the ring's base virtual/physical addresses.
 */
static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
	u32 ofs = desc - ring->phys;

	return ring->dma + ofs;
}
1365
/* Look up the tx_buf bookkeeping slot that corresponds to descriptor
 * @txd; @txd_size is the per-SoC descriptor stride.
 */
static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
					     void *txd, u32 txd_size)
{
	return ring->buf + (txd - ring->dma) / txd_size;
}
1373
qdma_to_pdma(struct mtk_tx_ring * ring,struct mtk_tx_dma * dma)1374 static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
1375 struct mtk_tx_dma *dma)
1376 {
1377 return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
1378 }
1379
/* Ring index of descriptor @dma, given the per-SoC stride @txd_size. */
static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
{
	ptrdiff_t ofs = dma - ring->dma;

	return ofs / txd_size;
}
1384
/* Release the DMA mapping and the attached skb/xdp_frame of one TX slot.
 *
 * QDMA keeps one mapping per tx_buf, tagged single vs page by the
 * MTK_TX_FLAGS_* bits.  PDMA packs two buffers per descriptor
 * (dma_addr0/1); each is unmapped when its recorded length is non-zero.
 *
 * @napi: true when called from NAPI context; selects the NAPI-aware
 *        free variants.
 * @bq:   optional XDP bulk-free queue used for non-NAPI XDP frames.
 */
static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
			 struct xdp_frame_bulk *bq, bool napi)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
			dma_unmap_single(eth->dma_dev,
					 dma_unmap_addr(tx_buf, dma_addr0),
					 dma_unmap_len(tx_buf, dma_len0),
					 DMA_TO_DEVICE);
		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
			dma_unmap_page(eth->dma_dev,
				       dma_unmap_addr(tx_buf, dma_addr0),
				       dma_unmap_len(tx_buf, dma_len0),
				       DMA_TO_DEVICE);
		}
	} else {
		if (dma_unmap_len(tx_buf, dma_len0)) {
			dma_unmap_page(eth->dma_dev,
				       dma_unmap_addr(tx_buf, dma_addr0),
				       dma_unmap_len(tx_buf, dma_len0),
				       DMA_TO_DEVICE);
		}

		if (dma_unmap_len(tx_buf, dma_len1)) {
			dma_unmap_page(eth->dma_dev,
				       dma_unmap_addr(tx_buf, dma_addr1),
				       dma_unmap_len(tx_buf, dma_len1),
				       DMA_TO_DEVICE);
		}
	}

	/* MTK_DMA_DUMMY_DESC marks a slot with no buffer of its own */
	if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
		if (tx_buf->type == MTK_TYPE_SKB) {
			struct sk_buff *skb = tx_buf->data;

			if (napi)
				napi_consume_skb(skb, napi);
			else
				dev_kfree_skb_any(skb);
		} else {
			struct xdp_frame *xdpf = tx_buf->data;

			if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
				xdp_return_frame_rx_napi(xdpf);
			else if (bq)
				xdp_return_frame_bulk(xdpf, bq);
			else
				xdp_return_frame(xdpf);
		}
	}
	tx_buf->flags = 0;
	tx_buf->data = NULL;
}
1438
/* Record a freshly mapped buffer in @tx_buf and, for PDMA, in the
 * shadow descriptor @txd.
 *
 * QDMA only needs the unmap bookkeeping here (its descriptor is
 * written by mtk_tx_set_dma_desc()).  PDMA packs two buffers per
 * descriptor: even @idx fills txd1/PLEN0 + dma_addr0, odd @idx fills
 * txd3/PLEN1 + dma_addr1.
 */
static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
			 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
			 size_t size, int idx)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
		dma_unmap_len_set(tx_buf, dma_len0, size);
	} else {
		if (idx & 1) {
			txd->txd3 = mapped_addr;
			txd->txd2 |= TX_DMA_PLEN1(size);
			dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len1, size);
		} else {
			/* slot 0 of this descriptor pair has no own buffer */
			tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
			txd->txd1 = mapped_addr;
			txd->txd2 = TX_DMA_PLEN0(size);
			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len0, size);
		}
	}
}
1461
mtk_tx_set_dma_desc_v1(struct net_device * dev,void * txd,struct mtk_tx_dma_desc_info * info)1462 static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
1463 struct mtk_tx_dma_desc_info *info)
1464 {
1465 struct mtk_mac *mac = netdev_priv(dev);
1466 struct mtk_eth *eth = mac->hw;
1467 struct mtk_tx_dma *desc = txd;
1468 u32 data;
1469
1470 WRITE_ONCE(desc->txd1, info->addr);
1471
1472 data = TX_DMA_SWC | TX_DMA_PLEN0(info->size) |
1473 FIELD_PREP(TX_DMA_PQID, info->qid);
1474 if (info->last)
1475 data |= TX_DMA_LS0;
1476 WRITE_ONCE(desc->txd3, data);
1477
1478 data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
1479 if (info->first) {
1480 if (info->gso)
1481 data |= TX_DMA_TSO;
1482 /* tx checksum offload */
1483 if (info->csum)
1484 data |= TX_DMA_CHKSUM;
1485 /* vlan header offload */
1486 if (info->vlan)
1487 data |= TX_DMA_INS_VLAN | info->vlan_tci;
1488 }
1489 WRITE_ONCE(desc->txd4, data);
1490 }
1491
mtk_tx_set_dma_desc_v2(struct net_device * dev,void * txd,struct mtk_tx_dma_desc_info * info)1492 static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
1493 struct mtk_tx_dma_desc_info *info)
1494 {
1495 struct mtk_mac *mac = netdev_priv(dev);
1496 struct mtk_tx_dma_v2 *desc = txd;
1497 struct mtk_eth *eth = mac->hw;
1498 u32 data;
1499
1500 WRITE_ONCE(desc->txd1, info->addr);
1501
1502 data = TX_DMA_PLEN0(info->size);
1503 if (info->last)
1504 data |= TX_DMA_LS0;
1505
1506 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
1507 data |= TX_DMA_PREP_ADDR64(info->addr);
1508
1509 WRITE_ONCE(desc->txd3, data);
1510
1511 /* set forward port */
1512 switch (mac->id) {
1513 case MTK_GMAC1_ID:
1514 data = PSE_GDM1_PORT << TX_DMA_FPORT_SHIFT_V2;
1515 break;
1516 case MTK_GMAC2_ID:
1517 data = PSE_GDM2_PORT << TX_DMA_FPORT_SHIFT_V2;
1518 break;
1519 case MTK_GMAC3_ID:
1520 data = PSE_GDM3_PORT << TX_DMA_FPORT_SHIFT_V2;
1521 break;
1522 }
1523
1524 data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
1525 WRITE_ONCE(desc->txd4, data);
1526
1527 data = 0;
1528 if (info->first) {
1529 if (info->gso)
1530 data |= TX_DMA_TSO_V2;
1531 /* tx checksum offload */
1532 if (info->csum)
1533 data |= TX_DMA_CHKSUM_V2;
1534 if (mtk_is_netsys_v3_or_greater(eth) && netdev_uses_dsa(dev))
1535 data |= TX_DMA_SPTAG_V3;
1536 }
1537 WRITE_ONCE(desc->txd5, data);
1538
1539 data = 0;
1540 if (info->first && info->vlan)
1541 data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
1542 WRITE_ONCE(desc->txd6, data);
1543
1544 WRITE_ONCE(desc->txd7, 0);
1545 WRITE_ONCE(desc->txd8, 0);
1546 }
1547
mtk_tx_set_dma_desc(struct net_device * dev,void * txd,struct mtk_tx_dma_desc_info * info)1548 static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
1549 struct mtk_tx_dma_desc_info *info)
1550 {
1551 struct mtk_mac *mac = netdev_priv(dev);
1552 struct mtk_eth *eth = mac->hw;
1553
1554 if (mtk_is_netsys_v2_or_greater(eth))
1555 mtk_tx_set_dma_desc_v2(dev, txd, info);
1556 else
1557 mtk_tx_set_dma_desc_v1(dev, txd, info);
1558 }
1559
/* Map an skb (head plus frags) onto TX descriptors and kick the DMA.
 *
 * The first descriptor carries the offload flags; frags larger than
 * dma_max_len are split over several descriptors.  On PDMA two buffers
 * share one descriptor (see setup_tx_buf()), hence the 'k' parity
 * tracking.  Called with eth->page_lock held.
 *
 * Returns 0, or -ENOMEM after unwinding a partially built chain.
 */
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
		      int tx_num, struct mtk_tx_ring *ring, bool gso)
{
	struct mtk_tx_dma_desc_info txd_info = {
		.size = skb_headlen(skb),
		.gso = gso,
		.csum = skb->ip_summed == CHECKSUM_PARTIAL,
		.vlan = skb_vlan_tag_present(skb),
		.qid = skb_get_queue_mapping(skb),
		.vlan_tci = skb_vlan_tag_get(skb),
		.first = true,
		.last = !skb_is_nonlinear(skb),
	};
	struct netdev_queue *txq;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const struct mtk_soc_data *soc = eth->soc;
	struct mtk_tx_dma *itxd, *txd;
	struct mtk_tx_dma *itxd_pdma, *txd_pdma;
	struct mtk_tx_buf *itx_buf, *tx_buf;
	int i, n_desc = 1;
	int queue = skb_get_queue_mapping(skb);
	int k = 0;		/* PDMA buffer parity counter */

	txq = netdev_get_tx_queue(dev, queue);
	itxd = ring->next_free;
	itxd_pdma = qdma_to_pdma(ring, itxd);
	if (itxd == ring->last_free)
		return -ENOMEM;

	itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
	memset(itx_buf, 0, sizeof(*itx_buf));

	/* map the linear part of the skb */
	txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
				       DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
		return -ENOMEM;

	mtk_tx_set_dma_desc(dev, itxd, &txd_info);

	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
	itx_buf->mac_id = mac->id;
	setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
		     k++);

	/* TX SG offload */
	txd = itxd;
	txd_pdma = qdma_to_pdma(ring, txd);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		unsigned int offset = 0;
		int frag_size = skb_frag_size(frag);

		/* a frag may need several descriptors (dma_max_len cap) */
		while (frag_size) {
			bool new_desc = true;

			/* PDMA advances only every other buffer */
			if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
			    (i & 0x1)) {
				txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
				txd_pdma = qdma_to_pdma(ring, txd);
				if (txd == ring->last_free)
					goto err_dma;

				n_desc++;
			} else {
				new_desc = false;
			}

			memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
			txd_info.size = min_t(unsigned int, frag_size,
					      soc->tx.dma_max_len);
			txd_info.qid = queue;
			txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
					!(frag_size - txd_info.size);
			txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
							 offset, txd_info.size,
							 DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
				goto err_dma;

			mtk_tx_set_dma_desc(dev, txd, &txd_info);

			tx_buf = mtk_desc_to_tx_buf(ring, txd,
						    soc->tx.desc_size);
			if (new_desc)
				memset(tx_buf, 0, sizeof(*tx_buf));
			tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
			tx_buf->mac_id = mac->id;

			setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
				     txd_info.size, k++);

			frag_size -= txd_info.size;
			offset += txd_info.size;
		}
	}

	/* store skb to cleanup */
	itx_buf->type = MTK_TYPE_SKB;
	itx_buf->data = skb;

	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
		/* last-segment flag goes on whichever PDMA slot was last */
		if (k & 0x1)
			txd_pdma->txd2 |= TX_DMA_LS0;
		else
			txd_pdma->txd2 |= TX_DMA_LS1;
	}

	netdev_tx_sent_queue(txq, skb->len);
	skb_tx_timestamp(skb);

	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
	atomic_sub(n_desc, &ring->free_count);

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
		if (netif_xmit_stopped(txq) || !netdev_xmit_more())
			mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
	} else {
		int next_idx;

		next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_size),
					 ring->dma_size);
		mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
	}

	return 0;

err_dma:
	/* walk forward from the head descriptor, releasing mappings and
	 * handing descriptors back to the CPU
	 */
	do {
		tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);

		/* unmap dma */
		mtk_tx_unmap(eth, tx_buf, NULL, false);

		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
			itxd_pdma->txd2 = TX_DMA_DESP2_DEF;

		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
		itxd_pdma = qdma_to_pdma(ring, itxd);
	} while (itxd != txd);

	return -ENOMEM;
}
1711
mtk_cal_txd_req(struct mtk_eth * eth,struct sk_buff * skb)1712 static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
1713 {
1714 int i, nfrags = 1;
1715 skb_frag_t *frag;
1716
1717 if (skb_is_gso(skb)) {
1718 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1719 frag = &skb_shinfo(skb)->frags[i];
1720 nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1721 eth->soc->tx.dma_max_len);
1722 }
1723 } else {
1724 nfrags += skb_shinfo(skb)->nr_frags;
1725 }
1726
1727 return nfrags;
1728 }
1729
mtk_queue_stopped(struct mtk_eth * eth)1730 static int mtk_queue_stopped(struct mtk_eth *eth)
1731 {
1732 int i;
1733
1734 for (i = 0; i < MTK_MAX_DEVS; i++) {
1735 if (!eth->netdev[i])
1736 continue;
1737 if (netif_queue_stopped(eth->netdev[i]))
1738 return 1;
1739 }
1740
1741 return 0;
1742 }
1743
mtk_wake_queue(struct mtk_eth * eth)1744 static void mtk_wake_queue(struct mtk_eth *eth)
1745 {
1746 int i;
1747
1748 for (i = 0; i < MTK_MAX_DEVS; i++) {
1749 if (!eth->netdev[i])
1750 continue;
1751 netif_tx_wake_all_queues(eth->netdev[i]);
1752 }
1753 }
1754
/* ndo_start_xmit callback.
 *
 * Returns NETDEV_TX_BUSY (with all queues stopped) when the ring
 * cannot hold the descriptors this skb needs, so the stack requeues
 * it; otherwise NETDEV_TX_OK — including on drops, which are counted
 * in dev->stats.tx_dropped.
 */
static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct net_device_stats *stats = &dev->stats;
	bool gso = false;
	int tx_num;

	/* NOTE(review): looks like HW VLAN insertion is only relied on
	 * for 802.3 frames; otherwise the tag is pushed into the payload
	 * in software — confirm against the VLAN offload configuration.
	 */
	if (skb_vlan_tag_present(skb) &&
	    !eth_proto_is_802_3(eth_hdr(skb)->h_proto)) {
		skb = __vlan_hwaccel_push_inside(skb);
		if (!skb)
			goto dropped;
	}

	/* normally we can rely on the stack not calling this more than once,
	 * however we have 2 queues running on the same ring so we need to lock
	 * the ring access
	 */
	spin_lock(&eth->page_lock);

	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
		goto drop;

	tx_num = mtk_cal_txd_req(eth, skb);
	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
		netif_tx_stop_all_queues(dev);
		netif_err(eth, tx_queued, dev,
			  "Tx Ring full when queue awake!\n");
		spin_unlock(&eth->page_lock);
		return NETDEV_TX_BUSY;
	}

	/* TSO: fill MSS info in tcp checksum field */
	if (skb_is_gso(skb)) {
		if (skb_cow_head(skb, 0)) {
			netif_warn(eth, tx_err, dev,
				   "GSO expand head fail.\n");
			goto drop;
		}

		if (skb_shinfo(skb)->gso_type &
		    (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
			gso = true;
			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
		}
	}

	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
		goto drop;

	/* stop early when the ring is about to run dry */
	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
		netif_tx_stop_all_queues(dev);

	spin_unlock(&eth->page_lock);

	return NETDEV_TX_OK;

drop:
	spin_unlock(&eth->page_lock);
	dev_kfree_skb_any(skb);
dropped:
	stats->tx_dropped++;
	return NETDEV_TX_OK;
}
1821
mtk_get_rx_ring(struct mtk_eth * eth)1822 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1823 {
1824 int i;
1825 struct mtk_rx_ring *ring;
1826 int idx;
1827
1828 if (!eth->hwlro)
1829 return ð->rx_ring[0];
1830
1831 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1832 struct mtk_rx_dma *rxd;
1833
1834 ring = ð->rx_ring[i];
1835 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1836 rxd = ring->dma + idx * eth->soc->rx.desc_size;
1837 if (rxd->rxd2 & RX_DMA_DONE) {
1838 ring->calc_idx_update = true;
1839 return ring;
1840 }
1841 }
1842
1843 return NULL;
1844 }
1845
mtk_update_rx_cpu_idx(struct mtk_eth * eth)1846 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
1847 {
1848 struct mtk_rx_ring *ring;
1849 int i;
1850
1851 if (!eth->hwlro) {
1852 ring = ð->rx_ring[0];
1853 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1854 } else {
1855 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1856 ring = ð->rx_ring[i];
1857 if (ring->calc_idx_update) {
1858 ring->calc_idx_update = false;
1859 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1860 }
1861 }
1862 }
1863 }
1864
/* Page-pool backed RX buffers are only used on NETSYS v2 or newer
 * SoCs; earlier generations keep the legacy frag allocation path.
 */
static bool mtk_page_pool_enabled(struct mtk_eth *eth)
{
	return mtk_is_netsys_v2_or_greater(eth);
}
1869
/* Create a page pool for one RX ring and register it as the XDP memory
 * model of @xdp_q.
 *
 * The DMA direction is bidirectional while an XDP program is attached
 * (buffers may be transmitted back out), device-to-CPU otherwise.
 * Returns the pool or an ERR_PTR(); on a partial failure the already
 * registered rxq / created pool are unwound.
 */
static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
					      struct xdp_rxq_info *xdp_q,
					      int id, int size)
{
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = size,
		.nid = NUMA_NO_NODE,
		.dev = eth->dma_dev,
		.offset = MTK_PP_HEADROOM,
		.max_len = MTK_PP_MAX_BUF_SIZE,
	};
	struct page_pool *pp;
	int err;

	pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
							  : DMA_FROM_DEVICE;
	pp = page_pool_create(&pp_params);
	if (IS_ERR(pp))
		return pp;

	err = __xdp_rxq_info_reg(xdp_q, eth->dummy_dev, id,
				 eth->rx_napi.napi_id, PAGE_SIZE);
	if (err < 0)
		goto err_free_pp;

	err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
	if (err)
		goto err_unregister_rxq;

	return pp;

err_unregister_rxq:
	xdp_rxq_info_unreg(xdp_q);
err_free_pp:
	page_pool_destroy(pp);

	return ERR_PTR(err);
}
1910
/* Pull one page from @pp; on success return its CPU address and store
 * the device address (past the reserved headroom) in *@dma_addr.
 */
static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
				    gfp_t gfp_mask)
{
	struct page *page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);

	if (!page)
		return NULL;

	*dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;

	return page_address(page);
}
1923
/* Return an RX buffer to wherever it came from: the ring's page pool
 * when one exists, otherwise the page-frag allocator.
 */
static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
{
	if (!ring->page_pool) {
		skb_free_frag(data);
		return;
	}

	page_pool_put_full_page(ring->page_pool, virt_to_head_page(data),
				napi);
}
1932
/* Map one xdp_frame buffer (head or frag) and fill TX descriptor @txd.
 *
 * @dma_map is true for ndo_xdp_xmit frames, which need a fresh
 * dma_map_single(); XDP_TX frames already live in page-pool memory and
 * only need a cache sync of the region the device will read.
 * Returns 0 or -ENOMEM on mapping failure.
 */
static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
			     struct mtk_tx_dma_desc_info *txd_info,
			     struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
			     void *data, u16 headroom, int index, bool dma_map)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_tx_dma *txd_pdma;

	if (dma_map) { /* ndo_xdp_xmit */
		txd_info->addr = dma_map_single(eth->dma_dev, data,
						txd_info->size, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
			return -ENOMEM;

		tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
	} else {
		struct page *page = virt_to_head_page(data);

		/* page-pool buffer: derive the device address from the page */
		txd_info->addr = page_pool_get_dma_addr(page) +
				 sizeof(struct xdp_frame) + headroom;
		dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
					   txd_info->size, DMA_BIDIRECTIONAL);
	}
	mtk_tx_set_dma_desc(dev, txd, txd_info);

	tx_buf->mac_id = mac->id;
	tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
	/* the real frame pointer is stored in the head tx_buf by the caller */
	tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;

	txd_pdma = qdma_to_pdma(ring, txd);
	setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
		     index);

	return 0;
}
1969
/* Queue one xdp_frame (linear part plus any frags) on the TX ring.
 *
 * Used both for XDP_TX (@dma_map == false, buffers already in
 * page-pool memory) and ndo_xdp_xmit (@dma_map == true).  The head
 * tx_buf keeps the xdp_frame pointer for cleanup on TX completion.
 *
 * Returns 0, -EBUSY when resetting or out of descriptors, or the
 * mapping error after unwinding the partially built chain.
 */
static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
				struct net_device *dev, bool dma_map)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
	const struct mtk_soc_data *soc = eth->soc;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_tx_dma_desc_info txd_info = {
		.size = xdpf->len,
		.first = true,
		.last = !xdp_frame_has_frags(xdpf),
		.qid = mac->id,
	};
	int err, index = 0, n_desc = 1, nr_frags;
	struct mtk_tx_buf *htx_buf, *tx_buf;
	struct mtk_tx_dma *htxd, *txd;
	void *data = xdpf->data;

	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
		return -EBUSY;

	nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
	if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
		return -EBUSY;

	spin_lock(&eth->page_lock);

	txd = ring->next_free;
	if (txd == ring->last_free) {
		spin_unlock(&eth->page_lock);
		return -ENOMEM;
	}
	htxd = txd;

	tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size);
	memset(tx_buf, 0, sizeof(*tx_buf));
	htx_buf = tx_buf;

	for (;;) {
		err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
					data, xdpf->headroom, index, dma_map);
		if (err < 0)
			goto unmap;

		if (txd_info.last)
			break;

		/* PDMA packs two buffers per descriptor; advance the
		 * descriptor only on QDMA or every other buffer
		 */
		if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
			txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
			if (txd == ring->last_free)
				goto unmap;

			tx_buf = mtk_desc_to_tx_buf(ring, txd,
						    soc->tx.desc_size);
			memset(tx_buf, 0, sizeof(*tx_buf));
			n_desc++;
		}

		/* next iteration maps the next frag */
		memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
		txd_info.size = skb_frag_size(&sinfo->frags[index]);
		txd_info.last = index + 1 == nr_frags;
		txd_info.qid = mac->id;
		data = skb_frag_address(&sinfo->frags[index]);

		index++;
	}
	/* store xdpf for cleanup */
	htx_buf->data = xdpf;

	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
		struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, txd);

		/* last-segment flag lands on whichever PDMA slot was last */
		if (index & 1)
			txd_pdma->txd2 |= TX_DMA_LS0;
		else
			txd_pdma->txd2 |= TX_DMA_LS1;
	}

	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
	atomic_sub(n_desc, &ring->free_count);

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
		mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
	} else {
		int idx;

		idx = txd_to_idx(ring, txd, soc->tx.desc_size);
		mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
			MT7628_TX_CTX_IDX0);
	}

	spin_unlock(&eth->page_lock);

	return 0;

unmap:
	/* walk forward from the head descriptor, releasing mappings and
	 * returning descriptors to the CPU
	 */
	while (htxd != txd) {
		tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size);
		mtk_tx_unmap(eth, tx_buf, NULL, false);

		htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
			struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, htxd);

			txd_pdma->txd2 = TX_DMA_DESP2_DEF;
		}

		htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
	}

	spin_unlock(&eth->page_lock);

	return err;
}
2089
/* ndo_xdp_xmit callback: transmit a batch of redirected XDP frames.
 *
 * Stops at the first frame that cannot be queued and returns the
 * number actually submitted; successes and failures are accounted in
 * the per-MAC XDP stats.
 */
static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
			struct xdp_frame **frames, u32 flags)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	struct mtk_eth *eth = mac->hw;
	int nxmit = 0;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	while (nxmit < num_frame) {
		if (mtk_xdp_submit_frame(eth, frames[nxmit], dev, true))
			break;
		nxmit++;
	}

	u64_stats_update_begin(&hw_stats->syncp);
	hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
	hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
	u64_stats_update_end(&hw_stats->syncp);

	return nxmit;
}
2114
/* Run the attached XDP program (if any) on one received buffer.
 *
 * Returns the XDP verdict. On XDP_PASS the caller keeps the buffer and
 * builds an skb from it; for XDP_TX/XDP_REDIRECT ownership of the buffer
 * has been handed off; for XDP_DROP/XDP_ABORTED (and failed TX/redirect)
 * the page is returned to the page_pool here. Exactly one per-verdict
 * counter is bumped under the u64_stats sync.
 */
static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
		       struct xdp_buff *xdp, struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	/* default counter: drop; overwritten on the success paths */
	u64 *count = &hw_stats->xdp_stats.rx_xdp_drop;
	struct bpf_prog *prog;
	u32 act = XDP_PASS;

	rcu_read_lock();

	prog = rcu_dereference(eth->prog);
	if (!prog)
		goto out;

	act = bpf_prog_run_xdp(prog, xdp);
	switch (act) {
	case XDP_PASS:
		count = &hw_stats->xdp_stats.rx_xdp_pass;
		goto update_stats;
	case XDP_REDIRECT:
		if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
			act = XDP_DROP;
			break;
		}

		count = &hw_stats->xdp_stats.rx_xdp_redirect;
		goto update_stats;
	case XDP_TX: {
		struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);

		if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
			count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
			act = XDP_DROP;
			break;
		}

		count = &hw_stats->xdp_stats.rx_xdp_tx;
		goto update_stats;
	}
	default:
		bpf_warn_invalid_xdp_action(dev, prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(dev, prog, act);
		fallthrough;
	case XDP_DROP:
		break;
	}

	/* drop paths fall through here: recycle the page */
	page_pool_put_full_page(ring->page_pool,
				virt_to_head_page(xdp->data), true);

update_stats:
	u64_stats_update_begin(&hw_stats->syncp);
	*count = *count + 1;
	u64_stats_update_end(&hw_stats->syncp);
out:
	rcu_read_unlock();

	return act;
}
2177
/* NAPI RX poll: reap up to @budget completed descriptors across the RX
 * rings, run XDP where a page_pool is in use, build skbs and hand them
 * to GRO. Each consumed slot is immediately refilled with a fresh buffer
 * (or, on allocation failure, the old buffer is given back to the HW via
 * release_desc). Returns the number of packets processed.
 */
static int mtk_poll_rx(struct napi_struct *napi, int budget,
		       struct mtk_eth *eth)
{
	struct dim_sample dim_sample = {};
	struct mtk_rx_ring *ring;
	bool xdp_flush = false;
	int idx;
	struct sk_buff *skb;
	u64 addr64 = 0;
	u8 *data, *new_data;
	struct mtk_rx_dma_v2 *rxd, trxd;
	int done = 0, bytes = 0;
	dma_addr_t dma_addr = DMA_MAPPING_ERROR;
	int ppe_idx = 0;

	while (done < budget) {
		unsigned int pktlen, *rxdcsum;
		struct net_device *netdev;
		u32 hash, reason;
		int mac = 0;

		ring = mtk_get_rx_ring(eth);
		if (unlikely(!ring))
			goto rx_done;

		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
		rxd = ring->dma + idx * eth->soc->rx.desc_size;
		data = ring->data[idx];

		/* stop when the descriptor is still owned by the HW */
		if (!mtk_rx_get_desc(eth, &trxd, rxd))
			break;

		/* find out which mac the packet come from. values start at 1 */
		if (mtk_is_netsys_v3_or_greater(eth)) {
			u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);

			switch (val) {
			case PSE_GDM1_PORT:
			case PSE_GDM2_PORT:
				mac = val - 1;
				break;
			case PSE_GDM3_PORT:
				mac = MTK_GMAC3_ID;
				break;
			default:
				break;
			}
		} else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
			   !(trxd.rxd4 & RX_DMA_SPECIAL_TAG)) {
			mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
		}

		if (unlikely(mac < 0 || mac >= MTK_MAX_DEVS ||
			     !eth->netdev[mac]))
			goto release_desc;

		netdev = eth->netdev[mac];
		ppe_idx = eth->mac[mac]->ppe_idx;

		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
			goto release_desc;

		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);

		/* alloc new buffer */
		if (ring->page_pool) {
			struct page *page = virt_to_head_page(data);
			struct xdp_buff xdp;
			u32 ret, metasize;

			new_data = mtk_page_pool_get_buff(ring->page_pool,
							  &dma_addr,
							  GFP_ATOMIC);
			if (unlikely(!new_data)) {
				netdev->stats.rx_dropped++;
				goto release_desc;
			}

			dma_sync_single_for_cpu(eth->dma_dev,
				page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
				pktlen, page_pool_get_dma_dir(ring->page_pool));

			xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
			xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
					 true);
			xdp_buff_clear_frags_flag(&xdp);

			ret = mtk_xdp_run(eth, ring, &xdp, netdev);
			if (ret == XDP_REDIRECT)
				xdp_flush = true;

			/* non-PASS verdicts consumed/freed the buffer */
			if (ret != XDP_PASS)
				goto skip_rx;

			skb = build_skb(data, PAGE_SIZE);
			if (unlikely(!skb)) {
				page_pool_put_full_page(ring->page_pool,
							page, true);
				netdev->stats.rx_dropped++;
				goto skip_rx;
			}

			/* honour any headroom/length adjustment the XDP
			 * program made
			 */
			skb_reserve(skb, xdp.data - xdp.data_hard_start);
			skb_put(skb, xdp.data_end - xdp.data);
			metasize = xdp.data - xdp.data_meta;
			if (metasize)
				skb_metadata_set(skb, metasize);
			skb_mark_for_recycle(skb);
		} else {
			if (ring->frag_size <= PAGE_SIZE)
				new_data = napi_alloc_frag(ring->frag_size);
			else
				new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);

			if (unlikely(!new_data)) {
				netdev->stats.rx_dropped++;
				goto release_desc;
			}

			dma_addr = dma_map_single(eth->dma_dev,
				new_data + NET_SKB_PAD + eth->ip_align,
				ring->buf_size, DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(eth->dma_dev,
						       dma_addr))) {
				skb_free_frag(new_data);
				netdev->stats.rx_dropped++;
				goto release_desc;
			}

			/* rxd1 only holds the low 32 address bits on
			 * 36-bit DMA SoCs; the high bits live in rxd2
			 */
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
				addr64 = RX_DMA_GET_ADDR64(trxd.rxd2);

			dma_unmap_single(eth->dma_dev, ((u64)trxd.rxd1 | addr64),
					 ring->buf_size, DMA_FROM_DEVICE);

			skb = build_skb(data, ring->frag_size);
			if (unlikely(!skb)) {
				netdev->stats.rx_dropped++;
				skb_free_frag(data);
				goto skip_rx;
			}

			skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
			skb_put(skb, pktlen);
		}

		skb->dev = netdev;
		bytes += skb->len;

		/* PPE reason/hash and the L4-checksum-valid flag live in
		 * different descriptor words depending on the netsys version
		 */
		if (mtk_is_netsys_v3_or_greater(eth)) {
			reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
			hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
			if (hash != MTK_RXD5_FOE_ENTRY)
				skb_set_hash(skb, jhash_1word(hash, 0),
					     PKT_HASH_TYPE_L4);
			rxdcsum = &trxd.rxd3;
		} else {
			reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
			hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
			if (hash != MTK_RXD4_FOE_ENTRY)
				skb_set_hash(skb, jhash_1word(hash, 0),
					     PKT_HASH_TYPE_L4);
			rxdcsum = &trxd.rxd4;
		}

		if (*rxdcsum & eth->soc->rx.dma_l4_valid)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, netdev);

		/* When using VLAN untagging in combination with DSA, the
		 * hardware treats the MTK special tag as a VLAN and untags it.
		 */
		if (mtk_is_netsys_v1(eth) && (trxd.rxd2 & RX_DMA_VTAG) &&
		    netdev_uses_dsa(netdev)) {
			unsigned int port = RX_DMA_VPID(trxd.rxd3) & GENMASK(2, 0);

			if (port < ARRAY_SIZE(eth->dsa_meta) &&
			    eth->dsa_meta[port])
				skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
		}

		if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
			mtk_ppe_check_skb(eth->ppe[ppe_idx], skb, hash);

		skb_record_rx_queue(skb, 0);
		napi_gro_receive(napi, skb);

skip_rx:
		/* install the replacement buffer in the ring slot */
		ring->data[idx] = new_data;
		rxd->rxd1 = (unsigned int)dma_addr;
release_desc:
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
			/* no new mapping was made: keep the old high
			 * address bits already present in the descriptor
			 */
			if (unlikely(dma_addr == DMA_MAPPING_ERROR))
				addr64 = FIELD_GET(RX_DMA_ADDR64_MASK,
						   rxd->rxd2);
			else
				addr64 = RX_DMA_PREP_ADDR64(dma_addr);
		}

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
			rxd->rxd2 = RX_DMA_LSO;
		else
			rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size) | addr64;

		ring->calc_idx = idx;
		done++;
	}

rx_done:
	if (done) {
		/* make sure that all changes to the dma ring are flushed before
		 * we continue
		 */
		wmb();
		mtk_update_rx_cpu_idx(eth);
	}

	eth->rx_packets += done;
	eth->rx_bytes += bytes;
	dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
			  &dim_sample);
	net_dim(&eth->rx_dim, &dim_sample);

	if (xdp_flush)
		xdp_do_flush();

	return done;
}
2408
/* Book-keeping for one TX completion pass: completions are batched per
 * netdev queue so netdev_tx_completed_queue() (BQL) is called once per
 * queue transition instead of once per packet.
 */
struct mtk_poll_state {
	struct netdev_queue *txq;	/* queue 'done'/'bytes' accumulate for */
	unsigned int total;		/* packets completed across all queues */
	unsigned int done;		/* packets completed on txq */
	unsigned int bytes;		/* bytes completed on txq */
};
2415
/* Account one completed TX skb for MAC @mac and fold it into the
 * per-queue batch in @state; flushes the previous queue's batch to
 * netdev_tx_completed_queue() when the queue changes.
 */
static void
mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac,
		 struct sk_buff *skb)
{
	struct netdev_queue *txq;
	struct net_device *dev;
	unsigned int bytes = skb->len;

	state->total++;
	eth->tx_packets++;
	eth->tx_bytes += bytes;

	dev = eth->netdev[mac];
	if (!dev)
		return;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	if (state->txq == txq) {
		/* same queue as the running batch: just accumulate */
		state->done++;
		state->bytes += bytes;
		return;
	}

	/* queue changed: report the finished batch, start a new one */
	if (state->txq)
		netdev_tx_completed_queue(state->txq, state->done, state->bytes);

	state->txq = txq;
	state->done = 1;
	state->bytes = bytes;
}
2446
/* Reap completed TX descriptors on the QDMA ring, walking from the last
 * CPU-released descriptor up to the HW's current DMA pointer.
 * Returns the remaining @budget (i.e. budget minus real packets reaped).
 */
static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
			    struct mtk_poll_state *state)
{
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_tx_buf *tx_buf;
	struct xdp_frame_bulk bq;
	struct mtk_tx_dma *desc;
	u32 cpu, dma;

	cpu = ring->last_free_ptr;
	dma = mtk_r32(eth, reg_map->qdma.drx_ptr);

	desc = mtk_qdma_phys_to_virt(ring, cpu);
	xdp_frame_bulk_init(&bq);

	while ((cpu != dma) && budget) {
		/* descriptors are linked through txd2 on QDMA */
		u32 next_cpu = desc->txd2;

		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
			break;

		tx_buf = mtk_desc_to_tx_buf(ring, desc,
					    eth->soc->tx.desc_size);
		if (!tx_buf->data)
			break;

		/* dummy descriptors carry no packet and cost no budget */
		if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
			if (tx_buf->type == MTK_TYPE_SKB)
				mtk_poll_tx_done(eth, state, tx_buf->mac_id,
						 tx_buf->data);

			budget--;
		}
		mtk_tx_unmap(eth, tx_buf, &bq, true);

		ring->last_free = desc;
		atomic_inc(&ring->free_count);

		cpu = next_cpu;
	}
	xdp_flush_frame_bulk(&bq);

	/* return the freed range to the HW */
	ring->last_free_ptr = cpu;
	mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);

	return budget;
}
2496
/* Reap completed TX descriptors on the PDMA (MT7628-style) ring; here
 * the ring is a plain array indexed by cpu_idx rather than a linked
 * list. Returns the remaining @budget.
 */
static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
			    struct mtk_poll_state *state)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_tx_buf *tx_buf;
	struct xdp_frame_bulk bq;
	struct mtk_tx_dma *desc;
	u32 cpu, dma;

	cpu = ring->cpu_idx;
	dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
	xdp_frame_bulk_init(&bq);

	while ((cpu != dma) && budget) {
		tx_buf = &ring->buf[cpu];
		if (!tx_buf->data)
			break;

		/* dummy descriptors carry no packet and cost no budget */
		if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
			if (tx_buf->type == MTK_TYPE_SKB)
				mtk_poll_tx_done(eth, state, 0, tx_buf->data);
			budget--;
		}
		mtk_tx_unmap(eth, tx_buf, &bq, true);

		desc = ring->dma + cpu * eth->soc->tx.desc_size;
		ring->last_free = desc;
		atomic_inc(&ring->free_count);

		cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
	}
	xdp_flush_frame_bulk(&bq);

	ring->cpu_idx = cpu;

	return budget;
}
2534
/* TX completion poll entry point: dispatch to the QDMA or PDMA reaper,
 * flush the final BQL batch, feed DIM, and wake stopped queues once
 * enough descriptors are free. Returns the number of completed packets.
 */
static int mtk_poll_tx(struct mtk_eth *eth, int budget)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct dim_sample dim_sample = {};
	struct mtk_poll_state state = {};

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		budget = mtk_poll_tx_qdma(eth, budget, &state);
	else
		budget = mtk_poll_tx_pdma(eth, budget, &state);

	/* flush the last per-queue batch left open by mtk_poll_tx_done() */
	if (state.txq)
		netdev_tx_completed_queue(state.txq, state.done, state.bytes);

	dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
			  &dim_sample);
	net_dim(&eth->tx_dim, &dim_sample);

	if (mtk_queue_stopped(eth) &&
	    (atomic_read(&ring->free_count) > ring->thresh))
		mtk_wake_queue(eth);

	return state.total;
}
2559
mtk_handle_status_irq(struct mtk_eth * eth)2560 static void mtk_handle_status_irq(struct mtk_eth *eth)
2561 {
2562 u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
2563
2564 if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
2565 mtk_stats_update(eth);
2566 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
2567 MTK_INT_STATUS2);
2568 }
2569 }
2570
/* NAPI poll handler for TX completions. Acks the TX-done interrupt,
 * reaps completions, and only re-enables the interrupt once no work
 * remains and NAPI is completed.
 */
static int mtk_napi_tx(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	int tx_done = 0;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		mtk_handle_status_irq(eth);
	/* ack before polling so a new completion re-raises the status */
	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
	tx_done = mtk_poll_tx(eth, budget);

	if (unlikely(netif_msg_intr(eth))) {
		dev_info(eth->dev,
			 "done tx %d, intr 0x%08x/0x%x\n", tx_done,
			 mtk_r32(eth, reg_map->tx_irq_status),
			 mtk_r32(eth, reg_map->tx_irq_mask));
	}

	/* budget exhausted: stay scheduled */
	if (tx_done == budget)
		return budget;

	/* more work arrived while polling: stay scheduled */
	if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
		return budget;

	if (napi_complete_done(napi, tx_done))
		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);

	return tx_done;
}
2600
/* NAPI poll handler for RX. Loops ack-then-poll until either the budget
 * is spent or the RX-done status stays clear, then completes NAPI and
 * re-enables the RX interrupt.
 */
static int mtk_napi_rx(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	int rx_done_total = 0;

	mtk_handle_status_irq(eth);

	do {
		int rx_done;

		/* ack before polling so new packets re-raise the status */
		mtk_w32(eth, eth->soc->rx.irq_done_mask,
			reg_map->pdma.irq_status);
		rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
		rx_done_total += rx_done;

		if (unlikely(netif_msg_intr(eth))) {
			dev_info(eth->dev,
				 "done rx %d, intr 0x%08x/0x%x\n", rx_done,
				 mtk_r32(eth, reg_map->pdma.irq_status),
				 mtk_r32(eth, reg_map->pdma.irq_mask));
		}

		/* budget exhausted: stay scheduled */
		if (rx_done_total == budget)
			return budget;

	} while (mtk_r32(eth, reg_map->pdma.irq_status) &
		 eth->soc->rx.irq_done_mask);

	if (napi_complete_done(napi, rx_done_total))
		mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);

	return rx_done_total;
}
2635
/* Allocate and initialise the TX descriptor ring (QDMA linked list, plus
 * a shadow PDMA ring on SoCs without QDMA) and program the ring base,
 * pointers and per-queue scheduler registers into the HW.
 * Returns 0 on success or -ENOMEM.
 */
static int mtk_tx_alloc(struct mtk_eth *eth)
{
	const struct mtk_soc_data *soc = eth->soc;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i, sz = soc->tx.desc_size;
	struct mtk_tx_dma_v2 *txd;
	int ring_size;
	u32 ofs, val;

	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
		ring_size = MTK_QDMA_RING_SIZE;
	else
		ring_size = soc->tx.dma_size;

	/* zeroed array of ring_size tx_buf entries */
	ring->buf = kzalloc_objs(*ring->buf, ring_size);
	if (!ring->buf)
		goto no_tx_mem;

	ring->dma = mtk_dma_ring_alloc(eth, ring_size * sz, &ring->phys, true);
	if (!ring->dma)
		goto no_tx_mem;

	/* link every descriptor to the next via txd2 and hand ownership
	 * to the CPU
	 */
	for (i = 0; i < ring_size; i++) {
		int next = (i + 1) % ring_size;
		u32 next_ptr = ring->phys + next * sz;

		txd = ring->dma + i * sz;
		txd->txd2 = next_ptr;
		txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
		txd->txd4 = 0;
		if (mtk_is_netsys_v2_or_greater(eth)) {
			txd->txd5 = 0;
			txd->txd6 = 0;
			txd->txd7 = 0;
			txd->txd8 = 0;
		}
	}

	/* On MT7688 (PDMA only) this driver uses the ring->dma structs
	 * only as the framework. The real HW descriptors are the PDMA
	 * descriptors in ring->dma_pdma.
	 */
	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
		ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
						    &ring->phys_pdma, GFP_KERNEL);
		if (!ring->dma_pdma)
			goto no_tx_mem;

		for (i = 0; i < ring_size; i++) {
			ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
			ring->dma_pdma[i].txd4 = 0;
		}
	}

	ring->dma_size = ring_size;
	/* keep two descriptors in reserve */
	atomic_set(&ring->free_count, ring_size - 2);
	ring->next_free = ring->dma;
	ring->last_free = (void *)txd;
	ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz));
	ring->thresh = MAX_SKB_FRAGS;

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
		mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
		mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
		mtk_w32(eth,
			ring->phys + ((ring_size - 1) * sz),
			soc->reg_map->qdma.crx_ptr);
		mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);

		/* configure each HW TX queue's thresholds and scheduler */
		for (i = 0, ofs = 0; i < MTK_QDMA_NUM_QUEUES; i++) {
			val = (QDMA_RES_THRES << 8) | QDMA_RES_THRES;
			mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs);

			val = MTK_QTX_SCH_MIN_RATE_EN |
			      /* minimum: 10 Mbps */
			      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
			      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
			      MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
			if (mtk_is_netsys_v1(eth))
				val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
			mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
			ofs += MTK_QTX_OFFSET;
		}
		val = MTK_QDMA_TX_SCH_MAX_WFQ | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
		mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate);
		if (mtk_is_netsys_v2_or_greater(eth))
			mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4);
	} else {
		mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
		mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0);
		mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
		mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
	}

	return 0;

no_tx_mem:
	/* partially allocated resources are released by mtk_tx_clean() —
	 * NOTE(review): relies on the caller's cleanup path; confirm
	 */
	return -ENOMEM;
}
2740
/* Release everything mtk_tx_alloc() set up: unmap/free all pending TX
 * buffers, then free the QDMA descriptor ring and, if present, the
 * shadow PDMA ring. Safe to call on a partially-initialised ring —
 * each part is freed only if it was allocated.
 */
static void mtk_tx_clean(struct mtk_eth *eth)
{
	const struct mtk_soc_data *soc = eth->soc;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i;

	if (ring->buf) {
		for (i = 0; i < ring->dma_size; i++)
			mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
		kfree(ring->buf);
		ring->buf = NULL;
	}

	if (ring->dma) {
		mtk_dma_ring_free(eth, ring->dma_size * soc->tx.desc_size,
				  ring->dma, ring->phys, true);
		ring->dma = NULL;
	}

	if (ring->dma_pdma) {
		dma_free_coherent(eth->dma_dev,
				  ring->dma_size * soc->tx.desc_size,
				  ring->dma_pdma, ring->phys_pdma);
		ring->dma_pdma = NULL;
	}
}
2767
/* Allocate and initialise one RX descriptor ring of the given flavour
 * (@rx_flag: normal, HW-LRO or QDMA), populate it with buffers (from the
 * page_pool when XDP is possible, otherwise page fragments mapped for
 * DMA), and program ring base/size/CPU index into the HW.
 * Returns 0 on success or a negative errno.
 */
static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	const struct mtk_soc_data *soc = eth->soc;
	struct mtk_rx_ring *ring;
	int rx_data_len, rx_dma_size;
	int i;

	if (rx_flag == MTK_RX_FLAGS_QDMA) {
		/* only one QDMA RX ring exists */
		if (ring_no)
			return -EINVAL;
		ring = &eth->rx_ring_qdma;
	} else {
		ring = &eth->rx_ring[ring_no];
	}

	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
	} else {
		rx_data_len = ETH_DATA_LEN;
		rx_dma_size = soc->rx.dma_size;
	}

	ring->frag_size = mtk_max_frag_size(rx_data_len);
	ring->buf_size = mtk_max_buf_size(ring->frag_size);
	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
			     GFP_KERNEL);
	if (!ring->data)
		return -ENOMEM;

	if (mtk_page_pool_enabled(eth)) {
		struct page_pool *pp;

		pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
					  rx_dma_size);
		if (IS_ERR(pp))
			return PTR_ERR(pp);

		ring->page_pool = pp;
	}

	/* only the normal ring may live in SRAM */
	ring->dma = mtk_dma_ring_alloc(eth,
				       rx_dma_size * eth->soc->rx.desc_size,
				       &ring->phys,
				       rx_flag == MTK_RX_FLAGS_NORMAL);
	if (!ring->dma)
		return -ENOMEM;

	/* pre-fill every slot with a mapped receive buffer */
	for (i = 0; i < rx_dma_size; i++) {
		struct mtk_rx_dma_v2 *rxd;
		dma_addr_t dma_addr;
		void *data;

		rxd = ring->dma + i * eth->soc->rx.desc_size;
		if (ring->page_pool) {
			data = mtk_page_pool_get_buff(ring->page_pool,
						      &dma_addr, GFP_KERNEL);
			if (!data)
				return -ENOMEM;
		} else {
			if (ring->frag_size <= PAGE_SIZE)
				data = netdev_alloc_frag(ring->frag_size);
			else
				data = mtk_max_lro_buf_alloc(GFP_KERNEL);

			if (!data)
				return -ENOMEM;

			dma_addr = dma_map_single(eth->dma_dev,
				data + NET_SKB_PAD + eth->ip_align,
				ring->buf_size, DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(eth->dma_dev,
						       dma_addr))) {
				skb_free_frag(data);
				return -ENOMEM;
			}
		}
		rxd->rxd1 = (unsigned int)dma_addr;
		ring->data[i] = data;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
			rxd->rxd2 = RX_DMA_LSO;
		else
			rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);

		/* high address bits go into rxd2 on 36-bit DMA SoCs */
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
			rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);

		rxd->rxd3 = 0;
		rxd->rxd4 = 0;
		if (mtk_is_netsys_v3_or_greater(eth)) {
			rxd->rxd5 = 0;
			rxd->rxd6 = 0;
			rxd->rxd7 = 0;
			rxd->rxd8 = 0;
		}
	}

	ring->dma_size = rx_dma_size;
	ring->calc_idx_update = false;
	ring->calc_idx = rx_dma_size - 1;
	if (rx_flag == MTK_RX_FLAGS_QDMA)
		ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
				    ring_no * MTK_QRX_OFFSET;
	else
		ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
				    ring_no * MTK_QRX_OFFSET;
	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (rx_flag == MTK_RX_FLAGS_QDMA) {
		mtk_w32(eth, ring->phys,
			reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
		mtk_w32(eth, rx_dma_size,
			reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
			reg_map->qdma.rst_idx);
	} else {
		mtk_w32(eth, ring->phys,
			reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
		mtk_w32(eth, rx_dma_size,
			reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
			reg_map->pdma.rst_idx);
	}
	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);

	return 0;
}
2900
/* Tear down one RX ring: unmap and free every remaining buffer, free the
 * descriptor memory (@in_sram selects the SRAM vs DMA free path), and
 * unregister/destroy the XDP rxq and page_pool if they were created.
 */
static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_sram)
{
	u64 addr64 = 0;
	int i;

	if (ring->data && ring->dma) {
		for (i = 0; i < ring->dma_size; i++) {
			struct mtk_rx_dma *rxd;

			if (!ring->data[i])
				continue;

			rxd = ring->dma + i * eth->soc->rx.desc_size;
			if (!rxd->rxd1)
				continue;

			/* recombine the high address bits from rxd2 on
			 * 36-bit DMA SoCs before unmapping
			 */
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
				addr64 = RX_DMA_GET_ADDR64(rxd->rxd2);

			dma_unmap_single(eth->dma_dev, ((u64)rxd->rxd1 | addr64),
					 ring->buf_size, DMA_FROM_DEVICE);
			mtk_rx_put_buff(ring, ring->data[i], false);
		}
		kfree(ring->data);
		ring->data = NULL;
	}

	if (ring->dma) {
		mtk_dma_ring_free(eth, ring->dma_size * eth->soc->rx.desc_size,
				  ring->dma, ring->phys, in_sram);
		ring->dma = NULL;
	}

	if (ring->page_pool) {
		if (xdp_rxq_info_is_reg(&ring->xdp_q))
			xdp_rxq_info_unreg(&ring->xdp_q);
		page_pool_destroy(ring->page_pool);
		ring->page_pool = NULL;
	}
}
2941
/* Configure and enable the hardware LRO engine: program the aggregation
 * rings (1..MTK_MAX_RX_RING_NUM-1; ring 0 stays plain RX) in auto-learn
 * mode with the age/agg timers and count limits, then set the global LRO
 * control registers. Always returns 0.
 */
static int mtk_hwlro_rx_init(struct mtk_eth *eth)
{
	int i;
	u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
	u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;

	/* set LRO rings to auto-learn modes */
	ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;

	/* validate LRO ring */
	ring_ctrl_dw2 |= MTK_RING_VLD;

	/* set AGE timer (unit: 20us) */
	ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
	ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;

	/* set max AGG timer (unit: 20us) */
	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;

	/* set max LRO AGG count */
	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
	ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;

	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
		mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
		mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
		mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
	}

	/* IPv4 checksum update enable */
	lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;

	/* switch priority comparison to packet count mode */
	lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;

	/* bandwidth threshold setting */
	mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);

	/* auto-learn score delta setting */
	mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);

	/* set refresh timer for altering flows to 1 sec. (unit: 20us) */
	mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
		MTK_PDMA_LRO_ALT_REFRESH_TIMER);

	/* set HW LRO mode & the max aggregation count for rx packets */
	lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);

	/* the minimal remaining room of SDL0 in RXD for lro aggregation */
	lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;

	/* enable HW LRO */
	lro_ctrl_dw0 |= MTK_LRO_EN;

	/* write DW3 before DW0 so the enable bit lands last */
	mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
	mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);

	return 0;
}
3001
mtk_hwlro_rx_uninit(struct mtk_eth * eth)3002 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
3003 {
3004 int i;
3005 u32 val;
3006
3007 /* relinquish lro rings, flush aggregated packets */
3008 mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
3009
3010 /* wait for relinquishments done */
3011 for (i = 0; i < 10; i++) {
3012 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
3013 if (val & MTK_LRO_RING_RELINQUISH_DONE) {
3014 msleep(20);
3015 continue;
3016 }
3017 break;
3018 }
3019
3020 /* invalidate lro rings */
3021 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
3022 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
3023
3024 /* disable HW LRO */
3025 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
3026 }
3027
/* Program destination IP @ip into LRO slot @idx and mark it valid.
 * The valid bit is dropped while the address register is rewritten.
 */
static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
{
	u32 dw2 = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));

	/* invalidate the slot while its IP is being changed */
	mtk_w32(eth, dw2 & ~MTK_RING_MYIP_VLD, MTK_LRO_CTRL_DW2_CFG(idx));

	mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));

	/* re-arm the slot with the new IP in place */
	mtk_w32(eth, dw2 | MTK_RING_MYIP_VLD, MTK_LRO_CTRL_DW2_CFG(idx));
}
3042
mtk_hwlro_inval_ipaddr(struct mtk_eth * eth,int idx)3043 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
3044 {
3045 u32 reg_val;
3046
3047 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
3048
3049 /* invalidate the IP setting */
3050 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
3051
3052 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
3053 }
3054
mtk_hwlro_get_ip_cnt(struct mtk_mac * mac)3055 static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
3056 {
3057 int cnt = 0;
3058 int i;
3059
3060 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
3061 if (mac->hwlro_ip[i])
3062 cnt++;
3063 }
3064
3065 return cnt;
3066 }
3067
mtk_hwlro_add_ipaddr(struct net_device * dev,struct ethtool_rxnfc * cmd)3068 static int mtk_hwlro_add_ipaddr(struct net_device *dev,
3069 struct ethtool_rxnfc *cmd)
3070 {
3071 struct ethtool_rx_flow_spec *fsp =
3072 (struct ethtool_rx_flow_spec *)&cmd->fs;
3073 struct mtk_mac *mac = netdev_priv(dev);
3074 struct mtk_eth *eth = mac->hw;
3075 int hwlro_idx;
3076
3077 if ((fsp->flow_type != TCP_V4_FLOW) ||
3078 (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
3079 (fsp->location > 1))
3080 return -EINVAL;
3081
3082 mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
3083 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
3084
3085 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
3086
3087 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
3088
3089 return 0;
3090 }
3091
mtk_hwlro_del_ipaddr(struct net_device * dev,struct ethtool_rxnfc * cmd)3092 static int mtk_hwlro_del_ipaddr(struct net_device *dev,
3093 struct ethtool_rxnfc *cmd)
3094 {
3095 struct ethtool_rx_flow_spec *fsp =
3096 (struct ethtool_rx_flow_spec *)&cmd->fs;
3097 struct mtk_mac *mac = netdev_priv(dev);
3098 struct mtk_eth *eth = mac->hw;
3099 int hwlro_idx;
3100
3101 if (fsp->location > 1)
3102 return -EINVAL;
3103
3104 mac->hwlro_ip[fsp->location] = 0;
3105 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
3106
3107 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
3108
3109 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
3110
3111 return 0;
3112 }
3113
mtk_hwlro_netdev_disable(struct net_device * dev)3114 static void mtk_hwlro_netdev_disable(struct net_device *dev)
3115 {
3116 struct mtk_mac *mac = netdev_priv(dev);
3117 struct mtk_eth *eth = mac->hw;
3118 int i, hwlro_idx;
3119
3120 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
3121 mac->hwlro_ip[i] = 0;
3122 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
3123
3124 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
3125 }
3126
3127 mac->hwlro_ip_cnt = 0;
3128 }
3129
/* ethtool ETHTOOL_GRXCLSRULE handler: report the LRO rule stored at
 * fsp->location as a TCPv4 flow spec with only the destination IP set.
 */
static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
				    struct ethtool_rxnfc *cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;

	/* bounds check prevents reading past hwlro_ip[] */
	if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
		return -EINVAL;

	/* only tcp dst ipv4 is meaningful, others are meaningless */
	fsp->flow_type = TCP_V4_FLOW;
	fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
	fsp->m_u.tcp_ip4_spec.ip4dst = 0;

	/* remaining fields are reported as zero values with all-ones
	 * masks — presumably marking them as not participating in the
	 * match; TODO confirm against ethtool flow-spec mask semantics
	 */
	fsp->h_u.tcp_ip4_spec.ip4src = 0;
	fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
	fsp->h_u.tcp_ip4_spec.psrc = 0;
	fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
	fsp->h_u.tcp_ip4_spec.pdst = 0;
	fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
	fsp->h_u.tcp_ip4_spec.tos = 0;
	fsp->m_u.tcp_ip4_spec.tos = 0xff;

	return 0;
}
3156
/* ethtool ETHTOOL_GRXCLSRLALL handler: fill @rule_locs with the indices
 * of all occupied LRO slots and report their count in cmd->rule_cnt.
 * Returns -EMSGSIZE if the caller's array is too small.
 */
static int mtk_hwlro_get_fdir_all(struct net_device *dev,
				  struct ethtool_rxnfc *cmd,
				  u32 *rule_locs)
{
	struct mtk_mac *mac = netdev_priv(dev);
	int i, cnt = 0;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		/* no room left in the caller-provided array */
		if (cnt == cmd->rule_cnt)
			return -EMSGSIZE;

		if (!mac->hwlro_ip[i])
			continue;

		rule_locs[cnt++] = i;
	}

	cmd->rule_cnt = cnt;

	return 0;
}
3179
/* .ndo_fix_features: refuse to drop NETIF_F_LRO while ethtool flow
 * rules still reference the HW LRO engine — force the bit back on.
 */
static netdev_features_t mtk_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct mtk_mac *mac;

	if (features & NETIF_F_LRO)
		return features;

	mac = netdev_priv(dev);
	if (mtk_hwlro_get_ip_cnt(mac)) {
		netdev_info(dev, "RX flow is programmed, LRO should keep on\n");

		features |= NETIF_F_LRO;
	}

	return features;
}
3196
/* .ndo_set_features: tear down HW LRO state when NETIF_F_LRO goes
 * from on to off; everything else is a no-op.
 */
static int mtk_set_features(struct net_device *dev, netdev_features_t features)
{
	if ((dev->features & NETIF_F_LRO) && !(features & NETIF_F_LRO))
		mtk_hwlro_netdev_disable(dev);

	return 0;
}
3206
3207 /* wait for DMA to finish whatever it is doing before we start using it again */
static int mtk_dma_busy_wait(struct mtk_eth *eth)
{
	unsigned int reg;
	int ret;
	u32 val;

	/* QDMA-capable SoCs expose the DMA busy bits in the QDMA global
	 * config register; older SoCs only have the PDMA one.
	 */
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		reg = eth->soc->reg_map->qdma.glo_cfg;
	else
		reg = eth->soc->reg_map->pdma.glo_cfg;

	/* Poll every 5us until both RX and TX engines report idle, or
	 * give up after MTK_DMA_BUSY_TIMEOUT_US.
	 */
	ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
					!(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
					5, MTK_DMA_BUSY_TIMEOUT_US);
	if (ret)
		dev_err(eth->dev, "DMA init timeout\n");

	return ret;
}
3227
/* Allocate and program all TX/RX DMA rings. The ordering matters:
 * the hardware must be idle first, the QDMA scratch FQ must exist
 * before TX, and HWLRO rings are only set up when enabled.
 *
 * Returns 0 on success or a negative errno; the caller is expected
 * to clean up via mtk_dma_free() on failure (see mtk_start_dma()).
 */
static int mtk_dma_init(struct mtk_eth *eth)
{
	int err;
	u32 i;

	if (mtk_dma_busy_wait(eth))
		return -EBUSY;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		/* QDMA needs scratch memory for internal reordering of the
		 * descriptors
		 */
		err = mtk_init_fq_dma(eth);
		if (err)
			return err;
	}

	err = mtk_tx_alloc(eth);
	if (err)
		return err;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
		if (err)
			return err;
	}

	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
	if (err)
		return err;

	if (eth->hwlro) {
		/* ring 0 is the normal RX ring; rings 1..N-1 carry LRO */
		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
			err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
			if (err)
				return err;
		}
		err = mtk_hwlro_rx_init(eth);
		if (err)
			return err;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		/* Enable random early drop and set drop threshold
		 * automatically
		 */
		mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
			FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
		mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
	}

	return 0;
}
3281
/* Release every DMA resource set up by mtk_dma_init(): per-netdev BQL
 * state, the QDMA scratch ring, TX/RX rings, HWLRO rings, and the
 * scratch page backing buffers.
 */
static void mtk_dma_free(struct mtk_eth *eth)
{
	const struct mtk_soc_data *soc = eth->soc;
	int i, j, txqs = 1;

	/* QDMA SoCs expose multiple TX queues per netdev */
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		txqs = MTK_QDMA_NUM_QUEUES;

	/* reset byte-queue-limit accounting on every registered netdev */
	for (i = 0; i < MTK_MAX_DEVS; i++) {
		if (!eth->netdev[i])
			continue;

		for (j = 0; j < txqs; j++)
			netdev_tx_reset_subqueue(eth->netdev[i], j);
	}

	if (eth->scratch_ring) {
		mtk_dma_ring_free(eth, soc->tx.fq_dma_size * soc->tx.desc_size,
				  eth->scratch_ring, eth->phy_scratch_ring,
				  true);
		eth->scratch_ring = NULL;
		eth->phy_scratch_ring = 0;
	}

	mtk_tx_clean(eth);
	mtk_rx_clean(eth, &eth->rx_ring[0], true);
	mtk_rx_clean(eth, &eth->rx_ring_qdma, false);

	if (eth->hwlro) {
		mtk_hwlro_rx_uninit(eth);
		/* ring 0 was already cleaned above */
		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
			mtk_rx_clean(eth, &eth->rx_ring[i], false);
	}

	/* free the CPU-side chunks that backed the scratch FQ ring */
	for (i = 0; i < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); i++) {
		kfree(eth->scratch_head[i]);
		eth->scratch_head[i] = NULL;
	}
}
3321
mtk_hw_reset_check(struct mtk_eth * eth)3322 static bool mtk_hw_reset_check(struct mtk_eth *eth)
3323 {
3324 u32 val = mtk_r32(eth, MTK_INT_STATUS2);
3325
3326 return (val & MTK_FE_INT_FQ_EMPTY) || (val & MTK_FE_INT_RFIFO_UF) ||
3327 (val & MTK_FE_INT_RFIFO_OV) || (val & MTK_FE_INT_TSO_FAIL) ||
3328 (val & MTK_FE_INT_TSO_ALIGN) || (val & MTK_FE_INT_TSO_ILLEGAL);
3329 }
3330
mtk_tx_timeout(struct net_device * dev,unsigned int txqueue)3331 static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
3332 {
3333 struct mtk_mac *mac = netdev_priv(dev);
3334 struct mtk_eth *eth = mac->hw;
3335
3336 if (test_bit(MTK_RESETTING, ð->state))
3337 return;
3338
3339 if (!mtk_hw_reset_check(eth))
3340 return;
3341
3342 eth->netdev[mac->id]->stats.tx_errors++;
3343 netif_err(eth, tx_err, dev, "transmit timed out\n");
3344
3345 schedule_work(ð->pending_work);
3346 }
3347
/* Resolve the FE TX/RX interrupt lines, preferring named IRQs ("fe1",
 * "fe2") and falling back to the legacy positional scheme for old
 * device trees. Returns 0 on success or a negative errno.
 */
static int mtk_get_irqs(struct platform_device *pdev, struct mtk_eth *eth)
{
	int i;

	/* future SoCs beginning with MT7988 should use named IRQs in dts */
	eth->irq[MTK_FE_IRQ_TX] = platform_get_irq_byname_optional(pdev, "fe1");
	eth->irq[MTK_FE_IRQ_RX] = platform_get_irq_byname_optional(pdev, "fe2");
	if (eth->irq[MTK_FE_IRQ_TX] >= 0 && eth->irq[MTK_FE_IRQ_RX] >= 0)
		return 0;

	/* only use legacy mode if platform_get_irq_byname_optional returned -ENXIO */
	if (eth->irq[MTK_FE_IRQ_TX] != -ENXIO)
		return dev_err_probe(&pdev->dev, eth->irq[MTK_FE_IRQ_TX],
				     "Error requesting FE TX IRQ\n");

	if (eth->irq[MTK_FE_IRQ_RX] != -ENXIO)
		return dev_err_probe(&pdev->dev, eth->irq[MTK_FE_IRQ_RX],
				     "Error requesting FE RX IRQ\n");

	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT))
		dev_warn(&pdev->dev, "legacy DT: missing interrupt-names.");

	/* legacy way:
	 * On MTK_SHARED_INT SoCs (MT7621 + MT7628) the first IRQ is taken
	 * from devicetree and used for both RX and TX - it is shared.
	 * On SoCs with non-shared IRQs the first entry is not used,
	 * the second is for TX, and the third is for RX.
	 */
	for (i = 0; i < MTK_FE_IRQ_NUM; i++) {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
			if (i == MTK_FE_IRQ_SHARED)
				eth->irq[MTK_FE_IRQ_SHARED] = platform_get_irq(pdev, i);
			else
				eth->irq[i] = eth->irq[MTK_FE_IRQ_SHARED];
		} else {
			eth->irq[i] = platform_get_irq(pdev, i + 1);
		}

		if (eth->irq[i] < 0) {
			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
			return -ENXIO;
		}
	}

	return 0;
}
3394
/* RX interrupt handler: mask RX-done interrupts and hand processing
 * over to the RX NAPI context. @_eth must be a struct mtk_eth pointer.
 */
static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	eth->rx_events++;
	/* napi_schedule_prep() fails when NAPI is already scheduled, in
	 * which case interrupts are already masked.
	 */
	if (likely(napi_schedule_prep(&eth->rx_napi))) {
		mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
		__napi_schedule(&eth->rx_napi);
	}

	return IRQ_HANDLED;
}
3407
/* TX interrupt handler: mask TX-done interrupts and hand processing
 * over to the TX NAPI context. @_eth must be a struct mtk_eth pointer.
 */
static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	eth->tx_events++;
	/* napi_schedule_prep() fails when NAPI is already scheduled, in
	 * which case interrupts are already masked.
	 */
	if (likely(napi_schedule_prep(&eth->tx_napi))) {
		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
		__napi_schedule(&eth->tx_napi);
	}

	return IRQ_HANDLED;
}
3420
mtk_handle_irq(int irq,void * _eth)3421 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
3422 {
3423 struct mtk_eth *eth = _eth;
3424 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3425
3426 if (mtk_r32(eth, reg_map->pdma.irq_mask) &
3427 eth->soc->rx.irq_done_mask) {
3428 if (mtk_r32(eth, reg_map->pdma.irq_status) &
3429 eth->soc->rx.irq_done_mask)
3430 mtk_handle_irq_rx(irq, _eth);
3431 }
3432 if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
3433 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
3434 mtk_handle_irq_tx(irq, _eth);
3435 }
3436
3437 return IRQ_HANDLED;
3438 }
3439
3440 #ifdef CONFIG_NET_POLL_CONTROLLER
mtk_poll_controller(struct net_device * dev)3441 static void mtk_poll_controller(struct net_device *dev)
3442 {
3443 struct mtk_mac *mac = netdev_priv(dev);
3444 struct mtk_eth *eth = mac->hw;
3445
3446 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3447 mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
3448 mtk_handle_irq_rx(eth->irq[MTK_FE_IRQ_RX], dev);
3449 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3450 mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
3451 }
3452 #endif
3453
/* Allocate the DMA rings and turn on the TX/RX DMA engines with the
 * SoC-appropriate global configuration. On QDMA SoCs both QDMA (TX)
 * and PDMA (RX) are enabled; legacy SoCs use PDMA for both directions.
 */
static int mtk_start_dma(struct mtk_eth *eth)
{
	u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	int err;

	err = mtk_dma_init(eth);
	if (err) {
		/* unwind any partially allocated rings */
		mtk_dma_free(eth);
		return err;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		val = mtk_r32(eth, reg_map->qdma.glo_cfg);
		val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
		       MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
		       MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;

		/* NETSYS v2+ supports multiple DMA completion options */
		if (mtk_is_netsys_v2_or_greater(eth))
			val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
			       MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
			       MTK_CHK_DDONE_EN;
		else
			val |= MTK_RX_BT_32DWORDS;
		mtk_w32(eth, val, reg_map->qdma.glo_cfg);

		mtk_w32(eth,
			MTK_RX_DMA_EN | rx_2b_offset |
			MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
			reg_map->pdma.glo_cfg);
	} else {
		mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
			MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
			reg_map->pdma.glo_cfg);
	}

	return 0;
}
3492
/* Program the GDM forwarding configuration for MAC @id: destination
 * port in @config, RX checksum offload, and the MTK special tag when
 * the netdev is a DSA conduit. No-op on MT7628 (no GDM block).
 */
static void mtk_gdm_config(struct mtk_eth *eth, u32 id, u32 config)
{
	u32 val;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
		return;

	val = mtk_r32(eth, MTK_GDMA_FWD_CFG(id));

	/* default setup the forward port to send frame to PDMA */
	val &= ~0xffff;

	/* Enable RX checksum */
	val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;

	val |= config;

	if (eth->netdev[id] && netdev_uses_dsa(eth->netdev[id]))
		val |= MTK_GDMA_SPECIAL_TAG;

	mtk_w32(eth, val, MTK_GDMA_FWD_CFG(id));
}
3515
3516
mtk_uses_dsa(struct net_device * dev)3517 static bool mtk_uses_dsa(struct net_device *dev)
3518 {
3519 #if IS_ENABLED(CONFIG_NET_DSA)
3520 return netdev_uses_dsa(dev) &&
3521 dev->dsa_ptr->tag_ops->proto == DSA_TAG_PROTO_MTK;
3522 #else
3523 return false;
3524 #endif
3525 }
3526
/* Netdev notifier: when a DSA user port whose conduit is this MAC
 * changes link state, update the QoS shaper rate of the QDMA queue
 * mapped to that port.
 */
static int mtk_device_event(struct notifier_block *n, unsigned long event, void *ptr)
{
	struct mtk_mac *mac = container_of(n, struct mtk_mac, device_notifier);
	struct mtk_eth *eth = mac->hw;
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct ethtool_link_ksettings s;
	struct net_device *ldev;
	struct list_head *iter;
	struct dsa_port *dp;

	if (event != NETDEV_CHANGE)
		return NOTIFY_DONE;

	/* only react when this MAC is a lower device of @dev */
	netdev_for_each_lower_dev(dev, ldev, iter) {
		if (netdev_priv(ldev) == mac)
			goto found;
	}

	return NOTIFY_DONE;

found:
	if (!dsa_user_dev_check(dev))
		return NOTIFY_DONE;

	if (__ethtool_get_link_ksettings(dev, &s))
		return NOTIFY_DONE;

	/* ignore link-down / unknown speed */
	if (s.base.speed == 0 || s.base.speed == ((__u32)-1))
		return NOTIFY_DONE;

	dp = dsa_port_from_netdev(dev);
	if (dp->index >= MTK_QDMA_NUM_QUEUES)
		return NOTIFY_DONE;

	/* speed 0 disables the per-queue shaper when the port link is
	 * not slower than the MAC itself
	 */
	if (mac->speed > 0 && mac->speed <= s.base.speed)
		s.base.speed = 0;

	/* DSA port queues start at offset 3 in the QDMA queue map */
	mtk_set_queue_speed(eth, dp->index + 3, s.base.speed);

	return NOTIFY_DONE;
}
3568
/* ndo_open hook. The DMA rings are shared between all MACs, so they
 * are brought up only on the first open (tracked by dma_refcnt); later
 * opens just attach the PHY and start the queues. On NETSYS v1, DSA
 * metadata destinations and hardware DSA untagging are also configured.
 */
static int mtk_open(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_mac *target_mac;
	int i, err, ppe_num;

	ppe_num = eth->soc->ppe_num;

	err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
	if (err) {
		netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
			   err);
		return err;
	}

	/* we run 2 netdevs on the same dma ring so we only bring it up once */
	if (!refcount_read(&eth->dma_refcnt)) {
		const struct mtk_soc_data *soc = eth->soc;
		u32 gdm_config;
		int i;

		err = mtk_start_dma(eth);
		if (err) {
			phylink_disconnect_phy(mac->phylink);
			return err;
		}

		for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
			mtk_ppe_start(eth->ppe[i]);

		/* assign each MAC the PPE instance its GDM forwards to */
		for (i = 0; i < MTK_MAX_DEVS; i++) {
			if (!eth->netdev[i])
				continue;

			target_mac = netdev_priv(eth->netdev[i]);
			if (!soc->offload_version) {
				target_mac->ppe_idx = 0;
				gdm_config = MTK_GDMA_TO_PDMA;
			} else if (ppe_num >= 3 && target_mac->id == 2) {
				target_mac->ppe_idx = 2;
				gdm_config = soc->reg_map->gdma_to_ppe[2];
			} else if (ppe_num >= 2 && target_mac->id == 1) {
				target_mac->ppe_idx = 1;
				gdm_config = soc->reg_map->gdma_to_ppe[1];
			} else {
				target_mac->ppe_idx = 0;
				gdm_config = soc->reg_map->gdma_to_ppe[0];
			}
			mtk_gdm_config(eth, target_mac->id, gdm_config);
		}

		napi_enable(&eth->tx_napi);
		napi_enable(&eth->rx_napi);
		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
		mtk_rx_irq_enable(eth, soc->rx.irq_done_mask);
		refcount_set(&eth->dma_refcnt, 1);
	} else {
		refcount_inc(&eth->dma_refcnt);
	}

	phylink_start(mac->phylink);
	netif_tx_start_all_queues(dev);

	/* the remaining setup only applies to NETSYS v1 */
	if (mtk_is_netsys_v2_or_greater(eth))
		return 0;

	if (mtk_uses_dsa(dev) && !eth->prog) {
		/* pre-allocate per-port metadata dsts for DSA RX demux */
		for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
			struct metadata_dst *md_dst = eth->dsa_meta[i];

			if (md_dst)
				continue;

			md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
						    GFP_KERNEL);
			if (!md_dst)
				return -ENOMEM;

			md_dst->u.port_info.port_id = i;
			eth->dsa_meta[i] = md_dst;
		}
	} else {
		/* Hardware DSA untagging and VLAN RX offloading need to be
		 * disabled if at least one MAC does not use DSA.
		 */
		u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);

		val &= ~MTK_CDMP_STAG_EN;
		mtk_w32(eth, val, MTK_CDMP_IG_CTRL);

		mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
	}

	return 0;
}
3665
/* Clear the DMA enable bits in @glo_cfg and wait (up to ~200ms) for
 * both engines to drain and report idle.
 */
static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
{
	u32 val;
	int i;

	/* stop the dma engine */
	spin_lock_bh(&eth->page_lock);
	val = mtk_r32(eth, glo_cfg);
	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
		glo_cfg);
	spin_unlock_bh(&eth->page_lock);

	/* wait for dma stop */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, glo_cfg);
		if (!(val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)))
			break;

		msleep(20);
	}
}
3688
/* ndo_stop hook: detach this MAC's PHY and queues; the shared DMA
 * engines, NAPI contexts and PPE are torn down only when the last
 * open netdev closes (mirrors the refcount logic in mtk_open()).
 */
static int mtk_stop(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int i;

	phylink_stop(mac->phylink);

	netif_tx_disable(dev);

	phylink_disconnect_phy(mac->phylink);

	/* only shutdown DMA if this is the last user */
	if (!refcount_dec_and_test(&eth->dma_refcnt))
		return 0;

	/* stop forwarding ingress traffic before shutting the rings down */
	for (i = 0; i < MTK_MAX_DEVS; i++)
		mtk_gdm_config(eth, i, MTK_GDMA_DROP_ALL);

	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
	mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
	napi_disable(&eth->tx_napi);
	napi_disable(&eth->rx_napi);

	cancel_work_sync(&eth->rx_dim.work);
	cancel_work_sync(&eth->tx_dim.work);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
	mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);

	mtk_dma_free(eth);

	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
		mtk_ppe_stop(eth->ppe[i]);

	return 0;
}
3727
/* Install or remove an XDP program. Incompatible with HWLRO and with
 * MTUs exceeding the page-pool buffer size. If XDP is being enabled or
 * disabled (not merely swapped) while the device runs, the datapath is
 * restarted so the RX buffers are reallocated for the new mode.
 */
static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
			 struct netlink_ext_ack *extack)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct bpf_prog *old_prog;
	bool need_update;

	if (eth->hwlro) {
		NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
		return -EOPNOTSUPP;
	}

	if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
		NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
		return -EOPNOTSUPP;
	}

	/* a restart is only needed on an enable/disable transition */
	need_update = !!eth->prog != !!prog;
	if (netif_running(dev) && need_update)
		mtk_stop(dev);

	/* publish the new program under RTNL and drop the old reference */
	old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
	if (old_prog)
		bpf_prog_put(old_prog);

	if (netif_running(dev) && need_update)
		return mtk_open(dev);

	return 0;
}
3759
mtk_xdp(struct net_device * dev,struct netdev_bpf * xdp)3760 static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3761 {
3762 switch (xdp->command) {
3763 case XDP_SETUP_PROG:
3764 return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
3765 default:
3766 return -EINVAL;
3767 }
3768 }
3769
/* Pulse the given bits in the ETHSYS reset control register: assert,
 * hold ~1ms, deassert, then allow 10ms for the blocks to come back up.
 */
static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
{
	/* assert: set all requested reset bits */
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits,
			   reset_bits);

	usleep_range(1000, 1100);
	/* deassert: clear them again (mask = reset_bits, value = 0) */
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits,
			   ~reset_bits);
	mdelay(10);
}
3782
mtk_clk_disable(struct mtk_eth * eth)3783 static void mtk_clk_disable(struct mtk_eth *eth)
3784 {
3785 int clk;
3786
3787 for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
3788 clk_disable_unprepare(eth->clks[clk]);
3789 }
3790
/* Enable every ethernet clock; on failure, unwind the clocks enabled
 * so far and return the error from clk_prepare_enable().
 */
static int mtk_clk_enable(struct mtk_eth *eth)
{
	int clk, ret;

	for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
		ret = clk_prepare_enable(eth->clks[clk]);
		if (ret)
			goto err_disable_clks;
	}

	return 0;

err_disable_clks:
	/* roll back only the clocks that were successfully enabled */
	while (--clk >= 0)
		clk_disable_unprepare(eth->clks[clk]);

	return ret;
}
3809
/* Net DIM worker for RX: translate the current DIM profile into the
 * PDMA (and, on QDMA SoCs, QDMA) RX interrupt delay register while
 * preserving the TX half of the register.
 */
static void mtk_dim_rx(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	struct dim_cq_moder cur_profile;
	u32 val, cur;

	cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
						dim->profile_ix);
	spin_lock_bh(&eth->dim_lock);

	/* keep the TX delay fields, rewrite only the RX ones */
	val = mtk_r32(eth, reg_map->pdma.delay_irq);
	val &= MTK_PDMA_DELAY_TX_MASK;
	val |= MTK_PDMA_DELAY_RX_EN;

	/* delay time field is in 20us units, clamped to the field width */
	cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
	val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;

	cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
	val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;

	mtk_w32(eth, val, reg_map->pdma.delay_irq);
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		mtk_w32(eth, val, reg_map->qdma.delay_irq);

	spin_unlock_bh(&eth->dim_lock);

	dim->state = DIM_START_MEASURE;
}
3840
/* Net DIM worker for TX: translate the current DIM profile into the
 * PDMA (and, on QDMA SoCs, QDMA) TX interrupt delay register while
 * preserving the RX half of the register. Mirror of mtk_dim_rx().
 */
static void mtk_dim_tx(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	struct dim_cq_moder cur_profile;
	u32 val, cur;

	cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
						dim->profile_ix);
	spin_lock_bh(&eth->dim_lock);

	/* keep the RX delay fields, rewrite only the TX ones */
	val = mtk_r32(eth, reg_map->pdma.delay_irq);
	val &= MTK_PDMA_DELAY_RX_MASK;
	val |= MTK_PDMA_DELAY_TX_EN;

	/* delay time field is in 20us units, clamped to the field width */
	cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
	val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;

	cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
	val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;

	mtk_w32(eth, val, reg_map->pdma.delay_irq);
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		mtk_w32(eth, val, reg_map->qdma.delay_irq);

	spin_unlock_bh(&eth->dim_lock);

	dim->state = DIM_START_MEASURE;
}
3871
/* Program the MAC's maximum RX frame length field (MCR register) to
 * the smallest hardware bucket that fits @val bytes. No-op on MT7628,
 * which has no per-MAC MCR register.
 */
static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val)
{
	struct mtk_eth *eth = mac->hw;
	u32 mcr_cur, mcr_new;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
		return;

	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
	mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;

	/* hardware supports fixed buckets of 1518/1536/1552/2048 bytes */
	if (val <= 1518)
		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
	else if (val <= 1536)
		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
	else if (val <= 1552)
		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
	else
		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);

	/* avoid a register write when nothing changed */
	if (mcr_new != mcr_cur)
		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
}
3895
/* Full (cold) hardware reset of the frame engine and, depending on the
 * NETSYS generation and SoC capabilities, the PPE and WDMA blocks. The
 * FE reset-check-idle register is cleared beforehand and restored with
 * generation-specific values afterwards.
 */
static void mtk_hw_reset(struct mtk_eth *eth)
{
	u32 val;

	if (mtk_is_netsys_v2_or_greater(eth))
		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);

	/* pick the generation-specific set of PPE/WDMA reset bits */
	if (mtk_is_netsys_v3_or_greater(eth)) {
		val = RSTCTRL_PPE0_V3;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
			val |= RSTCTRL_PPE1_V3;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
			val |= RSTCTRL_PPE2;

		val |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
	} else if (mtk_is_netsys_v2_or_greater(eth)) {
		val = RSTCTRL_PPE0_V2;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
			val |= RSTCTRL_PPE1;
	} else {
		val = RSTCTRL_PPE0;
	}

	ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);

	/* re-enable reset-idle checking with per-generation masks */
	if (mtk_is_netsys_v3_or_greater(eth))
		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
			     0x6f8ff);
	else if (mtk_is_netsys_v2_or_greater(eth))
		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
			     0x3ffffff);
}
3931
mtk_hw_reset_read(struct mtk_eth * eth)3932 static u32 mtk_hw_reset_read(struct mtk_eth *eth)
3933 {
3934 u32 val;
3935
3936 regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
3937 return val;
3938 }
3939
/* Warm reset: assert RSTCTRL_FE first, wait for it to latch, then
 * assert and deassert the generation-specific ETH/PPE/WDMA reset bits
 * in two verified stages. Falls back to a full mtk_hw_reset() if the
 * initial FE assertion never latches.
 */
static void mtk_hw_warm_reset(struct mtk_eth *eth)
{
	u32 rst_mask, val;

	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE,
			   RSTCTRL_FE);
	if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val,
				      val & RSTCTRL_FE, 1, 1000)) {
		dev_err(eth->dev, "warm reset failed\n");
		mtk_hw_reset(eth);
		return;
	}

	/* same per-generation bit selection as mtk_hw_reset() */
	if (mtk_is_netsys_v3_or_greater(eth)) {
		rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V3;
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
			rst_mask |= RSTCTRL_PPE1_V3;
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
			rst_mask |= RSTCTRL_PPE2;

		rst_mask |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
	} else if (mtk_is_netsys_v2_or_greater(eth)) {
		rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2;
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
			rst_mask |= RSTCTRL_PPE1;
	} else {
		rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0;
	}

	/* stage 0: assert and verify all reset bits latched */
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask);

	udelay(1);
	val = mtk_hw_reset_read(eth);
	if (!(val & rst_mask))
		dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n",
			val, rst_mask);

	/* stage 1: deassert everything, including FE, and verify */
	rst_mask |= RSTCTRL_FE;
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask);

	udelay(1);
	val = mtk_hw_reset_read(eth);
	if (val & rst_mask)
		dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n",
			val, rst_mask);
}
3986
/* Heuristic DMA hang detector, run periodically by the reset monitor.
 * Probes WDMA, QDMA and ADMA state registers; each subsystem must look
 * stuck on three consecutive samples before a reset is requested, to
 * avoid reacting to transient congestion.
 *
 * Returns true when a hang is confirmed and a reset should be scheduled.
 * Not applicable on MT7628.
 */
static bool mtk_hw_check_dma_hang(struct mtk_eth *eth)
{
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	bool gmac1_tx, gmac2_tx, gdm1_tx, gdm2_tx;
	bool oq_hang, cdm1_busy, adma_busy;
	bool wtx_busy, cdm_full, oq_free;
	u32 wdidx, val, gdm1_fc, gdm2_fc;
	bool qfsm_hang, qfwd_hang;
	bool ret = false;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
		return false;

	/* WDMA sanity checks */
	wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc);

	val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204);
	wtx_busy = FIELD_GET(MTK_TX_DMA_BUSY, val);

	val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230);
	cdm_full = !FIELD_GET(MTK_CDM_TXFIFO_RDY, val);

	oq_free = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) &&
		   !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) &&
		   !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16)));

	/* WDMA hung: descriptor index frozen while TX busy, CDM FIFO
	 * full and the PSE output queues drained
	 */
	if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) {
		if (++eth->reset.wdma_hang_count > 2) {
			eth->reset.wdma_hang_count = 0;
			ret = true;
		}
		goto out;
	}

	/* QDMA sanity checks */
	qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234);
	qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308);

	gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0;
	gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0;
	gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1;
	gmac2_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1;
	gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24);
	gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64);

	if (qfsm_hang && qfwd_hang &&
	    ((gdm1_tx && gmac1_tx && gdm1_fc < 1) ||
	     (gdm2_tx && gmac2_tx && gdm2_fc < 1))) {
		if (++eth->reset.qdma_hang_count > 2) {
			eth->reset.qdma_hang_count = 0;
			ret = true;
		}
		goto out;
	}

	/* ADMA sanity checks */
	oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0));
	cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16));
	adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) &&
		    !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6));

	if (oq_hang && cdm1_busy && adma_busy) {
		if (++eth->reset.adma_hang_count > 2) {
			eth->reset.adma_hang_count = 0;
			ret = true;
		}
		goto out;
	}

	/* everything looks healthy: restart all hang counters */
	eth->reset.wdma_hang_count = 0;
	eth->reset.qdma_hang_count = 0;
	eth->reset.adma_hang_count = 0;
out:
	/* remember the WDMA index for the frozen-index check next round */
	eth->reset.wdidx = wdidx;

	return ret;
}
4064
mtk_hw_reset_monitor_work(struct work_struct * work)4065 static void mtk_hw_reset_monitor_work(struct work_struct *work)
4066 {
4067 struct delayed_work *del_work = to_delayed_work(work);
4068 struct mtk_eth *eth = container_of(del_work, struct mtk_eth,
4069 reset.monitor_work);
4070
4071 if (test_bit(MTK_RESETTING, ð->state))
4072 goto out;
4073
4074 /* DMA stuck checks */
4075 if (mtk_hw_check_dma_hang(eth))
4076 schedule_work(ð->pending_work);
4077
4078 out:
4079 schedule_delayed_work(ð->reset.monitor_work,
4080 MTK_DMA_MONITOR_TIMEOUT);
4081 }
4082
/* One-time (or post-reset when @reset is true) hardware bring-up:
 * clocks and runtime PM, DMA coherence, FE reset (warm or cold),
 * per-MAC link-down defaults, CDM/GDM special-tag and threshold
 * programming, interrupt grouping, and generation-specific PSE setup.
 *
 * When @reset is false this runs at most once (guarded by MTK_HW_INIT
 * in eth->state). Returns 0 on success or a negative errno.
 */
static int mtk_hw_init(struct mtk_eth *eth, bool reset)
{
	u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
		       ETHSYS_DMA_AG_MAP_PPE;
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	int i, val, ret;

	if (!reset && test_and_set_bit(MTK_HW_INIT, &eth->state))
		return 0;

	/* clocks/PM are already up when re-initializing after a reset */
	if (!reset) {
		pm_runtime_enable(eth->dev);
		pm_runtime_get_sync(eth->dev);

		ret = mtk_clk_enable(eth);
		if (ret)
			goto err_disable_pm;
	}

	if (eth->ethsys)
		regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
				   of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);

	/* MT7628 has no ETHSYS block; a simple device reset suffices */
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		ret = device_reset(eth->dev);
		if (ret) {
			dev_err(eth->dev, "MAC reset failed!\n");
			goto err_disable_pm;
		}

		/* set interrupt delays based on current Net DIM sample */
		mtk_dim_rx(&eth->rx_dim.work);
		mtk_dim_tx(&eth->tx_dim.work);

		/* disable delay and normal interrupt */
		mtk_tx_irq_disable(eth, ~0);
		mtk_rx_irq_disable(eth, ~0);

		return 0;
	}

	msleep(100);

	if (reset)
		mtk_hw_warm_reset(eth);
	else
		mtk_hw_reset(eth);

	/* No MT7628/88 support yet */
	if (reset && !MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
		mtk_mdio_config(eth);

	if (mtk_is_netsys_v3_or_greater(eth)) {
		/* Set FE to PDMAv2 if necessary */
		val = mtk_r32(eth, MTK_FE_GLO_MISC);
		mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
	}

	if (eth->pctl) {
		/* Set GE2 driving and slew rate */
		regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);

		/* set GE2 TDSEL */
		regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);

		/* set GE2 TUNE */
		regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
	}

	/* Set linkdown as the default for each GMAC. Its own MCR would be set
	 * up with the more appropriate value when mtk_mac_config call is being
	 * invoked.
	 */
	for (i = 0; i < MTK_MAX_DEVS; i++) {
		struct net_device *dev = eth->netdev[i];

		if (!dev)
			continue;

		mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
		mtk_set_mcr_max_rx(netdev_priv(dev),
				   dev->mtu + MTK_RX_ETH_HLEN);
	}

	/* Indicates CDM to parse the MTK special tag from CPU
	 * which also is working out for untag packets.
	 */
	val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
	mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
	if (mtk_is_netsys_v1(eth)) {
		val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
		mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);

		mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
	}

	/* set interrupt delays based on current Net DIM sample */
	mtk_dim_rx(&eth->rx_dim.work);
	mtk_dim_tx(&eth->tx_dim.work);

	/* disable delay and normal interrupt */
	mtk_tx_irq_disable(eth, ~0);
	mtk_rx_irq_disable(eth, ~0);

	/* FE int grouping */
	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
	mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.int_grp + 4);
	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
	mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->qdma.int_grp + 4);
	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);

	if (mtk_is_netsys_v3_or_greater(eth)) {
		/* PSE dummy page mechanism */
		mtk_w32(eth, PSE_DUMMY_WORK_GDM(1) | PSE_DUMMY_WORK_GDM(2) |
			PSE_DUMMY_WORK_GDM(3) | DUMMY_PAGE_THR, PSE_DUMY_REQ);

		/* PSE free buffer drop threshold */
		mtk_w32(eth, 0x00600009, PSE_IQ_REV(8));

		/* PSE should not drop port8, port9 and port13 packets from
		 * WDMA Tx
		 */
		mtk_w32(eth, 0x00002300, PSE_DROP_CFG);

		/* PSE should drop packets to port8, port9 and port13 on WDMA Rx
		 * ring full
		 */
		mtk_w32(eth, 0x00002300, PSE_PPE_DROP(0));
		mtk_w32(eth, 0x00002300, PSE_PPE_DROP(1));
		mtk_w32(eth, 0x00002300, PSE_PPE_DROP(2));

		/* GDM and CDM Threshold */
		mtk_w32(eth, 0x08000707, MTK_CDMW0_THRES);
		mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);

		/* Disable GDM1 RX CRC stripping */
		mtk_m32(eth, MTK_GDMA_STRP_CRC, 0, MTK_GDMA_FWD_CFG(0));

		/* PSE GDM3 MIB counter has incorrect hw default values,
		 * so the driver ought to read clear the values beforehand
		 * in case ethtool retrieve wrong mib values.
		 */
		for (i = 0; i < 0x80; i += 0x4)
			mtk_r32(eth, reg_map->gdm1_cnt + 0x100 + i);
	} else if (!mtk_is_netsys_v1(eth)) {
		/* PSE should not drop port8 and port9 packets from WDMA Tx */
		mtk_w32(eth, 0x00000300, PSE_DROP_CFG);

		/* PSE should drop packets to port 8/9 on WDMA Rx ring full */
		mtk_w32(eth, 0x00000300, PSE_PPE_DROP(0));

		/* PSE Free Queue Flow Control */
		mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);

		/* PSE config input queue threshold */
		mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
		mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
		mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));

		/* PSE config output queue threshold */
		mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
		mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
		mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
		mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
		mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
		mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
		mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
		mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));

		/* GDM and CDM Threshold */
		mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
		mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
		mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
		mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
		mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
		mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
	}

	return 0;

err_disable_pm:
	if (!reset) {
		pm_runtime_put_sync(eth->dev);
		pm_runtime_disable(eth->dev);
	}

	return ret;
}
4276
/* Undo mtk_hw_init(): gate the ethernet clocks and release the runtime-PM
 * reference taken during init. The MTK_HW_INIT state bit makes this safe
 * to call even if init never ran (or deinit already did).
 */
static int mtk_hw_deinit(struct mtk_eth *eth)
{
	if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
		return 0;

	mtk_clk_disable(eth);

	/* drop the reference taken in mtk_hw_init() before disabling PM */
	pm_runtime_put_sync(eth->dev);
	pm_runtime_disable(eth->dev);

	return 0;
}
4289
/* .ndo_uninit: detach the PHY from phylink and mask all TX/RX interrupts.
 * The IRQ mask registers are chip-level (per-eth, not per-mac), so this
 * silences interrupts for the whole frame engine.
 */
static void mtk_uninit(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	phylink_disconnect_phy(mac->phylink);
	mtk_tx_irq_disable(eth, ~0);
	mtk_rx_irq_disable(eth, ~0);
}
4299
mtk_change_mtu(struct net_device * dev,int new_mtu)4300 static int mtk_change_mtu(struct net_device *dev, int new_mtu)
4301 {
4302 int length = new_mtu + MTK_RX_ETH_HLEN;
4303 struct mtk_mac *mac = netdev_priv(dev);
4304 struct mtk_eth *eth = mac->hw;
4305
4306 if (rcu_access_pointer(eth->prog) &&
4307 length > MTK_PP_MAX_BUF_SIZE) {
4308 netdev_err(dev, "Invalid MTU for XDP mode\n");
4309 return -EINVAL;
4310 }
4311
4312 mtk_set_mcr_max_rx(mac, length);
4313 WRITE_ONCE(dev->mtu, new_mtu);
4314
4315 return 0;
4316 }
4317
mtk_do_ioctl(struct net_device * dev,struct ifreq * ifr,int cmd)4318 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4319 {
4320 struct mtk_mac *mac = netdev_priv(dev);
4321
4322 switch (cmd) {
4323 case SIOCGMIIPHY:
4324 case SIOCGMIIREG:
4325 case SIOCSMIIREG:
4326 return phylink_mii_ioctl(mac->phylink, ifr, cmd);
4327 default:
4328 break;
4329 }
4330
4331 return -EOPNOTSUPP;
4332 }
4333
/* Quiesce the frame engine ahead of a full FE reset: force the PSE->PPE
 * ports link-down so the PPE can drain, mask all NETSYS interrupts and
 * clear the forced-link bit on the GMACs. Called (twice, see
 * mtk_pending_work()) before mtk_hw_init(eth, true).
 */
static void mtk_prepare_for_reset(struct mtk_eth *eth)
{
	u32 val;
	int i;

	/* set FE PPE ports link down */
	for (i = MTK_GMAC1_ID;
	     i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID);
	     i += 2) { /* FE_GLO_CFG registers sit at every other GMAC index */
		val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) | MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT);
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
			val |= MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT);
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
			val |= MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT);
		mtk_w32(eth, val, MTK_FE_GLO_CFG(i));
	}

	/* adjust PPE configurations to prepare for reset */
	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
		mtk_ppe_prepare_reset(eth->ppe[i]);

	/* disable NETSYS interrupts */
	mtk_w32(eth, 0, MTK_FE_INT_ENABLE);

	/* force link down GMAC */
	for (i = 0; i < 2; i++) {
		val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK;
		mtk_w32(eth, val, MTK_MAC_MCR(i));
	}
}
4364
/* Deferred full frame-engine recovery. Runs with RTNL held: quiesce the
 * FE and WED, stop every running netdev, re-run mtk_hw_init() in reset
 * mode, then reopen the devices that were up and restore the PSE->PPE
 * port links. MTK_RESETTING gates concurrent ethtool/stats access while
 * this runs.
 */
static void mtk_pending_work(struct work_struct *work)
{
	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
	unsigned long restart = 0;	/* bitmap of netdevs that were running */
	u32 val;
	int i;

	rtnl_lock();
	set_bit(MTK_RESETTING, &eth->state);

	mtk_prepare_for_reset(eth);
	mtk_wed_fe_reset();
	/* Run again reset preliminary configuration in order to avoid any
	 * possible race during FE reset since it can run releasing RTNL lock.
	 */
	mtk_prepare_for_reset(eth);

	/* stop all devices to make sure that dma is properly shut down */
	for (i = 0; i < MTK_MAX_DEVS; i++) {
		if (!eth->netdev[i] || !netif_running(eth->netdev[i]))
			continue;

		mtk_stop(eth->netdev[i]);
		__set_bit(i, &restart);
	}

	usleep_range(15000, 16000);

	if (eth->dev->pins)
		pinctrl_select_state(eth->dev->pins->p,
				     eth->dev->pins->default_state);
	mtk_hw_init(eth, true);

	/* restart DMA and enable IRQs */
	for (i = 0; i < MTK_MAX_DEVS; i++) {
		if (!eth->netdev[i] || !test_bit(i, &restart))
			continue;

		if (mtk_open(eth->netdev[i])) {
			netif_alert(eth, ifup, eth->netdev[i],
				    "Driver up/down cycle failed\n");
			dev_close(eth->netdev[i]);
		}
	}

	/* set FE PPE ports link up (mirror of mtk_prepare_for_reset()) */
	for (i = MTK_GMAC1_ID;
	     i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID);
	     i += 2) {
		val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) & ~MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT);
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
			val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT);
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
			val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT);

		mtk_w32(eth, val, MTK_FE_GLO_CFG(i));
	}

	clear_bit(MTK_RESETTING, &eth->state);

	mtk_wed_fe_reset_complete();

	rtnl_unlock();
}
4429
/* Free all allocated (but by now unregistered) net_devices and the DSA
 * metadata dsts. Always returns 0.
 */
static int mtk_free_dev(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAX_DEVS; i++) {
		if (!eth->netdev[i])
			continue;
		free_netdev(eth->netdev[i]);
	}

	/* dsa_meta[] is filled front-to-back, so the first NULL entry
	 * marks the end - break, not continue.
	 */
	for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
		if (!eth->dsa_meta[i])
			break;
		metadata_dst_free(eth->dsa_meta[i]);
	}

	return 0;
}
4448
/* Unregister every net_device and, on QDMA-capable SoCs, the per-MAC
 * netdevice notifier registered in mtk_add_mac(). Always returns 0.
 */
static int mtk_unreg_dev(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAX_DEVS; i++) {
		struct mtk_mac *mac;
		if (!eth->netdev[i])
			continue;
		mac = netdev_priv(eth->netdev[i]);
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
			unregister_netdevice_notifier(&mac->device_notifier);
		unregister_netdev(eth->netdev[i]);
	}

	return 0;
}
4465
/* Destroy all LynxI SGMII PCS instances created by mtk_sgmii_init().
 * mtk_pcs_lynxi_destroy() is assumed to tolerate NULL entries - TODO
 * confirm against the pcs-mtk-lynxi API.
 */
static void mtk_sgmii_destroy(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAX_DEVS; i++)
		mtk_pcs_lynxi_destroy(eth->sgmii_pcs[i]);
}
4473
/* Full driver teardown helper used from mtk_remove(): destroy the SGMII
 * PCS instances, unregister then free the netdevs, and cancel the
 * deferred reset/monitor work. Order matters: devices must be
 * unregistered before they are freed.
 */
static int mtk_cleanup(struct mtk_eth *eth)
{
	mtk_sgmii_destroy(eth);
	mtk_unreg_dev(eth);
	mtk_free_dev(eth);
	cancel_work_sync(&eth->pending_work);
	cancel_delayed_work_sync(&eth->reset.monitor_work);

	return 0;
}
4484
/* ethtool .get_link_ksettings: forwarded to phylink; refused with -EBUSY
 * while a frame-engine reset is in flight.
 */
static int mtk_get_link_ksettings(struct net_device *ndev,
				  struct ethtool_link_ksettings *cmd)
{
	struct mtk_mac *mac = netdev_priv(ndev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	return phylink_ethtool_ksettings_get(mac->phylink, cmd);
}
4495
/* ethtool .set_link_ksettings: forwarded to phylink; refused with -EBUSY
 * while a frame-engine reset is in flight.
 */
static int mtk_set_link_ksettings(struct net_device *ndev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct mtk_mac *mac = netdev_priv(ndev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	return phylink_ethtool_ksettings_set(mac->phylink, cmd);
}
4506
/* ethtool .get_drvinfo: report driver name, bus info and the number of
 * base (non page-pool) statistics.
 */
static void mtk_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct mtk_mac *mac = netdev_priv(dev);

	strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
	strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
}
4516
/* ethtool .get_msglevel: the message-enable mask lives on the shared
 * mtk_eth, so it is common to all MACs of the SoC.
 */
static u32 mtk_get_msglevel(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	return mac->hw->msg_enable;
}
4523
/* ethtool .set_msglevel: sets the shared (per-eth) message-enable mask. */
static void mtk_set_msglevel(struct net_device *dev, u32 value)
{
	struct mtk_mac *mac = netdev_priv(dev);

	mac->hw->msg_enable = value;
}
4530
mtk_nway_reset(struct net_device * dev)4531 static int mtk_nway_reset(struct net_device *dev)
4532 {
4533 struct mtk_mac *mac = netdev_priv(dev);
4534
4535 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4536 return -EBUSY;
4537
4538 if (!mac->phylink)
4539 return -ENOTSUPP;
4540
4541 return phylink_ethtool_nway_reset(mac->phylink);
4542 }
4543
/* ethtool .get_strings: emit the base stat names followed, when the
 * page pool is in use, by the standard page-pool stat names. The layout
 * must match mtk_get_ethtool_stats() and mtk_get_sset_count().
 */
static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS: {
		struct mtk_mac *mac = netdev_priv(dev);

		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
			ethtool_puts(&data, mtk_ethtool_stats[i].str);
		if (mtk_page_pool_enabled(mac->hw))
			page_pool_ethtool_stats_get_strings(data);
		break;
	}
	default:
		break;
	}
}
4562
/* ethtool .get_sset_count: base stats plus, when the page pool is in
 * use, the standard page-pool stats. Must stay in sync with
 * mtk_get_strings() and mtk_get_ethtool_stats().
 */
static int mtk_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS: {
		int count = ARRAY_SIZE(mtk_ethtool_stats);
		struct mtk_mac *mac = netdev_priv(dev);

		if (mtk_page_pool_enabled(mac->hw))
			count += page_pool_ethtool_stats_get_count();
		return count;
	}
	default:
		return -EOPNOTSUPP;
	}
}
4578
/* Accumulate page-pool statistics across every RX ring that has a pool
 * and write them into the ethtool data array. page_pool_get_stats()
 * adds into @stats, so the totals cover all rings.
 */
static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data)
{
	struct page_pool_stats stats = {};
	int i;

	for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) {
		struct mtk_rx_ring *ring = &eth->rx_ring[i];

		if (!ring->page_pool)
			continue;

		page_pool_get_stats(ring->page_pool, &stats);
	}
	page_pool_ethtool_stats_get(data, &stats);
}
4594
/* ethtool .get_ethtool_stats: snapshot the per-MAC hardware counters.
 *
 * The copy runs inside a u64_stats fetch/retry loop so that a
 * concurrent writer (mtk_stats_update_mac()) cannot produce a torn
 * read; page-pool stats are appended within the same retry window.
 * Bails out silently while a frame-engine reset is in flight.
 */
static void mtk_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hwstats = mac->hw_stats;
	u64 *data_src, *data_dst;
	unsigned int start;
	int i;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return;

	if (netif_running(dev) && netif_device_present(dev)) {
		/* best effort refresh; skip if the updater holds the lock */
		if (spin_trylock_bh(&hwstats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock_bh(&hwstats->stats_lock);
		}
	}

	data_src = (u64 *)hwstats;

	do {
		data_dst = data;
		start = u64_stats_fetch_begin(&hwstats->syncp);

		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
		if (mtk_page_pool_enabled(mac->hw))
			mtk_ethtool_pp_stats(mac->hw, data_dst);
	} while (u64_stats_fetch_retry(&hwstats->syncp, start));
}
4626
mtk_get_rx_ring_count(struct net_device * dev)4627 static u32 mtk_get_rx_ring_count(struct net_device *dev)
4628 {
4629 if (dev->hw_features & NETIF_F_LRO)
4630 return MTK_MAX_RX_RING_NUM;
4631
4632 return 0;
4633 }
4634
/* ethtool .get_rxnfc: expose the HW LRO flow rules (IP address filters).
 * All sub-commands require NETIF_F_LRO in hw_features; otherwise
 * -EOPNOTSUPP is returned.
 */
static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			 u32 *rule_locs)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXCLSRLCNT:
		if (dev->hw_features & NETIF_F_LRO) {
			struct mtk_mac *mac = netdev_priv(dev);

			cmd->rule_cnt = mac->hwlro_ip_cnt;
			ret = 0;
		}
		break;
	case ETHTOOL_GRXCLSRULE:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_get_fdir_entry(dev, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_get_fdir_all(dev, cmd,
						     rule_locs);
		break;
	default:
		break;
	}

	return ret;
}
4664
/* ethtool .set_rxnfc: add/remove HW LRO IP address filter rules.
 * Requires NETIF_F_LRO in hw_features; otherwise -EOPNOTSUPP.
 */
static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_add_ipaddr(dev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_del_ipaddr(dev, cmd);
		break;
	default:
		break;
	}

	return ret;
}
4684
/* ethtool .get_pauseparam: thin wrapper around phylink. */
static void mtk_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
{
	struct mtk_mac *mac = netdev_priv(dev);

	phylink_ethtool_get_pauseparam(mac->phylink, pause);
}
4691
/* ethtool .set_pauseparam: thin wrapper around phylink. */
static int mtk_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
{
	struct mtk_mac *mac = netdev_priv(dev);

	return phylink_ethtool_set_pauseparam(mac->phylink, pause);
}
4698
/* ethtool .get_eee: thin wrapper around phylink. */
static int mtk_get_eee(struct net_device *dev, struct ethtool_keee *eee)
{
	struct mtk_mac *mac = netdev_priv(dev);

	return phylink_ethtool_get_eee(mac->phylink, eee);
}
4705
/* ethtool .set_eee: thin wrapper around phylink. */
static int mtk_set_eee(struct net_device *dev, struct ethtool_keee *eee)
{
	struct mtk_mac *mac = netdev_priv(dev);

	return phylink_ethtool_set_eee(mac->phylink, eee);
}
4712
mtk_select_queue(struct net_device * dev,struct sk_buff * skb,struct net_device * sb_dev)4713 static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
4714 struct net_device *sb_dev)
4715 {
4716 struct mtk_mac *mac = netdev_priv(dev);
4717 unsigned int queue = 0;
4718
4719 if (netdev_uses_dsa(dev))
4720 queue = skb_get_queue_mapping(skb) + 3;
4721 else
4722 queue = mac->id;
4723
4724 if (queue >= dev->num_tx_queues)
4725 queue = 0;
4726
4727 return queue;
4728 }
4729
/* ethtool operations shared by all MACs of the SoC */
static const struct ethtool_ops mtk_ethtool_ops = {
	.get_link_ksettings	= mtk_get_link_ksettings,
	.set_link_ksettings	= mtk_set_link_ksettings,
	.get_drvinfo		= mtk_get_drvinfo,
	.get_msglevel		= mtk_get_msglevel,
	.set_msglevel		= mtk_set_msglevel,
	.nway_reset		= mtk_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_strings		= mtk_get_strings,
	.get_sset_count		= mtk_get_sset_count,
	.get_ethtool_stats	= mtk_get_ethtool_stats,
	.get_pauseparam		= mtk_get_pauseparam,
	.set_pauseparam		= mtk_set_pauseparam,
	.get_rxnfc		= mtk_get_rxnfc,
	.set_rxnfc		= mtk_set_rxnfc,
	.get_rx_ring_count	= mtk_get_rx_ring_count,
	.get_eee		= mtk_get_eee,
	.set_eee		= mtk_set_eee,
};
4749
/* net_device operations shared by all MACs of the SoC */
static const struct net_device_ops mtk_netdev_ops = {
	.ndo_uninit		= mtk_uninit,
	.ndo_open		= mtk_open,
	.ndo_stop		= mtk_stop,
	.ndo_start_xmit		= mtk_start_xmit,
	.ndo_set_mac_address	= mtk_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= mtk_do_ioctl,
	.ndo_change_mtu		= mtk_change_mtu,
	.ndo_tx_timeout		= mtk_tx_timeout,
	.ndo_get_stats64        = mtk_get_stats64,
	.ndo_fix_features	= mtk_fix_features,
	.ndo_set_features	= mtk_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mtk_poll_controller,
#endif
	.ndo_setup_tc		= mtk_eth_setup_tc,
	.ndo_bpf		= mtk_xdp,
	.ndo_xdp_xmit		= mtk_xdp_xmit,
	.ndo_select_queue	= mtk_select_queue,
};
4771
/* Parse one "mediatek,eth-mac" DT child node: allocate the net_device,
 * set up the per-MAC hardware statistics block, derive the phylink
 * configuration from the SoC capability bits and create the phylink
 * instance. The netdev is only registered later, in mtk_probe().
 *
 * Returns 0 on success or a negative errno; on failure after the netdev
 * allocation it is freed again here (hw_stats is devm-managed).
 */
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
	const __be32 *_id = of_get_property(np, "reg", NULL);
	phy_interface_t phy_mode;
	struct phylink *phylink;
	struct mtk_mac *mac;
	int id, err;
	int txqs = 1;
	u32 val;

	if (!_id) {
		dev_err(eth->dev, "missing mac id\n");
		return -EINVAL;
	}

	id = be32_to_cpup(_id);
	if (id >= MTK_MAX_DEVS) {
		dev_err(eth->dev, "%d is not a valid mac id\n", id);
		return -EINVAL;
	}

	if (eth->netdev[id]) {
		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
		return -EINVAL;
	}

	/* QDMA-capable SoCs expose one TX netdev queue per QDMA queue */
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		txqs = MTK_QDMA_NUM_QUEUES;

	eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1);
	if (!eth->netdev[id]) {
		dev_err(eth->dev, "alloc_etherdev failed\n");
		return -ENOMEM;
	}
	mac = netdev_priv(eth->netdev[id]);
	eth->mac[id] = mac;
	mac->id = id;
	mac->hw = eth;
	mac->of_node = np;

	err = of_get_ethdev_address(mac->of_node, eth->netdev[id]);
	if (err == -EPROBE_DEFER)
		return err;

	if (err) {
		/* If the mac address is invalid, use random mac address */
		eth_hw_addr_random(eth->netdev[id]);
		dev_err(eth->dev, "generated random MAC address %pM\n",
			eth->netdev[id]->dev_addr);
	}

	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
	mac->hwlro_ip_cnt = 0;

	mac->hw_stats = devm_kzalloc(eth->dev,
				     sizeof(*mac->hw_stats),
				     GFP_KERNEL);
	if (!mac->hw_stats) {
		dev_err(eth->dev, "failed to allocate counter memory\n");
		err = -ENOMEM;
		goto free_netdev;
	}
	spin_lock_init(&mac->hw_stats->stats_lock);
	u64_stats_init(&mac->hw_stats->syncp);

	/* per-MAC MIB counter stride differs between NETSYS generations */
	if (mtk_is_netsys_v3_or_greater(eth))
		mac->hw_stats->reg_offset = id * 0x80;
	else
		mac->hw_stats->reg_offset = id * 0x40;

	/* phylink create */
	err = of_get_phy_mode(np, &phy_mode);
	if (err) {
		dev_err(eth->dev, "incorrect phy-mode\n");
		goto free_netdev;
	}

	/* mac config is not set */
	mac->interface = PHY_INTERFACE_MODE_NA;
	mac->speed = SPEED_UNKNOWN;

	mac->phylink_config.dev = &eth->netdev[id]->dev;
	mac->phylink_config.type = PHYLINK_NETDEV;
	mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
		MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
	mac->phylink_config.lpi_capabilities = MAC_100FD | MAC_1000FD |
					       MAC_2500FD;
	mac->phylink_config.lpi_timer_default = 1000;

	/* MT7623 gmac0 is now missing its speed-specific PLL configuration
	 * in its .mac_config method (since state->speed is not valid there.
	 * Disable support for MII, GMII and RGMII.
	 */
	if (!mac->hw->soc->disable_pll_modes || mac->id != 0) {
		__set_bit(PHY_INTERFACE_MODE_MII,
			  mac->phylink_config.supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_GMII,
			  mac->phylink_config.supported_interfaces);

		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
			phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
	}

	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
		__set_bit(PHY_INTERFACE_MODE_TRGMII,
			  mac->phylink_config.supported_interfaces);

	/* TRGMII is not permitted on MT7621 if using DDR2 */
	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) &&
	    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII_MT7621_CLK)) {
		regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
		if (val & SYSCFG_DRAM_TYPE_DDR2)
			__clear_bit(PHY_INTERFACE_MODE_TRGMII,
				    mac->phylink_config.supported_interfaces);
	}

	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
		__set_bit(PHY_INTERFACE_MODE_SGMII,
			  mac->phylink_config.supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
			  mac->phylink_config.supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_2500BASEX,
			  mac->phylink_config.supported_interfaces);
	}

	/* GMAC1 wired to the internal switch: internal interface only */
	if (mtk_is_netsys_v3_or_greater(mac->hw) &&
	    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW) &&
	    id == MTK_GMAC1_ID) {
		mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
						       MAC_SYM_PAUSE |
						       MAC_10000FD;
		phy_interface_zero(mac->phylink_config.supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
			  mac->phylink_config.supported_interfaces);
	}

	phylink = phylink_create(&mac->phylink_config,
				 of_fwnode_handle(mac->of_node),
				 phy_mode, &mtk_phylink_ops);
	if (IS_ERR(phylink)) {
		err = PTR_ERR(phylink);
		goto free_netdev;
	}

	mac->phylink = phylink;

	/* NOTE(review): this interface bit is set after phylink_create() -
	 * confirm phylink honours late supported_interfaces updates.
	 */
	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_2P5GPHY) &&
	    id == MTK_GMAC2_ID)
		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
			  mac->phylink_config.supported_interfaces);

	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
	eth->netdev[id]->watchdog_timeo = 5 * HZ;
	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
	eth->netdev[id]->base_addr = (unsigned long)eth->base;

	eth->netdev[id]->hw_features = eth->soc->hw_features;
	if (eth->hwlro)
		eth->netdev[id]->hw_features |= NETIF_F_LRO;

	eth->netdev[id]->vlan_features = eth->soc->hw_features &
		~NETIF_F_HW_VLAN_CTAG_TX;
	eth->netdev[id]->features |= eth->soc->hw_features;
	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;

	eth->netdev[id]->irq = eth->irq[MTK_FE_IRQ_SHARED];
	eth->netdev[id]->dev.of_node = np;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
		eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
	else
		eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		mac->device_notifier.notifier_call = mtk_device_event;
		register_netdevice_notifier(&mac->device_notifier);
	}

	if (mtk_page_pool_enabled(eth))
		eth->netdev[id]->xdp_features = NETDEV_XDP_ACT_BASIC |
						NETDEV_XDP_ACT_REDIRECT |
						NETDEV_XDP_ACT_NDO_XMIT |
						NETDEV_XDP_ACT_NDO_XMIT_SG;

	return 0;

free_netdev:
	free_netdev(eth->netdev[id]);
	return err;
}
4962
/* Swap the device used for DMA mappings (called by the WED code).
 * All running netdevs are closed, the dma_dev pointer is replaced, and
 * the same devices are reopened so their rings are remapped against the
 * new device. Runs under RTNL.
 */
void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(dev_list);
	int i;

	rtnl_lock();

	for (i = 0; i < MTK_MAX_DEVS; i++) {
		dev = eth->netdev[i];

		if (!dev || !(dev->flags & IFF_UP))
			continue;

		list_add_tail(&dev->close_list, &dev_list);
	}

	netif_close_many(&dev_list, false);

	eth->dma_dev = dma_dev;

	list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
		list_del_init(&dev->close_list);
		dev_open(dev, NULL);
	}

	rtnl_unlock();
}
4991
/* Create one LynxI SGMII PCS per "mediatek,sgmiisys" phandle in the DT.
 * Stops at the first missing phandle. Returns 0 on success or the
 * regmap lookup error.
 *
 * NOTE(review): the result of mtk_pcs_lynxi_create() is stored without
 * an error check - presumably consumers tolerate a NULL PCS; confirm.
 */
static int mtk_sgmii_init(struct mtk_eth *eth)
{
	struct device_node *np;
	struct regmap *regmap;
	int i;

	for (i = 0; i < MTK_MAX_DEVS; i++) {
		np = of_parse_phandle(eth->dev->of_node, "mediatek,sgmiisys", i);
		if (!np)
			break;

		regmap = syscon_node_to_regmap(np);
		if (IS_ERR(regmap)) {
			of_node_put(np);
			return PTR_ERR(regmap);
		}

		eth->sgmii_pcs[i] = mtk_pcs_lynxi_create(eth->dev,
							 of_fwnode_handle(np),
							 regmap,
							 eth->soc->ana_rgc3);
		of_node_put(np);
	}

	return 0;
}
5018
/* Fallback for old device trees without an "sram" pool node: carve a
 * gen_pool out of the already-mapped register space at the hard-coded
 * NETSYS v2 SRAM offset. Fails with -EINVAL if the MMIO resource is too
 * small to contain the assumed SRAM region.
 */
static int mtk_setup_legacy_sram(struct mtk_eth *eth, struct resource *res)
{
	dev_warn(eth->dev, "legacy DT: using hard-coded SRAM offset.\n");

	if (res->start + MTK_ETH_SRAM_OFFSET + MTK_ETH_NETSYS_V2_SRAM_SIZE - 1 >
	    res->end)
		return -EINVAL;

	eth->sram_pool = devm_gen_pool_create(eth->dev,
					      const_ilog2(MTK_ETH_SRAM_GRANULARITY),
					      NUMA_NO_NODE, dev_name(eth->dev));

	if (IS_ERR(eth->sram_pool))
		return PTR_ERR(eth->sram_pool);

	return gen_pool_add_virt(eth->sram_pool,
				 (unsigned long)eth->base + MTK_ETH_SRAM_OFFSET,
				 res->start + MTK_ETH_SRAM_OFFSET,
				 MTK_ETH_NETSYS_V2_SRAM_SIZE, NUMA_NO_NODE);
}
5039
/* Platform probe: map registers, look up syscon regmaps, set up SGMII /
 * SRAM / WED / clocks, initialise the hardware, create one netdev per
 * "mediatek,eth-mac" DT child, request IRQs, bring up MDIO and the PPE
 * offload engines, and finally register the netdevs and the dummy NAPI
 * device. Error paths unwind in reverse order via the goto chain at the
 * bottom.
 */
static int mtk_probe(struct platform_device *pdev)
{
	struct resource *res = NULL;
	struct device_node *mac_np;
	struct mtk_eth *eth;
	int err, i;

	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
	if (!eth)
		return -ENOMEM;

	eth->soc = of_device_get_match_data(&pdev->dev);

	eth->dev = &pdev->dev;
	eth->dma_dev = &pdev->dev;
	eth->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(eth->base))
		return PTR_ERR(eth->base);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
		eth->ip_align = NET_IP_ALIGN;

	/* 36-bit streaming DMA, but coherent allocations stay below 4 GiB */
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
		if (!err)
			err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

		if (err) {
			dev_err(&pdev->dev, "Wrong DMA config\n");
			return -EINVAL;
		}
	}

	spin_lock_init(&eth->page_lock);
	spin_lock_init(&eth->tx_irq_lock);
	spin_lock_init(&eth->rx_irq_lock);
	spin_lock_init(&eth->dim_lock);

	eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
	INIT_DELAYED_WORK(&eth->reset.monitor_work, mtk_hw_reset_monitor_work);

	eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);

	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							      "mediatek,ethsys");
		if (IS_ERR(eth->ethsys)) {
			dev_err(&pdev->dev, "no ethsys regmap found\n");
			return PTR_ERR(eth->ethsys);
		}
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
		eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							     "mediatek,infracfg");
		if (IS_ERR(eth->infra)) {
			dev_err(&pdev->dev, "no infracfg regmap found\n");
			return PTR_ERR(eth->infra);
		}
	}

	if (of_dma_is_coherent(pdev->dev.of_node)) {
		struct regmap *cci;

		cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
						      "cci-control-port");
		/* enable CPU/bus coherency */
		if (!IS_ERR(cci))
			regmap_write(cci, 0, 3);
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
		err = mtk_sgmii_init(eth);

		if (err)
			return err;
	}

	if (eth->soc->required_pctl) {
		eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							    "mediatek,pctl");
		if (IS_ERR(eth->pctl)) {
			dev_err(&pdev->dev, "no pctl regmap found\n");
			err = PTR_ERR(eth->pctl);
			goto err_destroy_sgmii;
		}
	}

	if (mtk_is_netsys_v2_or_greater(eth)) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res) {
			err = -EINVAL;
			goto err_destroy_sgmii;
		}

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
			eth->sram_pool = of_gen_pool_get(pdev->dev.of_node,
							 "sram", 0);
			if (!eth->sram_pool) {
				/* pre-v3 SoCs may fall back to a
				 * hard-coded SRAM window (legacy DTs)
				 */
				if (!mtk_is_netsys_v3_or_greater(eth)) {
					err = mtk_setup_legacy_sram(eth, res);
					if (err)
						goto err_destroy_sgmii;
				} else {
					dev_err(&pdev->dev,
						"Could not get SRAM pool\n");
					err = -EINVAL;
					goto err_destroy_sgmii;
				}
			}
		}
	}

	if (eth->soc->offload_version) {
		/* register every WED instance referenced by the DT */
		for (i = 0;; i++) {
			struct device_node *np;
			phys_addr_t wdma_phy;
			u32 wdma_base;

			if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
				break;

			np = of_parse_phandle(pdev->dev.of_node,
					      "mediatek,wed", i);
			if (!np)
				break;

			wdma_base = eth->soc->reg_map->wdma_base[i];
			wdma_phy = res ? res->start + wdma_base : 0;
			mtk_wed_add_hw(np, eth, eth->base + wdma_base,
				       wdma_phy, i);
		}
	}

	err = mtk_get_irqs(pdev, eth);
	if (err)
		goto err_wed_exit;

	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
		eth->clks[i] = devm_clk_get(eth->dev,
					    mtk_clks_source_name[i]);
		if (IS_ERR(eth->clks[i])) {
			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) {
				err = -EPROBE_DEFER;
				goto err_wed_exit;
			}
			if (eth->soc->required_clks & BIT(i)) {
				dev_err(&pdev->dev, "clock %s not found\n",
					mtk_clks_source_name[i]);
				err = -EINVAL;
				goto err_wed_exit;
			}
			/* optional clock missing: treat as absent */
			eth->clks[i] = NULL;
		}
	}

	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
	INIT_WORK(&eth->pending_work, mtk_pending_work);

	err = mtk_hw_init(eth, false);
	if (err)
		goto err_wed_exit;

	eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);

	for_each_child_of_node(pdev->dev.of_node, mac_np) {
		if (!of_device_is_compatible(mac_np,
					     "mediatek,eth-mac"))
			continue;

		if (!of_device_is_available(mac_np))
			continue;

		err = mtk_add_mac(eth, mac_np);
		if (err) {
			of_node_put(mac_np);
			goto err_deinit_hw;
		}
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
		err = devm_request_irq(eth->dev, eth->irq[MTK_FE_IRQ_SHARED],
				       mtk_handle_irq, 0,
				       dev_name(eth->dev), eth);
	} else {
		err = devm_request_irq(eth->dev, eth->irq[MTK_FE_IRQ_TX],
				       mtk_handle_irq_tx, 0,
				       dev_name(eth->dev), eth);
		if (err)
			goto err_free_dev;

		err = devm_request_irq(eth->dev, eth->irq[MTK_FE_IRQ_RX],
				       mtk_handle_irq_rx, 0,
				       dev_name(eth->dev), eth);
	}
	if (err)
		goto err_free_dev;

	/* No MT7628/88 support yet */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		err = mtk_mdio_init(eth);
		if (err)
			goto err_free_dev;
	}

	if (eth->soc->offload_version) {
		u8 ppe_num = eth->soc->ppe_num;

		ppe_num = min_t(u8, ARRAY_SIZE(eth->ppe), ppe_num);
		for (i = 0; i < ppe_num; i++) {
			u32 ppe_addr = eth->soc->reg_map->ppe_base;

			/* PPE2 sits at a non-linear offset */
			ppe_addr += (i == 2 ? 0xc00 : i * 0x400);
			eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i);

			if (!eth->ppe[i]) {
				err = -ENOMEM;
				goto err_deinit_ppe;
			}
			err = mtk_eth_offload_init(eth, i);

			if (err)
				goto err_deinit_ppe;
		}
	}

	for (i = 0; i < MTK_MAX_DEVS; i++) {
		if (!eth->netdev[i])
			continue;

		err = register_netdev(eth->netdev[i]);
		if (err) {
			dev_err(eth->dev, "error bringing up device\n");
			goto err_deinit_ppe;
		} else
			netif_info(eth, probe, eth->netdev[i],
				   "mediatek frame engine at 0x%08lx, irq %d\n",
				   eth->netdev[i]->base_addr, eth->irq[MTK_FE_IRQ_SHARED]);
	}

	/* we run 2 devices on the same DMA ring so we need a dummy device
	 * for NAPI to work
	 */
	eth->dummy_dev = alloc_netdev_dummy(0);
	if (!eth->dummy_dev) {
		err = -ENOMEM;
		dev_err(eth->dev, "failed to allocated dummy device\n");
		goto err_unreg_netdev;
	}
	netif_napi_add(eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
	netif_napi_add(eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);

	platform_set_drvdata(pdev, eth);
	schedule_delayed_work(&eth->reset.monitor_work,
			      MTK_DMA_MONITOR_TIMEOUT);

	return 0;

err_unreg_netdev:
	mtk_unreg_dev(eth);
err_deinit_ppe:
	mtk_ppe_deinit(eth);
	mtk_mdio_cleanup(eth);
err_free_dev:
	mtk_free_dev(eth);
err_deinit_hw:
	mtk_hw_deinit(eth);
err_wed_exit:
	mtk_wed_exit();
err_destroy_sgmii:
	mtk_sgmii_destroy(eth);

	return err;
}
5316
/* Platform remove: stop all netdevs so DMA is quiesced, detach PHYs,
 * tear down WED and the hardware, delete the NAPI contexts and finally
 * unregister/free everything via mtk_cleanup().
 */
static void mtk_remove(struct platform_device *pdev)
{
	struct mtk_eth *eth = platform_get_drvdata(pdev);
	struct mtk_mac *mac;
	int i;

	/* stop all devices to make sure that dma is properly shut down */
	for (i = 0; i < MTK_MAX_DEVS; i++) {
		if (!eth->netdev[i])
			continue;
		mtk_stop(eth->netdev[i]);
		mac = netdev_priv(eth->netdev[i]);
		phylink_disconnect_phy(mac->phylink);
	}

	mtk_wed_exit();
	mtk_hw_deinit(eth);

	netif_napi_del(&eth->tx_napi);
	netif_napi_del(&eth->rx_napi);
	mtk_cleanup(eth);
	free_netdev(eth->dummy_dev);
	mtk_mdio_cleanup(eth);
}
5341
/* MT2701-class SoC (shares MT7623 capability bits): v1 register layout and
 * v1 DMA descriptors, HW LRO capable, pinctrl region required; no PPE flow
 * offload fields are configured here.
 */
static const struct mtk_soc_data mt2701_data = {
	.reg_map = &mtk_reg_map,
	.caps = MT7623_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7623_CLKS_BITMAP,
	.required_pctl = true,
	.version = 1,
	.tx = {
		.desc_size = sizeof(struct mtk_tx_dma),
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = 16,
		.dma_size = MTK_DMA_SIZE(2K),
		.fq_dma_size = MTK_DMA_SIZE(2K),
	},
	.rx = {
		.desc_size = sizeof(struct mtk_rx_dma),
		.irq_done_mask = MTK_RX_DONE_INT,
		.dma_l4_valid = RX_DMA_L4_VALID,
		.dma_size = MTK_DMA_SIZE(2K),
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = 16,
	},
};
5365
/* MT7621: v1 register layout and v1 DMA descriptors with a single PPE
 * (flow offload v1, v1 FOE entry format); no pinctrl region required.
 */
static const struct mtk_soc_data mt7621_data = {
	.reg_map = &mtk_reg_map,
	.caps = MT7621_CAPS,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7621_CLKS_BITMAP,
	.required_pctl = false,
	.version = 1,
	.offload_version = 1,
	.ppe_num = 1,
	.hash_offset = 2,
	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
	.tx = {
		.desc_size = sizeof(struct mtk_tx_dma),
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = 16,
		.dma_size = MTK_DMA_SIZE(2K),
		.fq_dma_size = MTK_DMA_SIZE(2K),
	},
	.rx = {
		.desc_size = sizeof(struct mtk_rx_dma),
		.irq_done_mask = MTK_RX_DONE_INT,
		.dma_l4_valid = RX_DMA_L4_VALID,
		.dma_size = MTK_DMA_SIZE(2K),
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = 16,
	},
};
5393
/* MT7622: v1 register layout with HW LRO, one PPE (flow offload v2, v1 FOE
 * entries) and hardware MIB accounting; ana_rgc3 gives the per-SoC analog
 * register offset used by the SGMII/PCS setup.
 */
static const struct mtk_soc_data mt7622_data = {
	.reg_map = &mtk_reg_map,
	.ana_rgc3 = 0x2028,
	.caps = MT7622_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7622_CLKS_BITMAP,
	.required_pctl = false,
	.version = 1,
	.offload_version = 2,
	.ppe_num = 1,
	.hash_offset = 2,
	.has_accounting = true,
	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
	.tx = {
		.desc_size = sizeof(struct mtk_tx_dma),
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = 16,
		.dma_size = MTK_DMA_SIZE(2K),
		.fq_dma_size = MTK_DMA_SIZE(2K),
	},
	.rx = {
		.desc_size = sizeof(struct mtk_rx_dma),
		.irq_done_mask = MTK_RX_DONE_INT,
		.dma_l4_valid = RX_DMA_L4_VALID,
		.dma_size = MTK_DMA_SIZE(2K),
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = 16,
	},
};
5423
/* MT7623: like mt2701_data (same caps/clks, HW LRO, pinctrl required) but
 * additionally enables one PPE (flow offload v1, v1 FOE entries) and sets
 * disable_pll_modes.
 */
static const struct mtk_soc_data mt7623_data = {
	.reg_map = &mtk_reg_map,
	.caps = MT7623_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7623_CLKS_BITMAP,
	.required_pctl = true,
	.version = 1,
	.offload_version = 1,
	.ppe_num = 1,
	.hash_offset = 2,
	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
	.disable_pll_modes = true,
	.tx = {
		.desc_size = sizeof(struct mtk_tx_dma),
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = 16,
		.dma_size = MTK_DMA_SIZE(2K),
		.fq_dma_size = MTK_DMA_SIZE(2K),
	},
	.rx = {
		.desc_size = sizeof(struct mtk_rx_dma),
		.irq_done_mask = MTK_RX_DONE_INT,
		.dma_l4_valid = RX_DMA_L4_VALID,
		.dma_size = MTK_DMA_SIZE(2K),
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = 16,
	},
};
5452
/* MT7629: v1 register layout with HW LRO and hardware MIB accounting;
 * no PPE flow offload fields are configured here.
 */
static const struct mtk_soc_data mt7629_data = {
	.reg_map = &mtk_reg_map,
	.ana_rgc3 = 0x128,
	.caps = MT7629_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7629_CLKS_BITMAP,
	.required_pctl = false,
	.has_accounting = true,
	.version = 1,
	.tx = {
		.desc_size = sizeof(struct mtk_tx_dma),
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = 16,
		.dma_size = MTK_DMA_SIZE(2K),
		.fq_dma_size = MTK_DMA_SIZE(2K),
	},
	.rx = {
		.desc_size = sizeof(struct mtk_rx_dma),
		.irq_done_mask = MTK_RX_DONE_INT,
		.dma_l4_valid = RX_DMA_L4_VALID,
		.dma_size = MTK_DMA_SIZE(2K),
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = 16,
	},
};
5478
/* MT7981: v2 hardware using the MT7986 register map, v2 TX descriptors
 * (8-bit length offset) with v1 RX descriptors, two PPEs (flow offload v2,
 * v2 FOE entries) and hardware MIB accounting.
 */
static const struct mtk_soc_data mt7981_data = {
	.reg_map = &mt7986_reg_map,
	.ana_rgc3 = 0x128,
	.caps = MT7981_CAPS,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7981_CLKS_BITMAP,
	.required_pctl = false,
	.version = 2,
	.offload_version = 2,
	.ppe_num = 2,
	.hash_offset = 4,
	.has_accounting = true,
	.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
	.tx = {
		.desc_size = sizeof(struct mtk_tx_dma_v2),
		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
		.dma_len_offset = 8,
		.dma_size = MTK_DMA_SIZE(2K),
		.fq_dma_size = MTK_DMA_SIZE(2K),
	},
	.rx = {
		.desc_size = sizeof(struct mtk_rx_dma),
		.irq_done_mask = MTK_RX_DONE_INT,
		.dma_l4_valid = RX_DMA_L4_VALID_V2,
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = 16,
		.dma_size = MTK_DMA_SIZE(2K),
	},
};
5508
/* MT7986: identical configuration to mt7981_data except for its own
 * capability and clock bitmaps (v2 hardware, two PPEs, flow offload v2,
 * v2 FOE entries, hardware MIB accounting).
 */
static const struct mtk_soc_data mt7986_data = {
	.reg_map = &mt7986_reg_map,
	.ana_rgc3 = 0x128,
	.caps = MT7986_CAPS,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7986_CLKS_BITMAP,
	.required_pctl = false,
	.version = 2,
	.offload_version = 2,
	.ppe_num = 2,
	.hash_offset = 4,
	.has_accounting = true,
	.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
	.tx = {
		.desc_size = sizeof(struct mtk_tx_dma_v2),
		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
		.dma_len_offset = 8,
		.dma_size = MTK_DMA_SIZE(2K),
		.fq_dma_size = MTK_DMA_SIZE(2K),
	},
	.rx = {
		.desc_size = sizeof(struct mtk_rx_dma),
		.irq_done_mask = MTK_RX_DONE_INT,
		.dma_l4_valid = RX_DMA_L4_VALID_V2,
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = 16,
		.dma_size = MTK_DMA_SIZE(2K),
	},
};
5538
/* MT7988: v3 hardware with its own register map, v2 TX and RX descriptors,
 * three PPEs (flow offload v2, v3 FOE entries), hardware MIB accounting,
 * and a larger 4K free-queue DMA ring.
 */
static const struct mtk_soc_data mt7988_data = {
	.reg_map = &mt7988_reg_map,
	.ana_rgc3 = 0x128,
	.caps = MT7988_CAPS,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7988_CLKS_BITMAP,
	.required_pctl = false,
	.version = 3,
	.offload_version = 2,
	.ppe_num = 3,
	.hash_offset = 4,
	.has_accounting = true,
	.foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
	.tx = {
		.desc_size = sizeof(struct mtk_tx_dma_v2),
		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
		.dma_len_offset = 8,
		.dma_size = MTK_DMA_SIZE(2K),
		.fq_dma_size = MTK_DMA_SIZE(4K),
	},
	.rx = {
		.desc_size = sizeof(struct mtk_rx_dma_v2),
		.irq_done_mask = MTK_RX_DONE_INT_V2,
		.dma_l4_valid = RX_DMA_L4_VALID_V2,
		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
		.dma_len_offset = 8,
		.dma_size = MTK_DMA_SIZE(2K),
	},
};
5568
/* RT5350: reuses the MT7628 register map, capability bits and reduced
 * hardware-feature set; PDMA-specific L4-valid bit and no free-queue DMA
 * ring size configured.
 */
static const struct mtk_soc_data rt5350_data = {
	.reg_map = &mt7628_reg_map,
	.caps = MT7628_CAPS,
	.hw_features = MTK_HW_FEATURES_MT7628,
	.required_clks = MT7628_CLKS_BITMAP,
	.required_pctl = false,
	.version = 1,
	.tx = {
		.desc_size = sizeof(struct mtk_tx_dma),
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = 16,
		.dma_size = MTK_DMA_SIZE(2K),
	},
	.rx = {
		.desc_size = sizeof(struct mtk_rx_dma),
		.irq_done_mask = MTK_RX_DONE_INT,
		.dma_l4_valid = RX_DMA_L4_VALID_PDMA,
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = 16,
		.dma_size = MTK_DMA_SIZE(2K),
	},
};
5591
/* Device-tree match table: maps each supported compatible string to its
 * per-SoC configuration above. Non-static so it can be referenced from
 * the driver header.
 */
const struct of_device_id of_mtk_match[] = {
	{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data },
	{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
	{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data },
	{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data },
	{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data },
	{ .compatible = "mediatek,mt7981-eth", .data = &mt7981_data },
	{ .compatible = "mediatek,mt7986-eth", .data = &mt7986_data },
	{ .compatible = "mediatek,mt7988-eth", .data = &mt7988_data },
	{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data },
	{},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);
5605
/* Platform driver glue: binds via the OF match table and registers the
 * probe/remove callbacks defined above.
 */
static struct platform_driver mtk_driver = {
	.probe = mtk_probe,
	.remove = mtk_remove,
	.driver = {
		.name = "mtk_soc_eth",
		.of_match_table = of_mtk_match,
	},
};

module_platform_driver(mtk_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
MODULE_IMPORT_NS("NETDEV_INTERNAL");