xref: /linux/drivers/net/ethernet/airoha/airoha_eth.c (revision 3d2c3d2eea9acdbee5b5742d15d021069b49d3f9)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2024 AIROHA Inc
 * Author: Lorenzo Bianconi <lorenzo@kernel.org>
 */
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/tcp.h>
#include <linux/u64_stats_sync.h>
#include <net/dst_metadata.h>
#include <net/page_pool/helpers.h>
#include <net/pkt_cls.h>
#include <uapi/linux/ppp_defs.h>

#include "airoha_regs.h"
#include "airoha_eth.h"

u32 airoha_rr(void __iomem *base, u32 offset)
{
	return readl(base + offset);
}

void airoha_wr(void __iomem *base, u32 offset, u32 val)
{
	writel(val, base + offset);
}

u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
{
	val |= (airoha_rr(base, offset) & ~mask);
	airoha_wr(base, offset, val);

	return val;
}
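
/* airoha_rmw() is the primitive behind the register helpers used below:
 * bits in @mask are cleared, @val is OR-ed in and the result is written
 * back. A minimal usage sketch (register offset and field are made up
 * for illustration only):
 *
 *	airoha_rmw(base, REG_FOO, GENMASK(7, 4),
 *		   FIELD_PREP(GENMASK(7, 4), 0x9));
 *
 * updates only bits [7:4] and leaves the rest of the register untouched.
 */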

static void airoha_qdma_set_irqmask(struct airoha_irq_bank *irq_bank,
				    int index, u32 clear, u32 set)
{
	struct airoha_qdma *qdma = irq_bank->qdma;
	int bank = irq_bank - &qdma->irq_banks[0];
	unsigned long flags;

	if (WARN_ON_ONCE(index >= ARRAY_SIZE(irq_bank->irqmask)))
		return;

	spin_lock_irqsave(&irq_bank->irq_lock, flags);

	irq_bank->irqmask[index] &= ~clear;
	irq_bank->irqmask[index] |= set;
	airoha_qdma_wr(qdma, REG_INT_ENABLE(bank, index),
		       irq_bank->irqmask[index]);
	/* Read irq_enable register in order to guarantee the update above
	 * completes in the spinlock critical section.
	 */
	airoha_qdma_rr(qdma, REG_INT_ENABLE(bank, index));

	spin_unlock_irqrestore(&irq_bank->irq_lock, flags);
}

static void airoha_qdma_irq_enable(struct airoha_irq_bank *irq_bank,
				   int index, u32 mask)
{
	airoha_qdma_set_irqmask(irq_bank, index, 0, mask);
}

static void airoha_qdma_irq_disable(struct airoha_irq_bank *irq_bank,
				    int index, u32 mask)
{
	airoha_qdma_set_irqmask(irq_bank, index, mask, 0);
}
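
/* Enabling an interrupt sets bits in the cached irqmask, disabling clears
 * them; either way the cache is flushed to REG_INT_ENABLE() under
 * irq_lock. E.g. unmasking the TX-done interrupt of completion queue 0
 * on bank 0 (the same call shape used by the napi handlers below):
 *
 *	airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0,
 *			       TX_DONE_INT_MASK(0));
 */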

static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr)
{
	struct airoha_eth *eth = port->qdma->eth;
	u32 val, reg;

	reg = airoha_is_lan_gdm_port(port) ? REG_FE_LAN_MAC_H
					   : REG_FE_WAN_MAC_H;
	val = (addr[0] << 16) | (addr[1] << 8) | addr[2];
	airoha_fe_wr(eth, reg, val);

	val = (addr[3] << 16) | (addr[4] << 8) | addr[5];
	airoha_fe_wr(eth, REG_FE_MAC_LMIN(reg), val);
	airoha_fe_wr(eth, REG_FE_MAC_LMAX(reg), val);

	airoha_ppe_init_upd_mem(port);
}

static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
					u32 val)
{
	airoha_fe_rmw(eth, addr, GDM_OCFQ_MASK,
		      FIELD_PREP(GDM_OCFQ_MASK, val));
	airoha_fe_rmw(eth, addr, GDM_MCFQ_MASK,
		      FIELD_PREP(GDM_MCFQ_MASK, val));
	airoha_fe_rmw(eth, addr, GDM_BCFQ_MASK,
		      FIELD_PREP(GDM_BCFQ_MASK, val));
	airoha_fe_rmw(eth, addr, GDM_UCFQ_MASK,
		      FIELD_PREP(GDM_UCFQ_MASK, val));
}
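
/* A GDM forwarding register carries one queue-selection field per traffic
 * class (the OCFQ/MCFQ/BCFQ/UCFQ masks, presumably "other", multicast,
 * broadcast and unicast); the helper above points all four at the same
 * PSE port. E.g. the airoha_set_gdm_port_fwd_cfg(..., FE_PSE_PORT_PPE1)
 * calls later in this file steer every class of a GDM port to PPE1.
 */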

static int airoha_set_vip_for_gdm_port(struct airoha_gdm_port *port,
				       bool enable)
{
	struct airoha_eth *eth = port->qdma->eth;
	u32 vip_port;

	switch (port->id) {
	case AIROHA_GDM3_IDX:
		/* FIXME: handle XSI_PCIE1_PORT */
		vip_port = XSI_PCIE0_VIP_PORT_MASK;
		break;
	case AIROHA_GDM4_IDX:
		/* FIXME: handle XSI_USB_PORT */
		vip_port = XSI_ETH_VIP_PORT_MASK;
		break;
	default:
		return 0;
	}

	if (enable) {
		airoha_fe_set(eth, REG_FE_VIP_PORT_EN, vip_port);
		airoha_fe_set(eth, REG_FE_IFC_PORT_EN, vip_port);
	} else {
		airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, vip_port);
		airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, vip_port);
	}

	return 0;
}

static void airoha_fe_maccr_init(struct airoha_eth *eth)
{
	int p;

	for (p = 1; p <= ARRAY_SIZE(eth->ports); p++)
		airoha_fe_set(eth, REG_GDM_FWD_CFG(p),
			      GDM_TCP_CKSUM_MASK | GDM_UDP_CKSUM_MASK |
			      GDM_IP4_CKSUM_MASK | GDM_DROP_CRC_ERR_MASK);

	airoha_fe_rmw(eth, REG_CDM_VLAN_CTRL(1), CDM_VLAN_MASK,
		      FIELD_PREP(CDM_VLAN_MASK, 0x8100));

	airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PAD);
}

static void airoha_fe_vip_setup(struct airoha_eth *eth)
{
	airoha_fe_wr(eth, REG_FE_VIP_PATN(3), ETH_P_PPP_DISC);
	airoha_fe_wr(eth, REG_FE_VIP_EN(3), PATN_FCPU_EN_MASK | PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(4), PPP_LCP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(4),
		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
		     PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(6), PPP_IPCP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(6),
		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
		     PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(7), PPP_CHAP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(7),
		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
		     PATN_EN_MASK);

	/* BOOTP (0x43) */
	airoha_fe_wr(eth, REG_FE_VIP_PATN(8), 0x43);
	airoha_fe_wr(eth, REG_FE_VIP_EN(8),
		     PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);

	/* BOOTP (0x44) */
	airoha_fe_wr(eth, REG_FE_VIP_PATN(9), 0x44);
	airoha_fe_wr(eth, REG_FE_VIP_EN(9),
		     PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);

	/* ISAKMP */
	airoha_fe_wr(eth, REG_FE_VIP_PATN(10), 0x1f401f4);
	airoha_fe_wr(eth, REG_FE_VIP_EN(10),
		     PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(11), PPP_IPV6CP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(11),
		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
		     PATN_EN_MASK);

	/* DHCPv6 */
	airoha_fe_wr(eth, REG_FE_VIP_PATN(12), 0x2220223);
	airoha_fe_wr(eth, REG_FE_VIP_EN(12),
		     PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(19), PPP_PAP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(19),
		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
		     PATN_EN_MASK);

	/* ETH->ETH_P_1905 (0x893a) */
	airoha_fe_wr(eth, REG_FE_VIP_PATN(20), 0x893a);
	airoha_fe_wr(eth, REG_FE_VIP_EN(20),
		     PATN_FCPU_EN_MASK | PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(21), ETH_P_LLDP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(21),
		     PATN_FCPU_EN_MASK | PATN_EN_MASK);
}

static u32 airoha_fe_get_pse_queue_rsv_pages(struct airoha_eth *eth,
					     u32 port, u32 queue)
{
	u32 val;

	airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
		      PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK,
		      FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
		      FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue));
	val = airoha_fe_rr(eth, REG_FE_PSE_QUEUE_CFG_VAL);

	return FIELD_GET(PSE_CFG_OQ_RSV_MASK, val);
}

static void airoha_fe_set_pse_queue_rsv_pages(struct airoha_eth *eth,
					      u32 port, u32 queue, u32 val)
{
	airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_VAL, PSE_CFG_OQ_RSV_MASK,
		      FIELD_PREP(PSE_CFG_OQ_RSV_MASK, val));
	airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
		      PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK |
		      PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK,
		      FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
		      FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue) |
		      PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK);
}

static u32 airoha_fe_get_pse_all_rsv(struct airoha_eth *eth)
{
	u32 val = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET);

	return FIELD_GET(PSE_ALLRSV_MASK, val);
}

static int airoha_fe_set_pse_oq_rsv(struct airoha_eth *eth,
				    u32 port, u32 queue, u32 val)
{
	u32 orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue);
	u32 tmp, all_rsv, fq_limit;

	airoha_fe_set_pse_queue_rsv_pages(eth, port, queue, val);

	/* modify all rsv */
	all_rsv = airoha_fe_get_pse_all_rsv(eth);
	all_rsv += (val - orig_val);
	airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, PSE_ALLRSV_MASK,
		      FIELD_PREP(PSE_ALLRSV_MASK, all_rsv));

	/* modify hthd */
	tmp = airoha_fe_rr(eth, PSE_FQ_CFG);
	fq_limit = FIELD_GET(PSE_FQ_LIMIT_MASK, tmp);
	tmp = fq_limit - all_rsv - 0x20;
	airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
		      PSE_SHARE_USED_HTHD_MASK,
		      FIELD_PREP(PSE_SHARE_USED_HTHD_MASK, tmp));

	tmp = fq_limit - all_rsv - 0x100;
	airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
		      PSE_SHARE_USED_MTHD_MASK,
		      FIELD_PREP(PSE_SHARE_USED_MTHD_MASK, tmp));
	tmp = (3 * tmp) >> 2;
	airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET,
		      PSE_SHARE_USED_LTHD_MASK,
		      FIELD_PREP(PSE_SHARE_USED_LTHD_MASK, tmp));

	return 0;
}
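
/* The shared-buffer thresholds above track the global reservation.
 * Worked example with made-up numbers: if fq_limit == 0x1000 pages and
 * the updated all_rsv == 0x200 pages, then
 * hthd == 0x1000 - 0x200 - 0x20  == 0xde0,
 * mthd == 0x1000 - 0x200 - 0x100 == 0xd00 and
 * lthd == (3 * 0xd00) >> 2       == 0x9c0 pages.
 */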

static void airoha_fe_pse_ports_init(struct airoha_eth *eth)
{
	const u32 pse_port_num_queues[] = {
		[FE_PSE_PORT_CDM1] = 6,
		[FE_PSE_PORT_GDM1] = 6,
		[FE_PSE_PORT_GDM2] = 32,
		[FE_PSE_PORT_GDM3] = 6,
		[FE_PSE_PORT_PPE1] = 4,
		[FE_PSE_PORT_CDM2] = 6,
		[FE_PSE_PORT_CDM3] = 8,
		[FE_PSE_PORT_CDM4] = 10,
		[FE_PSE_PORT_PPE2] = 4,
		[FE_PSE_PORT_GDM4] = 2,
		[FE_PSE_PORT_CDM5] = 2,
	};
	u32 all_rsv;
	int q;

	all_rsv = airoha_fe_get_pse_all_rsv(eth);
	if (airoha_ppe_is_enabled(eth, 1)) {
		/* hw misses PPE2 oq rsv */
		all_rsv += PSE_RSV_PAGES *
			   pse_port_num_queues[FE_PSE_PORT_PPE2];
	}
	airoha_fe_set(eth, REG_FE_PSE_BUF_SET, all_rsv);

	/* CDM1 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM1]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM1, q,
					 PSE_QUEUE_RSV_PAGES);
	/* GDM1 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM1]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM1, q,
					 PSE_QUEUE_RSV_PAGES);
	/* GDM2 */
	for (q = 6; q < pse_port_num_queues[FE_PSE_PORT_GDM2]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM2, q, 0);
	/* GDM3 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM3]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM3, q,
					 PSE_QUEUE_RSV_PAGES);
	/* PPE1 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE1]; q++) {
		if (q < pse_port_num_queues[FE_PSE_PORT_PPE1] / 2)
			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q,
						 PSE_QUEUE_RSV_PAGES);
		else
			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q, 0);
	}
	/* CDM2 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM2]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM2, q,
					 PSE_QUEUE_RSV_PAGES);
	/* CDM3 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM3] - 1; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM3, q, 0);
	/* CDM4 */
	for (q = 4; q < pse_port_num_queues[FE_PSE_PORT_CDM4]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM4, q,
					 PSE_QUEUE_RSV_PAGES);
	if (airoha_ppe_is_enabled(eth, 1)) {
		/* PPE2 */
		for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) {
			if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2)
				airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2,
							 q,
							 PSE_QUEUE_RSV_PAGES);
			else
				airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2,
							 q, 0);
		}
	}
	/* GDM4 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM4]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM4, q,
					 PSE_QUEUE_RSV_PAGES);
	/* CDM5 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM5]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM5, q,
					 PSE_QUEUE_RSV_PAGES);
}

static int airoha_fe_mc_vlan_clear(struct airoha_eth *eth)
{
	int i;

	for (i = 0; i < AIROHA_FE_MC_MAX_VLAN_TABLE; i++) {
		int err, j;
		u32 val;

		airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);

		val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
		      MC_VLAN_CFG_TABLE_SEL_MASK | MC_VLAN_CFG_RW_MASK;
		airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
		err = read_poll_timeout(airoha_fe_rr, val,
					val & MC_VLAN_CFG_CMD_DONE_MASK,
					USEC_PER_MSEC, 5 * USEC_PER_MSEC,
					false, eth, REG_MC_VLAN_CFG);
		if (err)
			return err;

		for (j = 0; j < AIROHA_FE_MC_MAX_VLAN_PORT; j++) {
			airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);

			val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
			      FIELD_PREP(MC_VLAN_CFG_PORT_ID_MASK, j) |
			      MC_VLAN_CFG_RW_MASK;
			airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
			err = read_poll_timeout(airoha_fe_rr, val,
						val & MC_VLAN_CFG_CMD_DONE_MASK,
						USEC_PER_MSEC,
						5 * USEC_PER_MSEC, false, eth,
						REG_MC_VLAN_CFG);
			if (err)
				return err;
		}
	}

	return 0;
}
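
/* This is the frontend indirect-table access pattern: stage the payload
 * in REG_MC_VLAN_DATA, kick a command through REG_MC_VLAN_CFG and poll
 * for MC_VLAN_CFG_CMD_DONE_MASK. With the arguments used above,
 * read_poll_timeout() re-reads REG_MC_VLAN_CFG every 1ms and fails with
 * -ETIMEDOUT if the hw has not acked the command within 5ms.
 */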

static void airoha_fe_crsn_qsel_init(struct airoha_eth *eth)
{
	/* CDM1_CRSN_QSEL */
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_22 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_22),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_22),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_08 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_08),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_08),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_21 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_21),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_21),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_24 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_24),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_24),
				 CDM_CRSN_QSEL_Q6));
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_25 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_25),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_25),
				 CDM_CRSN_QSEL_Q1));
	/* CDM2_CRSN_QSEL */
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_08 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_08),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_08),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_21 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_21),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_21),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_22 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_22),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_22),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_24 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_24),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_24),
				 CDM_CRSN_QSEL_Q6));
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_25 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_25),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_25),
				 CDM_CRSN_QSEL_Q1));
}

static int airoha_fe_init(struct airoha_eth *eth)
{
	airoha_fe_maccr_init(eth);

	/* PSE IQ reserve */
	airoha_fe_rmw(eth, REG_PSE_IQ_REV1, PSE_IQ_RES1_P2_MASK,
		      FIELD_PREP(PSE_IQ_RES1_P2_MASK, 0x10));
	airoha_fe_rmw(eth, REG_PSE_IQ_REV2,
		      PSE_IQ_RES2_P5_MASK | PSE_IQ_RES2_P4_MASK,
		      FIELD_PREP(PSE_IQ_RES2_P5_MASK, 0x40) |
		      FIELD_PREP(PSE_IQ_RES2_P4_MASK, 0x34));

	/* enable FE copy engine for MC/KA/DPI */
	airoha_fe_wr(eth, REG_FE_PCE_CFG,
		     PCE_DPI_EN_MASK | PCE_KA_EN_MASK | PCE_MC_EN_MASK);
	/* set vip queue selection to ring 1 */
	airoha_fe_rmw(eth, REG_CDM_FWD_CFG(1), CDM_VIP_QSEL_MASK,
		      FIELD_PREP(CDM_VIP_QSEL_MASK, 0x4));
	airoha_fe_rmw(eth, REG_CDM_FWD_CFG(2), CDM_VIP_QSEL_MASK,
		      FIELD_PREP(CDM_VIP_QSEL_MASK, 0x4));
	/* set GDM4 source interface offset to 8 */
	airoha_fe_rmw(eth, REG_GDM_SRC_PORT_SET(4),
		      GDM_SPORT_OFF2_MASK |
		      GDM_SPORT_OFF1_MASK |
		      GDM_SPORT_OFF0_MASK,
		      FIELD_PREP(GDM_SPORT_OFF2_MASK, 8) |
		      FIELD_PREP(GDM_SPORT_OFF1_MASK, 8) |
		      FIELD_PREP(GDM_SPORT_OFF0_MASK, 8));

	/* set PSE Page as 128B */
	airoha_fe_rmw(eth, REG_FE_DMA_GLO_CFG,
		      FE_DMA_GLO_L2_SPACE_MASK | FE_DMA_GLO_PG_SZ_MASK,
		      FIELD_PREP(FE_DMA_GLO_L2_SPACE_MASK, 2) |
		      FE_DMA_GLO_PG_SZ_MASK);
	airoha_fe_wr(eth, REG_FE_RST_GLO_CFG,
		     FE_RST_CORE_MASK | FE_RST_GDM3_MBI_ARB_MASK |
		     FE_RST_GDM4_MBI_ARB_MASK);
	usleep_range(1000, 2000);

	/* connect RxRing1 and RxRing15 to PSE Port0 OQ-1
	 * connect other rings to PSE Port0 OQ-0
	 */
	airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP0, BIT(4));
	airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP1, BIT(28));
	airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP2, BIT(4));
	airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP3, BIT(28));

	airoha_fe_vip_setup(eth);
	airoha_fe_pse_ports_init(eth);

	airoha_fe_set(eth, REG_GDM_MISC_CFG,
		      GDM2_RDM_ACK_WAIT_PREF_MASK |
		      GDM2_CHN_VLD_MODE_MASK);
	airoha_fe_rmw(eth, REG_CDM_FWD_CFG(2), CDM_OAM_QSEL_MASK,
		      FIELD_PREP(CDM_OAM_QSEL_MASK, 15));

	/* init fragment and assemble Force Port */
	/* NPU Core-3, NPU Bridge Channel-3 */
	airoha_fe_rmw(eth, REG_IP_FRAG_FP,
		      IP_FRAGMENT_PORT_MASK | IP_FRAGMENT_NBQ_MASK,
		      FIELD_PREP(IP_FRAGMENT_PORT_MASK, 6) |
		      FIELD_PREP(IP_FRAGMENT_NBQ_MASK, 3));
	/* QDMA LAN, RX Ring-22 */
	airoha_fe_rmw(eth, REG_IP_FRAG_FP,
		      IP_ASSEMBLE_PORT_MASK | IP_ASSEMBLE_NBQ_MASK,
		      FIELD_PREP(IP_ASSEMBLE_PORT_MASK, 0) |
		      FIELD_PREP(IP_ASSEMBLE_NBQ_MASK, 22));

	airoha_fe_set(eth, REG_GDM_FWD_CFG(AIROHA_GDM3_IDX), GDM_PAD_EN_MASK);
	airoha_fe_set(eth, REG_GDM_FWD_CFG(AIROHA_GDM4_IDX), GDM_PAD_EN_MASK);

	airoha_fe_crsn_qsel_init(eth);

	airoha_fe_clear(eth, REG_FE_CPORT_CFG, FE_CPORT_QUEUE_XFC_MASK);
	airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PORT_XFC_MASK);

	/* default aging mode for mbi unlock issue */
	airoha_fe_rmw(eth, REG_GDM_CHN_RLS(2),
		      MBI_RX_AGE_SEL_MASK | MBI_TX_AGE_SEL_MASK,
		      FIELD_PREP(MBI_RX_AGE_SEL_MASK, 3) |
		      FIELD_PREP(MBI_TX_AGE_SEL_MASK, 3));

	/* disable IFC by default */
	airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK);

	/* enable 1:N vlan action, init vlan table */
	airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK);

	return airoha_fe_mc_vlan_clear(eth);
}

static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
{
	struct airoha_qdma *qdma = q->qdma;
	int qid = q - &qdma->q_rx[0];
	int nframes = 0;

	while (q->queued < q->ndesc - 1) {
		struct airoha_queue_entry *e = &q->entry[q->head];
		struct airoha_qdma_desc *desc = &q->desc[q->head];
		struct page *page;
		int offset;
		u32 val;

		page = page_pool_dev_alloc_frag(q->page_pool, &offset,
						q->buf_size);
		if (!page)
			break;

		q->head = (q->head + 1) % q->ndesc;
		q->queued++;
		nframes++;

		e->buf = page_address(page) + offset;
		e->dma_addr = page_pool_get_dma_addr(page) + offset;
		e->dma_len = SKB_WITH_OVERHEAD(q->buf_size);

		val = FIELD_PREP(QDMA_DESC_LEN_MASK, e->dma_len);
		WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
		WRITE_ONCE(desc->addr, cpu_to_le32(e->dma_addr));
		val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, q->head);
		WRITE_ONCE(desc->data, cpu_to_le32(val));
		WRITE_ONCE(desc->msg0, 0);
		WRITE_ONCE(desc->msg1, 0);
		WRITE_ONCE(desc->msg2, 0);
		WRITE_ONCE(desc->msg3, 0);
	}

	if (nframes)
		airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid),
				RX_RING_CPU_IDX_MASK,
				FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));

	return nframes;
}
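
/* The ring is refilled only up to ndesc - 1 buffers so head == tail can
 * unambiguously mean "empty". E.g. with ndesc == 4 a refill from empty
 * maps three buffers (q->queued == 3, head advanced to 3) and then bumps
 * REG_RX_CPU_IDX so the dma engine may consume them.
 */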

static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
				    struct airoha_qdma_desc *desc)
{
	u32 port, sport, msg1 = le32_to_cpu(desc->msg1);

	sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1);
	switch (sport) {
	case 0x10 ... 0x14:
		port = 0;
		break;
	case 0x2 ... 0x4:
		port = sport - 1;
		break;
	default:
		return -EINVAL;
	}

	return port >= ARRAY_SIZE(eth->ports) ? -EINVAL : port;
}
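
/* Decoded source-port (sport) to GDM index mapping: sport 0x10-0x14
 * resolves to port 0 (GDM1), sport 0x2-0x4 to ports 1-3 (presumably
 * GDM2-GDM4), anything else to -EINVAL. E.g. a descriptor carrying
 * sport == 0x3 selects eth->ports[2].
 */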

static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
{
	enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
	struct airoha_qdma *qdma = q->qdma;
	struct airoha_eth *eth = qdma->eth;
	int qid = q - &qdma->q_rx[0];
	int done = 0;

	while (done < budget) {
		struct airoha_queue_entry *e = &q->entry[q->tail];
		struct airoha_qdma_desc *desc = &q->desc[q->tail];
		u32 hash, reason, msg1 = le32_to_cpu(desc->msg1);
		struct page *page = virt_to_head_page(e->buf);
		u32 desc_ctrl = le32_to_cpu(desc->ctrl);
		struct airoha_gdm_port *port;
		int data_len, len, p;

		if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
			break;

		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;

		dma_sync_single_for_cpu(eth->dev, e->dma_addr,
					SKB_WITH_OVERHEAD(q->buf_size), dir);

		len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
		data_len = q->skb ? q->buf_size
				  : SKB_WITH_OVERHEAD(q->buf_size);
		if (!len || data_len < len)
			goto free_frag;

		p = airoha_qdma_get_gdm_port(eth, desc);
		if (p < 0 || !eth->ports[p])
			goto free_frag;

		port = eth->ports[p];
		if (!q->skb) { /* first buffer */
			q->skb = napi_build_skb(e->buf, q->buf_size);
			if (!q->skb)
				goto free_frag;

			__skb_put(q->skb, len);
			skb_mark_for_recycle(q->skb);
			q->skb->dev = port->dev;
			q->skb->protocol = eth_type_trans(q->skb, port->dev);
			q->skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb_record_rx_queue(q->skb, qid);
		} else { /* scattered frame */
			struct skb_shared_info *shinfo = skb_shinfo(q->skb);
			int nr_frags = shinfo->nr_frags;

			if (nr_frags >= ARRAY_SIZE(shinfo->frags))
				goto free_frag;

			skb_add_rx_frag(q->skb, nr_frags, page,
					e->buf - page_address(page), len,
					q->buf_size);
		}

		if (FIELD_GET(QDMA_DESC_MORE_MASK, desc_ctrl))
			continue;

		if (netdev_uses_dsa(port->dev)) {
			/* PPE module requires untagged packets to work
			 * properly and it provides DSA port index via the
			 * DMA descriptor. Report DSA tag to the DSA stack
			 * via skb dst info.
			 */
			u32 sptag = FIELD_GET(QDMA_ETH_RXMSG_SPTAG,
					      le32_to_cpu(desc->msg0));

			if (sptag < ARRAY_SIZE(port->dsa_meta) &&
			    port->dsa_meta[sptag])
				skb_dst_set_noref(q->skb,
						  &port->dsa_meta[sptag]->dst);
		}

		hash = FIELD_GET(AIROHA_RXD4_FOE_ENTRY, msg1);
		if (hash != AIROHA_RXD4_FOE_ENTRY)
			skb_set_hash(q->skb, jhash_1word(hash, 0),
				     PKT_HASH_TYPE_L4);

		reason = FIELD_GET(AIROHA_RXD4_PPE_CPU_REASON, msg1);
		if (reason == PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
			airoha_ppe_check_skb(&eth->ppe->dev, q->skb, hash,
					     false);

		done++;
		napi_gro_receive(&q->napi, q->skb);
		q->skb = NULL;
		continue;
free_frag:
		if (q->skb) {
			dev_kfree_skb(q->skb);
			q->skb = NULL;
		}
		page_pool_put_full_page(q->page_pool, page, true);
	}
	airoha_qdma_fill_rx_queue(q);

	return done;
}

static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
{
	struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
	int cur, done = 0;

	do {
		cur = airoha_qdma_rx_process(q, budget - done);
		done += cur;
	} while (cur && done < budget);

	if (done < budget && napi_complete(napi)) {
		struct airoha_qdma *qdma = q->qdma;
		int i, qid = q - &qdma->q_rx[0];
		int intr_reg = qid < RX_DONE_HIGH_OFFSET ? QDMA_INT_REG_IDX1
							 : QDMA_INT_REG_IDX2;

		for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
			if (!(BIT(qid) & RX_IRQ_BANK_PIN_MASK(i)))
				continue;

			airoha_qdma_irq_enable(&qdma->irq_banks[i], intr_reg,
					       BIT(qid % RX_DONE_HIGH_OFFSET));
		}
	}

	return done;
}

static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
				     struct airoha_qdma *qdma, int ndesc)
{
	const struct page_pool_params pp_params = {
		.order = 0,
		.pool_size = 256,
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.dma_dir = DMA_FROM_DEVICE,
		.max_len = PAGE_SIZE,
		.nid = NUMA_NO_NODE,
		.dev = qdma->eth->dev,
		.napi = &q->napi,
	};
	struct airoha_eth *eth = qdma->eth;
	int qid = q - &qdma->q_rx[0], thr;
	dma_addr_t dma_addr;

	q->buf_size = PAGE_SIZE / 2;
	q->ndesc = ndesc;
	q->qdma = qdma;

	q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(q->page_pool)) {
		int err = PTR_ERR(q->page_pool);

		q->page_pool = NULL;
		return err;
	}

	q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
				      &dma_addr, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll);

	airoha_qdma_wr(qdma, REG_RX_RING_BASE(qid), dma_addr);
	airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid),
			RX_RING_SIZE_MASK,
			FIELD_PREP(RX_RING_SIZE_MASK, ndesc));

	thr = clamp(ndesc >> 3, 1, 32);
	airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
			FIELD_PREP(RX_RING_THR_MASK, thr));
	airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
			FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
	airoha_qdma_set(qdma, REG_RX_SCATTER_CFG(qid), RX_RING_SG_EN_MASK);

	airoha_qdma_fill_rx_queue(q);

	return 0;
}

static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
{
	struct airoha_qdma *qdma = q->qdma;
	struct airoha_eth *eth = qdma->eth;
	int qid = q - &qdma->q_rx[0];

	while (q->queued) {
		struct airoha_queue_entry *e = &q->entry[q->tail];
		struct airoha_qdma_desc *desc = &q->desc[q->tail];
		struct page *page = virt_to_head_page(e->buf);

		dma_sync_single_for_cpu(eth->dev, e->dma_addr, e->dma_len,
					page_pool_get_dma_dir(q->page_pool));
		page_pool_put_full_page(q->page_pool, page, false);
		/* Reset DMA descriptor */
		WRITE_ONCE(desc->ctrl, 0);
		WRITE_ONCE(desc->addr, 0);
		WRITE_ONCE(desc->data, 0);
		WRITE_ONCE(desc->msg0, 0);
		WRITE_ONCE(desc->msg1, 0);
		WRITE_ONCE(desc->msg2, 0);
		WRITE_ONCE(desc->msg3, 0);

		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;
	}

	q->head = q->tail;
	airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
			FIELD_PREP(RX_RING_DMA_IDX_MASK, q->tail));
}

static int airoha_qdma_init_rx(struct airoha_qdma *qdma)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		int err;

		if (!(RX_DONE_INT_MASK & BIT(i))) {
			/* rx-queue not bound to an irq */
			continue;
		}

		err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma,
						RX_DSCP_NUM(i));
		if (err)
			return err;
	}

	return 0;
}

static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
{
	struct airoha_tx_irq_queue *irq_q;
	int id, done = 0, irq_queued;
	struct airoha_qdma *qdma;
	struct airoha_eth *eth;
	u32 status, head;

	irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
	qdma = irq_q->qdma;
	id = irq_q - &qdma->q_tx_irq[0];
	eth = qdma->eth;

	status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(id));
	head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
	head = head % irq_q->size;
	irq_queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);

	while (irq_queued > 0 && done < budget) {
		u32 qid, val = irq_q->q[head];
		struct airoha_qdma_desc *desc;
		struct airoha_queue_entry *e;
		struct airoha_queue *q;
		u32 index, desc_ctrl;
		struct sk_buff *skb;

		if (val == 0xff)
			break;

		irq_q->q[head] = 0xff; /* mark as done */
		head = (head + 1) % irq_q->size;
		irq_queued--;
		done++;

		qid = FIELD_GET(IRQ_RING_IDX_MASK, val);
		if (qid >= ARRAY_SIZE(qdma->q_tx))
			continue;

		q = &qdma->q_tx[qid];
		if (!q->ndesc)
			continue;

		index = FIELD_GET(IRQ_DESC_IDX_MASK, val);
		if (index >= q->ndesc)
			continue;

		spin_lock_bh(&q->lock);

		if (!q->queued)
			goto unlock;

		desc = &q->desc[index];
		desc_ctrl = le32_to_cpu(desc->ctrl);

		if (!(desc_ctrl & QDMA_DESC_DONE_MASK) &&
		    !(desc_ctrl & QDMA_DESC_DROP_MASK))
			goto unlock;

		e = &q->entry[index];
		skb = e->skb;

		dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
				 DMA_TO_DEVICE);
		e->dma_addr = 0;
		list_add_tail(&e->list, &q->tx_list);

		WRITE_ONCE(desc->msg0, 0);
		WRITE_ONCE(desc->msg1, 0);
		q->queued--;

		if (skb) {
			u16 queue = skb_get_queue_mapping(skb);
			struct netdev_queue *txq;

			txq = netdev_get_tx_queue(skb->dev, queue);
			netdev_tx_completed_queue(txq, 1, skb->len);
			if (netif_tx_queue_stopped(txq) &&
			    q->ndesc - q->queued >= q->free_thr)
				netif_tx_wake_queue(txq);

			dev_kfree_skb_any(skb);
		}
unlock:
		spin_unlock_bh(&q->lock);
	}

	if (done) {
		int i, len = done >> 7;

		for (i = 0; i < len; i++)
			airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
					IRQ_CLEAR_LEN_MASK, 0x80);
		airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
				IRQ_CLEAR_LEN_MASK, (done & 0x7f));
	}

	if (done < budget && napi_complete(napi))
		airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0,
				       TX_DONE_INT_MASK(id));

	return done;
}
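
/* Completed irq-queue entries are acked back to the hw in chunks:
 * done >> 7 writes of 0x80 entries followed by the remainder. Worked
 * example: done == 300 -> two writes of 0x80 (256 entries) plus a final
 * write of 300 & 0x7f == 44.
 */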

static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
				     struct airoha_qdma *qdma, int size)
{
	struct airoha_eth *eth = qdma->eth;
	int i, qid = q - &qdma->q_tx[0];
	dma_addr_t dma_addr;

	spin_lock_init(&q->lock);
	q->ndesc = size;
	q->qdma = qdma;
	q->free_thr = 1 + MAX_SKB_FRAGS;
	INIT_LIST_HEAD(&q->tx_list);

	q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
				      &dma_addr, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	for (i = 0; i < q->ndesc; i++) {
		u32 val = FIELD_PREP(QDMA_DESC_DONE_MASK, 1);

		list_add_tail(&q->entry[i].list, &q->tx_list);
		WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
	}

	/* xmit ring drop default setting */
	airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(qid),
			TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK);

	airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
	airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
			FIELD_PREP(TX_RING_CPU_IDX_MASK, 0));
	airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
			FIELD_PREP(TX_RING_DMA_IDX_MASK, 0));

	return 0;
}

static int airoha_qdma_tx_irq_init(struct airoha_tx_irq_queue *irq_q,
				   struct airoha_qdma *qdma, int size)
{
	int id = irq_q - &qdma->q_tx_irq[0];
	struct airoha_eth *eth = qdma->eth;
	dma_addr_t dma_addr;

	netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
			  airoha_qdma_tx_napi_poll);
	irq_q->q = dmam_alloc_coherent(eth->dev, size * sizeof(u32),
				       &dma_addr, GFP_KERNEL);
	if (!irq_q->q)
		return -ENOMEM;

	memset(irq_q->q, 0xff, size * sizeof(u32));
	irq_q->size = size;
	irq_q->qdma = qdma;

	airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr);
	airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
			FIELD_PREP(TX_IRQ_DEPTH_MASK, size));
	airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK,
			FIELD_PREP(TX_IRQ_THR_MASK, 1));

	return 0;
}

static int airoha_qdma_init_tx(struct airoha_qdma *qdma)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
		err = airoha_qdma_tx_irq_init(&qdma->q_tx_irq[i], qdma,
					      IRQ_QUEUE_LEN(i));
		if (err)
			return err;
	}

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
		err = airoha_qdma_init_tx_queue(&qdma->q_tx[i], qdma,
						TX_DSCP_NUM);
		if (err)
			return err;
	}

	return 0;
}

static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
{
	struct airoha_eth *eth = q->qdma->eth;
	int i;

	spin_lock_bh(&q->lock);
	for (i = 0; i < q->ndesc; i++) {
		struct airoha_queue_entry *e = &q->entry[i];

		if (!e->dma_addr)
			continue;

		dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(e->skb);
		e->dma_addr = 0;
		e->skb = NULL;
		list_add_tail(&e->list, &q->tx_list);
		q->queued--;
	}
	spin_unlock_bh(&q->lock);
}

static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
{
	int size, index, num_desc = HW_DSCP_NUM;
	struct airoha_eth *eth = qdma->eth;
	int id = qdma - &eth->qdma[0];
	u32 status, buf_size;
	dma_addr_t dma_addr;
	const char *name;

	name = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d-buf", id);
	if (!name)
		return -ENOMEM;

	buf_size = id ? AIROHA_MAX_PACKET_SIZE / 2 : AIROHA_MAX_PACKET_SIZE;
	index = of_property_match_string(eth->dev->of_node,
					 "memory-region-names", name);
	if (index >= 0) {
		struct reserved_mem *rmem;
		struct device_node *np;

		/* Consume reserved memory for hw forwarding buffers queue if
		 * available in the DTS
		 */
		np = of_parse_phandle(eth->dev->of_node, "memory-region",
				      index);
		if (!np)
			return -ENODEV;

		rmem = of_reserved_mem_lookup(np);
		of_node_put(np);
		dma_addr = rmem->base;
		/* Compute the number of hw descriptors according to the
		 * reserved memory size and the payload buffer size
		 */
		num_desc = div_u64(rmem->size, buf_size);
	} else {
		size = buf_size * num_desc;
		if (!dmam_alloc_coherent(eth->dev, size, &dma_addr,
					 GFP_KERNEL))
			return -ENOMEM;
	}

	airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);

	size = num_desc * sizeof(struct airoha_qdma_fwd_desc);
	if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, GFP_KERNEL))
		return -ENOMEM;

	airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
	/* QDMA0: 2KB. QDMA1: 1KB */
	airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
			HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
			FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, !!id));
	airoha_qdma_rmw(qdma, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
			FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
	airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
			LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
			HW_FWD_DESC_NUM_MASK,
			FIELD_PREP(HW_FWD_DESC_NUM_MASK, num_desc) |
			LMGR_INIT_START | LMGR_SRAM_MODE_MASK);

	return read_poll_timeout(airoha_qdma_rr, status,
				 !(status & LMGR_INIT_START), USEC_PER_MSEC,
				 30 * USEC_PER_MSEC, true, qdma,
				 REG_LMGR_INIT_CFG);
}
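
/* When a reserved-memory region backs the hw forwarding buffers, the
 * descriptor count is derived as rmem->size / buf_size; e.g. an 8 MiB
 * region with a hypothetical 1 KiB buf_size yields 8192 descriptors.
 * Without a region the compile-time HW_DSCP_NUM is kept and
 * buf_size * num_desc bytes are allocated coherently instead.
 */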

static void airoha_qdma_init_qos(struct airoha_qdma *qdma)
{
	airoha_qdma_clear(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
	airoha_qdma_set(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);

	airoha_qdma_clear(qdma, REG_PSE_BUF_USAGE_CFG,
			  PSE_BUF_ESTIMATE_EN_MASK);

	airoha_qdma_set(qdma, REG_EGRESS_RATE_METER_CFG,
			EGRESS_RATE_METER_EN_MASK |
			EGRESS_RATE_METER_EQ_RATE_EN_MASK);
	/* 2047us x 31 = 63.457ms */
	airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
			EGRESS_RATE_METER_WINDOW_SZ_MASK,
			FIELD_PREP(EGRESS_RATE_METER_WINDOW_SZ_MASK, 0x1f));
	airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
			EGRESS_RATE_METER_TIMESLICE_MASK,
			FIELD_PREP(EGRESS_RATE_METER_TIMESLICE_MASK, 0x7ff));

	/* ratelimit init */
	airoha_qdma_set(qdma, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK);
	/* fast-tick 25us */
	airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK,
			FIELD_PREP(GLB_FAST_TICK_MASK, 25));
	airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK,
			FIELD_PREP(GLB_SLOW_TICK_RATIO_MASK, 40));

	airoha_qdma_set(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK);
	airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK,
			FIELD_PREP(EGRESS_FAST_TICK_MASK, 25));
	airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG,
			EGRESS_SLOW_TICK_RATIO_MASK,
			FIELD_PREP(EGRESS_SLOW_TICK_RATIO_MASK, 40));

	airoha_qdma_set(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK);
	airoha_qdma_clear(qdma, REG_INGRESS_TRTCM_CFG,
			  INGRESS_TRTCM_MODE_MASK);
	airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK,
			FIELD_PREP(INGRESS_FAST_TICK_MASK, 125));
	airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG,
			INGRESS_SLOW_TICK_RATIO_MASK,
			FIELD_PREP(INGRESS_SLOW_TICK_RATIO_MASK, 8));

	airoha_qdma_set(qdma, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
	airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
			FIELD_PREP(SLA_FAST_TICK_MASK, 25));
	airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
			FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
}
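
/* Assuming the slow tick is fast_tick * slow_tick_ratio (which the
 * register naming suggests), the global/egress/SLA meters above run a
 * 25us fast tick with a 25 * 40 == 1000us slow tick, and ingress a
 * 125us fast tick with a 125 * 8 == 1000us slow tick.
 */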

static void airoha_qdma_init_qos_stats(struct airoha_qdma *qdma)
{
	int i;

	for (i = 0; i < AIROHA_NUM_QOS_CHANNELS; i++) {
		/* Tx-cpu transferred count */
		airoha_qdma_wr(qdma, REG_CNTR_VAL(i << 1), 0);
		airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1),
			       CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
			       CNTR_ALL_DSCP_RING_EN_MASK |
			       FIELD_PREP(CNTR_CHAN_MASK, i));
		/* Tx-fwd transferred count */
		airoha_qdma_wr(qdma, REG_CNTR_VAL((i << 1) + 1), 0);
		airoha_qdma_wr(qdma, REG_CNTR_CFG((i << 1) + 1),
			       CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
			       CNTR_ALL_DSCP_RING_EN_MASK |
			       FIELD_PREP(CNTR_SRC_MASK, 1) |
			       FIELD_PREP(CNTR_CHAN_MASK, i));
	}
}

static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
		/* clear pending irqs */
		airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);
		/* setup rx irqs */
		airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX0,
				       INT_RX0_MASK(RX_IRQ_BANK_PIN_MASK(i)));
		airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX1,
				       INT_RX1_MASK(RX_IRQ_BANK_PIN_MASK(i)));
		airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX2,
				       INT_RX2_MASK(RX_IRQ_BANK_PIN_MASK(i)));
		airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX3,
				       INT_RX3_MASK(RX_IRQ_BANK_PIN_MASK(i)));
	}
	/* setup tx irqs */
	airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0,
			       TX_COHERENT_LOW_INT_MASK | INT_TX_MASK);
	airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX4,
			       TX_COHERENT_HIGH_INT_MASK);

	/* setup irq binding */
	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
		if (!qdma->q_tx[i].ndesc)
			continue;

		if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i))
			airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(i),
					TX_RING_IRQ_BLOCKING_CFG_MASK);
		else
			airoha_qdma_clear(qdma, REG_TX_RING_BLOCKING(i),
					  TX_RING_IRQ_BLOCKING_CFG_MASK);
	}

	airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
		       FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
		       GLOBAL_CFG_CPU_TXR_RR_MASK |
		       GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK |
		       GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK |
		       GLOBAL_CFG_MULTICAST_EN_MASK |
		       GLOBAL_CFG_IRQ0_EN_MASK | GLOBAL_CFG_IRQ1_EN_MASK |
		       GLOBAL_CFG_TX_WB_DONE_MASK |
		       FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2));

	airoha_qdma_init_qos(qdma);

	/* disable qdma rx delay interrupt */
	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		if (!qdma->q_rx[i].ndesc)
			continue;

		airoha_qdma_clear(qdma, REG_RX_DELAY_INT_IDX(i),
				  RX_DELAY_INT_MASK);
	}

	airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG,
			TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);
	airoha_qdma_init_qos_stats(qdma);

	return 0;
}

static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
{
	struct airoha_irq_bank *irq_bank = dev_instance;
	struct airoha_qdma *qdma = irq_bank->qdma;
	u32 rx_intr_mask = 0, rx_intr1, rx_intr2;
	u32 intr[ARRAY_SIZE(irq_bank->irqmask)];
	int i;

	for (i = 0; i < ARRAY_SIZE(intr); i++) {
		intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
		intr[i] &= irq_bank->irqmask[i];
		airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
	}

	if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state))
		return IRQ_NONE;

	rx_intr1 = intr[1] & RX_DONE_LOW_INT_MASK;
	if (rx_intr1) {
		airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX1, rx_intr1);
		rx_intr_mask |= rx_intr1;
	}

	rx_intr2 = intr[2] & RX_DONE_HIGH_INT_MASK;
	if (rx_intr2) {
		airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX2, rx_intr2);
		rx_intr_mask |= (rx_intr2 << 16);
	}

	for (i = 0; rx_intr_mask && i < ARRAY_SIZE(qdma->q_rx); i++) {
		if (!qdma->q_rx[i].ndesc)
			continue;

		if (rx_intr_mask & BIT(i))
			napi_schedule(&qdma->q_rx[i].napi);
	}

	if (intr[0] & INT_TX_MASK) {
		for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
			if (!(intr[0] & TX_DONE_INT_MASK(i)))
				continue;

			airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX0,
						TX_DONE_INT_MASK(i));
			napi_schedule(&qdma->q_tx_irq[i].napi);
		}
	}

	return IRQ_HANDLED;
}

static int airoha_qdma_init_irq_banks(struct platform_device *pdev,
				      struct airoha_qdma *qdma)
{
	struct airoha_eth *eth = qdma->eth;
	int i, id = qdma - &eth->qdma[0];

	for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
		struct airoha_irq_bank *irq_bank = &qdma->irq_banks[i];
		int err, irq_index = 4 * id + i;
		const char *name;

		spin_lock_init(&irq_bank->irq_lock);
		irq_bank->qdma = qdma;

		irq_bank->irq = platform_get_irq(pdev, irq_index);
		if (irq_bank->irq < 0)
			return irq_bank->irq;

		name = devm_kasprintf(eth->dev, GFP_KERNEL,
				      KBUILD_MODNAME ".%d", irq_index);
		if (!name)
			return -ENOMEM;

		err = devm_request_irq(eth->dev, irq_bank->irq,
				       airoha_irq_handler, IRQF_SHARED, name,
				       irq_bank);
		if (err)
			return err;
	}

	return 0;
}

static int airoha_qdma_init(struct platform_device *pdev,
			    struct airoha_eth *eth,
			    struct airoha_qdma *qdma)
{
	int err, id = qdma - &eth->qdma[0];
	const char *res;

	qdma->eth = eth;
	res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id);
	if (!res)
		return -ENOMEM;

	qdma->regs = devm_platform_ioremap_resource_byname(pdev, res);
	if (IS_ERR(qdma->regs))
		return dev_err_probe(eth->dev, PTR_ERR(qdma->regs),
				     "failed to iomap qdma%d regs\n", id);

	err = airoha_qdma_init_irq_banks(pdev, qdma);
	if (err)
		return err;

	err = airoha_qdma_init_rx(qdma);
	if (err)
		return err;

	err = airoha_qdma_init_tx(qdma);
	if (err)
		return err;

	err = airoha_qdma_init_hfwd_queues(qdma);
	if (err)
		return err;

	return airoha_qdma_hw_init(qdma);
}

static void airoha_qdma_cleanup(struct airoha_qdma *qdma)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		if (!qdma->q_rx[i].ndesc)
			continue;

		netif_napi_del(&qdma->q_rx[i].napi);
		airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]);
		if (qdma->q_rx[i].page_pool) {
			page_pool_destroy(qdma->q_rx[i].page_pool);
			qdma->q_rx[i].page_pool = NULL;
		}
	}

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
		netif_napi_del(&qdma->q_tx_irq[i].napi);

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
		if (!qdma->q_tx[i].ndesc)
			continue;

		airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
	}
}

static int airoha_hw_init(struct platform_device *pdev,
			  struct airoha_eth *eth)
{
	int err, i;

	/* disable xsi */
	err = reset_control_bulk_assert(eth->soc->num_xsi_rsts, eth->xsi_rsts);
	if (err)
		return err;

	err = reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts);
	if (err)
		return err;

	msleep(20);
	err = reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts);
	if (err)
		return err;

	msleep(20);
	err = airoha_fe_init(eth);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
		err = airoha_qdma_init(pdev, eth, &eth->qdma[i]);
		if (err)
			goto error;
	}

	err = airoha_ppe_init(eth);
	if (err)
		goto error;

	set_bit(DEV_STATE_INITIALIZED, &eth->state);

	return 0;
error:
	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
		airoha_qdma_cleanup(&eth->qdma[i]);

	return err;
}

static void airoha_hw_cleanup(struct airoha_eth *eth)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
		airoha_qdma_cleanup(&eth->qdma[i]);
	airoha_ppe_deinit(eth);
}

static void airoha_qdma_start_napi(struct airoha_qdma *qdma)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
		napi_enable(&qdma->q_tx_irq[i].napi);

	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		if (!qdma->q_rx[i].ndesc)
			continue;

		napi_enable(&qdma->q_rx[i].napi);
	}
}

static void airoha_qdma_stop_napi(struct airoha_qdma *qdma)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
		napi_disable(&qdma->q_tx_irq[i].napi);

	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		if (!qdma->q_rx[i].ndesc)
			continue;

		napi_disable(&qdma->q_rx[i].napi);
	}
}

static void airoha_update_hw_stats(struct airoha_gdm_port *port)
{
	struct airoha_eth *eth = port->qdma->eth;
	u32 val, i = 0;

	spin_lock(&port->stats.lock);
	u64_stats_update_begin(&port->stats.syncp);

	/* TX */
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_H(port->id));
	port->stats.tx_ok_pkts += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_L(port->id));
	port->stats.tx_ok_pkts += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_H(port->id));
	port->stats.tx_ok_bytes += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_L(port->id));
	port->stats.tx_ok_bytes += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_DROP_CNT(port->id));
	port->stats.tx_drops += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_BC_CNT(port->id));
	port->stats.tx_broadcast += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_MC_CNT(port->id));
	port->stats.tx_multicast += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_RUNT_CNT(port->id));
	port->stats.tx_len[i] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_LONG_CNT(port->id));
	port->stats.tx_len[i++] += val;

	/* RX */
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_H(port->id));
	port->stats.rx_ok_pkts += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_L(port->id));
	port->stats.rx_ok_pkts += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_H(port->id));
	port->stats.rx_ok_bytes += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_L(port->id));
	port->stats.rx_ok_bytes += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_DROP_CNT(port->id));
	port->stats.rx_drops += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_BC_CNT(port->id));
	port->stats.rx_broadcast += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_MC_CNT(port->id));
	port->stats.rx_multicast += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ERROR_DROP_CNT(port->id));
	port->stats.rx_errors += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_CRC_ERR_CNT(port->id));
	port->stats.rx_crc_error += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OVERFLOW_DROP_CNT(port->id));
	port->stats.rx_over_errors += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_FRAG_CNT(port->id));
	port->stats.rx_fragment += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_JABBER_CNT(port->id));
	port->stats.rx_jabber += val;

	i = 0;
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_RUNT_CNT(port->id));
	port->stats.rx_len[i] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_LONG_CNT(port->id));
	port->stats.rx_len[i++] += val;

	/* reset mib counters */
	airoha_fe_set(eth, REG_FE_GDM_MIB_CLEAR(port->id),
		      FE_GDM_MIB_RX_CLEAR_MASK | FE_GDM_MIB_TX_CLEAR_MASK);

	u64_stats_update_end(&port->stats.syncp);
	spin_unlock(&port->stats.lock);
}
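
/* Each 64-bit MIB counter is composed from a high/low register pair as
 * ((u64)hi << 32) + lo; e.g. hi == 0x2, lo == 0x80000000 adds
 * 0x280000000 to the running total. The REG_FE_GDM_MIB_CLEAR write at
 * the end resets the hw counters, so every pass accumulates deltas
 * rather than absolute values.
 */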
1640 
1641 static int airoha_dev_open(struct net_device *dev)
1642 {
1643 	int err, len = ETH_HLEN + dev->mtu + ETH_FCS_LEN;
1644 	struct airoha_gdm_port *port = netdev_priv(dev);
1645 	struct airoha_qdma *qdma = port->qdma;
1646 	u32 pse_port = FE_PSE_PORT_PPE1;
1647 
1648 	netif_tx_start_all_queues(dev);
1649 	err = airoha_set_vip_for_gdm_port(port, true);
1650 	if (err)
1651 		return err;
1652 
1653 	if (netdev_uses_dsa(dev))
1654 		airoha_fe_set(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
1655 			      GDM_STAG_EN_MASK);
1656 	else
1657 		airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
1658 				GDM_STAG_EN_MASK);
1659 
1660 	airoha_fe_rmw(qdma->eth, REG_GDM_LEN_CFG(port->id),
1661 		      GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
1662 		      FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
1663 		      FIELD_PREP(GDM_LONG_LEN_MASK, len));
1664 
1665 	airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
1666 			GLOBAL_CFG_TX_DMA_EN_MASK |
1667 			GLOBAL_CFG_RX_DMA_EN_MASK);
1668 	atomic_inc(&qdma->users);
1669 
1670 	if (port->id == AIROHA_GDM2_IDX &&
1671 	    airoha_ppe_is_enabled(qdma->eth, 1)) {
1672 		/* For PPE2 always use secondary cpu port. */
1673 		pse_port = FE_PSE_PORT_PPE2;
1674 	}
1675 	airoha_set_gdm_port_fwd_cfg(qdma->eth, REG_GDM_FWD_CFG(port->id),
1676 				    pse_port);
1677 
1678 	return 0;
1679 }
1680 
1681 static int airoha_dev_stop(struct net_device *dev)
1682 {
1683 	struct airoha_gdm_port *port = netdev_priv(dev);
1684 	struct airoha_qdma *qdma = port->qdma;
1685 	int i, err;
1686 
1687 	netif_tx_disable(dev);
1688 	err = airoha_set_vip_for_gdm_port(port, false);
1689 	if (err)
1690 		return err;
1691 
1692 	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++)
1693 		netdev_tx_reset_subqueue(dev, i);
1694 
1695 	airoha_set_gdm_port_fwd_cfg(qdma->eth, REG_GDM_FWD_CFG(port->id),
1696 				    FE_PSE_PORT_DROP);
1697 
1698 	if (atomic_dec_and_test(&qdma->users)) {
1699 		airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
1700 				  GLOBAL_CFG_TX_DMA_EN_MASK |
1701 				  GLOBAL_CFG_RX_DMA_EN_MASK);
1702 
1703 		for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
1704 			if (!qdma->q_tx[i].ndesc)
1705 				continue;
1706 
1707 			airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
1708 		}
1709 	}
1710 
1711 	return 0;
1712 }
1713 
1714 static int airoha_dev_set_macaddr(struct net_device *dev, void *p)
1715 {
1716 	struct airoha_gdm_port *port = netdev_priv(dev);
1717 	int err;
1718 
1719 	err = eth_mac_addr(dev, p);
1720 	if (err)
1721 		return err;
1722 
1723 	airoha_set_macaddr(port, dev->dev_addr);
1724 
1725 	return 0;
1726 }
1727 
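/* Redirect the traffic of a serdes-attached WAN port (GDM3/GDM4) through
 * GDM2 configured in hardware loopback. airoha_dev_init() below enables
 * this only when GDM2 itself is unused.
 */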
1728 static int airoha_set_gdm2_loopback(struct airoha_gdm_port *port)
1729 {
1730 	struct airoha_eth *eth = port->qdma->eth;
1731 	u32 val, pse_port, chan, nbq;
1732 	int src_port;
1733 
1734 	/* Forward the traffic to the proper GDM port */
1735 	pse_port = port->id == AIROHA_GDM3_IDX ? FE_PSE_PORT_GDM3
1736 					       : FE_PSE_PORT_GDM4;
1737 	airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(AIROHA_GDM2_IDX),
1738 				    pse_port);
1739 	airoha_fe_clear(eth, REG_GDM_FWD_CFG(AIROHA_GDM2_IDX),
1740 			GDM_STRIP_CRC_MASK);
1741 
1742 	/* Enable GDM2 loopback */
1743 	airoha_fe_wr(eth, REG_GDM_TXCHN_EN(AIROHA_GDM2_IDX), 0xffffffff);
1744 	airoha_fe_wr(eth, REG_GDM_RXCHN_EN(AIROHA_GDM2_IDX), 0xffff);
1745 
1746 	chan = port->id == AIROHA_GDM3_IDX ? airoha_is_7581(eth) ? 4 : 3 : 0;
1747 	airoha_fe_rmw(eth, REG_GDM_LPBK_CFG(AIROHA_GDM2_IDX),
1748 		      LPBK_CHAN_MASK | LPBK_MODE_MASK | LPBK_EN_MASK,
1749 		      FIELD_PREP(LPBK_CHAN_MASK, chan) |
1750 		      LBK_GAP_MODE_MASK | LBK_LEN_MODE_MASK |
1751 		      LBK_CHAN_MODE_MASK | LPBK_EN_MASK);
1752 	airoha_fe_rmw(eth, REG_GDM_LEN_CFG(AIROHA_GDM2_IDX),
1753 		      GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
1754 		      FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
1755 		      FIELD_PREP(GDM_LONG_LEN_MASK, AIROHA_MAX_MTU));
1756 
1757 	/* Disable VIP and IFC for GDM2 */
1758 	airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, BIT(AIROHA_GDM2_IDX));
1759 	airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, BIT(AIROHA_GDM2_IDX));
1760 
1761 	/* XXX: handle XSI_USB_PORT and XSI_PCIE1_PORT */
1762 	nbq = port->id == AIROHA_GDM3_IDX && airoha_is_7581(eth) ? 4 : 0;
1763 	src_port = eth->soc->ops.get_src_port_id(port, nbq);
1764 	if (src_port < 0)
1765 		return src_port;
1766 
1767 	airoha_fe_rmw(eth, REG_FE_WAN_PORT,
1768 		      WAN1_EN_MASK | WAN1_MASK | WAN0_MASK,
1769 		      FIELD_PREP(WAN0_MASK, src_port));
1770 	val = src_port & SP_CPORT_DFT_MASK;
1771 	airoha_fe_rmw(eth,
1772 		      REG_SP_DFT_CPORT(src_port >> fls(SP_CPORT_DFT_MASK)),
1773 		      SP_CPORT_MASK(val),
1774 		      __field_prep(SP_CPORT_MASK(val), FE_PSE_PORT_CDM2));
1775 
1776 	if (port->id == AIROHA_GDM4_IDX && airoha_is_7581(eth)) {
1777 		u32 mask = FC_ID_OF_SRC_PORT_MASK(nbq);
1778 
1779 		airoha_fe_rmw(eth, REG_SRC_PORT_FC_MAP6, mask,
1780 			      __field_prep(mask, AIROHA_GDM2_IDX));
1781 	}
1782 
1783 	return 0;
1784 }
1785 
1786 static int airoha_dev_init(struct net_device *dev)
1787 {
1788 	struct airoha_gdm_port *port = netdev_priv(dev);
1789 	struct airoha_eth *eth = port->eth;
1790 	int i;
1791 
1792 	/* QDMA0 is used for LAN ports while QDMA1 is used for WAN ports */
1793 	port->qdma = &eth->qdma[!airoha_is_lan_gdm_port(port)];
1794 	port->dev->irq = port->qdma->irq_banks[0].irq;
1795 	airoha_set_macaddr(port, dev->dev_addr);
1796 
1797 	switch (port->id) {
1798 	case AIROHA_GDM3_IDX:
1799 	case AIROHA_GDM4_IDX:
1800 		/* If GDM2 is active, we can't enable loopback */
1801 		if (!eth->ports[1]) {
1802 			int err;
1803 
1804 			err = airoha_set_gdm2_loopback(port);
1805 			if (err)
1806 				return err;
1807 		}
1808 		break;
1809 	default:
1810 		break;
1811 	}
1812 
1813 	for (i = 0; i < eth->soc->num_ppe; i++)
1814 		airoha_ppe_set_cpu_port(port, i);
1815 
1816 	return 0;
1817 }
1818 
1819 static void airoha_dev_get_stats64(struct net_device *dev,
1820 				   struct rtnl_link_stats64 *storage)
1821 {
1822 	struct airoha_gdm_port *port = netdev_priv(dev);
1823 	unsigned int start;
1824 
1825 	airoha_update_hw_stats(port);
1826 	do {
1827 		start = u64_stats_fetch_begin(&port->stats.syncp);
1828 		storage->rx_packets = port->stats.rx_ok_pkts;
1829 		storage->tx_packets = port->stats.tx_ok_pkts;
1830 		storage->rx_bytes = port->stats.rx_ok_bytes;
1831 		storage->tx_bytes = port->stats.tx_ok_bytes;
1832 		storage->multicast = port->stats.rx_multicast;
1833 		storage->rx_errors = port->stats.rx_errors;
1834 		storage->rx_dropped = port->stats.rx_drops;
1835 		storage->tx_dropped = port->stats.tx_drops;
1836 		storage->rx_crc_errors = port->stats.rx_crc_error;
1837 		storage->rx_over_errors = port->stats.rx_over_errors;
1838 	} while (u64_stats_fetch_retry(&port->stats.syncp, start));
1839 }
1840 
1841 static int airoha_dev_change_mtu(struct net_device *dev, int mtu)
1842 {
1843 	struct airoha_gdm_port *port = netdev_priv(dev);
1844 	struct airoha_eth *eth = port->qdma->eth;
1845 	u32 len = ETH_HLEN + mtu + ETH_FCS_LEN;
1846 
1847 	airoha_fe_rmw(eth, REG_GDM_LEN_CFG(port->id),
1848 		      GDM_LONG_LEN_MASK,
1849 		      FIELD_PREP(GDM_LONG_LEN_MASK, len));
1850 	WRITE_ONCE(dev->mtu, mtu);
1851 
1852 	return 0;
1853 }
1854 
1855 static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb,
1856 				   struct net_device *sb_dev)
1857 {
1858 	struct airoha_gdm_port *port = netdev_priv(dev);
1859 	int queue, channel;
1860 
1861 	/* For DSA devices select the QoS channel according to the DSA user
1862 	 * port index, rely on the port id otherwise. Select the QoS queue
1863 	 * based on the skb priority.
1864 	 */
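	/* Worked example, assuming a hypothetical sizing of 8 QoS queues per
	 * channel: a non-DSA port with id 1 and skb->priority 3 maps to
	 * channel 1 and queue 1 * 8 + (3 - 1) % 8 = 10.
	 */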
1865 	channel = netdev_uses_dsa(dev) ? skb_get_queue_mapping(skb) : port->id;
1866 	channel = channel % AIROHA_NUM_QOS_CHANNELS;
1867 	queue = (skb->priority - 1) % AIROHA_NUM_QOS_QUEUES; /* QoS queue */
1868 	queue = channel * AIROHA_NUM_QOS_QUEUES + queue;
1869 
1870 	return queue < dev->num_tx_queues ? queue : 0;
1871 }
1872 
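/* Extract the MTK DSA tag (if any) so it can be placed in the TX
 * descriptor SP tag field. For VLAN-tagged frames the MTK tag encodes the
 * TPID, so rewrite the in-line EtherType accordingly; otherwise strip the
 * 4-byte tag from the frame since the PPE expects untagged DSA packets.
 */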
1873 static u32 airoha_get_dsa_tag(struct sk_buff *skb, struct net_device *dev)
1874 {
1875 #if IS_ENABLED(CONFIG_NET_DSA)
1876 	struct ethhdr *ehdr;
1877 	u8 xmit_tpid;
1878 	u16 tag;
1879 
1880 	if (!netdev_uses_dsa(dev))
1881 		return 0;
1882 
1883 	if (dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
1884 		return 0;
1885 
1886 	if (skb_cow_head(skb, 0))
1887 		return 0;
1888 
1889 	ehdr = (struct ethhdr *)skb->data;
1890 	tag = be16_to_cpu(ehdr->h_proto);
1891 	xmit_tpid = tag >> 8;
1892 
1893 	switch (xmit_tpid) {
1894 	case MTK_HDR_XMIT_TAGGED_TPID_8100:
1895 		ehdr->h_proto = cpu_to_be16(ETH_P_8021Q);
1896 		tag &= ~(MTK_HDR_XMIT_TAGGED_TPID_8100 << 8);
1897 		break;
1898 	case MTK_HDR_XMIT_TAGGED_TPID_88A8:
1899 		ehdr->h_proto = cpu_to_be16(ETH_P_8021AD);
1900 		tag &= ~(MTK_HDR_XMIT_TAGGED_TPID_88A8 << 8);
1901 		break;
1902 	default:
1903 		/* The PPE module requires untagged DSA packets to work
1904 		 * properly, so move the DSA tag to the DMA descriptor.
1905 		 */
1906 		memmove(skb->data + MTK_HDR_LEN, skb->data, 2 * ETH_ALEN);
1907 		__skb_pull(skb, MTK_HDR_LEN);
1908 		break;
1909 	}
1910 
1911 	return tag;
1912 #else
1913 	return 0;
1914 #endif
1915 }
1916 
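/* Map a GDM port to its frontend PSE port id. The mapping is SoC
 * dependent: AN7583 remaps only GDM3 while EN7581 remaps only GDM4;
 * the other ports use their GDM index directly.
 */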
1917 int airoha_get_fe_port(struct airoha_gdm_port *port)
1918 {
1919 	struct airoha_qdma *qdma = port->qdma;
1920 	struct airoha_eth *eth = qdma->eth;
1921 
1922 	switch (eth->soc->version) {
1923 	case 0x7583:
1924 		return port->id == AIROHA_GDM3_IDX ? FE_PSE_PORT_GDM3
1925 						   : port->id;
1926 	case 0x7581:
1927 	default:
1928 		return port->id == AIROHA_GDM4_IDX ? FE_PSE_PORT_GDM4
1929 						   : port->id;
1930 	}
1931 }
1932 
1933 static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
1934 				   struct net_device *dev)
1935 {
1936 	struct airoha_gdm_port *port = netdev_priv(dev);
1937 	struct airoha_qdma *qdma = port->qdma;
1938 	u32 nr_frags, tag, msg0, msg1, len;
1939 	struct airoha_queue_entry *e;
1940 	struct netdev_queue *txq;
1941 	struct airoha_queue *q;
1942 	LIST_HEAD(tx_list);
1943 	void *data;
1944 	int i, qid;
1945 	u16 index;
1946 	u8 fport;
1947 
1948 	qid = skb_get_queue_mapping(skb) % ARRAY_SIZE(qdma->q_tx);
1949 	tag = airoha_get_dsa_tag(skb, dev);
1950 
1951 	msg0 = FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK,
1952 			  qid / AIROHA_NUM_QOS_QUEUES) |
1953 	       FIELD_PREP(QDMA_ETH_TXMSG_QUEUE_MASK,
1954 			  qid % AIROHA_NUM_QOS_QUEUES) |
1955 	       FIELD_PREP(QDMA_ETH_TXMSG_SP_TAG_MASK, tag);
1956 	if (skb->ip_summed == CHECKSUM_PARTIAL)
1957 		msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) |
1958 			FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) |
1959 			FIELD_PREP(QDMA_ETH_TXMSG_ICO_MASK, 1);
1960 
1961 	/* TSO: fill MSS info in tcp checksum field */
1962 	if (skb_is_gso(skb)) {
1963 		if (skb_cow_head(skb, 0))
1964 			goto error;
1965 
1966 		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 |
1967 						 SKB_GSO_TCPV6)) {
1968 			__be16 csum = cpu_to_be16(skb_shinfo(skb)->gso_size);
1969 
1970 			tcp_hdr(skb)->check = (__force __sum16)csum;
1971 			msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TSO_MASK, 1);
1972 		}
1973 	}
1974 
1975 	fport = airoha_get_fe_port(port);
1976 	msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
1977 	       FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);
1978 
1979 	q = &qdma->q_tx[qid];
1980 	if (WARN_ON_ONCE(!q->ndesc))
1981 		goto error;
1982 
1983 	spin_lock_bh(&q->lock);
1984 
1985 	txq = netdev_get_tx_queue(dev, qid);
1986 	nr_frags = 1 + skb_shinfo(skb)->nr_frags;
1987 
1988 	if (q->queued + nr_frags >= q->ndesc) {
1989 		/* not enough space in the queue */
1990 		netif_tx_stop_queue(txq);
1991 		spin_unlock_bh(&q->lock);
1992 		return NETDEV_TX_BUSY;
1993 	}
1994 
1995 	len = skb_headlen(skb);
1996 	data = skb->data;
1997 
1998 	e = list_first_entry(&q->tx_list, struct airoha_queue_entry,
1999 			     list);
2000 	index = e - q->entry;
2001 
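	/* Build one descriptor per buffer: the skb head first, then each page
	 * fragment. Descriptors are chained via QDMA_DESC_NEXT_ID and all but
	 * the last one carry QDMA_DESC_MORE to mark a multi-buffer packet.
	 */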
2002 	for (i = 0; i < nr_frags; i++) {
2003 		struct airoha_qdma_desc *desc = &q->desc[index];
2004 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2005 		dma_addr_t addr;
2006 		u32 val;
2007 
2008 		addr = dma_map_single(dev->dev.parent, data, len,
2009 				      DMA_TO_DEVICE);
2010 		if (unlikely(dma_mapping_error(dev->dev.parent, addr)))
2011 			goto error_unmap;
2012 
2013 		list_move_tail(&e->list, &tx_list);
2014 		e->skb = i ? NULL : skb;
2015 		e->dma_addr = addr;
2016 		e->dma_len = len;
2017 
2018 		e = list_first_entry(&q->tx_list, struct airoha_queue_entry,
2019 				     list);
2020 		index = e - q->entry;
2021 
2022 		val = FIELD_PREP(QDMA_DESC_LEN_MASK, len);
2023 		if (i < nr_frags - 1)
2024 			val |= FIELD_PREP(QDMA_DESC_MORE_MASK, 1);
2025 		WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
2026 		WRITE_ONCE(desc->addr, cpu_to_le32(addr));
2027 		val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, index);
2028 		WRITE_ONCE(desc->data, cpu_to_le32(val));
2029 		WRITE_ONCE(desc->msg0, cpu_to_le32(msg0));
2030 		WRITE_ONCE(desc->msg1, cpu_to_le32(msg1));
2031 		WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff));
2032 
2033 		data = skb_frag_address(frag);
2034 		len = skb_frag_size(frag);
2035 	}
2036 	q->queued += i;
2037 
2038 	skb_tx_timestamp(skb);
2039 	netdev_tx_sent_queue(txq, skb->len);
2040 
2041 	if (netif_xmit_stopped(txq) || !netdev_xmit_more())
2042 		airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid),
2043 				TX_RING_CPU_IDX_MASK,
2044 				FIELD_PREP(TX_RING_CPU_IDX_MASK, index));
2045 
2046 	if (q->ndesc - q->queued < q->free_thr)
2047 		netif_tx_stop_queue(txq);
2048 
2049 	spin_unlock_bh(&q->lock);
2050 
2051 	return NETDEV_TX_OK;
2052 
2053 error_unmap:
2054 	while (!list_empty(&tx_list)) {
2055 		e = list_first_entry(&tx_list, struct airoha_queue_entry,
2056 				     list);
2057 		dma_unmap_single(dev->dev.parent, e->dma_addr, e->dma_len,
2058 				 DMA_TO_DEVICE);
2059 		e->dma_addr = 0;
2060 		list_move_tail(&e->list, &q->tx_list);
2061 	}
2062 
2063 	spin_unlock_bh(&q->lock);
2064 error:
2065 	dev_kfree_skb_any(skb);
2066 	dev->stats.tx_dropped++;
2067 
2068 	return NETDEV_TX_OK;
2069 }
2070 
2071 static void airoha_ethtool_get_drvinfo(struct net_device *dev,
2072 				       struct ethtool_drvinfo *info)
2073 {
2074 	struct airoha_gdm_port *port = netdev_priv(dev);
2075 	struct airoha_eth *eth = port->qdma->eth;
2076 
2077 	strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver));
2078 	strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info));
2079 }
2080 
2081 static void airoha_ethtool_get_mac_stats(struct net_device *dev,
2082 					 struct ethtool_eth_mac_stats *stats)
2083 {
2084 	struct airoha_gdm_port *port = netdev_priv(dev);
2085 	unsigned int start;
2086 
2087 	airoha_update_hw_stats(port);
2088 	do {
2089 		start = u64_stats_fetch_begin(&port->stats.syncp);
2090 		stats->FramesTransmittedOK = port->stats.tx_ok_pkts;
2091 		stats->OctetsTransmittedOK = port->stats.tx_ok_bytes;
2092 		stats->MulticastFramesXmittedOK = port->stats.tx_multicast;
2093 		stats->BroadcastFramesXmittedOK = port->stats.tx_broadcast;
2094 		stats->FramesReceivedOK = port->stats.rx_ok_pkts;
2095 		stats->OctetsReceivedOK = port->stats.rx_ok_bytes;
2096 		stats->BroadcastFramesReceivedOK = port->stats.rx_broadcast;
2097 	} while (u64_stats_fetch_retry(&port->stats.syncp, start));
2098 }
2099 
2100 static const struct ethtool_rmon_hist_range airoha_ethtool_rmon_ranges[] = {
2101 	{    0,    64 },
2102 	{   65,   127 },
2103 	{  128,   255 },
2104 	{  256,   511 },
2105 	{  512,  1023 },
2106 	{ 1024,  1518 },
2107 	{ 1519, 10239 },
2108 	{},
2109 };
2110 
2111 static void
2112 airoha_ethtool_get_rmon_stats(struct net_device *dev,
2113 			      struct ethtool_rmon_stats *stats,
2114 			      const struct ethtool_rmon_hist_range **ranges)
2115 {
2116 	struct airoha_gdm_port *port = netdev_priv(dev);
2117 	struct airoha_hw_stats *hw_stats = &port->stats;
2118 	unsigned int start;
2119 
2120 	BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
2121 		     ARRAY_SIZE(hw_stats->tx_len) + 1);
2122 	BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
2123 		     ARRAY_SIZE(hw_stats->rx_len) + 1);
2124 
2125 	*ranges = airoha_ethtool_rmon_ranges;
2126 	airoha_update_hw_stats(port);
2127 	do {
2128 		int i;
2129 
2130 		start = u64_stats_fetch_begin(&port->stats.syncp);
2131 		stats->fragments = hw_stats->rx_fragment;
2132 		stats->jabbers = hw_stats->rx_jabber;
2133 		for (i = 0; i < ARRAY_SIZE(airoha_ethtool_rmon_ranges) - 1;
2134 		     i++) {
2135 			stats->hist[i] = hw_stats->rx_len[i];
2136 			stats->hist_tx[i] = hw_stats->tx_len[i];
2137 		}
2138 	} while (u64_stats_fetch_retry(&port->stats.syncp, start));
2139 }
2140 
2141 static int airoha_qdma_set_chan_tx_sched(struct airoha_gdm_port *port,
2142 					 int channel, enum tx_sched_mode mode,
2143 					 const u16 *weights, u8 n_weights)
2144 {
2145 	int i;
2146 
2147 	for (i = 0; i < AIROHA_NUM_TX_RING; i++)
2148 		airoha_qdma_clear(port->qdma, REG_QUEUE_CLOSE_CFG(channel),
2149 				  TXQ_DISABLE_CHAN_QUEUE_MASK(channel, i));
2150 
2151 	for (i = 0; i < n_weights; i++) {
2152 		u32 status;
2153 		int err;
2154 
2155 		airoha_qdma_wr(port->qdma, REG_TXWRR_WEIGHT_CFG,
2156 			       TWRR_RW_CMD_MASK |
2157 			       FIELD_PREP(TWRR_CHAN_IDX_MASK, channel) |
2158 			       FIELD_PREP(TWRR_QUEUE_IDX_MASK, i) |
2159 			       FIELD_PREP(TWRR_VALUE_MASK, weights[i]));
2160 		err = read_poll_timeout(airoha_qdma_rr, status,
2161 					status & TWRR_RW_CMD_DONE,
2162 					USEC_PER_MSEC, 10 * USEC_PER_MSEC,
2163 					true, port->qdma,
2164 					REG_TXWRR_WEIGHT_CFG);
2165 		if (err)
2166 			return err;
2167 	}
2168 
2169 	airoha_qdma_rmw(port->qdma, REG_CHAN_QOS_MODE(channel >> 3),
2170 			CHAN_QOS_MODE_MASK(channel),
2171 			__field_prep(CHAN_QOS_MODE_MASK(channel), mode));
2172 
2173 	return 0;
2174 }
2175 
2176 static int airoha_qdma_set_tx_prio_sched(struct airoha_gdm_port *port,
2177 					 int channel)
2178 {
2179 	static const u16 w[AIROHA_NUM_QOS_QUEUES] = {};
2180 
2181 	return airoha_qdma_set_chan_tx_sched(port, channel, TC_SCH_SP, w,
2182 					     ARRAY_SIZE(w));
2183 }
2184 
2185 static int airoha_qdma_set_tx_ets_sched(struct airoha_gdm_port *port,
2186 					int channel,
2187 					struct tc_ets_qopt_offload *opt)
2188 {
2189 	struct tc_ets_qopt_offload_replace_params *p = &opt->replace_params;
2190 	enum tx_sched_mode mode = TC_SCH_SP;
2191 	u16 w[AIROHA_NUM_QOS_QUEUES] = {};
2192 	int i, nstrict = 0;
2193 
2194 	if (p->bands > AIROHA_NUM_QOS_QUEUES)
2195 		return -EINVAL;
2196 
2197 	for (i = 0; i < p->bands; i++) {
2198 		if (!p->quanta[i])
2199 			nstrict++;
2200 	}
2201 
2202 	/* this configuration is not supported by the hw */
2203 	if (nstrict == AIROHA_NUM_QOS_QUEUES - 1)
2204 		return -EINVAL;
2205 
2206 	/* The EN7581 SoC supports a fixed QoS band priority scheme where
2207 	 * WRR queues have lower priority than SP ones,
2208 	 * e.g: WRR0, WRR1, .., WRRm, SP0, SP1, .., SPn
2209 	 */
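	/* Hypothetical example: bands = 4 with quanta = { 0, 0, q2, q3 }
	 * (nstrict = 2) requires priomap = { 2, 3, 1, 0 } so that the WRR
	 * bands keep the lowest priorities; the programmed weights are then
	 * w = { weights[2], weights[3], 0, ... }.
	 */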
2210 	for (i = 0; i < nstrict; i++) {
2211 		if (p->priomap[p->bands - i - 1] != i)
2212 			return -EINVAL;
2213 	}
2214 
2215 	for (i = 0; i < p->bands - nstrict; i++) {
2216 		if (p->priomap[i] != nstrict + i)
2217 			return -EINVAL;
2218 
2219 		w[i] = p->weights[nstrict + i];
2220 	}
2221 
2222 	if (!nstrict)
2223 		mode = TC_SCH_WRR8;
2224 	else if (nstrict < AIROHA_NUM_QOS_QUEUES - 1)
2225 		mode = nstrict + 1;
2226 
2227 	return airoha_qdma_set_chan_tx_sched(port, channel, mode, w,
2228 					     ARRAY_SIZE(w));
2229 }
2230 
2231 static int airoha_qdma_get_tx_ets_stats(struct airoha_gdm_port *port,
2232 					int channel,
2233 					struct tc_ets_qopt_offload *opt)
2234 {
2235 	u64 cpu_tx_packets = airoha_qdma_rr(port->qdma,
2236 					    REG_CNTR_VAL(channel << 1));
2237 	u64 fwd_tx_packets = airoha_qdma_rr(port->qdma,
2238 					    REG_CNTR_VAL((channel << 1) + 1));
2239 	u64 tx_packets = (cpu_tx_packets - port->cpu_tx_packets) +
2240 			 (fwd_tx_packets - port->fwd_tx_packets);
2241 	_bstats_update(opt->stats.bstats, 0, tx_packets);
2242 
2243 	port->cpu_tx_packets = cpu_tx_packets;
2244 	port->fwd_tx_packets = fwd_tx_packets;
2245 
2246 	return 0;
2247 }
2248 
2249 static int airoha_tc_setup_qdisc_ets(struct airoha_gdm_port *port,
2250 				     struct tc_ets_qopt_offload *opt)
2251 {
2252 	int channel;
2253 
2254 	if (opt->parent == TC_H_ROOT)
2255 		return -EINVAL;
2256 
2257 	channel = TC_H_MAJ(opt->handle) >> 16;
2258 	channel = channel % AIROHA_NUM_QOS_CHANNELS;
2259 
2260 	switch (opt->command) {
2261 	case TC_ETS_REPLACE:
2262 		return airoha_qdma_set_tx_ets_sched(port, channel, opt);
2263 	case TC_ETS_DESTROY:
2264 		/* PRIO is the default qdisc scheduler */
2265 		return airoha_qdma_set_tx_prio_sched(port, channel);
2266 	case TC_ETS_STATS:
2267 		return airoha_qdma_get_tx_ets_stats(port, channel, opt);
2268 	default:
2269 		return -EOPNOTSUPP;
2270 	}
2271 }
2272 
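/* Ingress rate-limiter (meter) parameters are accessed indirectly: write
 * the parameter type/group/index to the CFG register, poll the RW_DONE
 * bit and then read back (or pre-load) the DATA_LOW/DATA_HIGH registers.
 */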
2273 static int airoha_qdma_get_rl_param(struct airoha_qdma *qdma, int queue_id,
2274 				    u32 addr, enum trtcm_param_type param,
2275 				    u32 *val_low, u32 *val_high)
2276 {
2277 	u32 idx = QDMA_METER_IDX(queue_id), group = QDMA_METER_GROUP(queue_id);
2278 	u32 val, config = FIELD_PREP(RATE_LIMIT_PARAM_TYPE_MASK, param) |
2279 			  FIELD_PREP(RATE_LIMIT_METER_GROUP_MASK, group) |
2280 			  FIELD_PREP(RATE_LIMIT_PARAM_INDEX_MASK, idx);
2281 
2282 	airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
2283 	if (read_poll_timeout(airoha_qdma_rr, val,
2284 			      val & RATE_LIMIT_PARAM_RW_DONE_MASK,
2285 			      USEC_PER_MSEC, 10 * USEC_PER_MSEC, true, qdma,
2286 			      REG_TRTCM_CFG_PARAM(addr)))
2287 		return -ETIMEDOUT;
2288 
2289 	*val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr));
2290 	if (val_high)
2291 		*val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr));
2292 
2293 	return 0;
2294 }
2295 
2296 static int airoha_qdma_set_rl_param(struct airoha_qdma *qdma, int queue_id,
2297 				    u32 addr, enum trtcm_param_type param,
2298 				    u32 val)
2299 {
2300 	u32 idx = QDMA_METER_IDX(queue_id), group = QDMA_METER_GROUP(queue_id);
2301 	u32 config = RATE_LIMIT_PARAM_RW_MASK |
2302 		     FIELD_PREP(RATE_LIMIT_PARAM_TYPE_MASK, param) |
2303 		     FIELD_PREP(RATE_LIMIT_METER_GROUP_MASK, group) |
2304 		     FIELD_PREP(RATE_LIMIT_PARAM_INDEX_MASK, idx);
2305 
2306 	airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val);
2307 	airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
2308 
2309 	return read_poll_timeout(airoha_qdma_rr, val,
2310 				 val & RATE_LIMIT_PARAM_RW_DONE_MASK,
2311 				 USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
2312 				 qdma, REG_TRTCM_CFG_PARAM(addr));
2313 }
2314 
2315 static int airoha_qdma_set_rl_config(struct airoha_qdma *qdma, int queue_id,
2316 				     u32 addr, bool enable, u32 enable_mask)
2317 {
2318 	u32 val;
2319 	int err;
2320 
2321 	err = airoha_qdma_get_rl_param(qdma, queue_id, addr, TRTCM_MISC_MODE,
2322 				       &val, NULL);
2323 	if (err)
2324 		return err;
2325 
2326 	val = enable ? val | enable_mask : val & ~enable_mask;
2327 
2328 	return airoha_qdma_set_rl_param(qdma, queue_id, addr, TRTCM_MISC_MODE,
2329 					val);
2330 }
2331 
2332 static int airoha_qdma_set_rl_token_bucket(struct airoha_qdma *qdma,
2333 					   int queue_id, u32 rate_val,
2334 					   u32 bucket_size)
2335 {
2336 	u32 val, config, tick, unit, rate, rate_frac;
2337 	int err;
2338 
2339 	err = airoha_qdma_get_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
2340 				       TRTCM_MISC_MODE, &config, NULL);
2341 	if (err)
2342 		return err;
2343 
2344 	val = airoha_qdma_rr(qdma, REG_INGRESS_TRTCM_CFG);
2345 	tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val);
2346 	if (config & TRTCM_TICK_SEL)
2347 		tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val);
2348 	if (!tick)
2349 		return -EINVAL;
2350 
2351 	unit = (config & TRTCM_PKT_MODE) ? 1000000 / tick : 8000 / tick;
2352 	if (!unit)
2353 		return -EINVAL;
2354 
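	/* Encode the rate in the hw fixed-point format. Hypothetical numbers:
	 * in byte mode with unit = 160, a 100 Mbit/s request
	 * (rate_val = 100000 kbit/s) yields rate = 625 and rate_frac = 0.
	 */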
2355 	rate = rate_val / unit;
2356 	rate_frac = rate_val % unit;
2357 	rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit;
2358 	rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) |
2359 	       FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac);
2360 
2361 	err = airoha_qdma_set_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
2362 				       TRTCM_TOKEN_RATE_MODE, rate);
2363 	if (err)
2364 		return err;
2365 
2366 	val = bucket_size;
2367 	if (!(config & TRTCM_PKT_MODE))
2368 		val = max_t(u32, val, MIN_TOKEN_SIZE);
2369 	val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET);
2370 
2371 	return airoha_qdma_set_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
2372 					TRTCM_BUCKETSIZE_SHIFT_MODE, val);
2373 }
2374 
2375 static int airoha_qdma_init_rl_config(struct airoha_qdma *qdma, int queue_id,
2376 				      bool enable, enum trtcm_unit_type unit)
2377 {
2378 	bool tick_sel = queue_id == 0 || queue_id == 2 || queue_id == 8;
2379 	enum trtcm_param mode = TRTCM_METER_MODE;
2380 	int err;
2381 
2382 	mode |= unit == TRTCM_PACKET_UNIT ? TRTCM_PKT_MODE : 0;
2383 	err = airoha_qdma_set_rl_config(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
2384 					enable, mode);
2385 	if (err)
2386 		return err;
2387 
2388 	return airoha_qdma_set_rl_config(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
2389 					 tick_sel, TRTCM_TICK_SEL);
2390 }
2391 
2392 static int airoha_qdma_get_trtcm_param(struct airoha_qdma *qdma, int channel,
2393 				       u32 addr, enum trtcm_param_type param,
2394 				       enum trtcm_mode_type mode,
2395 				       u32 *val_low, u32 *val_high)
2396 {
2397 	u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
2398 	u32 val, config = FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
2399 			  FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
2400 			  FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
2401 			  FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
2402 
2403 	airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
2404 	if (read_poll_timeout(airoha_qdma_rr, val,
2405 			      val & TRTCM_PARAM_RW_DONE_MASK,
2406 			      USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
2407 			      qdma, REG_TRTCM_CFG_PARAM(addr)))
2408 		return -ETIMEDOUT;
2409 
2410 	*val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr));
2411 	if (val_high)
2412 		*val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr));
2413 
2414 	return 0;
2415 }
2416 
2417 static int airoha_qdma_set_trtcm_param(struct airoha_qdma *qdma, int channel,
2418 				       u32 addr, enum trtcm_param_type param,
2419 				       enum trtcm_mode_type mode, u32 val)
2420 {
2421 	u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
2422 	u32 config = TRTCM_PARAM_RW_MASK |
2423 		     FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
2424 		     FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
2425 		     FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
2426 		     FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
2427 
2428 	airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val);
2429 	airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
2430 
2431 	return read_poll_timeout(airoha_qdma_rr, val,
2432 				 val & TRTCM_PARAM_RW_DONE_MASK,
2433 				 USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
2434 				 qdma, REG_TRTCM_CFG_PARAM(addr));
2435 }
2436 
2437 static int airoha_qdma_set_trtcm_config(struct airoha_qdma *qdma, int channel,
2438 					u32 addr, enum trtcm_mode_type mode,
2439 					bool enable, u32 enable_mask)
2440 {
2441 	u32 val;
2442 
2443 	if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
2444 					mode, &val, NULL))
2445 		return -EINVAL;
2446 
2447 	val = enable ? val | enable_mask : val & ~enable_mask;
2448 
2449 	return airoha_qdma_set_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
2450 					   mode, val);
2451 }
2452 
2453 static int airoha_qdma_set_trtcm_token_bucket(struct airoha_qdma *qdma,
2454 					      int channel, u32 addr,
2455 					      enum trtcm_mode_type mode,
2456 					      u32 rate_val, u32 bucket_size)
2457 {
2458 	u32 val, config, tick, unit, rate, rate_frac;
2459 	int err;
2460 
2461 	if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
2462 					mode, &config, NULL))
2463 		return -EINVAL;
2464 
2465 	val = airoha_qdma_rr(qdma, addr);
2466 	tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val);
2467 	if (config & TRTCM_TICK_SEL)
2468 		tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val);
2469 	if (!tick)
2470 		return -EINVAL;
2471 
2472 	unit = (config & TRTCM_PKT_MODE) ? 1000000 / tick : 8000 / tick;
2473 	if (!unit)
2474 		return -EINVAL;
2475 
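	/* Same fixed-point rate encoding as the ingress rate limiter in
	 * airoha_qdma_set_rl_token_bucket() above.
	 */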
2476 	rate = rate_val / unit;
2477 	rate_frac = rate_val % unit;
2478 	rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit;
2479 	rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) |
2480 	       FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac);
2481 
2482 	err = airoha_qdma_set_trtcm_param(qdma, channel, addr,
2483 					  TRTCM_TOKEN_RATE_MODE, mode, rate);
2484 	if (err)
2485 		return err;
2486 
2487 	val = max_t(u32, bucket_size, MIN_TOKEN_SIZE);
2488 	val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET);
2489 
2490 	return airoha_qdma_set_trtcm_param(qdma, channel, addr,
2491 					   TRTCM_BUCKETSIZE_SHIFT_MODE,
2492 					   mode, val);
2493 }
2494 
2495 static int airoha_qdma_set_tx_rate_limit(struct airoha_gdm_port *port,
2496 					 int channel, u32 rate,
2497 					 u32 bucket_size)
2498 {
2499 	int i, err;
2500 
2501 	for (i = 0; i <= TRTCM_PEAK_MODE; i++) {
2502 		err = airoha_qdma_set_trtcm_config(port->qdma, channel,
2503 						   REG_EGRESS_TRTCM_CFG, i,
2504 						   !!rate, TRTCM_METER_MODE);
2505 		if (err)
2506 			return err;
2507 
2508 		err = airoha_qdma_set_trtcm_token_bucket(port->qdma, channel,
2509 							 REG_EGRESS_TRTCM_CFG,
2510 							 i, rate, bucket_size);
2511 		if (err)
2512 			return err;
2513 	}
2514 
2515 	return 0;
2516 }
2517 
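/* Each HTB leaf class maps to a hw QoS channel with its own egress rate
 * limiter; the channel is also exposed as an extra netdev tx queue
 * (qid = AIROHA_NUM_TX_RING + channel) reserved at probe time.
 */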
2518 static int airoha_tc_htb_alloc_leaf_queue(struct airoha_gdm_port *port,
2519 					  struct tc_htb_qopt_offload *opt)
2520 {
2521 	u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
2522 	u32 rate = div_u64(opt->rate, 1000) << 3; /* bytes/s -> kbit/s */
2523 	struct net_device *dev = port->dev;
2524 	int num_tx_queues = dev->real_num_tx_queues;
2525 	int err;
2526 
2527 	if (opt->parent_classid != TC_HTB_CLASSID_ROOT) {
2528 		NL_SET_ERR_MSG_MOD(opt->extack, "invalid parent classid");
2529 		return -EINVAL;
2530 	}
2531 
2532 	err = airoha_qdma_set_tx_rate_limit(port, channel, rate, opt->quantum);
2533 	if (err) {
2534 		NL_SET_ERR_MSG_MOD(opt->extack,
2535 				   "failed configuring htb offload");
2536 		return err;
2537 	}
2538 
2539 	if (opt->command == TC_HTB_NODE_MODIFY)
2540 		return 0;
2541 
2542 	err = netif_set_real_num_tx_queues(dev, num_tx_queues + 1);
2543 	if (err) {
2544 		airoha_qdma_set_tx_rate_limit(port, channel, 0, opt->quantum);
2545 		NL_SET_ERR_MSG_MOD(opt->extack,
2546 				   "failed setting real_num_tx_queues");
2547 		return err;
2548 	}
2549 
2550 	set_bit(channel, port->qos_sq_bmap);
2551 	opt->qid = AIROHA_NUM_TX_RING + channel;
2552 
2553 	return 0;
2554 }
2555 
2556 static int airoha_qdma_set_rx_meter(struct airoha_gdm_port *port,
2557 				    u32 rate, u32 bucket_size,
2558 				    enum trtcm_unit_type unit_type)
2559 {
2560 	struct airoha_qdma *qdma = port->qdma;
2561 	int i;
2562 
2563 	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
2564 		int err;
2565 
2566 		if (!qdma->q_rx[i].ndesc)
2567 			continue;
2568 
2569 		err = airoha_qdma_init_rl_config(qdma, i, !!rate, unit_type);
2570 		if (err)
2571 			return err;
2572 
2573 		err = airoha_qdma_set_rl_token_bucket(qdma, i, rate,
2574 						      bucket_size);
2575 		if (err)
2576 			return err;
2577 	}
2578 
2579 	return 0;
2580 }
2581 
2582 static int airoha_tc_matchall_act_validate(struct tc_cls_matchall_offload *f)
2583 {
2584 	const struct flow_action *actions = &f->rule->action;
2585 	const struct flow_action_entry *act;
2586 
2587 	if (!flow_action_has_entries(actions)) {
2588 		NL_SET_ERR_MSG_MOD(f->common.extack,
2589 				   "filter run with no actions");
2590 		return -EINVAL;
2591 	}
2592 
2593 	if (!flow_offload_has_one_action(actions)) {
2594 		NL_SET_ERR_MSG_MOD(f->common.extack,
2595 				   "only one action per filter is supported");
2596 		return -EOPNOTSUPP;
2597 	}
2598 
2599 	act = &actions->entries[0];
2600 	if (act->id != FLOW_ACTION_POLICE) {
2601 		NL_SET_ERR_MSG_MOD(f->common.extack, "unsupported action");
2602 		return -EOPNOTSUPP;
2603 	}
2604 
2605 	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
2606 		NL_SET_ERR_MSG_MOD(f->common.extack,
2607 				   "invalid exceed action id");
2608 		return -EOPNOTSUPP;
2609 	}
2610 
2611 	if (act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
2612 		NL_SET_ERR_MSG_MOD(f->common.extack,
2613 				   "invalid notexceed action id");
2614 		return -EOPNOTSUPP;
2615 	}
2616 
2617 	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
2618 	    !flow_action_is_last_entry(actions, act)) {
2619 		NL_SET_ERR_MSG_MOD(f->common.extack,
2620 				   "action accept must be last");
2621 		return -EOPNOTSUPP;
2622 	}
2623 
2624 	if (act->police.peakrate_bytes_ps || act->police.avrate ||
2625 	    act->police.overhead || act->police.mtu) {
2626 		NL_SET_ERR_MSG_MOD(f->common.extack,
2627 				   "peakrate/avrate/overhead/mtu unsupported");
2628 		return -EOPNOTSUPP;
2629 	}
2630 
2631 	return 0;
2632 }
2633 
2634 static int airoha_dev_tc_matchall(struct net_device *dev,
2635 				  struct tc_cls_matchall_offload *f)
2636 {
2637 	enum trtcm_unit_type unit_type = TRTCM_BYTE_UNIT;
2638 	struct airoha_gdm_port *port = netdev_priv(dev);
2639 	u32 rate = 0, bucket_size = 0;
2640 
2641 	switch (f->command) {
2642 	case TC_CLSMATCHALL_REPLACE: {
2643 		const struct flow_action_entry *act;
2644 		int err;
2645 
2646 		err = airoha_tc_matchall_act_validate(f);
2647 		if (err)
2648 			return err;
2649 
2650 		act = &f->rule->action.entries[0];
2651 		if (act->police.rate_pkt_ps) {
2652 			rate = act->police.rate_pkt_ps;
2653 			bucket_size = act->police.burst_pkt;
2654 			unit_type = TRTCM_PACKET_UNIT;
2655 		} else {
2656 			rate = div_u64(act->police.rate_bytes_ps, 1000);
2657 			rate = rate << 3; /* bytes/s -> kbit/s */
2658 			bucket_size = act->police.burst;
2659 		}
2660 		fallthrough;
2661 	}
2662 	case TC_CLSMATCHALL_DESTROY:
2663 		return airoha_qdma_set_rx_meter(port, rate, bucket_size,
2664 						unit_type);
2665 	default:
2666 		return -EOPNOTSUPP;
2667 	}
2668 }
2669 
2670 static int airoha_dev_setup_tc_block_cb(enum tc_setup_type type,
2671 					void *type_data, void *cb_priv)
2672 {
2673 	struct net_device *dev = cb_priv;
2674 	struct airoha_gdm_port *port = netdev_priv(dev);
2675 	struct airoha_eth *eth = port->qdma->eth;
2676 
2677 	if (!tc_can_offload(dev))
2678 		return -EOPNOTSUPP;
2679 
2680 	switch (type) {
2681 	case TC_SETUP_CLSFLOWER:
2682 		return airoha_ppe_setup_tc_block_cb(&eth->ppe->dev, type_data);
2683 	case TC_SETUP_CLSMATCHALL:
2684 		return airoha_dev_tc_matchall(dev, type_data);
2685 	default:
2686 		return -EOPNOTSUPP;
2687 	}
2688 }
2689 
2690 static int airoha_dev_setup_tc_block(struct airoha_gdm_port *port,
2691 				     struct flow_block_offload *f)
2692 {
2693 	flow_setup_cb_t *cb = airoha_dev_setup_tc_block_cb;
2694 	static LIST_HEAD(block_cb_list);
2695 	struct flow_block_cb *block_cb;
2696 
2697 	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
2698 		return -EOPNOTSUPP;
2699 
2700 	f->driver_block_list = &block_cb_list;
2701 	switch (f->command) {
2702 	case FLOW_BLOCK_BIND:
2703 		block_cb = flow_block_cb_lookup(f->block, cb, port->dev);
2704 		if (block_cb) {
2705 			flow_block_cb_incref(block_cb);
2706 			return 0;
2707 		}
2708 		block_cb = flow_block_cb_alloc(cb, port->dev, port->dev, NULL);
2709 		if (IS_ERR(block_cb))
2710 			return PTR_ERR(block_cb);
2711 
2712 		flow_block_cb_incref(block_cb);
2713 		flow_block_cb_add(block_cb, f);
2714 		list_add_tail(&block_cb->driver_list, &block_cb_list);
2715 		return 0;
2716 	case FLOW_BLOCK_UNBIND:
2717 		block_cb = flow_block_cb_lookup(f->block, cb, port->dev);
2718 		if (!block_cb)
2719 			return -ENOENT;
2720 
2721 		if (!flow_block_cb_decref(block_cb)) {
2722 			flow_block_cb_remove(block_cb, f);
2723 			list_del(&block_cb->driver_list);
2724 		}
2725 		return 0;
2726 	default:
2727 		return -EOPNOTSUPP;
2728 	}
2729 }
2730 
2731 static void airoha_tc_remove_htb_queue(struct airoha_gdm_port *port, int queue)
2732 {
2733 	struct net_device *dev = port->dev;
2734 
2735 	netif_set_real_num_tx_queues(dev, dev->real_num_tx_queues - 1);
2736 	airoha_qdma_set_tx_rate_limit(port, queue + 1, 0, 0);
2737 	clear_bit(queue, port->qos_sq_bmap);
2738 }
2739 
2740 static int airoha_tc_htb_delete_leaf_queue(struct airoha_gdm_port *port,
2741 					   struct tc_htb_qopt_offload *opt)
2742 {
2743 	u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
2744 
2745 	if (!test_bit(channel, port->qos_sq_bmap)) {
2746 		NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
2747 		return -EINVAL;
2748 	}
2749 
2750 	airoha_tc_remove_htb_queue(port, channel);
2751 
2752 	return 0;
2753 }
2754 
2755 static int airoha_tc_htb_destroy(struct airoha_gdm_port *port)
2756 {
2757 	int q;
2758 
2759 	for_each_set_bit(q, port->qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS)
2760 		airoha_tc_remove_htb_queue(port, q);
2761 
2762 	return 0;
2763 }
2764 
2765 static int airoha_tc_get_htb_get_leaf_queue(struct airoha_gdm_port *port,
2766 					    struct tc_htb_qopt_offload *opt)
2767 {
2768 	u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
2769 
2770 	if (!test_bit(channel, port->qos_sq_bmap)) {
2771 		NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
2772 		return -EINVAL;
2773 	}
2774 
2775 	opt->qid = AIROHA_NUM_TX_RING + channel;
2776 
2777 	return 0;
2778 }
2779 
2780 static int airoha_tc_setup_qdisc_htb(struct airoha_gdm_port *port,
2781 				     struct tc_htb_qopt_offload *opt)
2782 {
2783 	switch (opt->command) {
2784 	case TC_HTB_CREATE:
2785 		break;
2786 	case TC_HTB_DESTROY:
2787 		return airoha_tc_htb_destroy(port);
2788 	case TC_HTB_NODE_MODIFY:
2789 	case TC_HTB_LEAF_ALLOC_QUEUE:
2790 		return airoha_tc_htb_alloc_leaf_queue(port, opt);
2791 	case TC_HTB_LEAF_DEL:
2792 	case TC_HTB_LEAF_DEL_LAST:
2793 	case TC_HTB_LEAF_DEL_LAST_FORCE:
2794 		return airoha_tc_htb_delete_leaf_queue(port, opt);
2795 	case TC_HTB_LEAF_QUERY_QUEUE:
2796 		return airoha_tc_get_htb_get_leaf_queue(port, opt);
2797 	default:
2798 		return -EOPNOTSUPP;
2799 	}
2800 
2801 	return 0;
2802 }
2803 
2804 static int airoha_dev_tc_setup(struct net_device *dev, enum tc_setup_type type,
2805 			       void *type_data)
2806 {
2807 	struct airoha_gdm_port *port = netdev_priv(dev);
2808 
2809 	switch (type) {
2810 	case TC_SETUP_QDISC_ETS:
2811 		return airoha_tc_setup_qdisc_ets(port, type_data);
2812 	case TC_SETUP_QDISC_HTB:
2813 		return airoha_tc_setup_qdisc_htb(port, type_data);
2814 	case TC_SETUP_BLOCK:
2815 	case TC_SETUP_FT:
2816 		return airoha_dev_setup_tc_block(port, type_data);
2817 	default:
2818 		return -EOPNOTSUPP;
2819 	}
2820 }
2821 
2822 static const struct net_device_ops airoha_netdev_ops = {
2823 	.ndo_init		= airoha_dev_init,
2824 	.ndo_open		= airoha_dev_open,
2825 	.ndo_stop		= airoha_dev_stop,
2826 	.ndo_change_mtu		= airoha_dev_change_mtu,
2827 	.ndo_select_queue	= airoha_dev_select_queue,
2828 	.ndo_start_xmit		= airoha_dev_xmit,
2829 	.ndo_get_stats64        = airoha_dev_get_stats64,
2830 	.ndo_set_mac_address	= airoha_dev_set_macaddr,
2831 	.ndo_setup_tc		= airoha_dev_tc_setup,
2832 };
2833 
2834 static const struct ethtool_ops airoha_ethtool_ops = {
2835 	.get_drvinfo		= airoha_ethtool_get_drvinfo,
2836 	.get_eth_mac_stats      = airoha_ethtool_get_mac_stats,
2837 	.get_rmon_stats		= airoha_ethtool_get_rmon_stats,
2838 	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
2839 	.get_link		= ethtool_op_get_link,
2840 };
2841 
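/* Pre-allocate one METADATA_HW_PORT_MUX dst per possible DSA user port;
 * on rx they are attached to skbs to steer them to the corresponding DSA
 * user port.
 */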
2842 static int airoha_metadata_dst_alloc(struct airoha_gdm_port *port)
2843 {
2844 	int i;
2845 
2846 	for (i = 0; i < ARRAY_SIZE(port->dsa_meta); i++) {
2847 		struct metadata_dst *md_dst;
2848 
2849 		md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
2850 					    GFP_KERNEL);
2851 		if (!md_dst)
2852 			return -ENOMEM;
2853 
2854 		md_dst->u.port_info.port_id = i;
2855 		port->dsa_meta[i] = md_dst;
2856 	}
2857 
2858 	return 0;
2859 }
2860 
2861 static void airoha_metadata_dst_free(struct airoha_gdm_port *port)
2862 {
2863 	int i;
2864 
2865 	for (i = 0; i < ARRAY_SIZE(port->dsa_meta); i++) {
2866 		if (!port->dsa_meta[i])
2867 			continue;
2868 
2869 		metadata_dst_free(port->dsa_meta[i]);
2870 	}
2871 }
2872 
2873 bool airoha_is_valid_gdm_port(struct airoha_eth *eth,
2874 			      struct airoha_gdm_port *port)
2875 {
2876 	int i;
2877 
2878 	for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
2879 		if (eth->ports[i] == port)
2880 			return true;
2881 	}
2882 
2883 	return false;
2884 }
2885 
2886 static int airoha_alloc_gdm_port(struct airoha_eth *eth,
2887 				 struct device_node *np)
2888 {
2889 	const __be32 *id_ptr = of_get_property(np, "reg", NULL);
2890 	struct airoha_gdm_port *port;
2891 	struct net_device *dev;
2892 	int err, p;
2893 	u32 id;
2894 
2895 	if (!id_ptr) {
2896 		dev_err(eth->dev, "missing gdm port id\n");
2897 		return -EINVAL;
2898 	}
2899 
2900 	id = be32_to_cpup(id_ptr);
2901 	p = id - 1;
2902 
2903 	if (!id || id > ARRAY_SIZE(eth->ports)) {
2904 		dev_err(eth->dev, "invalid gdm port id: %d\n", id);
2905 		return -EINVAL;
2906 	}
2907 
2908 	if (eth->ports[p]) {
2909 		dev_err(eth->dev, "duplicate gdm port id: %d\n", id);
2910 		return -EINVAL;
2911 	}
2912 
2913 	dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port),
2914 				      AIROHA_NUM_NETDEV_TX_RINGS,
2915 				      AIROHA_NUM_RX_RING);
2916 	if (!dev) {
2917 		dev_err(eth->dev, "alloc_etherdev failed\n");
2918 		return -ENOMEM;
2919 	}
2920 
2921 	dev->netdev_ops = &airoha_netdev_ops;
2922 	dev->ethtool_ops = &airoha_ethtool_ops;
2923 	dev->max_mtu = AIROHA_MAX_MTU;
2924 	dev->watchdog_timeo = 5 * HZ;
2925 	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
2926 			   NETIF_F_TSO6 | NETIF_F_IPV6_CSUM |
2927 			   NETIF_F_SG | NETIF_F_TSO |
2928 			   NETIF_F_HW_TC;
2929 	dev->features |= dev->hw_features;
2930 	dev->vlan_features = dev->hw_features;
2931 	dev->dev.of_node = np;
2932 	SET_NETDEV_DEV(dev, eth->dev);
2933 
2934 	/* reserve hw queues for HTB offloading */
2935 	err = netif_set_real_num_tx_queues(dev, AIROHA_NUM_TX_RING);
2936 	if (err)
2937 		return err;
2938 
2939 	err = of_get_ethdev_address(np, dev);
2940 	if (err) {
2941 		if (err == -EPROBE_DEFER)
2942 			return err;
2943 
2944 		eth_hw_addr_random(dev);
2945 		dev_info(eth->dev, "generated random MAC address %pM\n",
2946 			 dev->dev_addr);
2947 	}
2948 
2949 	port = netdev_priv(dev);
2950 	u64_stats_init(&port->stats.syncp);
2951 	spin_lock_init(&port->stats.lock);
2952 	port->eth = eth;
2953 	port->dev = dev;
2954 	port->id = id;
2955 	eth->ports[p] = port;
2956 
2957 	return airoha_metadata_dst_alloc(port);
2958 }
2959 
2960 static int airoha_register_gdm_devices(struct airoha_eth *eth)
2961 {
2962 	int i;
2963 
2964 	for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
2965 		struct airoha_gdm_port *port = eth->ports[i];
2966 		int err;
2967 
2968 		if (!port)
2969 			continue;
2970 
2971 		err = register_netdev(port->dev);
2972 		if (err)
2973 			return err;
2974 	}
2975 
2976 	set_bit(DEV_STATE_REGISTERED, &eth->state);
2977 
2978 	return 0;
2979 }
2980 
2981 static int airoha_probe(struct platform_device *pdev)
2982 {
2983 	struct reset_control_bulk_data *xsi_rsts;
2984 	struct device_node *np;
2985 	struct airoha_eth *eth;
2986 	int i, err;
2987 
2988 	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
2989 	if (!eth)
2990 		return -ENOMEM;
2991 
2992 	eth->soc = of_device_get_match_data(&pdev->dev);
2993 	if (!eth->soc)
2994 		return -EINVAL;
2995 
2996 	eth->dev = &pdev->dev;
2997 
2998 	err = dma_set_mask_and_coherent(eth->dev, DMA_BIT_MASK(32));
2999 	if (err) {
3000 		dev_err(eth->dev, "failed configuring DMA mask\n");
3001 		return err;
3002 	}
3003 
3004 	eth->fe_regs = devm_platform_ioremap_resource_byname(pdev, "fe");
3005 	if (IS_ERR(eth->fe_regs))
3006 		return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs),
3007 				     "failed to iomap fe regs\n");
3008 
3009 	eth->rsts[0].id = "fe";
3010 	eth->rsts[1].id = "pdma";
3011 	eth->rsts[2].id = "qdma";
3012 	err = devm_reset_control_bulk_get_exclusive(eth->dev,
3013 						    ARRAY_SIZE(eth->rsts),
3014 						    eth->rsts);
3015 	if (err) {
3016 		dev_err(eth->dev, "failed to get bulk reset lines\n");
3017 		return err;
3018 	}
3019 
3020 	xsi_rsts = devm_kcalloc(eth->dev,
3021 				eth->soc->num_xsi_rsts, sizeof(*xsi_rsts),
3022 				GFP_KERNEL);
3023 	if (!xsi_rsts)
3024 		return -ENOMEM;
3025 
3026 	eth->xsi_rsts = xsi_rsts;
3027 	for (i = 0; i < eth->soc->num_xsi_rsts; i++)
3028 		eth->xsi_rsts[i].id = eth->soc->xsi_rsts_names[i];
3029 
3030 	err = devm_reset_control_bulk_get_exclusive(eth->dev,
3031 						    eth->soc->num_xsi_rsts,
3032 						    eth->xsi_rsts);
3033 	if (err) {
3034 		dev_err(eth->dev, "failed to get bulk xsi reset lines\n");
3035 		return err;
3036 	}
3037 
3038 	eth->napi_dev = alloc_netdev_dummy(0);
3039 	if (!eth->napi_dev)
3040 		return -ENOMEM;
3041 
3042 	/* Enable threaded NAPI by default */
3043 	eth->napi_dev->threaded = true;
3044 	strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name));
3045 	platform_set_drvdata(pdev, eth);
3046 
3047 	err = airoha_hw_init(pdev, eth);
3048 	if (err)
3049 		goto error_netdev_free;
3050 
3051 	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
3052 		airoha_qdma_start_napi(&eth->qdma[i]);
3053 
3054 	for_each_child_of_node(pdev->dev.of_node, np) {
3055 		if (!of_device_is_compatible(np, "airoha,eth-mac"))
3056 			continue;
3057 
3058 		if (!of_device_is_available(np))
3059 			continue;
3060 
3061 		err = airoha_alloc_gdm_port(eth, np);
3062 		if (err) {
3063 			of_node_put(np);
3064 			goto error_napi_stop;
3065 		}
3066 	}
3067 
3068 	err = airoha_register_gdm_devices(eth);
3069 	if (err)
3070 		goto error_napi_stop;
3071 
3072 	return 0;
3073 
3074 error_napi_stop:
3075 	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
3076 		airoha_qdma_stop_napi(&eth->qdma[i]);
3077 
3078 	for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
3079 		struct airoha_gdm_port *port = eth->ports[i];
3080 
3081 		if (!port)
3082 			continue;
3083 
3084 		if (port->dev->reg_state == NETREG_REGISTERED)
3085 			unregister_netdev(port->dev);
3086 		airoha_metadata_dst_free(port);
3087 	}
3088 	airoha_hw_cleanup(eth);
3089 error_netdev_free:
3090 	free_netdev(eth->napi_dev);
3091 	platform_set_drvdata(pdev, NULL);
3092 
3093 	return err;
3094 }
3095 
3096 static void airoha_remove(struct platform_device *pdev)
3097 {
3098 	struct airoha_eth *eth = platform_get_drvdata(pdev);
3099 	int i;
3100 
3101 	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
3102 		airoha_qdma_stop_napi(&eth->qdma[i]);
3103 
3104 	for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
3105 		struct airoha_gdm_port *port = eth->ports[i];
3106 
3107 		if (!port)
3108 			continue;
3109 
3110 		unregister_netdev(port->dev);
3111 		airoha_metadata_dst_free(port);
3112 	}
3113 	airoha_hw_cleanup(eth);
3114 
3115 	free_netdev(eth->napi_dev);
3116 	platform_set_drvdata(pdev, NULL);
3117 }
3118 
3119 static const char * const en7581_xsi_rsts_names[] = {
3120 	"xsi-mac",
3121 	"hsi0-mac",
3122 	"hsi1-mac",
3123 	"hsi-mac",
3124 	"xfp-mac",
3125 };
3126 
3127 static int airoha_en7581_get_src_port_id(struct airoha_gdm_port *port, int nbq)
3128 {
3129 	switch (port->id) {
3130 	case AIROHA_GDM3_IDX:
3131 		/* 7581 SoC supports PCIe serdes on GDM3 port */
3132 		if (nbq == 4)
3133 			return HSGMII_LAN_7581_PCIE0_SRCPORT;
3134 		if (nbq == 5)
3135 			return HSGMII_LAN_7581_PCIE1_SRCPORT;
3136 		break;
3137 	case AIROHA_GDM4_IDX:
3138 		/* 7581 SoC supports eth and usb serdes on GDM4 port */
3139 		if (!nbq)
3140 			return HSGMII_LAN_7581_ETH_SRCPORT;
3141 		if (nbq == 1)
3142 			return HSGMII_LAN_7581_USB_SRCPORT;
3143 		break;
3144 	default:
3145 		break;
3146 	}
3147 
3148 	return -EINVAL;
3149 }
3150 
3151 static const char * const an7583_xsi_rsts_names[] = {
3152 	"xsi-mac",
3153 	"hsi0-mac",
3154 	"hsi1-mac",
3155 	"xfp-mac",
3156 };
3157 
3158 static int airoha_an7583_get_src_port_id(struct airoha_gdm_port *port, int nbq)
3159 {
3160 	switch (port->id) {
3161 	case AIROHA_GDM3_IDX:
3162 		/* 7583 SoC supports eth serdes on GDM3 port */
3163 		if (!nbq)
3164 			return HSGMII_LAN_7583_ETH_SRCPORT;
3165 		break;
3166 	case AIROHA_GDM4_IDX:
3167 		/* 7583 SoC supports PCIe and USB serdes on GDM4 port */
3168 		if (!nbq)
3169 			return HSGMII_LAN_7583_PCIE_SRCPORT;
3170 		if (nbq == 1)
3171 			return HSGMII_LAN_7583_USB_SRCPORT;
3172 		break;
3173 	default:
3174 		break;
3175 	}
3176 
3177 	return -EINVAL;
3178 }
3179 
3180 static const struct airoha_eth_soc_data en7581_soc_data = {
3181 	.version = 0x7581,
3182 	.xsi_rsts_names = en7581_xsi_rsts_names,
3183 	.num_xsi_rsts = ARRAY_SIZE(en7581_xsi_rsts_names),
3184 	.num_ppe = 2,
3185 	.ops = {
3186 		.get_src_port_id = airoha_en7581_get_src_port_id,
3187 	},
3188 };
3189 
3190 static const struct airoha_eth_soc_data an7583_soc_data = {
3191 	.version = 0x7583,
3192 	.xsi_rsts_names = an7583_xsi_rsts_names,
3193 	.num_xsi_rsts = ARRAY_SIZE(an7583_xsi_rsts_names),
3194 	.num_ppe = 1,
3195 	.ops = {
3196 		.get_src_port_id = airoha_an7583_get_src_port_id,
3197 	},
3198 };
3199 
3200 static const struct of_device_id of_airoha_match[] = {
3201 	{ .compatible = "airoha,en7581-eth", .data = &en7581_soc_data },
3202 	{ .compatible = "airoha,an7583-eth", .data = &an7583_soc_data },
3203 	{ /* sentinel */ }
3204 };
3205 MODULE_DEVICE_TABLE(of, of_airoha_match);
3206 
3207 static struct platform_driver airoha_driver = {
3208 	.probe = airoha_probe,
3209 	.remove = airoha_remove,
3210 	.driver = {
3211 		.name = KBUILD_MODNAME,
3212 		.of_match_table = of_airoha_match,
3213 	},
3214 };
3215 module_platform_driver(airoha_driver);
3216 
3217 MODULE_LICENSE("GPL");
3218 MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
3219 MODULE_DESCRIPTION("Ethernet driver for Airoha SoC");
3220