// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2024 AIROHA Inc
 * Author: Lorenzo Bianconi <lorenzo@kernel.org>
 */
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/tcp.h>
#include <linux/u64_stats_sync.h>
#include <net/dst_metadata.h>
#include <net/page_pool/helpers.h>
#include <net/pkt_cls.h>
#include <uapi/linux/ppp_defs.h>

#include "airoha_regs.h"
#include "airoha_eth.h"

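/* Raw MMIO accessors shared by the FE and QDMA register helpers built on
 * top of them. airoha_rmw() first clears the bits in @mask and then ORs
 * in @val, so a masked field update is typically paired with
 * FIELD_PREP(), e.g. (illustrative mask name):
 *
 *	airoha_rmw(base, offset, SOME_FIELD_MASK,
 *		   FIELD_PREP(SOME_FIELD_MASK, v));
 */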
u32 airoha_rr(void __iomem *base, u32 offset)
{
	return readl(base + offset);
}

void airoha_wr(void __iomem *base, u32 offset, u32 val)
{
	writel(val, base + offset);
}

u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
{
	val |= (airoha_rr(base, offset) & ~mask);
	airoha_wr(base, offset, val);

	return val;
}

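/* Update the cached interrupt mask of @irq_bank and mirror it into the
 * per-bank REG_INT_ENABLE register. The bank number is recovered via
 * pointer arithmetic (irq_bank - &qdma->irq_banks[0]) since the banks
 * live in a fixed-size array inside struct airoha_qdma.
 */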
static void airoha_qdma_set_irqmask(struct airoha_irq_bank *irq_bank,
				    int index, u32 clear, u32 set)
{
	struct airoha_qdma *qdma = irq_bank->qdma;
	int bank = irq_bank - &qdma->irq_banks[0];
	unsigned long flags;

	if (WARN_ON_ONCE(index >= ARRAY_SIZE(irq_bank->irqmask)))
		return;

	spin_lock_irqsave(&irq_bank->irq_lock, flags);

	irq_bank->irqmask[index] &= ~clear;
	irq_bank->irqmask[index] |= set;
	airoha_qdma_wr(qdma, REG_INT_ENABLE(bank, index),
		       irq_bank->irqmask[index]);
	/* Read irq_enable register in order to guarantee the update above
	 * completes in the spinlock critical section.
	 */
	airoha_qdma_rr(qdma, REG_INT_ENABLE(bank, index));

	spin_unlock_irqrestore(&irq_bank->irq_lock, flags);
}

static void airoha_qdma_irq_enable(struct airoha_irq_bank *irq_bank,
				   int index, u32 mask)
{
	airoha_qdma_set_irqmask(irq_bank, index, 0, mask);
}

static void airoha_qdma_irq_disable(struct airoha_irq_bank *irq_bank,
				    int index, u32 mask)
{
	airoha_qdma_set_irqmask(irq_bank, index, mask, 0);
}

static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr)
{
	struct airoha_eth *eth = port->qdma->eth;
	u32 val, reg;

	reg = airhoa_is_lan_gdm_port(port) ? REG_FE_LAN_MAC_H
					   : REG_FE_WAN_MAC_H;
	val = (addr[0] << 16) | (addr[1] << 8) | addr[2];
	airoha_fe_wr(eth, reg, val);

	val = (addr[3] << 16) | (addr[4] << 8) | addr[5];
	airoha_fe_wr(eth, REG_FE_MAC_LMIN(reg), val);
	airoha_fe_wr(eth, REG_FE_MAC_LMAX(reg), val);

	airoha_ppe_init_upd_mem(port);
}

static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
					u32 val)
{
	airoha_fe_rmw(eth, addr, GDM_OCFQ_MASK,
		      FIELD_PREP(GDM_OCFQ_MASK, val));
	airoha_fe_rmw(eth, addr, GDM_MCFQ_MASK,
		      FIELD_PREP(GDM_MCFQ_MASK, val));
	airoha_fe_rmw(eth, addr, GDM_BCFQ_MASK,
		      FIELD_PREP(GDM_BCFQ_MASK, val));
	airoha_fe_rmw(eth, addr, GDM_UCFQ_MASK,
		      FIELD_PREP(GDM_UCFQ_MASK, val));
}

static int airoha_set_vip_for_gdm_port(struct airoha_gdm_port *port,
				       bool enable)
{
	struct airoha_eth *eth = port->qdma->eth;
	u32 vip_port;

	switch (port->id) {
	case AIROHA_GDM3_IDX:
		/* FIXME: handle XSI_PCIE1_PORT */
		vip_port = XSI_PCIE0_VIP_PORT_MASK;
		break;
	case AIROHA_GDM4_IDX:
		/* FIXME: handle XSI_USB_PORT */
		vip_port = XSI_ETH_VIP_PORT_MASK;
		break;
	default:
		return 0;
	}

	if (enable) {
		airoha_fe_set(eth, REG_FE_VIP_PORT_EN, vip_port);
		airoha_fe_set(eth, REG_FE_IFC_PORT_EN, vip_port);
	} else {
		airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, vip_port);
		airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, vip_port);
	}

	return 0;
}

static void airoha_fe_maccr_init(struct airoha_eth *eth)
{
	int p;

	for (p = 1; p <= ARRAY_SIZE(eth->ports); p++)
		airoha_fe_set(eth, REG_GDM_FWD_CFG(p),
			      GDM_TCP_CKSUM_MASK | GDM_UDP_CKSUM_MASK |
			      GDM_IP4_CKSUM_MASK | GDM_DROP_CRC_ERR_MASK);

	airoha_fe_rmw(eth, REG_CDM_VLAN_CTRL(1), CDM_VLAN_MASK,
		      FIELD_PREP(CDM_VLAN_MASK, 0x8100));

	airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PAD);
}

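/* Program the FE VIP pattern table so control-plane frames are trapped to
 * the CPU instead of being forwarded in hardware. The PATN_TYPE values
 * appear to select the match key: 0 for an ethertype, 1 for a PPP
 * protocol and 4 for an L4 port match (with PATN_SP/DP_EN_MASK enabling
 * source/destination port compare). The raw port patterns decode as
 * sport:dport pairs, e.g. 0x1f401f4 is 500:500 (ISAKMP) and 0x2220223 is
 * 546:547 (DHCPv6).
 */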
static void airoha_fe_vip_setup(struct airoha_eth *eth)
{
	airoha_fe_wr(eth, REG_FE_VIP_PATN(3), ETH_P_PPP_DISC);
	airoha_fe_wr(eth, REG_FE_VIP_EN(3), PATN_FCPU_EN_MASK | PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(4), PPP_LCP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(4),
		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
		     PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(6), PPP_IPCP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(6),
		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
		     PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(7), PPP_CHAP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(7),
		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
		     PATN_EN_MASK);

	/* BOOTP (0x43) */
	airoha_fe_wr(eth, REG_FE_VIP_PATN(8), 0x43);
	airoha_fe_wr(eth, REG_FE_VIP_EN(8),
		     PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);

	/* BOOTP (0x44) */
	airoha_fe_wr(eth, REG_FE_VIP_PATN(9), 0x44);
	airoha_fe_wr(eth, REG_FE_VIP_EN(9),
		     PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);

	/* ISAKMP */
	airoha_fe_wr(eth, REG_FE_VIP_PATN(10), 0x1f401f4);
	airoha_fe_wr(eth, REG_FE_VIP_EN(10),
		     PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(11), PPP_IPV6CP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(11),
		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
		     PATN_EN_MASK);

	/* DHCPv6 */
	airoha_fe_wr(eth, REG_FE_VIP_PATN(12), 0x2220223);
	airoha_fe_wr(eth, REG_FE_VIP_EN(12),
		     PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(19), PPP_PAP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(19),
		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
		     PATN_EN_MASK);

	/* ETH->ETH_P_1905 (0x893a) */
	airoha_fe_wr(eth, REG_FE_VIP_PATN(20), 0x893a);
	airoha_fe_wr(eth, REG_FE_VIP_EN(20),
		     PATN_FCPU_EN_MASK | PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(21), ETH_P_LLDP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(21),
		     PATN_FCPU_EN_MASK | PATN_EN_MASK);
}

static u32 airoha_fe_get_pse_queue_rsv_pages(struct airoha_eth *eth,
					     u32 port, u32 queue)
{
	u32 val;

	airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
		      PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK,
		      FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
		      FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue));
	val = airoha_fe_rr(eth, REG_FE_PSE_QUEUE_CFG_VAL);

	return FIELD_GET(PSE_CFG_OQ_RSV_MASK, val);
}

static void airoha_fe_set_pse_queue_rsv_pages(struct airoha_eth *eth,
					      u32 port, u32 queue, u32 val)
{
	airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_VAL, PSE_CFG_OQ_RSV_MASK,
		      FIELD_PREP(PSE_CFG_OQ_RSV_MASK, val));
	airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
		      PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK |
		      PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK,
		      FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
		      FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue) |
		      PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK);
}

static u32 airoha_fe_get_pse_all_rsv(struct airoha_eth *eth)
{
	u32 val = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET);

	return FIELD_GET(PSE_ALLRSV_MASK, val);
}

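/* Update the reserved page budget of a single PSE output queue and
 * rebalance the shared buffer thresholds around it: the global
 * reservation (PSE_ALLRSV) absorbs the per-queue delta, while the
 * high/medium watermarks are derived from the free queue limit as
 * fq_limit - all_rsv - 0x20 and fq_limit - all_rsv - 0x100 respectively,
 * with the low watermark set to 3/4 of the medium one.
 */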
static int airoha_fe_set_pse_oq_rsv(struct airoha_eth *eth,
				    u32 port, u32 queue, u32 val)
{
	u32 orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue);
	u32 tmp, all_rsv, fq_limit;

	airoha_fe_set_pse_queue_rsv_pages(eth, port, queue, val);

	/* modify all rsv */
	all_rsv = airoha_fe_get_pse_all_rsv(eth);
	all_rsv += (val - orig_val);
	airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, PSE_ALLRSV_MASK,
		      FIELD_PREP(PSE_ALLRSV_MASK, all_rsv));

	/* modify hthd */
	tmp = airoha_fe_rr(eth, PSE_FQ_CFG);
	fq_limit = FIELD_GET(PSE_FQ_LIMIT_MASK, tmp);
	tmp = fq_limit - all_rsv - 0x20;
	airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
		      PSE_SHARE_USED_HTHD_MASK,
		      FIELD_PREP(PSE_SHARE_USED_HTHD_MASK, tmp));

	tmp = fq_limit - all_rsv - 0x100;
	airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
		      PSE_SHARE_USED_MTHD_MASK,
		      FIELD_PREP(PSE_SHARE_USED_MTHD_MASK, tmp));
	tmp = (3 * tmp) >> 2;
	airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET,
		      PSE_SHARE_USED_LTHD_MASK,
		      FIELD_PREP(PSE_SHARE_USED_LTHD_MASK, tmp));

	return 0;
}

static void airoha_fe_pse_ports_init(struct airoha_eth *eth)
{
	const u32 pse_port_num_queues[] = {
		[FE_PSE_PORT_CDM1] = 6,
		[FE_PSE_PORT_GDM1] = 6,
		[FE_PSE_PORT_GDM2] = 32,
		[FE_PSE_PORT_GDM3] = 6,
		[FE_PSE_PORT_PPE1] = 4,
		[FE_PSE_PORT_CDM2] = 6,
		[FE_PSE_PORT_CDM3] = 8,
		[FE_PSE_PORT_CDM4] = 10,
		[FE_PSE_PORT_PPE2] = 4,
		[FE_PSE_PORT_GDM4] = 2,
		[FE_PSE_PORT_CDM5] = 2,
	};
	u32 all_rsv;
	int q;

	all_rsv = airoha_fe_get_pse_all_rsv(eth);
	if (airoha_ppe_is_enabled(eth, 1)) {
		/* hw misses PPE2 oq rsv */
		all_rsv += PSE_RSV_PAGES *
			   pse_port_num_queues[FE_PSE_PORT_PPE2];
	}
	airoha_fe_set(eth, REG_FE_PSE_BUF_SET, all_rsv);

	/* CDM1 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM1]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM1, q,
					 PSE_QUEUE_RSV_PAGES);
	/* GDM1 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM1]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM1, q,
					 PSE_QUEUE_RSV_PAGES);
	/* GDM2 */
	for (q = 6; q < pse_port_num_queues[FE_PSE_PORT_GDM2]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM2, q, 0);
	/* GDM3 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM3]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM3, q,
					 PSE_QUEUE_RSV_PAGES);
	/* PPE1 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE1]; q++) {
		if (q < pse_port_num_queues[FE_PSE_PORT_PPE1] / 2)
			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q,
						 PSE_QUEUE_RSV_PAGES);
		else
			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q, 0);
	}
	/* CDM2 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM2]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM2, q,
					 PSE_QUEUE_RSV_PAGES);
	/* CDM3 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM3] - 1; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM3, q, 0);
	/* CDM4 */
	for (q = 4; q < pse_port_num_queues[FE_PSE_PORT_CDM4]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM4, q,
					 PSE_QUEUE_RSV_PAGES);
	if (airoha_ppe_is_enabled(eth, 1)) {
		/* PPE2 */
		for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) {
			if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2)
				airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2,
							 q,
							 PSE_QUEUE_RSV_PAGES);
			else
				airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2,
							 q, 0);
		}
	}
	/* GDM4 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM4]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM4, q,
					 PSE_QUEUE_RSV_PAGES);
	/* CDM5 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM5]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM5, q,
					 PSE_QUEUE_RSV_PAGES);
}

static int airoha_fe_mc_vlan_clear(struct airoha_eth *eth)
{
	int i;

	for (i = 0; i < AIROHA_FE_MC_MAX_VLAN_TABLE; i++) {
		int err, j;
		u32 val;

		airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);

		val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
		      MC_VLAN_CFG_TABLE_SEL_MASK | MC_VLAN_CFG_RW_MASK;
		airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
		err = read_poll_timeout(airoha_fe_rr, val,
					val & MC_VLAN_CFG_CMD_DONE_MASK,
					USEC_PER_MSEC, 5 * USEC_PER_MSEC,
					false, eth, REG_MC_VLAN_CFG);
		if (err)
			return err;

		for (j = 0; j < AIROHA_FE_MC_MAX_VLAN_PORT; j++) {
			airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);

			val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
			      FIELD_PREP(MC_VLAN_CFG_PORT_ID_MASK, j) |
			      MC_VLAN_CFG_RW_MASK;
			airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
			err = read_poll_timeout(airoha_fe_rr, val,
						val & MC_VLAN_CFG_CMD_DONE_MASK,
						USEC_PER_MSEC,
						5 * USEC_PER_MSEC, false, eth,
						REG_MC_VLAN_CFG);
			if (err)
				return err;
		}
	}

	return 0;
}

static void airoha_fe_crsn_qsel_init(struct airoha_eth *eth)
{
	/* CDM1_CRSN_QSEL */
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_22 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_22),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_22),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_08 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_08),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_08),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_21 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_21),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_21),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_24 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_24),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_24),
				 CDM_CRSN_QSEL_Q6));
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_25 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_25),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_25),
				 CDM_CRSN_QSEL_Q1));
	/* CDM2_CRSN_QSEL */
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_08 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_08),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_08),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_21 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_21),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_21),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_22 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_22),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_22),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_24 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_24),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_24),
				 CDM_CRSN_QSEL_Q6));
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_25 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_25),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_25),
				 CDM_CRSN_QSEL_Q1));
}

static int airoha_fe_init(struct airoha_eth *eth)
{
	airoha_fe_maccr_init(eth);

	/* PSE IQ reserve */
	airoha_fe_rmw(eth, REG_PSE_IQ_REV1, PSE_IQ_RES1_P2_MASK,
		      FIELD_PREP(PSE_IQ_RES1_P2_MASK, 0x10));
	airoha_fe_rmw(eth, REG_PSE_IQ_REV2,
		      PSE_IQ_RES2_P5_MASK | PSE_IQ_RES2_P4_MASK,
		      FIELD_PREP(PSE_IQ_RES2_P5_MASK, 0x40) |
		      FIELD_PREP(PSE_IQ_RES2_P4_MASK, 0x34));

	/* enable FE copy engine for MC/KA/DPI */
	airoha_fe_wr(eth, REG_FE_PCE_CFG,
		     PCE_DPI_EN_MASK | PCE_KA_EN_MASK | PCE_MC_EN_MASK);
	/* set vip queue selection to ring 1 */
	airoha_fe_rmw(eth, REG_CDM_FWD_CFG(1), CDM_VIP_QSEL_MASK,
		      FIELD_PREP(CDM_VIP_QSEL_MASK, 0x4));
	airoha_fe_rmw(eth, REG_CDM_FWD_CFG(2), CDM_VIP_QSEL_MASK,
		      FIELD_PREP(CDM_VIP_QSEL_MASK, 0x4));
	/* set GDM4 source interface offset to 8 */
	airoha_fe_rmw(eth, REG_GDM_SRC_PORT_SET(4),
		      GDM_SPORT_OFF2_MASK |
		      GDM_SPORT_OFF1_MASK |
		      GDM_SPORT_OFF0_MASK,
		      FIELD_PREP(GDM_SPORT_OFF2_MASK, 8) |
		      FIELD_PREP(GDM_SPORT_OFF1_MASK, 8) |
		      FIELD_PREP(GDM_SPORT_OFF0_MASK, 8));

	/* set PSE page size to 128B */
	airoha_fe_rmw(eth, REG_FE_DMA_GLO_CFG,
		      FE_DMA_GLO_L2_SPACE_MASK | FE_DMA_GLO_PG_SZ_MASK,
		      FIELD_PREP(FE_DMA_GLO_L2_SPACE_MASK, 2) |
		      FE_DMA_GLO_PG_SZ_MASK);
	airoha_fe_wr(eth, REG_FE_RST_GLO_CFG,
		     FE_RST_CORE_MASK | FE_RST_GDM3_MBI_ARB_MASK |
		     FE_RST_GDM4_MBI_ARB_MASK);
	usleep_range(1000, 2000);

	/* connect RxRing1 and RxRing15 to PSE Port0 OQ-1
	 * connect other rings to PSE Port0 OQ-0
	 */
	airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP0, BIT(4));
	airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP1, BIT(28));
	airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP2, BIT(4));
	airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP3, BIT(28));

	airoha_fe_vip_setup(eth);
	airoha_fe_pse_ports_init(eth);

	airoha_fe_set(eth, REG_GDM_MISC_CFG,
		      GDM2_RDM_ACK_WAIT_PREF_MASK |
		      GDM2_CHN_VLD_MODE_MASK);
	airoha_fe_rmw(eth, REG_CDM_FWD_CFG(2), CDM_OAM_QSEL_MASK,
		      FIELD_PREP(CDM_OAM_QSEL_MASK, 15));

	/* init IP fragment and assemble force port */
	/* NPU Core-3, NPU Bridge Channel-3 */
	airoha_fe_rmw(eth, REG_IP_FRAG_FP,
		      IP_FRAGMENT_PORT_MASK | IP_FRAGMENT_NBQ_MASK,
		      FIELD_PREP(IP_FRAGMENT_PORT_MASK, 6) |
		      FIELD_PREP(IP_FRAGMENT_NBQ_MASK, 3));
	/* QDMA LAN, RX Ring-22 */
	airoha_fe_rmw(eth, REG_IP_FRAG_FP,
		      IP_ASSEMBLE_PORT_MASK | IP_ASSEMBLE_NBQ_MASK,
		      FIELD_PREP(IP_ASSEMBLE_PORT_MASK, 0) |
		      FIELD_PREP(IP_ASSEMBLE_NBQ_MASK, 22));

	airoha_fe_set(eth, REG_GDM_FWD_CFG(AIROHA_GDM3_IDX), GDM_PAD_EN_MASK);
	airoha_fe_set(eth, REG_GDM_FWD_CFG(AIROHA_GDM4_IDX), GDM_PAD_EN_MASK);

	airoha_fe_crsn_qsel_init(eth);

	airoha_fe_clear(eth, REG_FE_CPORT_CFG, FE_CPORT_QUEUE_XFC_MASK);
	airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PORT_XFC_MASK);

	/* default aging mode for mbi unlock issue */
	airoha_fe_rmw(eth, REG_GDM_CHN_RLS(2),
		      MBI_RX_AGE_SEL_MASK | MBI_TX_AGE_SEL_MASK,
		      FIELD_PREP(MBI_RX_AGE_SEL_MASK, 3) |
		      FIELD_PREP(MBI_TX_AGE_SEL_MASK, 3));

	/* disable IFC by default */
	airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK);

	/* enable 1:N vlan action, init vlan table */
	airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK);

	return airoha_fe_mc_vlan_clear(eth);
}

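/* Refill an rx ring with page_pool fragments. One descriptor slot is
 * deliberately kept unused (q->queued < q->ndesc - 1) so a completely
 * full ring can be told apart from an empty one, and the hw CPU index is
 * advanced after each descriptor is armed.
 */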
static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
{
	struct airoha_qdma *qdma = q->qdma;
	int qid = q - &qdma->q_rx[0];
	int nframes = 0;

	while (q->queued < q->ndesc - 1) {
		struct airoha_queue_entry *e = &q->entry[q->head];
		struct airoha_qdma_desc *desc = &q->desc[q->head];
		struct page *page;
		int offset;
		u32 val;

		page = page_pool_dev_alloc_frag(q->page_pool, &offset,
						q->buf_size);
		if (!page)
			break;

		q->head = (q->head + 1) % q->ndesc;
		q->queued++;
		nframes++;

		e->buf = page_address(page) + offset;
		e->dma_addr = page_pool_get_dma_addr(page) + offset;
		e->dma_len = SKB_WITH_OVERHEAD(q->buf_size);

		val = FIELD_PREP(QDMA_DESC_LEN_MASK, e->dma_len);
		WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
		WRITE_ONCE(desc->addr, cpu_to_le32(e->dma_addr));
		val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, q->head);
		WRITE_ONCE(desc->data, cpu_to_le32(val));
		WRITE_ONCE(desc->msg0, 0);
		WRITE_ONCE(desc->msg1, 0);
		WRITE_ONCE(desc->msg2, 0);
		WRITE_ONCE(desc->msg3, 0);

		airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid),
				RX_RING_CPU_IDX_MASK,
				FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
	}

	return nframes;
}

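/* Translate the source-port field of the rx descriptor (msg1) into an
 * index in eth->ports[]: sport 0x10-0x14 all map to port 0 (presumably
 * the GDM1 queues), while sport 2-4 map to ports 1-3 (GDM2-GDM4).
 * Unknown source ports are rejected with -EINVAL.
 */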
static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
				    struct airoha_qdma_desc *desc)
{
	u32 port, sport, msg1 = le32_to_cpu(desc->msg1);

	sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1);
	switch (sport) {
	case 0x10 ... 0x14:
		port = 0;
		break;
	case 0x2 ... 0x4:
		port = sport - 1;
		break;
	default:
		return -EINVAL;
	}

	return port >= ARRAY_SIZE(eth->ports) ? -EINVAL : port;
}

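/* Reap completed rx descriptors for @q. The first buffer of a frame is
 * turned into an skb with napi_build_skb(); follow-up buffers flagged
 * with QDMA_DESC_MORE are appended as page frags until the descriptor
 * that completes the frame is reached, at which point the skb is handed
 * to the stack via napi_gro_receive().
 */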
static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
{
	enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
	struct airoha_qdma *qdma = q->qdma;
	struct airoha_eth *eth = qdma->eth;
	int qid = q - &qdma->q_rx[0];
	int done = 0;

	while (done < budget) {
		struct airoha_queue_entry *e = &q->entry[q->tail];
		struct airoha_qdma_desc *desc = &q->desc[q->tail];
		u32 hash, reason, msg1 = le32_to_cpu(desc->msg1);
		struct page *page = virt_to_head_page(e->buf);
		u32 desc_ctrl = le32_to_cpu(desc->ctrl);
		struct airoha_gdm_port *port;
		int data_len, len, p;

		if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
			break;

		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;

		dma_sync_single_for_cpu(eth->dev, e->dma_addr,
					SKB_WITH_OVERHEAD(q->buf_size), dir);

		len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
		data_len = q->skb ? q->buf_size
				  : SKB_WITH_OVERHEAD(q->buf_size);
		if (!len || data_len < len)
			goto free_frag;

		p = airoha_qdma_get_gdm_port(eth, desc);
		if (p < 0 || !eth->ports[p])
			goto free_frag;

		port = eth->ports[p];
		if (!q->skb) { /* first buffer */
			q->skb = napi_build_skb(e->buf, q->buf_size);
			if (!q->skb)
				goto free_frag;

			__skb_put(q->skb, len);
			skb_mark_for_recycle(q->skb);
			q->skb->dev = port->dev;
			q->skb->protocol = eth_type_trans(q->skb, port->dev);
			q->skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb_record_rx_queue(q->skb, qid);
		} else { /* scattered frame */
			struct skb_shared_info *shinfo = skb_shinfo(q->skb);
			int nr_frags = shinfo->nr_frags;

			if (nr_frags >= ARRAY_SIZE(shinfo->frags))
				goto free_frag;

			skb_add_rx_frag(q->skb, nr_frags, page,
					e->buf - page_address(page), len,
					q->buf_size);
		}

		if (FIELD_GET(QDMA_DESC_MORE_MASK, desc_ctrl))
			continue;

		if (netdev_uses_dsa(port->dev)) {
			/* PPE module requires untagged packets to work
			 * properly and it provides DSA port index via the
			 * DMA descriptor. Report DSA tag to the DSA stack
			 * via skb dst info.
			 */
			u32 sptag = FIELD_GET(QDMA_ETH_RXMSG_SPTAG,
					      le32_to_cpu(desc->msg0));

			if (sptag < ARRAY_SIZE(port->dsa_meta) &&
			    port->dsa_meta[sptag])
				skb_dst_set_noref(q->skb,
						  &port->dsa_meta[sptag]->dst);
		}

		hash = FIELD_GET(AIROHA_RXD4_FOE_ENTRY, msg1);
		if (hash != AIROHA_RXD4_FOE_ENTRY)
			skb_set_hash(q->skb, jhash_1word(hash, 0),
				     PKT_HASH_TYPE_L4);

		reason = FIELD_GET(AIROHA_RXD4_PPE_CPU_REASON, msg1);
		if (reason == PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
			airoha_ppe_check_skb(&eth->ppe->dev, q->skb, hash,
					     false);

		done++;
		napi_gro_receive(&q->napi, q->skb);
		q->skb = NULL;
		continue;
free_frag:
		if (q->skb) {
			dev_kfree_skb(q->skb);
			q->skb = NULL;
		}
		page_pool_put_full_page(q->page_pool, page, true);
	}
	airoha_qdma_fill_rx_queue(q);

	return done;
}

static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
{
	struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
	int cur, done = 0;

	do {
		cur = airoha_qdma_rx_process(q, budget - done);
		done += cur;
	} while (cur && done < budget);

	if (done < budget && napi_complete(napi)) {
		struct airoha_qdma *qdma = q->qdma;
		int i, qid = q - &qdma->q_rx[0];
		int intr_reg = qid < RX_DONE_HIGH_OFFSET ? QDMA_INT_REG_IDX1
							 : QDMA_INT_REG_IDX2;

		for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
			if (!(BIT(qid) & RX_IRQ_BANK_PIN_MASK(i)))
				continue;

			airoha_qdma_irq_enable(&qdma->irq_banks[i], intr_reg,
					       BIT(qid % RX_DONE_HIGH_OFFSET));
		}
	}

	return done;
}

static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
				     struct airoha_qdma *qdma, int ndesc)
{
	const struct page_pool_params pp_params = {
		.order = 0,
		.pool_size = 256,
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.dma_dir = DMA_FROM_DEVICE,
		.max_len = PAGE_SIZE,
		.nid = NUMA_NO_NODE,
		.dev = qdma->eth->dev,
		.napi = &q->napi,
	};
	struct airoha_eth *eth = qdma->eth;
	int qid = q - &qdma->q_rx[0], thr;
	dma_addr_t dma_addr;

	q->buf_size = PAGE_SIZE / 2;
	q->ndesc = ndesc;
	q->qdma = qdma;

	q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(q->page_pool)) {
		int err = PTR_ERR(q->page_pool);

		q->page_pool = NULL;
		return err;
	}

	q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
				      &dma_addr, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll);

	airoha_qdma_wr(qdma, REG_RX_RING_BASE(qid), dma_addr);
	airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid),
			RX_RING_SIZE_MASK,
			FIELD_PREP(RX_RING_SIZE_MASK, ndesc));

	thr = clamp(ndesc >> 3, 1, 32);
	airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
			FIELD_PREP(RX_RING_THR_MASK, thr));
	airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
			FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
	airoha_qdma_set(qdma, REG_RX_SCATTER_CFG(qid), RX_RING_SG_EN_MASK);

	airoha_qdma_fill_rx_queue(q);

	return 0;
}

static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
{
	struct airoha_qdma *qdma = q->qdma;
	struct airoha_eth *eth = qdma->eth;
	int qid = q - &qdma->q_rx[0];

	while (q->queued) {
		struct airoha_queue_entry *e = &q->entry[q->tail];
		struct airoha_qdma_desc *desc = &q->desc[q->tail];
		struct page *page = virt_to_head_page(e->buf);

		dma_sync_single_for_cpu(eth->dev, e->dma_addr, e->dma_len,
					page_pool_get_dma_dir(q->page_pool));
		page_pool_put_full_page(q->page_pool, page, false);
		/* Reset DMA descriptor */
		WRITE_ONCE(desc->ctrl, 0);
		WRITE_ONCE(desc->addr, 0);
		WRITE_ONCE(desc->data, 0);
		WRITE_ONCE(desc->msg0, 0);
		WRITE_ONCE(desc->msg1, 0);
		WRITE_ONCE(desc->msg2, 0);
		WRITE_ONCE(desc->msg3, 0);

		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;
	}

	q->head = q->tail;
	airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
			FIELD_PREP(RX_RING_DMA_IDX_MASK, q->tail));
}

static int airoha_qdma_init_rx(struct airoha_qdma *qdma)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		int err;

		if (!(RX_DONE_INT_MASK & BIT(i))) {
			/* rx-queue not bound to an irq */
			continue;
		}

		err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma,
						RX_DSCP_NUM(i));
		if (err)
			return err;
	}

	return 0;
}

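/* Tx completion NAPI handler. The hw posts completion entries (tx ring
 * index + descriptor index) into the coherent irq_q->q array, with 0xff
 * acting as the "empty" sentinel; every consumed entry is written back
 * to 0xff. Completions are acknowledged through IRQ_CLEAR_LEN in chunks
 * of at most 0x80 entries, e.g. done = 300 yields two writes of 0x80
 * followed by one of 300 & 0x7f = 44.
 */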
static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
{
	struct airoha_tx_irq_queue *irq_q;
	int id, done = 0, irq_queued;
	struct airoha_qdma *qdma;
	struct airoha_eth *eth;
	u32 status, head;

	irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
	qdma = irq_q->qdma;
	id = irq_q - &qdma->q_tx_irq[0];
	eth = qdma->eth;

	status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(id));
	head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
	head = head % irq_q->size;
	irq_queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);

	while (irq_queued > 0 && done < budget) {
		u32 qid, val = irq_q->q[head];
		struct airoha_qdma_desc *desc;
		struct airoha_queue_entry *e;
		struct airoha_queue *q;
		u32 index, desc_ctrl;
		struct sk_buff *skb;

		if (val == 0xff)
			break;

		irq_q->q[head] = 0xff; /* mark as done */
		head = (head + 1) % irq_q->size;
		irq_queued--;
		done++;

		qid = FIELD_GET(IRQ_RING_IDX_MASK, val);
		if (qid >= ARRAY_SIZE(qdma->q_tx))
			continue;

		q = &qdma->q_tx[qid];
		if (!q->ndesc)
			continue;

		index = FIELD_GET(IRQ_DESC_IDX_MASK, val);
		if (index >= q->ndesc)
			continue;

		spin_lock_bh(&q->lock);

		if (!q->queued)
			goto unlock;

		desc = &q->desc[index];
		desc_ctrl = le32_to_cpu(desc->ctrl);

		if (!(desc_ctrl & QDMA_DESC_DONE_MASK) &&
		    !(desc_ctrl & QDMA_DESC_DROP_MASK))
			goto unlock;

		e = &q->entry[index];
		skb = e->skb;

		dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
				 DMA_TO_DEVICE);
		e->dma_addr = 0;
		list_add_tail(&e->list, &q->tx_list);

		WRITE_ONCE(desc->msg0, 0);
		WRITE_ONCE(desc->msg1, 0);
		q->queued--;

		if (skb) {
			u16 queue = skb_get_queue_mapping(skb);
			struct netdev_queue *txq;

			txq = netdev_get_tx_queue(skb->dev, queue);
			netdev_tx_completed_queue(txq, 1, skb->len);
			if (netif_tx_queue_stopped(txq) &&
			    q->ndesc - q->queued >= q->free_thr)
				netif_tx_wake_queue(txq);

			dev_kfree_skb_any(skb);
		}
unlock:
		spin_unlock_bh(&q->lock);
	}

	if (done) {
		int i, len = done >> 7;

		for (i = 0; i < len; i++)
			airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
					IRQ_CLEAR_LEN_MASK, 0x80);
		airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
				IRQ_CLEAR_LEN_MASK, (done & 0x7f));
	}

	if (done < budget && napi_complete(napi))
		airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0,
				       TX_DONE_INT_MASK(id));

	return done;
}

static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
				     struct airoha_qdma *qdma, int size)
{
	struct airoha_eth *eth = qdma->eth;
	int i, qid = q - &qdma->q_tx[0];
	dma_addr_t dma_addr;

	spin_lock_init(&q->lock);
	q->ndesc = size;
	q->qdma = qdma;
	q->free_thr = 1 + MAX_SKB_FRAGS;
	INIT_LIST_HEAD(&q->tx_list);

	q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
				      &dma_addr, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	for (i = 0; i < q->ndesc; i++) {
		u32 val = FIELD_PREP(QDMA_DESC_DONE_MASK, 1);

		list_add_tail(&q->entry[i].list, &q->tx_list);
		WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
	}

	/* xmit ring drop default setting */
	airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(qid),
			TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK);

	airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
	airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
			FIELD_PREP(TX_RING_CPU_IDX_MASK, 0));
	airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
			FIELD_PREP(TX_RING_DMA_IDX_MASK, 0));

	return 0;
}

static int airoha_qdma_tx_irq_init(struct airoha_tx_irq_queue *irq_q,
				   struct airoha_qdma *qdma, int size)
{
	int id = irq_q - &qdma->q_tx_irq[0];
	struct airoha_eth *eth = qdma->eth;
	dma_addr_t dma_addr;

	netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
			  airoha_qdma_tx_napi_poll);
	irq_q->q = dmam_alloc_coherent(eth->dev, size * sizeof(u32),
				       &dma_addr, GFP_KERNEL);
	if (!irq_q->q)
		return -ENOMEM;

	memset(irq_q->q, 0xff, size * sizeof(u32));
	irq_q->size = size;
	irq_q->qdma = qdma;

	airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr);
	airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
			FIELD_PREP(TX_IRQ_DEPTH_MASK, size));
	airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK,
			FIELD_PREP(TX_IRQ_THR_MASK, 1));

	return 0;
}

static int airoha_qdma_init_tx(struct airoha_qdma *qdma)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
		err = airoha_qdma_tx_irq_init(&qdma->q_tx_irq[i], qdma,
					      IRQ_QUEUE_LEN(i));
		if (err)
			return err;
	}

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
		err = airoha_qdma_init_tx_queue(&qdma->q_tx[i], qdma,
						TX_DSCP_NUM);
		if (err)
			return err;
	}

	return 0;
}

static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
{
	struct airoha_eth *eth = q->qdma->eth;
	int i;

	spin_lock_bh(&q->lock);
	for (i = 0; i < q->ndesc; i++) {
		struct airoha_queue_entry *e = &q->entry[i];

		if (!e->dma_addr)
			continue;

		dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(e->skb);
		e->dma_addr = 0;
		e->skb = NULL;
		list_add_tail(&e->list, &q->tx_list);
		q->queued--;
	}
	spin_unlock_bh(&q->lock);
}

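/* Set up the hw forwarding (hfwd) descriptor and payload buffer rings.
 * Payload storage comes from an optional "qdma%d-buf" reserved-memory
 * region when the DTS provides one, with the descriptor count derived
 * from the region size; otherwise it falls back to a coherent DMA
 * allocation of HW_DSCP_NUM buffers.
 */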
static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
{
	int size, index, num_desc = HW_DSCP_NUM;
	struct airoha_eth *eth = qdma->eth;
	int id = qdma - &eth->qdma[0];
	u32 status, buf_size;
	dma_addr_t dma_addr;
	const char *name;

	name = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d-buf", id);
	if (!name)
		return -ENOMEM;

	buf_size = id ? AIROHA_MAX_PACKET_SIZE / 2 : AIROHA_MAX_PACKET_SIZE;
	index = of_property_match_string(eth->dev->of_node,
					 "memory-region-names", name);
	if (index >= 0) {
		struct reserved_mem *rmem;
		struct device_node *np;

		/* Consume reserved memory for hw forwarding buffers queue if
		 * available in the DTS
		 */
		np = of_parse_phandle(eth->dev->of_node, "memory-region",
				      index);
		if (!np)
			return -ENODEV;

		rmem = of_reserved_mem_lookup(np);
		of_node_put(np);
		dma_addr = rmem->base;
		/* Compute the number of hw descriptors according to the
		 * reserved memory size and the payload buffer size
		 */
		num_desc = div_u64(rmem->size, buf_size);
	} else {
		size = buf_size * num_desc;
		if (!dmam_alloc_coherent(eth->dev, size, &dma_addr,
					 GFP_KERNEL))
			return -ENOMEM;
	}

	airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);

	size = num_desc * sizeof(struct airoha_qdma_fwd_desc);
	if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, GFP_KERNEL))
		return -ENOMEM;

	airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
	/* QDMA0: 2KB. QDMA1: 1KB */
	airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
			HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
			FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, !!id));
	airoha_qdma_rmw(qdma, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
			FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
	airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
			LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
			HW_FWD_DESC_NUM_MASK,
			FIELD_PREP(HW_FWD_DESC_NUM_MASK, num_desc) |
			LMGR_INIT_START | LMGR_SRAM_MODE_MASK);

	return read_poll_timeout(airoha_qdma_rr, status,
				 !(status & LMGR_INIT_START), USEC_PER_MSEC,
				 30 * USEC_PER_MSEC, true, qdma,
				 REG_LMGR_INIT_CFG);
}

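/* QoS and rate-limiting defaults. The trTCM blocks run on a two-level
 * tick; assuming the slow tick is fast-tick * ratio, 25us * 40 and
 * 125us * 8 both work out to a 1ms slow tick.
 */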
static void airoha_qdma_init_qos(struct airoha_qdma *qdma)
{
	airoha_qdma_clear(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
	airoha_qdma_set(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);

	airoha_qdma_clear(qdma, REG_PSE_BUF_USAGE_CFG,
			  PSE_BUF_ESTIMATE_EN_MASK);

	airoha_qdma_set(qdma, REG_EGRESS_RATE_METER_CFG,
			EGRESS_RATE_METER_EN_MASK |
			EGRESS_RATE_METER_EQ_RATE_EN_MASK);
	/* 2047us x 31 = 63.457ms */
	airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
			EGRESS_RATE_METER_WINDOW_SZ_MASK,
			FIELD_PREP(EGRESS_RATE_METER_WINDOW_SZ_MASK, 0x1f));
	airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
			EGRESS_RATE_METER_TIMESLICE_MASK,
			FIELD_PREP(EGRESS_RATE_METER_TIMESLICE_MASK, 0x7ff));

	/* ratelimit init */
	airoha_qdma_set(qdma, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK);
	/* fast-tick 25us */
	airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK,
			FIELD_PREP(GLB_FAST_TICK_MASK, 25));
	airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK,
			FIELD_PREP(GLB_SLOW_TICK_RATIO_MASK, 40));

	airoha_qdma_set(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK);
	airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK,
			FIELD_PREP(EGRESS_FAST_TICK_MASK, 25));
	airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG,
			EGRESS_SLOW_TICK_RATIO_MASK,
			FIELD_PREP(EGRESS_SLOW_TICK_RATIO_MASK, 40));

	airoha_qdma_set(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK);
	airoha_qdma_clear(qdma, REG_INGRESS_TRTCM_CFG,
			  INGRESS_TRTCM_MODE_MASK);
	airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK,
			FIELD_PREP(INGRESS_FAST_TICK_MASK, 125));
	airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG,
			INGRESS_SLOW_TICK_RATIO_MASK,
			FIELD_PREP(INGRESS_SLOW_TICK_RATIO_MASK, 8));

	airoha_qdma_set(qdma, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
	airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
			FIELD_PREP(SLA_FAST_TICK_MASK, 25));
	airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
			FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
}

static void airoha_qdma_init_qos_stats(struct airoha_qdma *qdma)
{
	int i;

	for (i = 0; i < AIROHA_NUM_QOS_CHANNELS; i++) {
		/* Tx-cpu transferred count */
		airoha_qdma_wr(qdma, REG_CNTR_VAL(i << 1), 0);
		airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1),
			       CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
			       CNTR_ALL_DSCP_RING_EN_MASK |
			       FIELD_PREP(CNTR_CHAN_MASK, i));
		/* Tx-fwd transferred count */
		airoha_qdma_wr(qdma, REG_CNTR_VAL((i << 1) + 1), 0);
		airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1),
			       CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
			       CNTR_ALL_DSCP_RING_EN_MASK |
			       FIELD_PREP(CNTR_SRC_MASK, 1) |
			       FIELD_PREP(CNTR_CHAN_MASK, i));
	}
}

static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
		/* clear pending irqs */
		airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);
		/* setup rx irqs */
		airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX0,
				       INT_RX0_MASK(RX_IRQ_BANK_PIN_MASK(i)));
		airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX1,
				       INT_RX1_MASK(RX_IRQ_BANK_PIN_MASK(i)));
		airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX2,
				       INT_RX2_MASK(RX_IRQ_BANK_PIN_MASK(i)));
		airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX3,
				       INT_RX3_MASK(RX_IRQ_BANK_PIN_MASK(i)));
	}
	/* setup tx irqs */
	airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0,
			       TX_COHERENT_LOW_INT_MASK | INT_TX_MASK);
	airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX4,
			       TX_COHERENT_HIGH_INT_MASK);

	/* setup irq binding */
	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
		if (!qdma->q_tx[i].ndesc)
			continue;

		if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i))
			airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(i),
					TX_RING_IRQ_BLOCKING_CFG_MASK);
		else
			airoha_qdma_clear(qdma, REG_TX_RING_BLOCKING(i),
					  TX_RING_IRQ_BLOCKING_CFG_MASK);
	}

	airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
		       FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
		       GLOBAL_CFG_CPU_TXR_RR_MASK |
		       GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK |
		       GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK |
		       GLOBAL_CFG_MULTICAST_EN_MASK |
		       GLOBAL_CFG_IRQ0_EN_MASK | GLOBAL_CFG_IRQ1_EN_MASK |
		       GLOBAL_CFG_TX_WB_DONE_MASK |
		       FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2));

	airoha_qdma_init_qos(qdma);

	/* disable qdma rx delay interrupt */
	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		if (!qdma->q_rx[i].ndesc)
			continue;

		airoha_qdma_clear(qdma, REG_RX_DELAY_INT_IDX(i),
				  RX_DELAY_INT_MASK);
	}

	airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG,
			TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);
	airoha_qdma_init_qos_stats(qdma);

	return 0;
}

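/* Per-bank hard irq handler: latch and ack the masked interrupt status,
 * mask the reported rx/tx done sources and kick the matching NAPI
 * contexts, which re-enable the interrupt lines once the rings have
 * been drained.
 */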
static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
{
	struct airoha_irq_bank *irq_bank = dev_instance;
	struct airoha_qdma *qdma = irq_bank->qdma;
	u32 rx_intr_mask = 0, rx_intr1, rx_intr2;
	u32 intr[ARRAY_SIZE(irq_bank->irqmask)];
	int i;

	for (i = 0; i < ARRAY_SIZE(intr); i++) {
		intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
		intr[i] &= irq_bank->irqmask[i];
		airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
	}

	if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state))
		return IRQ_NONE;

	rx_intr1 = intr[1] & RX_DONE_LOW_INT_MASK;
	if (rx_intr1) {
		airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX1, rx_intr1);
		rx_intr_mask |= rx_intr1;
	}

	rx_intr2 = intr[2] & RX_DONE_HIGH_INT_MASK;
	if (rx_intr2) {
		airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX2, rx_intr2);
		rx_intr_mask |= (rx_intr2 << 16);
	}

	for (i = 0; rx_intr_mask && i < ARRAY_SIZE(qdma->q_rx); i++) {
		if (!qdma->q_rx[i].ndesc)
			continue;

		if (rx_intr_mask & BIT(i))
			napi_schedule(&qdma->q_rx[i].napi);
	}

	if (intr[0] & INT_TX_MASK) {
		for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
			if (!(intr[0] & TX_DONE_INT_MASK(i)))
				continue;

			airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX0,
						TX_DONE_INT_MASK(i));
			napi_schedule(&qdma->q_tx_irq[i].napi);
		}
	}

	return IRQ_HANDLED;
}

static int airoha_qdma_init_irq_banks(struct platform_device *pdev,
				      struct airoha_qdma *qdma)
{
	struct airoha_eth *eth = qdma->eth;
	int i, id = qdma - &eth->qdma[0];

	for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
		struct airoha_irq_bank *irq_bank = &qdma->irq_banks[i];
		int err, irq_index = 4 * id + i;
		const char *name;

		spin_lock_init(&irq_bank->irq_lock);
		irq_bank->qdma = qdma;

		irq_bank->irq = platform_get_irq(pdev, irq_index);
		if (irq_bank->irq < 0)
			return irq_bank->irq;

		name = devm_kasprintf(eth->dev, GFP_KERNEL,
				      KBUILD_MODNAME ".%d", irq_index);
		if (!name)
			return -ENOMEM;

		err = devm_request_irq(eth->dev, irq_bank->irq,
				       airoha_irq_handler, IRQF_SHARED, name,
				       irq_bank);
		if (err)
			return err;
	}

	return 0;
}

static int airoha_qdma_init(struct platform_device *pdev,
			    struct airoha_eth *eth,
			    struct airoha_qdma *qdma)
{
	int err, id = qdma - &eth->qdma[0];
	const char *res;

	qdma->eth = eth;
	res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id);
	if (!res)
		return -ENOMEM;

	qdma->regs = devm_platform_ioremap_resource_byname(pdev, res);
	if (IS_ERR(qdma->regs))
		return dev_err_probe(eth->dev, PTR_ERR(qdma->regs),
				     "failed to iomap qdma%d regs\n", id);

	err = airoha_qdma_init_irq_banks(pdev, qdma);
	if (err)
		return err;

	err = airoha_qdma_init_rx(qdma);
	if (err)
		return err;

	err = airoha_qdma_init_tx(qdma);
	if (err)
		return err;

	err = airoha_qdma_init_hfwd_queues(qdma);
	if (err)
		return err;

	return airoha_qdma_hw_init(qdma);
}

static int airoha_hw_init(struct platform_device *pdev,
			  struct airoha_eth *eth)
{
	int err, i;

	/* disable xsi */
	err = reset_control_bulk_assert(eth->soc->num_xsi_rsts, eth->xsi_rsts);
	if (err)
		return err;

	err = reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts);
	if (err)
		return err;

	msleep(20);
	err = reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts);
	if (err)
		return err;

	msleep(20);
	err = airoha_fe_init(eth);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
		err = airoha_qdma_init(pdev, eth, &eth->qdma[i]);
		if (err)
			return err;
	}

	err = airoha_ppe_init(eth);
	if (err)
		return err;

	set_bit(DEV_STATE_INITIALIZED, &eth->state);

	return 0;
}

static void airoha_hw_cleanup(struct airoha_qdma *qdma)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		if (!qdma->q_rx[i].ndesc)
			continue;

		netif_napi_del(&qdma->q_rx[i].napi);
		airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]);
		if (qdma->q_rx[i].page_pool)
			page_pool_destroy(qdma->q_rx[i].page_pool);
	}

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
		netif_napi_del(&qdma->q_tx_irq[i].napi);

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
		if (!qdma->q_tx[i].ndesc)
			continue;

		airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
	}
}

static void airoha_qdma_start_napi(struct airoha_qdma *qdma)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
		napi_enable(&qdma->q_tx_irq[i].napi);

	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		if (!qdma->q_rx[i].ndesc)
			continue;

		napi_enable(&qdma->q_rx[i].napi);
	}
}

static void airoha_qdma_stop_napi(struct airoha_qdma *qdma)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
		napi_disable(&qdma->q_tx_irq[i].napi);

	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		if (!qdma->q_rx[i].ndesc)
			continue;

		napi_disable(&qdma->q_rx[i].napi);
	}
}

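/* Fold the GDM MIB counters into the 64-bit software stats under
 * port->stats.syncp. 64-bit hw counters are split across _H/_L
 * registers, so the high word is read first and shifted into place
 * ((u64)val << 32), and the hw counters are reset through
 * REG_FE_GDM_MIB_CLEAR once the whole block has been sampled.
 */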
static void airoha_update_hw_stats(struct airoha_gdm_port *port)
{
	struct airoha_eth *eth = port->qdma->eth;
	u32 val, i = 0;

	spin_lock(&port->stats.lock);
	u64_stats_update_begin(&port->stats.syncp);

	/* TX */
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_H(port->id));
	port->stats.tx_ok_pkts += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_L(port->id));
	port->stats.tx_ok_pkts += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_H(port->id));
	port->stats.tx_ok_bytes += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_L(port->id));
	port->stats.tx_ok_bytes += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_DROP_CNT(port->id));
	port->stats.tx_drops += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_BC_CNT(port->id));
	port->stats.tx_broadcast += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_MC_CNT(port->id));
	port->stats.tx_multicast += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_RUNT_CNT(port->id));
	port->stats.tx_len[i] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_LONG_CNT(port->id));
	port->stats.tx_len[i++] += val;

	/* RX */
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_H(port->id));
	port->stats.rx_ok_pkts += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_L(port->id));
	port->stats.rx_ok_pkts += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_H(port->id));
	port->stats.rx_ok_bytes += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_L(port->id));
	port->stats.rx_ok_bytes += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_DROP_CNT(port->id));
	port->stats.rx_drops += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_BC_CNT(port->id));
	port->stats.rx_broadcast += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_MC_CNT(port->id));
	port->stats.rx_multicast += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ERROR_DROP_CNT(port->id));
	port->stats.rx_errors += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_CRC_ERR_CNT(port->id));
	port->stats.rx_crc_error += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OVERFLOW_DROP_CNT(port->id));
	port->stats.rx_over_errors += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_FRAG_CNT(port->id));
	port->stats.rx_fragment += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_JABBER_CNT(port->id));
	port->stats.rx_jabber += val;

	i = 0;
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_RUNT_CNT(port->id));
	port->stats.rx_len[i] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_LONG_CNT(port->id));
	port->stats.rx_len[i++] += val;

	/* reset mib counters */
	airoha_fe_set(eth, REG_FE_GDM_MIB_CLEAR(port->id),
		      FE_GDM_MIB_RX_CLEAR_MASK | FE_GDM_MIB_TX_CLEAR_MASK);

	u64_stats_update_end(&port->stats.syncp);
	spin_unlock(&port->stats.lock);
}

airoha_dev_open(struct net_device * dev)1624 static int airoha_dev_open(struct net_device *dev)
1625 {
1626 	int err, len = ETH_HLEN + dev->mtu + ETH_FCS_LEN;
1627 	struct airoha_gdm_port *port = netdev_priv(dev);
1628 	struct airoha_qdma *qdma = port->qdma;
1629 
1630 	netif_tx_start_all_queues(dev);
1631 	err = airoha_set_vip_for_gdm_port(port, true);
1632 	if (err)
1633 		return err;
1634 
1635 	if (netdev_uses_dsa(dev))
1636 		airoha_fe_set(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
1637 			      GDM_STAG_EN_MASK);
1638 	else
1639 		airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
1640 				GDM_STAG_EN_MASK);
1641 
1642 	airoha_fe_rmw(qdma->eth, REG_GDM_LEN_CFG(port->id),
1643 		      GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
1644 		      FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
1645 		      FIELD_PREP(GDM_LONG_LEN_MASK, len));
1646 
1647 	airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
1648 			GLOBAL_CFG_TX_DMA_EN_MASK |
1649 			GLOBAL_CFG_RX_DMA_EN_MASK);
1650 	atomic_inc(&qdma->users);
1651 
1652 	return 0;
1653 }
1654 
1655 static int airoha_dev_stop(struct net_device *dev)
1656 {
1657 	struct airoha_gdm_port *port = netdev_priv(dev);
1658 	struct airoha_qdma *qdma = port->qdma;
1659 	int i, err;
1660 
1661 	netif_tx_disable(dev);
1662 	err = airoha_set_vip_for_gdm_port(port, false);
1663 	if (err)
1664 		return err;
1665 
1666 	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++)
1667 		netdev_tx_reset_subqueue(dev, i);
1668 
1669 	if (atomic_dec_and_test(&qdma->users)) {
1670 		airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
1671 				  GLOBAL_CFG_TX_DMA_EN_MASK |
1672 				  GLOBAL_CFG_RX_DMA_EN_MASK);
1673 
1674 		for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
1675 			if (!qdma->q_tx[i].ndesc)
1676 				continue;
1677 
1678 			airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
1679 		}
1680 	}
1681 
1682 	return 0;
1683 }
1684 
1685 static int airoha_dev_set_macaddr(struct net_device *dev, void *p)
1686 {
1687 	struct airoha_gdm_port *port = netdev_priv(dev);
1688 	int err;
1689 
1690 	err = eth_mac_addr(dev, p);
1691 	if (err)
1692 		return err;
1693 
1694 	airoha_set_macaddr(port, dev->dev_addr);
1695 
1696 	return 0;
1697 }
1698 
1699 static int airhoha_set_gdm2_loopback(struct airoha_gdm_port *port)
1700 {
1701 	struct airoha_eth *eth = port->qdma->eth;
1702 	u32 val, pse_port, chan, nbq;
1703 	int src_port;
1704 
1705 	/* Forward the traffic to the proper GDM port */
1706 	pse_port = port->id == AIROHA_GDM3_IDX ? FE_PSE_PORT_GDM3
1707 					       : FE_PSE_PORT_GDM4;
1708 	airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(AIROHA_GDM2_IDX),
1709 				    pse_port);
1710 	airoha_fe_clear(eth, REG_GDM_FWD_CFG(AIROHA_GDM2_IDX),
1711 			GDM_STRIP_CRC_MASK);
1712 
1713 	/* Enable GDM2 loopback */
1714 	airoha_fe_wr(eth, REG_GDM_TXCHN_EN(AIROHA_GDM2_IDX), 0xffffffff);
1715 	airoha_fe_wr(eth, REG_GDM_RXCHN_EN(AIROHA_GDM2_IDX), 0xffff);
1716 
1717 	chan = port->id == AIROHA_GDM3_IDX ? airoha_is_7581(eth) ? 4 : 3 : 0;
1718 	airoha_fe_rmw(eth, REG_GDM_LPBK_CFG(AIROHA_GDM2_IDX),
1719 		      LPBK_CHAN_MASK | LPBK_MODE_MASK | LPBK_EN_MASK,
1720 		      FIELD_PREP(LPBK_CHAN_MASK, chan) |
1721 		      LBK_GAP_MODE_MASK | LBK_LEN_MODE_MASK |
1722 		      LBK_CHAN_MODE_MASK | LPBK_EN_MASK);
1723 	airoha_fe_rmw(eth, REG_GDM_LEN_CFG(AIROHA_GDM2_IDX),
1724 		      GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
1725 		      FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
1726 		      FIELD_PREP(GDM_LONG_LEN_MASK, AIROHA_MAX_MTU));
1727 
1728 	/* Disable VIP and IFC for GDM2 */
1729 	airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, BIT(AIROHA_GDM2_IDX));
1730 	airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, BIT(AIROHA_GDM2_IDX));
1731 
1732 	/* XXX: handle XSI_USB_PORT and XSI_PCE1_PORT */
1733 	nbq = port->id == AIROHA_GDM3_IDX && airoha_is_7581(eth) ? 4 : 0;
1734 	src_port = eth->soc->ops.get_src_port_id(port, nbq);
1735 	if (src_port < 0)
1736 		return src_port;
1737 
1738 	airoha_fe_rmw(eth, REG_FE_WAN_PORT,
1739 		      WAN1_EN_MASK | WAN1_MASK | WAN0_MASK,
1740 		      FIELD_PREP(WAN0_MASK, src_port));
1741 	val = src_port & SP_CPORT_DFT_MASK;
1742 	airoha_fe_rmw(eth,
1743 		      REG_SP_DFT_CPORT(src_port >> fls(SP_CPORT_DFT_MASK)),
1744 		      SP_CPORT_MASK(val),
1745 		      FE_PSE_PORT_CDM2 << __ffs(SP_CPORT_MASK(val)));
1746 
1747 	if (port->id != AIROHA_GDM3_IDX && airoha_is_7581(eth))
1748 		airoha_fe_rmw(eth, REG_SRC_PORT_FC_MAP6,
1749 			      FC_ID_OF_SRC_PORT24_MASK,
1750 			      FIELD_PREP(FC_ID_OF_SRC_PORT24_MASK, 2));
1751 
1752 	return 0;
1753 }
1754 
1755 static int airoha_dev_init(struct net_device *dev)
1756 {
1757 	struct airoha_gdm_port *port = netdev_priv(dev);
1758 	struct airoha_qdma *qdma = port->qdma;
1759 	struct airoha_eth *eth = qdma->eth;
1760 	u32 pse_port, fe_cpu_port;
1761 	u8 ppe_id;
1762 
1763 	airoha_set_macaddr(port, dev->dev_addr);
1764 
1765 	switch (port->id) {
1766 	case AIROHA_GDM3_IDX:
1767 	case AIROHA_GDM4_IDX:
1768 		/* If GDM2 is active, we can't enable loopback */
1769 		if (!eth->ports[1]) {
1770 			int err;
1771 
1772 			err = airhoha_set_gdm2_loopback(port);
1773 			if (err)
1774 				return err;
1775 		}
1776 		fallthrough;
1777 	case AIROHA_GDM2_IDX:
1778 		if (airoha_ppe_is_enabled(eth, 1)) {
1779 			/* For PPE2 always use secondary cpu port. */
1780 			fe_cpu_port = FE_PSE_PORT_CDM2;
1781 			pse_port = FE_PSE_PORT_PPE2;
1782 			break;
1783 		}
1784 		fallthrough;
1785 	default: {
1786 		u8 qdma_id = qdma - &eth->qdma[0];
1787 
1788 		/* For PPE1 select cpu port according to the running QDMA. */
1789 		fe_cpu_port = qdma_id ? FE_PSE_PORT_CDM2 : FE_PSE_PORT_CDM1;
1790 		pse_port = FE_PSE_PORT_PPE1;
1791 		break;
1792 	}
1793 	}
1794 
1795 	airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(port->id), pse_port);
1796 	ppe_id = pse_port == FE_PSE_PORT_PPE2 ? 1 : 0;
1797 	airoha_fe_rmw(eth, REG_PPE_DFT_CPORT0(ppe_id),
1798 		      DFT_CPORT_MASK(port->id),
1799 		      fe_cpu_port << __ffs(DFT_CPORT_MASK(port->id)));
1800 
1801 	return 0;
1802 }
1803 
1804 static void airoha_dev_get_stats64(struct net_device *dev,
1805 				   struct rtnl_link_stats64 *storage)
1806 {
1807 	struct airoha_gdm_port *port = netdev_priv(dev);
1808 	unsigned int start;
1809 
1810 	airoha_update_hw_stats(port);
1811 	do {
1812 		start = u64_stats_fetch_begin(&port->stats.syncp);
1813 		storage->rx_packets = port->stats.rx_ok_pkts;
1814 		storage->tx_packets = port->stats.tx_ok_pkts;
1815 		storage->rx_bytes = port->stats.rx_ok_bytes;
1816 		storage->tx_bytes = port->stats.tx_ok_bytes;
1817 		storage->multicast = port->stats.rx_multicast;
1818 		storage->rx_errors = port->stats.rx_errors;
1819 		storage->rx_dropped = port->stats.rx_drops;
1820 		storage->tx_dropped = port->stats.tx_drops;
1821 		storage->rx_crc_errors = port->stats.rx_crc_error;
1822 		storage->rx_over_errors = port->stats.rx_over_errors;
1823 	} while (u64_stats_fetch_retry(&port->stats.syncp, start));
1824 }
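
/* The fetch_begin/fetch_retry pair above is the standard u64_stats
 * seqcount idiom: the snapshot is re-read whenever it raced with a
 * writer (airoha_update_hw_stats()), keeping the 64-bit counters
 * consistent even on 32-bit platforms where the loads are not atomic.
 */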
1825 
1826 static int airoha_dev_change_mtu(struct net_device *dev, int mtu)
1827 {
1828 	struct airoha_gdm_port *port = netdev_priv(dev);
1829 	struct airoha_eth *eth = port->qdma->eth;
1830 	u32 len = ETH_HLEN + mtu + ETH_FCS_LEN;
1831 
1832 	airoha_fe_rmw(eth, REG_GDM_LEN_CFG(port->id),
1833 		      GDM_LONG_LEN_MASK,
1834 		      FIELD_PREP(GDM_LONG_LEN_MASK, len));
1835 	WRITE_ONCE(dev->mtu, mtu);
1836 
1837 	return 0;
1838 }
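
/* Worked example of the length programmed above: with the common MTU
 * of 1500, the GDM long-frame limit becomes ETH_HLEN (14) + 1500 +
 * ETH_FCS_LEN (4) = 1518 bytes.
 */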
1839 
1840 static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb,
1841 				   struct net_device *sb_dev)
1842 {
1843 	struct airoha_gdm_port *port = netdev_priv(dev);
1844 	int queue, channel;
1845 
1846 	/* For DSA devices, select the QoS channel according to the DSA
1847 	 * user port index; rely on the port id otherwise. Select the QoS
1848 	 * queue based on the skb priority.
1849 	 */
1850 	channel = netdev_uses_dsa(dev) ? skb_get_queue_mapping(skb) : port->id;
1851 	channel = channel % AIROHA_NUM_QOS_CHANNELS;
1852 	queue = (skb->priority - 1) % AIROHA_NUM_QOS_QUEUES; /* QoS queue */
1853 	queue = channel * AIROHA_NUM_QOS_QUEUES + queue;
1854 
1855 	return queue < dev->num_tx_queues ? queue : 0;
1856 }
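
/* Worked example of the mapping above, with illustrative values only
 * (the real AIROHA_NUM_QOS_* constants live in airoha_eth.h): assuming
 * 8 QoS queues per channel, a non-DSA skb with priority 3 leaving GDM
 * port 1 selects channel 1 and queue (3 - 1) % 8 = 2, i.e. hw queue
 * 1 * 8 + 2 = 10.
 */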
1857 
1858 static u32 airoha_get_dsa_tag(struct sk_buff *skb, struct net_device *dev)
1859 {
1860 #if IS_ENABLED(CONFIG_NET_DSA)
1861 	struct ethhdr *ehdr;
1862 	u8 xmit_tpid;
1863 	u16 tag;
1864 
1865 	if (!netdev_uses_dsa(dev))
1866 		return 0;
1867 
1868 	if (dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
1869 		return 0;
1870 
1871 	if (skb_cow_head(skb, 0))
1872 		return 0;
1873 
1874 	ehdr = (struct ethhdr *)skb->data;
1875 	tag = be16_to_cpu(ehdr->h_proto);
1876 	xmit_tpid = tag >> 8;
1877 
1878 	switch (xmit_tpid) {
1879 	case MTK_HDR_XMIT_TAGGED_TPID_8100:
1880 		ehdr->h_proto = cpu_to_be16(ETH_P_8021Q);
1881 		tag &= ~(MTK_HDR_XMIT_TAGGED_TPID_8100 << 8);
1882 		break;
1883 	case MTK_HDR_XMIT_TAGGED_TPID_88A8:
1884 		ehdr->h_proto = cpu_to_be16(ETH_P_8021AD);
1885 		tag &= ~(MTK_HDR_XMIT_TAGGED_TPID_88A8 << 8);
1886 		break;
1887 	default:
1888 		/* The PPE module requires untagged DSA packets to work
1889 		 * properly, so move the DSA tag into the DMA descriptor.
1890 		 */
1891 		memmove(skb->data + MTK_HDR_LEN, skb->data, 2 * ETH_ALEN);
1892 		__skb_pull(skb, MTK_HDR_LEN);
1893 		break;
1894 	}
1895 
1896 	return tag;
1897 #else
1898 	return 0;
1899 #endif
1900 }
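
/* Sketch of the frame layout the helper above consumes (per the MTK
 * tagger in net/dsa/tag_mtk.c; field meanings assumed from that file):
 *
 *   DA(6) | SA(6) | xmit_tpid(1) | port bits(1) | VLAN TCI(2) | ...
 *
 * When xmit_tpid flags a VLAN, only the EtherType is folded back to
 * 802.1Q/802.1AD and the TCI stays in the frame; otherwise the whole
 * 4-byte tag is stripped and reaches the hw via the TX descriptor
 * (QDMA_ETH_TXMSG_SP_TAG_MASK in airoha_dev_xmit()).
 */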
1901 
1902 static int airoha_get_fe_port(struct airoha_gdm_port *port)
1903 {
1904 	struct airoha_qdma *qdma = port->qdma;
1905 	struct airoha_eth *eth = qdma->eth;
1906 
1907 	switch (eth->soc->version) {
1908 	case 0x7583:
1909 		return port->id == AIROHA_GDM3_IDX ? FE_PSE_PORT_GDM3
1910 						   : port->id;
1911 	case 0x7581:
1912 	default:
1913 		return port->id == AIROHA_GDM4_IDX ? FE_PSE_PORT_GDM4
1914 						   : port->id;
1915 	}
1916 }
1917 
1918 static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
1919 				   struct net_device *dev)
1920 {
1921 	struct airoha_gdm_port *port = netdev_priv(dev);
1922 	struct airoha_qdma *qdma = port->qdma;
1923 	u32 nr_frags, tag, msg0, msg1, len;
1924 	struct airoha_queue_entry *e;
1925 	struct netdev_queue *txq;
1926 	struct airoha_queue *q;
1927 	LIST_HEAD(tx_list);
1928 	void *data;
1929 	int i, qid;
1930 	u16 index;
1931 	u8 fport;
1932 
1933 	qid = skb_get_queue_mapping(skb) % ARRAY_SIZE(qdma->q_tx);
1934 	tag = airoha_get_dsa_tag(skb, dev);
1935 
1936 	msg0 = FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK,
1937 			  qid / AIROHA_NUM_QOS_QUEUES) |
1938 	       FIELD_PREP(QDMA_ETH_TXMSG_QUEUE_MASK,
1939 			  qid % AIROHA_NUM_QOS_QUEUES) |
1940 	       FIELD_PREP(QDMA_ETH_TXMSG_SP_TAG_MASK, tag);
1941 	if (skb->ip_summed == CHECKSUM_PARTIAL)
1942 		msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) |
1943 			FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) |
1944 			FIELD_PREP(QDMA_ETH_TXMSG_ICO_MASK, 1);
1945 
1946 	/* TSO: fill MSS info in tcp checksum field */
1947 	if (skb_is_gso(skb)) {
1948 		if (skb_cow_head(skb, 0))
1949 			goto error;
1950 
1951 		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 |
1952 						 SKB_GSO_TCPV6)) {
1953 			__be16 csum = cpu_to_be16(skb_shinfo(skb)->gso_size);
1954 
1955 			tcp_hdr(skb)->check = (__force __sum16)csum;
1956 			msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TSO_MASK, 1);
1957 		}
1958 	}
1959 
1960 	fport = airoha_get_fe_port(port);
1961 	msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
1962 	       FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);
1963 
1964 	q = &qdma->q_tx[qid];
1965 	if (WARN_ON_ONCE(!q->ndesc))
1966 		goto error;
1967 
1968 	spin_lock_bh(&q->lock);
1969 
1970 	txq = netdev_get_tx_queue(dev, qid);
1971 	nr_frags = 1 + skb_shinfo(skb)->nr_frags;
1972 
1973 	if (q->queued + nr_frags >= q->ndesc) {
1974 		/* not enough space in the queue */
1975 		netif_tx_stop_queue(txq);
1976 		spin_unlock_bh(&q->lock);
1977 		return NETDEV_TX_BUSY;
1978 	}
1979 
1980 	len = skb_headlen(skb);
1981 	data = skb->data;
1982 
1983 	e = list_first_entry(&q->tx_list, struct airoha_queue_entry,
1984 			     list);
1985 	index = e - q->entry;
1986 
1987 	for (i = 0; i < nr_frags; i++) {
1988 		struct airoha_qdma_desc *desc = &q->desc[index];
1989 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1990 		dma_addr_t addr;
1991 		u32 val;
1992 
1993 		addr = dma_map_single(dev->dev.parent, data, len,
1994 				      DMA_TO_DEVICE);
1995 		if (unlikely(dma_mapping_error(dev->dev.parent, addr)))
1996 			goto error_unmap;
1997 
1998 		list_move_tail(&e->list, &tx_list);
1999 		e->skb = i ? NULL : skb;
2000 		e->dma_addr = addr;
2001 		e->dma_len = len;
2002 
2003 		e = list_first_entry(&q->tx_list, struct airoha_queue_entry,
2004 				     list);
2005 		index = e - q->entry;
2006 
2007 		val = FIELD_PREP(QDMA_DESC_LEN_MASK, len);
2008 		if (i < nr_frags - 1)
2009 			val |= FIELD_PREP(QDMA_DESC_MORE_MASK, 1);
2010 		WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
2011 		WRITE_ONCE(desc->addr, cpu_to_le32(addr));
2012 		val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, index);
2013 		WRITE_ONCE(desc->data, cpu_to_le32(val));
2014 		WRITE_ONCE(desc->msg0, cpu_to_le32(msg0));
2015 		WRITE_ONCE(desc->msg1, cpu_to_le32(msg1));
2016 		WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff));
2017 
2018 		data = skb_frag_address(frag);
2019 		len = skb_frag_size(frag);
2020 	}
2021 	q->queued += i;
2022 
2023 	skb_tx_timestamp(skb);
2024 	netdev_tx_sent_queue(txq, skb->len);
2025 
2026 	if (netif_xmit_stopped(txq) || !netdev_xmit_more())
2027 		airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid),
2028 				TX_RING_CPU_IDX_MASK,
2029 				FIELD_PREP(TX_RING_CPU_IDX_MASK, index));
2030 
2031 	if (q->ndesc - q->queued < q->free_thr)
2032 		netif_tx_stop_queue(txq);
2033 
2034 	spin_unlock_bh(&q->lock);
2035 
2036 	return NETDEV_TX_OK;
2037 
2038 error_unmap:
2039 	while (!list_empty(&tx_list)) {
2040 		e = list_first_entry(&tx_list, struct airoha_queue_entry,
2041 				     list);
2042 		dma_unmap_single(dev->dev.parent, e->dma_addr, e->dma_len,
2043 				 DMA_TO_DEVICE);
2044 		e->dma_addr = 0;
2045 		list_move_tail(&e->list, &q->tx_list);
2046 	}
2047 
2048 	spin_unlock_bh(&q->lock);
2049 error:
2050 	dev_kfree_skb_any(skb);
2051 	dev->stats.tx_dropped++;
2052 
2053 	return NETDEV_TX_OK;
2054 }
2055 
2056 static void airoha_ethtool_get_drvinfo(struct net_device *dev,
2057 				       struct ethtool_drvinfo *info)
2058 {
2059 	struct airoha_gdm_port *port = netdev_priv(dev);
2060 	struct airoha_eth *eth = port->qdma->eth;
2061 
2062 	strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver));
2063 	strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info));
2064 }
2065 
2066 static void airoha_ethtool_get_mac_stats(struct net_device *dev,
2067 					 struct ethtool_eth_mac_stats *stats)
2068 {
2069 	struct airoha_gdm_port *port = netdev_priv(dev);
2070 	unsigned int start;
2071 
2072 	airoha_update_hw_stats(port);
2073 	do {
2074 		start = u64_stats_fetch_begin(&port->stats.syncp);
2075 		stats->FramesTransmittedOK = port->stats.tx_ok_pkts;
2076 		stats->OctetsTransmittedOK = port->stats.tx_ok_bytes;
2077 		stats->MulticastFramesXmittedOK = port->stats.tx_multicast;
2078 		stats->BroadcastFramesXmittedOK = port->stats.tx_broadcast;
2079 		stats->FramesReceivedOK = port->stats.rx_ok_pkts;
2080 		stats->OctetsReceivedOK = port->stats.rx_ok_bytes;
2081 		stats->BroadcastFramesReceivedOK = port->stats.rx_broadcast;
2082 	} while (u64_stats_fetch_retry(&port->stats.syncp, start));
2083 }
2084 
2085 static const struct ethtool_rmon_hist_range airoha_ethtool_rmon_ranges[] = {
2086 	{    0,    64 },
2087 	{   65,   127 },
2088 	{  128,   255 },
2089 	{  256,   511 },
2090 	{  512,  1023 },
2091 	{ 1024,  1518 },
2092 	{ 1519, 10239 },
2093 	{},
2094 };
2095 
2096 static void
2097 airoha_ethtool_get_rmon_stats(struct net_device *dev,
2098 			      struct ethtool_rmon_stats *stats,
2099 			      const struct ethtool_rmon_hist_range **ranges)
2100 {
2101 	struct airoha_gdm_port *port = netdev_priv(dev);
2102 	struct airoha_hw_stats *hw_stats = &port->stats;
2103 	unsigned int start;
2104 
2105 	BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
2106 		     ARRAY_SIZE(hw_stats->tx_len) + 1);
2107 	BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
2108 		     ARRAY_SIZE(hw_stats->rx_len) + 1);
2109 
2110 	*ranges = airoha_ethtool_rmon_ranges;
2111 	airoha_update_hw_stats(port);
2112 	do {
2113 		int i;
2114 
2115 		start = u64_stats_fetch_begin(&port->stats.syncp);
2116 		stats->fragments = hw_stats->rx_fragment;
2117 		stats->jabbers = hw_stats->rx_jabber;
2118 		for (i = 0; i < ARRAY_SIZE(airoha_ethtool_rmon_ranges) - 1;
2119 		     i++) {
2120 			stats->hist[i] = hw_stats->rx_len[i];
2121 			stats->hist_tx[i] = hw_stats->tx_len[i];
2122 		}
2123 	} while (u64_stats_fetch_retry(&port->stats.syncp, start));
2124 }
2125 
2126 static int airoha_qdma_set_chan_tx_sched(struct airoha_gdm_port *port,
2127 					 int channel, enum tx_sched_mode mode,
2128 					 const u16 *weights, u8 n_weights)
2129 {
2130 	int i;
2131 
2132 	for (i = 0; i < AIROHA_NUM_TX_RING; i++)
2133 		airoha_qdma_clear(port->qdma, REG_QUEUE_CLOSE_CFG(channel),
2134 				  TXQ_DISABLE_CHAN_QUEUE_MASK(channel, i));
2135 
2136 	for (i = 0; i < n_weights; i++) {
2137 		u32 status;
2138 		int err;
2139 
2140 		airoha_qdma_wr(port->qdma, REG_TXWRR_WEIGHT_CFG,
2141 			       TWRR_RW_CMD_MASK |
2142 			       FIELD_PREP(TWRR_CHAN_IDX_MASK, channel) |
2143 			       FIELD_PREP(TWRR_QUEUE_IDX_MASK, i) |
2144 			       FIELD_PREP(TWRR_VALUE_MASK, weights[i]));
2145 		err = read_poll_timeout(airoha_qdma_rr, status,
2146 					status & TWRR_RW_CMD_DONE,
2147 					USEC_PER_MSEC, 10 * USEC_PER_MSEC,
2148 					true, port->qdma,
2149 					REG_TXWRR_WEIGHT_CFG);
2150 		if (err)
2151 			return err;
2152 	}
2153 
2154 	airoha_qdma_rmw(port->qdma, REG_CHAN_QOS_MODE(channel >> 3),
2155 			CHAN_QOS_MODE_MASK(channel),
2156 			mode << __ffs(CHAN_QOS_MODE_MASK(channel)));
2157 
2158 	return 0;
2159 }
2160 
2161 static int airoha_qdma_set_tx_prio_sched(struct airoha_gdm_port *port,
2162 					 int channel)
2163 {
2164 	static const u16 w[AIROHA_NUM_QOS_QUEUES] = {};
2165 
2166 	return airoha_qdma_set_chan_tx_sched(port, channel, TC_SCH_SP, w,
2167 					     ARRAY_SIZE(w));
2168 }
2169 
2170 static int airoha_qdma_set_tx_ets_sched(struct airoha_gdm_port *port,
2171 					int channel,
2172 					struct tc_ets_qopt_offload *opt)
2173 {
2174 	struct tc_ets_qopt_offload_replace_params *p = &opt->replace_params;
2175 	enum tx_sched_mode mode = TC_SCH_SP;
2176 	u16 w[AIROHA_NUM_QOS_QUEUES] = {};
2177 	int i, nstrict = 0;
2178 
2179 	if (p->bands > AIROHA_NUM_QOS_QUEUES)
2180 		return -EINVAL;
2181 
2182 	for (i = 0; i < p->bands; i++) {
2183 		if (!p->quanta[i])
2184 			nstrict++;
2185 	}
2186 
2187 	/* this configuration is not supported by the hw */
2188 	if (nstrict == AIROHA_NUM_QOS_QUEUES - 1)
2189 		return -EINVAL;
2190 
2191 	/* The EN7581 SoC supports a fixed QoS band priority scheme where
2192 	 * WRR queues always have lower priority than SP ones,
2193 	 * e.g.: WRR0, WRR1, .., WRRm, SP0, SP1, .., SPn
2194 	 */
2195 	for (i = 0; i < nstrict; i++) {
2196 		if (p->priomap[p->bands - i - 1] != i)
2197 			return -EINVAL;
2198 	}
2199 
2200 	for (i = 0; i < p->bands - nstrict; i++) {
2201 		if (p->priomap[i] != nstrict + i)
2202 			return -EINVAL;
2203 
2204 		w[i] = p->weights[nstrict + i];
2205 	}
2206 
2207 	if (!nstrict)
2208 		mode = TC_SCH_WRR8;
2209 	else if (nstrict < AIROHA_NUM_QOS_QUEUES - 1)
2210 		mode = nstrict + 1;
2211 
2212 	return airoha_qdma_set_chan_tx_sched(port, channel, mode, w,
2213 					     ARRAY_SIZE(w));
2214 }
2215 
2216 static int airoha_qdma_get_tx_ets_stats(struct airoha_gdm_port *port,
2217 					int channel,
2218 					struct tc_ets_qopt_offload *opt)
2219 {
2220 	u64 cpu_tx_packets = airoha_qdma_rr(port->qdma,
2221 					    REG_CNTR_VAL(channel << 1));
2222 	u64 fwd_tx_packets = airoha_qdma_rr(port->qdma,
2223 					    REG_CNTR_VAL((channel << 1) + 1));
2224 	u64 tx_packets = (cpu_tx_packets - port->cpu_tx_packets) +
2225 			 (fwd_tx_packets - port->fwd_tx_packets);
2226 	_bstats_update(opt->stats.bstats, 0, tx_packets);
2227 
2228 	port->cpu_tx_packets = cpu_tx_packets;
2229 	port->fwd_tx_packets = fwd_tx_packets;
2230 
2231 	return 0;
2232 }
2233 
2234 static int airoha_tc_setup_qdisc_ets(struct airoha_gdm_port *port,
2235 				     struct tc_ets_qopt_offload *opt)
2236 {
2237 	int channel;
2238 
2239 	if (opt->parent == TC_H_ROOT)
2240 		return -EINVAL;
2241 
2242 	channel = TC_H_MAJ(opt->handle) >> 16;
2243 	channel = channel % AIROHA_NUM_QOS_CHANNELS;
2244 
2245 	switch (opt->command) {
2246 	case TC_ETS_REPLACE:
2247 		return airoha_qdma_set_tx_ets_sched(port, channel, opt);
2248 	case TC_ETS_DESTROY:
2249 		/* PRIO is the default qdisc scheduler */
2250 		return airoha_qdma_set_tx_prio_sched(port, channel);
2251 	case TC_ETS_STATS:
2252 		return airoha_qdma_get_tx_ets_stats(port, channel, opt);
2253 	default:
2254 		return -EOPNOTSUPP;
2255 	}
2256 }
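
/* Illustrative userspace usage (tc invocation assumed, not taken from
 * driver documentation): ETS must hang off a per-channel parent, not
 * the root qdisc, e.g.
 *
 *   tc qdisc replace dev eth1 root handle 1: mq
 *   tc qdisc replace dev eth1 parent 1:1 handle 2: ets bands 8 \
 *      strict 2 quanta 4000 3000 2000 1000 800 400
 *
 * subject to the priomap ordering checks enforced in
 * airoha_qdma_set_tx_ets_sched() above.
 */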
2257 
2258 static int airoha_qdma_get_rl_param(struct airoha_qdma *qdma, int queue_id,
2259 				    u32 addr, enum trtcm_param_type param,
2260 				    u32 *val_low, u32 *val_high)
2261 {
2262 	u32 idx = QDMA_METER_IDX(queue_id), group = QDMA_METER_GROUP(queue_id);
2263 	u32 val, config = FIELD_PREP(RATE_LIMIT_PARAM_TYPE_MASK, param) |
2264 			  FIELD_PREP(RATE_LIMIT_METER_GROUP_MASK, group) |
2265 			  FIELD_PREP(RATE_LIMIT_PARAM_INDEX_MASK, idx);
2266 
2267 	airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
2268 	if (read_poll_timeout(airoha_qdma_rr, val,
2269 			      val & RATE_LIMIT_PARAM_RW_DONE_MASK,
2270 			      USEC_PER_MSEC, 10 * USEC_PER_MSEC, true, qdma,
2271 			      REG_TRTCM_CFG_PARAM(addr)))
2272 		return -ETIMEDOUT;
2273 
2274 	*val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr));
2275 	if (val_high)
2276 		*val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr));
2277 
2278 	return 0;
2279 }
2280 
2281 static int airoha_qdma_set_rl_param(struct airoha_qdma *qdma, int queue_id,
2282 				    u32 addr, enum trtcm_param_type param,
2283 				    u32 val)
2284 {
2285 	u32 idx = QDMA_METER_IDX(queue_id), group = QDMA_METER_GROUP(queue_id);
2286 	u32 config = RATE_LIMIT_PARAM_RW_MASK |
2287 		     FIELD_PREP(RATE_LIMIT_PARAM_TYPE_MASK, param) |
2288 		     FIELD_PREP(RATE_LIMIT_METER_GROUP_MASK, group) |
2289 		     FIELD_PREP(RATE_LIMIT_PARAM_INDEX_MASK, idx);
2290 
2291 	airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val);
2292 	airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
2293 
2294 	return read_poll_timeout(airoha_qdma_rr, val,
2295 				 val & RATE_LIMIT_PARAM_RW_DONE_MASK,
2296 				 USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
2297 				 qdma, REG_TRTCM_CFG_PARAM(addr));
2298 }
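
/* Both rate-limit accessors above follow the same mailbox-style
 * protocol: stage the payload in REG_TRTCM_DATA_LOW/HIGH, kick the
 * command through REG_TRTCM_CFG_PARAM, then poll
 * RATE_LIMIT_PARAM_RW_DONE for up to 10ms before the data registers
 * may be touched again.
 */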
2299 
2300 static int airoha_qdma_set_rl_config(struct airoha_qdma *qdma, int queue_id,
2301 				     u32 addr, bool enable, u32 enable_mask)
2302 {
2303 	u32 val;
2304 	int err;
2305 
2306 	err = airoha_qdma_get_rl_param(qdma, queue_id, addr, TRTCM_MISC_MODE,
2307 				       &val, NULL);
2308 	if (err)
2309 		return err;
2310 
2311 	val = enable ? val | enable_mask : val & ~enable_mask;
2312 
2313 	return airoha_qdma_set_rl_param(qdma, queue_id, addr, TRTCM_MISC_MODE,
2314 					val);
2315 }
2316 
2317 static int airoha_qdma_set_rl_token_bucket(struct airoha_qdma *qdma,
2318 					   int queue_id, u32 rate_val,
2319 					   u32 bucket_size)
2320 {
2321 	u32 val, config, tick, unit, rate, rate_frac;
2322 	int err;
2323 
2324 	err = airoha_qdma_get_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
2325 				       TRTCM_MISC_MODE, &config, NULL);
2326 	if (err)
2327 		return err;
2328 
2329 	val = airoha_qdma_rr(qdma, REG_INGRESS_TRTCM_CFG);
2330 	tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val);
2331 	if (config & TRTCM_TICK_SEL)
2332 		tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val);
2333 	if (!tick)
2334 		return -EINVAL;
2335 
2336 	unit = (config & TRTCM_PKT_MODE) ? 1000000 / tick : 8000 / tick;
2337 	if (!unit)
2338 		return -EINVAL;
2339 
2340 	rate = rate_val / unit;
2341 	rate_frac = rate_val % unit;
2342 	rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit;
2343 	rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) |
2344 	       FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac);
2345 
2346 	err = airoha_qdma_set_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
2347 				       TRTCM_TOKEN_RATE_MODE, rate);
2348 	if (err)
2349 		return err;
2350 
2351 	val = bucket_size;
2352 	if (!(config & TRTCM_PKT_MODE))
2353 		val = max_t(u32, val, MIN_TOKEN_SIZE);
2354 	val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET);
2355 
2356 	return airoha_qdma_set_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
2357 					TRTCM_BUCKETSIZE_SHIFT_MODE, val);
2358 }
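
/* Token-rate math above by example (numbers illustrative; field widths
 * come from airoha_regs.h): in byte mode with tick = 1, unit = 8000,
 * so rate_val = 100000 (i.e. 100 Mbit/s expressed in Kbit/s) splits
 * into rate = 12 whole tokens per tick plus a 4000/8000 remainder
 * stored fixed-point in TRTCM_TOKEN_RATE_FRACTION_MASK. The bucket
 * depth is programmed as a power-of-two shift (__fls), clamped to
 * MAX_TOKEN_SIZE_OFFSET.
 */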
2359 
2360 static int airoha_qdma_init_rl_config(struct airoha_qdma *qdma, int queue_id,
2361 				      bool enable, enum trtcm_unit_type unit)
2362 {
2363 	bool tick_sel = queue_id == 0 || queue_id == 2 || queue_id == 8;
2364 	enum trtcm_param mode = TRTCM_METER_MODE;
2365 	int err;
2366 
2367 	mode |= unit == TRTCM_PACKET_UNIT ? TRTCM_PKT_MODE : 0;
2368 	err = airoha_qdma_set_rl_config(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
2369 					enable, mode);
2370 	if (err)
2371 		return err;
2372 
2373 	return airoha_qdma_set_rl_config(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
2374 					 tick_sel, TRTCM_TICK_SEL);
2375 }
2376 
2377 static int airoha_qdma_get_trtcm_param(struct airoha_qdma *qdma, int channel,
2378 				       u32 addr, enum trtcm_param_type param,
2379 				       enum trtcm_mode_type mode,
2380 				       u32 *val_low, u32 *val_high)
2381 {
2382 	u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
2383 	u32 val, config = FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
2384 			  FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
2385 			  FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
2386 			  FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
2387 
2388 	airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
2389 	if (read_poll_timeout(airoha_qdma_rr, val,
2390 			      val & TRTCM_PARAM_RW_DONE_MASK,
2391 			      USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
2392 			      qdma, REG_TRTCM_CFG_PARAM(addr)))
2393 		return -ETIMEDOUT;
2394 
2395 	*val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr));
2396 	if (val_high)
2397 		*val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr));
2398 
2399 	return 0;
2400 }
2401 
2402 static int airoha_qdma_set_trtcm_param(struct airoha_qdma *qdma, int channel,
2403 				       u32 addr, enum trtcm_param_type param,
2404 				       enum trtcm_mode_type mode, u32 val)
2405 {
2406 	u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
2407 	u32 config = TRTCM_PARAM_RW_MASK |
2408 		     FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
2409 		     FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
2410 		     FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
2411 		     FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
2412 
2413 	airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val);
2414 	airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
2415 
2416 	return read_poll_timeout(airoha_qdma_rr, val,
2417 				 val & TRTCM_PARAM_RW_DONE_MASK,
2418 				 USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
2419 				 qdma, REG_TRTCM_CFG_PARAM(addr));
2420 }
2421 
2422 static int airoha_qdma_set_trtcm_config(struct airoha_qdma *qdma, int channel,
2423 					u32 addr, enum trtcm_mode_type mode,
2424 					bool enable, u32 enable_mask)
2425 {
2426 	u32 val;
2427 
2428 	if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
2429 					mode, &val, NULL))
2430 		return -EINVAL;
2431 
2432 	val = enable ? val | enable_mask : val & ~enable_mask;
2433 
2434 	return airoha_qdma_set_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
2435 					   mode, val);
2436 }
2437 
2438 static int airoha_qdma_set_trtcm_token_bucket(struct airoha_qdma *qdma,
2439 					      int channel, u32 addr,
2440 					      enum trtcm_mode_type mode,
2441 					      u32 rate_val, u32 bucket_size)
2442 {
2443 	u32 val, config, tick, unit, rate, rate_frac;
2444 	int err;
2445 
2446 	if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
2447 					mode, &config, NULL))
2448 		return -EINVAL;
2449 
2450 	val = airoha_qdma_rr(qdma, addr);
2451 	tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val);
2452 	if (config & TRTCM_TICK_SEL)
2453 		tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val);
2454 	if (!tick)
2455 		return -EINVAL;
2456 
2457 	unit = (config & TRTCM_PKT_MODE) ? 1000000 / tick : 8000 / tick;
2458 	if (!unit)
2459 		return -EINVAL;
2460 
2461 	rate = rate_val / unit;
2462 	rate_frac = rate_val % unit;
2463 	rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit;
2464 	rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) |
2465 	       FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac);
2466 
2467 	err = airoha_qdma_set_trtcm_param(qdma, channel, addr,
2468 					  TRTCM_TOKEN_RATE_MODE, mode, rate);
2469 	if (err)
2470 		return err;
2471 
2472 	val = max_t(u32, bucket_size, MIN_TOKEN_SIZE);
2473 	val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET);
2474 
2475 	return airoha_qdma_set_trtcm_param(qdma, channel, addr,
2476 					   TRTCM_BUCKETSIZE_SHIFT_MODE,
2477 					   mode, val);
2478 }
2479 
2480 static int airoha_qdma_set_tx_rate_limit(struct airoha_gdm_port *port,
2481 					 int channel, u32 rate,
2482 					 u32 bucket_size)
2483 {
2484 	int i, err;
2485 
2486 	for (i = 0; i <= TRTCM_PEAK_MODE; i++) {
2487 		err = airoha_qdma_set_trtcm_config(port->qdma, channel,
2488 						   REG_EGRESS_TRTCM_CFG, i,
2489 						   !!rate, TRTCM_METER_MODE);
2490 		if (err)
2491 			return err;
2492 
2493 		err = airoha_qdma_set_trtcm_token_bucket(port->qdma, channel,
2494 							 REG_EGRESS_TRTCM_CFG,
2495 							 i, rate, bucket_size);
2496 		if (err)
2497 			return err;
2498 	}
2499 
2500 	return 0;
2501 }
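
/* The loop above programs both trTCM sub-meters (i = TRTCM_COMMIT_MODE
 * and i = TRTCM_PEAK_MODE, assuming the usual 0/1 enum encoding) with
 * the same rate and bucket, effectively degrading the two-rate meter
 * into a single-rate shaper for HTB leaves.
 */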
2502 
2503 static int airoha_tc_htb_alloc_leaf_queue(struct airoha_gdm_port *port,
2504 					  struct tc_htb_qopt_offload *opt)
2505 {
2506 	u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
2507 	u32 rate = div_u64(opt->rate, 1000) << 3; /* kbps */
2508 	struct net_device *dev = port->dev;
2509 	int num_tx_queues = dev->real_num_tx_queues;
2510 	int err;
2511 
2512 	if (opt->parent_classid != TC_HTB_CLASSID_ROOT) {
2513 		NL_SET_ERR_MSG_MOD(opt->extack, "invalid parent classid");
2514 		return -EINVAL;
2515 	}
2516 
2517 	err = airoha_qdma_set_tx_rate_limit(port, channel, rate, opt->quantum);
2518 	if (err) {
2519 		NL_SET_ERR_MSG_MOD(opt->extack,
2520 				   "failed configuring htb offload");
2521 		return err;
2522 	}
2523 
2524 	if (opt->command == TC_HTB_NODE_MODIFY)
2525 		return 0;
2526 
2527 	err = netif_set_real_num_tx_queues(dev, num_tx_queues + 1);
2528 	if (err) {
2529 		airoha_qdma_set_tx_rate_limit(port, channel, 0, opt->quantum);
2530 		NL_SET_ERR_MSG_MOD(opt->extack,
2531 				   "failed setting real_num_tx_queues");
2532 		return err;
2533 	}
2534 
2535 	set_bit(channel, port->qos_sq_bmap);
2536 	opt->qid = AIROHA_NUM_TX_RING + channel;
2537 
2538 	return 0;
2539 }
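
/* Illustrative HTB offload flow (commands assumed, not taken from
 * driver documentation): the qdisc is created with the offload flag
 * and each leaf class is backed by one of the reserved hw QoS
 * channels via opt->qid:
 *
 *   tc qdisc replace dev eth1 root handle 1: htb offload
 *   tc class add dev eth1 parent 1: classid 1:10 htb rate 100mbit
 *
 * opt->rate arrives in bytes/s, hence the div_u64(, 1000) << 3
 * conversion to Kbit/s above.
 */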
2540 
2541 static int airoha_qdma_set_rx_meter(struct airoha_gdm_port *port,
2542 				    u32 rate, u32 bucket_size,
2543 				    enum trtcm_unit_type unit_type)
2544 {
2545 	struct airoha_qdma *qdma = port->qdma;
2546 	int i;
2547 
2548 	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
2549 		int err;
2550 
2551 		if (!qdma->q_rx[i].ndesc)
2552 			continue;
2553 
2554 		err = airoha_qdma_init_rl_config(qdma, i, !!rate, unit_type);
2555 		if (err)
2556 			return err;
2557 
2558 		err = airoha_qdma_set_rl_token_bucket(qdma, i, rate,
2559 						      bucket_size);
2560 		if (err)
2561 			return err;
2562 	}
2563 
2564 	return 0;
2565 }
2566 
2567 static int airoha_tc_matchall_act_validate(struct tc_cls_matchall_offload *f)
2568 {
2569 	const struct flow_action *actions = &f->rule->action;
2570 	const struct flow_action_entry *act;
2571 
2572 	if (!flow_action_has_entries(actions)) {
2573 		NL_SET_ERR_MSG_MOD(f->common.extack,
2574 				   "filter run with no actions");
2575 		return -EINVAL;
2576 	}
2577 
2578 	if (!flow_offload_has_one_action(actions)) {
2579 		NL_SET_ERR_MSG_MOD(f->common.extack,
2580 				   "only one action per filter is supported");
2581 		return -EOPNOTSUPP;
2582 	}
2583 
2584 	act = &actions->entries[0];
2585 	if (act->id != FLOW_ACTION_POLICE) {
2586 		NL_SET_ERR_MSG_MOD(f->common.extack, "unsupported action");
2587 		return -EOPNOTSUPP;
2588 	}
2589 
2590 	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
2591 		NL_SET_ERR_MSG_MOD(f->common.extack,
2592 				   "invalid exceed action id");
2593 		return -EOPNOTSUPP;
2594 	}
2595 
2596 	if (act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
2597 		NL_SET_ERR_MSG_MOD(f->common.extack,
2598 				   "invalid notexceed action id");
2599 		return -EOPNOTSUPP;
2600 	}
2601 
2602 	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
2603 	    !flow_action_is_last_entry(actions, act)) {
2604 		NL_SET_ERR_MSG_MOD(f->common.extack,
2605 				   "action accept must be last");
2606 		return -EOPNOTSUPP;
2607 	}
2608 
2609 	if (act->police.peakrate_bytes_ps || act->police.avrate ||
2610 	    act->police.overhead || act->police.mtu) {
2611 		NL_SET_ERR_MSG_MOD(f->common.extack,
2612 				   "peakrate/avrate/overhead/mtu unsupported");
2613 		return -EOPNOTSUPP;
2614 	}
2615 
2616 	return 0;
2617 }
2618 
2619 static int airoha_dev_tc_matchall(struct net_device *dev,
2620 				  struct tc_cls_matchall_offload *f)
2621 {
2622 	enum trtcm_unit_type unit_type = TRTCM_BYTE_UNIT;
2623 	struct airoha_gdm_port *port = netdev_priv(dev);
2624 	u32 rate = 0, bucket_size = 0;
2625 
2626 	switch (f->command) {
2627 	case TC_CLSMATCHALL_REPLACE: {
2628 		const struct flow_action_entry *act;
2629 		int err;
2630 
2631 		err = airoha_tc_matchall_act_validate(f);
2632 		if (err)
2633 			return err;
2634 
2635 		act = &f->rule->action.entries[0];
2636 		if (act->police.rate_pkt_ps) {
2637 			rate = act->police.rate_pkt_ps;
2638 			bucket_size = act->police.burst_pkt;
2639 			unit_type = TRTCM_PACKET_UNIT;
2640 		} else {
2641 			rate = div_u64(act->police.rate_bytes_ps, 1000);
2642 			rate = rate << 3; /* Kbps */
2643 			bucket_size = act->police.burst;
2644 		}
2645 		fallthrough;
2646 	}
2647 	case TC_CLSMATCHALL_DESTROY:
2648 		return airoha_qdma_set_rx_meter(port, rate, bucket_size,
2649 						unit_type);
2650 	default:
2651 		return -EOPNOTSUPP;
2652 	}
2653 }
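
/* Illustrative policer that passes airoha_tc_matchall_act_validate()
 * above (exceed = drop, notexceed = accept); the tc invocation is
 * assumed and requires the hw-tc-offload feature to be enabled:
 *
 *   tc filter add dev eth1 ingress matchall \
 *      action police rate 100mbit burst 16k conform-exceed drop/pass
 */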
2654 
2655 static int airoha_dev_setup_tc_block_cb(enum tc_setup_type type,
2656 					void *type_data, void *cb_priv)
2657 {
2658 	struct net_device *dev = cb_priv;
2659 	struct airoha_gdm_port *port = netdev_priv(dev);
2660 	struct airoha_eth *eth = port->qdma->eth;
2661 
2662 	if (!tc_can_offload(dev))
2663 		return -EOPNOTSUPP;
2664 
2665 	switch (type) {
2666 	case TC_SETUP_CLSFLOWER:
2667 		return airoha_ppe_setup_tc_block_cb(&eth->ppe->dev, type_data);
2668 	case TC_SETUP_CLSMATCHALL:
2669 		return airoha_dev_tc_matchall(dev, type_data);
2670 	default:
2671 		return -EOPNOTSUPP;
2672 	}
2673 }
2674 
2675 static int airoha_dev_setup_tc_block(struct airoha_gdm_port *port,
2676 				     struct flow_block_offload *f)
2677 {
2678 	flow_setup_cb_t *cb = airoha_dev_setup_tc_block_cb;
2679 	static LIST_HEAD(block_cb_list);
2680 	struct flow_block_cb *block_cb;
2681 
2682 	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
2683 		return -EOPNOTSUPP;
2684 
2685 	f->driver_block_list = &block_cb_list;
2686 	switch (f->command) {
2687 	case FLOW_BLOCK_BIND:
2688 		block_cb = flow_block_cb_lookup(f->block, cb, port->dev);
2689 		if (block_cb) {
2690 			flow_block_cb_incref(block_cb);
2691 			return 0;
2692 		}
2693 		block_cb = flow_block_cb_alloc(cb, port->dev, port->dev, NULL);
2694 		if (IS_ERR(block_cb))
2695 			return PTR_ERR(block_cb);
2696 
2697 		flow_block_cb_incref(block_cb);
2698 		flow_block_cb_add(block_cb, f);
2699 		list_add_tail(&block_cb->driver_list, &block_cb_list);
2700 		return 0;
2701 	case FLOW_BLOCK_UNBIND:
2702 		block_cb = flow_block_cb_lookup(f->block, cb, port->dev);
2703 		if (!block_cb)
2704 			return -ENOENT;
2705 
2706 		if (!flow_block_cb_decref(block_cb)) {
2707 			flow_block_cb_remove(block_cb, f);
2708 			list_del(&block_cb->driver_list);
2709 		}
2710 		return 0;
2711 	default:
2712 		return -EOPNOTSUPP;
2713 	}
2714 }
2715 
2716 static void airoha_tc_remove_htb_queue(struct airoha_gdm_port *port, int queue)
2717 {
2718 	struct net_device *dev = port->dev;
2719 
2720 	netif_set_real_num_tx_queues(dev, dev->real_num_tx_queues - 1);
2721 	airoha_qdma_set_tx_rate_limit(port, queue + 1, 0, 0);
2722 	clear_bit(queue, port->qos_sq_bmap);
2723 }
2724 
2725 static int airoha_tc_htb_delete_leaf_queue(struct airoha_gdm_port *port,
2726 					   struct tc_htb_qopt_offload *opt)
2727 {
2728 	u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
2729 
2730 	if (!test_bit(channel, port->qos_sq_bmap)) {
2731 		NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
2732 		return -EINVAL;
2733 	}
2734 
2735 	airoha_tc_remove_htb_queue(port, channel);
2736 
2737 	return 0;
2738 }
2739 
2740 static int airoha_tc_htb_destroy(struct airoha_gdm_port *port)
2741 {
2742 	int q;
2743 
2744 	for_each_set_bit(q, port->qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS)
2745 		airoha_tc_remove_htb_queue(port, q);
2746 
2747 	return 0;
2748 }
2749 
2750 static int airoha_tc_get_htb_get_leaf_queue(struct airoha_gdm_port *port,
2751 					    struct tc_htb_qopt_offload *opt)
2752 {
2753 	u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
2754 
2755 	if (!test_bit(channel, port->qos_sq_bmap)) {
2756 		NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
2757 		return -EINVAL;
2758 	}
2759 
2760 	opt->qid = AIROHA_NUM_TX_RING + channel;
2761 
2762 	return 0;
2763 }
2764 
2765 static int airoha_tc_setup_qdisc_htb(struct airoha_gdm_port *port,
2766 				     struct tc_htb_qopt_offload *opt)
2767 {
2768 	switch (opt->command) {
2769 	case TC_HTB_CREATE:
2770 		break;
2771 	case TC_HTB_DESTROY:
2772 		return airoha_tc_htb_destroy(port);
2773 	case TC_HTB_NODE_MODIFY:
2774 	case TC_HTB_LEAF_ALLOC_QUEUE:
2775 		return airoha_tc_htb_alloc_leaf_queue(port, opt);
2776 	case TC_HTB_LEAF_DEL:
2777 	case TC_HTB_LEAF_DEL_LAST:
2778 	case TC_HTB_LEAF_DEL_LAST_FORCE:
2779 		return airoha_tc_htb_delete_leaf_queue(port, opt);
2780 	case TC_HTB_LEAF_QUERY_QUEUE:
2781 		return airoha_tc_get_htb_get_leaf_queue(port, opt);
2782 	default:
2783 		return -EOPNOTSUPP;
2784 	}
2785 
2786 	return 0;
2787 }
2788 
2789 static int airoha_dev_tc_setup(struct net_device *dev, enum tc_setup_type type,
2790 			       void *type_data)
2791 {
2792 	struct airoha_gdm_port *port = netdev_priv(dev);
2793 
2794 	switch (type) {
2795 	case TC_SETUP_QDISC_ETS:
2796 		return airoha_tc_setup_qdisc_ets(port, type_data);
2797 	case TC_SETUP_QDISC_HTB:
2798 		return airoha_tc_setup_qdisc_htb(port, type_data);
2799 	case TC_SETUP_BLOCK:
2800 	case TC_SETUP_FT:
2801 		return airoha_dev_setup_tc_block(port, type_data);
2802 	default:
2803 		return -EOPNOTSUPP;
2804 	}
2805 }
2806 
2807 static const struct net_device_ops airoha_netdev_ops = {
2808 	.ndo_init		= airoha_dev_init,
2809 	.ndo_open		= airoha_dev_open,
2810 	.ndo_stop		= airoha_dev_stop,
2811 	.ndo_change_mtu		= airoha_dev_change_mtu,
2812 	.ndo_select_queue	= airoha_dev_select_queue,
2813 	.ndo_start_xmit		= airoha_dev_xmit,
2814 	.ndo_get_stats64        = airoha_dev_get_stats64,
2815 	.ndo_set_mac_address	= airoha_dev_set_macaddr,
2816 	.ndo_setup_tc		= airoha_dev_tc_setup,
2817 };
2818 
2819 static const struct ethtool_ops airoha_ethtool_ops = {
2820 	.get_drvinfo		= airoha_ethtool_get_drvinfo,
2821 	.get_eth_mac_stats      = airoha_ethtool_get_mac_stats,
2822 	.get_rmon_stats		= airoha_ethtool_get_rmon_stats,
2823 	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
2824 	.get_link		= ethtool_op_get_link,
2825 };
2826 
2827 static int airoha_metadata_dst_alloc(struct airoha_gdm_port *port)
2828 {
2829 	int i;
2830 
2831 	for (i = 0; i < ARRAY_SIZE(port->dsa_meta); i++) {
2832 		struct metadata_dst *md_dst;
2833 
2834 		md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
2835 					    GFP_KERNEL);
2836 		if (!md_dst)
2837 			return -ENOMEM;
2838 
2839 		md_dst->u.port_info.port_id = i;
2840 		port->dsa_meta[i] = md_dst;
2841 	}
2842 
2843 	return 0;
2844 }
2845 
2846 static void airoha_metadata_dst_free(struct airoha_gdm_port *port)
2847 {
2848 	int i;
2849 
2850 	for (i = 0; i < ARRAY_SIZE(port->dsa_meta); i++) {
2851 		if (!port->dsa_meta[i])
2852 			continue;
2853 
2854 		metadata_dst_free(port->dsa_meta[i]);
2855 	}
2856 }
2857 
2858 bool airoha_is_valid_gdm_port(struct airoha_eth *eth,
2859 			      struct airoha_gdm_port *port)
2860 {
2861 	int i;
2862 
2863 	for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
2864 		if (eth->ports[i] == port)
2865 			return true;
2866 	}
2867 
2868 	return false;
2869 }
2870 
2871 static int airoha_alloc_gdm_port(struct airoha_eth *eth,
2872 				 struct device_node *np, int index)
2873 {
2874 	const __be32 *id_ptr = of_get_property(np, "reg", NULL);
2875 	struct airoha_gdm_port *port;
2876 	struct airoha_qdma *qdma;
2877 	struct net_device *dev;
2878 	int err, p;
2879 	u32 id;
2880 
2881 	if (!id_ptr) {
2882 		dev_err(eth->dev, "missing gdm port id\n");
2883 		return -EINVAL;
2884 	}
2885 
2886 	id = be32_to_cpup(id_ptr);
2887 	p = id - 1;
2888 
2889 	if (!id || id > ARRAY_SIZE(eth->ports)) {
2890 		dev_err(eth->dev, "invalid gdm port id: %d\n", id);
2891 		return -EINVAL;
2892 	}
2893 
2894 	if (eth->ports[p]) {
2895 		dev_err(eth->dev, "duplicate gdm port id: %d\n", id);
2896 		return -EINVAL;
2897 	}
2898 
2899 	dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port),
2900 				      AIROHA_NUM_NETDEV_TX_RINGS,
2901 				      AIROHA_NUM_RX_RING);
2902 	if (!dev) {
2903 		dev_err(eth->dev, "alloc_etherdev failed\n");
2904 		return -ENOMEM;
2905 	}
2906 
2907 	qdma = &eth->qdma[index % AIROHA_MAX_NUM_QDMA];
2908 	dev->netdev_ops = &airoha_netdev_ops;
2909 	dev->ethtool_ops = &airoha_ethtool_ops;
2910 	dev->max_mtu = AIROHA_MAX_MTU;
2911 	dev->watchdog_timeo = 5 * HZ;
2912 	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
2913 			   NETIF_F_TSO6 | NETIF_F_IPV6_CSUM |
2914 			   NETIF_F_SG | NETIF_F_TSO |
2915 			   NETIF_F_HW_TC;
2916 	dev->features |= dev->hw_features;
2917 	dev->vlan_features = dev->hw_features;
2918 	dev->dev.of_node = np;
2919 	dev->irq = qdma->irq_banks[0].irq;
2920 	SET_NETDEV_DEV(dev, eth->dev);
2921 
2922 	/* reserve hw queues for HTB offloading */
2923 	err = netif_set_real_num_tx_queues(dev, AIROHA_NUM_TX_RING);
2924 	if (err)
2925 		return err;
2926 
2927 	err = of_get_ethdev_address(np, dev);
2928 	if (err) {
2929 		if (err == -EPROBE_DEFER)
2930 			return err;
2931 
2932 		eth_hw_addr_random(dev);
2933 		dev_info(eth->dev, "generated random MAC address %pM\n",
2934 			 dev->dev_addr);
2935 	}
2936 
2937 	port = netdev_priv(dev);
2938 	u64_stats_init(&port->stats.syncp);
2939 	spin_lock_init(&port->stats.lock);
2940 	port->qdma = qdma;
2941 	port->dev = dev;
2942 	port->id = id;
2943 	eth->ports[p] = port;
2944 
2945 	return airoha_metadata_dst_alloc(port);
2946 }
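
/* Illustrative device-tree fragment matched by airoha_alloc_gdm_port()
 * above and the probe loop below (node and label names are made up;
 * only the compatible string and the 1-based "reg" port id are taken
 * from this file):
 *
 *   gdm1: ethernet-mac@1 {
 *           compatible = "airoha,eth-mac";
 *           reg = <1>;
 *   };
 */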
2947 
2948 static int airoha_register_gdm_devices(struct airoha_eth *eth)
2949 {
2950 	int i;
2951 
2952 	for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
2953 		struct airoha_gdm_port *port = eth->ports[i];
2954 		int err;
2955 
2956 		if (!port)
2957 			continue;
2958 
2959 		err = register_netdev(port->dev);
2960 		if (err)
2961 			return err;
2962 	}
2963 
2964 	set_bit(DEV_STATE_REGISTERED, &eth->state);
2965 
2966 	return 0;
2967 }
2968 
2969 static int airoha_probe(struct platform_device *pdev)
2970 {
2971 	struct reset_control_bulk_data *xsi_rsts;
2972 	struct device_node *np;
2973 	struct airoha_eth *eth;
2974 	int i, err;
2975 
2976 	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
2977 	if (!eth)
2978 		return -ENOMEM;
2979 
2980 	eth->soc = of_device_get_match_data(&pdev->dev);
2981 	if (!eth->soc)
2982 		return -EINVAL;
2983 
2984 	eth->dev = &pdev->dev;
2985 
2986 	err = dma_set_mask_and_coherent(eth->dev, DMA_BIT_MASK(32));
2987 	if (err) {
2988 		dev_err(eth->dev, "failed configuring DMA mask\n");
2989 		return err;
2990 	}
2991 
2992 	eth->fe_regs = devm_platform_ioremap_resource_byname(pdev, "fe");
2993 	if (IS_ERR(eth->fe_regs))
2994 		return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs),
2995 				     "failed to iomap fe regs\n");
2996 
2997 	eth->rsts[0].id = "fe";
2998 	eth->rsts[1].id = "pdma";
2999 	eth->rsts[2].id = "qdma";
3000 	err = devm_reset_control_bulk_get_exclusive(eth->dev,
3001 						    ARRAY_SIZE(eth->rsts),
3002 						    eth->rsts);
3003 	if (err) {
3004 		dev_err(eth->dev, "failed to get bulk reset lines\n");
3005 		return err;
3006 	}
3007 
3008 	xsi_rsts = devm_kcalloc(eth->dev,
3009 				eth->soc->num_xsi_rsts, sizeof(*xsi_rsts),
3010 				GFP_KERNEL);
3011 	if (!xsi_rsts)
3012 		return -ENOMEM;
3013 
3014 	eth->xsi_rsts = xsi_rsts;
3015 	for (i = 0; i < eth->soc->num_xsi_rsts; i++)
3016 		eth->xsi_rsts[i].id = eth->soc->xsi_rsts_names[i];
3017 
3018 	err = devm_reset_control_bulk_get_exclusive(eth->dev,
3019 						    eth->soc->num_xsi_rsts,
3020 						    eth->xsi_rsts);
3021 	if (err) {
3022 		dev_err(eth->dev, "failed to get bulk xsi reset lines\n");
3023 		return err;
3024 	}
3025 
3026 	eth->napi_dev = alloc_netdev_dummy(0);
3027 	if (!eth->napi_dev)
3028 		return -ENOMEM;
3029 
3030 	/* Enable threaded NAPI by default */
3031 	eth->napi_dev->threaded = true;
3032 	strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name));
3033 	platform_set_drvdata(pdev, eth);
3034 
3035 	err = airoha_hw_init(pdev, eth);
3036 	if (err)
3037 		goto error_hw_cleanup;
3038 
3039 	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
3040 		airoha_qdma_start_napi(&eth->qdma[i]);
3041 
3042 	i = 0;
3043 	for_each_child_of_node(pdev->dev.of_node, np) {
3044 		if (!of_device_is_compatible(np, "airoha,eth-mac"))
3045 			continue;
3046 
3047 		if (!of_device_is_available(np))
3048 			continue;
3049 
3050 		err = airoha_alloc_gdm_port(eth, np, i++);
3051 		if (err) {
3052 			of_node_put(np);
3053 			goto error_napi_stop;
3054 		}
3055 	}
3056 
3057 	err = airoha_register_gdm_devices(eth);
3058 	if (err)
3059 		goto error_napi_stop;
3060 
3061 	return 0;
3062 
3063 error_napi_stop:
3064 	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
3065 		airoha_qdma_stop_napi(&eth->qdma[i]);
3066 	airoha_ppe_deinit(eth);
3067 error_hw_cleanup:
3068 	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
3069 		airoha_hw_cleanup(&eth->qdma[i]);
3070 
3071 	for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
3072 		struct airoha_gdm_port *port = eth->ports[i];
3073 
3074 		if (!port)
3075 			continue;
3076 
3077 		if (port->dev->reg_state == NETREG_REGISTERED)
3078 			unregister_netdev(port->dev);
3079 		airoha_metadata_dst_free(port);
3080 	}
3081 	free_netdev(eth->napi_dev);
3082 	platform_set_drvdata(pdev, NULL);
3083 
3084 	return err;
3085 }
3086 
3087 static void airoha_remove(struct platform_device *pdev)
3088 {
3089 	struct airoha_eth *eth = platform_get_drvdata(pdev);
3090 	int i;
3091 
3092 	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
3093 		airoha_qdma_stop_napi(&eth->qdma[i]);
3094 		airoha_hw_cleanup(&eth->qdma[i]);
3095 	}
3096 
3097 	for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
3098 		struct airoha_gdm_port *port = eth->ports[i];
3099 
3100 		if (!port)
3101 			continue;
3102 
3103 		unregister_netdev(port->dev);
3104 		airoha_metadata_dst_free(port);
3105 	}
3106 	free_netdev(eth->napi_dev);
3107 
3108 	airoha_ppe_deinit(eth);
3109 	platform_set_drvdata(pdev, NULL);
3110 }
3111 
3112 static const char * const en7581_xsi_rsts_names[] = {
3113 	"xsi-mac",
3114 	"hsi0-mac",
3115 	"hsi1-mac",
3116 	"hsi-mac",
3117 	"xfp-mac",
3118 };
3119 
3120 static int airoha_en7581_get_src_port_id(struct airoha_gdm_port *port, int nbq)
3121 {
3122 	switch (port->id) {
3123 	case AIROHA_GDM3_IDX:
3124 		/* 7581 SoC supports PCIe serdes on GDM3 port */
3125 		if (nbq == 4)
3126 			return HSGMII_LAN_7581_PCIE0_SRCPORT;
3127 		if (nbq == 5)
3128 			return HSGMII_LAN_7581_PCIE1_SRCPORT;
3129 		break;
3130 	case AIROHA_GDM4_IDX:
3131 		/* 7581 SoC supports eth and usb serdes on GDM4 port */
3132 		if (!nbq)
3133 			return HSGMII_LAN_7581_ETH_SRCPORT;
3134 		if (nbq == 1)
3135 			return HSGMII_LAN_7581_USB_SRCPORT;
3136 		break;
3137 	default:
3138 		break;
3139 	}
3140 
3141 	return -EINVAL;
3142 }
3143 
3144 static const char * const an7583_xsi_rsts_names[] = {
3145 	"xsi-mac",
3146 	"hsi0-mac",
3147 	"hsi1-mac",
3148 	"xfp-mac",
3149 };
3150 
3151 static int airoha_an7583_get_src_port_id(struct airoha_gdm_port *port, int nbq)
3152 {
3153 	switch (port->id) {
3154 	case AIROHA_GDM3_IDX:
3155 		/* 7583 SoC supports eth serdes on GDM3 port */
3156 		if (!nbq)
3157 			return HSGMII_LAN_7583_ETH_SRCPORT;
3158 		break;
3159 	case AIROHA_GDM4_IDX:
3160 		/* 7583 SoC supports PCIe and USB serdes on GDM4 port */
3161 		if (!nbq)
3162 			return HSGMII_LAN_7583_PCIE_SRCPORT;
3163 		if (nbq == 1)
3164 			return HSGMII_LAN_7583_USB_SRCPORT;
3165 		break;
3166 	default:
3167 		break;
3168 	}
3169 
3170 	return -EINVAL;
3171 }
3172 
3173 static const struct airoha_eth_soc_data en7581_soc_data = {
3174 	.version = 0x7581,
3175 	.xsi_rsts_names = en7581_xsi_rsts_names,
3176 	.num_xsi_rsts = ARRAY_SIZE(en7581_xsi_rsts_names),
3177 	.num_ppe = 2,
3178 	.ops = {
3179 		.get_src_port_id = airoha_en7581_get_src_port_id,
3180 	},
3181 };
3182 
3183 static const struct airoha_eth_soc_data an7583_soc_data = {
3184 	.version = 0x7583,
3185 	.xsi_rsts_names = an7583_xsi_rsts_names,
3186 	.num_xsi_rsts = ARRAY_SIZE(an7583_xsi_rsts_names),
3187 	.num_ppe = 1,
3188 	.ops = {
3189 		.get_src_port_id = airoha_an7583_get_src_port_id,
3190 	},
3191 };
3192 
3193 static const struct of_device_id of_airoha_match[] = {
3194 	{ .compatible = "airoha,en7581-eth", .data = &en7581_soc_data },
3195 	{ .compatible = "airoha,an7583-eth", .data = &an7583_soc_data },
3196 	{ /* sentinel */ }
3197 };
3198 MODULE_DEVICE_TABLE(of, of_airoha_match);
3199 
3200 static struct platform_driver airoha_driver = {
3201 	.probe = airoha_probe,
3202 	.remove = airoha_remove,
3203 	.driver = {
3204 		.name = KBUILD_MODNAME,
3205 		.of_match_table = of_airoha_match,
3206 	},
3207 };
3208 module_platform_driver(airoha_driver);
3209 
3210 MODULE_LICENSE("GPL");
3211 MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
3212 MODULE_DESCRIPTION("Ethernet driver for Airoha SoC");
3213