// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2024 AIROHA Inc
 * Author: Lorenzo Bianconi <lorenzo@kernel.org>
 */
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/tcp.h>
#include <linux/u64_stats_sync.h>
#include <net/dst_metadata.h>
#include <net/page_pool/helpers.h>
#include <net/pkt_cls.h>
#include <uapi/linux/ppp_defs.h>

#include "airoha_regs.h"
#include "airoha_eth.h"

u32 airoha_rr(void __iomem *base, u32 offset)
{
	return readl(base + offset);
}

void airoha_wr(void __iomem *base, u32 offset, u32 val)
{
	writel(val, base + offset);
}

u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
{
	val |= (airoha_rr(base, offset) & ~mask);
	airoha_wr(base, offset, val);

	return val;
}

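/* Each IRQ bank keeps a shadow copy of its interrupt masks
 * (irq_bank->irqmask) so that enable/disable operations can update the
 * REG_INT_ENABLE registers without reading them back first. The shadow
 * state is protected by irq_lock.
 */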
static void airoha_qdma_set_irqmask(struct airoha_irq_bank *irq_bank,
				    int index, u32 clear, u32 set)
{
	struct airoha_qdma *qdma = irq_bank->qdma;
	int bank = irq_bank - &qdma->irq_banks[0];
	unsigned long flags;

	if (WARN_ON_ONCE(index >= ARRAY_SIZE(irq_bank->irqmask)))
		return;

	spin_lock_irqsave(&irq_bank->irq_lock, flags);

	irq_bank->irqmask[index] &= ~clear;
	irq_bank->irqmask[index] |= set;
	airoha_qdma_wr(qdma, REG_INT_ENABLE(bank, index),
		       irq_bank->irqmask[index]);
	/* Read irq_enable register in order to guarantee the update above
	 * completes in the spinlock critical section.
	 */
	airoha_qdma_rr(qdma, REG_INT_ENABLE(bank, index));

	spin_unlock_irqrestore(&irq_bank->irq_lock, flags);
}

static void airoha_qdma_irq_enable(struct airoha_irq_bank *irq_bank,
				   int index, u32 mask)
{
	airoha_qdma_set_irqmask(irq_bank, index, 0, mask);
}

static void airoha_qdma_irq_disable(struct airoha_irq_bank *irq_bank,
				    int index, u32 mask)
{
	airoha_qdma_set_irqmask(irq_bank, index, mask, 0);
}

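/* The GDM MAC address is split across register pairs: the high word
 * holds the first three octets while LMIN/LMAX hold the remaining
 * three; both are programmed to the same value, i.e. a single-address
 * range.
 */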
static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr)
{
	struct airoha_eth *eth = port->qdma->eth;
	u32 val, reg;

	reg = airoha_is_lan_gdm_port(port) ? REG_FE_LAN_MAC_H
					   : REG_FE_WAN_MAC_H;
	val = (addr[0] << 16) | (addr[1] << 8) | addr[2];
	airoha_fe_wr(eth, reg, val);

	val = (addr[3] << 16) | (addr[4] << 8) | addr[5];
	airoha_fe_wr(eth, REG_FE_MAC_LMIN(reg), val);
	airoha_fe_wr(eth, REG_FE_MAC_LMAX(reg), val);

	airoha_ppe_init_upd_mem(port);
}

static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
					u32 val)
{
	airoha_fe_rmw(eth, addr, GDM_OCFQ_MASK,
		      FIELD_PREP(GDM_OCFQ_MASK, val));
	airoha_fe_rmw(eth, addr, GDM_MCFQ_MASK,
		      FIELD_PREP(GDM_MCFQ_MASK, val));
	airoha_fe_rmw(eth, addr, GDM_BCFQ_MASK,
		      FIELD_PREP(GDM_BCFQ_MASK, val));
	airoha_fe_rmw(eth, addr, GDM_UCFQ_MASK,
		      FIELD_PREP(GDM_UCFQ_MASK, val));
}

static int airoha_set_vip_for_gdm_port(struct airoha_gdm_port *port,
				       bool enable)
{
	struct airoha_eth *eth = port->qdma->eth;
	u32 vip_port;

	vip_port = eth->soc->ops.get_vip_port(port, port->nbq);
	if (enable) {
		airoha_fe_set(eth, REG_FE_VIP_PORT_EN, vip_port);
		airoha_fe_set(eth, REG_FE_IFC_PORT_EN, vip_port);
	} else {
		airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, vip_port);
		airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, vip_port);
	}

	return 0;
}

static void airoha_fe_maccr_init(struct airoha_eth *eth)
{
	int p;

	for (p = 1; p <= ARRAY_SIZE(eth->ports); p++)
		airoha_fe_set(eth, REG_GDM_FWD_CFG(p),
			      GDM_TCP_CKSUM_MASK | GDM_UDP_CKSUM_MASK |
			      GDM_IP4_CKSUM_MASK | GDM_DROP_CRC_ERR_MASK);

	airoha_fe_rmw(eth, REG_CDM_VLAN_CTRL(1), CDM_VLAN_MASK,
		      FIELD_PREP(CDM_VLAN_MASK, 0x8100));

	airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PAD);
}

static void airoha_fe_vip_setup(struct airoha_eth *eth)
{
	airoha_fe_wr(eth, REG_FE_VIP_PATN(3), ETH_P_PPP_DISC);
	airoha_fe_wr(eth, REG_FE_VIP_EN(3), PATN_FCPU_EN_MASK | PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(4), PPP_LCP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(4),
		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
		     PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(6), PPP_IPCP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(6),
		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
		     PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(7), PPP_CHAP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(7),
		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
		     PATN_EN_MASK);

	/* BOOTP (0x43) */
	airoha_fe_wr(eth, REG_FE_VIP_PATN(8), 0x43);
	airoha_fe_wr(eth, REG_FE_VIP_EN(8),
		     PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);

	/* BOOTP (0x44) */
	airoha_fe_wr(eth, REG_FE_VIP_PATN(9), 0x44);
	airoha_fe_wr(eth, REG_FE_VIP_EN(9),
		     PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);

	/* ISAKMP */
	airoha_fe_wr(eth, REG_FE_VIP_PATN(10), 0x1f401f4);
	airoha_fe_wr(eth, REG_FE_VIP_EN(10),
		     PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(11), PPP_IPV6CP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(11),
		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
		     PATN_EN_MASK);

	/* DHCPv6 */
	airoha_fe_wr(eth, REG_FE_VIP_PATN(12), 0x2220223);
	airoha_fe_wr(eth, REG_FE_VIP_EN(12),
		     PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(19), PPP_PAP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(19),
		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
		     PATN_EN_MASK);

	/* ETH->ETH_P_1905 (0x893a) */
	airoha_fe_wr(eth, REG_FE_VIP_PATN(20), 0x893a);
	airoha_fe_wr(eth, REG_FE_VIP_EN(20),
		     PATN_FCPU_EN_MASK | PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(21), ETH_P_LLDP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(21),
		     PATN_FCPU_EN_MASK | PATN_EN_MASK);
}

static u32 airoha_fe_get_pse_queue_rsv_pages(struct airoha_eth *eth,
					     u32 port, u32 queue)
{
	u32 val;

	airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
		      PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK,
		      FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
		      FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue));
	val = airoha_fe_rr(eth, REG_FE_PSE_QUEUE_CFG_VAL);

	return FIELD_GET(PSE_CFG_OQ_RSV_MASK, val);
}

static void airoha_fe_set_pse_queue_rsv_pages(struct airoha_eth *eth,
					      u32 port, u32 queue, u32 val)
{
	airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_VAL, PSE_CFG_OQ_RSV_MASK,
		      FIELD_PREP(PSE_CFG_OQ_RSV_MASK, val));
	airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
		      PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK |
		      PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK,
		      FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
		      FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue) |
		      PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK);
}

static u32 airoha_fe_get_pse_all_rsv(struct airoha_eth *eth)
{
	u32 val = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET);

	return FIELD_GET(PSE_ALLRSV_MASK, val);
}

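/* Updating the per-queue reserved pages requires rebalancing the global
 * PSE buffer accounting: the total reservation (ALLRSV) is adjusted by
 * the delta, and the high/mid/low shared-buffer usage thresholds are
 * recomputed from the free-queue limit.
 */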
static int airoha_fe_set_pse_oq_rsv(struct airoha_eth *eth,
				    u32 port, u32 queue, u32 val)
{
	u32 orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue);
	u32 tmp, all_rsv, fq_limit;

	airoha_fe_set_pse_queue_rsv_pages(eth, port, queue, val);

	/* modify all rsv */
	all_rsv = airoha_fe_get_pse_all_rsv(eth);
	all_rsv += (val - orig_val);
	airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, PSE_ALLRSV_MASK,
		      FIELD_PREP(PSE_ALLRSV_MASK, all_rsv));

	/* modify hthd */
	tmp = airoha_fe_rr(eth, PSE_FQ_CFG);
	fq_limit = FIELD_GET(PSE_FQ_LIMIT_MASK, tmp);
	tmp = fq_limit - all_rsv - 0x20;
	airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
		      PSE_SHARE_USED_HTHD_MASK,
		      FIELD_PREP(PSE_SHARE_USED_HTHD_MASK, tmp));

	tmp = fq_limit - all_rsv - 0x100;
	airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
		      PSE_SHARE_USED_MTHD_MASK,
		      FIELD_PREP(PSE_SHARE_USED_MTHD_MASK, tmp));
	tmp = (3 * tmp) >> 2;
	airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET,
		      PSE_SHARE_USED_LTHD_MASK,
		      FIELD_PREP(PSE_SHARE_USED_LTHD_MASK, tmp));

	return 0;
}

static void airoha_fe_pse_ports_init(struct airoha_eth *eth)
{
	const u32 pse_port_num_queues[] = {
		[FE_PSE_PORT_CDM1] = 6,
		[FE_PSE_PORT_GDM1] = 6,
		[FE_PSE_PORT_GDM2] = 32,
		[FE_PSE_PORT_GDM3] = 6,
		[FE_PSE_PORT_PPE1] = 4,
		[FE_PSE_PORT_CDM2] = 6,
		[FE_PSE_PORT_CDM3] = 8,
		[FE_PSE_PORT_CDM4] = 10,
		[FE_PSE_PORT_PPE2] = 4,
		[FE_PSE_PORT_GDM4] = 2,
		[FE_PSE_PORT_CDM5] = 2,
	};
	int q;

	if (airoha_ppe_is_enabled(eth, 1)) {
		u32 all_rsv;

		/* hw misses PPE2 oq rsv */
		all_rsv = airoha_fe_get_pse_all_rsv(eth);
		all_rsv += PSE_RSV_PAGES *
			   pse_port_num_queues[FE_PSE_PORT_PPE2];
		airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, PSE_ALLRSV_MASK,
			      FIELD_PREP(PSE_ALLRSV_MASK, all_rsv));
	}

	/* CDM1 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM1]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM1, q,
					 PSE_QUEUE_RSV_PAGES);
	/* GDM1 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM1]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM1, q,
					 PSE_QUEUE_RSV_PAGES);
	/* GDM2 */
	for (q = 6; q < pse_port_num_queues[FE_PSE_PORT_GDM2]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM2, q, 0);
	/* GDM3 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM3]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM3, q,
					 PSE_QUEUE_RSV_PAGES);
	/* PPE1 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE1]; q++) {
		if (q < pse_port_num_queues[FE_PSE_PORT_PPE1] / 2)
			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q,
						 PSE_QUEUE_RSV_PAGES);
		else
			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q, 0);
	}
	/* CDM2 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM2]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM2, q,
					 PSE_QUEUE_RSV_PAGES);
	/* CDM3 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM3] - 1; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM3, q, 0);
	/* CDM4 */
	for (q = 4; q < pse_port_num_queues[FE_PSE_PORT_CDM4]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM4, q,
					 PSE_QUEUE_RSV_PAGES);
	if (airoha_ppe_is_enabled(eth, 1)) {
		/* PPE2 */
		for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) {
			if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2)
				airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2,
							 q,
							 PSE_QUEUE_RSV_PAGES);
			else
				airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2,
							 q, 0);
		}
	}
	/* GDM4 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM4]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM4, q,
					 PSE_QUEUE_RSV_PAGES);
	/* CDM5 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM5]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM5, q,
					 PSE_QUEUE_RSV_PAGES);
}

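/* The multicast VLAN table is accessed indirectly: data is staged in
 * REG_MC_VLAN_DATA, a command is issued via REG_MC_VLAN_CFG and then
 * polled for completion (MC_VLAN_CFG_CMD_DONE).
 */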
static int airoha_fe_mc_vlan_clear(struct airoha_eth *eth)
{
	int i;

	for (i = 0; i < AIROHA_FE_MC_MAX_VLAN_TABLE; i++) {
		int err, j;
		u32 val;

		airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);

		val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
		      MC_VLAN_CFG_TABLE_SEL_MASK | MC_VLAN_CFG_RW_MASK;
		airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
		err = read_poll_timeout(airoha_fe_rr, val,
					val & MC_VLAN_CFG_CMD_DONE_MASK,
					USEC_PER_MSEC, 5 * USEC_PER_MSEC,
					false, eth, REG_MC_VLAN_CFG);
		if (err)
			return err;

		for (j = 0; j < AIROHA_FE_MC_MAX_VLAN_PORT; j++) {
			airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);

			val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
			      FIELD_PREP(MC_VLAN_CFG_PORT_ID_MASK, j) |
			      MC_VLAN_CFG_RW_MASK;
			airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
			err = read_poll_timeout(airoha_fe_rr, val,
						val & MC_VLAN_CFG_CMD_DONE_MASK,
						USEC_PER_MSEC,
						5 * USEC_PER_MSEC, false, eth,
						REG_MC_VLAN_CFG);
			if (err)
				return err;
		}
	}

	return 0;
}

static void airoha_fe_crsn_qsel_init(struct airoha_eth *eth)
{
	/* CDM1_CRSN_QSEL */
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_22 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_22),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_22),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_08 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_08),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_08),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_21 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_21),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_21),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_24 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_24),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_24),
				 CDM_CRSN_QSEL_Q6));
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_25 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_25),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_25),
				 CDM_CRSN_QSEL_Q1));
	/* CDM2_CRSN_QSEL */
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_08 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_08),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_08),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_21 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_21),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_21),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_22 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_22),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_22),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_24 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_24),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_24),
				 CDM_CRSN_QSEL_Q6));
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_25 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_25),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_25),
				 CDM_CRSN_QSEL_Q1));
}

static int airoha_fe_init(struct airoha_eth *eth)
{
	airoha_fe_maccr_init(eth);

	/* PSE IQ reserve */
	airoha_fe_rmw(eth, REG_PSE_IQ_REV1, PSE_IQ_RES1_P2_MASK,
		      FIELD_PREP(PSE_IQ_RES1_P2_MASK, 0x10));
	airoha_fe_rmw(eth, REG_PSE_IQ_REV2,
		      PSE_IQ_RES2_P5_MASK | PSE_IQ_RES2_P4_MASK,
		      FIELD_PREP(PSE_IQ_RES2_P5_MASK, 0x40) |
		      FIELD_PREP(PSE_IQ_RES2_P4_MASK, 0x34));

	/* enable FE copy engine for KA/DPI */
	airoha_fe_wr(eth, REG_FE_PCE_CFG, PCE_DPI_EN_MASK | PCE_KA_EN_MASK);
	/* set vip queue selection to ring 1 */
	airoha_fe_rmw(eth, REG_CDM_FWD_CFG(1), CDM_VIP_QSEL_MASK,
		      FIELD_PREP(CDM_VIP_QSEL_MASK, 0x4));
	airoha_fe_rmw(eth, REG_CDM_FWD_CFG(2), CDM_VIP_QSEL_MASK,
		      FIELD_PREP(CDM_VIP_QSEL_MASK, 0x4));
	/* set GDM4 source interface offset to 8 */
	airoha_fe_rmw(eth, REG_GDM_SRC_PORT_SET(4),
		      GDM_SPORT_OFF2_MASK |
		      GDM_SPORT_OFF1_MASK |
		      GDM_SPORT_OFF0_MASK,
		      FIELD_PREP(GDM_SPORT_OFF2_MASK, 8) |
		      FIELD_PREP(GDM_SPORT_OFF1_MASK, 8) |
		      FIELD_PREP(GDM_SPORT_OFF0_MASK, 8));

	/* set PSE Page as 128B */
	airoha_fe_rmw(eth, REG_FE_DMA_GLO_CFG,
		      FE_DMA_GLO_L2_SPACE_MASK | FE_DMA_GLO_PG_SZ_MASK,
		      FIELD_PREP(FE_DMA_GLO_L2_SPACE_MASK, 2) |
		      FE_DMA_GLO_PG_SZ_MASK);
	airoha_fe_wr(eth, REG_FE_RST_GLO_CFG,
		     FE_RST_CORE_MASK | FE_RST_GDM3_MBI_ARB_MASK |
		     FE_RST_GDM4_MBI_ARB_MASK);
	usleep_range(1000, 2000);

	/* connect RxRing1 and RxRing15 to PSE Port0 OQ-1
	 * connect other rings to PSE Port0 OQ-0
	 */
	airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP0, BIT(4));
	airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP1, BIT(28));
	airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP2, BIT(4));
	airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP3, BIT(28));

	airoha_fe_vip_setup(eth);
	airoha_fe_pse_ports_init(eth);

	airoha_fe_set(eth, REG_GDM_MISC_CFG,
		      GDM2_RDM_ACK_WAIT_PREF_MASK |
		      GDM2_CHN_VLD_MODE_MASK);
	airoha_fe_rmw(eth, REG_CDM_FWD_CFG(2), CDM_OAM_QSEL_MASK,
		      FIELD_PREP(CDM_OAM_QSEL_MASK, 15));

	/* init fragment and assemble Force Port */
	/* NPU Core-3, NPU Bridge Channel-3 */
	airoha_fe_rmw(eth, REG_IP_FRAG_FP,
		      IP_FRAGMENT_PORT_MASK | IP_FRAGMENT_NBQ_MASK,
		      FIELD_PREP(IP_FRAGMENT_PORT_MASK, 6) |
		      FIELD_PREP(IP_FRAGMENT_NBQ_MASK, 3));
	/* QDMA LAN, RX Ring-22 */
	airoha_fe_rmw(eth, REG_IP_FRAG_FP,
		      IP_ASSEMBLE_PORT_MASK | IP_ASSEMBLE_NBQ_MASK,
		      FIELD_PREP(IP_ASSEMBLE_PORT_MASK, 0) |
		      FIELD_PREP(IP_ASSEMBLE_NBQ_MASK, 22));

	airoha_fe_set(eth, REG_GDM_FWD_CFG(AIROHA_GDM3_IDX), GDM_PAD_EN_MASK);
	airoha_fe_set(eth, REG_GDM_FWD_CFG(AIROHA_GDM4_IDX), GDM_PAD_EN_MASK);

	airoha_fe_crsn_qsel_init(eth);

	airoha_fe_clear(eth, REG_FE_CPORT_CFG, FE_CPORT_QUEUE_XFC_MASK);
	airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PORT_XFC_MASK);

	/* default aging mode for mbi unlock issue */
	airoha_fe_rmw(eth, REG_GDM_CHN_RLS(2),
		      MBI_RX_AGE_SEL_MASK | MBI_TX_AGE_SEL_MASK,
		      FIELD_PREP(MBI_RX_AGE_SEL_MASK, 3) |
		      FIELD_PREP(MBI_TX_AGE_SEL_MASK, 3));

	/* disable IFC by default */
	airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK);

	/* enable 1:N vlan action, init vlan table */
	airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK);

	return airoha_fe_mc_vlan_clear(eth);
}

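/* Refill the RX ring with page_pool fragments. One descriptor slot is
 * always left unused so that head == tail unambiguously means "ring
 * empty". The hw is notified of the new buffers by advancing
 * RX_CPU_IDX to the ring head.
 */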
static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
{
	struct airoha_qdma *qdma = q->qdma;
	int qid = q - &qdma->q_rx[0];
	int nframes = 0;

	while (q->queued < q->ndesc - 1) {
		struct airoha_queue_entry *e = &q->entry[q->head];
		struct airoha_qdma_desc *desc = &q->desc[q->head];
		struct page *page;
		int offset;
		u32 val;

		page = page_pool_dev_alloc_frag(q->page_pool, &offset,
						q->buf_size);
		if (!page)
			break;

		q->head = (q->head + 1) % q->ndesc;
		q->queued++;
		nframes++;

		e->buf = page_address(page) + offset;
		e->dma_addr = page_pool_get_dma_addr(page) + offset;
		e->dma_len = SKB_WITH_OVERHEAD(q->buf_size);

		val = FIELD_PREP(QDMA_DESC_LEN_MASK, e->dma_len);
		WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
		WRITE_ONCE(desc->addr, cpu_to_le32(e->dma_addr));
		val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, q->head);
		WRITE_ONCE(desc->data, cpu_to_le32(val));
		WRITE_ONCE(desc->msg0, 0);
		WRITE_ONCE(desc->msg1, 0);
		WRITE_ONCE(desc->msg2, 0);
		WRITE_ONCE(desc->msg3, 0);
	}

	if (nframes)
		airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid),
				RX_RING_CPU_IDX_MASK,
				FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));

	return nframes;
}

static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
				    struct airoha_qdma_desc *desc)
{
	u32 port, sport, msg1 = le32_to_cpu(READ_ONCE(desc->msg1));

	sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1);
	switch (sport) {
	case 0x10 ... 0x14:
		port = 0;
		break;
	case 0x2 ... 0x4:
		port = sport - 1;
		break;
	default:
		return -EINVAL;
	}

	return port >= ARRAY_SIZE(eth->ports) ? -EINVAL : port;
}

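/* Reap up to @budget completed RX descriptors. Frames can span
 * multiple descriptors (QDMA_DESC_MORE): the first fragment builds the
 * skb, subsequent ones are appended as page frags until the last
 * descriptor of the frame is found.
 */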
static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
{
	enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
	struct airoha_qdma *qdma = q->qdma;
	struct airoha_eth *eth = qdma->eth;
	int qid = q - &qdma->q_rx[0];
	int done = 0;

	while (done < budget) {
		struct airoha_queue_entry *e = &q->entry[q->tail];
		struct airoha_qdma_desc *desc = &q->desc[q->tail];
		u32 hash, reason, msg1, desc_ctrl;
		struct airoha_gdm_port *port;
		int data_len, len, p;
		struct page *page;

		desc_ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
		if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
			break;

		dma_rmb();

		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;

		dma_sync_single_for_cpu(eth->dev, e->dma_addr,
					SKB_WITH_OVERHEAD(q->buf_size), dir);

		page = virt_to_head_page(e->buf);
		len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
		data_len = q->skb ? q->buf_size
				  : SKB_WITH_OVERHEAD(q->buf_size);
		if (!len || data_len < len)
			goto free_frag;

		p = airoha_qdma_get_gdm_port(eth, desc);
		if (p < 0 || !eth->ports[p])
			goto free_frag;

		port = eth->ports[p];
		if (!q->skb) { /* first buffer */
			q->skb = napi_build_skb(e->buf, q->buf_size);
			if (!q->skb)
				goto free_frag;

			__skb_put(q->skb, len);
			skb_mark_for_recycle(q->skb);
			q->skb->dev = port->dev;
			q->skb->protocol = eth_type_trans(q->skb, port->dev);
			q->skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb_record_rx_queue(q->skb, qid);
		} else { /* scattered frame */
			struct skb_shared_info *shinfo = skb_shinfo(q->skb);
			int nr_frags = shinfo->nr_frags;

			if (nr_frags >= ARRAY_SIZE(shinfo->frags))
				goto free_frag;

			skb_add_rx_frag(q->skb, nr_frags, page,
					e->buf - page_address(page), len,
					q->buf_size);
		}

		if (FIELD_GET(QDMA_DESC_MORE_MASK, desc_ctrl))
			continue;

		if (netdev_uses_dsa(port->dev)) {
			/* PPE module requires untagged packets to work
			 * properly and it provides DSA port index via the
			 * DMA descriptor. Report DSA tag to the DSA stack
			 * via skb dst info.
			 */
			u32 msg0 = le32_to_cpu(READ_ONCE(desc->msg0));
			u32 sptag = FIELD_GET(QDMA_ETH_RXMSG_SPTAG, msg0);

			if (sptag < ARRAY_SIZE(port->dsa_meta) &&
			    port->dsa_meta[sptag])
				skb_dst_set_noref(q->skb,
						  &port->dsa_meta[sptag]->dst);
		}

		msg1 = le32_to_cpu(READ_ONCE(desc->msg1));
		hash = FIELD_GET(AIROHA_RXD4_FOE_ENTRY, msg1);
		if (hash != AIROHA_RXD4_FOE_ENTRY)
			skb_set_hash(q->skb, jhash_1word(hash, 0),
				     PKT_HASH_TYPE_L4);

		reason = FIELD_GET(AIROHA_RXD4_PPE_CPU_REASON, msg1);
		if (reason == PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
			airoha_ppe_check_skb(&eth->ppe->dev, q->skb, hash,
					     false);

		done++;
		napi_gro_receive(&q->napi, q->skb);
		q->skb = NULL;
		continue;
free_frag:
		if (q->skb) {
			dev_kfree_skb(q->skb);
			q->skb = NULL;
		}
		page_pool_put_full_page(q->page_pool, page, true);
	}
	airoha_qdma_fill_rx_queue(q);

	return done;
}

static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
{
	struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
	int cur, done = 0;

	do {
		cur = airoha_qdma_rx_process(q, budget - done);
		done += cur;
	} while (cur && done < budget);

	if (done < budget && napi_complete(napi)) {
		struct airoha_qdma *qdma = q->qdma;
		int i, qid = q - &qdma->q_rx[0];
		int intr_reg = qid < RX_DONE_HIGH_OFFSET ? QDMA_INT_REG_IDX1
							 : QDMA_INT_REG_IDX2;

		for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
			if (!(BIT(qid) & RX_IRQ_BANK_PIN_MASK(i)))
				continue;

			airoha_qdma_irq_enable(&qdma->irq_banks[i], intr_reg,
					       BIT(qid % RX_DONE_HIGH_OFFSET));
		}
	}

	return done;
}

static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
				     struct airoha_qdma *qdma, int ndesc)
{
	const struct page_pool_params pp_params = {
		.order = 0,
		.pool_size = 256,
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.dma_dir = DMA_FROM_DEVICE,
		.max_len = PAGE_SIZE,
		.nid = NUMA_NO_NODE,
		.dev = qdma->eth->dev,
		.napi = &q->napi,
	};
	struct airoha_eth *eth = qdma->eth;
	int qid = q - &qdma->q_rx[0], thr;
	dma_addr_t dma_addr;

	q->buf_size = PAGE_SIZE / 2;
	q->qdma = qdma;

	q->entry = devm_kzalloc(eth->dev, ndesc * sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->desc = dmam_alloc_coherent(eth->dev, ndesc * sizeof(*q->desc),
				      &dma_addr, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	q->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(q->page_pool)) {
		int err = PTR_ERR(q->page_pool);

		q->page_pool = NULL;
		return err;
	}

	q->ndesc = ndesc;
	netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll);

	airoha_qdma_wr(qdma, REG_RX_RING_BASE(qid), dma_addr);
	airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid),
			RX_RING_SIZE_MASK,
			FIELD_PREP(RX_RING_SIZE_MASK, ndesc));

	thr = clamp(ndesc >> 3, 1, 32);
	airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
			FIELD_PREP(RX_RING_THR_MASK, thr));
	airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
			FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
	airoha_qdma_set(qdma, REG_RX_SCATTER_CFG(qid), RX_RING_SG_EN_MASK);

	airoha_qdma_fill_rx_queue(q);

	return 0;
}

static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
{
	struct airoha_qdma *qdma = q->qdma;
	struct airoha_eth *eth = qdma->eth;
	int qid = q - &qdma->q_rx[0];

	while (q->queued) {
		struct airoha_queue_entry *e = &q->entry[q->tail];
		struct airoha_qdma_desc *desc = &q->desc[q->tail];
		struct page *page = virt_to_head_page(e->buf);

		dma_sync_single_for_cpu(eth->dev, e->dma_addr, e->dma_len,
					page_pool_get_dma_dir(q->page_pool));
		page_pool_put_full_page(q->page_pool, page, false);
		/* Reset DMA descriptor */
		WRITE_ONCE(desc->ctrl, 0);
		WRITE_ONCE(desc->addr, 0);
		WRITE_ONCE(desc->data, 0);
		WRITE_ONCE(desc->msg0, 0);
		WRITE_ONCE(desc->msg1, 0);
		WRITE_ONCE(desc->msg2, 0);
		WRITE_ONCE(desc->msg3, 0);

		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;
	}

	q->head = q->tail;
	/* Set RX_DMA_IDX to RX_CPU_IDX to notify the hw the QDMA RX ring is
	 * empty.
	 */
	airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK,
			FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
	airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
			FIELD_PREP(RX_RING_DMA_IDX_MASK, q->tail));
}

static int airoha_qdma_init_rx(struct airoha_qdma *qdma)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		int err;

		if (!(RX_DONE_INT_MASK & BIT(i))) {
			/* rx-queue not bound to an irq */
			continue;
		}

		err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma,
						RX_DSCP_NUM(i));
		if (err)
			return err;
	}

	return 0;
}

static void airoha_qdma_wake_netdev_txqs(struct airoha_queue *q)
{
	struct airoha_qdma *qdma = q->qdma;
	struct airoha_eth *eth = qdma->eth;
	int i, qid = q - &qdma->q_tx[0];

	for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
		struct airoha_gdm_port *port = eth->ports[i];
		int j;

		if (!port)
			continue;

		if (port->qdma != qdma)
			continue;

		for (j = 0; j < port->dev->num_tx_queues; j++) {
			if (airoha_qdma_get_txq(qdma, j) != qid)
				continue;

			netif_wake_subqueue(port->dev, j);
		}
	}
	q->txq_stopped = false;
}

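/* TX completions are reported through a separate IRQ queue: the hw
 * appends {ring, descriptor} tuples that are consumed here. 0xff marks
 * an unused slot; consumed entries are acknowledged in chunks of at
 * most 0x80 via REG_IRQ_CLEAR_LEN.
 */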
static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
{
	struct airoha_tx_irq_queue *irq_q;
	int id, done = 0, irq_queued;
	struct airoha_qdma *qdma;
	struct airoha_eth *eth;
	u32 status, head;

	irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
	qdma = irq_q->qdma;
	id = irq_q - &qdma->q_tx_irq[0];
	eth = qdma->eth;

	status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(id));
	head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
	head = head % irq_q->size;
	irq_queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);

	while (irq_queued > 0 && done < budget) {
		u32 qid, val = irq_q->q[head];
		struct airoha_qdma_desc *desc;
		struct airoha_queue_entry *e;
		struct airoha_queue *q;
		u32 index, desc_ctrl;
		struct sk_buff *skb;

		if (val == 0xff)
			break;

		irq_q->q[head] = 0xff; /* mark as done */
		head = (head + 1) % irq_q->size;
		irq_queued--;
		done++;

		qid = FIELD_GET(IRQ_RING_IDX_MASK, val);
		if (qid >= ARRAY_SIZE(qdma->q_tx))
			continue;

		q = &qdma->q_tx[qid];
		if (!q->ndesc)
			continue;

		index = FIELD_GET(IRQ_DESC_IDX_MASK, val);
		if (index >= q->ndesc)
			continue;

		spin_lock_bh(&q->lock);

		if (!q->queued)
			goto unlock;

		desc = &q->desc[index];
		desc_ctrl = le32_to_cpu(desc->ctrl);

		if (!(desc_ctrl & QDMA_DESC_DONE_MASK) &&
		    !(desc_ctrl & QDMA_DESC_DROP_MASK))
			goto unlock;

		e = &q->entry[index];
		skb = e->skb;

		dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
				 DMA_TO_DEVICE);
		e->dma_addr = 0;
		list_add_tail(&e->list, &q->tx_list);

		WRITE_ONCE(desc->msg0, 0);
		WRITE_ONCE(desc->msg1, 0);
		q->queued--;

		if (skb) {
			struct netdev_queue *txq;

			txq = skb_get_tx_queue(skb->dev, skb);
			netdev_tx_completed_queue(txq, 1, skb->len);
			dev_kfree_skb_any(skb);
		}

		if (q->txq_stopped && q->ndesc - q->queued >= q->free_thr) {
			/* Since multiple net_device TX queues can share the
			 * same hw QDMA TX queue, there is no guarantee we have
			 * inflight packets queued in hw belonging to a
			 * net_device TX queue stopped in the xmit path.
			 * In order to avoid any potential net_device TX queue
			 * stall, we need to wake all the net_device TX queues
			 * feeding the same hw QDMA TX queue.
			 */
			airoha_qdma_wake_netdev_txqs(q);
		}

unlock:
		spin_unlock_bh(&q->lock);
	}

	if (done) {
		int i, len = done >> 7;

		for (i = 0; i < len; i++)
			airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
					IRQ_CLEAR_LEN_MASK, 0x80);
		airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
				IRQ_CLEAR_LEN_MASK, (done & 0x7f));
	}

	if (done < budget && napi_complete(napi))
		airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0,
				       TX_DONE_INT_MASK(id));

	return done;
}

static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
				     struct airoha_qdma *qdma, int size)
{
	struct airoha_eth *eth = qdma->eth;
	int i, qid = q - &qdma->q_tx[0];
	dma_addr_t dma_addr;

	spin_lock_init(&q->lock);
	q->qdma = qdma;
	q->free_thr = 1 + MAX_SKB_FRAGS;
	INIT_LIST_HEAD(&q->tx_list);

	q->entry = devm_kzalloc(eth->dev, size * sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->desc = dmam_alloc_coherent(eth->dev, size * sizeof(*q->desc),
				      &dma_addr, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	for (i = 0; i < size; i++) {
		u32 val = FIELD_PREP(QDMA_DESC_DONE_MASK, 1);

		list_add_tail(&q->entry[i].list, &q->tx_list);
		WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
	}
	q->ndesc = size;

	/* xmit ring drop default setting */
	airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(qid),
			TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK);

	airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
	airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
			FIELD_PREP(TX_RING_CPU_IDX_MASK, 0));
	airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
			FIELD_PREP(TX_RING_DMA_IDX_MASK, 0));

	return 0;
}

static int airoha_qdma_tx_irq_init(struct airoha_tx_irq_queue *irq_q,
				   struct airoha_qdma *qdma, int size)
{
	int id = irq_q - &qdma->q_tx_irq[0];
	struct airoha_eth *eth = qdma->eth;
	dma_addr_t dma_addr;

	irq_q->q = dmam_alloc_coherent(eth->dev, size * sizeof(u32),
				       &dma_addr, GFP_KERNEL);
	if (!irq_q->q)
		return -ENOMEM;

	memset(irq_q->q, 0xff, size * sizeof(u32));
	irq_q->size = size;
	irq_q->qdma = qdma;

	netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
			  airoha_qdma_tx_napi_poll);

	airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr);
	airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
			FIELD_PREP(TX_IRQ_DEPTH_MASK, size));
	airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK,
			FIELD_PREP(TX_IRQ_THR_MASK, 1));

	return 0;
}

static int airoha_qdma_init_tx(struct airoha_qdma *qdma)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
		err = airoha_qdma_tx_irq_init(&qdma->q_tx_irq[i], qdma,
					      IRQ_QUEUE_LEN(i));
		if (err)
			return err;
	}

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
		err = airoha_qdma_init_tx_queue(&qdma->q_tx[i], qdma,
						TX_DSCP_NUM);
		if (err)
			return err;
	}

	return 0;
}

static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
{
	struct airoha_qdma *qdma = q->qdma;
	struct airoha_eth *eth = qdma->eth;
	int i, qid = q - &qdma->q_tx[0];
	u16 index = 0;

	spin_lock_bh(&q->lock);
	for (i = 0; i < q->ndesc; i++) {
		struct airoha_queue_entry *e = &q->entry[i];
		struct airoha_qdma_desc *desc = &q->desc[i];

		if (!e->dma_addr)
			continue;

		dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(e->skb);
		e->dma_addr = 0;
		e->skb = NULL;
		list_add_tail(&e->list, &q->tx_list);

		/* Reset DMA descriptor */
		WRITE_ONCE(desc->ctrl, 0);
		WRITE_ONCE(desc->addr, 0);
		WRITE_ONCE(desc->data, 0);
		WRITE_ONCE(desc->msg0, 0);
		WRITE_ONCE(desc->msg1, 0);
		WRITE_ONCE(desc->msg2, 0);

		q->queued--;
	}

	if (!list_empty(&q->tx_list)) {
		struct airoha_queue_entry *e;

		e = list_first_entry(&q->tx_list, struct airoha_queue_entry,
				     list);
		index = e - q->entry;
	}
	/* Set TX_DMA_IDX to TX_CPU_IDX to notify the hw the QDMA TX ring is
	 * empty.
	 */
	airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
			FIELD_PREP(TX_RING_CPU_IDX_MASK, index));
	airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
			FIELD_PREP(TX_RING_DMA_IDX_MASK, index));

	spin_unlock_bh(&q->lock);
}

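/* Hw forwarding buffers can be backed either by a reserved memory
 * region declared in the DTS ("qdma%d-buf") or, as a fallback, by DMA
 * coherent memory. In the former case the number of hw descriptors is
 * derived from the region size.
 */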
static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
{
	int size, index, num_desc = HW_DSCP_NUM;
	struct airoha_eth *eth = qdma->eth;
	int id = qdma - &eth->qdma[0];
	u32 status, buf_size;
	dma_addr_t dma_addr;
	const char *name;

	name = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d-buf", id);
	if (!name)
		return -ENOMEM;

	buf_size = id ? AIROHA_MAX_PACKET_SIZE / 2 : AIROHA_MAX_PACKET_SIZE;
	index = of_property_match_string(eth->dev->of_node,
					 "memory-region-names", name);
	if (index >= 0) {
		struct reserved_mem *rmem;
		struct device_node *np;

		/* Consume reserved memory for hw forwarding buffers queue if
		 * available in the DTS
		 */
		np = of_parse_phandle(eth->dev->of_node, "memory-region",
				      index);
		if (!np)
			return -ENODEV;

		rmem = of_reserved_mem_lookup(np);
		of_node_put(np);
		dma_addr = rmem->base;
		/* Compute the number of hw descriptors according to the
		 * reserved memory size and the payload buffer size
		 */
		num_desc = div_u64(rmem->size, buf_size);
	} else {
		size = buf_size * num_desc;
		if (!dmam_alloc_coherent(eth->dev, size, &dma_addr,
					 GFP_KERNEL))
			return -ENOMEM;
	}

	airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);

	size = num_desc * sizeof(struct airoha_qdma_fwd_desc);
	if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, GFP_KERNEL))
		return -ENOMEM;

	airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
	/* QDMA0: 2KB. QDMA1: 1KB */
	airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
			HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
			FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, !!id));
	airoha_qdma_rmw(qdma, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
			FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
	airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
			LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
			HW_FWD_DESC_NUM_MASK,
			FIELD_PREP(HW_FWD_DESC_NUM_MASK, num_desc) |
			LMGR_INIT_START | LMGR_SRAM_MODE_MASK);

	return read_poll_timeout(airoha_qdma_rr, status,
				 !(status & LMGR_INIT_START), USEC_PER_MSEC,
				 30 * USEC_PER_MSEC, true, qdma,
				 REG_LMGR_INIT_CFG);
}

static void airoha_qdma_init_qos(struct airoha_qdma *qdma)
{
	airoha_qdma_clear(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
	airoha_qdma_set(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);

	airoha_qdma_clear(qdma, REG_PSE_BUF_USAGE_CFG,
			  PSE_BUF_ESTIMATE_EN_MASK);

	airoha_qdma_set(qdma, REG_EGRESS_RATE_METER_CFG,
			EGRESS_RATE_METER_EN_MASK |
			EGRESS_RATE_METER_EQ_RATE_EN_MASK);
	/* 2047us x 31 = 63.457ms */
	airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
			EGRESS_RATE_METER_WINDOW_SZ_MASK,
			FIELD_PREP(EGRESS_RATE_METER_WINDOW_SZ_MASK, 0x1f));
	airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
			EGRESS_RATE_METER_TIMESLICE_MASK,
			FIELD_PREP(EGRESS_RATE_METER_TIMESLICE_MASK, 0x7ff));

	/* ratelimit init */
	airoha_qdma_set(qdma, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK);
	/* fast-tick 25us */
	airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK,
			FIELD_PREP(GLB_FAST_TICK_MASK, 25));
	airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK,
			FIELD_PREP(GLB_SLOW_TICK_RATIO_MASK, 40));

	airoha_qdma_set(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK);
	airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK,
			FIELD_PREP(EGRESS_FAST_TICK_MASK, 25));
	airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG,
			EGRESS_SLOW_TICK_RATIO_MASK,
			FIELD_PREP(EGRESS_SLOW_TICK_RATIO_MASK, 40));

	airoha_qdma_set(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK);
	airoha_qdma_clear(qdma, REG_INGRESS_TRTCM_CFG,
			  INGRESS_TRTCM_MODE_MASK);
	airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK,
			FIELD_PREP(INGRESS_FAST_TICK_MASK, 125));
	airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG,
			INGRESS_SLOW_TICK_RATIO_MASK,
			FIELD_PREP(INGRESS_SLOW_TICK_RATIO_MASK, 8));

	airoha_qdma_set(qdma, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
	airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
			FIELD_PREP(SLA_FAST_TICK_MASK, 25));
	airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
			FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
}

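/* Each QoS channel gets a pair of hw counters: an even-indexed one for
 * packets transferred by the cpu and an odd-indexed one for packets
 * transferred by the hw forwarding engine (CNTR_SRC).
 */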
static void airoha_qdma_init_qos_stats(struct airoha_qdma *qdma)
{
	int i;

	for (i = 0; i < AIROHA_NUM_QOS_CHANNELS; i++) {
		/* Tx-cpu transferred count */
		airoha_qdma_wr(qdma, REG_CNTR_VAL(i << 1), 0);
		airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1),
			       CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
			       CNTR_ALL_DSCP_RING_EN_MASK |
			       FIELD_PREP(CNTR_CHAN_MASK, i));
		/* Tx-fwd transferred count */
		airoha_qdma_wr(qdma, REG_CNTR_VAL((i << 1) + 1), 0);
		airoha_qdma_wr(qdma, REG_CNTR_CFG((i << 1) + 1),
			       CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
			       CNTR_ALL_DSCP_RING_EN_MASK |
			       FIELD_PREP(CNTR_SRC_MASK, 1) |
			       FIELD_PREP(CNTR_CHAN_MASK, i));
	}
}

static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
		/* clear pending irqs */
		airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);
		/* setup rx irqs */
		airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX0,
				       INT_RX0_MASK(RX_IRQ_BANK_PIN_MASK(i)));
		airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX1,
				       INT_RX1_MASK(RX_IRQ_BANK_PIN_MASK(i)));
		airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX2,
				       INT_RX2_MASK(RX_IRQ_BANK_PIN_MASK(i)));
		airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX3,
				       INT_RX3_MASK(RX_IRQ_BANK_PIN_MASK(i)));
	}
	/* setup tx irqs */
	airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0,
			       TX_COHERENT_LOW_INT_MASK | INT_TX_MASK);
	airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX4,
			       TX_COHERENT_HIGH_INT_MASK);

	/* setup irq binding */
	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
		if (!qdma->q_tx[i].ndesc)
			continue;

		if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i))
			airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(i),
					TX_RING_IRQ_BLOCKING_CFG_MASK);
		else
			airoha_qdma_clear(qdma, REG_TX_RING_BLOCKING(i),
					  TX_RING_IRQ_BLOCKING_CFG_MASK);
	}

	airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
		       FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
		       GLOBAL_CFG_CPU_TXR_RR_MASK |
		       GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK |
		       GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK |
		       GLOBAL_CFG_MULTICAST_EN_MASK |
		       GLOBAL_CFG_IRQ0_EN_MASK | GLOBAL_CFG_IRQ1_EN_MASK |
		       GLOBAL_CFG_TX_WB_DONE_MASK |
		       FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2));

	airoha_qdma_init_qos(qdma);

	/* disable qdma rx delay interrupt */
	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		if (!qdma->q_rx[i].ndesc)
			continue;

		airoha_qdma_clear(qdma, REG_RX_DELAY_INT_IDX(i),
				  RX_DELAY_INT_MASK);
	}

	airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG,
			TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);
	airoha_qdma_init_qos_stats(qdma);

	return 0;
}

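/* IRQ lines are organized in banks, each with its own set of status
 * and enable registers. The handler acks the sources it owns, masks
 * the RX/TX done interrupts and defers the actual work to NAPI.
 */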
static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
{
	struct airoha_irq_bank *irq_bank = dev_instance;
	struct airoha_qdma *qdma = irq_bank->qdma;
	u32 rx_intr_mask = 0, rx_intr1, rx_intr2;
	u32 intr[ARRAY_SIZE(irq_bank->irqmask)];
	int i;

	for (i = 0; i < ARRAY_SIZE(intr); i++) {
		intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
		intr[i] &= irq_bank->irqmask[i];
		airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
	}

	if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state))
		return IRQ_NONE;

	rx_intr1 = intr[1] & RX_DONE_LOW_INT_MASK;
	if (rx_intr1) {
		airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX1, rx_intr1);
		rx_intr_mask |= rx_intr1;
	}

	rx_intr2 = intr[2] & RX_DONE_HIGH_INT_MASK;
	if (rx_intr2) {
		airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX2, rx_intr2);
		rx_intr_mask |= (rx_intr2 << 16);
	}

	for (i = 0; rx_intr_mask && i < ARRAY_SIZE(qdma->q_rx); i++) {
		if (!qdma->q_rx[i].ndesc)
			continue;

		if (rx_intr_mask & BIT(i))
			napi_schedule(&qdma->q_rx[i].napi);
	}

	if (intr[0] & INT_TX_MASK) {
		for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
			if (!(intr[0] & TX_DONE_INT_MASK(i)))
				continue;

			airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX0,
						TX_DONE_INT_MASK(i));
			napi_schedule(&qdma->q_tx_irq[i].napi);
		}
	}

	return IRQ_HANDLED;
}

static int airoha_qdma_init_irq_banks(struct platform_device *pdev,
				      struct airoha_qdma *qdma)
{
	struct airoha_eth *eth = qdma->eth;
	int i, id = qdma - &eth->qdma[0];

	for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
		struct airoha_irq_bank *irq_bank = &qdma->irq_banks[i];
		int err, irq_index = 4 * id + i;
		const char *name;

		spin_lock_init(&irq_bank->irq_lock);
		irq_bank->qdma = qdma;

		irq_bank->irq = platform_get_irq(pdev, irq_index);
		if (irq_bank->irq < 0)
			return irq_bank->irq;

		name = devm_kasprintf(eth->dev, GFP_KERNEL,
				      KBUILD_MODNAME ".%d", irq_index);
		if (!name)
			return -ENOMEM;

		err = devm_request_irq(eth->dev, irq_bank->irq,
				       airoha_irq_handler, IRQF_SHARED, name,
				       irq_bank);
		if (err)
			return err;
	}

	return 0;
}

static int airoha_qdma_init(struct platform_device *pdev,
			    struct airoha_eth *eth,
			    struct airoha_qdma *qdma)
{
	int err, id = qdma - &eth->qdma[0];
	const char *res;

	qdma->eth = eth;
	res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id);
	if (!res)
		return -ENOMEM;

	qdma->regs = devm_platform_ioremap_resource_byname(pdev, res);
	if (IS_ERR(qdma->regs))
		return dev_err_probe(eth->dev, PTR_ERR(qdma->regs),
				     "failed to iomap qdma%d regs\n", id);

	err = airoha_qdma_init_irq_banks(pdev, qdma);
	if (err)
		return err;

	err = airoha_qdma_init_rx(qdma);
	if (err)
		return err;

	err = airoha_qdma_init_tx(qdma);
	if (err)
		return err;

	err = airoha_qdma_init_hfwd_queues(qdma);
	if (err)
		return err;

	return airoha_qdma_hw_init(qdma);
}

static void airoha_qdma_cleanup(struct airoha_qdma *qdma)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		if (!qdma->q_rx[i].ndesc)
			continue;

		netif_napi_del(&qdma->q_rx[i].napi);
		airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]);
		if (qdma->q_rx[i].page_pool) {
			page_pool_destroy(qdma->q_rx[i].page_pool);
			qdma->q_rx[i].page_pool = NULL;
		}
	}

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
		if (!qdma->q_tx_irq[i].size)
			continue;

		netif_napi_del(&qdma->q_tx_irq[i].napi);
	}

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
		if (!qdma->q_tx[i].ndesc)
			continue;

		airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
	}
}

static int airoha_hw_init(struct platform_device *pdev,
			  struct airoha_eth *eth)
{
	int err, i;

	/* disable xsi */
	err = reset_control_bulk_assert(eth->soc->num_xsi_rsts, eth->xsi_rsts);
	if (err)
		return err;

	err = reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts);
	if (err)
		return err;

	msleep(20);
	err = reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts);
	if (err)
		return err;

	msleep(20);
	err = airoha_fe_init(eth);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
		err = airoha_qdma_init(pdev, eth, &eth->qdma[i]);
		if (err)
			goto error;
	}

	err = airoha_ppe_init(eth);
	if (err)
		goto error;

	set_bit(DEV_STATE_INITIALIZED, &eth->state);

	return 0;
error:
	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
		airoha_qdma_cleanup(&eth->qdma[i]);

	return err;
}

static void airoha_hw_cleanup(struct airoha_eth *eth)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
		airoha_qdma_cleanup(&eth->qdma[i]);
	airoha_ppe_deinit(eth);
}

static void airoha_qdma_start_napi(struct airoha_qdma *qdma)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
		napi_enable(&qdma->q_tx_irq[i].napi);

	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		if (!qdma->q_rx[i].ndesc)
			continue;

		napi_enable(&qdma->q_rx[i].napi);
	}
}

static void airoha_qdma_stop_napi(struct airoha_qdma *qdma)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
		napi_disable(&qdma->q_tx_irq[i].napi);

	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		if (!qdma->q_rx[i].ndesc)
			continue;

		napi_disable(&qdma->q_rx[i].napi);
	}
}

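/* MIB counters wider than 32 bit are exposed as H/L register pairs;
 * both halves are accumulated into the 64 bit software counters. The
 * hw counters are cleared after reading via REG_FE_GDM_MIB_CLEAR.
 */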
static void airoha_update_hw_stats(struct airoha_gdm_port *port)
{
	struct airoha_eth *eth = port->qdma->eth;
	u32 val, i = 0;

	spin_lock(&port->stats.lock);
	u64_stats_update_begin(&port->stats.syncp);

	/* TX */
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_H(port->id));
	port->stats.tx_ok_pkts += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_L(port->id));
	port->stats.tx_ok_pkts += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_H(port->id));
	port->stats.tx_ok_bytes += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_L(port->id));
	port->stats.tx_ok_bytes += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_DROP_CNT(port->id));
	port->stats.tx_drops += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_BC_CNT(port->id));
	port->stats.tx_broadcast += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_MC_CNT(port->id));
	port->stats.tx_multicast += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_RUNT_CNT(port->id));
	port->stats.tx_len[i] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_LONG_CNT(port->id));
	port->stats.tx_len[i++] += val;

	/* RX */
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_H(port->id));
	port->stats.rx_ok_pkts += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_L(port->id));
	port->stats.rx_ok_pkts += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_H(port->id));
	port->stats.rx_ok_bytes += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_L(port->id));
	port->stats.rx_ok_bytes += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_DROP_CNT(port->id));
	port->stats.rx_drops += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_BC_CNT(port->id));
	port->stats.rx_broadcast += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_MC_CNT(port->id));
	port->stats.rx_multicast += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ERROR_DROP_CNT(port->id));
	port->stats.rx_errors += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_CRC_ERR_CNT(port->id));
	port->stats.rx_crc_error += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OVERFLOW_DROP_CNT(port->id));
	port->stats.rx_over_errors += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_FRAG_CNT(port->id));
	port->stats.rx_fragment += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_JABBER_CNT(port->id));
	port->stats.rx_jabber += val;

	i = 0;
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_RUNT_CNT(port->id));
	port->stats.rx_len[i] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_LONG_CNT(port->id));
	port->stats.rx_len[i++] += val;

	/* reset mib counters */
	airoha_fe_set(eth, REG_FE_GDM_MIB_CLEAR(port->id),
		      FE_GDM_MIB_RX_CLEAR_MASK | FE_GDM_MIB_TX_CLEAR_MASK);

	u64_stats_update_end(&port->stats.syncp);
	spin_unlock(&port->stats.lock);
}

airoha_dev_open(struct net_device * dev)1706 static int airoha_dev_open(struct net_device *dev)
1707 {
1708 int err, len = ETH_HLEN + dev->mtu + ETH_FCS_LEN;
1709 struct airoha_gdm_port *port = netdev_priv(dev);
1710 struct airoha_qdma *qdma = port->qdma;
1711 u32 pse_port = FE_PSE_PORT_PPE1;
1712
1713 netif_tx_start_all_queues(dev);
1714 err = airoha_set_vip_for_gdm_port(port, true);
1715 if (err)
1716 return err;
1717
1718 if (netdev_uses_dsa(dev))
1719 airoha_fe_set(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
1720 GDM_STAG_EN_MASK);
1721 else
1722 airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
1723 GDM_STAG_EN_MASK);
1724
1725 airoha_fe_rmw(qdma->eth, REG_GDM_LEN_CFG(port->id),
1726 GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
1727 FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
1728 FIELD_PREP(GDM_LONG_LEN_MASK, len));
1729
1730 airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
1731 GLOBAL_CFG_TX_DMA_EN_MASK |
1732 GLOBAL_CFG_RX_DMA_EN_MASK);
1733 atomic_inc(&qdma->users);
1734
1735 if (port->id == AIROHA_GDM2_IDX &&
1736 airoha_ppe_is_enabled(qdma->eth, 1)) {
1737 /* For PPE2 always use secondary cpu port. */
1738 pse_port = FE_PSE_PORT_PPE2;
1739 }
1740 airoha_set_gdm_port_fwd_cfg(qdma->eth, REG_GDM_FWD_CFG(port->id),
1741 pse_port);
1742
1743 return 0;
1744 }
1745
1746 static int airoha_dev_stop(struct net_device *dev)
1747 {
1748 struct airoha_gdm_port *port = netdev_priv(dev);
1749 struct airoha_qdma *qdma = port->qdma;
1750 int i;
1751
1752 netif_tx_disable(dev);
1753 airoha_set_vip_for_gdm_port(port, false);
1754 for (i = 0; i < dev->num_tx_queues; i++)
1755 netdev_tx_reset_subqueue(dev, i);
1756
1757 airoha_set_gdm_port_fwd_cfg(qdma->eth, REG_GDM_FWD_CFG(port->id),
1758 FE_PSE_PORT_DROP);
1759
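/* The QDMA engine is shared by all GDM ports attached to it: only the
 * last active user disables TX/RX DMA and drains the TX queues.
 */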
1760 if (atomic_dec_and_test(&qdma->users)) {
1761 airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
1762 GLOBAL_CFG_TX_DMA_EN_MASK |
1763 GLOBAL_CFG_RX_DMA_EN_MASK);
1764
1765 for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
1766 if (!qdma->q_tx[i].ndesc)
1767 continue;
1768
1769 airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
1770 }
1771 }
1772
1773 return 0;
1774 }
1775
1776 static int airoha_dev_set_macaddr(struct net_device *dev, void *p)
1777 {
1778 struct airoha_gdm_port *port = netdev_priv(dev);
1779 int err;
1780
1781 err = eth_mac_addr(dev, p);
1782 if (err)
1783 return err;
1784
1785 airoha_set_macaddr(port, dev->dev_addr);
1786
1787 return 0;
1788 }
1789
1790 static int airoha_set_gdm2_loopback(struct airoha_gdm_port *port)
1791 {
1792 struct airoha_eth *eth = port->qdma->eth;
1793 u32 val, pse_port, chan;
1794 int i, src_port;
1795
1796 /* Forward the traffic to the proper GDM port */
1797 pse_port = port->id == AIROHA_GDM3_IDX ? FE_PSE_PORT_GDM3
1798 : FE_PSE_PORT_GDM4;
1799 airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(AIROHA_GDM2_IDX),
1800 pse_port);
1801 airoha_fe_clear(eth, REG_GDM_FWD_CFG(AIROHA_GDM2_IDX),
1802 GDM_STRIP_CRC_MASK);
1803
1804 /* Enable GDM2 loopback */
1805 airoha_fe_wr(eth, REG_GDM_TXCHN_EN(AIROHA_GDM2_IDX), 0xffffffff);
1806 airoha_fe_wr(eth, REG_GDM_RXCHN_EN(AIROHA_GDM2_IDX), 0xffff);
1807
1808 chan = port->id == AIROHA_GDM3_IDX ? (airoha_is_7581(eth) ? 4 : 3) : 0;
1809 airoha_fe_rmw(eth, REG_GDM_LPBK_CFG(AIROHA_GDM2_IDX),
1810 LPBK_CHAN_MASK | LPBK_MODE_MASK | LPBK_EN_MASK,
1811 FIELD_PREP(LPBK_CHAN_MASK, chan) |
1812 LBK_GAP_MODE_MASK | LBK_LEN_MODE_MASK |
1813 LBK_CHAN_MODE_MASK | LPBK_EN_MASK);
1814 airoha_fe_rmw(eth, REG_GDM_LEN_CFG(AIROHA_GDM2_IDX),
1815 GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
1816 FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
1817 FIELD_PREP(GDM_LONG_LEN_MASK, AIROHA_MAX_MTU));
1818
1819 /* Disable VIP and IFC for GDM2 */
1820 airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, BIT(AIROHA_GDM2_IDX));
1821 airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, BIT(AIROHA_GDM2_IDX));
1822
1823 src_port = eth->soc->ops.get_src_port_id(port, port->nbq);
1824 if (src_port < 0)
1825 return src_port;
1826
1827 airoha_fe_rmw(eth, REG_FE_WAN_PORT,
1828 WAN1_EN_MASK | WAN1_MASK | WAN0_MASK,
1829 FIELD_PREP(WAN0_MASK, src_port));
1830 val = src_port & SP_CPORT_DFT_MASK;
1831 airoha_fe_rmw(eth,
1832 REG_SP_DFT_CPORT(src_port >> fls(SP_CPORT_DFT_MASK)),
1833 SP_CPORT_MASK(val),
1834 __field_prep(SP_CPORT_MASK(val), FE_PSE_PORT_CDM2));
1835
1836 for (i = 0; i < eth->soc->num_ppe; i++)
1837 airoha_ppe_set_cpu_port(port, i, AIROHA_GDM2_IDX);
1838
1839 if (port->id == AIROHA_GDM4_IDX && airoha_is_7581(eth)) {
1840 u32 mask = FC_ID_OF_SRC_PORT_MASK(port->nbq);
1841
1842 airoha_fe_rmw(eth, REG_SRC_PORT_FC_MAP6, mask,
1843 __field_prep(mask, AIROHA_GDM2_IDX));
1844 }
1845
1846 return 0;
1847 }
1848
1849 static int airoha_dev_init(struct net_device *dev)
1850 {
1851 struct airoha_gdm_port *port = netdev_priv(dev);
1852 struct airoha_eth *eth = port->eth;
1853 int i;
1854
1855 /* QDMA0 is used for LAN ports while QDMA1 is used for WAN ports */
1856 port->qdma = &eth->qdma[!airoha_is_lan_gdm_port(port)];
1857 port->dev->irq = port->qdma->irq_banks[0].irq;
1858 airoha_set_macaddr(port, dev->dev_addr);
1859
1860 switch (port->id) {
1861 case AIROHA_GDM3_IDX:
1862 case AIROHA_GDM4_IDX:
1863 /* If GDM2 is active we can't enable loopback */
1864 if (!eth->ports[1]) {
1865 int err;
1866
1867 err = airoha_set_gdm2_loopback(port);
1868 if (err)
1869 return err;
1870 }
1871 break;
1872 default:
1873 break;
1874 }
1875
1876 for (i = 0; i < eth->soc->num_ppe; i++)
1877 airoha_ppe_set_cpu_port(port, i,
1878 airoha_get_fe_port(port));
1879
1880 return 0;
1881 }
1882
1883 static void airoha_dev_get_stats64(struct net_device *dev,
1884 struct rtnl_link_stats64 *storage)
1885 {
1886 struct airoha_gdm_port *port = netdev_priv(dev);
1887 unsigned int start;
1888
1889 airoha_update_hw_stats(port);
1890 do {
1891 start = u64_stats_fetch_begin(&port->stats.syncp);
1892 storage->rx_packets = port->stats.rx_ok_pkts;
1893 storage->tx_packets = port->stats.tx_ok_pkts;
1894 storage->rx_bytes = port->stats.rx_ok_bytes;
1895 storage->tx_bytes = port->stats.tx_ok_bytes;
1896 storage->multicast = port->stats.rx_multicast;
1897 storage->rx_errors = port->stats.rx_errors;
1898 storage->rx_dropped = port->stats.rx_drops;
1899 storage->tx_dropped = port->stats.tx_drops;
1900 storage->rx_crc_errors = port->stats.rx_crc_error;
1901 storage->rx_over_errors = port->stats.rx_over_errors;
1902 } while (u64_stats_fetch_retry(&port->stats.syncp, start));
1903 }
1904
1905 static int airoha_dev_change_mtu(struct net_device *dev, int mtu)
1906 {
1907 struct airoha_gdm_port *port = netdev_priv(dev);
1908 struct airoha_eth *eth = port->qdma->eth;
1909 u32 len = ETH_HLEN + mtu + ETH_FCS_LEN;
1910
1911 airoha_fe_rmw(eth, REG_GDM_LEN_CFG(port->id),
1912 GDM_LONG_LEN_MASK,
1913 FIELD_PREP(GDM_LONG_LEN_MASK, len));
1914 WRITE_ONCE(dev->mtu, mtu);
1915
1916 return 0;
1917 }
1918
1919 static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb,
1920 struct net_device *sb_dev)
1921 {
1922 struct airoha_gdm_port *port = netdev_priv(dev);
1923 int queue, channel;
1924
1925 /* For DSA devices, select the QoS channel according to the DSA user
1926 * port index; otherwise rely on the port id. Select the QoS queue
1927 * based on the skb priority.
1928 */
1929 channel = netdev_uses_dsa(dev) ? skb_get_queue_mapping(skb) : port->id;
1930 channel = channel % AIROHA_NUM_QOS_CHANNELS;
1931 queue = (skb->priority - 1) % AIROHA_NUM_QOS_QUEUES; /* QoS queue */
1932 queue = channel * AIROHA_NUM_QOS_QUEUES + queue;
1933
1934 return queue < dev->num_tx_queues ? queue : 0;
1935 }
1936
1937 static u32 airoha_get_dsa_tag(struct sk_buff *skb, struct net_device *dev)
1938 {
1939 #if IS_ENABLED(CONFIG_NET_DSA)
1940 struct ethhdr *ehdr;
1941 u8 xmit_tpid;
1942 u16 tag;
1943
1944 if (!netdev_uses_dsa(dev))
1945 return 0;
1946
1947 if (dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
1948 return 0;
1949
1950 if (skb_cow_head(skb, 0))
1951 return 0;
1952
1953 ehdr = (struct ethhdr *)skb->data;
1954 tag = be16_to_cpu(ehdr->h_proto);
1955 xmit_tpid = tag >> 8;
1956
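/* The MTK DSA tag encodes the TPID in its upper byte: restore the full
 * 802.1Q/802.1AD ethertype for VLAN-tagged frames and keep only the
 * remaining tag bits.
 */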
1957 switch (xmit_tpid) {
1958 case MTK_HDR_XMIT_TAGGED_TPID_8100:
1959 ehdr->h_proto = cpu_to_be16(ETH_P_8021Q);
1960 tag &= ~(MTK_HDR_XMIT_TAGGED_TPID_8100 << 8);
1961 break;
1962 case MTK_HDR_XMIT_TAGGED_TPID_88A8:
1963 ehdr->h_proto = cpu_to_be16(ETH_P_8021AD);
1964 tag &= ~(MTK_HDR_XMIT_TAGGED_TPID_88A8 << 8);
1965 break;
1966 default:
1967 /* The PPE module requires untagged DSA packets to work properly,
1968 * so move the DSA tag into the DMA descriptor.
1969 */
1970 memmove(skb->data + MTK_HDR_LEN, skb->data, 2 * ETH_ALEN);
1971 __skb_pull(skb, MTK_HDR_LEN);
1972 break;
1973 }
1974
1975 return tag;
1976 #else
1977 return 0;
1978 #endif
1979 }
1980
1981 int airoha_get_fe_port(struct airoha_gdm_port *port)
1982 {
1983 struct airoha_qdma *qdma = port->qdma;
1984 struct airoha_eth *eth = qdma->eth;
1985
1986 switch (eth->soc->version) {
1987 case 0x7583:
1988 return port->id == AIROHA_GDM3_IDX ? FE_PSE_PORT_GDM3
1989 : port->id;
1990 case 0x7581:
1991 default:
1992 return port->id == AIROHA_GDM4_IDX ? FE_PSE_PORT_GDM4
1993 : port->id;
1994 }
1995 }
1996
1997 static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
1998 struct net_device *dev)
1999 {
2000 struct airoha_gdm_port *port = netdev_priv(dev);
2001 struct airoha_qdma *qdma = port->qdma;
2002 u32 nr_frags, tag, msg0, msg1, len;
2003 struct airoha_queue_entry *e;
2004 struct netdev_queue *txq;
2005 struct airoha_queue *q;
2006 LIST_HEAD(tx_list);
2007 int i = 0, qid;
2008 void *data;
2009 u16 index;
2010 u8 fport;
2011
2012 qid = airoha_qdma_get_txq(qdma, skb_get_queue_mapping(skb));
2013 tag = airoha_get_dsa_tag(skb, dev);
2014
2015 msg0 = FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK,
2016 qid / AIROHA_NUM_QOS_QUEUES) |
2017 FIELD_PREP(QDMA_ETH_TXMSG_QUEUE_MASK,
2018 qid % AIROHA_NUM_QOS_QUEUES) |
2019 FIELD_PREP(QDMA_ETH_TXMSG_SP_TAG_MASK, tag);
2020 if (skb->ip_summed == CHECKSUM_PARTIAL)
2021 msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) |
2022 FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) |
2023 FIELD_PREP(QDMA_ETH_TXMSG_ICO_MASK, 1);
2024
2025 /* TSO: fill MSS info in tcp checksum field */
2026 if (skb_is_gso(skb)) {
2027 if (skb_cow_head(skb, 0))
2028 goto error;
2029
2030 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 |
2031 SKB_GSO_TCPV6)) {
2032 __be16 csum = cpu_to_be16(skb_shinfo(skb)->gso_size);
2033
2034 tcp_hdr(skb)->check = (__force __sum16)csum;
2035 msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TSO_MASK, 1);
2036 }
2037 }
2038
2039 fport = airoha_get_fe_port(port);
2040 msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
2041 FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);
2042
2043 q = &qdma->q_tx[qid];
2044 if (WARN_ON_ONCE(!q->ndesc))
2045 goto error;
2046
2047 spin_lock_bh(&q->lock);
2048
2049 txq = skb_get_tx_queue(dev, skb);
2050 nr_frags = 1 + skb_shinfo(skb)->nr_frags;
2051
2052 if (q->queued + nr_frags >= q->ndesc) {
2053 /* not enough space in the queue */
2054 netif_tx_stop_queue(txq);
2055 q->txq_stopped = true;
2056 spin_unlock_bh(&q->lock);
2057 return NETDEV_TX_BUSY;
2058 }
2059
2060 len = skb_headlen(skb);
2061 data = skb->data;
2062
2063 e = list_first_entry(&q->tx_list, struct airoha_queue_entry,
2064 list);
2065 index = e - q->entry;
2066
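/* Fill one DMA descriptor per fragment; only the entry for the last
 * fragment keeps a pointer to the skb, so TX completion frees it
 * exactly once.
 */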
2067 while (true) {
2068 struct airoha_qdma_desc *desc = &q->desc[index];
2069 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2070 dma_addr_t addr;
2071 u32 val;
2072
2073 addr = dma_map_single(dev->dev.parent, data, len,
2074 DMA_TO_DEVICE);
2075 if (unlikely(dma_mapping_error(dev->dev.parent, addr)))
2076 goto error_unmap;
2077
2078 list_move_tail(&e->list, &tx_list);
2079 e->skb = i == nr_frags - 1 ? skb : NULL;
2080 e->dma_addr = addr;
2081 e->dma_len = len;
2082
2083 e = list_first_entry(&q->tx_list, struct airoha_queue_entry,
2084 list);
2085 index = e - q->entry;
2086
2087 val = FIELD_PREP(QDMA_DESC_LEN_MASK, len);
2088 if (i < nr_frags - 1)
2089 val |= FIELD_PREP(QDMA_DESC_MORE_MASK, 1);
2090 WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
2091 WRITE_ONCE(desc->addr, cpu_to_le32(addr));
2092 val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, index);
2093 WRITE_ONCE(desc->data, cpu_to_le32(val));
2094 WRITE_ONCE(desc->msg0, cpu_to_le32(msg0));
2095 WRITE_ONCE(desc->msg1, cpu_to_le32(msg1));
2096 WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff));
2097
2098 if (++i == nr_frags)
2099 break;
2100
2101 data = skb_frag_address(frag);
2102 len = skb_frag_size(frag);
2103 }
2104 q->queued += i;
2105
2106 skb_tx_timestamp(skb);
2107 netdev_tx_sent_queue(txq, skb->len);
2108 if (q->ndesc - q->queued < q->free_thr) {
2109 netif_tx_stop_queue(txq);
2110 q->txq_stopped = true;
2111 }
2112
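/* Ring the hw doorbell (CPU index) only when the queue has been stopped
 * or this is the last skb of an xmit_more batch.
 */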
2113 if (netif_xmit_stopped(txq) || !netdev_xmit_more())
2114 airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid),
2115 TX_RING_CPU_IDX_MASK,
2116 FIELD_PREP(TX_RING_CPU_IDX_MASK, index));
2117
2118 spin_unlock_bh(&q->lock);
2119
2120 return NETDEV_TX_OK;
2121
2122 error_unmap:
2123 list_for_each_entry(e, &tx_list, list) {
2124 dma_unmap_single(dev->dev.parent, e->dma_addr, e->dma_len,
2125 DMA_TO_DEVICE);
2126 e->dma_addr = 0;
2127 }
2128 list_splice(&tx_list, &q->tx_list);
2129
2130 spin_unlock_bh(&q->lock);
2131 error:
2132 dev_kfree_skb_any(skb);
2133 dev->stats.tx_dropped++;
2134
2135 return NETDEV_TX_OK;
2136 }
2137
2138 static void airoha_ethtool_get_drvinfo(struct net_device *dev,
2139 struct ethtool_drvinfo *info)
2140 {
2141 struct airoha_gdm_port *port = netdev_priv(dev);
2142 struct airoha_eth *eth = port->qdma->eth;
2143
2144 strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver));
2145 strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info));
2146 }
2147
2148 static void airoha_ethtool_get_mac_stats(struct net_device *dev,
2149 struct ethtool_eth_mac_stats *stats)
2150 {
2151 struct airoha_gdm_port *port = netdev_priv(dev);
2152 unsigned int start;
2153
2154 airoha_update_hw_stats(port);
2155 do {
2156 start = u64_stats_fetch_begin(&port->stats.syncp);
2157 stats->FramesTransmittedOK = port->stats.tx_ok_pkts;
2158 stats->OctetsTransmittedOK = port->stats.tx_ok_bytes;
2159 stats->MulticastFramesXmittedOK = port->stats.tx_multicast;
2160 stats->BroadcastFramesXmittedOK = port->stats.tx_broadcast;
2161 stats->FramesReceivedOK = port->stats.rx_ok_pkts;
2162 stats->OctetsReceivedOK = port->stats.rx_ok_bytes;
2163 stats->BroadcastFramesReceivedOK = port->stats.rx_broadcast;
2164 } while (u64_stats_fetch_retry(&port->stats.syncp, start));
2165 }
2166
2167 static const struct ethtool_rmon_hist_range airoha_ethtool_rmon_ranges[] = {
2168 { 0, 64 },
2169 { 65, 127 },
2170 { 128, 255 },
2171 { 256, 511 },
2172 { 512, 1023 },
2173 { 1024, 1518 },
2174 { 1519, 10239 },
2175 {},
2176 };
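/* These ranges mirror the rx_len[]/tx_len[] histogram buckets filled by
 * airoha_update_hw_stats(); the BUILD_BUG_ON() checks below keep them in
 * sync.
 */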
2177
2178 static void
2179 airoha_ethtool_get_rmon_stats(struct net_device *dev,
2180 struct ethtool_rmon_stats *stats,
2181 const struct ethtool_rmon_hist_range **ranges)
2182 {
2183 struct airoha_gdm_port *port = netdev_priv(dev);
2184 struct airoha_hw_stats *hw_stats = &port->stats;
2185 unsigned int start;
2186
2187 BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
2188 ARRAY_SIZE(hw_stats->tx_len) + 1);
2189 BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
2190 ARRAY_SIZE(hw_stats->rx_len) + 1);
2191
2192 *ranges = airoha_ethtool_rmon_ranges;
2193 airoha_update_hw_stats(port);
2194 do {
2195 int i;
2196
2197 start = u64_stats_fetch_begin(&port->stats.syncp);
2198 stats->fragments = hw_stats->rx_fragment;
2199 stats->jabbers = hw_stats->rx_jabber;
2200 for (i = 0; i < ARRAY_SIZE(airoha_ethtool_rmon_ranges) - 1;
2201 i++) {
2202 stats->hist[i] = hw_stats->rx_len[i];
2203 stats->hist_tx[i] = hw_stats->tx_len[i];
2204 }
2205 } while (u64_stats_fetch_retry(&port->stats.syncp, start));
2206 }
2207
2208 static int airoha_qdma_set_chan_tx_sched(struct net_device *dev,
2209 int channel, enum tx_sched_mode mode,
2210 const u16 *weights, u8 n_weights)
2211 {
2212 struct airoha_gdm_port *port = netdev_priv(dev);
2213 int i;
2214
2215 for (i = 0; i < AIROHA_NUM_TX_RING; i++)
2216 airoha_qdma_clear(port->qdma, REG_QUEUE_CLOSE_CFG(channel),
2217 TXQ_DISABLE_CHAN_QUEUE_MASK(channel, i));
2218
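/* Program each queue weight through the indirect TXWRR register
 * interface, polling for command completion after each write.
 */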
2219 for (i = 0; i < n_weights; i++) {
2220 u32 status;
2221 int err;
2222
2223 airoha_qdma_wr(port->qdma, REG_TXWRR_WEIGHT_CFG,
2224 TWRR_RW_CMD_MASK |
2225 FIELD_PREP(TWRR_CHAN_IDX_MASK, channel) |
2226 FIELD_PREP(TWRR_QUEUE_IDX_MASK, i) |
2227 FIELD_PREP(TWRR_VALUE_MASK, weights[i]));
2228 err = read_poll_timeout(airoha_qdma_rr, status,
2229 status & TWRR_RW_CMD_DONE,
2230 USEC_PER_MSEC, 10 * USEC_PER_MSEC,
2231 true, port->qdma,
2232 REG_TXWRR_WEIGHT_CFG);
2233 if (err)
2234 return err;
2235 }
2236
2237 airoha_qdma_rmw(port->qdma, REG_CHAN_QOS_MODE(channel >> 3),
2238 CHAN_QOS_MODE_MASK(channel),
2239 __field_prep(CHAN_QOS_MODE_MASK(channel), mode));
2240
2241 return 0;
2242 }
2243
2244 static int airoha_qdma_set_tx_prio_sched(struct net_device *dev, int channel)
2245 {
2246 static const u16 w[AIROHA_NUM_QOS_QUEUES] = {};
2247
2248 return airoha_qdma_set_chan_tx_sched(dev, channel, TC_SCH_SP, w,
2249 ARRAY_SIZE(w));
2250 }
2251
2252 static int airoha_qdma_set_tx_ets_sched(struct net_device *dev, int channel,
2253 struct tc_ets_qopt_offload *opt)
2254 {
2255 struct tc_ets_qopt_offload_replace_params *p = &opt->replace_params;
2256 enum tx_sched_mode mode = TC_SCH_SP;
2257 u16 w[AIROHA_NUM_QOS_QUEUES] = {};
2258 int i, nstrict = 0;
2259
2260 if (p->bands > AIROHA_NUM_QOS_QUEUES)
2261 return -EINVAL;
2262
2263 for (i = 0; i < p->bands; i++) {
2264 if (!p->quanta[i])
2265 nstrict++;
2266 }
2267
2268 /* this configuration is not supported by the hw */
2269 if (nstrict == AIROHA_NUM_QOS_QUEUES - 1)
2270 return -EINVAL;
2271
2272 /* The EN7581 SoC supports a fixed QoS band priority order where WRR
2273 * queues have lower priority than SP ones,
2274 * e.g: WRR0, WRR1, .., WRRm, SP0, SP1, .., SPn
2275 */
2276 for (i = 0; i < nstrict; i++) {
2277 if (p->priomap[p->bands - i - 1] != i)
2278 return -EINVAL;
2279 }
2280
2281 for (i = 0; i < p->bands - nstrict; i++) {
2282 if (p->priomap[i] != nstrict + i)
2283 return -EINVAL;
2284
2285 w[i] = p->weights[nstrict + i];
2286 }
2287
2288 if (!nstrict)
2289 mode = TC_SCH_WRR8;
2290 else if (nstrict < AIROHA_NUM_QOS_QUEUES - 1)
2291 mode = nstrict + 1;
2292
2293 return airoha_qdma_set_chan_tx_sched(dev, channel, mode, w,
2294 ARRAY_SIZE(w));
2295 }
2296
2297 static int airoha_qdma_get_tx_ets_stats(struct net_device *dev, int channel,
2298 struct tc_ets_qopt_offload *opt)
2299 {
2300 struct airoha_gdm_port *port = netdev_priv(dev);
2301 u64 cpu_tx_packets = airoha_qdma_rr(port->qdma,
2302 REG_CNTR_VAL(channel << 1));
2303 u64 fwd_tx_packets = airoha_qdma_rr(port->qdma,
2304 REG_CNTR_VAL((channel << 1) + 1));
2305 u64 tx_packets = (cpu_tx_packets - port->cpu_tx_packets) +
2306 (fwd_tx_packets - port->fwd_tx_packets);
2307
2308 _bstats_update(opt->stats.bstats, 0, tx_packets);
2309
2310 port->cpu_tx_packets = cpu_tx_packets;
2311 port->fwd_tx_packets = fwd_tx_packets;
2312
2313 return 0;
2314 }
2315
2316 static int airoha_tc_setup_qdisc_ets(struct net_device *dev,
2317 struct tc_ets_qopt_offload *opt)
2318 {
2319 int channel;
2320
2321 if (opt->parent == TC_H_ROOT)
2322 return -EINVAL;
2323
2324 channel = TC_H_MAJ(opt->handle) >> 16;
2325 channel = channel % AIROHA_NUM_QOS_CHANNELS;
2326
2327 switch (opt->command) {
2328 case TC_ETS_REPLACE:
2329 return airoha_qdma_set_tx_ets_sched(dev, channel, opt);
2330 case TC_ETS_DESTROY:
2331 /* PRIO is default qdisc scheduler */
2332 return airoha_qdma_set_tx_prio_sched(dev, channel);
2333 case TC_ETS_STATS:
2334 return airoha_qdma_get_tx_ets_stats(dev, channel, opt);
2335 default:
2336 return -EOPNOTSUPP;
2337 }
2338 }
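/* Illustrative only (device name and numbers are placeholders), assuming
 * AIROHA_NUM_QOS_QUEUES == 8: a command such as
 *
 *   tc qdisc replace dev eth1 root handle 1: ets bands 8 strict 2 \
 *      quanta 1000 2000 3000 4000 5000 6000
 *
 * requests two strict-priority bands on top of six WRR bands, which
 * airoha_qdma_set_tx_ets_sched() programs as mode nstrict + 1 with the
 * WRR weights taken from the quanta.
 */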
2339
2340 static int airoha_qdma_get_rl_param(struct airoha_qdma *qdma, int queue_id,
2341 u32 addr, enum trtcm_param_type param,
2342 u32 *val_low, u32 *val_high)
2343 {
2344 u32 idx = QDMA_METER_IDX(queue_id), group = QDMA_METER_GROUP(queue_id);
2345 u32 val, config = FIELD_PREP(RATE_LIMIT_PARAM_TYPE_MASK, param) |
2346 FIELD_PREP(RATE_LIMIT_METER_GROUP_MASK, group) |
2347 FIELD_PREP(RATE_LIMIT_PARAM_INDEX_MASK, idx);
2348
2349 airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
2350 if (read_poll_timeout(airoha_qdma_rr, val,
2351 val & RATE_LIMIT_PARAM_RW_DONE_MASK,
2352 USEC_PER_MSEC, 10 * USEC_PER_MSEC, true, qdma,
2353 REG_TRTCM_CFG_PARAM(addr)))
2354 return -ETIMEDOUT;
2355
2356 *val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr));
2357 if (val_high)
2358 *val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr));
2359
2360 return 0;
2361 }
2362
2363 static int airoha_qdma_set_rl_param(struct airoha_qdma *qdma, int queue_id,
2364 u32 addr, enum trtcm_param_type param,
2365 u32 val)
2366 {
2367 u32 idx = QDMA_METER_IDX(queue_id), group = QDMA_METER_GROUP(queue_id);
2368 u32 config = RATE_LIMIT_PARAM_RW_MASK |
2369 FIELD_PREP(RATE_LIMIT_PARAM_TYPE_MASK, param) |
2370 FIELD_PREP(RATE_LIMIT_METER_GROUP_MASK, group) |
2371 FIELD_PREP(RATE_LIMIT_PARAM_INDEX_MASK, idx);
2372
2373 airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val);
2374 airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
2375
2376 return read_poll_timeout(airoha_qdma_rr, val,
2377 val & RATE_LIMIT_PARAM_RW_DONE_MASK,
2378 USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
2379 qdma, REG_TRTCM_CFG_PARAM(addr));
2380 }
2381
2382 static int airoha_qdma_set_rl_config(struct airoha_qdma *qdma, int queue_id,
2383 u32 addr, bool enable, u32 enable_mask)
2384 {
2385 u32 val;
2386 int err;
2387
2388 err = airoha_qdma_get_rl_param(qdma, queue_id, addr, TRTCM_MISC_MODE,
2389 &val, NULL);
2390 if (err)
2391 return err;
2392
2393 val = enable ? val | enable_mask : val & ~enable_mask;
2394
2395 return airoha_qdma_set_rl_param(qdma, queue_id, addr, TRTCM_MISC_MODE,
2396 val);
2397 }
2398
2399 static int airoha_qdma_set_rl_token_bucket(struct airoha_qdma *qdma,
2400 int queue_id, u32 rate_val,
2401 u32 bucket_size)
2402 {
2403 u32 val, config, tick, unit, rate, rate_frac;
2404 int err;
2405
2406 err = airoha_qdma_get_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
2407 TRTCM_MISC_MODE, &config, NULL);
2408 if (err)
2409 return err;
2410
2411 val = airoha_qdma_rr(qdma, REG_INGRESS_TRTCM_CFG);
2412 tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val);
2413 if (config & TRTCM_TICK_SEL)
2414 tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val);
2415 if (!tick)
2416 return -EINVAL;
2417
2418 unit = (config & TRTCM_PKT_MODE) ? 1000000 / tick : 8000 / tick;
2419 if (!unit)
2420 return -EINVAL;
2421
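/* Convert the requested rate into token units: the integer part goes in
 * TRTCM_TOKEN_RATE_MASK and the remainder is scaled into the binary
 * fraction field.
 */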
2422 rate = rate_val / unit;
2423 rate_frac = rate_val % unit;
2424 rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit;
2425 rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) |
2426 FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac);
2427
2428 err = airoha_qdma_set_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
2429 TRTCM_TOKEN_RATE_MODE, rate);
2430 if (err)
2431 return err;
2432
2433 val = bucket_size;
2434 if (!(config & TRTCM_PKT_MODE))
2435 val = max_t(u32, val, MIN_TOKEN_SIZE);
2436 val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET);
2437
2438 return airoha_qdma_set_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
2439 TRTCM_BUCKETSIZE_SHIFT_MODE, val);
2440 }
2441
2442 static int airoha_qdma_init_rl_config(struct airoha_qdma *qdma, int queue_id,
2443 bool enable, enum trtcm_unit_type unit)
2444 {
2445 bool tick_sel = queue_id == 0 || queue_id == 2 || queue_id == 8;
2446 enum trtcm_param mode = TRTCM_METER_MODE;
2447 int err;
2448
2449 mode |= unit == TRTCM_PACKET_UNIT ? TRTCM_PKT_MODE : 0;
2450 err = airoha_qdma_set_rl_config(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
2451 enable, mode);
2452 if (err)
2453 return err;
2454
2455 return airoha_qdma_set_rl_config(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
2456 tick_sel, TRTCM_TICK_SEL);
2457 }
2458
2459 static int airoha_qdma_get_trtcm_param(struct airoha_qdma *qdma, int channel,
2460 u32 addr, enum trtcm_param_type param,
2461 enum trtcm_mode_type mode,
2462 u32 *val_low, u32 *val_high)
2463 {
2464 u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
2465 u32 val, config = FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
2466 FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
2467 FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
2468 FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
2469
2470 airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
2471 if (read_poll_timeout(airoha_qdma_rr, val,
2472 val & TRTCM_PARAM_RW_DONE_MASK,
2473 USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
2474 qdma, REG_TRTCM_CFG_PARAM(addr)))
2475 return -ETIMEDOUT;
2476
2477 *val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr));
2478 if (val_high)
2479 *val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr));
2480
2481 return 0;
2482 }
2483
2484 static int airoha_qdma_set_trtcm_param(struct airoha_qdma *qdma, int channel,
2485 u32 addr, enum trtcm_param_type param,
2486 enum trtcm_mode_type mode, u32 val)
2487 {
2488 u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
2489 u32 config = TRTCM_PARAM_RW_MASK |
2490 FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
2491 FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
2492 FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
2493 FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
2494
2495 airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val);
2496 airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
2497
2498 return read_poll_timeout(airoha_qdma_rr, val,
2499 val & TRTCM_PARAM_RW_DONE_MASK,
2500 USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
2501 qdma, REG_TRTCM_CFG_PARAM(addr));
2502 }
2503
2504 static int airoha_qdma_set_trtcm_config(struct airoha_qdma *qdma, int channel,
2505 u32 addr, enum trtcm_mode_type mode,
2506 bool enable, u32 enable_mask)
2507 {
2508 u32 val;
2509
2510 if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
2511 mode, &val, NULL))
2512 return -EINVAL;
2513
2514 val = enable ? val | enable_mask : val & ~enable_mask;
2515
2516 return airoha_qdma_set_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
2517 mode, val);
2518 }
2519
2520 static int airoha_qdma_set_trtcm_token_bucket(struct airoha_qdma *qdma,
2521 int channel, u32 addr,
2522 enum trtcm_mode_type mode,
2523 u32 rate_val, u32 bucket_size)
2524 {
2525 u32 val, config, tick, unit, rate, rate_frac;
2526 int err;
2527
2528 if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
2529 mode, &config, NULL))
2530 return -EINVAL;
2531
2532 val = airoha_qdma_rr(qdma, addr);
2533 tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val);
2534 if (config & TRTCM_TICK_SEL)
2535 tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val);
2536 if (!tick)
2537 return -EINVAL;
2538
2539 unit = (config & TRTCM_PKT_MODE) ? 1000000 / tick : 8000 / tick;
2540 if (!unit)
2541 return -EINVAL;
2542
2543 rate = rate_val / unit;
2544 rate_frac = rate_val % unit;
2545 rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit;
2546 rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) |
2547 FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac);
2548
2549 err = airoha_qdma_set_trtcm_param(qdma, channel, addr,
2550 TRTCM_TOKEN_RATE_MODE, mode, rate);
2551 if (err)
2552 return err;
2553
2554 val = max_t(u32, bucket_size, MIN_TOKEN_SIZE);
2555 val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET);
2556
2557 return airoha_qdma_set_trtcm_param(qdma, channel, addr,
2558 TRTCM_BUCKETSIZE_SHIFT_MODE,
2559 mode, val);
2560 }
2561
2562 static int airoha_qdma_set_tx_rate_limit(struct net_device *dev,
2563 int channel, u32 rate,
2564 u32 bucket_size)
2565 {
2566 struct airoha_gdm_port *port = netdev_priv(dev);
2567 int i, err;
2568
2569 for (i = 0; i <= TRTCM_PEAK_MODE; i++) {
2570 err = airoha_qdma_set_trtcm_config(port->qdma, channel,
2571 REG_EGRESS_TRTCM_CFG, i,
2572 !!rate, TRTCM_METER_MODE);
2573 if (err)
2574 return err;
2575
2576 err = airoha_qdma_set_trtcm_token_bucket(port->qdma, channel,
2577 REG_EGRESS_TRTCM_CFG,
2578 i, rate, bucket_size);
2579 if (err)
2580 return err;
2581 }
2582
2583 return 0;
2584 }
2585
2586 static int airoha_tc_htb_alloc_leaf_queue(struct net_device *dev,
2587 struct tc_htb_qopt_offload *opt)
2588 {
2589 u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
2590 u32 rate = div_u64(opt->rate, 1000) << 3; /* bytes/s -> kbit/s */
2591 int err, num_tx_queues = dev->real_num_tx_queues;
2592 struct airoha_gdm_port *port = netdev_priv(dev);
2593
2594 if (opt->parent_classid != TC_HTB_CLASSID_ROOT) {
2595 NL_SET_ERR_MSG_MOD(opt->extack, "invalid parent classid");
2596 return -EINVAL;
2597 }
2598
2599 err = airoha_qdma_set_tx_rate_limit(dev, channel, rate, opt->quantum);
2600 if (err) {
2601 NL_SET_ERR_MSG_MOD(opt->extack,
2602 "failed configuring htb offload");
2603 return err;
2604 }
2605
2606 if (opt->command == TC_HTB_NODE_MODIFY)
2607 return 0;
2608
2609 err = netif_set_real_num_tx_queues(dev, num_tx_queues + 1);
2610 if (err) {
2611 airoha_qdma_set_tx_rate_limit(dev, channel, 0, opt->quantum);
2612 NL_SET_ERR_MSG_MOD(opt->extack,
2613 "failed setting real_num_tx_queues");
2614 return err;
2615 }
2616
2617 set_bit(channel, port->qos_sq_bmap);
2618 opt->qid = AIROHA_NUM_TX_RING + channel;
2619
2620 return 0;
2621 }
2622
2623 static int airoha_qdma_set_rx_meter(struct airoha_gdm_port *port,
2624 u32 rate, u32 bucket_size,
2625 enum trtcm_unit_type unit_type)
2626 {
2627 struct airoha_qdma *qdma = port->qdma;
2628 int i;
2629
2630 for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
2631 int err;
2632
2633 if (!qdma->q_rx[i].ndesc)
2634 continue;
2635
2636 err = airoha_qdma_init_rl_config(qdma, i, !!rate, unit_type);
2637 if (err)
2638 return err;
2639
2640 err = airoha_qdma_set_rl_token_bucket(qdma, i, rate,
2641 bucket_size);
2642 if (err)
2643 return err;
2644 }
2645
2646 return 0;
2647 }
2648
2649 static int airoha_tc_matchall_act_validate(struct tc_cls_matchall_offload *f)
2650 {
2651 const struct flow_action *actions = &f->rule->action;
2652 const struct flow_action_entry *act;
2653
2654 if (!flow_action_has_entries(actions)) {
2655 NL_SET_ERR_MSG_MOD(f->common.extack,
2656 "filter run with no actions");
2657 return -EINVAL;
2658 }
2659
2660 if (!flow_offload_has_one_action(actions)) {
2661 NL_SET_ERR_MSG_MOD(f->common.extack,
2662 "only once action per filter is supported");
2663 return -EOPNOTSUPP;
2664 }
2665
2666 act = &actions->entries[0];
2667 if (act->id != FLOW_ACTION_POLICE) {
2668 NL_SET_ERR_MSG_MOD(f->common.extack, "unsupported action");
2669 return -EOPNOTSUPP;
2670 }
2671
2672 if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
2673 NL_SET_ERR_MSG_MOD(f->common.extack,
2674 "invalid exceed action id");
2675 return -EOPNOTSUPP;
2676 }
2677
2678 if (act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
2679 NL_SET_ERR_MSG_MOD(f->common.extack,
2680 "invalid notexceed action id");
2681 return -EOPNOTSUPP;
2682 }
2683
2684 if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
2685 !flow_action_is_last_entry(actions, act)) {
2686 NL_SET_ERR_MSG_MOD(f->common.extack,
2687 "action accept must be last");
2688 return -EOPNOTSUPP;
2689 }
2690
2691 if (act->police.peakrate_bytes_ps || act->police.avrate ||
2692 act->police.overhead || act->police.mtu) {
2693 NL_SET_ERR_MSG_MOD(f->common.extack,
2694 "peakrate/avrate/overhead/mtu unsupported");
2695 return -EOPNOTSUPP;
2696 }
2697
2698 return 0;
2699 }
2700
2701 static int airoha_dev_tc_matchall(struct net_device *dev,
2702 struct tc_cls_matchall_offload *f)
2703 {
2704 enum trtcm_unit_type unit_type = TRTCM_BYTE_UNIT;
2705 struct airoha_gdm_port *port = netdev_priv(dev);
2706 u32 rate = 0, bucket_size = 0;
2707
2708 switch (f->command) {
2709 case TC_CLSMATCHALL_REPLACE: {
2710 const struct flow_action_entry *act;
2711 int err;
2712
2713 err = airoha_tc_matchall_act_validate(f);
2714 if (err)
2715 return err;
2716
2717 act = &f->rule->action.entries[0];
2718 if (act->police.rate_pkt_ps) {
2719 rate = act->police.rate_pkt_ps;
2720 bucket_size = act->police.burst_pkt;
2721 unit_type = TRTCM_PACKET_UNIT;
2722 } else {
2723 rate = div_u64(act->police.rate_bytes_ps, 1000);
2724 rate = rate << 3; /* bytes/s -> kbit/s */
2725 bucket_size = act->police.burst;
2726 }
2727 fallthrough;
2728 }
2729 case TC_CLSMATCHALL_DESTROY:
2730 return airoha_qdma_set_rx_meter(port, rate, bucket_size,
2731 unit_type);
2732 default:
2733 return -EOPNOTSUPP;
2734 }
2735 }
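/* Illustrative only (interface name is a placeholder): a filter such as
 *
 *   tc filter add dev eth1 ingress matchall \
 *      action police rate 100mbit burst 64k conform-exceed drop/ok
 *
 * passes airoha_tc_matchall_act_validate() and is installed as a
 * byte-mode RX meter on every active RX ring.
 */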
2736
2737 static int airoha_dev_setup_tc_block_cb(enum tc_setup_type type,
2738 void *type_data, void *cb_priv)
2739 {
2740 struct net_device *dev = cb_priv;
2741 struct airoha_gdm_port *port = netdev_priv(dev);
2742 struct airoha_eth *eth = port->qdma->eth;
2743
2744 if (!tc_can_offload(dev))
2745 return -EOPNOTSUPP;
2746
2747 switch (type) {
2748 case TC_SETUP_CLSFLOWER:
2749 return airoha_ppe_setup_tc_block_cb(&eth->ppe->dev, type_data);
2750 case TC_SETUP_CLSMATCHALL:
2751 return airoha_dev_tc_matchall(dev, type_data);
2752 default:
2753 return -EOPNOTSUPP;
2754 }
2755 }
2756
2757 static int airoha_dev_setup_tc_block(struct net_device *dev,
2758 struct flow_block_offload *f)
2759 {
2760 flow_setup_cb_t *cb = airoha_dev_setup_tc_block_cb;
2761 static LIST_HEAD(block_cb_list);
2762 struct flow_block_cb *block_cb;
2763
2764 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
2765 return -EOPNOTSUPP;
2766
2767 f->driver_block_list = &block_cb_list;
2768 switch (f->command) {
2769 case FLOW_BLOCK_BIND:
2770 block_cb = flow_block_cb_lookup(f->block, cb, dev);
2771 if (block_cb) {
2772 flow_block_cb_incref(block_cb);
2773 return 0;
2774 }
2775 block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
2776 if (IS_ERR(block_cb))
2777 return PTR_ERR(block_cb);
2778
2779 flow_block_cb_incref(block_cb);
2780 flow_block_cb_add(block_cb, f);
2781 list_add_tail(&block_cb->driver_list, &block_cb_list);
2782 return 0;
2783 case FLOW_BLOCK_UNBIND:
2784 block_cb = flow_block_cb_lookup(f->block, cb, dev);
2785 if (!block_cb)
2786 return -ENOENT;
2787
2788 if (!flow_block_cb_decref(block_cb)) {
2789 flow_block_cb_remove(block_cb, f);
2790 list_del(&block_cb->driver_list);
2791 }
2792 return 0;
2793 default:
2794 return -EOPNOTSUPP;
2795 }
2796 }
2797
2798 static void airoha_tc_remove_htb_queue(struct net_device *dev, int queue)
2799 {
2800 struct airoha_gdm_port *port = netdev_priv(dev);
2801
2802 netif_set_real_num_tx_queues(dev, dev->real_num_tx_queues - 1);
2803 airoha_qdma_set_tx_rate_limit(dev, queue + 1, 0, 0);
2804 clear_bit(queue, port->qos_sq_bmap);
2805 }
2806
2807 static int airoha_tc_htb_delete_leaf_queue(struct net_device *dev,
2808 struct tc_htb_qopt_offload *opt)
2809 {
2810 u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
2811 struct airoha_gdm_port *port = netdev_priv(dev);
2812
2813 if (!test_bit(channel, port->qos_sq_bmap)) {
2814 NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
2815 return -EINVAL;
2816 }
2817
2818 airoha_tc_remove_htb_queue(dev, channel);
2819
2820 return 0;
2821 }
2822
2823 static int airoha_tc_htb_destroy(struct net_device *dev)
2824 {
2825 struct airoha_gdm_port *port = netdev_priv(dev);
2826 int q;
2827
2828 for_each_set_bit(q, port->qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS)
2829 airoha_tc_remove_htb_queue(dev, q);
2830
2831 return 0;
2832 }
2833
2834 static int airoha_tc_get_htb_get_leaf_queue(struct net_device *dev,
2835 struct tc_htb_qopt_offload *opt)
2836 {
2837 u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
2838 struct airoha_gdm_port *port = netdev_priv(dev);
2839
2840 if (!test_bit(channel, port->qos_sq_bmap)) {
2841 NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
2842 return -EINVAL;
2843 }
2844
2845 opt->qid = AIROHA_NUM_TX_RING + channel;
2846
2847 return 0;
2848 }
2849
2850 static int airoha_tc_setup_qdisc_htb(struct net_device *dev,
2851 struct tc_htb_qopt_offload *opt)
2852 {
2853 switch (opt->command) {
2854 case TC_HTB_CREATE:
2855 break;
2856 case TC_HTB_DESTROY:
2857 return airoha_tc_htb_destroy(dev);
2858 case TC_HTB_NODE_MODIFY:
2859 case TC_HTB_LEAF_ALLOC_QUEUE:
2860 return airoha_tc_htb_alloc_leaf_queue(dev, opt);
2861 case TC_HTB_LEAF_DEL:
2862 case TC_HTB_LEAF_DEL_LAST:
2863 case TC_HTB_LEAF_DEL_LAST_FORCE:
2864 return airoha_tc_htb_delete_leaf_queue(dev, opt);
2865 case TC_HTB_LEAF_QUERY_QUEUE:
2866 return airoha_tc_get_htb_get_leaf_queue(dev, opt);
2867 default:
2868 return -EOPNOTSUPP;
2869 }
2870
2871 return 0;
2872 }
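/* Illustrative only (names are placeholders): an offloaded hierarchy like
 *
 *   tc qdisc replace dev eth1 root handle 1: htb offload
 *   tc class add dev eth1 parent 1: classid 1:1 htb rate 100mbit
 *
 * allocates a dedicated QoS channel per leaf class
 * (TC_HTB_LEAF_ALLOC_QUEUE) and rate limits it through the egress tRTCM
 * meters.
 */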
2873
2874 static int airoha_dev_tc_setup(struct net_device *dev, enum tc_setup_type type,
2875 void *type_data)
2876 {
2877 switch (type) {
2878 case TC_SETUP_QDISC_ETS:
2879 return airoha_tc_setup_qdisc_ets(dev, type_data);
2880 case TC_SETUP_QDISC_HTB:
2881 return airoha_tc_setup_qdisc_htb(dev, type_data);
2882 case TC_SETUP_BLOCK:
2883 case TC_SETUP_FT:
2884 return airoha_dev_setup_tc_block(dev, type_data);
2885 default:
2886 return -EOPNOTSUPP;
2887 }
2888 }
2889
2890 static const struct net_device_ops airoha_netdev_ops = {
2891 .ndo_init = airoha_dev_init,
2892 .ndo_open = airoha_dev_open,
2893 .ndo_stop = airoha_dev_stop,
2894 .ndo_change_mtu = airoha_dev_change_mtu,
2895 .ndo_select_queue = airoha_dev_select_queue,
2896 .ndo_start_xmit = airoha_dev_xmit,
2897 .ndo_get_stats64 = airoha_dev_get_stats64,
2898 .ndo_set_mac_address = airoha_dev_set_macaddr,
2899 .ndo_setup_tc = airoha_dev_tc_setup,
2900 };
2901
2902 static const struct ethtool_ops airoha_ethtool_ops = {
2903 .get_drvinfo = airoha_ethtool_get_drvinfo,
2904 .get_eth_mac_stats = airoha_ethtool_get_mac_stats,
2905 .get_rmon_stats = airoha_ethtool_get_rmon_stats,
2906 .get_link_ksettings = phy_ethtool_get_link_ksettings,
2907 .get_link = ethtool_op_get_link,
2908 };
2909
2910 static int airoha_metadata_dst_alloc(struct airoha_gdm_port *port)
2911 {
2912 int i;
2913
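/* Pre-allocate one HW_PORT_MUX metadata dst per DSA user port slot; the
 * RX path is expected to attach them to skbs so the DSA tagger can
 * recover the source port.
 */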
2914 for (i = 0; i < ARRAY_SIZE(port->dsa_meta); i++) {
2915 struct metadata_dst *md_dst;
2916
2917 md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
2918 GFP_KERNEL);
2919 if (!md_dst)
2920 return -ENOMEM;
2921
2922 md_dst->u.port_info.port_id = i;
2923 port->dsa_meta[i] = md_dst;
2924 }
2925
2926 return 0;
2927 }
2928
2929 static void airoha_metadata_dst_free(struct airoha_gdm_port *port)
2930 {
2931 int i;
2932
2933 for (i = 0; i < ARRAY_SIZE(port->dsa_meta); i++) {
2934 if (!port->dsa_meta[i])
2935 continue;
2936
2937 metadata_dst_free(port->dsa_meta[i]);
2938 }
2939 }
2940
2941 bool airoha_is_valid_gdm_port(struct airoha_eth *eth,
2942 struct airoha_gdm_port *port)
2943 {
2944 int i;
2945
2946 for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
2947 if (eth->ports[i] == port)
2948 return true;
2949 }
2950
2951 return false;
2952 }
2953
2954 static int airoha_alloc_gdm_port(struct airoha_eth *eth,
2955 struct device_node *np)
2956 {
2957 const __be32 *id_ptr = of_get_property(np, "reg", NULL);
2958 struct airoha_gdm_port *port;
2959 struct net_device *dev;
2960 int err, p;
2961 u32 id;
2962
2963 if (!id_ptr) {
2964 dev_err(eth->dev, "missing gdm port id\n");
2965 return -EINVAL;
2966 }
2967
2968 id = be32_to_cpup(id_ptr);
2969 p = id - 1;
2970
2971 if (!id || id > ARRAY_SIZE(eth->ports)) {
2972 dev_err(eth->dev, "invalid gdm port id: %d\n", id);
2973 return -EINVAL;
2974 }
2975
2976 if (eth->ports[p]) {
2977 dev_err(eth->dev, "duplicate gdm port id: %d\n", id);
2978 return -EINVAL;
2979 }
2980
2981 dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port),
2982 AIROHA_NUM_NETDEV_TX_RINGS,
2983 AIROHA_NUM_RX_RING);
2984 if (!dev) {
2985 dev_err(eth->dev, "alloc_etherdev failed\n");
2986 return -ENOMEM;
2987 }
2988
2989 dev->netdev_ops = &airoha_netdev_ops;
2990 dev->ethtool_ops = &airoha_ethtool_ops;
2991 dev->max_mtu = AIROHA_MAX_MTU;
2992 dev->watchdog_timeo = 5 * HZ;
2993 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
2994 NETIF_F_TSO6 | NETIF_F_IPV6_CSUM |
2995 NETIF_F_SG | NETIF_F_TSO |
2996 NETIF_F_HW_TC;
2997 dev->features |= dev->hw_features;
2998 dev->vlan_features = dev->hw_features;
2999 dev->dev.of_node = np;
3000 SET_NETDEV_DEV(dev, eth->dev);
3001
3002 /* reserve hw queues for HTB offloading */
3003 err = netif_set_real_num_tx_queues(dev, AIROHA_NUM_TX_RING);
3004 if (err)
3005 return err;
3006
3007 err = of_get_ethdev_address(np, dev);
3008 if (err) {
3009 if (err == -EPROBE_DEFER)
3010 return err;
3011
3012 eth_hw_addr_random(dev);
3013 dev_info(eth->dev, "generated random MAC address %pM\n",
3014 dev->dev_addr);
3015 }
3016
3017 port = netdev_priv(dev);
3018 u64_stats_init(&port->stats.syncp);
3019 spin_lock_init(&port->stats.lock);
3020 port->eth = eth;
3021 port->dev = dev;
3022 port->id = id;
3023 /* XXX: Read nbq from DTS */
3024 port->nbq = id == AIROHA_GDM3_IDX && airoha_is_7581(eth) ? 4 : 0;
3025 eth->ports[p] = port;
3026
3027 return airoha_metadata_dst_alloc(port);
3028 }
3029
3030 static int airoha_register_gdm_devices(struct airoha_eth *eth)
3031 {
3032 int i;
3033
3034 for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
3035 struct airoha_gdm_port *port = eth->ports[i];
3036 int err;
3037
3038 if (!port)
3039 continue;
3040
3041 err = register_netdev(port->dev);
3042 if (err)
3043 return err;
3044 }
3045
3046 set_bit(DEV_STATE_REGISTERED, &eth->state);
3047
3048 return 0;
3049 }
3050
3051 static int airoha_probe(struct platform_device *pdev)
3052 {
3053 struct reset_control_bulk_data *xsi_rsts;
3054 struct device_node *np;
3055 struct airoha_eth *eth;
3056 int i, err;
3057
3058 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
3059 if (!eth)
3060 return -ENOMEM;
3061
3062 eth->soc = of_device_get_match_data(&pdev->dev);
3063 if (!eth->soc)
3064 return -EINVAL;
3065
3066 eth->dev = &pdev->dev;
3067
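/* QDMA descriptors store buffer addresses in 32-bit fields, so restrict
 * DMA to the lower 4 GiB.
 */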
3068 err = dma_set_mask_and_coherent(eth->dev, DMA_BIT_MASK(32));
3069 if (err) {
3070 dev_err(eth->dev, "failed configuring DMA mask\n");
3071 return err;
3072 }
3073
3074 eth->fe_regs = devm_platform_ioremap_resource_byname(pdev, "fe");
3075 if (IS_ERR(eth->fe_regs))
3076 return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs),
3077 "failed to iomap fe regs\n");
3078
3079 eth->rsts[0].id = "fe";
3080 eth->rsts[1].id = "pdma";
3081 eth->rsts[2].id = "qdma";
3082 err = devm_reset_control_bulk_get_exclusive(eth->dev,
3083 ARRAY_SIZE(eth->rsts),
3084 eth->rsts);
3085 if (err) {
3086 dev_err(eth->dev, "failed to get bulk reset lines\n");
3087 return err;
3088 }
3089
3090 xsi_rsts = devm_kcalloc(eth->dev,
3091 eth->soc->num_xsi_rsts, sizeof(*xsi_rsts),
3092 GFP_KERNEL);
3093 if (!xsi_rsts)
3094 return -ENOMEM;
3095
3096 eth->xsi_rsts = xsi_rsts;
3097 for (i = 0; i < eth->soc->num_xsi_rsts; i++)
3098 eth->xsi_rsts[i].id = eth->soc->xsi_rsts_names[i];
3099
3100 err = devm_reset_control_bulk_get_exclusive(eth->dev,
3101 eth->soc->num_xsi_rsts,
3102 eth->xsi_rsts);
3103 if (err) {
3104 dev_err(eth->dev, "failed to get bulk xsi reset lines\n");
3105 return err;
3106 }
3107
3108 eth->napi_dev = alloc_netdev_dummy(0);
3109 if (!eth->napi_dev)
3110 return -ENOMEM;
3111
3112 /* Enable threaded NAPI by default */
3113 eth->napi_dev->threaded = true;
3114 strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name));
3115 platform_set_drvdata(pdev, eth);
3116
3117 err = airoha_hw_init(pdev, eth);
3118 if (err)
3119 goto error_netdev_free;
3120
3121 for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
3122 airoha_qdma_start_napi(&eth->qdma[i]);
3123
3124 for_each_child_of_node(pdev->dev.of_node, np) {
3125 if (!of_device_is_compatible(np, "airoha,eth-mac"))
3126 continue;
3127
3128 if (!of_device_is_available(np))
3129 continue;
3130
3131 err = airoha_alloc_gdm_port(eth, np);
3132 if (err) {
3133 of_node_put(np);
3134 goto error_napi_stop;
3135 }
3136 }
3137
3138 err = airoha_register_gdm_devices(eth);
3139 if (err)
3140 goto error_napi_stop;
3141
3142 return 0;
3143
3144 error_napi_stop:
3145 for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
3146 airoha_qdma_stop_napi(&eth->qdma[i]);
3147
3148 for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
3149 struct airoha_gdm_port *port = eth->ports[i];
3150
3151 if (!port)
3152 continue;
3153
3154 if (port->dev->reg_state == NETREG_REGISTERED)
3155 unregister_netdev(port->dev);
3156 airoha_metadata_dst_free(port);
3157 }
3158 airoha_hw_cleanup(eth);
3159 error_netdev_free:
3160 free_netdev(eth->napi_dev);
3161 platform_set_drvdata(pdev, NULL);
3162
3163 return err;
3164 }
3165
3166 static void airoha_remove(struct platform_device *pdev)
3167 {
3168 struct airoha_eth *eth = platform_get_drvdata(pdev);
3169 int i;
3170
3171 for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
3172 airoha_qdma_stop_napi(&eth->qdma[i]);
3173
3174 for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
3175 struct airoha_gdm_port *port = eth->ports[i];
3176
3177 if (!port)
3178 continue;
3179
3180 unregister_netdev(port->dev);
3181 airoha_metadata_dst_free(port);
3182 }
3183 airoha_hw_cleanup(eth);
3184
3185 free_netdev(eth->napi_dev);
3186 platform_set_drvdata(pdev, NULL);
3187 }
3188
3189 static const char * const en7581_xsi_rsts_names[] = {
3190 "xsi-mac",
3191 "hsi0-mac",
3192 "hsi1-mac",
3193 "hsi-mac",
3194 "xfp-mac",
3195 };
3196
3197 static int airoha_en7581_get_src_port_id(struct airoha_gdm_port *port, int nbq)
3198 {
3199 switch (port->id) {
3200 case AIROHA_GDM3_IDX:
3201 /* 7581 SoC supports PCIe serdes on GDM3 port */
3202 if (nbq == 4)
3203 return HSGMII_LAN_7581_PCIE0_SRCPORT;
3204 if (nbq == 5)
3205 return HSGMII_LAN_7581_PCIE1_SRCPORT;
3206 break;
3207 case AIROHA_GDM4_IDX:
3208 /* 7581 SoC supports eth and usb serdes on GDM4 port */
3209 if (!nbq)
3210 return HSGMII_LAN_7581_ETH_SRCPORT;
3211 if (nbq == 1)
3212 return HSGMII_LAN_7581_USB_SRCPORT;
3213 break;
3214 default:
3215 break;
3216 }
3217
3218 return -EINVAL;
3219 }
3220
3221 static u32 airoha_en7581_get_vip_port(struct airoha_gdm_port *port, int nbq)
3222 {
3223 switch (port->id) {
3224 case AIROHA_GDM3_IDX:
3225 if (nbq == 4)
3226 return XSI_PCIE0_VIP_PORT_MASK;
3227 if (nbq == 5)
3228 return XSI_PCIE1_VIP_PORT_MASK;
3229 break;
3230 case AIROHA_GDM4_IDX:
3231 if (!nbq)
3232 return XSI_ETH_VIP_PORT_MASK;
3233 if (nbq == 1)
3234 return XSI_USB_VIP_PORT_MASK;
3235 break;
3236 default:
3237 break;
3238 }
3239
3240 return 0;
3241 }
3242
3243 static const char * const an7583_xsi_rsts_names[] = {
3244 "xsi-mac",
3245 "hsi0-mac",
3246 "hsi1-mac",
3247 "xfp-mac",
3248 };
3249
3250 static int airoha_an7583_get_src_port_id(struct airoha_gdm_port *port, int nbq)
3251 {
3252 switch (port->id) {
3253 case AIROHA_GDM3_IDX:
3254 /* 7583 SoC supports eth serdes on GDM3 port */
3255 if (!nbq)
3256 return HSGMII_LAN_7583_ETH_SRCPORT;
3257 break;
3258 case AIROHA_GDM4_IDX:
3259 /* 7583 SoC supports PCIe and USB serdes on GDM4 port */
3260 if (!nbq)
3261 return HSGMII_LAN_7583_PCIE_SRCPORT;
3262 if (nbq == 1)
3263 return HSGMII_LAN_7583_USB_SRCPORT;
3264 break;
3265 default:
3266 break;
3267 }
3268
3269 return -EINVAL;
3270 }
3271
3272 static u32 airoha_an7583_get_vip_port(struct airoha_gdm_port *port, int nbq)
3273 {
3274 switch (port->id) {
3275 case AIROHA_GDM3_IDX:
3276 if (!nbq)
3277 return XSI_ETH_VIP_PORT_MASK;
3278 break;
3279 case AIROHA_GDM4_IDX:
3280 if (!nbq)
3281 return XSI_PCIE0_VIP_PORT_MASK;
3282 if (nbq == 1)
3283 return XSI_USB_VIP_PORT_MASK;
3284 break;
3285 default:
3286 break;
3287 }
3288
3289 return 0;
3290 }
3291
3292 static const struct airoha_eth_soc_data en7581_soc_data = {
3293 .version = 0x7581,
3294 .xsi_rsts_names = en7581_xsi_rsts_names,
3295 .num_xsi_rsts = ARRAY_SIZE(en7581_xsi_rsts_names),
3296 .num_ppe = 2,
3297 .ops = {
3298 .get_src_port_id = airoha_en7581_get_src_port_id,
3299 .get_vip_port = airoha_en7581_get_vip_port,
3300 },
3301 };
3302
3303 static const struct airoha_eth_soc_data an7583_soc_data = {
3304 .version = 0x7583,
3305 .xsi_rsts_names = an7583_xsi_rsts_names,
3306 .num_xsi_rsts = ARRAY_SIZE(an7583_xsi_rsts_names),
3307 .num_ppe = 1,
3308 .ops = {
3309 .get_src_port_id = airoha_an7583_get_src_port_id,
3310 .get_vip_port = airoha_an7583_get_vip_port,
3311 },
3312 };
3313
3314 static const struct of_device_id of_airoha_match[] = {
3315 { .compatible = "airoha,en7581-eth", .data = &en7581_soc_data },
3316 { .compatible = "airoha,an7583-eth", .data = &an7583_soc_data },
3317 { /* sentinel */ }
3318 };
3319 MODULE_DEVICE_TABLE(of, of_airoha_match);
3320
3321 static struct platform_driver airoha_driver = {
3322 .probe = airoha_probe,
3323 .remove = airoha_remove,
3324 .driver = {
3325 .name = KBUILD_MODNAME,
3326 .of_match_table = of_airoha_match,
3327 },
3328 };
3329 module_platform_driver(airoha_driver);
3330
3331 MODULE_LICENSE("GPL");
3332 MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
3333 MODULE_DESCRIPTION("Ethernet driver for Airoha SoC");
3334