xref: /linux/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c (revision 1260ed77798502de9c98020040d2995008de10cc)
1*d84ad2c0SDaniel Machon // SPDX-License-Identifier: GPL-2.0+
2*d84ad2c0SDaniel Machon /* Microchip lan969x Switch driver
3*d84ad2c0SDaniel Machon  *
4*d84ad2c0SDaniel Machon  * Copyright (c) 2025 Microchip Technology Inc. and its subsidiaries.
5*d84ad2c0SDaniel Machon  */
6*d84ad2c0SDaniel Machon #include <net/page_pool/helpers.h>
7*d84ad2c0SDaniel Machon 
8*d84ad2c0SDaniel Machon #include "../sparx5_main.h"
9*d84ad2c0SDaniel Machon #include "../sparx5_main_regs.h"
10*d84ad2c0SDaniel Machon #include "../sparx5_port.h"
11*d84ad2c0SDaniel Machon 
12*d84ad2c0SDaniel Machon #include "fdma_api.h"
13*d84ad2c0SDaniel Machon #include "lan969x.h"
14*d84ad2c0SDaniel Machon 
15*d84ad2c0SDaniel Machon #define FDMA_PRIV(fdma) ((struct sparx5 *)((fdma)->priv))
16*d84ad2c0SDaniel Machon 
17*d84ad2c0SDaniel Machon static int lan969x_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
18*d84ad2c0SDaniel Machon 				      u64 *dataptr)
19*d84ad2c0SDaniel Machon {
20*d84ad2c0SDaniel Machon 	*dataptr = FDMA_PRIV(fdma)->tx.dbs[dcb].dma_addr;
21*d84ad2c0SDaniel Machon 
22*d84ad2c0SDaniel Machon 	return 0;
23*d84ad2c0SDaniel Machon }
24*d84ad2c0SDaniel Machon 
25*d84ad2c0SDaniel Machon static int lan969x_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db,
26*d84ad2c0SDaniel Machon 				      u64 *dataptr)
27*d84ad2c0SDaniel Machon {
28*d84ad2c0SDaniel Machon 	struct sparx5_rx *rx = &FDMA_PRIV(fdma)->rx;
29*d84ad2c0SDaniel Machon 	struct page *page;
30*d84ad2c0SDaniel Machon 
31*d84ad2c0SDaniel Machon 	page = page_pool_dev_alloc_pages(rx->page_pool);
32*d84ad2c0SDaniel Machon 	if (unlikely(!page))
33*d84ad2c0SDaniel Machon 		return -ENOMEM;
34*d84ad2c0SDaniel Machon 
35*d84ad2c0SDaniel Machon 	rx->page[dcb][db] = page;
36*d84ad2c0SDaniel Machon 
37*d84ad2c0SDaniel Machon 	*dataptr = page_pool_get_dma_addr(page);
38*d84ad2c0SDaniel Machon 
39*d84ad2c0SDaniel Machon 	return 0;
40*d84ad2c0SDaniel Machon }
41*d84ad2c0SDaniel Machon 
42*d84ad2c0SDaniel Machon static int lan969x_fdma_get_next_dcb(struct sparx5_tx *tx)
43*d84ad2c0SDaniel Machon {
44*d84ad2c0SDaniel Machon 	struct fdma *fdma = &tx->fdma;
45*d84ad2c0SDaniel Machon 
46*d84ad2c0SDaniel Machon 	for (int i = 0; i < fdma->n_dcbs; ++i)
47*d84ad2c0SDaniel Machon 		if (!tx->dbs[i].used && !fdma_is_last(fdma, &fdma->dcbs[i]))
48*d84ad2c0SDaniel Machon 			return i;
49*d84ad2c0SDaniel Machon 
50*d84ad2c0SDaniel Machon 	return -ENOSPC;
51*d84ad2c0SDaniel Machon }
52*d84ad2c0SDaniel Machon 
53*d84ad2c0SDaniel Machon static void lan969x_fdma_tx_clear_buf(struct sparx5 *sparx5, int weight)
54*d84ad2c0SDaniel Machon {
55*d84ad2c0SDaniel Machon 	struct fdma *fdma = &sparx5->tx.fdma;
56*d84ad2c0SDaniel Machon 	struct sparx5_tx_buf *db;
57*d84ad2c0SDaniel Machon 	unsigned long flags;
58*d84ad2c0SDaniel Machon 	int i;
59*d84ad2c0SDaniel Machon 
60*d84ad2c0SDaniel Machon 	spin_lock_irqsave(&sparx5->tx_lock, flags);
61*d84ad2c0SDaniel Machon 
62*d84ad2c0SDaniel Machon 	for (i = 0; i < fdma->n_dcbs; ++i) {
63*d84ad2c0SDaniel Machon 		db = &sparx5->tx.dbs[i];
64*d84ad2c0SDaniel Machon 
65*d84ad2c0SDaniel Machon 		if (!db->used)
66*d84ad2c0SDaniel Machon 			continue;
67*d84ad2c0SDaniel Machon 
68*d84ad2c0SDaniel Machon 		if (!fdma_db_is_done(fdma_db_get(fdma, i, 0)))
69*d84ad2c0SDaniel Machon 			continue;
70*d84ad2c0SDaniel Machon 
71*d84ad2c0SDaniel Machon 		db->dev->stats.tx_bytes += db->skb->len;
72*d84ad2c0SDaniel Machon 		db->dev->stats.tx_packets++;
73*d84ad2c0SDaniel Machon 		sparx5->tx.packets++;
74*d84ad2c0SDaniel Machon 
75*d84ad2c0SDaniel Machon 		dma_unmap_single(sparx5->dev,
76*d84ad2c0SDaniel Machon 				 db->dma_addr,
77*d84ad2c0SDaniel Machon 				 db->skb->len,
78*d84ad2c0SDaniel Machon 				 DMA_TO_DEVICE);
79*d84ad2c0SDaniel Machon 
80*d84ad2c0SDaniel Machon 		if (!db->ptp)
81*d84ad2c0SDaniel Machon 			napi_consume_skb(db->skb, weight);
82*d84ad2c0SDaniel Machon 
83*d84ad2c0SDaniel Machon 		db->used = false;
84*d84ad2c0SDaniel Machon 	}
85*d84ad2c0SDaniel Machon 
86*d84ad2c0SDaniel Machon 	spin_unlock_irqrestore(&sparx5->tx_lock, flags);
87*d84ad2c0SDaniel Machon }
88*d84ad2c0SDaniel Machon 
89*d84ad2c0SDaniel Machon static void lan969x_fdma_free_pages(struct sparx5_rx *rx)
90*d84ad2c0SDaniel Machon {
91*d84ad2c0SDaniel Machon 	struct fdma *fdma = &rx->fdma;
92*d84ad2c0SDaniel Machon 
93*d84ad2c0SDaniel Machon 	for (int i = 0; i < fdma->n_dcbs; ++i) {
94*d84ad2c0SDaniel Machon 		for (int j = 0; j < fdma->n_dbs; ++j)
95*d84ad2c0SDaniel Machon 			page_pool_put_full_page(rx->page_pool,
96*d84ad2c0SDaniel Machon 						rx->page[i][j], false);
97*d84ad2c0SDaniel Machon 	}
98*d84ad2c0SDaniel Machon }
99*d84ad2c0SDaniel Machon 
/* Build an skb from the page behind the current RX DCB/DB.
 *
 * Parses the injected frame header (IFH) to resolve the source port,
 * wraps the page in an skb marked for page-pool recycling, strips the
 * IFH and (normally) the trailing FCS, and fills in timestamp, protocol
 * and stats.
 *
 * Return: the skb on success, NULL on error (page is recycled).
 */
static struct sk_buff *lan969x_fdma_rx_get_frame(struct sparx5 *sparx5,
						 struct sparx5_rx *rx)
{
	const struct sparx5_consts *consts = sparx5->data->consts;
	struct fdma *fdma = &rx->fdma;
	struct sparx5_port *port;
	struct frame_info fi;
	struct sk_buff *skb;
	struct fdma_db *db;
	struct page *page;

	db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
	page = rx->page[fdma->dcb_index][fdma->db_index];

	/* The IFH sits at the start of the page; it identifies the port the
	 * frame was extracted from.
	 */
	sparx5_ifh_parse(sparx5, page_address(page), &fi);
	port = fi.src_port < consts->n_ports ? sparx5->ports[fi.src_port] :
					       NULL;
	if (WARN_ON(!port))
		goto free_page;

	skb = build_skb(page_address(page), fdma->db_size);
	if (unlikely(!skb))
		goto free_page;

	/* Let skb teardown hand the page back to the page pool */
	skb_mark_for_recycle(skb);
	skb_put(skb, fdma_db_len_get(db));
	/* Drop the IFH so skb->data points at the Ethernet frame */
	skb_pull(skb, IFH_LEN * sizeof(u32));

	skb->dev = port->ndev;

	/* Strip the FCS unless userspace asked to keep it (NETIF_F_RXFCS) */
	if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
		skb_trim(skb, skb->len - ETH_FCS_LEN);

	sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp);
	skb->protocol = eth_type_trans(skb, skb->dev);

	/* Frames from bridged ports were already forwarded in hardware */
	if (test_bit(port->portno, sparx5->bridge_mask))
		skb->offload_fwd_mark = 1;

	skb->dev->stats.rx_bytes += skb->len;
	skb->dev->stats.rx_packets++;

	return skb;

free_page:
	page_pool_recycle_direct(rx->page_pool, page);

	return NULL;
}
149*d84ad2c0SDaniel Machon 
150*d84ad2c0SDaniel Machon static int lan969x_fdma_rx_alloc(struct sparx5 *sparx5)
151*d84ad2c0SDaniel Machon {
152*d84ad2c0SDaniel Machon 	struct sparx5_rx *rx = &sparx5->rx;
153*d84ad2c0SDaniel Machon 	struct fdma *fdma = &rx->fdma;
154*d84ad2c0SDaniel Machon 	int err;
155*d84ad2c0SDaniel Machon 
156*d84ad2c0SDaniel Machon 	struct page_pool_params pp_params = {
157*d84ad2c0SDaniel Machon 		.order = 0,
158*d84ad2c0SDaniel Machon 		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
159*d84ad2c0SDaniel Machon 		.pool_size = fdma->n_dcbs * fdma->n_dbs,
160*d84ad2c0SDaniel Machon 		.nid = NUMA_NO_NODE,
161*d84ad2c0SDaniel Machon 		.dev = sparx5->dev,
162*d84ad2c0SDaniel Machon 		.dma_dir = DMA_FROM_DEVICE,
163*d84ad2c0SDaniel Machon 		.offset = 0,
164*d84ad2c0SDaniel Machon 		.max_len = fdma->db_size -
165*d84ad2c0SDaniel Machon 			   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
166*d84ad2c0SDaniel Machon 	};
167*d84ad2c0SDaniel Machon 
168*d84ad2c0SDaniel Machon 	rx->page_pool = page_pool_create(&pp_params);
169*d84ad2c0SDaniel Machon 	if (IS_ERR(rx->page_pool))
170*d84ad2c0SDaniel Machon 		return PTR_ERR(rx->page_pool);
171*d84ad2c0SDaniel Machon 
172*d84ad2c0SDaniel Machon 	err = fdma_alloc_coherent(sparx5->dev, fdma);
173*d84ad2c0SDaniel Machon 	if (err)
174*d84ad2c0SDaniel Machon 		return err;
175*d84ad2c0SDaniel Machon 
176*d84ad2c0SDaniel Machon 	fdma_dcbs_init(fdma,
177*d84ad2c0SDaniel Machon 		       FDMA_DCB_INFO_DATAL(fdma->db_size),
178*d84ad2c0SDaniel Machon 		       FDMA_DCB_STATUS_INTR);
179*d84ad2c0SDaniel Machon 
180*d84ad2c0SDaniel Machon 	return 0;
181*d84ad2c0SDaniel Machon }
182*d84ad2c0SDaniel Machon 
183*d84ad2c0SDaniel Machon static int lan969x_fdma_tx_alloc(struct sparx5 *sparx5)
184*d84ad2c0SDaniel Machon {
185*d84ad2c0SDaniel Machon 	struct sparx5_tx *tx = &sparx5->tx;
186*d84ad2c0SDaniel Machon 	struct fdma *fdma = &tx->fdma;
187*d84ad2c0SDaniel Machon 	int err;
188*d84ad2c0SDaniel Machon 
189*d84ad2c0SDaniel Machon 	tx->dbs = kcalloc(fdma->n_dcbs,
190*d84ad2c0SDaniel Machon 			  sizeof(struct sparx5_tx_buf),
191*d84ad2c0SDaniel Machon 			  GFP_KERNEL);
192*d84ad2c0SDaniel Machon 	if (!tx->dbs)
193*d84ad2c0SDaniel Machon 		return -ENOMEM;
194*d84ad2c0SDaniel Machon 
195*d84ad2c0SDaniel Machon 	err = fdma_alloc_coherent(sparx5->dev, fdma);
196*d84ad2c0SDaniel Machon 	if (err) {
197*d84ad2c0SDaniel Machon 		kfree(tx->dbs);
198*d84ad2c0SDaniel Machon 		return err;
199*d84ad2c0SDaniel Machon 	}
200*d84ad2c0SDaniel Machon 
201*d84ad2c0SDaniel Machon 	fdma_dcbs_init(fdma,
202*d84ad2c0SDaniel Machon 		       FDMA_DCB_INFO_DATAL(fdma->db_size),
203*d84ad2c0SDaniel Machon 		       FDMA_DCB_STATUS_DONE);
204*d84ad2c0SDaniel Machon 
205*d84ad2c0SDaniel Machon 	return 0;
206*d84ad2c0SDaniel Machon }
207*d84ad2c0SDaniel Machon 
208*d84ad2c0SDaniel Machon static void lan969x_fdma_rx_init(struct sparx5 *sparx5)
209*d84ad2c0SDaniel Machon {
210*d84ad2c0SDaniel Machon 	struct fdma *fdma = &sparx5->rx.fdma;
211*d84ad2c0SDaniel Machon 
212*d84ad2c0SDaniel Machon 	fdma->channel_id = FDMA_XTR_CHANNEL;
213*d84ad2c0SDaniel Machon 	fdma->n_dcbs = FDMA_DCB_MAX;
214*d84ad2c0SDaniel Machon 	fdma->n_dbs = 1;
215*d84ad2c0SDaniel Machon 	fdma->priv = sparx5;
216*d84ad2c0SDaniel Machon 	fdma->size = fdma_get_size(fdma);
217*d84ad2c0SDaniel Machon 	fdma->db_size = PAGE_SIZE;
218*d84ad2c0SDaniel Machon 	fdma->ops.dataptr_cb = &lan969x_fdma_rx_dataptr_cb;
219*d84ad2c0SDaniel Machon 	fdma->ops.nextptr_cb = &fdma_nextptr_cb;
220*d84ad2c0SDaniel Machon 
221*d84ad2c0SDaniel Machon 	/* Fetch a netdev for SKB and NAPI use, any will do */
222*d84ad2c0SDaniel Machon 	for (int idx = 0; idx < sparx5->data->consts->n_ports; ++idx) {
223*d84ad2c0SDaniel Machon 		struct sparx5_port *port = sparx5->ports[idx];
224*d84ad2c0SDaniel Machon 
225*d84ad2c0SDaniel Machon 		if (port && port->ndev) {
226*d84ad2c0SDaniel Machon 			sparx5->rx.ndev = port->ndev;
227*d84ad2c0SDaniel Machon 			break;
228*d84ad2c0SDaniel Machon 		}
229*d84ad2c0SDaniel Machon 	}
230*d84ad2c0SDaniel Machon }
231*d84ad2c0SDaniel Machon 
232*d84ad2c0SDaniel Machon static void lan969x_fdma_tx_init(struct sparx5 *sparx5)
233*d84ad2c0SDaniel Machon {
234*d84ad2c0SDaniel Machon 	struct fdma *fdma = &sparx5->tx.fdma;
235*d84ad2c0SDaniel Machon 
236*d84ad2c0SDaniel Machon 	fdma->channel_id = FDMA_INJ_CHANNEL;
237*d84ad2c0SDaniel Machon 	fdma->n_dcbs = FDMA_DCB_MAX;
238*d84ad2c0SDaniel Machon 	fdma->n_dbs = 1;
239*d84ad2c0SDaniel Machon 	fdma->priv = sparx5;
240*d84ad2c0SDaniel Machon 	fdma->size = fdma_get_size(fdma);
241*d84ad2c0SDaniel Machon 	fdma->db_size = PAGE_SIZE;
242*d84ad2c0SDaniel Machon 	fdma->ops.dataptr_cb = &lan969x_fdma_tx_dataptr_cb;
243*d84ad2c0SDaniel Machon 	fdma->ops.nextptr_cb = &fdma_nextptr_cb;
244*d84ad2c0SDaniel Machon }
245*d84ad2c0SDaniel Machon 
/* NAPI poll handler: reclaims completed TX buffers, receives up to
 * 'weight' RX frames, re-arms the consumed DCBs with fresh pages and
 * re-enables the FDMA interrupt when the budget was not exhausted.
 *
 * Return: number of RX frames processed.
 */
int lan969x_fdma_napi_poll(struct napi_struct *napi, int weight)
{
	struct sparx5_rx *rx = container_of(napi, struct sparx5_rx, napi);
	struct sparx5 *sparx5 = container_of(rx, struct sparx5, rx);
	int old_dcb, dcb_reload, counter = 0;
	struct fdma *fdma = &rx->fdma;
	struct sk_buff *skb;

	/* Remember where we start; every DCB advanced past below must be
	 * re-armed in the reload loop.
	 */
	dcb_reload = fdma->dcb_index;

	/* TX completion is piggy-backed on the RX poll */
	lan969x_fdma_tx_clear_buf(sparx5, weight);

	/* Process RX data */
	while (counter < weight) {
		if (!fdma_has_frames(fdma))
			break;

		skb = lan969x_fdma_rx_get_frame(sparx5, rx);
		if (!skb)
			break;

		napi_gro_receive(&rx->napi, skb);

		fdma_db_advance(fdma);
		counter++;
		/* Check if the DCB can be reused */
		if (fdma_dcb_is_reusable(fdma))
			continue;

		fdma_db_reset(fdma);
		fdma_dcb_advance(fdma);
	}

	/* Allocate new pages and map them */
	while (dcb_reload != fdma->dcb_index) {
		old_dcb = dcb_reload;
		dcb_reload++;
		 /* n_dcbs must be a power of 2 */
		dcb_reload &= fdma->n_dcbs - 1;

		/* Re-arming the DCB triggers the dataptr callback, which
		 * pulls a fresh page from the page pool.
		 */
		fdma_dcb_add(fdma,
			     old_dcb,
			     FDMA_DCB_INFO_DATAL(fdma->db_size),
			     FDMA_DCB_STATUS_INTR);

		sparx5_fdma_reload(sparx5, fdma);
	}

	/* Budget not exhausted: stop polling and unmask the DB interrupt */
	if (counter < weight && napi_complete_done(napi, counter))
		spx5_wr(0xff, sparx5, FDMA_INTR_DB_ENA);

	return counter;
}
299*d84ad2c0SDaniel Machon 
/* Inject a frame via FDMA: prepend the injection frame header (IFH),
 * append room for the FCS, DMA-map the skb and hand it to the hardware
 * on a free DCB.
 *
 * Return: NETDEV_TX_OK on success, -EBUSY when no DCB is free, or a
 * negative errno on skb expansion / DMA mapping failure (caller owns
 * and frees the skb on error).
 */
int lan969x_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
		      struct net_device *dev)
{
	int next_dcb, needed_headroom, needed_tailroom, err;
	struct sparx5_tx *tx = &sparx5->tx;
	struct fdma *fdma = &tx->fdma;
	struct sparx5_tx_buf *db_buf;
	u64 status;

	next_dcb = lan969x_fdma_get_next_dcb(tx);
	if (next_dcb < 0)
		return -EBUSY;

	/* Make sure there is room for the IFH in front and the FCS at the
	 * tail; reallocate the head if not (or if the header is shared).
	 */
	needed_headroom = max_t(int, IFH_LEN * 4 - skb_headroom(skb), 0);
	needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
	if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
				       GFP_ATOMIC);
		if (unlikely(err))
			return err;
	}

	/* Place the IFH in front of the frame data */
	skb_push(skb, IFH_LEN * 4);
	memcpy(skb->data, ifh, IFH_LEN * 4);
	skb_put(skb, ETH_FCS_LEN);

	db_buf = &tx->dbs[next_dcb];
	db_buf->dma_addr = dma_map_single(sparx5->dev,
					  skb->data,
					  skb->len,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(sparx5->dev, db_buf->dma_addr))
		return -ENOMEM;

	db_buf->dev = dev;
	db_buf->skb = skb;
	db_buf->ptp = false;
	db_buf->used = true;

	/* Two-step PTP skbs are kept alive until the timestamp is read back;
	 * lan969x_fdma_tx_clear_buf() must not consume them.
	 */
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		db_buf->ptp = true;

	/* Single-block frame: start-of-frame and end-of-frame in one DB */
	status = FDMA_DCB_STATUS_SOF |
		 FDMA_DCB_STATUS_EOF |
		 FDMA_DCB_STATUS_BLOCKO(0) |
		 FDMA_DCB_STATUS_BLOCKL(skb->len) |
		 FDMA_DCB_STATUS_INTR;

	/* Advance first, then chain the new DCB and kick the hardware */
	fdma_dcb_advance(fdma);
	fdma_dcb_add(fdma, next_dcb, 0, status);

	sparx5_fdma_reload(sparx5, fdma);

	return NETDEV_TX_OK;
}
356*d84ad2c0SDaniel Machon 
357*d84ad2c0SDaniel Machon int lan969x_fdma_init(struct sparx5 *sparx5)
358*d84ad2c0SDaniel Machon {
359*d84ad2c0SDaniel Machon 	struct sparx5_rx *rx = &sparx5->rx;
360*d84ad2c0SDaniel Machon 	int err;
361*d84ad2c0SDaniel Machon 
362*d84ad2c0SDaniel Machon 	lan969x_fdma_rx_init(sparx5);
363*d84ad2c0SDaniel Machon 	lan969x_fdma_tx_init(sparx5);
364*d84ad2c0SDaniel Machon 	sparx5_fdma_injection_mode(sparx5);
365*d84ad2c0SDaniel Machon 
366*d84ad2c0SDaniel Machon 	err = dma_set_mask_and_coherent(sparx5->dev, DMA_BIT_MASK(64));
367*d84ad2c0SDaniel Machon 	if (err) {
368*d84ad2c0SDaniel Machon 		dev_err(sparx5->dev, "Failed to set 64-bit FDMA mask");
369*d84ad2c0SDaniel Machon 		return err;
370*d84ad2c0SDaniel Machon 	}
371*d84ad2c0SDaniel Machon 
372*d84ad2c0SDaniel Machon 	err = lan969x_fdma_rx_alloc(sparx5);
373*d84ad2c0SDaniel Machon 	if (err) {
374*d84ad2c0SDaniel Machon 		dev_err(sparx5->dev, "Failed to allocate RX buffers: %d\n",
375*d84ad2c0SDaniel Machon 			err);
376*d84ad2c0SDaniel Machon 		return err;
377*d84ad2c0SDaniel Machon 	}
378*d84ad2c0SDaniel Machon 
379*d84ad2c0SDaniel Machon 	err = lan969x_fdma_tx_alloc(sparx5);
380*d84ad2c0SDaniel Machon 	if (err) {
381*d84ad2c0SDaniel Machon 		fdma_free_coherent(sparx5->dev, &rx->fdma);
382*d84ad2c0SDaniel Machon 		dev_err(sparx5->dev, "Failed to allocate TX buffers: %d\n",
383*d84ad2c0SDaniel Machon 			err);
384*d84ad2c0SDaniel Machon 		return err;
385*d84ad2c0SDaniel Machon 	}
386*d84ad2c0SDaniel Machon 
387*d84ad2c0SDaniel Machon 	/* Reset FDMA state */
388*d84ad2c0SDaniel Machon 	spx5_wr(FDMA_CTRL_NRESET_SET(0), sparx5, FDMA_CTRL);
389*d84ad2c0SDaniel Machon 	spx5_wr(FDMA_CTRL_NRESET_SET(1), sparx5, FDMA_CTRL);
390*d84ad2c0SDaniel Machon 
391*d84ad2c0SDaniel Machon 	return err;
392*d84ad2c0SDaniel Machon }
393*d84ad2c0SDaniel Machon 
394*d84ad2c0SDaniel Machon int lan969x_fdma_deinit(struct sparx5 *sparx5)
395*d84ad2c0SDaniel Machon {
396*d84ad2c0SDaniel Machon 	struct sparx5_rx *rx = &sparx5->rx;
397*d84ad2c0SDaniel Machon 	struct sparx5_tx *tx = &sparx5->tx;
398*d84ad2c0SDaniel Machon 
399*d84ad2c0SDaniel Machon 	sparx5_fdma_stop(sparx5);
400*d84ad2c0SDaniel Machon 	fdma_free_coherent(sparx5->dev, &tx->fdma);
401*d84ad2c0SDaniel Machon 	fdma_free_coherent(sparx5->dev, &rx->fdma);
402*d84ad2c0SDaniel Machon 	lan969x_fdma_free_pages(rx);
403*d84ad2c0SDaniel Machon 	page_pool_destroy(rx->page_pool);
404*d84ad2c0SDaniel Machon 
405*d84ad2c0SDaniel Machon 	return 0;
406*d84ad2c0SDaniel Machon }
407