// SPDX-License-Identifier: GPL-2.0+

#include <linux/bpf.h>
#include <linux/filter.h>
#include <net/page_pool/helpers.h>

#include "lan966x_main.h"

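/* DCB data-pointer callbacks used by the shared fdma library. The RX
 * callback takes a fresh page from the page pool and offsets the DMA
 * address by XDP_PACKET_HEADROOM, so an XDP program has headroom in front
 * of the frame. The TX callbacks hand back addresses that the xmit paths
 * have already mapped (or synced).
 */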
static int lan966x_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db,
				      u64 *dataptr)
{
	struct lan966x *lan966x = (struct lan966x *)fdma->priv;
	struct lan966x_rx *rx = &lan966x->rx;
	struct page *page;

	page = page_pool_dev_alloc_pages(rx->page_pool);
	if (unlikely(!page))
		return -ENOMEM;

	rx->page[dcb][db] = page;
	*dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;

	return 0;
}

static int lan966x_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
				      u64 *dataptr)
{
	struct lan966x *lan966x = (struct lan966x *)fdma->priv;

	*dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr;

	return 0;
}

static int lan966x_fdma_xdp_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
					  u64 *dataptr)
{
	struct lan966x *lan966x = (struct lan966x *)fdma->priv;

	*dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr + XDP_PACKET_HEADROOM;

	return 0;
}

static int lan966x_fdma_channel_active(struct lan966x *lan966x)
{
	return lan_rd(lan966x, FDMA_CH_ACTIVE);
}

static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
{
	struct fdma *fdma = &rx->fdma;
	int i, j;

	for (i = 0; i < fdma->n_dcbs; ++i) {
		for (j = 0; j < fdma->n_dbs; ++j)
			page_pool_put_full_page(rx->page_pool,
						rx->page[i][j], false);
	}
}

static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx)
{
	struct fdma *fdma = &rx->fdma;
	struct page *page;

	page = rx->page[fdma->dcb_index][fdma->db_index];
	if (unlikely(!page))
		return;

	page_pool_recycle_direct(rx->page_pool, page);
}

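/* One page pool backs the whole RX ring. max_len leaves room for the
 * skb_shared_info that build_skb() later places at the end of the buffer,
 * and the DMA direction is widened to bidirectional when an XDP program
 * is attached, since XDP_TX transmits straight out of these RX pages.
 * Each port's xdp_rxq is re-pointed at the new pool so page recycling
 * keeps working across a reload.
 */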
static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct page_pool_params pp_params = {
		.order = rx->page_order,
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = rx->fdma.n_dcbs,
		.nid = NUMA_NO_NODE,
		.dev = lan966x->dev,
		.dma_dir = DMA_FROM_DEVICE,
		.offset = XDP_PACKET_HEADROOM,
		.max_len = rx->max_mtu -
			   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
	};

	if (lan966x_xdp_present(lan966x))
		pp_params.dma_dir = DMA_BIDIRECTIONAL;

	rx->page_pool = page_pool_create(&pp_params);

	for (int i = 0; i < lan966x->num_phys_ports; ++i) {
		struct lan966x_port *port;

		if (!lan966x->ports[i])
			continue;

		port = lan966x->ports[i];
		xdp_rxq_info_unreg_mem_model(&port->xdp_rxq);
		xdp_rxq_info_reg_mem_model(&port->xdp_rxq, MEM_TYPE_PAGE_POOL,
					   rx->page_pool);
	}

	return PTR_ERR_OR_ZERO(rx->page_pool);
}

static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct fdma *fdma = &rx->fdma;
	int err;

	if (lan966x_fdma_rx_alloc_page_pool(rx))
		return PTR_ERR(rx->page_pool);

	err = fdma_alloc_coherent(lan966x->dev, fdma);
	if (err)
		return err;

	fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
		       FDMA_DCB_STATUS_INTR);

	return 0;
}

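/* Advance to the next DCB in the RX ring; masking instead of a modulo
 * assumes that the number of DCBs is a power of two.
 */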
static void lan966x_fdma_rx_advance_dcb(struct lan966x_rx *rx)
{
	struct fdma *fdma = &rx->fdma;

	fdma->dcb_index++;
	fdma->dcb_index &= fdma->n_dcbs - 1;
}

static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct fdma *fdma = &rx->fdma;
	u32 mask;

	/* When activating a channel, the address of the first DCB must be
	 * written before the channel is activated.
	 */
	lan_wr(lower_32_bits((u64)fdma->dma), lan966x,
	       FDMA_DCB_LLP(fdma->channel_id));
	lan_wr(upper_32_bits((u64)fdma->dma), lan966x,
	       FDMA_DCB_LLP1(fdma->channel_id));

	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
	       FDMA_CH_CFG_CH_MEM_SET(1),
	       lan966x, FDMA_CH_CFG(fdma->channel_id));

	/* Start fdma */
	lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
		FDMA_PORT_CTRL_XTR_STOP,
		lan966x, FDMA_PORT_CTRL(0));

	/* Enable interrupts */
	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
	mask |= BIT(fdma->channel_id);
	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
		FDMA_INTR_DB_ENA_INTR_DB_ENA,
		lan966x, FDMA_INTR_DB_ENA);

	/* Activate the channel */
	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)),
		FDMA_CH_ACTIVATE_CH_ACTIVATE,
		lan966x, FDMA_CH_ACTIVATE);
}

static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct fdma *fdma = &rx->fdma;
	u32 val;

	/* Disable the channel */
	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)),
		FDMA_CH_DISABLE_CH_DISABLE,
		lan966x, FDMA_CH_DISABLE);

	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
				  val, !(val & BIT(fdma->channel_id)),
				  READL_SLEEP_US, READL_TIMEOUT_US);

	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)),
		FDMA_CH_DB_DISCARD_DB_DISCARD,
		lan966x, FDMA_CH_DB_DISCARD);
}

static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;

	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->fdma.channel_id)),
		FDMA_CH_RELOAD_CH_RELOAD,
		lan966x, FDMA_CH_RELOAD);
}

static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	struct fdma *fdma = &tx->fdma;
	int err;

	tx->dcbs_buf = kcalloc(fdma->n_dcbs, sizeof(struct lan966x_tx_dcb_buf),
			       GFP_KERNEL);
	if (!tx->dcbs_buf)
		return -ENOMEM;

	err = fdma_alloc_coherent(lan966x->dev, fdma);
	if (err)
		goto out;

	fdma_dcbs_init(fdma, 0, 0);

	return 0;

out:
	kfree(tx->dcbs_buf);
	return -ENOMEM;
}

static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	struct fdma *fdma = &tx->fdma;
	int size;

	kfree(tx->dcbs_buf);

	size = sizeof(struct fdma_dcb) * fdma->n_dcbs;
	size = ALIGN(size, PAGE_SIZE);
	dma_free_coherent(lan966x->dev, size, fdma->dcbs, fdma->dma);
}

static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	struct fdma *fdma = &tx->fdma;
	u32 mask;

	/* When activating a channel, the address of the first DCB must be
	 * written before the channel is activated.
	 */
	lan_wr(lower_32_bits((u64)fdma->dma), lan966x,
	       FDMA_DCB_LLP(fdma->channel_id));
	lan_wr(upper_32_bits((u64)fdma->dma), lan966x,
	       FDMA_DCB_LLP1(fdma->channel_id));

	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
	       FDMA_CH_CFG_CH_MEM_SET(1),
	       lan966x, FDMA_CH_CFG(fdma->channel_id));

	/* Start fdma */
	lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
		FDMA_PORT_CTRL_INJ_STOP,
		lan966x, FDMA_PORT_CTRL(0));

	/* Enable interrupts */
	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
	mask |= BIT(fdma->channel_id);
	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
		FDMA_INTR_DB_ENA_INTR_DB_ENA,
		lan966x, FDMA_INTR_DB_ENA);

	/* Activate the channel */
	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)),
		FDMA_CH_ACTIVATE_CH_ACTIVATE,
		lan966x, FDMA_CH_ACTIVATE);
}

static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	struct fdma *fdma = &tx->fdma;
	u32 val;

	/* Disable the channel */
	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)),
		FDMA_CH_DISABLE_CH_DISABLE,
		lan966x, FDMA_CH_DISABLE);

	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
				  val, !(val & BIT(fdma->channel_id)),
				  READL_SLEEP_US, READL_TIMEOUT_US);

	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)),
		FDMA_CH_DB_DISCARD_DB_DISCARD,
		lan966x, FDMA_CH_DB_DISCARD);

	tx->activated = false;
	tx->last_in_use = -1;
}

static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;

	/* Write the registers to reload the channel */
	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->fdma.channel_id)),
		FDMA_CH_RELOAD_CH_RELOAD,
		lan966x, FDMA_CH_RELOAD);
}

static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x)
{
	struct lan966x_port *port;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		port = lan966x->ports[i];
		if (!port)
			continue;

		if (netif_queue_stopped(port->dev))
			netif_wake_queue(port->dev);
	}
}

static void lan966x_fdma_stop_netdev(struct lan966x *lan966x)
{
	struct lan966x_port *port;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		port = lan966x->ports[i];
		if (!port)
			continue;

		netif_stop_queue(port->dev);
	}
}

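/* Reap completed TX DCBs under the tx_lock. Three ownership cases exist:
 * skbs mapped in lan966x_fdma_xmit() are unmapped and consumed (unless a
 * two-step PTP timestamp is still pending), xdp_frames from ndo_xdp_xmit
 * are unmapped and returned in bulk, and XDP_TX pages simply go back to
 * the RX page pool.
 */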
static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
{
	struct lan966x_tx *tx = &lan966x->tx;
	struct lan966x_rx *rx = &lan966x->rx;
	struct lan966x_tx_dcb_buf *dcb_buf;
	struct fdma *fdma = &tx->fdma;
	struct xdp_frame_bulk bq;
	unsigned long flags;
	bool clear = false;
	struct fdma_db *db;
	int i;

	xdp_frame_bulk_init(&bq);

	spin_lock_irqsave(&lan966x->tx_lock, flags);
	for (i = 0; i < fdma->n_dcbs; ++i) {
		dcb_buf = &tx->dcbs_buf[i];

		if (!dcb_buf->used)
			continue;

		db = &fdma->dcbs[i].db[0];
		if (!(db->status & FDMA_DCB_STATUS_DONE))
			continue;

		dcb_buf->dev->stats.tx_packets++;
		dcb_buf->dev->stats.tx_bytes += dcb_buf->len;

		dcb_buf->used = false;
		if (dcb_buf->use_skb) {
			dma_unmap_single(lan966x->dev,
					 dcb_buf->dma_addr,
					 dcb_buf->len,
					 DMA_TO_DEVICE);

			if (!dcb_buf->ptp)
				napi_consume_skb(dcb_buf->data.skb, weight);
		} else {
			if (dcb_buf->xdp_ndo)
				dma_unmap_single(lan966x->dev,
						 dcb_buf->dma_addr,
						 dcb_buf->len,
						 DMA_TO_DEVICE);

			if (dcb_buf->xdp_ndo)
				xdp_return_frame_bulk(dcb_buf->data.xdpf, &bq);
			else
				page_pool_recycle_direct(rx->page_pool,
							 dcb_buf->data.page);
		}

		clear = true;
	}

	xdp_flush_frame_bulk(&bq);

	if (clear)
		lan966x_fdma_wakeup_netdev(lan966x);

	spin_unlock_irqrestore(&lan966x->tx_lock, flags);
}

static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
{
	struct fdma *fdma = &rx->fdma;
	struct fdma_db *db;

	/* Check if there is any data */
	db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
	if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE)))
		return false;

	return true;
}

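/* Inspect the current RX buffer before an skb is built: sync only the
 * received length for the CPU, extract the source port from the IFH and
 * run the XDP program, if any. The return value (FDMA_PASS, FDMA_ERROR,
 * FDMA_REDIRECT, FDMA_TX or FDMA_DROP) tells the NAPI poll loop how to
 * dispose of the buffer.
 */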
static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
{
	struct lan966x *lan966x = rx->lan966x;
	struct fdma *fdma = &rx->fdma;
	struct lan966x_port *port;
	struct fdma_db *db;
	struct page *page;

	db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
	page = rx->page[fdma->dcb_index][fdma->db_index];
	if (unlikely(!page))
		return FDMA_ERROR;

	dma_sync_single_for_cpu(lan966x->dev,
				(dma_addr_t)db->dataptr + XDP_PACKET_HEADROOM,
				FDMA_DCB_STATUS_BLOCKL(db->status),
				DMA_FROM_DEVICE);

	lan966x_ifh_get_src_port(page_address(page) + XDP_PACKET_HEADROOM,
				 src_port);
	if (WARN_ON(*src_port >= lan966x->num_phys_ports))
		return FDMA_ERROR;

	port = lan966x->ports[*src_port];
	if (!lan966x_xdp_port_present(port))
		return FDMA_PASS;

	return lan966x_xdp_run(port, page, FDMA_DCB_STATUS_BLOCKL(db->status));
}

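/* Build an skb directly on top of the page-pool buffer; no data is
 * copied. skb_mark_for_recycle() returns the page to the pool when the
 * skb is freed. The IFH in front of the frame supplies the timestamp and
 * is then pulled off, and the FCS is trimmed unless the port requested
 * NETIF_F_RXFCS.
 */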
static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
						 u64 src_port)
{
	struct lan966x *lan966x = rx->lan966x;
	struct fdma *fdma = &rx->fdma;
	struct sk_buff *skb;
	struct fdma_db *db;
	struct page *page;
	u64 timestamp;

	/* Get the received frame; the page pool keeps it DMA-mapped */
	db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
	page = rx->page[fdma->dcb_index][fdma->db_index];

	skb = build_skb(page_address(page), fdma->db_size);
	if (unlikely(!skb))
		goto free_page;

	skb_mark_for_recycle(skb);

	skb_reserve(skb, XDP_PACKET_HEADROOM);
	skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));

	lan966x_ifh_get_timestamp(skb->data, &timestamp);

	skb->dev = lan966x->ports[src_port]->dev;
	skb_pull(skb, IFH_LEN_BYTES);

	if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
		skb_trim(skb, skb->len - ETH_FCS_LEN);

	lan966x_ptp_rxtstamp(lan966x, skb, src_port, timestamp);
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (lan966x->bridge_mask & BIT(src_port)) {
		skb->offload_fwd_mark = 1;

		skb_reset_network_header(skb);
		if (!lan966x_hw_offload(lan966x, src_port, skb))
			skb->offload_fwd_mark = 0;
	}

	skb->dev->stats.rx_bytes += skb->len;
	skb->dev->stats.rx_packets++;

	return skb;

free_page:
	page_pool_recycle_direct(rx->page_pool, page);

	return NULL;
}

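/* NAPI poll: reap completed TX work first, then consume up to "weight"
 * received frames. All DCBs consumed by the loop are refilled and handed
 * back to the hardware afterwards, pending XDP redirects are flushed, and
 * the doorbell interrupts are re-armed only if the budget was not
 * exhausted.
 */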
static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
{
	struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
	struct lan966x_rx *rx = &lan966x->rx;
	int old_dcb, dcb_reload, counter = 0;
	struct fdma *fdma = &rx->fdma;
	bool redirect = false;
	struct sk_buff *skb;
	u64 src_port;

	dcb_reload = fdma->dcb_index;

	lan966x_fdma_tx_clear_buf(lan966x, weight);

	/* Get all received skbs */
	while (counter < weight) {
		if (!lan966x_fdma_rx_more_frames(rx))
			break;

		counter++;

		switch (lan966x_fdma_rx_check_frame(rx, &src_port)) {
		case FDMA_PASS:
			break;
		case FDMA_ERROR:
			lan966x_fdma_rx_free_page(rx);
			lan966x_fdma_rx_advance_dcb(rx);
			goto allocate_new;
		case FDMA_REDIRECT:
			redirect = true;
			fallthrough;
		case FDMA_TX:
			lan966x_fdma_rx_advance_dcb(rx);
			continue;
		case FDMA_DROP:
			lan966x_fdma_rx_free_page(rx);
			lan966x_fdma_rx_advance_dcb(rx);
			continue;
		}

		skb = lan966x_fdma_rx_get_frame(rx, src_port);
		lan966x_fdma_rx_advance_dcb(rx);
		if (!skb)
			goto allocate_new;

		napi_gro_receive(&lan966x->napi, skb);
	}

allocate_new:
	/* Allocate new pages and map them */
	while (dcb_reload != fdma->dcb_index) {
		old_dcb = dcb_reload;
		dcb_reload++;
		dcb_reload &= fdma->n_dcbs - 1;

		fdma_dcb_add(fdma, old_dcb, FDMA_DCB_INFO_DATAL(fdma->db_size),
			     FDMA_DCB_STATUS_INTR);

		lan966x_fdma_rx_reload(rx);
	}

	if (redirect)
		xdp_do_flush();

	if (counter < weight && napi_complete_done(napi, counter))
		lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);

	return counter;
}

irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
{
	struct lan966x *lan966x = args;
	u32 db, err, err_type;

	db = lan_rd(lan966x, FDMA_INTR_DB);
	err = lan_rd(lan966x, FDMA_INTR_ERR);

	if (db) {
		lan_wr(0, lan966x, FDMA_INTR_DB_ENA);
		lan_wr(db, lan966x, FDMA_INTR_DB);

		napi_schedule(&lan966x->napi);
	}

	if (err) {
		err_type = lan_rd(lan966x, FDMA_ERRORS);

		WARN(1, "Unexpected error: %d, error_type: %d\n", err, err_type);

		lan_wr(err, lan966x, FDMA_INTR_ERR);
		lan_wr(err_type, lan966x, FDMA_ERRORS);
	}

	return IRQ_HANDLED;
}

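/* Find a free TX DCB. The DCB recorded in last_in_use is skipped even
 * when it is no longer marked as used, since the hardware may still
 * follow its nextptr; it becomes reusable only after another DCB has
 * been chained in by lan966x_fdma_tx_start().
 */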
static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
{
	struct lan966x_tx_dcb_buf *dcb_buf;
	struct fdma *fdma = &tx->fdma;
	int i;

	for (i = 0; i < fdma->n_dcbs; ++i) {
		dcb_buf = &tx->dcbs_buf[i];
		if (!dcb_buf->used && i != tx->last_in_use)
			return i;
	}

	return -1;
}

static void lan966x_fdma_tx_start(struct lan966x_tx *tx, int next_to_use)
{
	struct lan966x *lan966x = tx->lan966x;
	struct fdma *fdma = &tx->fdma;
	struct fdma_dcb *dcb;

	if (likely(lan966x->tx.activated)) {
		/* Chain the last in-use DCB to the new one */
		dcb = &fdma->dcbs[tx->last_in_use];
		dcb->nextptr = fdma->dma + (next_to_use *
					  sizeof(struct fdma_dcb));

		lan966x_fdma_tx_reload(tx);
	} else {
		/* First frame on this channel, so just activate it */
		lan966x->tx.activated = true;
		lan966x_fdma_tx_activate(tx);
	}

	/* Remember this DCB as the last one handed to the hardware */
	tx->last_in_use = next_to_use;
}

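/* Transmit path for XDP. Called with len == 0 for an xdp_frame coming
 * from ndo_xdp_xmit (ptr is the frame, which must have headroom for the
 * IFH and is DMA-mapped here) and with len != 0 for XDP_TX (ptr is the
 * RX page-pool page, already mapped, so only a sync to the device is
 * needed).
 */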
int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
{
	struct lan966x *lan966x = port->lan966x;
	struct lan966x_tx_dcb_buf *next_dcb_buf;
	struct lan966x_tx *tx = &lan966x->tx;
	struct xdp_frame *xdpf;
	dma_addr_t dma_addr;
	struct page *page;
	int next_to_use;
	__be32 *ifh;
	int ret = 0;

	spin_lock(&lan966x->tx_lock);

	/* Get next index */
	next_to_use = lan966x_fdma_get_next_dcb(tx);
	if (next_to_use < 0) {
		netif_stop_queue(port->dev);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Get the next buffer */
	next_dcb_buf = &tx->dcbs_buf[next_to_use];

	/* Generate new IFH */
	if (!len) {
		xdpf = ptr;

		if (xdpf->headroom < IFH_LEN_BYTES) {
			ret = NETDEV_TX_OK;
			goto out;
		}

		ifh = xdpf->data - IFH_LEN_BYTES;
		memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
		lan966x_ifh_set_bypass(ifh, 1);
		lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));

		dma_addr = dma_map_single(lan966x->dev,
					  xdpf->data - IFH_LEN_BYTES,
					  xdpf->len + IFH_LEN_BYTES,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(lan966x->dev, dma_addr)) {
			ret = NETDEV_TX_OK;
			goto out;
		}

		next_dcb_buf->data.xdpf = xdpf;
		next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;
	} else {
		page = ptr;

		ifh = page_address(page) + XDP_PACKET_HEADROOM;
		memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
		lan966x_ifh_set_bypass(ifh, 1);
		lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));

		dma_addr = page_pool_get_dma_addr(page);
		dma_sync_single_for_device(lan966x->dev,
					   dma_addr + XDP_PACKET_HEADROOM,
					   len + IFH_LEN_BYTES,
					   DMA_TO_DEVICE);

		next_dcb_buf->data.page = page;
		next_dcb_buf->len = len + IFH_LEN_BYTES;
	}

	/* Fill up the buffer */
	next_dcb_buf->use_skb = false;
	next_dcb_buf->xdp_ndo = !len;
	next_dcb_buf->dma_addr = dma_addr;
	next_dcb_buf->used = true;
	next_dcb_buf->ptp = false;
	next_dcb_buf->dev = port->dev;

	__fdma_dcb_add(&tx->fdma,
		       next_to_use,
		       0,
		       FDMA_DCB_STATUS_INTR |
		       FDMA_DCB_STATUS_SOF |
		       FDMA_DCB_STATUS_EOF |
		       FDMA_DCB_STATUS_BLOCKO(0) |
		       FDMA_DCB_STATUS_BLOCKL(next_dcb_buf->len),
		       &fdma_nextptr_cb,
		       &lan966x_fdma_xdp_tx_dataptr_cb);

	/* Start the transmission */
	lan966x_fdma_tx_start(tx, next_to_use);

out:
	spin_unlock(&lan966x->tx_lock);

	return ret;
}

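/* Regular ndo_start_xmit path: reshape the skb so that the IFH fits in
 * front and four bytes of FCS room fit at the end, then map it and chain
 * it into the TX ring as a single-DB DCB.
 */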
int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;
	struct lan966x_tx_dcb_buf *next_dcb_buf;
	struct lan966x_tx *tx = &lan966x->tx;
	int needed_headroom;
	int needed_tailroom;
	dma_addr_t dma_addr;
	int next_to_use;
	int err;

	/* Get next index */
	next_to_use = lan966x_fdma_get_next_dcb(tx);
	if (next_to_use < 0) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	if (skb_put_padto(skb, ETH_ZLEN)) {
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* skb processing */
	needed_headroom = max_t(int, IFH_LEN_BYTES - skb_headroom(skb), 0);
	needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
	if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
				       GFP_ATOMIC);
		if (unlikely(err)) {
			dev->stats.tx_dropped++;
			err = NETDEV_TX_OK;
			goto release;
		}
	}

	skb_tx_timestamp(skb);
	skb_push(skb, IFH_LEN_BYTES);
	memcpy(skb->data, ifh, IFH_LEN_BYTES);
	skb_put(skb, 4);

	dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(lan966x->dev, dma_addr)) {
		dev->stats.tx_dropped++;
		err = NETDEV_TX_OK;
		goto release;
	}

	/* Fill up the buffer */
	next_dcb_buf = &tx->dcbs_buf[next_to_use];
	next_dcb_buf->use_skb = true;
	next_dcb_buf->data.skb = skb;
	next_dcb_buf->xdp_ndo = false;
	next_dcb_buf->len = skb->len;
	next_dcb_buf->dma_addr = dma_addr;
	next_dcb_buf->used = true;
	next_dcb_buf->ptp = false;
	next_dcb_buf->dev = dev;

	fdma_dcb_add(&tx->fdma,
		     next_to_use,
		     0,
		     FDMA_DCB_STATUS_INTR |
		     FDMA_DCB_STATUS_SOF |
		     FDMA_DCB_STATUS_EOF |
		     FDMA_DCB_STATUS_BLOCKO(0) |
		     FDMA_DCB_STATUS_BLOCKL(skb->len));

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		next_dcb_buf->ptp = true;

	/* Start the transmission */
	lan966x_fdma_tx_start(tx, next_to_use);

	return NETDEV_TX_OK;

release:
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		lan966x_ptp_txtstamp_release(port, skb);

	dev_kfree_skb_any(skb);
	return err;
}

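/* Return the largest value programmed into the per-port MAC max-length
 * registers; the RX buffer sizing in lan966x_fdma_get_max_frame() is
 * derived from this rather than from the netdev MTUs.
 */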
static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x)
{
	int max_mtu = 0;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		struct lan966x_port *port;
		int mtu;

		port = lan966x->ports[i];
		if (!port)
			continue;

		mtu = lan_rd(lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
		if (mtu > max_mtu)
			max_mtu = mtu;
	}

	return max_mtu;
}

static int lan966x_qsys_sw_status(struct lan966x *lan966x)
{
	return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT));
}

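/* Re-create the RX path for a new maximum frame size. The old DCB ring
 * and page pool are kept aside so RX can be restored unchanged if the
 * new allocation fails; they are freed only once the new ring is up.
 */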
static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
{
	struct page_pool *page_pool;
	dma_addr_t rx_dma;
	void *rx_dcbs;
	u32 size;
	int err;

	/* Store these so they can be freed later */
	rx_dma = lan966x->rx.fdma.dma;
	rx_dcbs = lan966x->rx.fdma.dcbs;
	page_pool = lan966x->rx.page_pool;

	napi_synchronize(&lan966x->napi);
	napi_disable(&lan966x->napi);
	lan966x_fdma_stop_netdev(lan966x);

	lan966x_fdma_rx_disable(&lan966x->rx);
	lan966x_fdma_rx_free_pages(&lan966x->rx);
	lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
	lan966x->rx.max_mtu = new_mtu;
	err = lan966x_fdma_rx_alloc(&lan966x->rx);
	if (err)
		goto restore;
	lan966x_fdma_rx_start(&lan966x->rx);

	size = sizeof(struct fdma_dcb) * lan966x->rx.fdma.n_dcbs;
	size = ALIGN(size, PAGE_SIZE);
	dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);

	page_pool_destroy(page_pool);

	lan966x_fdma_wakeup_netdev(lan966x);
	napi_enable(&lan966x->napi);

	return err;
restore:
	lan966x->rx.page_pool = page_pool;
	lan966x->rx.fdma.dma = rx_dma;
	lan966x->rx.fdma.dcbs = rx_dcbs;
	lan966x_fdma_rx_start(&lan966x->rx);

	return err;
}

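/* Worst-case RX buffer size: the largest MAC frame plus the IFH that
 * precedes every extracted frame, the shared-info tail needed by
 * build_skb(), room for two VLAN tags and the XDP headroom reserved at
 * the start of each page.
 */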
static int lan966x_fdma_get_max_frame(struct lan966x *lan966x)
{
	return lan966x_fdma_get_max_mtu(lan966x) +
	       IFH_LEN_BYTES +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
	       VLAN_HLEN * 2 +
	       XDP_PACKET_HEADROOM;
}

static int __lan966x_fdma_reload(struct lan966x *lan966x, int max_mtu)
{
	int err;
	u32 val;

	/* Disable the CPU port */
	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
		QSYS_SW_PORT_MODE_PORT_ENA,
		lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

	/* Flush the CPU queues */
	readx_poll_timeout(lan966x_qsys_sw_status, lan966x,
			   val, !(QSYS_SW_STATUS_EQ_AVAIL_GET(val)),
			   READL_SLEEP_US, READL_TIMEOUT_US);

	/* Add a sleep in case there are frames between the queues and the CPU
	 * port
	 */
	usleep_range(1000, 2000);

	err = lan966x_fdma_reload(lan966x, max_mtu);

	/* Re-enable the CPU port */
	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1),
		QSYS_SW_PORT_MODE_PORT_ENA,
		lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

	return err;
}

int lan966x_fdma_change_mtu(struct lan966x *lan966x)
{
	int max_mtu;

	max_mtu = lan966x_fdma_get_max_frame(lan966x);
	if (max_mtu == lan966x->rx.max_mtu)
		return 0;

	return __lan966x_fdma_reload(lan966x, max_mtu);
}

int lan966x_fdma_reload_page_pool(struct lan966x *lan966x)
{
	int max_mtu;

	max_mtu = lan966x_fdma_get_max_frame(lan966x);
	return __lan966x_fdma_reload(lan966x, max_mtu);
}

void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
{
	if (lan966x->fdma_ndev)
		return;

	lan966x->fdma_ndev = dev;
	netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll);
	napi_enable(&lan966x->napi);
}

void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev)
{
	if (lan966x->fdma_ndev == dev) {
		netif_napi_del(&lan966x->napi);
		lan966x->fdma_ndev = NULL;
	}
}

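/* A single RX and a single TX channel are shared by all ports, serviced
 * by the one NAPI instance registered in lan966x_fdma_netdev_init()
 * above. db_size matches the page-pool order, so each data buffer maps
 * exactly one page.
 */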
int lan966x_fdma_init(struct lan966x *lan966x)
{
	int err;

	if (!lan966x->fdma)
		return 0;

	lan966x->rx.lan966x = lan966x;
	lan966x->rx.fdma.channel_id = FDMA_XTR_CHANNEL;
	lan966x->rx.fdma.n_dcbs = FDMA_DCB_MAX;
	lan966x->rx.fdma.n_dbs = FDMA_RX_DCB_MAX_DBS;
	lan966x->rx.fdma.priv = lan966x;
	lan966x->rx.fdma.size = fdma_get_size(&lan966x->rx.fdma);
	lan966x->rx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;
	lan966x->rx.fdma.ops.nextptr_cb = &fdma_nextptr_cb;
	lan966x->rx.fdma.ops.dataptr_cb = &lan966x_fdma_rx_dataptr_cb;
	lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x);
	lan966x->tx.lan966x = lan966x;
	lan966x->tx.fdma.channel_id = FDMA_INJ_CHANNEL;
	lan966x->tx.fdma.n_dcbs = FDMA_DCB_MAX;
	lan966x->tx.fdma.n_dbs = FDMA_TX_DCB_MAX_DBS;
	lan966x->tx.fdma.priv = lan966x;
	lan966x->tx.fdma.size = fdma_get_size(&lan966x->tx.fdma);
	lan966x->tx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;
	lan966x->tx.fdma.ops.nextptr_cb = &fdma_nextptr_cb;
	lan966x->tx.fdma.ops.dataptr_cb = &lan966x_fdma_tx_dataptr_cb;
	lan966x->tx.last_in_use = -1;

	err = lan966x_fdma_rx_alloc(&lan966x->rx);
	if (err)
		return err;

	err = lan966x_fdma_tx_alloc(&lan966x->tx);
	if (err) {
		fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma);
		return err;
	}

	lan966x_fdma_rx_start(&lan966x->rx);

	return 0;
}

void lan966x_fdma_deinit(struct lan966x *lan966x)
{
	if (!lan966x->fdma)
		return;

	lan966x_fdma_rx_disable(&lan966x->rx);
	lan966x_fdma_tx_disable(&lan966x->tx);

	napi_synchronize(&lan966x->napi);
	napi_disable(&lan966x->napi);

	lan966x_fdma_rx_free_pages(&lan966x->rx);
	fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma);
	page_pool_destroy(lan966x->rx.page_pool);
	lan966x_fdma_tx_free(&lan966x->tx);
}