// SPDX-License-Identifier: GPL-2.0+

#include <linux/bpf.h>
#include <linux/filter.h>
#include <net/page_pool/helpers.h>

#include "lan966x_main.h"

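/* FDMA RX dataptr callback: allocate a fresh page from the page pool for
 * the given DCB/DB slot and return its DMA address, offset by
 * XDP_PACKET_HEADROOM so headroom is available in front of the frame.
 */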
static int lan966x_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db,
				      u64 *dataptr)
{
	struct lan966x *lan966x = (struct lan966x *)fdma->priv;
	struct lan966x_rx *rx = &lan966x->rx;
	struct page *page;

	page = page_pool_dev_alloc_pages(rx->page_pool);
	if (unlikely(!page))
		return -ENOMEM;

	rx->page[dcb][db] = page;
	*dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;

	return 0;
}

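/* FDMA TX dataptr callback: the buffer was already mapped (or synced) by
 * the xmit path, so just hand back the stored DMA address.
 */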
static int lan966x_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
				      u64 *dataptr)
{
	struct lan966x *lan966x = (struct lan966x *)fdma->priv;

	*dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr;

	return 0;
}

static int lan966x_fdma_channel_active(struct lan966x *lan966x)
{
	return lan_rd(lan966x, FDMA_CH_ACTIVE);
}

static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
{
	struct fdma *fdma = &rx->fdma;
	int i, j;

	for (i = 0; i < fdma->n_dcbs; ++i) {
		for (j = 0; j < fdma->n_dbs; ++j)
			page_pool_put_full_page(rx->page_pool,
						rx->page[i][j], false);
	}
}

static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx)
{
	struct fdma *fdma = &rx->fdma;
	struct page *page;

	page = rx->page[fdma->dcb_index][fdma->db_index];
	if (unlikely(!page))
		return;

	page_pool_recycle_direct(rx->page_pool, page);
}

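/* Create the RX page pool and (re)register it as the XDP memory model of
 * every port, so redirected frames can be recycled back into the pool.
 * The mapping becomes DMA_BIDIRECTIONAL when an XDP program is attached,
 * since XDP_TX retransmits RX pages that the CPU has modified.
 */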
static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct page_pool_params pp_params = {
		.order = rx->page_order,
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = rx->fdma.n_dcbs,
		.nid = NUMA_NO_NODE,
		.dev = lan966x->dev,
		.dma_dir = DMA_FROM_DEVICE,
		.offset = XDP_PACKET_HEADROOM,
		.max_len = rx->max_mtu -
			   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
	};

	if (lan966x_xdp_present(lan966x))
		pp_params.dma_dir = DMA_BIDIRECTIONAL;

	rx->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rx->page_pool))
		return PTR_ERR(rx->page_pool);

	for (int i = 0; i < lan966x->num_phys_ports; ++i) {
		struct lan966x_port *port;

		if (!lan966x->ports[i])
			continue;

		port = lan966x->ports[i];
		xdp_rxq_info_unreg_mem_model(&port->xdp_rxq);
		xdp_rxq_info_reg_mem_model(&port->xdp_rxq, MEM_TYPE_PAGE_POOL,
					   rx->page_pool);
	}

	return 0;
}

static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct fdma *fdma = &rx->fdma;
	int err;

	err = lan966x_fdma_rx_alloc_page_pool(rx);
	if (err)
		return err;

	err = fdma_alloc_coherent(lan966x->dev, fdma);
	if (err)
		return err;

	fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
		       FDMA_DCB_STATUS_INTR);

	return 0;
}

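/* Advance to the next RX DCB; n_dcbs is a power of two, so the AND wraps
 * the index back to zero at the end of the ring.
 */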
static void lan966x_fdma_rx_advance_dcb(struct lan966x_rx *rx)
{
	struct fdma *fdma = &rx->fdma;

	fdma->dcb_index++;
	fdma->dcb_index &= fdma->n_dcbs - 1;
}

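/* Bring up the RX (extraction) channel: point the hardware at the first
 * DCB, configure the channel, release the extraction port from its
 * stopped state, unmask the channel's doorbell interrupt and finally
 * activate the channel.
 */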
static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct fdma *fdma = &rx->fdma;
	u32 mask;

	/* When activating a channel, the address of the first DCB must be
	 * written before the channel is activated
	 */
	lan_wr(lower_32_bits((u64)fdma->dma), lan966x,
	       FDMA_DCB_LLP(fdma->channel_id));
	lan_wr(upper_32_bits((u64)fdma->dma), lan966x,
	       FDMA_DCB_LLP1(fdma->channel_id));

	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
	       FDMA_CH_CFG_CH_MEM_SET(1),
	       lan966x, FDMA_CH_CFG(fdma->channel_id));

	/* Start fdma */
	lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
		FDMA_PORT_CTRL_XTR_STOP,
		lan966x, FDMA_PORT_CTRL(0));

	/* Enable interrupts */
	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
	mask |= BIT(fdma->channel_id);
	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
		FDMA_INTR_DB_ENA_INTR_DB_ENA,
		lan966x, FDMA_INTR_DB_ENA);

	/* Activate the channel */
	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)),
		FDMA_CH_ACTIVATE_CH_ACTIVATE,
		lan966x, FDMA_CH_ACTIVATE);
}

static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct fdma *fdma = &rx->fdma;
	u32 val;

	/* Disable the channel */
	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)),
		FDMA_CH_DISABLE_CH_DISABLE,
		lan966x, FDMA_CH_DISABLE);

	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
				  val, !(val & BIT(fdma->channel_id)),
				  READL_SLEEP_US, READL_TIMEOUT_US);

	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)),
		FDMA_CH_DB_DISCARD_DB_DISCARD,
		lan966x, FDMA_CH_DB_DISCARD);
}

static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;

	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->fdma.channel_id)),
		FDMA_CH_RELOAD_CH_RELOAD,
		lan966x, FDMA_CH_RELOAD);
}

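/* Allocate the TX side: one software bookkeeping entry per DCB plus the
 * coherent DCB ring itself. The ring starts out empty; DCBs are chained
 * in one by one as frames are queued for transmission.
 */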
static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	struct fdma *fdma = &tx->fdma;
	int err;

	tx->dcbs_buf = kcalloc(fdma->n_dcbs, sizeof(struct lan966x_tx_dcb_buf),
			       GFP_KERNEL);
	if (!tx->dcbs_buf)
		return -ENOMEM;

	err = fdma_alloc_coherent(lan966x->dev, fdma);
	if (err)
		goto out;

	fdma_dcbs_init(fdma, 0, 0);

	return 0;

out:
	kfree(tx->dcbs_buf);
	return err;
}

static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	struct fdma *fdma = &tx->fdma;
	int size;

	kfree(tx->dcbs_buf);

	size = sizeof(struct fdma_dcb) * fdma->n_dcbs;
	size = ALIGN(size, PAGE_SIZE);
	dma_free_coherent(lan966x->dev, size, fdma->dcbs, fdma->dma);
}

static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	struct fdma *fdma = &tx->fdma;
	u32 mask;

	/* When activating a channel, the address of the first DCB must be
	 * written before the channel is activated
	 */
	lan_wr(lower_32_bits((u64)fdma->dma), lan966x,
	       FDMA_DCB_LLP(fdma->channel_id));
	lan_wr(upper_32_bits((u64)fdma->dma), lan966x,
	       FDMA_DCB_LLP1(fdma->channel_id));

	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
	       FDMA_CH_CFG_CH_MEM_SET(1),
	       lan966x, FDMA_CH_CFG(fdma->channel_id));

	/* Start fdma */
	lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
		FDMA_PORT_CTRL_INJ_STOP,
		lan966x, FDMA_PORT_CTRL(0));

	/* Enable interrupts */
	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
	mask |= BIT(fdma->channel_id);
	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
		FDMA_INTR_DB_ENA_INTR_DB_ENA,
		lan966x, FDMA_INTR_DB_ENA);

	/* Activate the channel */
	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)),
		FDMA_CH_ACTIVATE_CH_ACTIVATE,
		lan966x, FDMA_CH_ACTIVATE);
}

static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	struct fdma *fdma = &tx->fdma;
	u32 val;

	/* Disable the channel */
	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)),
		FDMA_CH_DISABLE_CH_DISABLE,
		lan966x, FDMA_CH_DISABLE);

	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
				  val, !(val & BIT(fdma->channel_id)),
				  READL_SLEEP_US, READL_TIMEOUT_US);

	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)),
		FDMA_CH_DB_DISCARD_DB_DISCARD,
		lan966x, FDMA_CH_DB_DISCARD);

	tx->activated = false;
	tx->last_in_use = -1;
}

static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;

	/* Write the registers to reload the channel */
	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->fdma.channel_id)),
		FDMA_CH_RELOAD_CH_RELOAD,
		lan966x, FDMA_CH_RELOAD);
}

static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x)
{
	struct lan966x_port *port;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		port = lan966x->ports[i];
		if (!port)
			continue;

		if (netif_queue_stopped(port->dev))
			netif_wake_queue(port->dev);
	}
}

static void lan966x_fdma_stop_netdev(struct lan966x *lan966x)
{
	struct lan966x_port *port;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		port = lan966x->ports[i];
		if (!port)
			continue;

		netif_stop_queue(port->dev);
	}
}

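/* Reclaim TX buffers whose DCBs the hardware has marked DONE: update the
 * owning netdev's stats, unmap and free the skb, XDP frame or page, and
 * wake the queues if at least one slot was freed.
 */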
static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
{
	struct lan966x_tx *tx = &lan966x->tx;
	struct lan966x_rx *rx = &lan966x->rx;
	struct lan966x_tx_dcb_buf *dcb_buf;
	struct fdma *fdma = &tx->fdma;
	struct xdp_frame_bulk bq;
	unsigned long flags;
	bool clear = false;
	struct fdma_db *db;
	int i;

	xdp_frame_bulk_init(&bq);

	spin_lock_irqsave(&lan966x->tx_lock, flags);
	for (i = 0; i < fdma->n_dcbs; ++i) {
		dcb_buf = &tx->dcbs_buf[i];

		if (!dcb_buf->used)
			continue;

		db = &fdma->dcbs[i].db[0];
		if (!(db->status & FDMA_DCB_STATUS_DONE))
			continue;

		dcb_buf->dev->stats.tx_packets++;
		dcb_buf->dev->stats.tx_bytes += dcb_buf->len;

		dcb_buf->used = false;
		if (dcb_buf->use_skb) {
			dma_unmap_single(lan966x->dev,
					 dcb_buf->dma_addr,
					 dcb_buf->len,
					 DMA_TO_DEVICE);

			if (!dcb_buf->ptp)
				napi_consume_skb(dcb_buf->data.skb, weight);
		} else if (dcb_buf->xdp_ndo) {
			dma_unmap_single(lan966x->dev,
					 dcb_buf->dma_addr,
					 dcb_buf->len,
					 DMA_TO_DEVICE);
			xdp_return_frame_bulk(dcb_buf->data.xdpf, &bq);
		} else {
			page_pool_recycle_direct(rx->page_pool,
						 dcb_buf->data.page);
		}

		clear = true;
	}

	xdp_flush_frame_bulk(&bq);

	if (clear)
		lan966x_fdma_wakeup_netdev(lan966x);

	spin_unlock_irqrestore(&lan966x->tx_lock, flags);
}

static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
{
	struct fdma *fdma = &rx->fdma;
	struct fdma_db *db;

	/* Check if there is any data */
	db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
	if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE)))
		return false;

	return true;
}

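/* Peek at the current RX buffer: sync it for the CPU, extract the source
 * port from the IFH and, if the port runs an XDP program, hand the page
 * to it. Returns one of the FDMA_* verdicts consumed by the NAPI poll
 * loop.
 */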
static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
{
	struct lan966x *lan966x = rx->lan966x;
	struct fdma *fdma = &rx->fdma;
	struct lan966x_port *port;
	struct fdma_db *db;
	struct page *page;

	db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
	page = rx->page[fdma->dcb_index][fdma->db_index];
	if (unlikely(!page))
		return FDMA_ERROR;

	dma_sync_single_for_cpu(lan966x->dev,
				(dma_addr_t)db->dataptr + XDP_PACKET_HEADROOM,
				FDMA_DCB_STATUS_BLOCKL(db->status),
				DMA_FROM_DEVICE);

	lan966x_ifh_get_src_port(page_address(page) + XDP_PACKET_HEADROOM,
				 src_port);
	if (WARN_ON(*src_port >= lan966x->num_phys_ports))
		return FDMA_ERROR;

	port = lan966x->ports[*src_port];
	if (!lan966x_xdp_port_present(port))
		return FDMA_PASS;

	return lan966x_xdp_run(port, page, FDMA_DCB_STATUS_BLOCKL(db->status));
}

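/* Turn the current RX page into an skb: mark it for page-pool recycling,
 * strip the injection frame header (IFH) after extracting the RX
 * timestamp from it, trim the FCS unless the netdev asked to keep it,
 * and fill in the bridge offload metadata.
 */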
static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
						 u64 src_port)
{
	struct lan966x *lan966x = rx->lan966x;
	struct fdma *fdma = &rx->fdma;
	struct sk_buff *skb;
	struct fdma_db *db;
	struct page *page;
	u64 timestamp;

	/* Get the received page; the page pool keeps it DMA-mapped */
	db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
	page = rx->page[fdma->dcb_index][fdma->db_index];

	skb = build_skb(page_address(page), fdma->db_size);
	if (unlikely(!skb))
		goto free_page;

	skb_mark_for_recycle(skb);

	skb_reserve(skb, XDP_PACKET_HEADROOM);
	skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));

	lan966x_ifh_get_timestamp(skb->data, &timestamp);

	skb->dev = lan966x->ports[src_port]->dev;
	skb_pull(skb, IFH_LEN_BYTES);

	if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
		skb_trim(skb, skb->len - ETH_FCS_LEN);

	lan966x_ptp_rxtstamp(lan966x, skb, src_port, timestamp);
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (lan966x->bridge_mask & BIT(src_port)) {
		skb->offload_fwd_mark = 1;

		skb_reset_network_header(skb);
		if (!lan966x_hw_offload(lan966x, src_port, skb))
			skb->offload_fwd_mark = 0;
	}

	skb->dev->stats.rx_bytes += skb->len;
	skb->dev->stats.rx_packets++;

	return skb;

free_page:
	page_pool_recycle_direct(rx->page_pool, page);

	return NULL;
}

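/* NAPI poll: first reclaim completed TX buffers, then process up to
 * 'weight' RX frames according to the verdict from
 * lan966x_fdma_rx_check_frame(). Consumed DCBs are refilled with fresh
 * pages and handed back to the hardware before interrupts are
 * re-enabled.
 */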
static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
{
	struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
	struct lan966x_rx *rx = &lan966x->rx;
	int old_dcb, dcb_reload, counter = 0;
	struct fdma *fdma = &rx->fdma;
	bool redirect = false;
	struct sk_buff *skb;
	u64 src_port;

	dcb_reload = fdma->dcb_index;

	lan966x_fdma_tx_clear_buf(lan966x, weight);

	/* Process the received frames */
	while (counter < weight) {
		if (!lan966x_fdma_rx_more_frames(rx))
			break;

		counter++;

		switch (lan966x_fdma_rx_check_frame(rx, &src_port)) {
		case FDMA_PASS:
			break;
		case FDMA_ERROR:
			lan966x_fdma_rx_free_page(rx);
			lan966x_fdma_rx_advance_dcb(rx);
			goto allocate_new;
		case FDMA_REDIRECT:
			redirect = true;
			fallthrough;
		case FDMA_TX:
			lan966x_fdma_rx_advance_dcb(rx);
			continue;
		case FDMA_DROP:
			lan966x_fdma_rx_free_page(rx);
			lan966x_fdma_rx_advance_dcb(rx);
			continue;
		}

		skb = lan966x_fdma_rx_get_frame(rx, src_port);
		lan966x_fdma_rx_advance_dcb(rx);
		if (!skb)
			goto allocate_new;

		napi_gro_receive(&lan966x->napi, skb);
	}

allocate_new:
	/* Allocate new pages and map them */
	while (dcb_reload != fdma->dcb_index) {
		old_dcb = dcb_reload;
		dcb_reload++;
		dcb_reload &= fdma->n_dcbs - 1;

		fdma_dcb_add(fdma, old_dcb, FDMA_DCB_INFO_DATAL(fdma->db_size),
			     FDMA_DCB_STATUS_INTR);

		lan966x_fdma_rx_reload(rx);
	}

	if (redirect)
		xdp_do_flush();

	if (counter < weight && napi_complete_done(napi, counter))
		lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);

	return counter;
}

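/* FDMA interrupt handler: acknowledge doorbell interrupts and defer the
 * real work to NAPI, with doorbells masked until the poll completes;
 * error interrupts are only reported and cleared.
 */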
irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
{
	struct lan966x *lan966x = args;
	u32 db, err, err_type;

	db = lan_rd(lan966x, FDMA_INTR_DB);
	err = lan_rd(lan966x, FDMA_INTR_ERR);

	if (db) {
		lan_wr(0, lan966x, FDMA_INTR_DB_ENA);
		lan_wr(db, lan966x, FDMA_INTR_DB);

		napi_schedule(&lan966x->napi);
	}

	if (err) {
		err_type = lan_rd(lan966x, FDMA_ERRORS);

		WARN(1, "Unexpected error: %d, error_type: %d\n", err, err_type);

		lan_wr(err, lan966x, FDMA_INTR_ERR);
		lan_wr(err_type, lan966x, FDMA_ERRORS);
	}

	return IRQ_HANDLED;
}

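/* Find a free TX DCB. The entry at last_in_use is skipped because it is
 * still the tail of the hardware's DCB chain and must not be reused
 * before a new DCB is linked after it.
 */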
static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
{
	struct lan966x_tx_dcb_buf *dcb_buf;
	struct fdma *fdma = &tx->fdma;
	int i;

	for (i = 0; i < fdma->n_dcbs; ++i) {
		dcb_buf = &tx->dcbs_buf[i];
		if (!dcb_buf->used && i != tx->last_in_use)
			return i;
	}

	return -1;
}

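/* Fill in a TX DCB whose single data block covers the whole frame (SOF
 * and EOF set), terminated with an invalid nextptr until it is chained
 * into the ring by lan966x_fdma_tx_start().
 */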
static void lan966x_fdma_tx_setup_dcb(struct lan966x_tx *tx,
				      int next_to_use, int len,
				      dma_addr_t dma_addr)
{
	struct fdma_dcb *next_dcb;
	struct fdma_db *next_db;

	next_dcb = &tx->fdma.dcbs[next_to_use];
	next_dcb->nextptr = FDMA_DCB_INVALID_DATA;

	next_db = &next_dcb->db[0];
	next_db->dataptr = dma_addr;
	next_db->status = FDMA_DCB_STATUS_SOF |
			  FDMA_DCB_STATUS_EOF |
			  FDMA_DCB_STATUS_INTR |
			  FDMA_DCB_STATUS_BLOCKO(0) |
			  FDMA_DCB_STATUS_BLOCKL(len);
}

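/* Kick the TX channel. On the first frame the channel must be fully
 * activated; afterwards it is enough to chain the new DCB to the
 * previous tail and ask the hardware to reload the list.
 */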
static void lan966x_fdma_tx_start(struct lan966x_tx *tx, int next_to_use)
{
	struct lan966x *lan966x = tx->lan966x;
	struct fdma *fdma = &tx->fdma;
	struct fdma_dcb *dcb;

	if (likely(lan966x->tx.activated)) {
		/* Chain the new DCB to the last one in use */
		dcb = &fdma->dcbs[tx->last_in_use];
		dcb->nextptr = fdma->dma + (next_to_use *
					  sizeof(struct fdma_dcb));

		lan966x_fdma_tx_reload(tx);
	} else {
		/* This is the first frame, so just activate the channel */
		lan966x->tx.activated = true;
		lan966x_fdma_tx_activate(tx);
	}

	/* The new DCB is now the last one in use */
	tx->last_in_use = next_to_use;
}

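/* Transmit an XDP buffer. Called with len == 0 for an xdp_frame from
 * ndo_xdp_xmit(), which must be DMA-mapped here, and with len != 0 for
 * an XDP_TX page that is already mapped by the page pool and only needs
 * a sync. In both cases an IFH is prepended in the headroom.
 */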
int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
{
	struct lan966x *lan966x = port->lan966x;
	struct lan966x_tx_dcb_buf *next_dcb_buf;
	struct lan966x_tx *tx = &lan966x->tx;
	struct xdp_frame *xdpf;
	dma_addr_t dma_addr;
	struct page *page;
	int next_to_use;
	__be32 *ifh;
	int ret = 0;

	spin_lock(&lan966x->tx_lock);

	/* Get next index */
	next_to_use = lan966x_fdma_get_next_dcb(tx);
	if (next_to_use < 0) {
		netif_stop_queue(port->dev);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Get the next buffer */
	next_dcb_buf = &tx->dcbs_buf[next_to_use];

	/* Generate new IFH */
	if (!len) {
		xdpf = ptr;

		if (xdpf->headroom < IFH_LEN_BYTES) {
			ret = NETDEV_TX_OK;
			goto out;
		}

		ifh = xdpf->data - IFH_LEN_BYTES;
		memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
		lan966x_ifh_set_bypass(ifh, 1);
		lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));

		dma_addr = dma_map_single(lan966x->dev,
					  xdpf->data - IFH_LEN_BYTES,
					  xdpf->len + IFH_LEN_BYTES,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(lan966x->dev, dma_addr)) {
			ret = NETDEV_TX_OK;
			goto out;
		}

		next_dcb_buf->data.xdpf = xdpf;
		next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;

		/* Setup next dcb */
		lan966x_fdma_tx_setup_dcb(tx, next_to_use,
					  xdpf->len + IFH_LEN_BYTES,
					  dma_addr);
	} else {
		page = ptr;

		ifh = page_address(page) + XDP_PACKET_HEADROOM;
		memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
		lan966x_ifh_set_bypass(ifh, 1);
		lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));

		dma_addr = page_pool_get_dma_addr(page);
		dma_sync_single_for_device(lan966x->dev,
					   dma_addr + XDP_PACKET_HEADROOM,
					   len + IFH_LEN_BYTES,
					   DMA_TO_DEVICE);

		next_dcb_buf->data.page = page;
		next_dcb_buf->len = len + IFH_LEN_BYTES;

		/* Setup next dcb */
		lan966x_fdma_tx_setup_dcb(tx, next_to_use,
					  len + IFH_LEN_BYTES,
					  dma_addr + XDP_PACKET_HEADROOM);
	}

	/* Fill up the buffer */
	next_dcb_buf->use_skb = false;
	next_dcb_buf->xdp_ndo = !len;
	next_dcb_buf->dma_addr = dma_addr;
	next_dcb_buf->used = true;
	next_dcb_buf->ptp = false;
	next_dcb_buf->dev = port->dev;

	/* Start the transmission */
	lan966x_fdma_tx_start(tx, next_to_use);

out:
	spin_unlock(&lan966x->tx_lock);

	return ret;
}

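/* ndo_start_xmit path: make room for the IFH in front of the frame and
 * for the FCS behind it, map the whole buffer and queue it on a free
 * DCB. Two-step PTP frames keep their skb alive until the timestamp has
 * been read back.
 */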
int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;
	struct lan966x_tx_dcb_buf *next_dcb_buf;
	struct lan966x_tx *tx = &lan966x->tx;
	int needed_headroom;
	int needed_tailroom;
	dma_addr_t dma_addr;
	int next_to_use;
	int err;

	/* Get next index */
	next_to_use = lan966x_fdma_get_next_dcb(tx);
	if (next_to_use < 0) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	if (skb_put_padto(skb, ETH_ZLEN)) {
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* skb processing */
	needed_headroom = max_t(int, IFH_LEN_BYTES - skb_headroom(skb), 0);
	needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
	if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
				       GFP_ATOMIC);
		if (unlikely(err)) {
			dev->stats.tx_dropped++;
			err = NETDEV_TX_OK;
			goto release;
		}
	}

	skb_tx_timestamp(skb);
	skb_push(skb, IFH_LEN_BYTES);
	memcpy(skb->data, ifh, IFH_LEN_BYTES);
	/* Make room for the FCS */
	skb_put(skb, ETH_FCS_LEN);

	dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(lan966x->dev, dma_addr)) {
		dev->stats.tx_dropped++;
		err = NETDEV_TX_OK;
		goto release;
	}

	/* Setup next dcb */
	lan966x_fdma_tx_setup_dcb(tx, next_to_use, skb->len, dma_addr);

	/* Fill up the buffer */
	next_dcb_buf = &tx->dcbs_buf[next_to_use];
	next_dcb_buf->use_skb = true;
	next_dcb_buf->data.skb = skb;
	next_dcb_buf->xdp_ndo = false;
	next_dcb_buf->len = skb->len;
	next_dcb_buf->dma_addr = dma_addr;
	next_dcb_buf->used = true;
	next_dcb_buf->ptp = false;
	next_dcb_buf->dev = dev;

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		next_dcb_buf->ptp = true;

	/* Start the transmission */
	lan966x_fdma_tx_start(tx, next_to_use);

	return NETDEV_TX_OK;

release:
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		lan966x_ptp_txtstamp_release(port, skb);

	dev_kfree_skb_any(skb);
	return err;
}

static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x)
{
	int max_mtu = 0;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		struct lan966x_port *port;
		int mtu;

		port = lan966x->ports[i];
		if (!port)
			continue;

		mtu = lan_rd(lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
		if (mtu > max_mtu)
			max_mtu = mtu;
	}

	return max_mtu;
}

static int lan966x_qsys_sw_status(struct lan966x *lan966x)
{
	return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT));
}

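/* Re-create the RX path for a new maximum frame size: quiesce NAPI and
 * the netdevs, tear down the RX channel, allocate a new page pool and
 * DCB ring sized for new_mtu, then free the old ones. On allocation
 * failure the previous ring and pool are restored.
 */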
static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
{
	struct page_pool *page_pool;
	dma_addr_t rx_dma;
	void *rx_dcbs;
	u32 size;
	int err;

	/* Keep the old ring and page pool around so they can be freed once
	 * the new ones are in place
	 */
	rx_dma = lan966x->rx.fdma.dma;
	rx_dcbs = lan966x->rx.fdma.dcbs;
	page_pool = lan966x->rx.page_pool;

	napi_synchronize(&lan966x->napi);
	napi_disable(&lan966x->napi);
	lan966x_fdma_stop_netdev(lan966x);

	lan966x_fdma_rx_disable(&lan966x->rx);
	lan966x_fdma_rx_free_pages(&lan966x->rx);
	lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
	lan966x->rx.max_mtu = new_mtu;
	err = lan966x_fdma_rx_alloc(&lan966x->rx);
	if (err)
		goto restore;
	lan966x_fdma_rx_start(&lan966x->rx);

	size = sizeof(struct fdma_dcb) * lan966x->rx.fdma.n_dcbs;
	size = ALIGN(size, PAGE_SIZE);
	dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);

	page_pool_destroy(page_pool);

	lan966x_fdma_wakeup_netdev(lan966x);
	napi_enable(&lan966x->napi);

	return err;

restore:
	lan966x->rx.page_pool = page_pool;
	lan966x->rx.fdma.dma = rx_dma;
	lan966x->rx.fdma.dcbs = rx_dcbs;
	lan966x_fdma_rx_start(&lan966x->rx);

	return err;
}

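/* Worst-case buffer size for one frame: the largest configured MTU plus
 * the IFH, room for skb_shared_info, two VLAN tags and the XDP headroom.
 */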
static int lan966x_fdma_get_max_frame(struct lan966x *lan966x)
{
	return lan966x_fdma_get_max_mtu(lan966x) +
	       IFH_LEN_BYTES +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
	       VLAN_HLEN * 2 +
	       XDP_PACKET_HEADROOM;
}

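/* Stop traffic towards the CPU port while the RX path is rebuilt, so no
 * frames are lost in flight: disable the port, wait for its queues to
 * drain, reload the FDMA and enable the port again.
 */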
static int __lan966x_fdma_reload(struct lan966x *lan966x, int max_mtu)
{
	int err;
	u32 val;

	/* Disable the CPU port */
	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
		QSYS_SW_PORT_MODE_PORT_ENA,
		lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

	/* Flush the CPU queues */
	readx_poll_timeout(lan966x_qsys_sw_status, lan966x,
			   val, !(QSYS_SW_STATUS_EQ_AVAIL_GET(val)),
			   READL_SLEEP_US, READL_TIMEOUT_US);

	/* Sleep for a while in case there are frames in flight between the
	 * queues and the CPU port
	 */
	usleep_range(1000, 2000);

	err = lan966x_fdma_reload(lan966x, max_mtu);

	/* Re-enable the CPU port */
	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1),
		QSYS_SW_PORT_MODE_PORT_ENA,
		lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

	return err;
}

int lan966x_fdma_change_mtu(struct lan966x *lan966x)
{
	int max_mtu;

	max_mtu = lan966x_fdma_get_max_frame(lan966x);
	if (max_mtu == lan966x->rx.max_mtu)
		return 0;

	return __lan966x_fdma_reload(lan966x, max_mtu);
}

int lan966x_fdma_reload_page_pool(struct lan966x *lan966x)
{
	int max_mtu;

	max_mtu = lan966x_fdma_get_max_frame(lan966x);
	return __lan966x_fdma_reload(lan966x, max_mtu);
}

void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
{
	if (lan966x->fdma_ndev)
		return;

	lan966x->fdma_ndev = dev;
	netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll);
	napi_enable(&lan966x->napi);
}

void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev)
{
	if (lan966x->fdma_ndev == dev) {
		netif_napi_del(&lan966x->napi);
		lan966x->fdma_ndev = NULL;
	}
}

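/* One-time FDMA setup: describe the RX (extraction) and TX (injection)
 * channels, allocate their rings and start RX. The TX channel is only
 * activated when the first frame is sent.
 */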
int lan966x_fdma_init(struct lan966x *lan966x)
{
	int err;

	if (!lan966x->fdma)
		return 0;

	lan966x->rx.lan966x = lan966x;
	lan966x->rx.fdma.channel_id = FDMA_XTR_CHANNEL;
	lan966x->rx.fdma.n_dcbs = FDMA_DCB_MAX;
	lan966x->rx.fdma.n_dbs = FDMA_RX_DCB_MAX_DBS;
	lan966x->rx.fdma.priv = lan966x;
	lan966x->rx.fdma.size = fdma_get_size(&lan966x->rx.fdma);
	lan966x->rx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;
	lan966x->rx.fdma.ops.nextptr_cb = &fdma_nextptr_cb;
	lan966x->rx.fdma.ops.dataptr_cb = &lan966x_fdma_rx_dataptr_cb;
	lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x);
	lan966x->tx.lan966x = lan966x;
	lan966x->tx.fdma.channel_id = FDMA_INJ_CHANNEL;
	lan966x->tx.fdma.n_dcbs = FDMA_DCB_MAX;
	lan966x->tx.fdma.n_dbs = FDMA_TX_DCB_MAX_DBS;
	lan966x->tx.fdma.priv = lan966x;
	lan966x->tx.fdma.size = fdma_get_size(&lan966x->tx.fdma);
	lan966x->tx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;
	lan966x->tx.fdma.ops.nextptr_cb = &fdma_nextptr_cb;
	lan966x->tx.fdma.ops.dataptr_cb = &lan966x_fdma_tx_dataptr_cb;
	lan966x->tx.last_in_use = -1;

	err = lan966x_fdma_rx_alloc(&lan966x->rx);
	if (err)
		return err;

	err = lan966x_fdma_tx_alloc(&lan966x->tx);
	if (err) {
		fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma);
		return err;
	}

	lan966x_fdma_rx_start(&lan966x->rx);

	return 0;
}

void lan966x_fdma_deinit(struct lan966x *lan966x)
{
	if (!lan966x->fdma)
		return;

	lan966x_fdma_rx_disable(&lan966x->rx);
	lan966x_fdma_tx_disable(&lan966x->tx);

	napi_synchronize(&lan966x->napi);
	napi_disable(&lan966x->napi);

	lan966x_fdma_rx_free_pages(&lan966x->rx);
	fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma);
	page_pool_destroy(lan966x->rx.page_pool);
	lan966x_fdma_tx_free(&lan966x->tx);
}