1 // SPDX-License-Identifier: GPL-2.0+
2 
3 #include <linux/bpf.h>
4 #include <linux/filter.h>
5 #include <net/page_pool/helpers.h>
6 
7 #include "lan966x_main.h"
8 
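/* FDMA library callback: provide the data pointer for a RX DCB/DB by
 * allocating a page from the page pool, remembering it for this slot and
 * returning its DMA address offset by XDP_PACKET_HEADROOM.
 */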
9 static int lan966x_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db,
10 				      u64 *dataptr)
11 {
12 	struct lan966x *lan966x = (struct lan966x *)fdma->priv;
13 	struct lan966x_rx *rx = &lan966x->rx;
14 	struct page *page;
15 
16 	page = page_pool_dev_alloc_pages(rx->page_pool);
17 	if (unlikely(!page))
18 		return -ENOMEM;
19 
20 	rx->page[dcb][db] = page;
21 	*dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;
22 
23 	return 0;
24 }
25 
26 static int lan966x_fdma_channel_active(struct lan966x *lan966x)
27 {
28 	return lan_rd(lan966x, FDMA_CH_ACTIVE);
29 }
30 
31 static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx,
32 					       struct fdma_db *db)
33 {
34 	struct page *page;
35 
36 	page = page_pool_dev_alloc_pages(rx->page_pool);
37 	if (unlikely(!page))
38 		return NULL;
39 
40 	db->dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;
41 
42 	return page;
43 }
44 
45 static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
46 {
47 	struct fdma *fdma = &rx->fdma;
48 	int i, j;
49 
50 	for (i = 0; i < fdma->n_dcbs; ++i) {
51 		for (j = 0; j < fdma->n_dbs; ++j)
52 			page_pool_put_full_page(rx->page_pool,
53 						rx->page[i][j], false);
54 	}
55 }
56 
57 static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx)
58 {
59 	struct fdma *fdma = &rx->fdma;
60 	struct page *page;
61 
62 	page = rx->page[fdma->dcb_index][fdma->db_index];
63 	if (unlikely(!page))
64 		return;
65 
66 	page_pool_recycle_direct(rx->page_pool, page);
67 }
68 
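/* Re-arm a RX DCB: mark all its DBs as interrupt-generating, terminate its
 * nextptr and append it at the tail of the hardware DCB chain.
 */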
69 static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx,
70 				    struct fdma_dcb *dcb,
71 				    u64 nextptr)
72 {
73 	struct fdma *fdma = &rx->fdma;
74 	struct fdma_db *db;
75 	int i;
76 
77 	for (i = 0; i < fdma->n_dbs; ++i) {
78 		db = &dcb->db[i];
79 		db->status = FDMA_DCB_STATUS_INTR;
80 	}
81 
82 	dcb->nextptr = FDMA_DCB_INVALID_DATA;
83 	dcb->info = FDMA_DCB_INFO_DATAL(PAGE_SIZE << rx->page_order);
84 
85 	fdma->last_dcb->nextptr = nextptr;
86 	fdma->last_dcb = dcb;
87 }
88 
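/* Create the RX page pool and (re)register it as the memory model of each
 * port's XDP RX queue. The DMA direction becomes bidirectional when an XDP
 * program is attached, as the same pages may then also be transmitted
 * (XDP_TX).
 */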
89 static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
90 {
91 	struct lan966x *lan966x = rx->lan966x;
92 	struct page_pool_params pp_params = {
93 		.order = rx->page_order,
94 		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
95 		.pool_size = rx->fdma.n_dcbs,
96 		.nid = NUMA_NO_NODE,
97 		.dev = lan966x->dev,
98 		.dma_dir = DMA_FROM_DEVICE,
99 		.offset = XDP_PACKET_HEADROOM,
100 		.max_len = rx->max_mtu -
101 			   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
102 	};
103 
104 	if (lan966x_xdp_present(lan966x))
105 		pp_params.dma_dir = DMA_BIDIRECTIONAL;
106 
107 	rx->page_pool = page_pool_create(&pp_params);
108 
109 	for (int i = 0; i < lan966x->num_phys_ports; ++i) {
110 		struct lan966x_port *port;
111 
112 		if (!lan966x->ports[i])
113 			continue;
114 
115 		port = lan966x->ports[i];
116 		xdp_rxq_info_unreg_mem_model(&port->xdp_rxq);
117 		xdp_rxq_info_reg_mem_model(&port->xdp_rxq, MEM_TYPE_PAGE_POOL,
118 					   rx->page_pool);
119 	}
120 
121 	return PTR_ERR_OR_ZERO(rx->page_pool);
122 }
123 
124 static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
125 {
126 	struct lan966x *lan966x = rx->lan966x;
127 	struct fdma *fdma = &rx->fdma;
128 	int err;
129 
130 	if (lan966x_fdma_rx_alloc_page_pool(rx))
131 		return PTR_ERR(rx->page_pool);
132 
133 	err = fdma_alloc_coherent(lan966x->dev, fdma);
134 	if (err)
135 		return err;
136 
137 	fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
138 		       FDMA_DCB_STATUS_INTR);
139 
140 	return 0;
141 }
142 
143 static void lan966x_fdma_rx_advance_dcb(struct lan966x_rx *rx)
144 {
145 	struct fdma *fdma = &rx->fdma;
146 
147 	fdma->dcb_index++;
148 	fdma->dcb_index &= fdma->n_dcbs - 1;
149 }
150 
151 static void lan966x_fdma_rx_free(struct lan966x_rx *rx)
152 {
153 	struct lan966x *lan966x = rx->lan966x;
154 	struct fdma *fdma = &rx->fdma;
155 	u32 size;
156 
157 	/* Now it is possible to clean up the DCBs */
158 	size = sizeof(struct fdma_dcb) * fdma->n_dcbs;
159 	size = ALIGN(size, PAGE_SIZE);
160 	dma_free_coherent(lan966x->dev, size, fdma->dcbs, fdma->dma);
161 }
162 
163 static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
164 {
165 	struct lan966x *lan966x = rx->lan966x;
166 	struct fdma *fdma = &rx->fdma;
167 	u32 mask;
168 
169 	/* When activating a channel, the address of the first DCB must be
170 	 * written before the channel is activated
171 	 */
172 	lan_wr(lower_32_bits((u64)fdma->dma), lan966x,
173 	       FDMA_DCB_LLP(fdma->channel_id));
174 	lan_wr(upper_32_bits((u64)fdma->dma), lan966x,
175 	       FDMA_DCB_LLP1(fdma->channel_id));
176 
177 	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
178 	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
179 	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
180 	       FDMA_CH_CFG_CH_MEM_SET(1),
181 	       lan966x, FDMA_CH_CFG(fdma->channel_id));
182 
183 	/* Start fdma */
184 	lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
185 		FDMA_PORT_CTRL_XTR_STOP,
186 		lan966x, FDMA_PORT_CTRL(0));
187 
188 	/* Enable interrupts */
189 	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
190 	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
191 	mask |= BIT(fdma->channel_id);
192 	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
193 		FDMA_INTR_DB_ENA_INTR_DB_ENA,
194 		lan966x, FDMA_INTR_DB_ENA);
195 
196 	/* Activate the channel */
197 	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)),
198 		FDMA_CH_ACTIVATE_CH_ACTIVATE,
199 		lan966x, FDMA_CH_ACTIVATE);
200 }
201 
202 static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
203 {
204 	struct lan966x *lan966x = rx->lan966x;
205 	struct fdma *fdma = &rx->fdma;
206 	u32 val;
207 
208 	/* Disable the channel */
209 	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)),
210 		FDMA_CH_DISABLE_CH_DISABLE,
211 		lan966x, FDMA_CH_DISABLE);
212 
213 	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
214 				  val, !(val & BIT(fdma->channel_id)),
215 				  READL_SLEEP_US, READL_TIMEOUT_US);
216 
217 	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)),
218 		FDMA_CH_DB_DISCARD_DB_DISCARD,
219 		lan966x, FDMA_CH_DB_DISCARD);
220 }
221 
222 static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
223 {
224 	struct lan966x *lan966x = rx->lan966x;
225 
226 	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->fdma.channel_id)),
227 		FDMA_CH_RELOAD_CH_RELOAD,
228 		lan966x, FDMA_CH_RELOAD);
229 }
230 
231 static void lan966x_fdma_tx_add_dcb(struct lan966x_tx *tx,
232 				    struct lan966x_tx_dcb *dcb)
233 {
234 	dcb->nextptr = FDMA_DCB_INVALID_DATA;
235 	dcb->info = 0;
236 }
237 
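/* Allocate the TX bookkeeping array and the coherent DCB ring, then reset
 * every DCB and its DBs to an unused state.
 */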
238 static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
239 {
240 	struct lan966x *lan966x = tx->lan966x;
241 	struct fdma *fdma = &tx->fdma;
242 	struct lan966x_tx_dcb *dcb;
243 	struct lan966x_db *db;
244 	int size;
245 	int i, j;
246 
247 	tx->dcbs_buf = kcalloc(fdma->n_dcbs, sizeof(struct lan966x_tx_dcb_buf),
248 			       GFP_KERNEL);
249 	if (!tx->dcbs_buf)
250 		return -ENOMEM;
251 
252 	/* calculate how many pages are needed to allocate the dcbs */
253 	size = sizeof(struct lan966x_tx_dcb) * fdma->n_dcbs;
254 	size = ALIGN(size, PAGE_SIZE);
255 	tx->dcbs = dma_alloc_coherent(lan966x->dev, size, &tx->dma, GFP_KERNEL);
256 	if (!tx->dcbs)
257 		goto out;
258 
259 	/* Now for each dcb allocate the db */
260 	for (i = 0; i < fdma->n_dcbs; ++i) {
261 		dcb = &tx->dcbs[i];
262 
263 		for (j = 0; j < fdma->n_dbs; ++j) {
264 			db = &dcb->db[j];
265 			db->dataptr = 0;
266 			db->status = 0;
267 		}
268 
269 		lan966x_fdma_tx_add_dcb(tx, dcb);
270 	}
271 
272 	return 0;
273 
274 out:
275 	kfree(tx->dcbs_buf);
276 	return -ENOMEM;
277 }
278 
279 static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
280 {
281 	struct lan966x *lan966x = tx->lan966x;
282 	struct fdma *fdma = &tx->fdma;
283 	int size;
284 
285 	kfree(tx->dcbs_buf);
286 
287 	size = sizeof(struct lan966x_tx_dcb) * fdma->n_dcbs;
288 	size = ALIGN(size, PAGE_SIZE);
289 	dma_free_coherent(lan966x->dev, size, tx->dcbs, tx->dma);
290 }
291 
292 static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
293 {
294 	struct lan966x *lan966x = tx->lan966x;
295 	struct fdma *fdma = &tx->fdma;
296 	u32 mask;
297 
298 	/* When activating a channel, the address of the first DCB must be
299 	 * written before the channel is activated
300 	 */
301 	lan_wr(lower_32_bits((u64)tx->dma), lan966x,
302 	       FDMA_DCB_LLP(fdma->channel_id));
303 	lan_wr(upper_32_bits((u64)tx->dma), lan966x,
304 	       FDMA_DCB_LLP1(fdma->channel_id));
305 
306 	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
307 	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
308 	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
309 	       FDMA_CH_CFG_CH_MEM_SET(1),
310 	       lan966x, FDMA_CH_CFG(fdma->channel_id));
311 
312 	/* Start fdma */
313 	lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
314 		FDMA_PORT_CTRL_INJ_STOP,
315 		lan966x, FDMA_PORT_CTRL(0));
316 
317 	/* Enable interrupts */
318 	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
319 	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
320 	mask |= BIT(fdma->channel_id);
321 	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
322 		FDMA_INTR_DB_ENA_INTR_DB_ENA,
323 		lan966x, FDMA_INTR_DB_ENA);
324 
325 	/* Activate the channel */
326 	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)),
327 		FDMA_CH_ACTIVATE_CH_ACTIVATE,
328 		lan966x, FDMA_CH_ACTIVATE);
329 }
330 
331 static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
332 {
333 	struct lan966x *lan966x = tx->lan966x;
334 	struct fdma *fdma = &tx->fdma;
335 	u32 val;
336 
337 	/* Disable the channel */
338 	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)),
339 		FDMA_CH_DISABLE_CH_DISABLE,
340 		lan966x, FDMA_CH_DISABLE);
341 
342 	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
343 				  val, !(val & BIT(fdma->channel_id)),
344 				  READL_SLEEP_US, READL_TIMEOUT_US);
345 
346 	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)),
347 		FDMA_CH_DB_DISCARD_DB_DISCARD,
348 		lan966x, FDMA_CH_DB_DISCARD);
349 
350 	tx->activated = false;
351 	tx->last_in_use = -1;
352 }
353 
354 static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
355 {
356 	struct lan966x *lan966x = tx->lan966x;
357 
358 	/* Write the registers to reload the channel */
359 	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->fdma.channel_id)),
360 		FDMA_CH_RELOAD_CH_RELOAD,
361 		lan966x, FDMA_CH_RELOAD);
362 }
363 
364 static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x)
365 {
366 	struct lan966x_port *port;
367 	int i;
368 
369 	for (i = 0; i < lan966x->num_phys_ports; ++i) {
370 		port = lan966x->ports[i];
371 		if (!port)
372 			continue;
373 
374 		if (netif_queue_stopped(port->dev))
375 			netif_wake_queue(port->dev);
376 	}
377 }
378 
379 static void lan966x_fdma_stop_netdev(struct lan966x *lan966x)
380 {
381 	struct lan966x_port *port;
382 	int i;
383 
384 	for (i = 0; i < lan966x->num_phys_ports; ++i) {
385 		port = lan966x->ports[i];
386 		if (!port)
387 			continue;
388 
389 		netif_stop_queue(port->dev);
390 	}
391 }
392 
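/* Reclaim completed TX DCBs under the tx_lock: update the netdev stats,
 * unmap and release the skb, xdp_frame or page pool page attached to each
 * finished DCB and wake up the stopped TX queues if anything was freed.
 */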
393 static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
394 {
395 	struct lan966x_tx *tx = &lan966x->tx;
396 	struct lan966x_rx *rx = &lan966x->rx;
397 	struct lan966x_tx_dcb_buf *dcb_buf;
398 	struct xdp_frame_bulk bq;
399 	struct lan966x_db *db;
400 	unsigned long flags;
401 	bool clear = false;
402 	int i;
403 
404 	xdp_frame_bulk_init(&bq);
405 
406 	spin_lock_irqsave(&lan966x->tx_lock, flags);
407 	for (i = 0; i < tx->fdma.n_dcbs; ++i) {
408 		dcb_buf = &tx->dcbs_buf[i];
409 
410 		if (!dcb_buf->used)
411 			continue;
412 
413 		db = &tx->dcbs[i].db[0];
414 		if (!(db->status & FDMA_DCB_STATUS_DONE))
415 			continue;
416 
417 		dcb_buf->dev->stats.tx_packets++;
418 		dcb_buf->dev->stats.tx_bytes += dcb_buf->len;
419 
420 		dcb_buf->used = false;
421 		if (dcb_buf->use_skb) {
422 			dma_unmap_single(lan966x->dev,
423 					 dcb_buf->dma_addr,
424 					 dcb_buf->len,
425 					 DMA_TO_DEVICE);
426 
427 			if (!dcb_buf->ptp)
428 				napi_consume_skb(dcb_buf->data.skb, weight);
429 		} else {
430 			if (dcb_buf->xdp_ndo)
431 				dma_unmap_single(lan966x->dev,
432 						 dcb_buf->dma_addr,
433 						 dcb_buf->len,
434 						 DMA_TO_DEVICE);
435 
436 			if (dcb_buf->xdp_ndo)
437 				xdp_return_frame_bulk(dcb_buf->data.xdpf, &bq);
438 			else
439 				page_pool_recycle_direct(rx->page_pool,
440 							 dcb_buf->data.page);
441 		}
442 
443 		clear = true;
444 	}
445 
446 	xdp_flush_frame_bulk(&bq);
447 
448 	if (clear)
449 		lan966x_fdma_wakeup_netdev(lan966x);
450 
451 	spin_unlock_irqrestore(&lan966x->tx_lock, flags);
452 }
453 
454 static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
455 {
456 	struct fdma *fdma = &rx->fdma;
457 	struct fdma_db *db;
458 
459 	/* Check if there is any data */
460 	db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
461 	if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE)))
462 		return false;
463 
464 	return true;
465 }
466 
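/* Inspect the frame in the current RX DCB/DB: sync the data for CPU access,
 * extract the source port from the IFH and, if an XDP program is attached to
 * that port, run it. Returns FDMA_PASS for normal processing, FDMA_ERROR on
 * failure or the XDP verdict (FDMA_TX, FDMA_REDIRECT or FDMA_DROP).
 */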
467 static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
468 {
469 	struct lan966x *lan966x = rx->lan966x;
470 	struct fdma *fdma = &rx->fdma;
471 	struct lan966x_port *port;
472 	struct fdma_db *db;
473 	struct page *page;
474 
475 	db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
476 	page = rx->page[fdma->dcb_index][fdma->db_index];
477 	if (unlikely(!page))
478 		return FDMA_ERROR;
479 
480 	dma_sync_single_for_cpu(lan966x->dev,
481 				(dma_addr_t)db->dataptr + XDP_PACKET_HEADROOM,
482 				FDMA_DCB_STATUS_BLOCKL(db->status),
483 				DMA_FROM_DEVICE);
484 
485 	lan966x_ifh_get_src_port(page_address(page) + XDP_PACKET_HEADROOM,
486 				 src_port);
487 	if (WARN_ON(*src_port >= lan966x->num_phys_ports))
488 		return FDMA_ERROR;
489 
490 	port = lan966x->ports[*src_port];
491 	if (!lan966x_xdp_port_present(port))
492 		return FDMA_PASS;
493 
494 	return lan966x_xdp_run(port, page, FDMA_DCB_STATUS_BLOCKL(db->status));
495 }
496 
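/* Build an skb around the page of the current RX DCB/DB: strip the IFH after
 * extracting the timestamp, drop the FCS unless NETIF_F_RXFCS is set, attach
 * the RX timestamp and update the netdev stats. The page is recycled if the
 * skb cannot be built.
 */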
497 static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
498 						 u64 src_port)
499 {
500 	struct lan966x *lan966x = rx->lan966x;
501 	struct fdma *fdma = &rx->fdma;
502 	struct sk_buff *skb;
503 	struct fdma_db *db;
504 	struct page *page;
505 	u64 timestamp;
506 
507 	/* Get the page holding the received frame */
508 	db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
509 	page = rx->page[fdma->dcb_index][fdma->db_index];
510 
511 	skb = build_skb(page_address(page), fdma->db_size);
512 	if (unlikely(!skb))
513 		goto free_page;
514 
515 	skb_mark_for_recycle(skb);
516 
517 	skb_reserve(skb, XDP_PACKET_HEADROOM);
518 	skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));
519 
520 	lan966x_ifh_get_timestamp(skb->data, &timestamp);
521 
522 	skb->dev = lan966x->ports[src_port]->dev;
523 	skb_pull(skb, IFH_LEN_BYTES);
524 
525 	if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
526 		skb_trim(skb, skb->len - ETH_FCS_LEN);
527 
528 	lan966x_ptp_rxtstamp(lan966x, skb, src_port, timestamp);
529 	skb->protocol = eth_type_trans(skb, skb->dev);
530 
531 	if (lan966x->bridge_mask & BIT(src_port)) {
532 		skb->offload_fwd_mark = 1;
533 
534 		skb_reset_network_header(skb);
535 		if (!lan966x_hw_offload(lan966x, src_port, skb))
536 			skb->offload_fwd_mark = 0;
537 	}
538 
539 	skb->dev->stats.rx_bytes += skb->len;
540 	skb->dev->stats.rx_packets++;
541 
542 	return skb;
543 
544 free_page:
545 	page_pool_recycle_direct(rx->page_pool, page);
546 
547 	return NULL;
548 }
549 
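/* NAPI poll: reclaim completed TX buffers, process up to 'weight' received
 * frames, refill the consumed RX DCBs with fresh pages and reload the
 * channel, then flush pending XDP redirects and re-enable the FDMA DB
 * interrupts once all pending work is done.
 */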
550 static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
551 {
552 	struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
553 	struct lan966x_rx *rx = &lan966x->rx;
554 	struct fdma *fdma = &rx->fdma;
555 	int dcb_reload, counter = 0;
556 	struct fdma_dcb *old_dcb;
557 	bool redirect = false;
558 	struct sk_buff *skb;
559 	struct fdma_db *db;
560 	struct page *page;
561 	u64 src_port;
562 	u64 nextptr;
563 
564 	dcb_reload = fdma->dcb_index;
565 
566 	lan966x_fdma_tx_clear_buf(lan966x, weight);
567 
568 	/* Get all received skbs */
569 	while (counter < weight) {
570 		if (!lan966x_fdma_rx_more_frames(rx))
571 			break;
572 
573 		counter++;
574 
575 		switch (lan966x_fdma_rx_check_frame(rx, &src_port)) {
576 		case FDMA_PASS:
577 			break;
578 		case FDMA_ERROR:
579 			lan966x_fdma_rx_free_page(rx);
580 			lan966x_fdma_rx_advance_dcb(rx);
581 			goto allocate_new;
582 		case FDMA_REDIRECT:
583 			redirect = true;
584 			fallthrough;
585 		case FDMA_TX:
586 			lan966x_fdma_rx_advance_dcb(rx);
587 			continue;
588 		case FDMA_DROP:
589 			lan966x_fdma_rx_free_page(rx);
590 			lan966x_fdma_rx_advance_dcb(rx);
591 			continue;
592 		}
593 
594 		skb = lan966x_fdma_rx_get_frame(rx, src_port);
595 		lan966x_fdma_rx_advance_dcb(rx);
596 		if (!skb)
597 			goto allocate_new;
598 
599 		napi_gro_receive(&lan966x->napi, skb);
600 	}
601 
602 allocate_new:
603 	/* Allocate new pages and map them */
604 	while (dcb_reload != fdma->dcb_index) {
605 		db = &fdma->dcbs[dcb_reload].db[fdma->db_index];
606 		page = lan966x_fdma_rx_alloc_page(rx, db);
607 		if (unlikely(!page))
608 			break;
609 		rx->page[dcb_reload][fdma->db_index] = page;
610 
611 		old_dcb = &fdma->dcbs[dcb_reload];
612 		dcb_reload++;
613 		dcb_reload &= fdma->n_dcbs - 1;
614 
615 		nextptr = fdma->dma + ((unsigned long)old_dcb -
616 				     (unsigned long)fdma->dcbs);
617 		lan966x_fdma_rx_add_dcb(rx, old_dcb, nextptr);
618 		lan966x_fdma_rx_reload(rx);
619 	}
620 
621 	if (redirect)
622 		xdp_do_flush();
623 
624 	if (counter < weight && napi_complete_done(napi, counter))
625 		lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);
626 
627 	return counter;
628 }
629 
630 irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
631 {
632 	struct lan966x *lan966x = args;
633 	u32 db, err, err_type;
634 
635 	db = lan_rd(lan966x, FDMA_INTR_DB);
636 	err = lan_rd(lan966x, FDMA_INTR_ERR);
637 
638 	if (db) {
639 		lan_wr(0, lan966x, FDMA_INTR_DB_ENA);
640 		lan_wr(db, lan966x, FDMA_INTR_DB);
641 
642 		napi_schedule(&lan966x->napi);
643 	}
644 
645 	if (err) {
646 		err_type = lan_rd(lan966x, FDMA_ERRORS);
647 
648 		WARN(1, "Unexpected error: %d, error_type: %d\n", err, err_type);
649 
650 		lan_wr(err, lan966x, FDMA_INTR_ERR);
651 		lan_wr(err_type, lan966x, FDMA_ERRORS);
652 	}
653 
654 	return IRQ_HANDLED;
655 }
656 
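/* Return the index of a free TX DCB, skipping the last one in use, or -1 if
 * all DCBs are currently used.
 */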
657 static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
658 {
659 	struct lan966x_tx_dcb_buf *dcb_buf;
660 	struct fdma *fdma = &tx->fdma;
661 	int i;
662 
663 	for (i = 0; i < fdma->n_dcbs; ++i) {
664 		dcb_buf = &tx->dcbs_buf[i];
665 		if (!dcb_buf->used && i != tx->last_in_use)
666 			return i;
667 	}
668 
669 	return -1;
670 }
671 
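/* Fill the first DB of the selected TX DCB with the DMA address and length
 * of the frame and mark it as a complete frame (SOF + EOF) that raises an
 * interrupt on completion.
 */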
672 static void lan966x_fdma_tx_setup_dcb(struct lan966x_tx *tx,
673 				      int next_to_use, int len,
674 				      dma_addr_t dma_addr)
675 {
676 	struct lan966x_tx_dcb *next_dcb;
677 	struct lan966x_db *next_db;
678 
679 	next_dcb = &tx->dcbs[next_to_use];
680 	next_dcb->nextptr = FDMA_DCB_INVALID_DATA;
681 
682 	next_db = &next_dcb->db[0];
683 	next_db->dataptr = dma_addr;
684 	next_db->status = FDMA_DCB_STATUS_SOF |
685 			  FDMA_DCB_STATUS_EOF |
686 			  FDMA_DCB_STATUS_INTR |
687 			  FDMA_DCB_STATUS_BLOCKO(0) |
688 			  FDMA_DCB_STATUS_BLOCKL(len);
689 }
690 
691 static void lan966x_fdma_tx_start(struct lan966x_tx *tx, int next_to_use)
692 {
693 	struct lan966x *lan966x = tx->lan966x;
694 	struct lan966x_tx_dcb *dcb;
695 
696 	if (likely(lan966x->tx.activated)) {
697 		/* Connect the last used dcb to the newly set up dcb */
698 		dcb = &tx->dcbs[tx->last_in_use];
699 		dcb->nextptr = tx->dma + (next_to_use *
700 					  sizeof(struct lan966x_tx_dcb));
701 
702 		lan966x_fdma_tx_reload(tx);
703 	} else {
704 		/* This is the first transmission, so just activate the channel */
705 		lan966x->tx.activated = true;
706 		lan966x_fdma_tx_activate(tx);
707 	}
708 
709 	/* Remember this dcb as the last one in use */
710 	tx->last_in_use = next_to_use;
711 }
712 
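/* Transmit an XDP buffer for a port. When len is 0, ptr is an xdp_frame
 * (ndo_xdp_xmit path) which is DMA mapped here; otherwise ptr is a page from
 * the RX page pool (XDP_TX path) which is only synced for the device. In both
 * cases an IFH is written in the headroom in front of the frame data.
 */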
713 int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
714 {
715 	struct lan966x *lan966x = port->lan966x;
716 	struct lan966x_tx_dcb_buf *next_dcb_buf;
717 	struct lan966x_tx *tx = &lan966x->tx;
718 	struct xdp_frame *xdpf;
719 	dma_addr_t dma_addr;
720 	struct page *page;
721 	int next_to_use;
722 	__be32 *ifh;
723 	int ret = 0;
724 
725 	spin_lock(&lan966x->tx_lock);
726 
727 	/* Get next index */
728 	next_to_use = lan966x_fdma_get_next_dcb(tx);
729 	if (next_to_use < 0) {
730 		netif_stop_queue(port->dev);
731 		ret = NETDEV_TX_BUSY;
732 		goto out;
733 	}
734 
735 	/* Get the next buffer */
736 	next_dcb_buf = &tx->dcbs_buf[next_to_use];
737 
738 	/* Generate new IFH */
739 	if (!len) {
740 		xdpf = ptr;
741 
742 		if (xdpf->headroom < IFH_LEN_BYTES) {
743 			ret = NETDEV_TX_OK;
744 			goto out;
745 		}
746 
747 		ifh = xdpf->data - IFH_LEN_BYTES;
748 		memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
749 		lan966x_ifh_set_bypass(ifh, 1);
750 		lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
751 
752 		dma_addr = dma_map_single(lan966x->dev,
753 					  xdpf->data - IFH_LEN_BYTES,
754 					  xdpf->len + IFH_LEN_BYTES,
755 					  DMA_TO_DEVICE);
756 		if (dma_mapping_error(lan966x->dev, dma_addr)) {
757 			ret = NETDEV_TX_OK;
758 			goto out;
759 		}
760 
761 		next_dcb_buf->data.xdpf = xdpf;
762 		next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;
763 
764 		/* Setup next dcb */
765 		lan966x_fdma_tx_setup_dcb(tx, next_to_use,
766 					  xdpf->len + IFH_LEN_BYTES,
767 					  dma_addr);
768 	} else {
769 		page = ptr;
770 
771 		ifh = page_address(page) + XDP_PACKET_HEADROOM;
772 		memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
773 		lan966x_ifh_set_bypass(ifh, 1);
774 		lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
775 
776 		dma_addr = page_pool_get_dma_addr(page);
777 		dma_sync_single_for_device(lan966x->dev,
778 					   dma_addr + XDP_PACKET_HEADROOM,
779 					   len + IFH_LEN_BYTES,
780 					   DMA_TO_DEVICE);
781 
782 		next_dcb_buf->data.page = page;
783 		next_dcb_buf->len = len + IFH_LEN_BYTES;
784 
785 		/* Setup next dcb */
786 		lan966x_fdma_tx_setup_dcb(tx, next_to_use,
787 					  len + IFH_LEN_BYTES,
788 					  dma_addr + XDP_PACKET_HEADROOM);
789 	}
790 
791 	/* Fill up the buffer */
792 	next_dcb_buf->use_skb = false;
793 	next_dcb_buf->xdp_ndo = !len;
794 	next_dcb_buf->dma_addr = dma_addr;
795 	next_dcb_buf->used = true;
796 	next_dcb_buf->ptp = false;
797 	next_dcb_buf->dev = port->dev;
798 
799 	/* Start the transmission */
800 	lan966x_fdma_tx_start(tx, next_to_use);
801 
802 out:
803 	spin_unlock(&lan966x->tx_lock);
804 
805 	return ret;
806 }
807 
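/* FDMA transmit path for regular skbs: reserve a free DCB, pad the frame,
 * prepend the IFH and add room for the FCS, then DMA map the buffer and hand
 * it to the injection channel.
 */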
808 int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
809 {
810 	struct lan966x_port *port = netdev_priv(dev);
811 	struct lan966x *lan966x = port->lan966x;
812 	struct lan966x_tx_dcb_buf *next_dcb_buf;
813 	struct lan966x_tx *tx = &lan966x->tx;
814 	int needed_headroom;
815 	int needed_tailroom;
816 	dma_addr_t dma_addr;
817 	int next_to_use;
818 	int err;
819 
820 	/* Get next index */
821 	next_to_use = lan966x_fdma_get_next_dcb(tx);
822 	if (next_to_use < 0) {
823 		netif_stop_queue(dev);
824 		return NETDEV_TX_BUSY;
825 	}
826 
827 	if (skb_put_padto(skb, ETH_ZLEN)) {
828 		dev->stats.tx_dropped++;
829 		return NETDEV_TX_OK;
830 	}
831 
832 	/* skb processing */
833 	needed_headroom = max_t(int, IFH_LEN_BYTES - skb_headroom(skb), 0);
834 	needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
835 	if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
836 		err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
837 				       GFP_ATOMIC);
838 		if (unlikely(err)) {
839 			dev->stats.tx_dropped++;
840 			err = NETDEV_TX_OK;
841 			goto release;
842 		}
843 	}
844 
845 	skb_tx_timestamp(skb);
846 	skb_push(skb, IFH_LEN_BYTES);
847 	memcpy(skb->data, ifh, IFH_LEN_BYTES);
848 	skb_put(skb, 4);
849 
850 	dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
851 				  DMA_TO_DEVICE);
852 	if (dma_mapping_error(lan966x->dev, dma_addr)) {
853 		dev->stats.tx_dropped++;
854 		err = NETDEV_TX_OK;
855 		goto release;
856 	}
857 
858 	/* Setup next dcb */
859 	lan966x_fdma_tx_setup_dcb(tx, next_to_use, skb->len, dma_addr);
860 
861 	/* Fill up the buffer */
862 	next_dcb_buf = &tx->dcbs_buf[next_to_use];
863 	next_dcb_buf->use_skb = true;
864 	next_dcb_buf->data.skb = skb;
865 	next_dcb_buf->xdp_ndo = false;
866 	next_dcb_buf->len = skb->len;
867 	next_dcb_buf->dma_addr = dma_addr;
868 	next_dcb_buf->used = true;
869 	next_dcb_buf->ptp = false;
870 	next_dcb_buf->dev = dev;
871 
872 	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
873 	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
874 		next_dcb_buf->ptp = true;
875 
876 	/* Start the transmission */
877 	lan966x_fdma_tx_start(tx, next_to_use);
878 
879 	return NETDEV_TX_OK;
880 
881 release:
882 	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
883 	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
884 		lan966x_ptp_txtstamp_release(port, skb);
885 
886 	dev_kfree_skb_any(skb);
887 	return err;
888 }
889 
890 static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x)
891 {
892 	int max_mtu = 0;
893 	int i;
894 
895 	for (i = 0; i < lan966x->num_phys_ports; ++i) {
896 		struct lan966x_port *port;
897 		int mtu;
898 
899 		port = lan966x->ports[i];
900 		if (!port)
901 			continue;
902 
903 		mtu = lan_rd(lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
904 		if (mtu > max_mtu)
905 			max_mtu = mtu;
906 	}
907 
908 	return max_mtu;
909 }
910 
911 static int lan966x_qsys_sw_status(struct lan966x *lan966x)
912 {
913 	return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT));
914 }
915 
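/* Re-create the RX page pool and DCB ring for a new maximum frame size while
 * NAPI is disabled. The old resources are freed only after the new ones are
 * in place so they can be restored if the allocation fails.
 */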
916 static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
917 {
918 	struct page_pool *page_pool;
919 	dma_addr_t rx_dma;
920 	void *rx_dcbs;
921 	u32 size;
922 	int err;
923 
924 	/* Store these for later to free them */
925 	rx_dma = lan966x->rx.fdma.dma;
926 	rx_dcbs = lan966x->rx.fdma.dcbs;
927 	page_pool = lan966x->rx.page_pool;
928 
929 	napi_synchronize(&lan966x->napi);
930 	napi_disable(&lan966x->napi);
931 	lan966x_fdma_stop_netdev(lan966x);
932 
933 	lan966x_fdma_rx_disable(&lan966x->rx);
934 	lan966x_fdma_rx_free_pages(&lan966x->rx);
935 	lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
936 	lan966x->rx.max_mtu = new_mtu;
937 	err = lan966x_fdma_rx_alloc(&lan966x->rx);
938 	if (err)
939 		goto restore;
940 	lan966x_fdma_rx_start(&lan966x->rx);
941 
942 	size = sizeof(struct fdma_dcb) * lan966x->rx.fdma.n_dcbs;
943 	size = ALIGN(size, PAGE_SIZE);
944 	dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);
945 
946 	page_pool_destroy(page_pool);
947 
948 	lan966x_fdma_wakeup_netdev(lan966x);
949 	napi_enable(&lan966x->napi);
950 
951 	return err;
952 restore:
953 	lan966x->rx.page_pool = page_pool;
954 	lan966x->rx.fdma.dma = rx_dma;
955 	lan966x->rx.fdma.dcbs = rx_dcbs;
956 	lan966x_fdma_rx_start(&lan966x->rx);
957 
958 	return err;
959 }
960 
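/* Largest RX buffer that may be needed: the biggest MAC MAXLEN configured on
 * any port plus the IFH, the skb_shared_info tail, two VLAN tags and the XDP
 * headroom.
 */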
961 static int lan966x_fdma_get_max_frame(struct lan966x *lan966x)
962 {
963 	return lan966x_fdma_get_max_mtu(lan966x) +
964 	       IFH_LEN_BYTES +
965 	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
966 	       VLAN_HLEN * 2 +
967 	       XDP_PACKET_HEADROOM;
968 }
969 
970 static int __lan966x_fdma_reload(struct lan966x *lan966x, int max_mtu)
971 {
972 	int err;
973 	u32 val;
974 
975 	/* Disable the CPU port */
976 	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
977 		QSYS_SW_PORT_MODE_PORT_ENA,
978 		lan966x, QSYS_SW_PORT_MODE(CPU_PORT));
979 
980 	/* Flush the CPU queues */
981 	readx_poll_timeout(lan966x_qsys_sw_status, lan966x,
982 			   val, !(QSYS_SW_STATUS_EQ_AVAIL_GET(val)),
983 			   READL_SLEEP_US, READL_TIMEOUT_US);
984 
985 	/* Add a sleep in case there are frames between the queues and the CPU
986 	 * port
987 	 */
988 	usleep_range(1000, 2000);
989 
990 	err = lan966x_fdma_reload(lan966x, max_mtu);
991 
992 	/* Enable back the CPU port */
993 	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1),
994 		QSYS_SW_PORT_MODE_PORT_ENA,
995 		lan966x, QSYS_SW_PORT_MODE(CPU_PORT));
996 
997 	return err;
998 }
999 
1000 int lan966x_fdma_change_mtu(struct lan966x *lan966x)
1001 {
1002 	int max_mtu;
1003 
1004 	max_mtu = lan966x_fdma_get_max_frame(lan966x);
1005 	if (max_mtu == lan966x->rx.max_mtu)
1006 		return 0;
1007 
1008 	return __lan966x_fdma_reload(lan966x, max_mtu);
1009 }
1010 
1011 int lan966x_fdma_reload_page_pool(struct lan966x *lan966x)
1012 {
1013 	int max_mtu;
1014 
1015 	max_mtu = lan966x_fdma_get_max_frame(lan966x);
1016 	return __lan966x_fdma_reload(lan966x, max_mtu);
1017 }
1018 
1019 void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
1020 {
1021 	if (lan966x->fdma_ndev)
1022 		return;
1023 
1024 	lan966x->fdma_ndev = dev;
1025 	netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll);
1026 	napi_enable(&lan966x->napi);
1027 }
1028 
1029 void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev)
1030 {
1031 	if (lan966x->fdma_ndev == dev) {
1032 		netif_napi_del(&lan966x->napi);
1033 		lan966x->fdma_ndev = NULL;
1034 	}
1035 }
1036 
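/* Set up the RX and TX channel state, allocate their DCBs and the RX page
 * pool, then start the extraction channel. Does nothing when the FDMA
 * interface is not used.
 */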
1037 int lan966x_fdma_init(struct lan966x *lan966x)
1038 {
1039 	int err;
1040 
1041 	if (!lan966x->fdma)
1042 		return 0;
1043 
1044 	lan966x->rx.lan966x = lan966x;
1045 	lan966x->rx.fdma.channel_id = FDMA_XTR_CHANNEL;
1046 	lan966x->rx.fdma.n_dcbs = FDMA_DCB_MAX;
1047 	lan966x->rx.fdma.n_dbs = FDMA_RX_DCB_MAX_DBS;
1048 	lan966x->rx.fdma.priv = lan966x;
1049 	lan966x->rx.fdma.size = fdma_get_size(&lan966x->rx.fdma);
1050 	lan966x->rx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;
1051 	lan966x->rx.fdma.ops.nextptr_cb = &fdma_nextptr_cb;
1052 	lan966x->rx.fdma.ops.dataptr_cb = &lan966x_fdma_rx_dataptr_cb;
1053 	lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x);
1054 	lan966x->tx.lan966x = lan966x;
1055 	lan966x->tx.fdma.channel_id = FDMA_INJ_CHANNEL;
1056 	lan966x->tx.fdma.n_dcbs = FDMA_DCB_MAX;
1057 	lan966x->tx.fdma.n_dbs = FDMA_TX_DCB_MAX_DBS;
1058 	lan966x->tx.last_in_use = -1;
1059 
1060 	err = lan966x_fdma_rx_alloc(&lan966x->rx);
1061 	if (err)
1062 		return err;
1063 
1064 	err = lan966x_fdma_tx_alloc(&lan966x->tx);
1065 	if (err) {
1066 		lan966x_fdma_rx_free(&lan966x->rx);
1067 		return err;
1068 	}
1069 
1070 	lan966x_fdma_rx_start(&lan966x->rx);
1071 
1072 	return 0;
1073 }
1074 
1075 void lan966x_fdma_deinit(struct lan966x *lan966x)
1076 {
1077 	if (!lan966x->fdma)
1078 		return;
1079 
1080 	lan966x_fdma_rx_disable(&lan966x->rx);
1081 	lan966x_fdma_tx_disable(&lan966x->tx);
1082 
1083 	napi_synchronize(&lan966x->napi);
1084 	napi_disable(&lan966x->napi);
1085 
1086 	lan966x_fdma_rx_free_pages(&lan966x->rx);
1087 	lan966x_fdma_rx_free(&lan966x->rx);
1088 	page_pool_destroy(lan966x->rx.page_pool);
1089 	lan966x_fdma_tx_free(&lan966x->tx);
1090 }
1091