xref: /linux/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c (revision 2b5a09e67b72146161df176ba73ddb4d6607f3a0)
1 // SPDX-License-Identifier: GPL-2.0+
2 
3 #include <linux/bpf.h>
4 #include <linux/filter.h>
5 #include <net/page_pool/helpers.h>
6 
7 #include "lan966x_main.h"
8 
9 static int lan966x_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db,
10 				      u64 *dataptr)
11 {
12 	struct lan966x *lan966x = (struct lan966x *)fdma->priv;
13 	struct lan966x_rx *rx = &lan966x->rx;
14 	struct page *page;
15 
16 	page = page_pool_dev_alloc_pages(rx->page_pool);
17 	if (unlikely(!page))
18 		return -ENOMEM;
19 
20 	rx->page[dcb][db] = page;
21 	*dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;
22 
23 	return 0;
24 }
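
/* Illustrative sketch (editor's addition, not part of this driver): the
 * shared FDMA library is expected to invoke the dataptr_cb above for every
 * data block it (re)initialises via fdma_dcbs_init()/fdma_dcb_add(), roughly
 * along these lines. The helper name is hypothetical; only the ops hook and
 * the fdma_db fields already used in this file are assumed.
 */
static int fdma_example_fill_db(struct fdma *fdma, int dcb_idx, int db_idx,
				struct fdma_db *db)
{
	u64 dataptr;
	int err;

	/* Ask the driver (lan966x_fdma_rx_dataptr_cb) for a DMA address
	 * backing this data block.
	 */
	err = fdma->ops.dataptr_cb(fdma, dcb_idx, db_idx, &dataptr);
	if (err)
		return err;

	db->dataptr = dataptr;
	db->status = 0;

	return 0;
}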
25 
26 static int lan966x_fdma_channel_active(struct lan966x *lan966x)
27 {
28 	return lan_rd(lan966x, FDMA_CH_ACTIVE);
29 }
30 
31 static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
32 {
33 	struct fdma *fdma = &rx->fdma;
34 	int i, j;
35 
36 	for (i = 0; i < fdma->n_dcbs; ++i) {
37 		for (j = 0; j < fdma->n_dbs; ++j)
38 			page_pool_put_full_page(rx->page_pool,
39 						rx->page[i][j], false);
40 	}
41 }
42 
43 static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx)
44 {
45 	struct fdma *fdma = &rx->fdma;
46 	struct page *page;
47 
48 	page = rx->page[fdma->dcb_index][fdma->db_index];
49 	if (unlikely(!page))
50 		return;
51 
52 	page_pool_recycle_direct(rx->page_pool, page);
53 }
54 
55 static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
56 {
57 	struct lan966x *lan966x = rx->lan966x;
58 	struct page_pool_params pp_params = {
59 		.order = rx->page_order,
60 		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
61 		.pool_size = rx->fdma.n_dcbs,
62 		.nid = NUMA_NO_NODE,
63 		.dev = lan966x->dev,
64 		.dma_dir = DMA_FROM_DEVICE,
65 		.offset = XDP_PACKET_HEADROOM,
66 		.max_len = rx->max_mtu -
67 			   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
68 	};
69 
70 	if (lan966x_xdp_present(lan966x))
71 		pp_params.dma_dir = DMA_BIDIRECTIONAL;
72 
73 	rx->page_pool = page_pool_create(&pp_params);
74 
75 	for (int i = 0; i < lan966x->num_phys_ports; ++i) {
76 		struct lan966x_port *port;
77 
78 		if (!lan966x->ports[i])
79 			continue;
80 
81 		port = lan966x->ports[i];
82 		xdp_rxq_info_unreg_mem_model(&port->xdp_rxq);
83 		xdp_rxq_info_reg_mem_model(&port->xdp_rxq, MEM_TYPE_PAGE_POOL,
84 					   rx->page_pool);
85 	}
86 
87 	return PTR_ERR_OR_ZERO(rx->page_pool);
88 }
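
/* Illustrative sketch (editor's addition, not part of this driver): the page
 * pool life cycle used throughout this file, reduced to its essentials. Only
 * calls already used above are exercised; the order, pool size and the bare
 * PP_FLAG_DMA_MAP flag are placeholder values for the sketch.
 */
static dma_addr_t lan966x_example_page_pool_cycle(struct device *dev)
{
	struct page_pool_params pp_params = {
		.order = 0,			/* one page per buffer */
		.flags = PP_FLAG_DMA_MAP,	/* pool performs the DMA mapping */
		.pool_size = 64,
		.nid = NUMA_NO_NODE,
		.dev = dev,
		.dma_dir = DMA_FROM_DEVICE,
	};
	struct page_pool *pool;
	struct page *page;
	dma_addr_t dma = 0;

	pool = page_pool_create(&pp_params);
	if (IS_ERR(pool))
		return 0;

	page = page_pool_dev_alloc_pages(pool);
	if (page) {
		/* The DMA mapping was set up by the pool at allocation time */
		dma = page_pool_get_dma_addr(page);

		/* Return the page so the pool can hand it out again */
		page_pool_put_full_page(pool, page, false);
	}

	page_pool_destroy(pool);

	return dma;
}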
89 
90 static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
91 {
92 	struct lan966x *lan966x = rx->lan966x;
93 	struct fdma *fdma = &rx->fdma;
94 	int err;
95 
96 	if (lan966x_fdma_rx_alloc_page_pool(rx))
97 		return PTR_ERR(rx->page_pool);
98 
99 	err = fdma_alloc_coherent(lan966x->dev, fdma);
100 	if (err)
101 		return err;
102 
103 	fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
104 		       FDMA_DCB_STATUS_INTR);
105 
106 	return 0;
107 }
108 
109 static void lan966x_fdma_rx_advance_dcb(struct lan966x_rx *rx)
110 {
111 	struct fdma *fdma = &rx->fdma;
112 
113 	fdma->dcb_index++;
114 	fdma->dcb_index &= fdma->n_dcbs - 1;
115 }
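
/* Editor's note (not part of this driver): the "&= n_dcbs - 1" wrap above
 * relies on the DCB count being a power of two (which FDMA_DCB_MAX is assumed
 * to be); masking with n - 1 is then equivalent to "% n". For example, with
 * n_dcbs = 64:
 *
 *	dcb_index = 10:  (10 + 1) & 63 = 11
 *	dcb_index = 63:  (63 + 1) & 63 = 0	(wrap back to the ring start)
 */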
116 
117 static void lan966x_fdma_rx_free(struct lan966x_rx *rx)
118 {
119 	struct lan966x *lan966x = rx->lan966x;
120 	struct fdma *fdma = &rx->fdma;
121 	u32 size;
122 
123 	/* Now it is possible to do the cleanup of dcb */
124 	size = sizeof(struct fdma_dcb) * fdma->n_dcbs;
125 	size = ALIGN(size, PAGE_SIZE);
126 	dma_free_coherent(lan966x->dev, size, fdma->dcbs, fdma->dma);
127 }
128 
129 static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
130 {
131 	struct lan966x *lan966x = rx->lan966x;
132 	struct fdma *fdma = &rx->fdma;
133 	u32 mask;
134 
135 	/* When activating a channel, the address of the first DCB must be
136 	 * written before the channel itself is activated
137 	 */
138 	lan_wr(lower_32_bits((u64)fdma->dma), lan966x,
139 	       FDMA_DCB_LLP(fdma->channel_id));
140 	lan_wr(upper_32_bits((u64)fdma->dma), lan966x,
141 	       FDMA_DCB_LLP1(fdma->channel_id));
142 
143 	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
144 	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
145 	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
146 	       FDMA_CH_CFG_CH_MEM_SET(1),
147 	       lan966x, FDMA_CH_CFG(fdma->channel_id));
148 
149 	/* Start fdma */
150 	lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
151 		FDMA_PORT_CTRL_XTR_STOP,
152 		lan966x, FDMA_PORT_CTRL(0));
153 
154 	/* Enable interrupts */
155 	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
156 	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
157 	mask |= BIT(fdma->channel_id);
158 	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
159 		FDMA_INTR_DB_ENA_INTR_DB_ENA,
160 		lan966x, FDMA_INTR_DB_ENA);
161 
162 	/* Activate the channel */
163 	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)),
164 		FDMA_CH_ACTIVATE_CH_ACTIVATE,
165 		lan966x, FDMA_CH_ACTIVATE);
166 }
167 
168 static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
169 {
170 	struct lan966x *lan966x = rx->lan966x;
171 	struct fdma *fdma = &rx->fdma;
172 	u32 val;
173 
174 	/* Disable the channel */
175 	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)),
176 		FDMA_CH_DISABLE_CH_DISABLE,
177 		lan966x, FDMA_CH_DISABLE);
178 
179 	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
180 				  val, !(val & BIT(fdma->channel_id)),
181 				  READL_SLEEP_US, READL_TIMEOUT_US);
182 
183 	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)),
184 		FDMA_CH_DB_DISCARD_DB_DISCARD,
185 		lan966x, FDMA_CH_DB_DISCARD);
186 }
187 
188 static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
189 {
190 	struct lan966x *lan966x = rx->lan966x;
191 
192 	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->fdma.channel_id)),
193 		FDMA_CH_RELOAD_CH_RELOAD,
194 		lan966x, FDMA_CH_RELOAD);
195 }
196 
197 static void lan966x_fdma_tx_add_dcb(struct lan966x_tx *tx,
198 				    struct lan966x_tx_dcb *dcb)
199 {
200 	dcb->nextptr = FDMA_DCB_INVALID_DATA;
201 	dcb->info = 0;
202 }
203 
204 static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
205 {
206 	struct lan966x *lan966x = tx->lan966x;
207 	struct fdma *fdma = &tx->fdma;
208 	struct lan966x_tx_dcb *dcb;
209 	struct lan966x_db *db;
210 	int size;
211 	int i, j;
212 
213 	tx->dcbs_buf = kcalloc(fdma->n_dcbs, sizeof(struct lan966x_tx_dcb_buf),
214 			       GFP_KERNEL);
215 	if (!tx->dcbs_buf)
216 		return -ENOMEM;
217 
218 	/* calculate how many pages are needed to allocate the dcbs */
219 	size = sizeof(struct lan966x_tx_dcb) * fdma->n_dcbs;
220 	size = ALIGN(size, PAGE_SIZE);
221 	tx->dcbs = dma_alloc_coherent(lan966x->dev, size, &tx->dma, GFP_KERNEL);
222 	if (!tx->dcbs)
223 		goto out;
224 
225 	/* Now for each dcb allocate the db */
226 	for (i = 0; i < fdma->n_dcbs; ++i) {
227 		dcb = &tx->dcbs[i];
228 
229 		for (j = 0; j < fdma->n_dbs; ++j) {
230 			db = &dcb->db[j];
231 			db->dataptr = 0;
232 			db->status = 0;
233 		}
234 
235 		lan966x_fdma_tx_add_dcb(tx, dcb);
236 	}
237 
238 	return 0;
239 
240 out:
241 	kfree(tx->dcbs_buf);
242 	return -ENOMEM;
243 }
244 
245 static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
246 {
247 	struct lan966x *lan966x = tx->lan966x;
248 	struct fdma *fdma = &tx->fdma;
249 	int size;
250 
251 	kfree(tx->dcbs_buf);
252 
253 	size = sizeof(struct lan966x_tx_dcb) * fdma->n_dcbs;
254 	size = ALIGN(size, PAGE_SIZE);
255 	dma_free_coherent(lan966x->dev, size, tx->dcbs, tx->dma);
256 }
257 
258 static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
259 {
260 	struct lan966x *lan966x = tx->lan966x;
261 	struct fdma *fdma = &tx->fdma;
262 	u32 mask;
263 
264 	/* When activating a channel, the address of the first DCB must be
265 	 * written before the channel itself is activated
266 	 */
267 	lan_wr(lower_32_bits((u64)tx->dma), lan966x,
268 	       FDMA_DCB_LLP(fdma->channel_id));
269 	lan_wr(upper_32_bits((u64)tx->dma), lan966x,
270 	       FDMA_DCB_LLP1(fdma->channel_id));
271 
272 	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
273 	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
274 	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
275 	       FDMA_CH_CFG_CH_MEM_SET(1),
276 	       lan966x, FDMA_CH_CFG(fdma->channel_id));
277 
278 	/* Start fdma */
279 	lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
280 		FDMA_PORT_CTRL_INJ_STOP,
281 		lan966x, FDMA_PORT_CTRL(0));
282 
283 	/* Enable interrupts */
284 	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
285 	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
286 	mask |= BIT(fdma->channel_id);
287 	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
288 		FDMA_INTR_DB_ENA_INTR_DB_ENA,
289 		lan966x, FDMA_INTR_DB_ENA);
290 
291 	/* Activate the channel */
292 	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)),
293 		FDMA_CH_ACTIVATE_CH_ACTIVATE,
294 		lan966x, FDMA_CH_ACTIVATE);
295 }
296 
297 static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
298 {
299 	struct lan966x *lan966x = tx->lan966x;
300 	struct fdma *fdma = &tx->fdma;
301 	u32 val;
302 
303 	/* Disable the channel */
304 	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)),
305 		FDMA_CH_DISABLE_CH_DISABLE,
306 		lan966x, FDMA_CH_DISABLE);
307 
308 	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
309 				  val, !(val & BIT(fdma->channel_id)),
310 				  READL_SLEEP_US, READL_TIMEOUT_US);
311 
312 	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)),
313 		FDMA_CH_DB_DISCARD_DB_DISCARD,
314 		lan966x, FDMA_CH_DB_DISCARD);
315 
316 	tx->activated = false;
317 	tx->last_in_use = -1;
318 }
319 
320 static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
321 {
322 	struct lan966x *lan966x = tx->lan966x;
323 
324 	/* Write the registers to reload the channel */
325 	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->fdma.channel_id)),
326 		FDMA_CH_RELOAD_CH_RELOAD,
327 		lan966x, FDMA_CH_RELOAD);
328 }
329 
330 static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x)
331 {
332 	struct lan966x_port *port;
333 	int i;
334 
335 	for (i = 0; i < lan966x->num_phys_ports; ++i) {
336 		port = lan966x->ports[i];
337 		if (!port)
338 			continue;
339 
340 		if (netif_queue_stopped(port->dev))
341 			netif_wake_queue(port->dev);
342 	}
343 }
344 
345 static void lan966x_fdma_stop_netdev(struct lan966x *lan966x)
346 {
347 	struct lan966x_port *port;
348 	int i;
349 
350 	for (i = 0; i < lan966x->num_phys_ports; ++i) {
351 		port = lan966x->ports[i];
352 		if (!port)
353 			continue;
354 
355 		netif_stop_queue(port->dev);
356 	}
357 }
358 
359 static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
360 {
361 	struct lan966x_tx *tx = &lan966x->tx;
362 	struct lan966x_rx *rx = &lan966x->rx;
363 	struct lan966x_tx_dcb_buf *dcb_buf;
364 	struct xdp_frame_bulk bq;
365 	struct lan966x_db *db;
366 	unsigned long flags;
367 	bool clear = false;
368 	int i;
369 
370 	xdp_frame_bulk_init(&bq);
371 
372 	spin_lock_irqsave(&lan966x->tx_lock, flags);
373 	for (i = 0; i < tx->fdma.n_dcbs; ++i) {
374 		dcb_buf = &tx->dcbs_buf[i];
375 
376 		if (!dcb_buf->used)
377 			continue;
378 
379 		db = &tx->dcbs[i].db[0];
380 		if (!(db->status & FDMA_DCB_STATUS_DONE))
381 			continue;
382 
383 		dcb_buf->dev->stats.tx_packets++;
384 		dcb_buf->dev->stats.tx_bytes += dcb_buf->len;
385 
386 		dcb_buf->used = false;
387 		if (dcb_buf->use_skb) {
388 			dma_unmap_single(lan966x->dev,
389 					 dcb_buf->dma_addr,
390 					 dcb_buf->len,
391 					 DMA_TO_DEVICE);
392 
393 			if (!dcb_buf->ptp)
394 				napi_consume_skb(dcb_buf->data.skb, weight);
395 		} else {
396 			if (dcb_buf->xdp_ndo)
397 				dma_unmap_single(lan966x->dev,
398 						 dcb_buf->dma_addr,
399 						 dcb_buf->len,
400 						 DMA_TO_DEVICE);
401 
402 			if (dcb_buf->xdp_ndo)
403 				xdp_return_frame_bulk(dcb_buf->data.xdpf, &bq);
404 			else
405 				page_pool_recycle_direct(rx->page_pool,
406 							 dcb_buf->data.page);
407 		}
408 
409 		clear = true;
410 	}
411 
412 	xdp_flush_frame_bulk(&bq);
413 
414 	if (clear)
415 		lan966x_fdma_wakeup_netdev(lan966x);
416 
417 	spin_unlock_irqrestore(&lan966x->tx_lock, flags);
418 }
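
/* Editor's summary (not part of this driver): a completed TX DCB is released
 * along one of three paths, selected by the flags stored in the
 * lan966x_tx_dcb_buf when the frame was queued:
 *
 *	use_skb              -> dma_unmap_single() + napi_consume_skb()
 *	                        (the skb is kept if it still awaits a PTP
 *	                         timestamp)
 *	!use_skb && xdp_ndo  -> dma_unmap_single() + xdp_return_frame_bulk()
 *	!use_skb && !xdp_ndo -> page_pool_recycle_direct(); the XDP_TX page
 *	                        stays mapped by the page pool, so no unmap
 */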
419 
420 static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
421 {
422 	struct fdma *fdma = &rx->fdma;
423 	struct fdma_db *db;
424 
425 	/* Check if there is any data */
426 	db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
427 	if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE)))
428 		return false;
429 
430 	return true;
431 }
432 
433 static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
434 {
435 	struct lan966x *lan966x = rx->lan966x;
436 	struct fdma *fdma = &rx->fdma;
437 	struct lan966x_port *port;
438 	struct fdma_db *db;
439 	struct page *page;
440 
441 	db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
442 	page = rx->page[fdma->dcb_index][fdma->db_index];
443 	if (unlikely(!page))
444 		return FDMA_ERROR;
445 
446 	dma_sync_single_for_cpu(lan966x->dev,
447 				(dma_addr_t)db->dataptr + XDP_PACKET_HEADROOM,
448 				FDMA_DCB_STATUS_BLOCKL(db->status),
449 				DMA_FROM_DEVICE);
450 
451 	lan966x_ifh_get_src_port(page_address(page) + XDP_PACKET_HEADROOM,
452 				 src_port);
453 	if (WARN_ON(*src_port >= lan966x->num_phys_ports))
454 		return FDMA_ERROR;
455 
456 	port = lan966x->ports[*src_port];
457 	if (!lan966x_xdp_port_present(port))
458 		return FDMA_PASS;
459 
460 	return lan966x_xdp_run(port, page, FDMA_DCB_STATUS_BLOCKL(db->status));
461 }
462 
463 static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
464 						 u64 src_port)
465 {
466 	struct lan966x *lan966x = rx->lan966x;
467 	struct fdma *fdma = &rx->fdma;
468 	struct sk_buff *skb;
469 	struct fdma_db *db;
470 	struct page *page;
471 	u64 timestamp;
472 
473 	/* Get the received frame and unmap it */
474 	db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
475 	page = rx->page[fdma->dcb_index][fdma->db_index];
476 
477 	skb = build_skb(page_address(page), fdma->db_size);
478 	if (unlikely(!skb))
479 		goto free_page;
480 
481 	skb_mark_for_recycle(skb);
482 
483 	skb_reserve(skb, XDP_PACKET_HEADROOM);
484 	skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));
485 
486 	lan966x_ifh_get_timestamp(skb->data, &timestamp);
487 
488 	skb->dev = lan966x->ports[src_port]->dev;
489 	skb_pull(skb, IFH_LEN_BYTES);
490 
491 	if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
492 		skb_trim(skb, skb->len - ETH_FCS_LEN);
493 
494 	lan966x_ptp_rxtstamp(lan966x, skb, src_port, timestamp);
495 	skb->protocol = eth_type_trans(skb, skb->dev);
496 
497 	if (lan966x->bridge_mask & BIT(src_port)) {
498 		skb->offload_fwd_mark = 1;
499 
500 		skb_reset_network_header(skb);
501 		if (!lan966x_hw_offload(lan966x, src_port, skb))
502 			skb->offload_fwd_mark = 0;
503 	}
504 
505 	skb->dev->stats.rx_bytes += skb->len;
506 	skb->dev->stats.rx_packets++;
507 
508 	return skb;
509 
510 free_page:
511 	page_pool_recycle_direct(rx->page_pool, page);
512 
513 	return NULL;
514 }
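
/* Editor's sketch of the RX buffer layout consumed above (offsets relative to
 * page_address(page); the drawing is not part of this driver):
 *
 *	+---------------------+---------------------+----------------+-----+
 *	| XDP_PACKET_HEADROOM | IFH (IFH_LEN_BYTES) | Ethernet frame | FCS |
 *	+---------------------+---------------------+----------------+-----+
 *	  skb_reserve()         skb_pull()                            skb_trim()
 *
 * skb_put(FDMA_DCB_STATUS_BLOCKL(...)) first exposes IFH + frame + FCS, the
 * IFH is then stripped with skb_pull() and the 4-byte FCS is trimmed unless
 * the port has NETIF_F_RXFCS enabled.
 */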
515 
516 static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
517 {
518 	struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
519 	struct lan966x_rx *rx = &lan966x->rx;
520 	int old_dcb, dcb_reload, counter = 0;
521 	struct fdma *fdma = &rx->fdma;
522 	bool redirect = false;
523 	struct sk_buff *skb;
524 	u64 src_port;
525 
526 	dcb_reload = fdma->dcb_index;
527 
528 	lan966x_fdma_tx_clear_buf(lan966x, weight);
529 
530 	/* Get all received skb */
531 	while (counter < weight) {
532 		if (!lan966x_fdma_rx_more_frames(rx))
533 			break;
534 
535 		counter++;
536 
537 		switch (lan966x_fdma_rx_check_frame(rx, &src_port)) {
538 		case FDMA_PASS:
539 			break;
540 		case FDMA_ERROR:
541 			lan966x_fdma_rx_free_page(rx);
542 			lan966x_fdma_rx_advance_dcb(rx);
543 			goto allocate_new;
544 		case FDMA_REDIRECT:
545 			redirect = true;
546 			fallthrough;
547 		case FDMA_TX:
548 			lan966x_fdma_rx_advance_dcb(rx);
549 			continue;
550 		case FDMA_DROP:
551 			lan966x_fdma_rx_free_page(rx);
552 			lan966x_fdma_rx_advance_dcb(rx);
553 			continue;
554 		}
555 
556 		skb = lan966x_fdma_rx_get_frame(rx, src_port);
557 		lan966x_fdma_rx_advance_dcb(rx);
558 		if (!skb)
559 			goto allocate_new;
560 
561 		napi_gro_receive(&lan966x->napi, skb);
562 	}
563 
564 allocate_new:
565 	/* Allocate new pages and map them */
566 	while (dcb_reload != fdma->dcb_index) {
567 		old_dcb = dcb_reload;
568 		dcb_reload++;
569 		dcb_reload &= fdma->n_dcbs - 1;
570 
571 		fdma_dcb_add(fdma, old_dcb, FDMA_DCB_INFO_DATAL(fdma->db_size),
572 			     FDMA_DCB_STATUS_INTR);
573 
574 		lan966x_fdma_rx_reload(rx);
575 	}
576 
577 	if (redirect)
578 		xdp_do_flush();
579 
580 	if (counter < weight && napi_complete_done(napi, counter))
581 		lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);
582 
583 	return counter;
584 }
585 
586 irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
587 {
588 	struct lan966x *lan966x = args;
589 	u32 db, err, err_type;
590 
591 	db = lan_rd(lan966x, FDMA_INTR_DB);
592 	err = lan_rd(lan966x, FDMA_INTR_ERR);
593 
594 	if (db) {
595 		lan_wr(0, lan966x, FDMA_INTR_DB_ENA);
596 		lan_wr(db, lan966x, FDMA_INTR_DB);
597 
598 		napi_schedule(&lan966x->napi);
599 	}
600 
601 	if (err) {
602 		err_type = lan_rd(lan966x, FDMA_ERRORS);
603 
604 		WARN(1, "Unexpected error: %d, error_type: %d\n", err, err_type);
605 
606 		lan_wr(err, lan966x, FDMA_INTR_ERR);
607 		lan_wr(err_type, lan966x, FDMA_ERRORS);
608 	}
609 
610 	return IRQ_HANDLED;
611 }
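
/* Editor's note (not part of this driver): the handler above masks all DB
 * interrupts (FDMA_INTR_DB_ENA = 0), acks the pending DB bits and defers the
 * work to NAPI. lan966x_fdma_napi_poll() re-enables the DB interrupts (the
 * 0xff write to FDMA_INTR_DB_ENA) only once it has completed below its
 * budget, the usual interrupt-mitigation pattern for NAPI drivers.
 */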
612 
613 static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
614 {
615 	struct lan966x_tx_dcb_buf *dcb_buf;
616 	struct fdma *fdma = &tx->fdma;
617 	int i;
618 
619 	for (i = 0; i < fdma->n_dcbs; ++i) {
620 		dcb_buf = &tx->dcbs_buf[i];
621 		if (!dcb_buf->used && i != tx->last_in_use)
622 			return i;
623 	}
624 
625 	return -1;
626 }
627 
628 static void lan966x_fdma_tx_setup_dcb(struct lan966x_tx *tx,
629 				      int next_to_use, int len,
630 				      dma_addr_t dma_addr)
631 {
632 	struct lan966x_tx_dcb *next_dcb;
633 	struct lan966x_db *next_db;
634 
635 	next_dcb = &tx->dcbs[next_to_use];
636 	next_dcb->nextptr = FDMA_DCB_INVALID_DATA;
637 
638 	next_db = &next_dcb->db[0];
639 	next_db->dataptr = dma_addr;
640 	next_db->status = FDMA_DCB_STATUS_SOF |
641 			  FDMA_DCB_STATUS_EOF |
642 			  FDMA_DCB_STATUS_INTR |
643 			  FDMA_DCB_STATUS_BLOCKO(0) |
644 			  FDMA_DCB_STATUS_BLOCKL(len);
645 }
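
/* Editor's note (not part of this driver): each frame occupies exactly one
 * data block, so the status word always carries both SOF and EOF, a block
 * offset of 0 and a block length equal to the mapped length (IFH + payload).
 * FDMA_DCB_STATUS_INTR requests a DB interrupt once the block has been
 * handled; the DONE bit set by hardware is what lan966x_fdma_tx_clear_buf()
 * checks before reclaiming the buffer.
 */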
646 
647 static void lan966x_fdma_tx_start(struct lan966x_tx *tx, int next_to_use)
648 {
649 	struct lan966x *lan966x = tx->lan966x;
650 	struct lan966x_tx_dcb *dcb;
651 
652 	if (likely(lan966x->tx.activated)) {
653 		/* Connect current dcb to the next db */
654 		dcb = &tx->dcbs[tx->last_in_use];
655 		dcb->nextptr = tx->dma + (next_to_use *
656 					  sizeof(struct lan966x_tx_dcb));
657 
658 		lan966x_fdma_tx_reload(tx);
659 	} else {
660 		/* First time the channel is used, so just activate it */
661 		lan966x->tx.activated = true;
662 		lan966x_fdma_tx_activate(tx);
663 	}
664 
665 	/* Move to the next dcb, as this one is now the last in use */
666 	tx->last_in_use = next_to_use;
667 }
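
/* Editor's note (not part of this driver): TX DCBs form a hardware linked
 * list. A freshly set up DCB is terminated with FDMA_DCB_INVALID_DATA; to
 * queue it, the nextptr of the previous tail (tx->last_in_use) is patched to
 * the DMA address of the new DCB and a RELOAD is issued so the hardware
 * re-reads the list. Only the very first frame activates the channel instead.
 */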
668 
669 int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
670 {
671 	struct lan966x *lan966x = port->lan966x;
672 	struct lan966x_tx_dcb_buf *next_dcb_buf;
673 	struct lan966x_tx *tx = &lan966x->tx;
674 	struct xdp_frame *xdpf;
675 	dma_addr_t dma_addr;
676 	struct page *page;
677 	int next_to_use;
678 	__be32 *ifh;
679 	int ret = 0;
680 
681 	spin_lock(&lan966x->tx_lock);
682 
683 	/* Get next index */
684 	next_to_use = lan966x_fdma_get_next_dcb(tx);
685 	if (next_to_use < 0) {
686 		netif_stop_queue(port->dev);
687 		ret = NETDEV_TX_BUSY;
688 		goto out;
689 	}
690 
691 	/* Get the next buffer */
692 	next_dcb_buf = &tx->dcbs_buf[next_to_use];
693 
694 	/* Generate new IFH */
695 	if (!len) {
696 		xdpf = ptr;
697 
698 		if (xdpf->headroom < IFH_LEN_BYTES) {
699 			ret = NETDEV_TX_OK;
700 			goto out;
701 		}
702 
703 		ifh = xdpf->data - IFH_LEN_BYTES;
704 		memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
705 		lan966x_ifh_set_bypass(ifh, 1);
706 		lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
707 
708 		dma_addr = dma_map_single(lan966x->dev,
709 					  xdpf->data - IFH_LEN_BYTES,
710 					  xdpf->len + IFH_LEN_BYTES,
711 					  DMA_TO_DEVICE);
712 		if (dma_mapping_error(lan966x->dev, dma_addr)) {
713 			ret = NETDEV_TX_OK;
714 			goto out;
715 		}
716 
717 		next_dcb_buf->data.xdpf = xdpf;
718 		next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;
719 
720 		/* Setup next dcb */
721 		lan966x_fdma_tx_setup_dcb(tx, next_to_use,
722 					  xdpf->len + IFH_LEN_BYTES,
723 					  dma_addr);
724 	} else {
725 		page = ptr;
726 
727 		ifh = page_address(page) + XDP_PACKET_HEADROOM;
728 		memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
729 		lan966x_ifh_set_bypass(ifh, 1);
730 		lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
731 
732 		dma_addr = page_pool_get_dma_addr(page);
733 		dma_sync_single_for_device(lan966x->dev,
734 					   dma_addr + XDP_PACKET_HEADROOM,
735 					   len + IFH_LEN_BYTES,
736 					   DMA_TO_DEVICE);
737 
738 		next_dcb_buf->data.page = page;
739 		next_dcb_buf->len = len + IFH_LEN_BYTES;
740 
741 		/* Setup next dcb */
742 		lan966x_fdma_tx_setup_dcb(tx, next_to_use,
743 					  len + IFH_LEN_BYTES,
744 					  dma_addr + XDP_PACKET_HEADROOM);
745 	}
746 
747 	/* Fill up the buffer */
748 	next_dcb_buf->use_skb = false;
749 	next_dcb_buf->xdp_ndo = !len;
750 	next_dcb_buf->dma_addr = dma_addr;
751 	next_dcb_buf->used = true;
752 	next_dcb_buf->ptp = false;
753 	next_dcb_buf->dev = port->dev;
754 
755 	/* Start the transmission */
756 	lan966x_fdma_tx_start(tx, next_to_use);
757 
758 out:
759 	spin_unlock(&lan966x->tx_lock);
760 
761 	return ret;
762 }
763 
764 int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
765 {
766 	struct lan966x_port *port = netdev_priv(dev);
767 	struct lan966x *lan966x = port->lan966x;
768 	struct lan966x_tx_dcb_buf *next_dcb_buf;
769 	struct lan966x_tx *tx = &lan966x->tx;
770 	int needed_headroom;
771 	int needed_tailroom;
772 	dma_addr_t dma_addr;
773 	int next_to_use;
774 	int err;
775 
776 	/* Get next index */
777 	next_to_use = lan966x_fdma_get_next_dcb(tx);
778 	if (next_to_use < 0) {
779 		netif_stop_queue(dev);
780 		return NETDEV_TX_BUSY;
781 	}
782 
783 	if (skb_put_padto(skb, ETH_ZLEN)) {
784 		dev->stats.tx_dropped++;
785 		return NETDEV_TX_OK;
786 	}
787 
788 	/* skb processing */
789 	needed_headroom = max_t(int, IFH_LEN_BYTES - skb_headroom(skb), 0);
790 	needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
791 	if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
792 		err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
793 				       GFP_ATOMIC);
794 		if (unlikely(err)) {
795 			dev->stats.tx_dropped++;
796 			err = NETDEV_TX_OK;
797 			goto release;
798 		}
799 	}
800 
801 	skb_tx_timestamp(skb);
802 	skb_push(skb, IFH_LEN_BYTES);
803 	memcpy(skb->data, ifh, IFH_LEN_BYTES);
804 	skb_put(skb, 4);
805 
806 	dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
807 				  DMA_TO_DEVICE);
808 	if (dma_mapping_error(lan966x->dev, dma_addr)) {
809 		dev->stats.tx_dropped++;
810 		err = NETDEV_TX_OK;
811 		goto release;
812 	}
813 
814 	/* Setup next dcb */
815 	lan966x_fdma_tx_setup_dcb(tx, next_to_use, skb->len, dma_addr);
816 
817 	/* Fill up the buffer */
818 	next_dcb_buf = &tx->dcbs_buf[next_to_use];
819 	next_dcb_buf->use_skb = true;
820 	next_dcb_buf->data.skb = skb;
821 	next_dcb_buf->xdp_ndo = false;
822 	next_dcb_buf->len = skb->len;
823 	next_dcb_buf->dma_addr = dma_addr;
824 	next_dcb_buf->used = true;
825 	next_dcb_buf->ptp = false;
826 	next_dcb_buf->dev = dev;
827 
828 	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
829 	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
830 		next_dcb_buf->ptp = true;
831 
832 	/* Start the transmission */
833 	lan966x_fdma_tx_start(tx, next_to_use);
834 
835 	return NETDEV_TX_OK;
836 
837 release:
838 	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
839 	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
840 		lan966x_ptp_txtstamp_release(port, skb);
841 
842 	dev_kfree_skb_any(skb);
843 	return err;
844 }
845 
846 static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x)
847 {
848 	int max_mtu = 0;
849 	int i;
850 
851 	for (i = 0; i < lan966x->num_phys_ports; ++i) {
852 		struct lan966x_port *port;
853 		int mtu;
854 
855 		port = lan966x->ports[i];
856 		if (!port)
857 			continue;
858 
859 		mtu = lan_rd(lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
860 		if (mtu > max_mtu)
861 			max_mtu = mtu;
862 	}
863 
864 	return max_mtu;
865 }
866 
867 static int lan966x_qsys_sw_status(struct lan966x *lan966x)
868 {
869 	return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT));
870 }
871 
872 static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
873 {
874 	struct page_pool *page_pool;
875 	dma_addr_t rx_dma;
876 	void *rx_dcbs;
877 	u32 size;
878 	int err;
879 
880 	/* Store these for later to free them */
881 	rx_dma = lan966x->rx.fdma.dma;
882 	rx_dcbs = lan966x->rx.fdma.dcbs;
883 	page_pool = lan966x->rx.page_pool;
884 
885 	napi_synchronize(&lan966x->napi);
886 	napi_disable(&lan966x->napi);
887 	lan966x_fdma_stop_netdev(lan966x);
888 
889 	lan966x_fdma_rx_disable(&lan966x->rx);
890 	lan966x_fdma_rx_free_pages(&lan966x->rx);
891 	lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
892 	lan966x->rx.max_mtu = new_mtu;
893 	err = lan966x_fdma_rx_alloc(&lan966x->rx);
894 	if (err)
895 		goto restore;
896 	lan966x_fdma_rx_start(&lan966x->rx);
897 
898 	size = sizeof(struct fdma_dcb) * lan966x->rx.fdma.n_dcbs;
899 	size = ALIGN(size, PAGE_SIZE);
900 	dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);
901 
902 	page_pool_destroy(page_pool);
903 
904 	lan966x_fdma_wakeup_netdev(lan966x);
905 	napi_enable(&lan966x->napi);
906 
907 	return err;
908 restore:
909 	lan966x->rx.page_pool = page_pool;
910 	lan966x->rx.fdma.dma = rx_dma;
911 	lan966x->rx.fdma.dcbs = rx_dcbs;
912 	lan966x_fdma_rx_start(&lan966x->rx);
913 
914 	return err;
915 }
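
/* Editor's worked example (not part of this driver): the page order chosen in
 * lan966x_fdma_reload() is round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1, i.e.
 * "pages needed minus one", which always gives 2^order pages >= pages needed.
 * Assuming PAGE_SIZE = 4096:
 *
 *	new_mtu = 1782   -> 1 page needed  -> order 0 (4 KiB buffer)
 *	new_mtu = 6000   -> 2 pages needed -> order 1 (8 KiB buffer)
 *	new_mtu = 10240  -> 3 pages needed -> order 2 (16 KiB buffer)
 */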
916 
917 static int lan966x_fdma_get_max_frame(struct lan966x *lan966x)
918 {
919 	return lan966x_fdma_get_max_mtu(lan966x) +
920 	       IFH_LEN_BYTES +
921 	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
922 	       VLAN_HLEN * 2 +
923 	       XDP_PACKET_HEADROOM;
924 }
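
/* Editor's worked example (not part of this driver): with the common values
 * XDP_PACKET_HEADROOM = 256 and VLAN_HLEN = 4, and assuming for this sketch
 * IFH_LEN_BYTES = 28 and SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) = 320
 * on a 64-bit build, a MAC max-length of 1518 gives:
 *
 *	1518 + 28 + 320 + 2 * 4 + 256 = 2130 bytes per RX buffer
 *
 * which still fits in a single order-0 page.
 */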
925 
926 static int __lan966x_fdma_reload(struct lan966x *lan966x, int max_mtu)
927 {
928 	int err;
929 	u32 val;
930 
931 	/* Disable the CPU port */
932 	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
933 		QSYS_SW_PORT_MODE_PORT_ENA,
934 		lan966x, QSYS_SW_PORT_MODE(CPU_PORT));
935 
936 	/* Flush the CPU queues */
937 	readx_poll_timeout(lan966x_qsys_sw_status, lan966x,
938 			   val, !(QSYS_SW_STATUS_EQ_AVAIL_GET(val)),
939 			   READL_SLEEP_US, READL_TIMEOUT_US);
940 
941 	/* Add a sleep in case there are frames between the queues and the CPU
942 	 * port
943 	 */
944 	usleep_range(1000, 2000);
945 
946 	err = lan966x_fdma_reload(lan966x, max_mtu);
947 
948 	/* Enable back the CPU port */
949 	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1),
950 		QSYS_SW_PORT_MODE_PORT_ENA,
951 		lan966x,  QSYS_SW_PORT_MODE(CPU_PORT));
952 
953 	return err;
954 }
955 
956 int lan966x_fdma_change_mtu(struct lan966x *lan966x)
957 {
958 	int max_mtu;
959 
960 	max_mtu = lan966x_fdma_get_max_frame(lan966x);
961 	if (max_mtu == lan966x->rx.max_mtu)
962 		return 0;
963 
964 	return __lan966x_fdma_reload(lan966x, max_mtu);
965 }
966 
967 int lan966x_fdma_reload_page_pool(struct lan966x *lan966x)
968 {
969 	int max_mtu;
970 
971 	max_mtu = lan966x_fdma_get_max_frame(lan966x);
972 	return __lan966x_fdma_reload(lan966x, max_mtu);
973 }
974 
975 void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
976 {
977 	if (lan966x->fdma_ndev)
978 		return;
979 
980 	lan966x->fdma_ndev = dev;
981 	netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll);
982 	napi_enable(&lan966x->napi);
983 }
984 
985 void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev)
986 {
987 	if (lan966x->fdma_ndev == dev) {
988 		netif_napi_del(&lan966x->napi);
989 		lan966x->fdma_ndev = NULL;
990 	}
991 }
992 
993 int lan966x_fdma_init(struct lan966x *lan966x)
994 {
995 	int err;
996 
997 	if (!lan966x->fdma)
998 		return 0;
999 
1000 	lan966x->rx.lan966x = lan966x;
1001 	lan966x->rx.fdma.channel_id = FDMA_XTR_CHANNEL;
1002 	lan966x->rx.fdma.n_dcbs = FDMA_DCB_MAX;
1003 	lan966x->rx.fdma.n_dbs = FDMA_RX_DCB_MAX_DBS;
1004 	lan966x->rx.fdma.priv = lan966x;
1005 	lan966x->rx.fdma.size = fdma_get_size(&lan966x->rx.fdma);
1006 	lan966x->rx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;
1007 	lan966x->rx.fdma.ops.nextptr_cb = &fdma_nextptr_cb;
1008 	lan966x->rx.fdma.ops.dataptr_cb = &lan966x_fdma_rx_dataptr_cb;
1009 	lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x);
1010 	lan966x->tx.lan966x = lan966x;
1011 	lan966x->tx.fdma.channel_id = FDMA_INJ_CHANNEL;
1012 	lan966x->tx.fdma.n_dcbs = FDMA_DCB_MAX;
1013 	lan966x->tx.fdma.n_dbs = FDMA_TX_DCB_MAX_DBS;
1014 	lan966x->tx.last_in_use = -1;
1015 
1016 	err = lan966x_fdma_rx_alloc(&lan966x->rx);
1017 	if (err)
1018 		return err;
1019 
1020 	err = lan966x_fdma_tx_alloc(&lan966x->tx);
1021 	if (err) {
1022 		lan966x_fdma_rx_free(&lan966x->rx);
1023 		return err;
1024 	}
1025 
1026 	lan966x_fdma_rx_start(&lan966x->rx);
1027 
1028 	return 0;
1029 }
1030 
1031 void lan966x_fdma_deinit(struct lan966x *lan966x)
1032 {
1033 	if (!lan966x->fdma)
1034 		return;
1035 
1036 	lan966x_fdma_rx_disable(&lan966x->rx);
1037 	lan966x_fdma_tx_disable(&lan966x->tx);
1038 
1039 	napi_synchronize(&lan966x->napi);
1040 	napi_disable(&lan966x->napi);
1041 
1042 	lan966x_fdma_rx_free_pages(&lan966x->rx);
1043 	lan966x_fdma_rx_free(&lan966x->rx);
1044 	page_pool_destroy(lan966x->rx.page_pool);
1045 	lan966x_fdma_tx_free(&lan966x->tx);
1046 }
1047