xref: /linux/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c (revision f51293b3ea89b3028745d8f0c9206df4bdc16905)
// SPDX-License-Identifier: GPL-2.0+

#include <linux/bpf.h>
#include <linux/filter.h>
#include <net/page_pool/helpers.h>

#include "lan966x_main.h"

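/* fdma library callback, invoked for every DB when the rx ring is
 * (re)filled: allocate a page from the page pool and return its DMA
 * address advanced by XDP_PACKET_HEADROOM, so the hardware writes the
 * frame after the headroom reserved for XDP.
 */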
static int lan966x_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db,
				      u64 *dataptr)
{
	struct lan966x *lan966x = (struct lan966x *)fdma->priv;
	struct lan966x_rx *rx = &lan966x->rx;
	struct page *page;

	page = page_pool_dev_alloc_pages(rx->page_pool);
	if (unlikely(!page))
		return -ENOMEM;

	rx->page[dcb][db] = page;
	*dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;

	return 0;
}

static int lan966x_fdma_channel_active(struct lan966x *lan966x)
{
	return lan_rd(lan966x, FDMA_CH_ACTIVE);
}

static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
{
	struct fdma *fdma = &rx->fdma;
	int i, j;

	for (i = 0; i < fdma->n_dcbs; ++i) {
		for (j = 0; j < fdma->n_dbs; ++j)
			page_pool_put_full_page(rx->page_pool,
						rx->page[i][j], false);
	}
}

static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx)
{
	struct fdma *fdma = &rx->fdma;
	struct page *page;

	page = rx->page[fdma->dcb_index][fdma->db_index];
	if (unlikely(!page))
		return;

	page_pool_recycle_direct(rx->page_pool, page);
}

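/* Create the page pool backing the rx ring. Mapping and sync-for-device
 * are delegated to the pool (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV), and
 * the DMA direction becomes bidirectional when an XDP program is loaded,
 * as XDP_TX transmits directly out of rx pages. Each port's XDP rxq info
 * is then re-registered against the new pool.
 */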
static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct page_pool_params pp_params = {
		.order = rx->page_order,
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = rx->fdma.n_dcbs,
		.nid = NUMA_NO_NODE,
		.dev = lan966x->dev,
		.dma_dir = DMA_FROM_DEVICE,
		.offset = XDP_PACKET_HEADROOM,
		.max_len = rx->max_mtu -
			   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
	};

	if (lan966x_xdp_present(lan966x))
		pp_params.dma_dir = DMA_BIDIRECTIONAL;

	rx->page_pool = page_pool_create(&pp_params);

	for (int i = 0; i < lan966x->num_phys_ports; ++i) {
		struct lan966x_port *port;

		if (!lan966x->ports[i])
			continue;

		port = lan966x->ports[i];
		xdp_rxq_info_unreg_mem_model(&port->xdp_rxq);
		xdp_rxq_info_reg_mem_model(&port->xdp_rxq, MEM_TYPE_PAGE_POOL,
					   rx->page_pool);
	}

	return PTR_ERR_OR_ZERO(rx->page_pool);
}

static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct fdma *fdma = &rx->fdma;
	int err;

	if (lan966x_fdma_rx_alloc_page_pool(rx))
		return PTR_ERR(rx->page_pool);

	err = fdma_alloc_coherent(lan966x->dev, fdma);
	if (err)
		return err;

	fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
		       FDMA_DCB_STATUS_INTR);

	return 0;
}

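/* Move to the next DCB; this relies on n_dcbs being a power of two so the
 * index can wrap with a simple mask.
 */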
static void lan966x_fdma_rx_advance_dcb(struct lan966x_rx *rx)
{
	struct fdma *fdma = &rx->fdma;

	fdma->dcb_index++;
	fdma->dcb_index &= fdma->n_dcbs - 1;
}

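/* Bring up the extraction (rx) channel: program the address of the first
 * DCB, configure the channel, unblock the extraction port, unmask the
 * channel's doorbell interrupt and finally activate the channel.
 */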
static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct fdma *fdma = &rx->fdma;
	u32 mask;

	/* When activating a channel, the address of the first DCB must be
	 * written before the channel is activated
	 */
	lan_wr(lower_32_bits((u64)fdma->dma), lan966x,
	       FDMA_DCB_LLP(fdma->channel_id));
	lan_wr(upper_32_bits((u64)fdma->dma), lan966x,
	       FDMA_DCB_LLP1(fdma->channel_id));

	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
	       FDMA_CH_CFG_CH_MEM_SET(1),
	       lan966x, FDMA_CH_CFG(fdma->channel_id));

	/* Start fdma */
	lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
		FDMA_PORT_CTRL_XTR_STOP,
		lan966x, FDMA_PORT_CTRL(0));

	/* Enable interrupts */
	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
	mask |= BIT(fdma->channel_id);
	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
		FDMA_INTR_DB_ENA_INTR_DB_ENA,
		lan966x, FDMA_INTR_DB_ENA);

	/* Activate the channel */
	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)),
		FDMA_CH_ACTIVATE_CH_ACTIVATE,
		lan966x, FDMA_CH_ACTIVATE);
}

static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct fdma *fdma = &rx->fdma;
	u32 val;

	/* Disable the channel */
	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)),
		FDMA_CH_DISABLE_CH_DISABLE,
		lan966x, FDMA_CH_DISABLE);

	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
				  val, !(val & BIT(fdma->channel_id)),
				  READL_SLEEP_US, READL_TIMEOUT_US);

	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)),
		FDMA_CH_DB_DISCARD_DB_DISCARD,
		lan966x, FDMA_CH_DB_DISCARD);
}

static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;

	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->fdma.channel_id)),
		FDMA_CH_RELOAD_CH_RELOAD,
		lan966x, FDMA_CH_RELOAD);
}

static void lan966x_fdma_tx_add_dcb(struct lan966x_tx *tx,
				    struct lan966x_tx_dcb *dcb)
{
	dcb->nextptr = FDMA_DCB_INVALID_DATA;
	dcb->info = 0;
}

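/* Allocate the tx ring: a lan966x_tx_dcb_buf bookkeeping entry per DCB
 * plus a page-aligned coherent area holding the DCBs themselves. All DBs
 * start out empty and each DCB is terminated with FDMA_DCB_INVALID_DATA.
 */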
static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	struct fdma *fdma = &tx->fdma;
	struct lan966x_tx_dcb *dcb;
	struct lan966x_db *db;
	int size;
	int i, j;

	tx->dcbs_buf = kcalloc(fdma->n_dcbs, sizeof(struct lan966x_tx_dcb_buf),
			       GFP_KERNEL);
	if (!tx->dcbs_buf)
		return -ENOMEM;

	/* calculate how many pages are needed to allocate the dcbs */
	size = sizeof(struct lan966x_tx_dcb) * fdma->n_dcbs;
	size = ALIGN(size, PAGE_SIZE);
	tx->dcbs = dma_alloc_coherent(lan966x->dev, size, &tx->dma, GFP_KERNEL);
	if (!tx->dcbs)
		goto out;

	/* Now for each dcb allocate the db */
	for (i = 0; i < fdma->n_dcbs; ++i) {
		dcb = &tx->dcbs[i];

		for (j = 0; j < fdma->n_dbs; ++j) {
			db = &dcb->db[j];
			db->dataptr = 0;
			db->status = 0;
		}

		lan966x_fdma_tx_add_dcb(tx, dcb);
	}

	return 0;

out:
	kfree(tx->dcbs_buf);
	return -ENOMEM;
}

static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	struct fdma *fdma = &tx->fdma;
	int size;

	kfree(tx->dcbs_buf);

	size = sizeof(struct lan966x_tx_dcb) * fdma->n_dcbs;
	size = ALIGN(size, PAGE_SIZE);
	dma_free_coherent(lan966x->dev, size, tx->dcbs, tx->dma);
}

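/* Mirror of lan966x_fdma_rx_start() for the injection (tx) channel. */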
static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	struct fdma *fdma = &tx->fdma;
	u32 mask;

	/* When activating a channel, the address of the first DCB must be
	 * written before the channel is activated
	 */
	lan_wr(lower_32_bits((u64)tx->dma), lan966x,
	       FDMA_DCB_LLP(fdma->channel_id));
	lan_wr(upper_32_bits((u64)tx->dma), lan966x,
	       FDMA_DCB_LLP1(fdma->channel_id));

	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
	       FDMA_CH_CFG_CH_MEM_SET(1),
	       lan966x, FDMA_CH_CFG(fdma->channel_id));

	/* Start fdma */
	lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
		FDMA_PORT_CTRL_INJ_STOP,
		lan966x, FDMA_PORT_CTRL(0));

	/* Enable interrupts */
	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
	mask |= BIT(fdma->channel_id);
	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
		FDMA_INTR_DB_ENA_INTR_DB_ENA,
		lan966x, FDMA_INTR_DB_ENA);

	/* Activate the channel */
	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)),
		FDMA_CH_ACTIVATE_CH_ACTIVATE,
		lan966x, FDMA_CH_ACTIVATE);
}

static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	struct fdma *fdma = &tx->fdma;
	u32 val;

	/* Disable the channel */
	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)),
		FDMA_CH_DISABLE_CH_DISABLE,
		lan966x, FDMA_CH_DISABLE);

	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
				  val, !(val & BIT(fdma->channel_id)),
				  READL_SLEEP_US, READL_TIMEOUT_US);

	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)),
		FDMA_CH_DB_DISCARD_DB_DISCARD,
		lan966x, FDMA_CH_DB_DISCARD);

	tx->activated = false;
	tx->last_in_use = -1;
}

static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;

	/* Write the registers to reload the channel */
	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->fdma.channel_id)),
		FDMA_CH_RELOAD_CH_RELOAD,
		lan966x, FDMA_CH_RELOAD);
}

static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x)
{
	struct lan966x_port *port;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		port = lan966x->ports[i];
		if (!port)
			continue;

		if (netif_queue_stopped(port->dev))
			netif_wake_queue(port->dev);
	}
}

static void lan966x_fdma_stop_netdev(struct lan966x *lan966x)
{
	struct lan966x_port *port;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		port = lan966x->ports[i];
		if (!port)
			continue;

		netif_stop_queue(port->dev);
	}
}

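/* Reap completed tx DCBs under the tx lock: update the stats, then unmap
 * and release the skb, xdp_frame or page attached to each finished DCB.
 * xdp_frames coming from ndo_xdp_xmit are returned in bulk to amortize
 * the per-frame cost. If at least one slot was freed, stopped netdev
 * queues are woken up again.
 */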
static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
{
	struct lan966x_tx *tx = &lan966x->tx;
	struct lan966x_rx *rx = &lan966x->rx;
	struct lan966x_tx_dcb_buf *dcb_buf;
	struct xdp_frame_bulk bq;
	struct lan966x_db *db;
	unsigned long flags;
	bool clear = false;
	int i;

	xdp_frame_bulk_init(&bq);

	spin_lock_irqsave(&lan966x->tx_lock, flags);
	for (i = 0; i < tx->fdma.n_dcbs; ++i) {
		dcb_buf = &tx->dcbs_buf[i];

		if (!dcb_buf->used)
			continue;

		db = &tx->dcbs[i].db[0];
		if (!(db->status & FDMA_DCB_STATUS_DONE))
			continue;

		dcb_buf->dev->stats.tx_packets++;
		dcb_buf->dev->stats.tx_bytes += dcb_buf->len;

		dcb_buf->used = false;
		if (dcb_buf->use_skb) {
			dma_unmap_single(lan966x->dev,
					 dcb_buf->dma_addr,
					 dcb_buf->len,
					 DMA_TO_DEVICE);

			if (!dcb_buf->ptp)
				napi_consume_skb(dcb_buf->data.skb, weight);
		} else {
			if (dcb_buf->xdp_ndo)
				dma_unmap_single(lan966x->dev,
						 dcb_buf->dma_addr,
						 dcb_buf->len,
						 DMA_TO_DEVICE);

			if (dcb_buf->xdp_ndo)
				xdp_return_frame_bulk(dcb_buf->data.xdpf, &bq);
			else
				page_pool_recycle_direct(rx->page_pool,
							 dcb_buf->data.page);
		}

		clear = true;
	}

	xdp_flush_frame_bulk(&bq);

	if (clear)
		lan966x_fdma_wakeup_netdev(lan966x);

	spin_unlock_irqrestore(&lan966x->tx_lock, flags);
}

static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
{
	struct fdma *fdma = &rx->fdma;
	struct fdma_db *db;

	/* Check if there is any data */
	db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
	if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE)))
		return false;

	return true;
}

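/* Inspect the current rx DB: sync the buffer for CPU access, extract the
 * source port from the IFH the hardware prepends to the frame and, if an
 * XDP program runs on that port, let it decide the fate of the page. The
 * returned FDMA_* verdict is consumed by the napi poll loop below.
 */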
static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
{
	struct lan966x *lan966x = rx->lan966x;
	struct fdma *fdma = &rx->fdma;
	struct lan966x_port *port;
	struct fdma_db *db;
	struct page *page;

	db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
	page = rx->page[fdma->dcb_index][fdma->db_index];
	if (unlikely(!page))
		return FDMA_ERROR;

	dma_sync_single_for_cpu(lan966x->dev,
				(dma_addr_t)db->dataptr + XDP_PACKET_HEADROOM,
				FDMA_DCB_STATUS_BLOCKL(db->status),
				DMA_FROM_DEVICE);

	lan966x_ifh_get_src_port(page_address(page) + XDP_PACKET_HEADROOM,
				 src_port);
	if (WARN_ON(*src_port >= lan966x->num_phys_ports))
		return FDMA_ERROR;

	port = lan966x->ports[*src_port];
	if (!lan966x_xdp_port_present(port))
		return FDMA_PASS;

	return lan966x_xdp_run(port, page, FDMA_DCB_STATUS_BLOCKL(db->status));
}

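/* Wrap the rx page in an skb without copying (build_skb), then strip the
 * IFH and, unless the port has NETIF_F_RXFCS set, the trailing FCS. The
 * page remains owned by the page pool via skb_mark_for_recycle().
 */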
static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
						 u64 src_port)
{
	struct lan966x *lan966x = rx->lan966x;
	struct fdma *fdma = &rx->fdma;
	struct sk_buff *skb;
	struct fdma_db *db;
	struct page *page;
	u64 timestamp;

	/* Get the received frame and unmap it */
	db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
	page = rx->page[fdma->dcb_index][fdma->db_index];

	skb = build_skb(page_address(page), fdma->db_size);
	if (unlikely(!skb))
		goto free_page;

	skb_mark_for_recycle(skb);

	skb_reserve(skb, XDP_PACKET_HEADROOM);
	skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));

	lan966x_ifh_get_timestamp(skb->data, &timestamp);

	skb->dev = lan966x->ports[src_port]->dev;
	skb_pull(skb, IFH_LEN_BYTES);

	if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
		skb_trim(skb, skb->len - ETH_FCS_LEN);

	lan966x_ptp_rxtstamp(lan966x, skb, src_port, timestamp);
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (lan966x->bridge_mask & BIT(src_port)) {
		skb->offload_fwd_mark = 1;

		skb_reset_network_header(skb);
		if (!lan966x_hw_offload(lan966x, src_port, skb))
			skb->offload_fwd_mark = 0;
	}

	skb->dev->stats.rx_bytes += skb->len;
	skb->dev->stats.rx_packets++;

	return skb;

free_page:
	page_pool_recycle_direct(rx->page_pool, page);

	return NULL;
}

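/* NAPI poll: reap completed tx buffers first, then receive up to @weight
 * frames, passing each through the XDP verdict switch before handing it
 * to GRO. Consumed DCBs are refilled with fresh pages and the channel is
 * reloaded; the doorbell interrupt is re-enabled only once the budget is
 * left unexhausted and napi completes.
 */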
static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
{
	struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
	struct lan966x_rx *rx = &lan966x->rx;
	int old_dcb, dcb_reload, counter = 0;
	struct fdma *fdma = &rx->fdma;
	bool redirect = false;
	struct sk_buff *skb;
	u64 src_port;

	dcb_reload = fdma->dcb_index;

	lan966x_fdma_tx_clear_buf(lan966x, weight);

	/* Get all received skbs */
	while (counter < weight) {
		if (!lan966x_fdma_rx_more_frames(rx))
			break;

		counter++;

		switch (lan966x_fdma_rx_check_frame(rx, &src_port)) {
		case FDMA_PASS:
			break;
		case FDMA_ERROR:
			lan966x_fdma_rx_free_page(rx);
			lan966x_fdma_rx_advance_dcb(rx);
			goto allocate_new;
		case FDMA_REDIRECT:
			redirect = true;
			fallthrough;
		case FDMA_TX:
			lan966x_fdma_rx_advance_dcb(rx);
			continue;
		case FDMA_DROP:
			lan966x_fdma_rx_free_page(rx);
			lan966x_fdma_rx_advance_dcb(rx);
			continue;
		}

		skb = lan966x_fdma_rx_get_frame(rx, src_port);
		lan966x_fdma_rx_advance_dcb(rx);
		if (!skb)
			goto allocate_new;

		napi_gro_receive(&lan966x->napi, skb);
	}

allocate_new:
	/* Allocate new pages and map them */
	while (dcb_reload != fdma->dcb_index) {
		old_dcb = dcb_reload;
		dcb_reload++;
		dcb_reload &= fdma->n_dcbs - 1;

		fdma_dcb_add(fdma, old_dcb, FDMA_DCB_INFO_DATAL(fdma->db_size),
			     FDMA_DCB_STATUS_INTR);

		lan966x_fdma_rx_reload(rx);
	}

	if (redirect)
		xdp_do_flush();

	if (counter < weight && napi_complete_done(napi, counter))
		lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);

	return counter;
}

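/* FDMA interrupt handler: doorbell interrupts are masked and acked before
 * napi is scheduled (napi re-enables them on completion), while error
 * interrupts are reported with a WARN and then acked.
 */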
irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
{
	struct lan966x *lan966x = args;
	u32 db, err, err_type;

	db = lan_rd(lan966x, FDMA_INTR_DB);
	err = lan_rd(lan966x, FDMA_INTR_ERR);

	if (db) {
		lan_wr(0, lan966x, FDMA_INTR_DB_ENA);
		lan_wr(db, lan966x, FDMA_INTR_DB);

		napi_schedule(&lan966x->napi);
	}

	if (err) {
		err_type = lan_rd(lan966x, FDMA_ERRORS);

		WARN(1, "Unexpected error: %d, error_type: %d\n", err, err_type);

		lan_wr(err, lan966x, FDMA_INTR_ERR);
		lan_wr(err_type, lan966x, FDMA_ERRORS);
	}

	return IRQ_HANDLED;
}

static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
{
	struct lan966x_tx_dcb_buf *dcb_buf;
	struct fdma *fdma = &tx->fdma;
	int i;

	for (i = 0; i < fdma->n_dcbs; ++i) {
		dcb_buf = &tx->dcbs_buf[i];
		if (!dcb_buf->used && i != tx->last_in_use)
			return i;
	}

	return -1;
}

static void lan966x_fdma_tx_setup_dcb(struct lan966x_tx *tx,
				      int next_to_use, int len,
				      dma_addr_t dma_addr)
{
	struct lan966x_tx_dcb *next_dcb;
	struct lan966x_db *next_db;

	next_dcb = &tx->dcbs[next_to_use];
	next_dcb->nextptr = FDMA_DCB_INVALID_DATA;

	next_db = &next_dcb->db[0];
	next_db->dataptr = dma_addr;
	next_db->status = FDMA_DCB_STATUS_SOF |
			  FDMA_DCB_STATUS_EOF |
			  FDMA_DCB_STATUS_INTR |
			  FDMA_DCB_STATUS_BLOCKO(0) |
			  FDMA_DCB_STATUS_BLOCKL(len);
}

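/* Hand a freshly set up DCB over to the hardware. The first transmission
 * has to activate the channel; afterwards it is enough to chain the new
 * DCB behind the last one in use and reload the channel.
 */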
static void lan966x_fdma_tx_start(struct lan966x_tx *tx, int next_to_use)
{
	struct lan966x *lan966x = tx->lan966x;
	struct lan966x_tx_dcb *dcb;

	if (likely(lan966x->tx.activated)) {
		/* Chain the last in-use dcb to the new one */
		dcb = &tx->dcbs[tx->last_in_use];
		dcb->nextptr = tx->dma + (next_to_use *
					  sizeof(struct lan966x_tx_dcb));

		lan966x_fdma_tx_reload(tx);
	} else {
		/* This is the first frame, so just activate the channel */
		lan966x->tx.activated = true;
		lan966x_fdma_tx_activate(tx);
	}

	/* Remember this dcb as the last one handed to the hardware */
	tx->last_in_use = next_to_use;
}

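/* Transmit an XDP buffer. Called with @len == 0 for ndo_xdp_xmit, where
 * @ptr is an xdp_frame that still has to be DMA-mapped here, and with
 * @len != 0 for XDP_TX, where @ptr is an rx page pool page that is
 * already mapped and only needs a sync. In both cases an IFH is
 * prepended with the bypass bit set so that, going by the field's name,
 * the frame bypasses the analyzer and goes straight out on @port.
 */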
int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
{
	struct lan966x *lan966x = port->lan966x;
	struct lan966x_tx_dcb_buf *next_dcb_buf;
	struct lan966x_tx *tx = &lan966x->tx;
	struct xdp_frame *xdpf;
	dma_addr_t dma_addr;
	struct page *page;
	int next_to_use;
	__be32 *ifh;
	int ret = 0;

	spin_lock(&lan966x->tx_lock);

	/* Get next index */
	next_to_use = lan966x_fdma_get_next_dcb(tx);
	if (next_to_use < 0) {
		netif_stop_queue(port->dev);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Get the next buffer */
	next_dcb_buf = &tx->dcbs_buf[next_to_use];

	/* Generate new IFH */
	if (!len) {
		xdpf = ptr;

		if (xdpf->headroom < IFH_LEN_BYTES) {
			ret = NETDEV_TX_OK;
			goto out;
		}

		ifh = xdpf->data - IFH_LEN_BYTES;
		memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
		lan966x_ifh_set_bypass(ifh, 1);
		lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));

		dma_addr = dma_map_single(lan966x->dev,
					  xdpf->data - IFH_LEN_BYTES,
					  xdpf->len + IFH_LEN_BYTES,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(lan966x->dev, dma_addr)) {
			ret = NETDEV_TX_OK;
			goto out;
		}

		next_dcb_buf->data.xdpf = xdpf;
		next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;

		/* Setup next dcb */
		lan966x_fdma_tx_setup_dcb(tx, next_to_use,
					  xdpf->len + IFH_LEN_BYTES,
					  dma_addr);
	} else {
		page = ptr;

		ifh = page_address(page) + XDP_PACKET_HEADROOM;
		memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
		lan966x_ifh_set_bypass(ifh, 1);
		lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));

		dma_addr = page_pool_get_dma_addr(page);
		dma_sync_single_for_device(lan966x->dev,
					   dma_addr + XDP_PACKET_HEADROOM,
					   len + IFH_LEN_BYTES,
					   DMA_TO_DEVICE);

		next_dcb_buf->data.page = page;
		next_dcb_buf->len = len + IFH_LEN_BYTES;

		/* Setup next dcb */
		lan966x_fdma_tx_setup_dcb(tx, next_to_use,
					  len + IFH_LEN_BYTES,
					  dma_addr + XDP_PACKET_HEADROOM);
	}

	/* Fill up the buffer */
	next_dcb_buf->use_skb = false;
	next_dcb_buf->xdp_ndo = !len;
	next_dcb_buf->dma_addr = dma_addr;
	next_dcb_buf->used = true;
	next_dcb_buf->ptp = false;
	next_dcb_buf->dev = port->dev;

	/* Start the transmission */
	lan966x_fdma_tx_start(tx, next_to_use);

out:
	spin_unlock(&lan966x->tx_lock);

	return ret;
}

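/* Main skb transmit path. The caller passes in the pre-built IFH; the skb
 * is padded to the minimum frame size, expanded if it lacks headroom for
 * the IFH or tailroom for the FCS, DMA-mapped and queued on a free DCB.
 * Two-step PTP skbs are not freed on tx completion; presumably the PTP
 * code releases them once the timestamp has been retrieved.
 */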
int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;
	struct lan966x_tx_dcb_buf *next_dcb_buf;
	struct lan966x_tx *tx = &lan966x->tx;
	int needed_headroom;
	int needed_tailroom;
	dma_addr_t dma_addr;
	int next_to_use;
	int err;

	/* Get next index */
	next_to_use = lan966x_fdma_get_next_dcb(tx);
	if (next_to_use < 0) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	if (skb_put_padto(skb, ETH_ZLEN)) {
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* skb processing */
	needed_headroom = max_t(int, IFH_LEN_BYTES - skb_headroom(skb), 0);
	needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
	if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
				       GFP_ATOMIC);
		if (unlikely(err)) {
			dev->stats.tx_dropped++;
			err = NETDEV_TX_OK;
			goto release;
		}
	}

	skb_tx_timestamp(skb);
	skb_push(skb, IFH_LEN_BYTES);
	memcpy(skb->data, ifh, IFH_LEN_BYTES);
	/* Make room for the FCS (ETH_FCS_LEN bytes) at the end of the frame */
	skb_put(skb, 4);

	dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(lan966x->dev, dma_addr)) {
		dev->stats.tx_dropped++;
		err = NETDEV_TX_OK;
		goto release;
	}

	/* Setup next dcb */
	lan966x_fdma_tx_setup_dcb(tx, next_to_use, skb->len, dma_addr);

	/* Fill up the buffer */
	next_dcb_buf = &tx->dcbs_buf[next_to_use];
	next_dcb_buf->use_skb = true;
	next_dcb_buf->data.skb = skb;
	next_dcb_buf->xdp_ndo = false;
	next_dcb_buf->len = skb->len;
	next_dcb_buf->dma_addr = dma_addr;
	next_dcb_buf->used = true;
	next_dcb_buf->ptp = false;
	next_dcb_buf->dev = dev;

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		next_dcb_buf->ptp = true;

	/* Start the transmission */
	lan966x_fdma_tx_start(tx, next_to_use);

	return NETDEV_TX_OK;

release:
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		lan966x_ptp_txtstamp_release(port, skb);

	dev_kfree_skb_any(skb);
	return err;
}

static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x)
{
	int max_mtu = 0;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		struct lan966x_port *port;
		int mtu;

		port = lan966x->ports[i];
		if (!port)
			continue;

		mtu = lan_rd(lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
		if (mtu > max_mtu)
			max_mtu = mtu;
	}

	return max_mtu;
}

static int lan966x_qsys_sw_status(struct lan966x *lan966x)
{
	return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT));
}

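/* Rebuild the rx path for a new maximum frame size: stop napi and the
 * netdev queues, tear down the rx channel, resize the page order and the
 * page pool, then restart. The old DCB area and page pool are kept until
 * the new ones are up, so the previous state can be restored if the
 * allocation fails.
 */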
static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
{
	struct page_pool *page_pool;
	dma_addr_t rx_dma;
	void *rx_dcbs;
	u32 size;
	int err;

	/* Store these for later to free them */
	rx_dma = lan966x->rx.fdma.dma;
	rx_dcbs = lan966x->rx.fdma.dcbs;
	page_pool = lan966x->rx.page_pool;

	napi_synchronize(&lan966x->napi);
	napi_disable(&lan966x->napi);
	lan966x_fdma_stop_netdev(lan966x);

	lan966x_fdma_rx_disable(&lan966x->rx);
	lan966x_fdma_rx_free_pages(&lan966x->rx);
	lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
	lan966x->rx.max_mtu = new_mtu;
	err = lan966x_fdma_rx_alloc(&lan966x->rx);
	if (err)
		goto restore;
	lan966x_fdma_rx_start(&lan966x->rx);

	size = sizeof(struct fdma_dcb) * lan966x->rx.fdma.n_dcbs;
	size = ALIGN(size, PAGE_SIZE);
	dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);

	page_pool_destroy(page_pool);

	lan966x_fdma_wakeup_netdev(lan966x);
	napi_enable(&lan966x->napi);

	return err;
restore:
	lan966x->rx.page_pool = page_pool;
	lan966x->rx.fdma.dma = rx_dma;
	lan966x->rx.fdma.dcbs = rx_dcbs;
	lan966x_fdma_rx_start(&lan966x->rx);

	return err;
}

static int lan966x_fdma_get_max_frame(struct lan966x *lan966x)
{
	return lan966x_fdma_get_max_mtu(lan966x) +
	       IFH_LEN_BYTES +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
	       VLAN_HLEN * 2 +
	       XDP_PACKET_HEADROOM;
}

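/* Wrapper around the rx reload: the CPU port is disabled and its queues
 * are drained first, so no new frames reach the FDMA while the rx path is
 * being rebuilt.
 */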
static int __lan966x_fdma_reload(struct lan966x *lan966x, int max_mtu)
{
	int err;
	u32 val;

	/* Disable the CPU port */
	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
		QSYS_SW_PORT_MODE_PORT_ENA,
		lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

	/* Flush the CPU queues */
	readx_poll_timeout(lan966x_qsys_sw_status, lan966x,
			   val, !(QSYS_SW_STATUS_EQ_AVAIL_GET(val)),
			   READL_SLEEP_US, READL_TIMEOUT_US);

	/* Add a sleep in case there are frames between the queues and the CPU
	 * port
	 */
	usleep_range(1000, 2000);

	err = lan966x_fdma_reload(lan966x, max_mtu);

	/* Re-enable the CPU port */
	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1),
		QSYS_SW_PORT_MODE_PORT_ENA,
		lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

	return err;
}

int lan966x_fdma_change_mtu(struct lan966x *lan966x)
{
	int max_mtu;

	max_mtu = lan966x_fdma_get_max_frame(lan966x);
	if (max_mtu == lan966x->rx.max_mtu)
		return 0;

	return __lan966x_fdma_reload(lan966x, max_mtu);
}

int lan966x_fdma_reload_page_pool(struct lan966x *lan966x)
{
	int max_mtu;

	max_mtu = lan966x_fdma_get_max_frame(lan966x);
	return __lan966x_fdma_reload(lan966x, max_mtu);
}

void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
{
	if (lan966x->fdma_ndev)
		return;

	lan966x->fdma_ndev = dev;
	netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll);
	napi_enable(&lan966x->napi);
}

void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev)
{
	if (lan966x->fdma_ndev == dev) {
		netif_napi_del(&lan966x->napi);
		lan966x->fdma_ndev = NULL;
	}
}

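/* One-time FDMA setup: describe the rx and tx rings (channel ids and
 * DCB/DB counts; the rx ring additionally carries the DB size and the
 * callbacks used by the shared fdma library), allocate both rings and
 * start extraction. The tx channel is activated lazily on the first
 * transmitted frame.
 */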
int lan966x_fdma_init(struct lan966x *lan966x)
{
	int err;

	if (!lan966x->fdma)
		return 0;

	lan966x->rx.lan966x = lan966x;
	lan966x->rx.fdma.channel_id = FDMA_XTR_CHANNEL;
	lan966x->rx.fdma.n_dcbs = FDMA_DCB_MAX;
	lan966x->rx.fdma.n_dbs = FDMA_RX_DCB_MAX_DBS;
	lan966x->rx.fdma.priv = lan966x;
	lan966x->rx.fdma.size = fdma_get_size(&lan966x->rx.fdma);
	lan966x->rx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;
	lan966x->rx.fdma.ops.nextptr_cb = &fdma_nextptr_cb;
	lan966x->rx.fdma.ops.dataptr_cb = &lan966x_fdma_rx_dataptr_cb;
	lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x);
	lan966x->tx.lan966x = lan966x;
	lan966x->tx.fdma.channel_id = FDMA_INJ_CHANNEL;
	lan966x->tx.fdma.n_dcbs = FDMA_DCB_MAX;
	lan966x->tx.fdma.n_dbs = FDMA_TX_DCB_MAX_DBS;
	lan966x->tx.last_in_use = -1;

	err = lan966x_fdma_rx_alloc(&lan966x->rx);
	if (err)
		return err;

	err = lan966x_fdma_tx_alloc(&lan966x->tx);
	if (err) {
		fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma);
		return err;
	}

	lan966x_fdma_rx_start(&lan966x->rx);

	return 0;
}

void lan966x_fdma_deinit(struct lan966x *lan966x)
{
	if (!lan966x->fdma)
		return;

	lan966x_fdma_rx_disable(&lan966x->rx);
	lan966x_fdma_tx_disable(&lan966x->tx);

	napi_synchronize(&lan966x->napi);
	napi_disable(&lan966x->napi);

	lan966x_fdma_rx_free_pages(&lan966x->rx);
	fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma);
	page_pool_destroy(lan966x->rx.page_pool);
	lan966x_fdma_tx_free(&lan966x->tx);
}
1035