// SPDX-License-Identifier: GPL-2.0+

#include <linux/bpf.h>
#include <linux/filter.h>
#include <net/page_pool/helpers.h>

#include "lan966x_main.h"

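/* RX dataptr callback used by the common FDMA library: allocate a page from
 * the RX page pool for the given DCB/DB slot and return its DMA address,
 * offset by XDP_PACKET_HEADROOM so received frames keep headroom for XDP.
 */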
static int lan966x_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db,
				      u64 *dataptr)
{
	struct lan966x *lan966x = (struct lan966x *)fdma->priv;
	struct lan966x_rx *rx = &lan966x->rx;
	struct page *page;

	page = page_pool_dev_alloc_pages(rx->page_pool);
	if (unlikely(!page))
		return -ENOMEM;

	rx->page[dcb][db] = page;
	*dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;

	return 0;
}

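/* TX dataptr callback: the buffer was already DMA-mapped by the xmit path,
 * so just hand back the DMA address stored in the DCB bookkeeping entry.
 */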
static int lan966x_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
				      u64 *dataptr)
{
	struct lan966x *lan966x = (struct lan966x *)fdma->priv;

	*dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr;

	return 0;
}

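/* TX dataptr callback for XDP transmissions: same as above, but the returned
 * DMA address is advanced by XDP_PACKET_HEADROOM.
 */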
static int lan966x_fdma_xdp_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
					  u64 *dataptr)
{
	struct lan966x *lan966x = (struct lan966x *)fdma->priv;

	*dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr + XDP_PACKET_HEADROOM;

	return 0;
}

static int lan966x_fdma_channel_active(struct lan966x *lan966x)
{
	return lan_rd(lan966x, FDMA_CH_ACTIVE);
}

static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
{
	struct fdma *fdma = &rx->fdma;
	int i, j;

	for (i = 0; i < fdma->n_dcbs; ++i) {
		for (j = 0; j < fdma->n_dbs; ++j)
			page_pool_put_full_page(rx->page_pool,
						rx->page[i][j], false);
	}
}

static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx)
{
	struct fdma *fdma = &rx->fdma;
	struct page *page;

	page = rx->page[fdma->dcb_index][fdma->db_index];
	if (unlikely(!page))
		return;

	page_pool_recycle_direct(rx->page_pool, page);
}

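/* Create the RX page pool and (re)register it as the memory model of every
 * port's XDP RX queue. DMA mapping and sync are delegated to the pool; the
 * DMA direction becomes bidirectional once an XDP program is attached so
 * that XDP_TX can transmit directly out of RX pages.
 */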
static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct page_pool_params pp_params = {
		.order = rx->page_order,
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = rx->fdma.n_dcbs,
		.nid = NUMA_NO_NODE,
		.dev = lan966x->dev,
		.dma_dir = DMA_FROM_DEVICE,
		.offset = XDP_PACKET_HEADROOM,
		.max_len = rx->max_mtu -
			   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
	};

	if (lan966x_xdp_present(lan966x))
		pp_params.dma_dir = DMA_BIDIRECTIONAL;

	rx->page_pool = page_pool_create(&pp_params);

	for (int i = 0; i < lan966x->num_phys_ports; ++i) {
		struct lan966x_port *port;

		if (!lan966x->ports[i])
			continue;

		port = lan966x->ports[i];
		xdp_rxq_info_unreg_mem_model(&port->xdp_rxq);
		xdp_rxq_info_reg_mem_model(&port->xdp_rxq, MEM_TYPE_PAGE_POOL,
					   rx->page_pool);
	}

	return PTR_ERR_OR_ZERO(rx->page_pool);
}

static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct fdma *fdma = &rx->fdma;
	int err;

	if (lan966x_fdma_rx_alloc_page_pool(rx))
		return PTR_ERR(rx->page_pool);

	err = fdma_alloc_coherent(lan966x->dev, fdma);
	if (err)
		return err;

	fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
		       FDMA_DCB_STATUS_INTR);

	return 0;
}

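/* Advance to the next RX DCB; n_dcbs is assumed to be a power of two, so the
 * masking simply wraps the index back to 0 after the last DCB.
 */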
static void lan966x_fdma_rx_advance_dcb(struct lan966x_rx *rx)
{
	struct fdma *fdma = &rx->fdma;

	fdma->dcb_index++;
	fdma->dcb_index &= fdma->n_dcbs - 1;
}

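/* Bring up the RX (extraction) channel: point the hardware at the first DCB,
 * configure the channel, unblock the extraction port, enable the doorbell
 * interrupt and finally activate the channel.
 */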
static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct fdma *fdma = &rx->fdma;
	u32 mask;

	/* When activating a channel, the address of the first DCB must be
	 * written before the channel is activated.
	 */
	lan_wr(lower_32_bits((u64)fdma->dma), lan966x,
	       FDMA_DCB_LLP(fdma->channel_id));
	lan_wr(upper_32_bits((u64)fdma->dma), lan966x,
	       FDMA_DCB_LLP1(fdma->channel_id));

	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
	       FDMA_CH_CFG_CH_MEM_SET(1),
	       lan966x, FDMA_CH_CFG(fdma->channel_id));

	/* Start fdma */
	lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
		FDMA_PORT_CTRL_XTR_STOP,
		lan966x, FDMA_PORT_CTRL(0));

	/* Enable interrupts */
	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
	mask |= BIT(fdma->channel_id);
	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
		FDMA_INTR_DB_ENA_INTR_DB_ENA,
		lan966x, FDMA_INTR_DB_ENA);

	/* Activate the channel */
	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)),
		FDMA_CH_ACTIVATE_CH_ACTIVATE,
		lan966x, FDMA_CH_ACTIVATE);
}

static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct fdma *fdma = &rx->fdma;
	u32 val;

	/* Disable the channel */
	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)),
		FDMA_CH_DISABLE_CH_DISABLE,
		lan966x, FDMA_CH_DISABLE);

	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
				  val, !(val & BIT(fdma->channel_id)),
				  READL_SLEEP_US, READL_TIMEOUT_US);

	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)),
		FDMA_CH_DB_DISCARD_DB_DISCARD,
		lan966x, FDMA_CH_DB_DISCARD);
}

static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;

	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->fdma.channel_id)),
		FDMA_CH_RELOAD_CH_RELOAD,
		lan966x, FDMA_CH_RELOAD);
}

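/* Allocate the software bookkeeping array for the TX DCBs together with the
 * coherent DMA memory holding the hardware DCB ring itself.
 */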
static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	struct fdma *fdma = &tx->fdma;
	int err;

	tx->dcbs_buf = kcalloc(fdma->n_dcbs, sizeof(struct lan966x_tx_dcb_buf),
			       GFP_KERNEL);
	if (!tx->dcbs_buf)
		return -ENOMEM;

	err = fdma_alloc_coherent(lan966x->dev, fdma);
	if (err)
		goto out;

	fdma_dcbs_init(fdma, 0, 0);

	return 0;

out:
	kfree(tx->dcbs_buf);
	return err;
}

static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;

	kfree(tx->dcbs_buf);
	fdma_free_coherent(lan966x->dev, &tx->fdma);
}

static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	struct fdma *fdma = &tx->fdma;
	u32 mask;

	/* When activating a channel, the address of the first DCB must be
	 * written before the channel is activated.
	 */
	lan_wr(lower_32_bits((u64)fdma->dma), lan966x,
	       FDMA_DCB_LLP(fdma->channel_id));
	lan_wr(upper_32_bits((u64)fdma->dma), lan966x,
	       FDMA_DCB_LLP1(fdma->channel_id));

	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
	       FDMA_CH_CFG_CH_MEM_SET(1),
	       lan966x, FDMA_CH_CFG(fdma->channel_id));

	/* Start fdma */
	lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
		FDMA_PORT_CTRL_INJ_STOP,
		lan966x, FDMA_PORT_CTRL(0));

	/* Enable interrupts */
	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
	mask |= BIT(fdma->channel_id);
	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
		FDMA_INTR_DB_ENA_INTR_DB_ENA,
		lan966x, FDMA_INTR_DB_ENA);

	/* Activate the channel */
	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)),
		FDMA_CH_ACTIVATE_CH_ACTIVATE,
		lan966x, FDMA_CH_ACTIVATE);
}

static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	struct fdma *fdma = &tx->fdma;
	u32 val;

	/* Disable the channel */
	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)),
		FDMA_CH_DISABLE_CH_DISABLE,
		lan966x, FDMA_CH_DISABLE);

	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
				  val, !(val & BIT(fdma->channel_id)),
				  READL_SLEEP_US, READL_TIMEOUT_US);

	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)),
		FDMA_CH_DB_DISCARD_DB_DISCARD,
		lan966x, FDMA_CH_DB_DISCARD);

	tx->activated = false;
}

static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;

	/* Write the registers to reload the channel */
	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->fdma.channel_id)),
		FDMA_CH_RELOAD_CH_RELOAD,
		lan966x, FDMA_CH_RELOAD);
}

static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x)
{
	struct lan966x_port *port;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		port = lan966x->ports[i];
		if (!port)
			continue;

		if (netif_queue_stopped(port->dev))
			netif_wake_queue(port->dev);
	}
}

static void lan966x_fdma_stop_netdev(struct lan966x *lan966x)
{
	struct lan966x_port *port;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		port = lan966x->ports[i];
		if (!port)
			continue;

		netif_stop_queue(port->dev);
	}
}

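/* Reclaim TX buffers whose DCBs the hardware has marked as done: update the
 * stats, then unmap and free the skb, XDP frame or page pool page that was
 * attached, and wake the netdev queues if anything was released.
 */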
static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
{
	struct lan966x_tx *tx = &lan966x->tx;
	struct lan966x_rx *rx = &lan966x->rx;
	struct lan966x_tx_dcb_buf *dcb_buf;
	struct fdma *fdma = &tx->fdma;
	struct xdp_frame_bulk bq;
	unsigned long flags;
	bool clear = false;
	struct fdma_db *db;
	int i;

	xdp_frame_bulk_init(&bq);

	spin_lock_irqsave(&lan966x->tx_lock, flags);
	for (i = 0; i < fdma->n_dcbs; ++i) {
		dcb_buf = &tx->dcbs_buf[i];

		if (!dcb_buf->used)
			continue;

		db = &fdma->dcbs[i].db[0];
		if (!(db->status & FDMA_DCB_STATUS_DONE))
			continue;

		dcb_buf->dev->stats.tx_packets++;
		dcb_buf->dev->stats.tx_bytes += dcb_buf->len;

		dcb_buf->used = false;
		if (dcb_buf->use_skb) {
			dma_unmap_single(lan966x->dev,
					 dcb_buf->dma_addr,
					 dcb_buf->len,
					 DMA_TO_DEVICE);

			if (!dcb_buf->ptp)
				napi_consume_skb(dcb_buf->data.skb, weight);
		} else if (dcb_buf->xdp_ndo) {
			dma_unmap_single(lan966x->dev,
					 dcb_buf->dma_addr,
					 dcb_buf->len,
					 DMA_TO_DEVICE);

			xdp_return_frame_bulk(dcb_buf->data.xdpf, &bq);
		} else {
			page_pool_recycle_direct(rx->page_pool,
						 dcb_buf->data.page);
		}

		clear = true;
	}

	xdp_flush_frame_bulk(&bq);

	if (clear)
		lan966x_fdma_wakeup_netdev(lan966x);

	spin_unlock_irqrestore(&lan966x->tx_lock, flags);
}

static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
{
	struct fdma *fdma = &rx->fdma;
	struct fdma_db *db;

	/* Check if there is any data */
	db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
	if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE)))
		return false;

	return true;
}

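/* Inspect the current RX buffer: sync it for the CPU, extract the source
 * port from the IFH and, if an XDP program is attached on that port, run it.
 * Returns one of the FDMA_* verdicts handled by the NAPI poll loop.
 */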
static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
{
	struct lan966x *lan966x = rx->lan966x;
	struct fdma *fdma = &rx->fdma;
	struct lan966x_port *port;
	struct fdma_db *db;
	struct page *page;

	db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
	page = rx->page[fdma->dcb_index][fdma->db_index];
	if (unlikely(!page))
		return FDMA_ERROR;

	dma_sync_single_for_cpu(lan966x->dev,
				(dma_addr_t)db->dataptr + XDP_PACKET_HEADROOM,
				FDMA_DCB_STATUS_BLOCKL(db->status),
				DMA_FROM_DEVICE);

	lan966x_ifh_get_src_port(page_address(page) + XDP_PACKET_HEADROOM,
				 src_port);
	if (WARN_ON(*src_port >= lan966x->num_phys_ports))
		return FDMA_ERROR;

	port = lan966x->ports[*src_port];
	if (!lan966x_xdp_port_present(port))
		return FDMA_PASS;

	return lan966x_xdp_run(port, page, FDMA_DCB_STATUS_BLOCKL(db->status));
}

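/* Wrap the current RX page in an skb: reserve the XDP headroom, strip the
 * IFH (and the FCS unless NETIF_F_RXFCS is requested), attach the RX
 * timestamp and set the forwarding offload mark for bridged ports.
 */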
static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
						 u64 src_port)
{
	struct lan966x *lan966x = rx->lan966x;
	struct fdma *fdma = &rx->fdma;
	struct sk_buff *skb;
	struct fdma_db *db;
	struct page *page;
	u64 timestamp;

	/* Get the received frame and build an skb around it */
	db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
	page = rx->page[fdma->dcb_index][fdma->db_index];

	skb = build_skb(page_address(page), fdma->db_size);
	if (unlikely(!skb))
		goto free_page;

	skb_mark_for_recycle(skb);

	skb_reserve(skb, XDP_PACKET_HEADROOM);
	skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));

	lan966x_ifh_get_timestamp(skb->data, &timestamp);

	skb->dev = lan966x->ports[src_port]->dev;
	skb_pull(skb, IFH_LEN_BYTES);

	if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
		skb_trim(skb, skb->len - ETH_FCS_LEN);

	lan966x_ptp_rxtstamp(lan966x, skb, src_port, timestamp);
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (lan966x->bridge_mask & BIT(src_port)) {
		skb->offload_fwd_mark = 1;

		skb_reset_network_header(skb);
		if (!lan966x_hw_offload(lan966x, src_port, skb))
			skb->offload_fwd_mark = 0;
	}

	skb->dev->stats.rx_bytes += skb->len;
	skb->dev->stats.rx_packets++;

	return skb;

free_page:
	page_pool_recycle_direct(rx->page_pool, page);

	return NULL;
}

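/* NAPI poll: first reclaim completed TX buffers, then handle up to 'weight'
 * received frames, re-arm the consumed RX DCBs with fresh pages and, once
 * the budget is no longer exhausted, re-enable the doorbell interrupts.
 */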
static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
{
	struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
	struct lan966x_rx *rx = &lan966x->rx;
	int old_dcb, dcb_reload, counter = 0;
	struct fdma *fdma = &rx->fdma;
	bool redirect = false;
	struct sk_buff *skb;
	u64 src_port;

	dcb_reload = fdma->dcb_index;

	lan966x_fdma_tx_clear_buf(lan966x, weight);

	/* Get all the received skbs */
	while (counter < weight) {
		if (!lan966x_fdma_rx_more_frames(rx))
			break;

		counter++;

		switch (lan966x_fdma_rx_check_frame(rx, &src_port)) {
		case FDMA_PASS:
			break;
		case FDMA_ERROR:
			lan966x_fdma_rx_free_page(rx);
			lan966x_fdma_rx_advance_dcb(rx);
			goto allocate_new;
		case FDMA_REDIRECT:
			redirect = true;
			fallthrough;
		case FDMA_TX:
			lan966x_fdma_rx_advance_dcb(rx);
			continue;
		case FDMA_DROP:
			lan966x_fdma_rx_free_page(rx);
			lan966x_fdma_rx_advance_dcb(rx);
			continue;
		}

		skb = lan966x_fdma_rx_get_frame(rx, src_port);
		lan966x_fdma_rx_advance_dcb(rx);
		if (!skb)
			goto allocate_new;

		napi_gro_receive(&lan966x->napi, skb);
	}

allocate_new:
	/* Allocate new pages and map them */
	while (dcb_reload != fdma->dcb_index) {
		old_dcb = dcb_reload;
		dcb_reload++;
		dcb_reload &= fdma->n_dcbs - 1;

		fdma_dcb_add(fdma, old_dcb, FDMA_DCB_INFO_DATAL(fdma->db_size),
			     FDMA_DCB_STATUS_INTR);

		lan966x_fdma_rx_reload(rx);
	}

	if (redirect)
		xdp_do_flush();

	if (counter < weight && napi_complete_done(napi, counter))
		lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);

	return counter;
}

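/* FDMA interrupt handler: acknowledge doorbell interrupts and keep them
 * masked until the NAPI poll re-enables them; any FDMA error is only
 * reported and acknowledged.
 */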
irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
{
	struct lan966x *lan966x = args;
	u32 db, err, err_type;

	db = lan_rd(lan966x, FDMA_INTR_DB);
	err = lan_rd(lan966x, FDMA_INTR_ERR);

	if (db) {
		lan_wr(0, lan966x, FDMA_INTR_DB_ENA);
		lan_wr(db, lan966x, FDMA_INTR_DB);

		napi_schedule(&lan966x->napi);
	}

	if (err) {
		err_type = lan_rd(lan966x, FDMA_ERRORS);

		WARN(1, "Unexpected error: %d, error_type: %d\n", err, err_type);

		lan_wr(err, lan966x, FDMA_INTR_ERR);
		lan_wr(err_type, lan966x, FDMA_ERRORS);
	}

	return IRQ_HANDLED;
}

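/* Find a free TX DCB, skipping the entry currently used as the last DCB of
 * the hardware chain. Returns -1 when the ring is full.
 */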
static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
{
	struct lan966x_tx_dcb_buf *dcb_buf;
	struct fdma *fdma = &tx->fdma;
	int i;

	for (i = 0; i < fdma->n_dcbs; ++i) {
		dcb_buf = &tx->dcbs_buf[i];
		if (!dcb_buf->used && &fdma->dcbs[i] != fdma->last_dcb)
			return i;
	}

	return -1;
}

static void lan966x_fdma_tx_start(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;

	if (likely(lan966x->tx.activated)) {
		lan966x_fdma_tx_reload(tx);
	} else {
		/* The channel is used for the first time, so just activate it */
		lan966x->tx.activated = true;
		lan966x_fdma_tx_activate(tx);
	}
}

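/* Transmit an XDP frame. A zero 'len' means 'ptr' is an xdp_frame coming
 * from ndo_xdp_xmit and must be DMA-mapped here; otherwise 'ptr' is a
 * page pool page from an XDP_TX action that only needs a DMA sync. In both
 * cases an IFH is written in front of the frame data first.
 */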
int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
{
	struct lan966x *lan966x = port->lan966x;
	struct lan966x_tx_dcb_buf *next_dcb_buf;
	struct lan966x_tx *tx = &lan966x->tx;
	struct xdp_frame *xdpf;
	dma_addr_t dma_addr;
	struct page *page;
	int next_to_use;
	__be32 *ifh;
	int ret = 0;

	spin_lock(&lan966x->tx_lock);

	/* Get next index */
	next_to_use = lan966x_fdma_get_next_dcb(tx);
	if (next_to_use < 0) {
		netif_stop_queue(port->dev);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Get the next buffer */
	next_dcb_buf = &tx->dcbs_buf[next_to_use];

	/* Generate new IFH */
	if (!len) {
		xdpf = ptr;

		if (xdpf->headroom < IFH_LEN_BYTES) {
			ret = NETDEV_TX_OK;
			goto out;
		}

		ifh = xdpf->data - IFH_LEN_BYTES;
		memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
		lan966x_ifh_set_bypass(ifh, 1);
		lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));

		dma_addr = dma_map_single(lan966x->dev,
					  xdpf->data - IFH_LEN_BYTES,
					  xdpf->len + IFH_LEN_BYTES,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(lan966x->dev, dma_addr)) {
			ret = NETDEV_TX_OK;
			goto out;
		}

		next_dcb_buf->data.xdpf = xdpf;
		next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;
	} else {
		page = ptr;

		ifh = page_address(page) + XDP_PACKET_HEADROOM;
		memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
		lan966x_ifh_set_bypass(ifh, 1);
		lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));

		dma_addr = page_pool_get_dma_addr(page);
		dma_sync_single_for_device(lan966x->dev,
					   dma_addr + XDP_PACKET_HEADROOM,
					   len + IFH_LEN_BYTES,
					   DMA_TO_DEVICE);

		next_dcb_buf->data.page = page;
		next_dcb_buf->len = len + IFH_LEN_BYTES;
	}

	/* Fill up the buffer */
	next_dcb_buf->use_skb = false;
	next_dcb_buf->xdp_ndo = !len;
	next_dcb_buf->dma_addr = dma_addr;
	next_dcb_buf->used = true;
	next_dcb_buf->ptp = false;
	next_dcb_buf->dev = port->dev;

	__fdma_dcb_add(&tx->fdma,
		       next_to_use,
		       0,
		       FDMA_DCB_STATUS_INTR |
		       FDMA_DCB_STATUS_SOF |
		       FDMA_DCB_STATUS_EOF |
		       FDMA_DCB_STATUS_BLOCKO(0) |
		       FDMA_DCB_STATUS_BLOCKL(next_dcb_buf->len),
		       &fdma_nextptr_cb,
		       &lan966x_fdma_xdp_tx_dataptr_cb);

	/* Start the transmission */
	lan966x_fdma_tx_start(tx);

out:
	spin_unlock(&lan966x->tx_lock);

	return ret;
}

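/* Main FDMA transmit path for skbs: prepend the IFH, append room for the
 * FCS, DMA-map the frame and queue it on the TX channel. Frames flagged for
 * two-step PTP are not freed by the TX cleanup; they are left for the PTP
 * code to release.
 */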
int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;
	struct lan966x_tx_dcb_buf *next_dcb_buf;
	struct lan966x_tx *tx = &lan966x->tx;
	int needed_headroom;
	int needed_tailroom;
	dma_addr_t dma_addr;
	int next_to_use;
	int err;

	/* Get next index */
	next_to_use = lan966x_fdma_get_next_dcb(tx);
	if (next_to_use < 0) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	if (skb_put_padto(skb, ETH_ZLEN)) {
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* skb processing */
	needed_headroom = max_t(int, IFH_LEN_BYTES - skb_headroom(skb), 0);
	needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
	if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
				       GFP_ATOMIC);
		if (unlikely(err)) {
			dev->stats.tx_dropped++;
			err = NETDEV_TX_OK;
			goto release;
		}
	}

	skb_tx_timestamp(skb);
	skb_push(skb, IFH_LEN_BYTES);
	memcpy(skb->data, ifh, IFH_LEN_BYTES);
	skb_put(skb, ETH_FCS_LEN);

	dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(lan966x->dev, dma_addr)) {
		dev->stats.tx_dropped++;
		err = NETDEV_TX_OK;
		goto release;
	}

	/* Fill up the buffer */
	next_dcb_buf = &tx->dcbs_buf[next_to_use];
	next_dcb_buf->use_skb = true;
	next_dcb_buf->data.skb = skb;
	next_dcb_buf->xdp_ndo = false;
	next_dcb_buf->len = skb->len;
	next_dcb_buf->dma_addr = dma_addr;
	next_dcb_buf->used = true;
	next_dcb_buf->ptp = false;
	next_dcb_buf->dev = dev;

	fdma_dcb_add(&tx->fdma,
		     next_to_use,
		     0,
		     FDMA_DCB_STATUS_INTR |
		     FDMA_DCB_STATUS_SOF |
		     FDMA_DCB_STATUS_EOF |
		     FDMA_DCB_STATUS_BLOCKO(0) |
		     FDMA_DCB_STATUS_BLOCKL(skb->len));

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		next_dcb_buf->ptp = true;

	/* Start the transmission */
	lan966x_fdma_tx_start(tx);

	return NETDEV_TX_OK;

release:
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		lan966x_ptp_txtstamp_release(port, skb);

	dev_kfree_skb_any(skb);
	return err;
}

static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x)
{
	int max_mtu = 0;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		struct lan966x_port *port;
		int mtu;

		port = lan966x->ports[i];
		if (!port)
			continue;

		mtu = lan_rd(lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
		if (mtu > max_mtu)
			max_mtu = mtu;
	}

	return max_mtu;
}

static int lan966x_qsys_sw_status(struct lan966x *lan966x)
{
	return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT));
}

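/* Re-create the RX ring and page pool for a new maximum frame size. The old
 * DCBs and page pool are saved up front so they can either be freed once the
 * new ones are up or restored if the allocation fails.
 */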
static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
{
	struct page_pool *page_pool;
	dma_addr_t rx_dma;
	void *rx_dcbs;
	u32 size;
	int err;

	/* Save the current RX resources so they can be freed later */
	rx_dma = lan966x->rx.fdma.dma;
	rx_dcbs = lan966x->rx.fdma.dcbs;
	page_pool = lan966x->rx.page_pool;

	napi_synchronize(&lan966x->napi);
	napi_disable(&lan966x->napi);
	lan966x_fdma_stop_netdev(lan966x);

	lan966x_fdma_rx_disable(&lan966x->rx);
	lan966x_fdma_rx_free_pages(&lan966x->rx);
	lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
	lan966x->rx.max_mtu = new_mtu;
	err = lan966x_fdma_rx_alloc(&lan966x->rx);
	if (err)
		goto restore;
	lan966x_fdma_rx_start(&lan966x->rx);

	size = sizeof(struct fdma_dcb) * lan966x->rx.fdma.n_dcbs;
	size = ALIGN(size, PAGE_SIZE);
	dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);

	page_pool_destroy(page_pool);

	lan966x_fdma_wakeup_netdev(lan966x);
	napi_enable(&lan966x->napi);

	return err;
restore:
	lan966x->rx.page_pool = page_pool;
	lan966x->rx.fdma.dma = rx_dma;
	lan966x->rx.fdma.dcbs = rx_dcbs;
	lan966x_fdma_rx_start(&lan966x->rx);

	return err;
}

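/* Worst-case buffer size needed for one received frame: the largest port MTU
 * plus the IFH, two VLAN tags, the XDP headroom and the skb_shared_info
 * tailroom.
 */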
static int lan966x_fdma_get_max_frame(struct lan966x *lan966x)
{
	return lan966x_fdma_get_max_mtu(lan966x) +
	       IFH_LEN_BYTES +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
	       VLAN_HLEN * 2 +
	       XDP_PACKET_HEADROOM;
}

static int __lan966x_fdma_reload(struct lan966x *lan966x, int max_mtu)
{
	int err;
	u32 val;

	/* Disable the CPU port */
	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
		QSYS_SW_PORT_MODE_PORT_ENA,
		lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

	/* Flush the CPU queues */
	readx_poll_timeout(lan966x_qsys_sw_status, lan966x,
			   val, !(QSYS_SW_STATUS_EQ_AVAIL_GET(val)),
			   READL_SLEEP_US, READL_TIMEOUT_US);

	/* Add a sleep in case there are frames between the queues and the CPU
	 * port
	 */
	usleep_range(1000, 2000);

	err = lan966x_fdma_reload(lan966x, max_mtu);

	/* Re-enable the CPU port */
	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1),
		QSYS_SW_PORT_MODE_PORT_ENA,
		lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

	return err;
}

int lan966x_fdma_change_mtu(struct lan966x *lan966x)
{
	int max_mtu;

	max_mtu = lan966x_fdma_get_max_frame(lan966x);
	if (max_mtu == lan966x->rx.max_mtu)
		return 0;

	return __lan966x_fdma_reload(lan966x, max_mtu);
}

int lan966x_fdma_reload_page_pool(struct lan966x *lan966x)
{
	int max_mtu;

	max_mtu = lan966x_fdma_get_max_frame(lan966x);
	return __lan966x_fdma_reload(lan966x, max_mtu);
}

void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
{
	if (lan966x->fdma_ndev)
		return;

	lan966x->fdma_ndev = dev;
	netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll);
	napi_enable(&lan966x->napi);
}

void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev)
{
	if (lan966x->fdma_ndev == dev) {
		netif_napi_del(&lan966x->napi);
		lan966x->fdma_ndev = NULL;
	}
}

int lan966x_fdma_init(struct lan966x *lan966x)
{
	int err;

	if (!lan966x->fdma)
		return 0;

	lan966x->rx.lan966x = lan966x;
	lan966x->rx.fdma.channel_id = FDMA_XTR_CHANNEL;
	lan966x->rx.fdma.n_dcbs = FDMA_DCB_MAX;
	lan966x->rx.fdma.n_dbs = FDMA_RX_DCB_MAX_DBS;
	lan966x->rx.fdma.priv = lan966x;
	lan966x->rx.fdma.size = fdma_get_size(&lan966x->rx.fdma);
	lan966x->rx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;
	lan966x->rx.fdma.ops.nextptr_cb = &fdma_nextptr_cb;
	lan966x->rx.fdma.ops.dataptr_cb = &lan966x_fdma_rx_dataptr_cb;
	lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x);
	lan966x->tx.lan966x = lan966x;
	lan966x->tx.fdma.channel_id = FDMA_INJ_CHANNEL;
	lan966x->tx.fdma.n_dcbs = FDMA_DCB_MAX;
	lan966x->tx.fdma.n_dbs = FDMA_TX_DCB_MAX_DBS;
	lan966x->tx.fdma.priv = lan966x;
	lan966x->tx.fdma.size = fdma_get_size(&lan966x->tx.fdma);
	lan966x->tx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;
	lan966x->tx.fdma.ops.nextptr_cb = &fdma_nextptr_cb;
	lan966x->tx.fdma.ops.dataptr_cb = &lan966x_fdma_tx_dataptr_cb;

	err = lan966x_fdma_rx_alloc(&lan966x->rx);
	if (err)
		return err;

	err = lan966x_fdma_tx_alloc(&lan966x->tx);
	if (err) {
		fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma);
		return err;
	}

	lan966x_fdma_rx_start(&lan966x->rx);

	return 0;
}

void lan966x_fdma_deinit(struct lan966x *lan966x)
{
	if (!lan966x->fdma)
		return;

	lan966x_fdma_rx_disable(&lan966x->rx);
	lan966x_fdma_tx_disable(&lan966x->tx);

	napi_synchronize(&lan966x->napi);
	napi_disable(&lan966x->napi);

	lan966x_fdma_rx_free_pages(&lan966x->rx);
	fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma);
	page_pool_destroy(lan966x->rx.page_pool);
	lan966x_fdma_tx_free(&lan966x->tx);
}