// SPDX-License-Identifier: GPL-2.0+

#include <linux/bpf.h>
#include <linux/filter.h>
#include <net/page_pool/helpers.h>

#include "lan966x_main.h"

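/* Callbacks used by the shared Microchip FDMA library to fill in the data
 * pointer of each DCB database block. RX blocks are backed by fresh pages
 * from the page pool; the TX variants point at the buffer that the xmit
 * path already mapped, offset by XDP_PACKET_HEADROOM in the XDP case.
 */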
static int lan966x_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db,
				      u64 *dataptr)
{
	struct lan966x *lan966x = (struct lan966x *)fdma->priv;
	struct lan966x_rx *rx = &lan966x->rx;
	struct page *page;

	page = page_pool_dev_alloc_pages(rx->page_pool);
	if (unlikely(!page))
		return -ENOMEM;

	rx->page[dcb][db] = page;
	*dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;

	return 0;
}

static int lan966x_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
				      u64 *dataptr)
{
	struct lan966x *lan966x = (struct lan966x *)fdma->priv;

	*dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr;

	return 0;
}

static int lan966x_fdma_xdp_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
					  u64 *dataptr)
{
	struct lan966x *lan966x = (struct lan966x *)fdma->priv;

	*dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr + XDP_PACKET_HEADROOM;

	return 0;
}

static int lan966x_fdma_channel_active(struct lan966x *lan966x)
{
	return lan_rd(lan966x, FDMA_CH_ACTIVE);
}

static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
{
	struct fdma *fdma = &rx->fdma;
	int i, j;

	for (i = 0; i < fdma->n_dcbs; ++i) {
		for (j = 0; j < fdma->n_dbs; ++j)
			page_pool_put_full_page(rx->page_pool,
						rx->page[i][j], false);
	}
}

static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx)
{
	struct fdma *fdma = &rx->fdma;
	struct page *page;

	page = rx->page[fdma->dcb_index][fdma->db_index];
	if (unlikely(!page))
		return;

	page_pool_recycle_direct(rx->page_pool, page);
}

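/* (Re)create the RX page pool and point every port's XDP RX-queue memory
 * model at it. The pool reserves XDP_PACKET_HEADROOM in front of each
 * buffer, and pages are mapped DMA_BIDIRECTIONAL when an XDP program is
 * attached so XDP_TX can reuse the same pages without remapping.
 */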
static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct page_pool_params pp_params = {
		.order = rx->page_order,
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = rx->fdma.n_dcbs,
		.nid = NUMA_NO_NODE,
		.dev = lan966x->dev,
		.dma_dir = DMA_FROM_DEVICE,
		.offset = XDP_PACKET_HEADROOM,
		.max_len = rx->max_mtu -
			   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
	};

	if (lan966x_xdp_present(lan966x))
		pp_params.dma_dir = DMA_BIDIRECTIONAL;

	rx->page_pool = page_pool_create(&pp_params);

	for (int i = 0; i < lan966x->num_phys_ports; ++i) {
		struct lan966x_port *port;

		if (!lan966x->ports[i])
			continue;

		port = lan966x->ports[i];
		xdp_rxq_info_unreg_mem_model(&port->xdp_rxq);
		xdp_rxq_info_reg_mem_model(&port->xdp_rxq, MEM_TYPE_PAGE_POOL,
					   rx->page_pool);
	}

	return PTR_ERR_OR_ZERO(rx->page_pool);
}

static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct fdma *fdma = &rx->fdma;
	int err;

	if (lan966x_fdma_rx_alloc_page_pool(rx))
		return PTR_ERR(rx->page_pool);

	err = fdma_alloc_coherent(lan966x->dev, fdma);
	if (err)
		return err;

	fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
		       FDMA_DCB_STATUS_INTR);

	return 0;
}

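/* Advance to the next DCB in the RX ring. The bitwise AND wraps the index
 * and is equivalent to "(dcb_index + 1) % n_dcbs", which only holds
 * because the ring size (FDMA_DCB_MAX) is a power of two.
 */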
static void lan966x_fdma_rx_advance_dcb(struct lan966x_rx *rx)
{
	struct fdma *fdma = &rx->fdma;

	fdma->dcb_index++;
	fdma->dcb_index &= fdma->n_dcbs - 1;
}

static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct fdma *fdma = &rx->fdma;
	u32 mask;

	/* When activating a channel, the address of the first DCB must be
	 * written before the channel is activated
	 */
	lan_wr(lower_32_bits((u64)fdma->dma), lan966x,
	       FDMA_DCB_LLP(fdma->channel_id));
	lan_wr(upper_32_bits((u64)fdma->dma), lan966x,
	       FDMA_DCB_LLP1(fdma->channel_id));

	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
	       FDMA_CH_CFG_CH_MEM_SET(1),
	       lan966x, FDMA_CH_CFG(fdma->channel_id));

	/* Start fdma */
	lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
		FDMA_PORT_CTRL_XTR_STOP,
		lan966x, FDMA_PORT_CTRL(0));

	/* Enable interrupts */
	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
	mask |= BIT(fdma->channel_id);
	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
		FDMA_INTR_DB_ENA_INTR_DB_ENA,
		lan966x, FDMA_INTR_DB_ENA);

	/* Activate the channel */
	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)),
		FDMA_CH_ACTIVATE_CH_ACTIVATE,
		lan966x, FDMA_CH_ACTIVATE);
}

static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct fdma *fdma = &rx->fdma;
	u32 val;

	/* Disable the channel */
	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)),
		FDMA_CH_DISABLE_CH_DISABLE,
		lan966x, FDMA_CH_DISABLE);

	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
				  val, !(val & BIT(fdma->channel_id)),
				  READL_SLEEP_US, READL_TIMEOUT_US);

	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)),
		FDMA_CH_DB_DISCARD_DB_DISCARD,
		lan966x, FDMA_CH_DB_DISCARD);
}

static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;

	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->fdma.channel_id)),
		FDMA_CH_RELOAD_CH_RELOAD,
		lan966x, FDMA_CH_RELOAD);
}

static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	struct fdma *fdma = &tx->fdma;
	int err;

	tx->dcbs_buf = kcalloc(fdma->n_dcbs, sizeof(struct lan966x_tx_dcb_buf),
			       GFP_KERNEL);
	if (!tx->dcbs_buf)
		return -ENOMEM;

	err = fdma_alloc_coherent(lan966x->dev, fdma);
	if (err)
		goto out;

	fdma_dcbs_init(fdma, 0, 0);

	return 0;

out:
	kfree(tx->dcbs_buf);
	return -ENOMEM;
}

static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;

	kfree(tx->dcbs_buf);
	fdma_free_coherent(lan966x->dev, &tx->fdma);
}

static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	struct fdma *fdma = &tx->fdma;
	u32 mask;

	/* When activating a channel, the address of the first DCB must be
	 * written before the channel is activated
	 */
	lan_wr(lower_32_bits((u64)fdma->dma), lan966x,
	       FDMA_DCB_LLP(fdma->channel_id));
	lan_wr(upper_32_bits((u64)fdma->dma), lan966x,
	       FDMA_DCB_LLP1(fdma->channel_id));

	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
	       FDMA_CH_CFG_CH_MEM_SET(1),
	       lan966x, FDMA_CH_CFG(fdma->channel_id));

	/* Start fdma */
	lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
		FDMA_PORT_CTRL_INJ_STOP,
		lan966x, FDMA_PORT_CTRL(0));

	/* Enable interrupts */
	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
	mask |= BIT(fdma->channel_id);
	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
		FDMA_INTR_DB_ENA_INTR_DB_ENA,
		lan966x, FDMA_INTR_DB_ENA);

	/* Activate the channel */
	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)),
		FDMA_CH_ACTIVATE_CH_ACTIVATE,
		lan966x, FDMA_CH_ACTIVATE);
}

static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	struct fdma *fdma = &tx->fdma;
	u32 val;

	/* Disable the channel */
	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)),
		FDMA_CH_DISABLE_CH_DISABLE,
		lan966x, FDMA_CH_DISABLE);

	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
				  val, !(val & BIT(fdma->channel_id)),
				  READL_SLEEP_US, READL_TIMEOUT_US);

	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)),
		FDMA_CH_DB_DISCARD_DB_DISCARD,
		lan966x, FDMA_CH_DB_DISCARD);

	tx->activated = false;
	tx->last_in_use = -1;
}

static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;

	/* Write the registers to reload the channel */
	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->fdma.channel_id)),
		FDMA_CH_RELOAD_CH_RELOAD,
		lan966x, FDMA_CH_RELOAD);
}

static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x)
{
	struct lan966x_port *port;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		port = lan966x->ports[i];
		if (!port)
			continue;

		if (netif_queue_stopped(port->dev))
			netif_wake_queue(port->dev);
	}
}

static void lan966x_fdma_stop_netdev(struct lan966x *lan966x)
{
	struct lan966x_port *port;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		port = lan966x->ports[i];
		if (!port)
			continue;

		netif_stop_queue(port->dev);
	}
}

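/* Reclaim finished TX buffers. Called from the NAPI poll loop, this walks
 * the whole TX DCB ring under tx_lock and, for every DCB the hardware has
 * marked DONE, unmaps and releases the skb, XDP frame or page that backed
 * it, then wakes the netdev queues if anything was freed.
 */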
static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
{
	struct lan966x_tx *tx = &lan966x->tx;
	struct lan966x_rx *rx = &lan966x->rx;
	struct lan966x_tx_dcb_buf *dcb_buf;
	struct fdma *fdma = &tx->fdma;
	struct xdp_frame_bulk bq;
	unsigned long flags;
	bool clear = false;
	struct fdma_db *db;
	int i;

	xdp_frame_bulk_init(&bq);

	spin_lock_irqsave(&lan966x->tx_lock, flags);
	for (i = 0; i < fdma->n_dcbs; ++i) {
		dcb_buf = &tx->dcbs_buf[i];

		if (!dcb_buf->used)
			continue;

		db = &fdma->dcbs[i].db[0];
		if (!(db->status & FDMA_DCB_STATUS_DONE))
			continue;

		dcb_buf->dev->stats.tx_packets++;
		dcb_buf->dev->stats.tx_bytes += dcb_buf->len;

		dcb_buf->used = false;
		if (dcb_buf->use_skb) {
			dma_unmap_single(lan966x->dev,
					 dcb_buf->dma_addr,
					 dcb_buf->len,
					 DMA_TO_DEVICE);

			if (!dcb_buf->ptp)
				napi_consume_skb(dcb_buf->data.skb, weight);
		} else {
			if (dcb_buf->xdp_ndo)
				dma_unmap_single(lan966x->dev,
						 dcb_buf->dma_addr,
						 dcb_buf->len,
						 DMA_TO_DEVICE);

			if (dcb_buf->xdp_ndo)
				xdp_return_frame_bulk(dcb_buf->data.xdpf, &bq);
			else
				page_pool_recycle_direct(rx->page_pool,
							 dcb_buf->data.page);
		}

		clear = true;
	}

	xdp_flush_frame_bulk(&bq);

	if (clear)
		lan966x_fdma_wakeup_netdev(lan966x);

	spin_unlock_irqrestore(&lan966x->tx_lock, flags);
}

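/* RX path helpers. A frame is consumed in three steps from the NAPI poll
 * loop: lan966x_fdma_rx_more_frames() checks the DONE bit of the current
 * DB, lan966x_fdma_rx_check_frame() extracts the source port from the IFH
 * and runs the XDP program (if any), and lan966x_fdma_rx_get_frame() turns
 * the page into an skb.
 */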
static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
{
	struct fdma *fdma = &rx->fdma;
	struct fdma_db *db;

	/* Check if there is any data */
	db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
	if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE)))
		return false;

	return true;
}

static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
{
	struct lan966x *lan966x = rx->lan966x;
	struct fdma *fdma = &rx->fdma;
	struct lan966x_port *port;
	struct fdma_db *db;
	struct page *page;

	db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
	page = rx->page[fdma->dcb_index][fdma->db_index];
	if (unlikely(!page))
		return FDMA_ERROR;

	dma_sync_single_for_cpu(lan966x->dev,
				(dma_addr_t)db->dataptr + XDP_PACKET_HEADROOM,
				FDMA_DCB_STATUS_BLOCKL(db->status),
				DMA_FROM_DEVICE);

	lan966x_ifh_get_src_port(page_address(page) + XDP_PACKET_HEADROOM,
				 src_port);
	if (WARN_ON(*src_port >= lan966x->num_phys_ports))
		return FDMA_ERROR;

	port = lan966x->ports[*src_port];
	if (!lan966x_xdp_port_present(port))
		return FDMA_PASS;

	return lan966x_xdp_run(port, page, FDMA_DCB_STATUS_BLOCKL(db->status));
}

static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
						 u64 src_port)
{
	struct lan966x *lan966x = rx->lan966x;
	struct fdma *fdma = &rx->fdma;
	struct sk_buff *skb;
	struct fdma_db *db;
	struct page *page;
	u64 timestamp;

	/* Get the received frame; the page is recycled by the page pool, so
	 * there is nothing to unmap here
	 */
	db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
	page = rx->page[fdma->dcb_index][fdma->db_index];

	skb = build_skb(page_address(page), fdma->db_size);
	if (unlikely(!skb))
		goto free_page;

	skb_mark_for_recycle(skb);

	skb_reserve(skb, XDP_PACKET_HEADROOM);
	skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));

	lan966x_ifh_get_timestamp(skb->data, &timestamp);

	skb->dev = lan966x->ports[src_port]->dev;
	skb_pull(skb, IFH_LEN_BYTES);

	if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
		skb_trim(skb, skb->len - ETH_FCS_LEN);

	lan966x_ptp_rxtstamp(lan966x, skb, src_port, timestamp);
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (lan966x->bridge_mask & BIT(src_port)) {
		skb->offload_fwd_mark = 1;

		skb_reset_network_header(skb);
		if (!lan966x_hw_offload(lan966x, src_port, skb))
			skb->offload_fwd_mark = 0;
	}

	skb->dev->stats.rx_bytes += skb->len;
	skb->dev->stats.rx_packets++;

	return skb;

free_page:
	page_pool_recycle_direct(rx->page_pool, page);

	return NULL;
}

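/* NAPI poll: first reclaim completed TX buffers, then consume up to
 * "weight" received frames. Every DCB that was consumed is re-armed with a
 * fresh page and handed back to the hardware, pending XDP_REDIRECTs are
 * flushed, and the DB interrupt is re-enabled once the ring is drained.
 */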
static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
{
	struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
	struct lan966x_rx *rx = &lan966x->rx;
	int old_dcb, dcb_reload, counter = 0;
	struct fdma *fdma = &rx->fdma;
	bool redirect = false;
	struct sk_buff *skb;
	u64 src_port;

	dcb_reload = fdma->dcb_index;

	lan966x_fdma_tx_clear_buf(lan966x, weight);

	/* Get all the received frames */
	while (counter < weight) {
		if (!lan966x_fdma_rx_more_frames(rx))
			break;

		counter++;

		switch (lan966x_fdma_rx_check_frame(rx, &src_port)) {
		case FDMA_PASS:
			break;
		case FDMA_ERROR:
			lan966x_fdma_rx_free_page(rx);
			lan966x_fdma_rx_advance_dcb(rx);
			goto allocate_new;
		case FDMA_REDIRECT:
			redirect = true;
			fallthrough;
		case FDMA_TX:
			lan966x_fdma_rx_advance_dcb(rx);
			continue;
		case FDMA_DROP:
			lan966x_fdma_rx_free_page(rx);
			lan966x_fdma_rx_advance_dcb(rx);
			continue;
		}

		skb = lan966x_fdma_rx_get_frame(rx, src_port);
		lan966x_fdma_rx_advance_dcb(rx);
		if (!skb)
			goto allocate_new;

		napi_gro_receive(&lan966x->napi, skb);
	}

allocate_new:
	/* Allocate new pages and map them */
	while (dcb_reload != fdma->dcb_index) {
		old_dcb = dcb_reload;
		dcb_reload++;
		dcb_reload &= fdma->n_dcbs - 1;

		fdma_dcb_add(fdma, old_dcb, FDMA_DCB_INFO_DATAL(fdma->db_size),
			     FDMA_DCB_STATUS_INTR);

		lan966x_fdma_rx_reload(rx);
	}

	if (redirect)
		xdp_do_flush();

	if (counter < weight && napi_complete_done(napi, counter))
		lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);

	return counter;
}

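/* One interrupt line covers both DB (data ready) and ERR events. DB
 * interrupts are masked here and re-enabled by the poll loop, so all frame
 * processing happens in NAPI context.
 */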
irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
{
	struct lan966x *lan966x = args;
	u32 db, err, err_type;

	db = lan_rd(lan966x, FDMA_INTR_DB);
	err = lan_rd(lan966x, FDMA_INTR_ERR);

	if (db) {
		lan_wr(0, lan966x, FDMA_INTR_DB_ENA);
		lan_wr(db, lan966x, FDMA_INTR_DB);

		napi_schedule(&lan966x->napi);
	}

	if (err) {
		err_type = lan_rd(lan966x, FDMA_ERRORS);

		WARN(1, "Unexpected error: %d, error_type: %d\n", err, err_type);

		lan_wr(err, lan966x, FDMA_INTR_ERR);
		lan_wr(err_type, lan966x, FDMA_ERRORS);
	}

	return IRQ_HANDLED;
}

static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
{
	struct lan966x_tx_dcb_buf *dcb_buf;
	struct fdma *fdma = &tx->fdma;
	int i;

	for (i = 0; i < fdma->n_dcbs; ++i) {
		dcb_buf = &tx->dcbs_buf[i];
		if (!dcb_buf->used && i != tx->last_in_use)
			return i;
	}

	return -1;
}

static void lan966x_fdma_tx_start(struct lan966x_tx *tx, int next_to_use)
{
	struct lan966x *lan966x = tx->lan966x;
	struct fdma *fdma = &tx->fdma;
	struct fdma_dcb *dcb;

	if (likely(lan966x->tx.activated)) {
		/* Chain the last used DCB to the newly filled one */
		dcb = &fdma->dcbs[tx->last_in_use];
		dcb->nextptr = fdma->dma + (next_to_use *
					    sizeof(struct fdma_dcb));

		lan966x_fdma_tx_reload(tx);
	} else {
		/* First use of the channel, so just activate it */
		lan966x->tx.activated = true;
		lan966x_fdma_tx_activate(tx);
	}

	/* Remember this DCB as the last one in use */
	tx->last_in_use = next_to_use;
}

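/* Transmit an XDP buffer. Called with len == 0 for an xdp_frame coming
 * from ndo_xdp_xmit (which must be DMA-mapped here), or with a non-zero
 * length for a page_pool page bounced back by XDP_TX (already mapped, so
 * it only needs a sync). In both cases an IFH is written in front of the
 * payload so the switch injects the frame on the right port.
 */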
int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
{
	struct lan966x *lan966x = port->lan966x;
	struct lan966x_tx_dcb_buf *next_dcb_buf;
	struct lan966x_tx *tx = &lan966x->tx;
	struct xdp_frame *xdpf;
	dma_addr_t dma_addr;
	struct page *page;
	int next_to_use;
	__be32 *ifh;
	int ret = 0;

	spin_lock(&lan966x->tx_lock);

	/* Get next index */
	next_to_use = lan966x_fdma_get_next_dcb(tx);
	if (next_to_use < 0) {
		netif_stop_queue(port->dev);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Get the next buffer */
	next_dcb_buf = &tx->dcbs_buf[next_to_use];

	/* Generate new IFH */
	if (!len) {
		xdpf = ptr;

		if (xdpf->headroom < IFH_LEN_BYTES) {
			ret = NETDEV_TX_OK;
			goto out;
		}

		ifh = xdpf->data - IFH_LEN_BYTES;
		memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
		lan966x_ifh_set_bypass(ifh, 1);
		lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));

		dma_addr = dma_map_single(lan966x->dev,
					  xdpf->data - IFH_LEN_BYTES,
					  xdpf->len + IFH_LEN_BYTES,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(lan966x->dev, dma_addr)) {
			ret = NETDEV_TX_OK;
			goto out;
		}

		next_dcb_buf->data.xdpf = xdpf;
		next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;
	} else {
		page = ptr;

		ifh = page_address(page) + XDP_PACKET_HEADROOM;
		memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
		lan966x_ifh_set_bypass(ifh, 1);
		lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));

		dma_addr = page_pool_get_dma_addr(page);
		dma_sync_single_for_device(lan966x->dev,
					   dma_addr + XDP_PACKET_HEADROOM,
					   len + IFH_LEN_BYTES,
					   DMA_TO_DEVICE);

		next_dcb_buf->data.page = page;
		next_dcb_buf->len = len + IFH_LEN_BYTES;
	}

	/* Fill up the buffer */
	next_dcb_buf->use_skb = false;
	next_dcb_buf->xdp_ndo = !len;
	next_dcb_buf->dma_addr = dma_addr;
	next_dcb_buf->used = true;
	next_dcb_buf->ptp = false;
	next_dcb_buf->dev = port->dev;

	__fdma_dcb_add(&tx->fdma,
		       next_to_use,
		       0,
		       FDMA_DCB_STATUS_INTR |
		       FDMA_DCB_STATUS_SOF |
		       FDMA_DCB_STATUS_EOF |
		       FDMA_DCB_STATUS_BLOCKO(0) |
		       FDMA_DCB_STATUS_BLOCKL(next_dcb_buf->len),
		       &fdma_nextptr_cb,
		       &lan966x_fdma_xdp_tx_dataptr_cb);

	/* Start the transmission */
	lan966x_fdma_tx_start(tx, next_to_use);

out:
	spin_unlock(&lan966x->tx_lock);

	return ret;
}

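/* Transmit an skb. The IFH built by the caller is pushed in front of the
 * frame data and four bytes of tailroom are added (room for the FCS,
 * matching the ETH_FCS_LEN tailroom reserved above), so the buffer handed
 * to the FDMA contains IFH + frame + FCS.
 */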
int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;
	struct lan966x_tx_dcb_buf *next_dcb_buf;
	struct lan966x_tx *tx = &lan966x->tx;
	int needed_headroom;
	int needed_tailroom;
	dma_addr_t dma_addr;
	int next_to_use;
	int err;

	/* Get next index */
	next_to_use = lan966x_fdma_get_next_dcb(tx);
	if (next_to_use < 0) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	if (skb_put_padto(skb, ETH_ZLEN)) {
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* skb processing */
	needed_headroom = max_t(int, IFH_LEN_BYTES - skb_headroom(skb), 0);
	needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
	if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
				       GFP_ATOMIC);
		if (unlikely(err)) {
			dev->stats.tx_dropped++;
			err = NETDEV_TX_OK;
			goto release;
		}
	}

	skb_tx_timestamp(skb);
	skb_push(skb, IFH_LEN_BYTES);
	memcpy(skb->data, ifh, IFH_LEN_BYTES);
	skb_put(skb, 4);	/* Make room for the FCS */

	dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(lan966x->dev, dma_addr)) {
		dev->stats.tx_dropped++;
		err = NETDEV_TX_OK;
		goto release;
	}

	/* Fill up the buffer */
	next_dcb_buf = &tx->dcbs_buf[next_to_use];
	next_dcb_buf->use_skb = true;
	next_dcb_buf->data.skb = skb;
	next_dcb_buf->xdp_ndo = false;
	next_dcb_buf->len = skb->len;
	next_dcb_buf->dma_addr = dma_addr;
	next_dcb_buf->used = true;
	next_dcb_buf->ptp = false;
	next_dcb_buf->dev = dev;

	fdma_dcb_add(&tx->fdma,
		     next_to_use,
		     0,
		     FDMA_DCB_STATUS_INTR |
		     FDMA_DCB_STATUS_SOF |
		     FDMA_DCB_STATUS_EOF |
		     FDMA_DCB_STATUS_BLOCKO(0) |
		     FDMA_DCB_STATUS_BLOCKL(skb->len));

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		next_dcb_buf->ptp = true;

	/* Start the transmission */
	lan966x_fdma_tx_start(tx, next_to_use);

	return NETDEV_TX_OK;

release:
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		lan966x_ptp_txtstamp_release(port, skb);

	dev_kfree_skb_any(skb);
	return err;
}

static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x)
{
	int max_mtu = 0;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		struct lan966x_port *port;
		int mtu;

		port = lan966x->ports[i];
		if (!port)
			continue;

		mtu = lan_rd(lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
		if (mtu > max_mtu)
			max_mtu = mtu;
	}

	return max_mtu;
}

static int lan966x_qsys_sw_status(struct lan966x *lan966x)
{
	return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT));
}

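/* Swap in a new RX ring and page pool sized for new_mtu. The old DCBs and
 * pool are kept aside so they can be freed once the new ring is up; if the
 * allocation fails, the saved pointers are restored and RX is restarted on
 * the old ring.
 */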
static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
{
	struct page_pool *page_pool;
	dma_addr_t rx_dma;
	void *rx_dcbs;
	u32 size;
	int err;

	/* Save these so they can be freed after the new ring is up */
	rx_dma = lan966x->rx.fdma.dma;
	rx_dcbs = lan966x->rx.fdma.dcbs;
	page_pool = lan966x->rx.page_pool;

	napi_synchronize(&lan966x->napi);
	napi_disable(&lan966x->napi);
	lan966x_fdma_stop_netdev(lan966x);

	lan966x_fdma_rx_disable(&lan966x->rx);
	lan966x_fdma_rx_free_pages(&lan966x->rx);
	lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
	lan966x->rx.max_mtu = new_mtu;
	err = lan966x_fdma_rx_alloc(&lan966x->rx);
	if (err)
		goto restore;
	lan966x_fdma_rx_start(&lan966x->rx);

	size = sizeof(struct fdma_dcb) * lan966x->rx.fdma.n_dcbs;
	size = ALIGN(size, PAGE_SIZE);
	dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);

	page_pool_destroy(page_pool);

	lan966x_fdma_wakeup_netdev(lan966x);
	napi_enable(&lan966x->napi);

	return err;
restore:
	lan966x->rx.page_pool = page_pool;
	lan966x->rx.fdma.dma = rx_dma;
	lan966x->rx.fdma.dcbs = rx_dcbs;
	lan966x_fdma_rx_start(&lan966x->rx);

	return err;
}

static int lan966x_fdma_get_max_frame(struct lan966x *lan966x)
{
	return lan966x_fdma_get_max_mtu(lan966x) +
	       IFH_LEN_BYTES +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
	       VLAN_HLEN * 2 +
	       XDP_PACKET_HEADROOM;
}

static int __lan966x_fdma_reload(struct lan966x *lan966x, int max_mtu)
{
	int err;
	u32 val;

	/* Disable the CPU port */
	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
		QSYS_SW_PORT_MODE_PORT_ENA,
		lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

	/* Flush the CPU queues */
	readx_poll_timeout(lan966x_qsys_sw_status, lan966x,
			   val, !(QSYS_SW_STATUS_EQ_AVAIL_GET(val)),
			   READL_SLEEP_US, READL_TIMEOUT_US);

	/* Sleep briefly in case frames are still in flight between the
	 * queues and the CPU port
	 */
	usleep_range(1000, 2000);

	err = lan966x_fdma_reload(lan966x, max_mtu);

	/* Enable back the CPU port */
	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1),
		QSYS_SW_PORT_MODE_PORT_ENA,
		lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

	return err;
}

int lan966x_fdma_change_mtu(struct lan966x *lan966x)
{
	int max_mtu;

	max_mtu = lan966x_fdma_get_max_frame(lan966x);
	if (max_mtu == lan966x->rx.max_mtu)
		return 0;

	return __lan966x_fdma_reload(lan966x, max_mtu);
}

int lan966x_fdma_reload_page_pool(struct lan966x *lan966x)
{
	int max_mtu;

	max_mtu = lan966x_fdma_get_max_frame(lan966x);
	return __lan966x_fdma_reload(lan966x, max_mtu);
}

void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
{
	if (lan966x->fdma_ndev)
		return;

	lan966x->fdma_ndev = dev;
	netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll);
	napi_enable(&lan966x->napi);
}

void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev)
{
	if (lan966x->fdma_ndev == dev) {
		netif_napi_del(&lan966x->napi);
		lan966x->fdma_ndev = NULL;
	}
}

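/* Set up both FDMA channels. The RX and TX rings share the DCB count, the
 * DB size follows the RX page order, and RX is started immediately while
 * TX stays inactive until the first transmission.
 */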
int lan966x_fdma_init(struct lan966x *lan966x)
{
	int err;

	if (!lan966x->fdma)
		return 0;

	lan966x->rx.lan966x = lan966x;
	lan966x->rx.fdma.channel_id = FDMA_XTR_CHANNEL;
	lan966x->rx.fdma.n_dcbs = FDMA_DCB_MAX;
	lan966x->rx.fdma.n_dbs = FDMA_RX_DCB_MAX_DBS;
	lan966x->rx.fdma.priv = lan966x;
	lan966x->rx.fdma.size = fdma_get_size(&lan966x->rx.fdma);
	lan966x->rx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;
	lan966x->rx.fdma.ops.nextptr_cb = &fdma_nextptr_cb;
	lan966x->rx.fdma.ops.dataptr_cb = &lan966x_fdma_rx_dataptr_cb;
	lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x);
	lan966x->tx.lan966x = lan966x;
	lan966x->tx.fdma.channel_id = FDMA_INJ_CHANNEL;
	lan966x->tx.fdma.n_dcbs = FDMA_DCB_MAX;
	lan966x->tx.fdma.n_dbs = FDMA_TX_DCB_MAX_DBS;
	lan966x->tx.fdma.priv = lan966x;
	lan966x->tx.fdma.size = fdma_get_size(&lan966x->tx.fdma);
	lan966x->tx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;
	lan966x->tx.fdma.ops.nextptr_cb = &fdma_nextptr_cb;
	lan966x->tx.fdma.ops.dataptr_cb = &lan966x_fdma_tx_dataptr_cb;
	lan966x->tx.last_in_use = -1;

	err = lan966x_fdma_rx_alloc(&lan966x->rx);
	if (err)
		return err;

	err = lan966x_fdma_tx_alloc(&lan966x->tx);
	if (err) {
		fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma);
		return err;
	}

	lan966x_fdma_rx_start(&lan966x->rx);

	return 0;
}

void lan966x_fdma_deinit(struct lan966x *lan966x)
{
	if (!lan966x->fdma)
		return;

	lan966x_fdma_rx_disable(&lan966x->rx);
	lan966x_fdma_tx_disable(&lan966x->tx);

	napi_synchronize(&lan966x->napi);
	napi_disable(&lan966x->napi);

	lan966x_fdma_rx_free_pages(&lan966x->rx);
	fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma);
	page_pool_destroy(lan966x->rx.page_pool);
	lan966x_fdma_tx_free(&lan966x->tx);
}