xref: /linux/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c (revision 8f7aa3d3c7323f4ca2768a9e74ebbe359c4f8f88)
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2024 Hisilicon Limited.

#include <net/netdev_queues.h>
#include "hbg_common.h"
#include "hbg_irq.h"
#include "hbg_reg.h"
#include "hbg_txrx.h"

#define CREATE_TRACE_POINTS
#include "hbg_trace.h"

#define netdev_get_tx_ring(netdev) \
			(&(((struct hbg_priv *)netdev_priv(netdev))->tx_ring))

#define buffer_to_dma_dir(buffer) (((buffer)->dir == HBG_DIR_RX) ? \
				   DMA_FROM_DEVICE : DMA_TO_DEVICE)

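/* Ring index helpers: ntu (next to use) is the producer index and ntc
 * (next to clean) is the consumer index.  One slot is always kept unused
 * so that a full ring (left == 0) can be told apart from an empty ring
 * (used == 0).  For example, with len = 4, head = 1 and tail = 3:
 * used = (3 + 4 - 1) % 4 = 2 and left = 4 - 2 - 1 = 1.
 */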
#define hbg_queue_used_num(head, tail, ring) ({ \
	typeof(ring) _ring = (ring); \
	((tail) + _ring->len - (head)) % _ring->len; })
#define hbg_queue_left_num(head, tail, ring) ({ \
	typeof(ring) _r = (ring); \
	_r->len - hbg_queue_used_num((head), (tail), _r) - 1; })
#define hbg_queue_is_empty(head, tail, ring) \
	(hbg_queue_used_num((head), (tail), (ring)) == 0)
#define hbg_queue_is_full(head, tail, ring) \
	(hbg_queue_left_num((head), (tail), (ring)) == 0)
#define hbg_queue_next_prt(p, ring) (((p) + 1) % (ring)->len)
#define hbg_queue_move_next(p, ring) ({ \
	typeof(ring) _ring = (ring); \
	_ring->p = hbg_queue_next_prt(_ring->p, _ring); })

#define hbg_get_page_order(ring) ({ \
	typeof(ring) _ring = (ring); \
	get_order(hbg_spec_max_frame_len(_ring->priv, _ring->dir)); })
#define hbg_get_page_size(ring) (PAGE_SIZE << hbg_get_page_order((ring)))

#define HBG_TX_STOP_THRS	2
#define HBG_TX_START_THRS	(2 * HBG_TX_STOP_THRS)

static int hbg_dma_map(struct hbg_buffer *buffer)
{
	struct hbg_priv *priv = buffer->priv;

	buffer->skb_dma = dma_map_single(&priv->pdev->dev,
					 buffer->skb->data, buffer->skb_len,
					 buffer_to_dma_dir(buffer));
	if (unlikely(dma_mapping_error(&priv->pdev->dev, buffer->skb_dma))) {
		if (buffer->dir == HBG_DIR_RX)
			priv->stats.rx_dma_err_cnt++;
		else
			priv->stats.tx_dma_err_cnt++;

		return -ENOMEM;
	}

	return 0;
}

static void hbg_dma_unmap(struct hbg_buffer *buffer)
{
	struct hbg_priv *priv = buffer->priv;

	if (unlikely(!buffer->skb_dma))
		return;

	dma_unmap_single(&priv->pdev->dev, buffer->skb_dma, buffer->skb_len,
			 buffer_to_dma_dir(buffer));
	buffer->skb_dma = 0;
}

static void hbg_buffer_free_page(struct hbg_buffer *buffer)
{
	struct hbg_ring *ring = buffer->ring;

	if (unlikely(!buffer->page))
		return;

	page_pool_put_full_page(ring->page_pool, buffer->page, false);

	buffer->page = NULL;
	buffer->page_dma = 0;
	buffer->page_addr = NULL;
	buffer->page_size = 0;
	buffer->page_offset = 0;
}

static int hbg_buffer_alloc_page(struct hbg_buffer *buffer)
{
	struct hbg_ring *ring = buffer->ring;
	u32 len = hbg_get_page_size(ring);
	u32 offset;

	if (unlikely(!ring->page_pool))
		return 0;

	buffer->page = page_pool_dev_alloc_frag(ring->page_pool, &offset, len);
	if (unlikely(!buffer->page))
		return -ENOMEM;

	buffer->page_dma = page_pool_get_dma_addr(buffer->page) + offset;
	buffer->page_addr = page_address(buffer->page) + offset;
	buffer->page_size = len;
	buffer->page_offset = offset;

	return 0;
}

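/* Build a hardware TX descriptor for one skb: enable write-back, record
 * the IP header offset, request L3/L4 checksum offload when the stack
 * asks for it (CHECKSUM_PARTIAL), and fill in the frame length, the skb
 * DMA address and the buffer state DMA address used for completion
 * write-back.
 */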
static void hbg_init_tx_desc(struct hbg_buffer *buffer,
			     struct hbg_tx_desc *tx_desc)
{
	u32 ip_offset = buffer->skb->network_header - buffer->skb->mac_header;
	u32 word0 = 0;

	word0 |= FIELD_PREP(HBG_TX_DESC_W0_WB_B, HBG_STATUS_ENABLE);
	word0 |= FIELD_PREP(HBG_TX_DESC_W0_IP_OFF_M, ip_offset);
	if (likely(buffer->skb->ip_summed == CHECKSUM_PARTIAL)) {
		word0 |= FIELD_PREP(HBG_TX_DESC_W0_l3_CS_B, HBG_STATUS_ENABLE);
		word0 |= FIELD_PREP(HBG_TX_DESC_W0_l4_CS_B, HBG_STATUS_ENABLE);
	}

	tx_desc->word0 = word0;
	tx_desc->word1 = FIELD_PREP(HBG_TX_DESC_W1_SEND_LEN_M,
				    buffer->skb->len);
	tx_desc->word2 = buffer->skb_dma;
	tx_desc->word3 = buffer->state_dma;
}

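/* .ndo_start_xmit handler.  The TX ring is a single-producer (this
 * function advances ntu) / single-consumer (hbg_napi_tx_recycle()
 * advances ntc) queue, so no lock is needed; the acquire/release pairs
 * on ntc/ntu keep both sides consistent.  The queue is stopped via
 * netif_subqueue_maybe_stop() when fewer than HBG_TX_STOP_THRS slots
 * are left and restarted once HBG_TX_START_THRS slots are free again.
 */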
netdev_tx_t hbg_net_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct hbg_ring *ring = netdev_get_tx_ring(netdev);
	struct hbg_priv *priv = netdev_priv(netdev);
	/* This smp_load_acquire() pairs with the smp_store_release() in
	 * hbg_napi_tx_recycle(), which runs in the TX interrupt handling path.
	 */
	u32 ntc = smp_load_acquire(&ring->ntc);
	struct hbg_buffer *buffer;
	struct hbg_tx_desc tx_desc;
	u32 ntu = ring->ntu;

	if (unlikely(!skb->len ||
		     skb->len > hbg_spec_max_frame_len(priv, HBG_DIR_TX))) {
		dev_kfree_skb_any(skb);
		netdev->stats.tx_errors++;
		return NETDEV_TX_OK;
	}

	if (!netif_subqueue_maybe_stop(netdev, 0,
				       hbg_queue_left_num(ntc, ntu, ring),
				       HBG_TX_STOP_THRS, HBG_TX_START_THRS))
		return NETDEV_TX_BUSY;

	buffer = &ring->queue[ntu];
	buffer->skb = skb;
	buffer->skb_len = skb->len;
	if (unlikely(hbg_dma_map(buffer))) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	buffer->state = HBG_TX_STATE_START;
	hbg_init_tx_desc(buffer, &tx_desc);
	hbg_hw_set_tx_desc(priv, &tx_desc);

	/* This smp_store_release() pairs with the smp_load_acquire() in
	 * hbg_napi_tx_recycle(), which runs in the TX interrupt handling path.
	 */
	smp_store_release(&ring->ntu, hbg_queue_next_prt(ntu, ring));
	dev_sw_netstats_tx_add(netdev, 1, skb->len);
	return NETDEV_TX_OK;
}

static void hbg_buffer_free_skb(struct hbg_buffer *buffer)
{
	if (unlikely(!buffer->skb))
		return;

	dev_kfree_skb_any(buffer->skb);
	buffer->skb = NULL;
}

static void hbg_buffer_free(struct hbg_buffer *buffer)
{
	if (buffer->skb) {
		hbg_dma_unmap(buffer);
		return hbg_buffer_free_skb(buffer);
	}

	hbg_buffer_free_page(buffer);
}

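/* TX completion NAPI poll.  Walks the TX ring from ntc and frees every
 * buffer whose state the hardware has written back as
 * HBG_TX_STATE_COMPLETE, then wakes the queue and re-enables the TX
 * interrupt once all pending completions have been handled.
 */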
static int hbg_napi_tx_recycle(struct napi_struct *napi, int budget)
{
	struct hbg_ring *ring = container_of(napi, struct hbg_ring, napi);
	/* This smp_load_acquire() pairs with the smp_store_release() in
	 * hbg_net_start_xmit(), which runs in the xmit path.
	 */
	u32 ntu = smp_load_acquire(&ring->ntu);
	struct hbg_priv *priv = ring->priv;
	struct hbg_buffer *buffer;
	u32 ntc = ring->ntc;
	int packet_done = 0;

	/* We need to do cleanup even if the budget is 0.
	 * Per the NAPI documentation, the budget only applies to RX.
	 * So we hardcode the amount of work TX NAPI does to 128.
	 */
	budget = 128;
	while (packet_done < budget) {
		if (unlikely(hbg_queue_is_empty(ntc, ntu, ring)))
			break;

		/* make sure the HW descriptor write-back is complete */
		dma_rmb();

		buffer = &ring->queue[ntc];
		if (buffer->state != HBG_TX_STATE_COMPLETE)
			break;

		hbg_buffer_free(buffer);
		ntc = hbg_queue_next_prt(ntc, ring);
		packet_done++;
	}

	/* This smp_store_release() pairs with the smp_load_acquire() in
	 * hbg_net_start_xmit(), which runs in the xmit path.
	 */
	smp_store_release(&ring->ntc, ntc);
	netif_wake_queue(priv->netdev);

	if (likely(packet_done < budget &&
		   napi_complete_done(napi, packet_done)))
		hbg_hw_irq_enable(priv, HBG_INT_MSK_TX_B, true);

	return packet_done;
}

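/* Parse the L3/L4 error codes the hardware reports in RX descriptor
 * word4.  Returns false (drop the packet) for header, length, TTL and
 * port errors; checksum errors only clear skb->ip_summed, so the packet
 * is still delivered and the stack revalidates the checksum.
 */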
static bool hbg_rx_check_l3l4_error(struct hbg_priv *priv,
				    struct hbg_rx_desc *desc,
				    struct sk_buff *skb)
{
	bool rx_checksum_offload = !!(priv->netdev->features & NETIF_F_RXCSUM);

	skb->ip_summed = rx_checksum_offload ?
			 CHECKSUM_UNNECESSARY : CHECKSUM_NONE;

	if (likely(!FIELD_GET(HBG_RX_DESC_W4_L3_ERR_CODE_M, desc->word4) &&
		   !FIELD_GET(HBG_RX_DESC_W4_L4_ERR_CODE_M, desc->word4)))
		return true;

	switch (FIELD_GET(HBG_RX_DESC_W4_L3_ERR_CODE_M, desc->word4)) {
	case HBG_L3_OK:
		break;
	case HBG_L3_WRONG_HEAD:
		priv->stats.rx_desc_l3_wrong_head_cnt++;
		return false;
	case HBG_L3_CSUM_ERR:
		skb->ip_summed = CHECKSUM_NONE;
		priv->stats.rx_desc_l3_csum_err_cnt++;

		/* Don't drop packets on csum validation failure,
		 * as suggested by Jakub
		 */
		break;
	case HBG_L3_LEN_ERR:
		priv->stats.rx_desc_l3_len_err_cnt++;
		return false;
	case HBG_L3_ZERO_TTL:
		priv->stats.rx_desc_l3_zero_ttl_cnt++;
		return false;
	default:
		priv->stats.rx_desc_l3_other_cnt++;
		return false;
	}

	switch (FIELD_GET(HBG_RX_DESC_W4_L4_ERR_CODE_M, desc->word4)) {
	case HBG_L4_OK:
		break;
	case HBG_L4_WRONG_HEAD:
		priv->stats.rx_desc_l4_wrong_head_cnt++;
		return false;
	case HBG_L4_LEN_ERR:
		priv->stats.rx_desc_l4_len_err_cnt++;
		return false;
	case HBG_L4_CSUM_ERR:
		skb->ip_summed = CHECKSUM_NONE;
		priv->stats.rx_desc_l4_csum_err_cnt++;

		/* Don't drop packets on csum validation failure,
		 * as suggested by Jakub
		 */
		break;
	case HBG_L4_ZERO_PORT_NUM:
		priv->stats.rx_desc_l4_zero_port_num_cnt++;
		return false;
	default:
		priv->stats.rx_desc_l4_other_cnt++;
		return false;
	}

	return true;
}

static void hbg_update_rx_ip_protocol_stats(struct hbg_priv *priv,
					    struct hbg_rx_desc *desc)
{
	if (unlikely(!FIELD_GET(HBG_RX_DESC_W4_IP_TCP_UDP_M, desc->word4))) {
		priv->stats.rx_desc_no_ip_pkt_cnt++;
		return;
	}

	if (unlikely(FIELD_GET(HBG_RX_DESC_W4_IP_VERSION_ERR_B, desc->word4))) {
		priv->stats.rx_desc_ip_ver_err_cnt++;
		return;
	}

	/* 0:ipv4, 1:ipv6 */
	if (FIELD_GET(HBG_RX_DESC_W4_IP_VERSION_B, desc->word4))
		priv->stats.rx_desc_ipv6_pkt_cnt++;
	else
		priv->stats.rx_desc_ipv4_pkt_cnt++;

	switch (FIELD_GET(HBG_RX_DESC_W4_IP_TCP_UDP_M, desc->word4)) {
	case HBG_IP_PKT:
		priv->stats.rx_desc_ip_pkt_cnt++;
		if (FIELD_GET(HBG_RX_DESC_W4_OPT_B, desc->word4))
			priv->stats.rx_desc_ip_opt_pkt_cnt++;
		if (FIELD_GET(HBG_RX_DESC_W4_FRAG_B, desc->word4))
			priv->stats.rx_desc_frag_cnt++;

		if (FIELD_GET(HBG_RX_DESC_W4_ICMP_B, desc->word4))
			priv->stats.rx_desc_icmp_pkt_cnt++;
		else if (FIELD_GET(HBG_RX_DESC_W4_IPSEC_B, desc->word4))
			priv->stats.rx_desc_ipsec_pkt_cnt++;
		break;
	case HBG_TCP_PKT:
		priv->stats.rx_desc_tcp_pkt_cnt++;
		break;
	case HBG_UDP_PKT:
		priv->stats.rx_desc_udp_pkt_cnt++;
		break;
	default:
		priv->stats.rx_desc_no_ip_pkt_cnt++;
		break;
	}
}

static void hbg_update_rx_protocol_stats(struct hbg_priv *priv,
					 struct hbg_rx_desc *desc)
{
	if (unlikely(!FIELD_GET(HBG_RX_DESC_W4_IDX_MATCH_B, desc->word4))) {
		priv->stats.rx_desc_key_not_match_cnt++;
		return;
	}

	if (FIELD_GET(HBG_RX_DESC_W4_BRD_CST_B, desc->word4))
		priv->stats.rx_desc_broadcast_pkt_cnt++;
	else if (FIELD_GET(HBG_RX_DESC_W4_MUL_CST_B, desc->word4))
		priv->stats.rx_desc_multicast_pkt_cnt++;

	if (FIELD_GET(HBG_RX_DESC_W4_VLAN_FLAG_B, desc->word4))
		priv->stats.rx_desc_vlan_pkt_cnt++;

	if (FIELD_GET(HBG_RX_DESC_W4_ARP_B, desc->word4)) {
		priv->stats.rx_desc_arp_pkt_cnt++;
		return;
	} else if (FIELD_GET(HBG_RX_DESC_W4_RARP_B, desc->word4)) {
		priv->stats.rx_desc_rarp_pkt_cnt++;
		return;
	}

	hbg_update_rx_ip_protocol_stats(priv, desc);
}

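/* Sanity-check one received frame against its RX descriptor: reject
 * oversized frames, frames for another MAC port, frames the hardware
 * marked to drop, and frames with L2 or fatal L3/L4 errors.  Returns
 * true if the packet should be passed up the stack.
 */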
static bool hbg_rx_pkt_check(struct hbg_priv *priv, struct hbg_rx_desc *desc,
			     struct sk_buff *skb)
{
	if (unlikely(FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, desc->word2) >
		     priv->dev_specs.max_frame_len)) {
		priv->stats.rx_desc_pkt_len_err_cnt++;
		return false;
	}

	if (unlikely(FIELD_GET(HBG_RX_DESC_W2_PORT_NUM_M, desc->word2) !=
		     priv->dev_specs.mac_id ||
		     FIELD_GET(HBG_RX_DESC_W4_DROP_B, desc->word4))) {
		priv->stats.rx_desc_drop++;
		return false;
	}

	if (unlikely(FIELD_GET(HBG_RX_DESC_W4_L2_ERR_B, desc->word4))) {
		priv->stats.rx_desc_l2_err_cnt++;
		return false;
	}

	if (unlikely(!hbg_rx_check_l3l4_error(priv, desc, skb))) {
		priv->stats.rx_desc_l3l4_err_cnt++;
		return false;
	}

	hbg_update_rx_protocol_stats(priv, desc);
	return true;
}

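/* Post one RX buffer to the hardware: allocate a page-pool fragment for
 * the slot at ntu, clear the descriptor header area (so a non-zero
 * packet length later indicates the hardware has written it back),
 * hand the DMA address to the hardware FIFO and advance ntu.
 */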
static int hbg_rx_fill_one_buffer(struct hbg_priv *priv)
{
	struct hbg_ring *ring = &priv->rx_ring;
	struct hbg_buffer *buffer;
	int ret;

	if (hbg_queue_is_full(ring->ntc, ring->ntu, ring) ||
	    hbg_fifo_is_full(priv, ring->dir))
		return 0;

	buffer = &ring->queue[ring->ntu];
	ret = hbg_buffer_alloc_page(buffer);
	if (unlikely(ret))
		return ret;

	memset(buffer->page_addr, 0, HBG_PACKET_HEAD_SIZE);
	dma_sync_single_for_device(&priv->pdev->dev, buffer->page_dma,
				   HBG_PACKET_HEAD_SIZE, DMA_TO_DEVICE);

	hbg_hw_fill_buffer(priv, buffer->page_dma);
	hbg_queue_move_next(ntu, ring);
	return 0;
}

static int hbg_rx_fill_buffers(struct hbg_priv *priv)
{
	u32 remained = hbg_hw_get_fifo_used_num(priv, HBG_DIR_RX);
	u32 max_count = priv->dev_specs.rx_fifo_num;
	u32 refill_count;
	int ret;

	if (unlikely(remained >= max_count))
		return 0;

	refill_count = max_count - remained;
	while (refill_count--) {
		ret = hbg_rx_fill_one_buffer(priv);
		if (unlikely(ret))
			break;
	}

	return ret;
}

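/* Sync one RX buffer for CPU access and check whether the hardware has
 * written a descriptor into it yet: a non-zero packet length in word2
 * means the buffer holds a complete frame.
 */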
static bool hbg_sync_data_from_hw(struct hbg_priv *priv,
				  struct hbg_buffer *buffer)
{
	struct hbg_rx_desc *rx_desc;

	/* make sure the HW descriptor write is complete */
	dma_rmb();

	dma_sync_single_for_cpu(&priv->pdev->dev, buffer->page_dma,
				buffer->page_size, DMA_FROM_DEVICE);

	rx_desc = (struct hbg_rx_desc *)buffer->page_addr;
	return FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2) != 0;
}

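/* Wrap the received page fragment in an skb without copying.  The skb
 * takes ownership of the page (skb_mark_for_recycle() returns it to the
 * page pool on free), so buffer->page is cleared here.
 */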
static int hbg_build_skb(struct hbg_priv *priv,
			 struct hbg_buffer *buffer, u32 pkt_len)
{
	net_prefetch(buffer->page_addr);

	buffer->skb = napi_build_skb(buffer->page_addr, buffer->page_size);
	if (unlikely(!buffer->skb))
		return -ENOMEM;
	skb_mark_for_recycle(buffer->skb);

	/* page will be freed together with the skb */
	buffer->page = NULL;

	return 0;
}

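/* RX NAPI poll.  Buffers are refilled first so the hardware always has
 * somewhere to receive into, then each completed buffer is turned into
 * an skb, validated against its descriptor and handed to the GRO layer.
 * The RX interrupt is re-enabled once the ring is drained within budget.
 */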
static int hbg_napi_rx_poll(struct napi_struct *napi, int budget)
{
	struct hbg_ring *ring = container_of(napi, struct hbg_ring, napi);
	struct hbg_priv *priv = ring->priv;
	struct hbg_rx_desc *rx_desc;
	struct hbg_buffer *buffer;
	u32 packet_done = 0;
	u32 pkt_len;

	hbg_rx_fill_buffers(priv);
	while (packet_done < budget) {
		if (unlikely(hbg_queue_is_empty(ring->ntc, ring->ntu, ring)))
			break;

		buffer = &ring->queue[ring->ntc];
		if (unlikely(!buffer->page))
			goto next_buffer;

		if (unlikely(!hbg_sync_data_from_hw(priv, buffer)))
			break;
		rx_desc = (struct hbg_rx_desc *)buffer->page_addr;
		pkt_len = FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2);
		trace_hbg_rx_desc(priv, ring->ntc, rx_desc);

		if (unlikely(hbg_build_skb(priv, buffer, pkt_len))) {
			hbg_buffer_free_page(buffer);
			goto next_buffer;
		}

		if (unlikely(!hbg_rx_pkt_check(priv, rx_desc, buffer->skb))) {
			hbg_buffer_free_skb(buffer);
			goto next_buffer;
		}

		skb_reserve(buffer->skb, HBG_PACKET_HEAD_SIZE + NET_IP_ALIGN);
		skb_put(buffer->skb, pkt_len);
		buffer->skb->protocol = eth_type_trans(buffer->skb,
						       priv->netdev);
		dev_sw_netstats_rx_add(priv->netdev, pkt_len);
		napi_gro_receive(napi, buffer->skb);
		buffer->skb = NULL;
		buffer->page = NULL;

next_buffer:
		hbg_rx_fill_one_buffer(priv);
		hbg_queue_move_next(ntc, ring);
		packet_done++;
	}

	if (likely(packet_done < budget &&
		   napi_complete_done(napi, packet_done)))
		hbg_hw_irq_enable(priv, HBG_INT_MSK_RX_B, true);

	return packet_done;
}

static void hbg_ring_page_pool_destory(struct hbg_ring *ring)
{
	if (!ring->page_pool)
		return;

	page_pool_destroy(ring->page_pool);
	ring->page_pool = NULL;
}

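/* Create the page pool backing the RX ring.  The pool maps its pages
 * for DMA and syncs them for the device, and is sized so that the whole
 * ring can be populated with max-frame-sized fragments.
 */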
static int hbg_ring_page_pool_init(struct hbg_priv *priv, struct hbg_ring *ring)
{
	u32 buf_size = hbg_spec_max_frame_len(priv, ring->dir);
	struct page_pool_params pp_params = {
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order = hbg_get_page_order(ring),
		.pool_size = ring->len * buf_size / hbg_get_page_size(ring),
		.nid = dev_to_node(&priv->pdev->dev),
		.dev = &priv->pdev->dev,
		.napi = &ring->napi,
		.dma_dir = DMA_FROM_DEVICE,
		.offset = 0,
		.max_len = hbg_get_page_size(ring),
	};
	int ret = 0;

	ring->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(ring->page_pool)) {
		ret = PTR_ERR(ring->page_pool);
		dev_err(&priv->pdev->dev,
			"failed to create page pool, ret = %d\n", ret);
		ring->page_pool = NULL;
	}

	return ret;
}

static void hbg_ring_uninit(struct hbg_ring *ring)
{
	struct hbg_buffer *buffer;
	u32 i;

	if (!ring->queue)
		return;

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);

	for (i = 0; i < ring->len; i++) {
		buffer = &ring->queue[i];
		hbg_buffer_free(buffer);
		buffer->ring = NULL;
		buffer->priv = NULL;
	}

	hbg_ring_page_pool_destory(ring);
	dma_free_coherent(&ring->priv->pdev->dev,
			  ring->len * sizeof(*ring->queue),
			  ring->queue, ring->queue_dma);
	ring->queue = NULL;
	ring->queue_dma = 0;
	ring->len = 0;
	ring->priv = NULL;
}

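/* Common TX/RX ring setup: allocate the coherent hbg_buffer array,
 * initialize every buffer (including the state_dma address used for TX
 * completion write-back), register the NAPI handler and, for RX, create
 * the page pool.
 */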
static int hbg_ring_init(struct hbg_priv *priv, struct hbg_ring *ring,
			 int (*napi_poll)(struct napi_struct *, int),
			 enum hbg_dir dir)
{
	struct hbg_buffer *buffer;
	u32 i, len;
	int ret;

	len = hbg_get_spec_fifo_max_num(priv, dir) + 1;
	/* To improve receive performance under high stress, hbg_napi_rx_poll()
	 * first uses the other half of the ring to post fresh buffers to the
	 * hardware via hbg_rx_fill_buffers(), and only then processes the
	 * packets in the original half of the ring, to avoid packet loss
	 * caused by hardware overflow as much as possible.
	 */
	if (dir == HBG_DIR_RX)
		len += hbg_get_spec_fifo_max_num(priv, dir);

	ring->queue = dma_alloc_coherent(&priv->pdev->dev,
					 len * sizeof(*ring->queue),
					 &ring->queue_dma, GFP_KERNEL);
	if (!ring->queue)
		return -ENOMEM;

	for (i = 0; i < len; i++) {
		buffer = &ring->queue[i];
		buffer->skb_len = 0;
		buffer->dir = dir;
		buffer->ring = ring;
		buffer->priv = priv;
		buffer->state_dma = ring->queue_dma + (i * sizeof(*buffer));
	}

	ring->dir = dir;
	ring->priv = priv;
	ring->ntc = 0;
	ring->ntu = 0;
	ring->len = len;

	if (dir == HBG_DIR_TX) {
		netif_napi_add_tx(priv->netdev, &ring->napi, napi_poll);
	} else {
		netif_napi_add(priv->netdev, &ring->napi, napi_poll);

		ret = hbg_ring_page_pool_init(priv, ring);
		if (ret) {
			netif_napi_del(&ring->napi);
			dma_free_coherent(&ring->priv->pdev->dev,
					  ring->len * sizeof(*ring->queue),
					  ring->queue, ring->queue_dma);
			ring->queue = NULL;
			ring->len = 0;
			return ret;
		}
	}

	napi_enable(&ring->napi);
	return 0;
}

static int hbg_tx_ring_init(struct hbg_priv *priv)
{
	struct hbg_ring *tx_ring = &priv->tx_ring;

	if (!tx_ring->tout_log_buf)
		tx_ring->tout_log_buf = devm_kmalloc(&priv->pdev->dev,
						     HBG_TX_TIMEOUT_BUF_LEN,
						     GFP_KERNEL);

	if (!tx_ring->tout_log_buf)
		return -ENOMEM;

	return hbg_ring_init(priv, tx_ring, hbg_napi_tx_recycle, HBG_DIR_TX);
}

static int hbg_rx_ring_init(struct hbg_priv *priv)
{
	int ret;

	ret = hbg_ring_init(priv, &priv->rx_ring, hbg_napi_rx_poll, HBG_DIR_RX);
	if (ret)
		return ret;

	ret = hbg_rx_fill_buffers(priv);
	if (ret)
		hbg_ring_uninit(&priv->rx_ring);

	return ret;
}

int hbg_txrx_init(struct hbg_priv *priv)
{
	int ret;

	ret = hbg_tx_ring_init(priv);
	if (ret) {
		dev_err(&priv->pdev->dev,
			"failed to init tx ring, ret = %d\n", ret);
		return ret;
	}

	ret = hbg_rx_ring_init(priv);
	if (ret) {
		dev_err(&priv->pdev->dev,
			"failed to init rx ring, ret = %d\n", ret);
		hbg_ring_uninit(&priv->tx_ring);
	}

	return ret;
}

void hbg_txrx_uninit(struct hbg_priv *priv)
{
	hbg_ring_uninit(&priv->tx_ring);
	hbg_ring_uninit(&priv->rx_ring);
}