// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2024 Hisilicon Limited.

#include <net/netdev_queues.h>
#include "hbg_common.h"
#include "hbg_irq.h"
#include "hbg_reg.h"
#include "hbg_txrx.h"

#define netdev_get_tx_ring(netdev) \
			(&(((struct hbg_priv *)netdev_priv(netdev))->tx_ring))

#define buffer_to_dma_dir(buffer) (((buffer)->dir == HBG_DIR_RX) ? \
				   DMA_FROM_DEVICE : DMA_TO_DEVICE)

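/* The ring book-keeping macros below treat each ring as a circular buffer
 * indexed by "next to clean" (head) and "next to use" (tail), with one slot
 * kept free so the full and empty states remain distinguishable.
 * For example, with len = 8, head = 6 and tail = 2:
 * used = (2 + 8 - 6) % 8 = 4 and left = 8 - 4 - 1 = 3.
 */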
#define hbg_queue_used_num(head, tail, ring) ({ \
	typeof(ring) _ring = (ring); \
	((tail) + _ring->len - (head)) % _ring->len; })
#define hbg_queue_left_num(head, tail, ring) ({ \
	typeof(ring) _r = (ring); \
	_r->len - hbg_queue_used_num((head), (tail), _r) - 1; })
#define hbg_queue_is_empty(head, tail, ring) \
	(hbg_queue_used_num((head), (tail), (ring)) == 0)
#define hbg_queue_is_full(head, tail, ring) \
	(hbg_queue_left_num((head), (tail), (ring)) == 0)
#define hbg_queue_next_prt(p, ring) (((p) + 1) % (ring)->len)
#define hbg_queue_move_next(p, ring) ({ \
	typeof(ring) _ring = (ring); \
	_ring->p = hbg_queue_next_prt(_ring->p, _ring); })

#define HBG_TX_STOP_THRS	2
#define HBG_TX_START_THRS	(2 * HBG_TX_STOP_THRS)

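/* Map the buffer's skb data for DMA in the direction implied by the buffer,
 * counting mapping failures in the per-direction DMA error statistics.
 */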
static int hbg_dma_map(struct hbg_buffer *buffer)
{
	struct hbg_priv *priv = buffer->priv;

	buffer->skb_dma = dma_map_single(&priv->pdev->dev,
					 buffer->skb->data, buffer->skb_len,
					 buffer_to_dma_dir(buffer));
	if (unlikely(dma_mapping_error(&priv->pdev->dev, buffer->skb_dma))) {
		if (buffer->dir == HBG_DIR_RX)
			priv->stats.rx_dma_err_cnt++;
		else
			priv->stats.tx_dma_err_cnt++;

		return -ENOMEM;
	}

	return 0;
}

static void hbg_dma_unmap(struct hbg_buffer *buffer)
{
	struct hbg_priv *priv = buffer->priv;

	if (unlikely(!buffer->skb_dma))
		return;

	dma_unmap_single(&priv->pdev->dev, buffer->skb_dma, buffer->skb_len,
			 buffer_to_dma_dir(buffer));
	buffer->skb_dma = 0;
}

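/* Build a Tx descriptor for the buffer: enable write-back, record the IP
 * header offset, request L3/L4 checksum offload for CHECKSUM_PARTIAL skbs,
 * and fill in the skb DMA address and the buffer state DMA address.
 */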
static void hbg_init_tx_desc(struct hbg_buffer *buffer,
			     struct hbg_tx_desc *tx_desc)
{
	u32 ip_offset = buffer->skb->network_header - buffer->skb->mac_header;
	u32 word0 = 0;

	word0 |= FIELD_PREP(HBG_TX_DESC_W0_WB_B, HBG_STATUS_ENABLE);
	word0 |= FIELD_PREP(HBG_TX_DESC_W0_IP_OFF_M, ip_offset);
	if (likely(buffer->skb->ip_summed == CHECKSUM_PARTIAL)) {
		word0 |= FIELD_PREP(HBG_TX_DESC_W0_l3_CS_B, HBG_STATUS_ENABLE);
		word0 |= FIELD_PREP(HBG_TX_DESC_W0_l4_CS_B, HBG_STATUS_ENABLE);
	}

	tx_desc->word0 = word0;
	tx_desc->word1 = FIELD_PREP(HBG_TX_DESC_W1_SEND_LEN_M,
				    buffer->skb->len);
	tx_desc->word2 = buffer->skb_dma;
	tx_desc->word3 = buffer->state_dma;
}

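/* Transmit one skb: drop zero-length or oversized frames, stop the queue
 * when fewer than HBG_TX_STOP_THRS descriptors are left, then map the skb,
 * hand the Tx descriptor to hardware and advance the ring tail (ntu).
 */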
netdev_tx_t hbg_net_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct hbg_ring *ring = netdev_get_tx_ring(netdev);
	struct hbg_priv *priv = netdev_priv(netdev);
	/* This smp_load_acquire() pairs with the smp_store_release() in
	 * hbg_napi_tx_recycle(), which runs in the Tx interrupt handling path.
	 */
	u32 ntc = smp_load_acquire(&ring->ntc);
	struct hbg_buffer *buffer;
	struct hbg_tx_desc tx_desc;
	u32 ntu = ring->ntu;

	if (unlikely(!skb->len ||
		     skb->len > hbg_spec_max_frame_len(priv, HBG_DIR_TX))) {
		dev_kfree_skb_any(skb);
		netdev->stats.tx_errors++;
		return NETDEV_TX_OK;
	}

	if (!netif_subqueue_maybe_stop(netdev, 0,
				       hbg_queue_left_num(ntc, ntu, ring),
				       HBG_TX_STOP_THRS, HBG_TX_START_THRS))
		return NETDEV_TX_BUSY;

	buffer = &ring->queue[ntu];
	buffer->skb = skb;
	buffer->skb_len = skb->len;
	if (unlikely(hbg_dma_map(buffer))) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	buffer->state = HBG_TX_STATE_START;
	hbg_init_tx_desc(buffer, &tx_desc);
	hbg_hw_set_tx_desc(priv, &tx_desc);

	/* This smp_store_release() pairs with the smp_load_acquire() in
	 * hbg_napi_tx_recycle(), which runs in the Tx interrupt handling path.
	 */
	smp_store_release(&ring->ntu, hbg_queue_next_prt(ntu, ring));
	dev_sw_netstats_tx_add(netdev, 1, skb->len);
	return NETDEV_TX_OK;
}

static void hbg_buffer_free_skb(struct hbg_buffer *buffer)
{
	if (unlikely(!buffer->skb))
		return;

	dev_kfree_skb_any(buffer->skb);
	buffer->skb = NULL;
}

static int hbg_buffer_alloc_skb(struct hbg_buffer *buffer)
{
	u32 len = hbg_spec_max_frame_len(buffer->priv, buffer->dir);
	struct hbg_priv *priv = buffer->priv;

	buffer->skb = netdev_alloc_skb(priv->netdev, len);
	if (unlikely(!buffer->skb))
		return -ENOMEM;

	buffer->skb_len = len;
	memset(buffer->skb->data, 0, HBG_PACKET_HEAD_SIZE);
	return 0;
}

static void hbg_buffer_free(struct hbg_buffer *buffer)
{
	hbg_dma_unmap(buffer);
	hbg_buffer_free_skb(buffer);
}

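/* Tx completion NAPI poll: walk the ring from ntc and free every buffer the
 * hardware has marked HBG_TX_STATE_COMPLETE, publish the new ntc, wake the
 * queue and re-enable the Tx interrupt once polling completes.
 */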
static int hbg_napi_tx_recycle(struct napi_struct *napi, int budget)
{
	struct hbg_ring *ring = container_of(napi, struct hbg_ring, napi);
	/* This smp_load_acquire() pairs with the smp_store_release() in
	 * hbg_net_start_xmit() called in the xmit path.
	 */
	u32 ntu = smp_load_acquire(&ring->ntu);
	struct hbg_priv *priv = ring->priv;
	struct hbg_buffer *buffer;
	u32 ntc = ring->ntc;
	int packet_done = 0;

	/* We need to do cleanup even if budget is 0.
	 * Per the NAPI documentation, the budget only applies to Rx,
	 * so we hardcode the amount of work Tx NAPI does to 128.
	 */
	budget = 128;
	while (packet_done < budget) {
		if (unlikely(hbg_queue_is_empty(ntc, ntu, ring)))
			break;

		/* make sure HW writes to the descriptor are complete */
		dma_rmb();

		buffer = &ring->queue[ntc];
		if (buffer->state != HBG_TX_STATE_COMPLETE)
			break;

		hbg_buffer_free(buffer);
		ntc = hbg_queue_next_prt(ntc, ring);
		packet_done++;
	}

	/* This smp_store_release() pairs with the smp_load_acquire() in
	 * hbg_net_start_xmit() called in the xmit path.
	 */
	smp_store_release(&ring->ntc, ntc);
	netif_wake_queue(priv->netdev);

	if (likely(packet_done < budget &&
		   napi_complete_done(napi, packet_done)))
		hbg_hw_irq_enable(priv, HBG_INT_MSK_TX_B, true);

	return packet_done;
}

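/* Check the L3/L4 error codes reported in the Rx descriptor. Checksum errors
 * only downgrade ip_summed to CHECKSUM_NONE (the packet is still delivered);
 * any other error code bumps a statistic and causes the packet to be dropped.
 */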
static bool hbg_rx_check_l3l4_error(struct hbg_priv *priv,
				    struct hbg_rx_desc *desc,
				    struct sk_buff *skb)
{
	bool rx_checksum_offload = !!(priv->netdev->features & NETIF_F_RXCSUM);

	skb->ip_summed = rx_checksum_offload ?
			 CHECKSUM_UNNECESSARY : CHECKSUM_NONE;

	if (likely(!FIELD_GET(HBG_RX_DESC_W4_L3_ERR_CODE_M, desc->word4) &&
		   !FIELD_GET(HBG_RX_DESC_W4_L4_ERR_CODE_M, desc->word4)))
		return true;

	switch (FIELD_GET(HBG_RX_DESC_W4_L3_ERR_CODE_M, desc->word4)) {
	case HBG_L3_OK:
		break;
	case HBG_L3_WRONG_HEAD:
		priv->stats.rx_desc_l3_wrong_head_cnt++;
		return false;
	case HBG_L3_CSUM_ERR:
		skb->ip_summed = CHECKSUM_NONE;
		priv->stats.rx_desc_l3_csum_err_cnt++;

		/* Don't drop packets on csum validation failure,
		 * as suggested by Jakub
		 */
		break;
	case HBG_L3_LEN_ERR:
		priv->stats.rx_desc_l3_len_err_cnt++;
		return false;
	case HBG_L3_ZERO_TTL:
		priv->stats.rx_desc_l3_zero_ttl_cnt++;
		return false;
	default:
		priv->stats.rx_desc_l3_other_cnt++;
		return false;
	}

	switch (FIELD_GET(HBG_RX_DESC_W4_L4_ERR_CODE_M, desc->word4)) {
	case HBG_L4_OK:
		break;
	case HBG_L4_WRONG_HEAD:
		priv->stats.rx_desc_l4_wrong_head_cnt++;
		return false;
	case HBG_L4_LEN_ERR:
		priv->stats.rx_desc_l4_len_err_cnt++;
		return false;
	case HBG_L4_CSUM_ERR:
		skb->ip_summed = CHECKSUM_NONE;
		priv->stats.rx_desc_l4_csum_err_cnt++;

		/* Don't drop packets on csum validation failure,
		 * as suggested by Jakub
		 */
		break;
	case HBG_L4_ZERO_PORT_NUM:
		priv->stats.rx_desc_l4_zero_port_num_cnt++;
		return false;
	default:
		priv->stats.rx_desc_l4_other_cnt++;
		return false;
	}

	return true;
}

static void hbg_update_rx_ip_protocol_stats(struct hbg_priv *priv,
					    struct hbg_rx_desc *desc)
{
	if (unlikely(!FIELD_GET(HBG_RX_DESC_W4_IP_TCP_UDP_M, desc->word4))) {
		priv->stats.rx_desc_no_ip_pkt_cnt++;
		return;
	}

	if (unlikely(FIELD_GET(HBG_RX_DESC_W4_IP_VERSION_ERR_B, desc->word4))) {
		priv->stats.rx_desc_ip_ver_err_cnt++;
		return;
	}

	/* 0: IPv4, 1: IPv6 */
	if (FIELD_GET(HBG_RX_DESC_W4_IP_VERSION_B, desc->word4))
		priv->stats.rx_desc_ipv6_pkt_cnt++;
	else
		priv->stats.rx_desc_ipv4_pkt_cnt++;

	switch (FIELD_GET(HBG_RX_DESC_W4_IP_TCP_UDP_M, desc->word4)) {
	case HBG_IP_PKT:
		priv->stats.rx_desc_ip_pkt_cnt++;
		if (FIELD_GET(HBG_RX_DESC_W4_OPT_B, desc->word4))
			priv->stats.rx_desc_ip_opt_pkt_cnt++;
		if (FIELD_GET(HBG_RX_DESC_W4_FRAG_B, desc->word4))
			priv->stats.rx_desc_frag_cnt++;

		if (FIELD_GET(HBG_RX_DESC_W4_ICMP_B, desc->word4))
			priv->stats.rx_desc_icmp_pkt_cnt++;
		else if (FIELD_GET(HBG_RX_DESC_W4_IPSEC_B, desc->word4))
			priv->stats.rx_desc_ipsec_pkt_cnt++;
		break;
	case HBG_TCP_PKT:
		priv->stats.rx_desc_tcp_pkt_cnt++;
		break;
	case HBG_UDP_PKT:
		priv->stats.rx_desc_udp_pkt_cnt++;
		break;
	default:
		priv->stats.rx_desc_no_ip_pkt_cnt++;
		break;
	}
}

static void hbg_update_rx_protocol_stats(struct hbg_priv *priv,
					 struct hbg_rx_desc *desc)
{
	if (unlikely(!FIELD_GET(HBG_RX_DESC_W4_IDX_MATCH_B, desc->word4))) {
		priv->stats.rx_desc_key_not_match_cnt++;
		return;
	}

	if (FIELD_GET(HBG_RX_DESC_W4_BRD_CST_B, desc->word4))
		priv->stats.rx_desc_broadcast_pkt_cnt++;
	else if (FIELD_GET(HBG_RX_DESC_W4_MUL_CST_B, desc->word4))
		priv->stats.rx_desc_multicast_pkt_cnt++;

	if (FIELD_GET(HBG_RX_DESC_W4_VLAN_FLAG_B, desc->word4))
		priv->stats.rx_desc_vlan_pkt_cnt++;

	if (FIELD_GET(HBG_RX_DESC_W4_ARP_B, desc->word4)) {
		priv->stats.rx_desc_arp_pkt_cnt++;
		return;
	} else if (FIELD_GET(HBG_RX_DESC_W4_RARP_B, desc->word4)) {
		priv->stats.rx_desc_rarp_pkt_cnt++;
		return;
	}

	hbg_update_rx_ip_protocol_stats(priv, desc);
}

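/* Validate a received packet against its descriptor: the packet length, the
 * receive port number (which must match the device MAC id), the drop flag
 * and the L2/L3/L4 error bits. Returns false if the packet must be dropped,
 * otherwise updates the per-protocol statistics.
 */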
static bool hbg_rx_pkt_check(struct hbg_priv *priv, struct hbg_rx_desc *desc,
			     struct sk_buff *skb)
{
	if (unlikely(FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, desc->word2) >
		     priv->dev_specs.max_frame_len)) {
		priv->stats.rx_desc_pkt_len_err_cnt++;
		return false;
	}

	if (unlikely(FIELD_GET(HBG_RX_DESC_W2_PORT_NUM_M, desc->word2) !=
		     priv->dev_specs.mac_id ||
		     FIELD_GET(HBG_RX_DESC_W4_DROP_B, desc->word4))) {
		priv->stats.rx_desc_drop++;
		return false;
	}

	if (unlikely(FIELD_GET(HBG_RX_DESC_W4_L2_ERR_B, desc->word4))) {
		priv->stats.rx_desc_l2_err_cnt++;
		return false;
	}

	if (unlikely(!hbg_rx_check_l3l4_error(priv, desc, skb))) {
		priv->stats.rx_desc_l3l4_err_cnt++;
		return false;
	}

	hbg_update_rx_protocol_stats(priv, desc);
	return true;
}

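/* Allocate and DMA-map one Rx skb at the ring tail (ntu), hand its DMA
 * address to the hardware and advance ntu. Does nothing if the ring is
 * already full.
 */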
static int hbg_rx_fill_one_buffer(struct hbg_priv *priv)
{
	struct hbg_ring *ring = &priv->rx_ring;
	struct hbg_buffer *buffer;
	int ret;

	if (hbg_queue_is_full(ring->ntc, ring->ntu, ring))
		return 0;

	buffer = &ring->queue[ring->ntu];
	ret = hbg_buffer_alloc_skb(buffer);
	if (unlikely(ret))
		return ret;

	ret = hbg_dma_map(buffer);
	if (unlikely(ret)) {
		hbg_buffer_free_skb(buffer);
		return ret;
	}

	hbg_hw_fill_buffer(priv, buffer->skb_dma);
	hbg_queue_move_next(ntu, ring);
	return 0;
}

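/* Sync the Rx buffer for CPU access and check whether the hardware has
 * finished with it: a non-zero packet length in the Rx descriptor at the
 * start of the skb data indicates a completed packet.
 */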
static bool hbg_sync_data_from_hw(struct hbg_priv *priv,
				  struct hbg_buffer *buffer)
{
	struct hbg_rx_desc *rx_desc;

	/* make sure HW writes to the descriptor are complete */
	dma_rmb();

	dma_sync_single_for_cpu(&priv->pdev->dev, buffer->skb_dma,
				buffer->skb_len, DMA_FROM_DEVICE);

	rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
	return FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2) != 0;
}

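/* Rx NAPI poll: for each completed buffer, read the Rx descriptor at the
 * start of the skb data, validate the packet, reserve the packet head area,
 * pass the skb up via napi_gro_receive() and refill the ring slot.
 * Re-enables the Rx interrupt when polling completes early.
 */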
static int hbg_napi_rx_poll(struct napi_struct *napi, int budget)
{
	struct hbg_ring *ring = container_of(napi, struct hbg_ring, napi);
	struct hbg_priv *priv = ring->priv;
	struct hbg_rx_desc *rx_desc;
	struct hbg_buffer *buffer;
	u32 packet_done = 0;
	u32 pkt_len;

	while (packet_done < budget) {
		if (unlikely(hbg_queue_is_empty(ring->ntc, ring->ntu, ring)))
			break;

		buffer = &ring->queue[ring->ntc];
		if (unlikely(!buffer->skb))
			goto next_buffer;

		if (unlikely(!hbg_sync_data_from_hw(priv, buffer)))
			break;
		rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
		pkt_len = FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2);

		if (unlikely(!hbg_rx_pkt_check(priv, rx_desc, buffer->skb))) {
			hbg_buffer_free(buffer);
			goto next_buffer;
		}

		hbg_dma_unmap(buffer);
		skb_reserve(buffer->skb, HBG_PACKET_HEAD_SIZE + NET_IP_ALIGN);
		skb_put(buffer->skb, pkt_len);
		buffer->skb->protocol = eth_type_trans(buffer->skb,
						       priv->netdev);

		dev_sw_netstats_rx_add(priv->netdev, pkt_len);
		napi_gro_receive(napi, buffer->skb);
		buffer->skb = NULL;

next_buffer:
		hbg_rx_fill_one_buffer(priv);
		hbg_queue_move_next(ntc, ring);
		packet_done++;
	}

	if (likely(packet_done < budget &&
		   napi_complete_done(napi, packet_done)))
		hbg_hw_irq_enable(priv, HBG_INT_MSK_RX_B, true);

	return packet_done;
}

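/* Tear down a ring: disable and delete its NAPI context, free every buffer
 * (unmapping DMA and releasing the skb) and release the coherent memory
 * backing the buffer queue.
 */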
static void hbg_ring_uninit(struct hbg_ring *ring)
{
	struct hbg_buffer *buffer;
	u32 i;

	if (!ring->queue)
		return;

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);

	for (i = 0; i < ring->len; i++) {
		buffer = &ring->queue[i];
		hbg_buffer_free(buffer);
		buffer->ring = NULL;
		buffer->priv = NULL;
	}

	dma_free_coherent(&ring->priv->pdev->dev,
			  ring->len * sizeof(*ring->queue),
			  ring->queue, ring->queue_dma);
	ring->queue = NULL;
	ring->queue_dma = 0;
	ring->len = 0;
	ring->priv = NULL;
}

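/* Allocate the ring's buffer queue from coherent DMA memory, using one slot
 * more than the hardware FIFO depth so the full and empty states can be told
 * apart, initialize each buffer and the ring state, then register and enable
 * the Tx or Rx NAPI context.
 */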
static int hbg_ring_init(struct hbg_priv *priv, struct hbg_ring *ring,
			 int (*napi_poll)(struct napi_struct *, int),
			 enum hbg_dir dir)
{
	struct hbg_buffer *buffer;
	u32 i, len;

	len = hbg_get_spec_fifo_max_num(priv, dir) + 1;
	ring->queue = dma_alloc_coherent(&priv->pdev->dev,
					 len * sizeof(*ring->queue),
					 &ring->queue_dma, GFP_KERNEL);
	if (!ring->queue)
		return -ENOMEM;

	for (i = 0; i < len; i++) {
		buffer = &ring->queue[i];
		buffer->skb_len = 0;
		buffer->dir = dir;
		buffer->ring = ring;
		buffer->priv = priv;
		buffer->state_dma = ring->queue_dma + (i * sizeof(*buffer));
	}

	ring->dir = dir;
	ring->priv = priv;
	ring->ntc = 0;
	ring->ntu = 0;
	ring->len = len;

	if (dir == HBG_DIR_TX)
		netif_napi_add_tx(priv->netdev, &ring->napi, napi_poll);
	else
		netif_napi_add(priv->netdev, &ring->napi, napi_poll);

	napi_enable(&ring->napi);
	return 0;
}

static int hbg_tx_ring_init(struct hbg_priv *priv)
{
	struct hbg_ring *tx_ring = &priv->tx_ring;

	if (!tx_ring->tout_log_buf)
		tx_ring->tout_log_buf = devm_kmalloc(&priv->pdev->dev,
						     HBG_TX_TIMEOUT_BUF_LEN,
						     GFP_KERNEL);

	if (!tx_ring->tout_log_buf)
		return -ENOMEM;

	return hbg_ring_init(priv, tx_ring, hbg_napi_tx_recycle, HBG_DIR_TX);
}

static int hbg_rx_ring_init(struct hbg_priv *priv)
{
	int ret;
	u32 i;

	ret = hbg_ring_init(priv, &priv->rx_ring, hbg_napi_rx_poll, HBG_DIR_RX);
	if (ret)
		return ret;

	for (i = 0; i < priv->rx_ring.len - 1; i++) {
		ret = hbg_rx_fill_one_buffer(priv);
		if (ret) {
			hbg_ring_uninit(&priv->rx_ring);
			return ret;
		}
	}

	return 0;
}

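/* Initialize the Tx ring first, then the Rx ring; if Rx initialization
 * fails, the already-initialized Tx ring is torn down before returning.
 */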
int hbg_txrx_init(struct hbg_priv *priv)
{
	int ret;

	ret = hbg_tx_ring_init(priv);
	if (ret) {
		dev_err(&priv->pdev->dev,
			"failed to init tx ring, ret = %d\n", ret);
		return ret;
	}

	ret = hbg_rx_ring_init(priv);
	if (ret) {
		dev_err(&priv->pdev->dev,
			"failed to init rx ring, ret = %d\n", ret);
		hbg_ring_uninit(&priv->tx_ring);
	}

	return ret;
}

void hbg_txrx_uninit(struct hbg_priv *priv)
{
	hbg_ring_uninit(&priv->tx_ring);
	hbg_ring_uninit(&priv->rx_ring);
}