xref: /linux/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c (revision 60675d4ca1ef0857e44eba5849b74a3a998d0c0f)
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2024 Hisilicon Limited.

#include <net/netdev_queues.h>
#include "hbg_common.h"
#include "hbg_irq.h"
#include "hbg_reg.h"
#include "hbg_txrx.h"

#define netdev_get_tx_ring(netdev) \
			(&(((struct hbg_priv *)netdev_priv(netdev))->tx_ring))

#define buffer_to_dma_dir(buffer) (((buffer)->dir == HBG_DIR_RX) ? \
				   DMA_FROM_DEVICE : DMA_TO_DEVICE)

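/* Each ring is a single-producer/single-consumer circular queue indexed by
 * ntu (next to use, producer side) and ntc (next to clean, consumer side).
 * One slot is always left unused so that a full ring can be told apart from
 * an empty one: used == len - 1 means full, used == 0 means empty.
 *
 * Worked example with len = 8, head (ntc) = 6, tail (ntu) = 2:
 *   used = (2 + 8 - 6) % 8 = 4
 *   left = 8 - 4 - 1 = 3
 */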
#define hbg_queue_used_num(head, tail, ring) ({ \
	typeof(ring) _ring = (ring); \
	((tail) + _ring->len - (head)) % _ring->len; })
#define hbg_queue_left_num(head, tail, ring) ({ \
	typeof(ring) _r = (ring); \
	_r->len - hbg_queue_used_num((head), (tail), _r) - 1; })
#define hbg_queue_is_empty(head, tail, ring) \
	(hbg_queue_used_num((head), (tail), (ring)) == 0)
#define hbg_queue_is_full(head, tail, ring) \
	(hbg_queue_left_num((head), (tail), (ring)) == 0)
#define hbg_queue_next_prt(p, ring) (((p) + 1) % (ring)->len)
#define hbg_queue_move_next(p, ring) ({ \
	typeof(ring) _ring = (ring); \
	_ring->p = hbg_queue_next_prt(_ring->p, _ring); })

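/* Flow-control hysteresis for the TX queue: stop the netdev queue when the
 * number of free descriptors drops to HBG_TX_STOP_THRS, and only wake it
 * again once at least HBG_TX_START_THRS are free, so the queue does not
 * bounce between stopped and started on every single completion.
 */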
#define HBG_TX_STOP_THRS	2
#define HBG_TX_START_THRS	(2 * HBG_TX_STOP_THRS)

static int hbg_dma_map(struct hbg_buffer *buffer)
{
	struct hbg_priv *priv = buffer->priv;

	buffer->skb_dma = dma_map_single(&priv->pdev->dev,
					 buffer->skb->data, buffer->skb_len,
					 buffer_to_dma_dir(buffer));
	if (unlikely(dma_mapping_error(&priv->pdev->dev, buffer->skb_dma)))
		return -ENOMEM;

	return 0;
}

static void hbg_dma_unmap(struct hbg_buffer *buffer)
{
	struct hbg_priv *priv = buffer->priv;

	if (unlikely(!buffer->skb_dma))
		return;

	dma_unmap_single(&priv->pdev->dev, buffer->skb_dma, buffer->skb_len,
			 buffer_to_dma_dir(buffer));
	buffer->skb_dma = 0;
}

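/* Build one hardware TX descriptor from the mapped buffer: word0 carries
 * the control flags (write-back enable, IP header offset, and L3/L4
 * checksum offload when the skb requests CHECKSUM_PARTIAL), word1 the send
 * length, word2 the DMA address of the frame data, and word3 the DMA
 * address of the buffer's state field, which lets the hardware write back
 * the completion state checked by hbg_napi_tx_recycle().
 */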
static void hbg_init_tx_desc(struct hbg_buffer *buffer,
			     struct hbg_tx_desc *tx_desc)
{
	u32 ip_offset = buffer->skb->network_header - buffer->skb->mac_header;
	u32 word0 = 0;

	word0 |= FIELD_PREP(HBG_TX_DESC_W0_WB_B, HBG_STATUS_ENABLE);
	word0 |= FIELD_PREP(HBG_TX_DESC_W0_IP_OFF_M, ip_offset);
	if (likely(buffer->skb->ip_summed == CHECKSUM_PARTIAL)) {
		word0 |= FIELD_PREP(HBG_TX_DESC_W0_l3_CS_B, HBG_STATUS_ENABLE);
		word0 |= FIELD_PREP(HBG_TX_DESC_W0_l4_CS_B, HBG_STATUS_ENABLE);
	}

	tx_desc->word0 = word0;
	tx_desc->word1 = FIELD_PREP(HBG_TX_DESC_W1_SEND_LEN_M,
				    buffer->skb->len);
	tx_desc->word2 = buffer->skb_dma;
	tx_desc->word3 = buffer->state_dma;
}

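/* The TX path is lock-free: hbg_net_start_xmit() is the only producer (it
 * advances ntu) and hbg_napi_tx_recycle() is the only consumer (it advances
 * ntc), so a store-release/load-acquire pair on each index is enough to
 * keep the two sides consistent without a spinlock.
 */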
netdev_tx_t hbg_net_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct hbg_ring *ring = netdev_get_tx_ring(netdev);
	struct hbg_priv *priv = netdev_priv(netdev);
	/* This smp_load_acquire() pairs with smp_store_release() in
	 * hbg_napi_tx_recycle(), called from the tx interrupt handling
	 * process.
	 */
	u32 ntc = smp_load_acquire(&ring->ntc);
	struct hbg_buffer *buffer;
	struct hbg_tx_desc tx_desc;
	u32 ntu = ring->ntu;

	if (unlikely(!skb->len ||
		     skb->len > hbg_spec_max_frame_len(priv, HBG_DIR_TX))) {
		dev_kfree_skb_any(skb);
		netdev->stats.tx_errors++;
		return NETDEV_TX_OK;
	}

	if (!netif_subqueue_maybe_stop(netdev, 0,
				       hbg_queue_left_num(ntc, ntu, ring),
				       HBG_TX_STOP_THRS, HBG_TX_START_THRS))
		return NETDEV_TX_BUSY;

	buffer = &ring->queue[ntu];
	buffer->skb = skb;
	buffer->skb_len = skb->len;
	if (unlikely(hbg_dma_map(buffer))) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	buffer->state = HBG_TX_STATE_START;
	hbg_init_tx_desc(buffer, &tx_desc);
	hbg_hw_set_tx_desc(priv, &tx_desc);

	/* This smp_store_release() pairs with smp_load_acquire() in
	 * hbg_napi_tx_recycle(), called from the tx interrupt handling
	 * process.
	 */
	smp_store_release(&ring->ntu, hbg_queue_next_prt(ntu, ring));
	dev_sw_netstats_tx_add(netdev, 1, skb->len);
	return NETDEV_TX_OK;
}

static void hbg_buffer_free_skb(struct hbg_buffer *buffer)
{
	if (unlikely(!buffer->skb))
		return;

	dev_kfree_skb_any(buffer->skb);
	buffer->skb = NULL;
}

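/* Allocate an RX skb sized for the largest possible frame. The first
 * HBG_PACKET_HEAD_SIZE bytes are cleared because the hardware writes a
 * struct hbg_rx_desc there; hbg_sync_data_from_hw() treats a nonzero
 * packet length in that descriptor as the completion signal.
 */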
static int hbg_buffer_alloc_skb(struct hbg_buffer *buffer)
{
	u32 len = hbg_spec_max_frame_len(buffer->priv, buffer->dir);
	struct hbg_priv *priv = buffer->priv;

	buffer->skb = netdev_alloc_skb(priv->netdev, len);
	if (unlikely(!buffer->skb))
		return -ENOMEM;

	buffer->skb_len = len;
	memset(buffer->skb->data, 0, HBG_PACKET_HEAD_SIZE);
	return 0;
}

static void hbg_buffer_free(struct hbg_buffer *buffer)
{
	hbg_dma_unmap(buffer);
	hbg_buffer_free_skb(buffer);
}

static int hbg_napi_tx_recycle(struct napi_struct *napi, int budget)
{
	struct hbg_ring *ring = container_of(napi, struct hbg_ring, napi);
	/* This smp_load_acquire() pairs with smp_store_release() in
	 * hbg_net_start_xmit(), called from the xmit process.
	 */
	u32 ntu = smp_load_acquire(&ring->ntu);
	struct hbg_priv *priv = ring->priv;
	struct hbg_buffer *buffer;
	u32 ntc = ring->ntc;
	int packet_done = 0;

	/* We need to do cleanup even if budget is 0. Per the NAPI
	 * documentation, budget is for Rx, so we hardcode the amount of
	 * work Tx NAPI does to 128.
	 */
	budget = 128;
	while (packet_done < budget) {
		if (unlikely(hbg_queue_is_empty(ntc, ntu, ring)))
			break;

		/* make sure HW has completed writing the descriptor */
		dma_rmb();

		buffer = &ring->queue[ntc];
		if (buffer->state != HBG_TX_STATE_COMPLETE)
			break;

		hbg_buffer_free(buffer);
		ntc = hbg_queue_next_prt(ntc, ring);
		packet_done++;
	}

	/* This smp_store_release() pairs with smp_load_acquire() in
	 * hbg_net_start_xmit(), called from the xmit process.
	 */
	smp_store_release(&ring->ntc, ntc);
	netif_wake_queue(priv->netdev);

	if (likely(packet_done < budget &&
		   napi_complete_done(napi, packet_done)))
		hbg_hw_irq_enable(priv, HBG_INT_MSK_TX_B, true);

	return packet_done;
}

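/* Post one empty RX buffer to the hardware: allocate and DMA-map an skb,
 * hand its DMA address to the RX FIFO, then advance ntu. A full ring is
 * not an error: nothing is posted and 0 is returned.
 */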
static int hbg_rx_fill_one_buffer(struct hbg_priv *priv)
{
	struct hbg_ring *ring = &priv->rx_ring;
	struct hbg_buffer *buffer;
	int ret;

	if (hbg_queue_is_full(ring->ntc, ring->ntu, ring))
		return 0;

	buffer = &ring->queue[ring->ntu];
	ret = hbg_buffer_alloc_skb(buffer);
	if (unlikely(ret))
		return ret;

	ret = hbg_dma_map(buffer);
	if (unlikely(ret)) {
		hbg_buffer_free_skb(buffer);
		return ret;
	}

	hbg_hw_fill_buffer(priv, buffer->skb_dma);
	hbg_queue_move_next(ntu, ring);
	return 0;
}

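/* Check whether the hardware has finished receiving into this buffer. The
 * device writes the RX descriptor at the head of the buffer (cleared by
 * hbg_buffer_alloc_skb()), so a nonzero packet length field there means a
 * complete frame has landed.
 */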
static bool hbg_sync_data_from_hw(struct hbg_priv *priv,
				  struct hbg_buffer *buffer)
{
	struct hbg_rx_desc *rx_desc;

	/* make sure HW has completed writing the descriptor */
	dma_rmb();

	dma_sync_single_for_cpu(&priv->pdev->dev, buffer->skb_dma,
				buffer->skb_len, DMA_FROM_DEVICE);

	rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
	return FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2) != 0;
}

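/* RX NAPI poll: for each completed buffer, unmap it, strip the
 * HBG_PACKET_HEAD_SIZE descriptor header (plus NET_IP_ALIGN padding), set
 * the frame length, and hand the skb to GRO. One replacement buffer is
 * posted for every ring slot consumed.
 */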
static int hbg_napi_rx_poll(struct napi_struct *napi, int budget)
{
	struct hbg_ring *ring = container_of(napi, struct hbg_ring, napi);
	struct hbg_priv *priv = ring->priv;
	struct hbg_rx_desc *rx_desc;
	struct hbg_buffer *buffer;
	u32 packet_done = 0;
	u32 pkt_len;

	while (packet_done < budget) {
		if (unlikely(hbg_queue_is_empty(ring->ntc, ring->ntu, ring)))
			break;

		buffer = &ring->queue[ring->ntc];
		if (unlikely(!buffer->skb))
			goto next_buffer;

		if (unlikely(!hbg_sync_data_from_hw(priv, buffer)))
			break;
		rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
		pkt_len = FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2);

		hbg_dma_unmap(buffer);

		skb_reserve(buffer->skb, HBG_PACKET_HEAD_SIZE + NET_IP_ALIGN);
		skb_put(buffer->skb, pkt_len);
		buffer->skb->protocol = eth_type_trans(buffer->skb,
						       priv->netdev);

		dev_sw_netstats_rx_add(priv->netdev, pkt_len);
		napi_gro_receive(napi, buffer->skb);
		buffer->skb = NULL;

next_buffer:
		hbg_rx_fill_one_buffer(priv);
		hbg_queue_move_next(ntc, ring);
		packet_done++;
	}

	if (likely(packet_done < budget &&
		   napi_complete_done(napi, packet_done)))
		hbg_hw_irq_enable(priv, HBG_INT_MSK_RX_B, true);

	return packet_done;
}

static void hbg_ring_uninit(struct hbg_ring *ring)
{
	struct hbg_buffer *buffer;
	u32 i;

	if (!ring->queue)
		return;

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);

	for (i = 0; i < ring->len; i++) {
		buffer = &ring->queue[i];
		hbg_buffer_free(buffer);
		buffer->ring = NULL;
		buffer->priv = NULL;
	}

	dma_free_coherent(&ring->priv->pdev->dev,
			  ring->len * sizeof(*ring->queue),
			  ring->queue, ring->queue_dma);
	ring->queue = NULL;
	ring->queue_dma = 0;
	ring->len = 0;
	ring->priv = NULL;
}

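/* Allocate the descriptor ring as one coherent DMA array of struct
 * hbg_buffer, so that each element's state_dma can point at its own slot
 * for hardware write-back. The ring holds one more slot than the FIFO
 * depth because the circular-queue macros keep one slot unused.
 */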
static int hbg_ring_init(struct hbg_priv *priv, struct hbg_ring *ring,
			 int (*napi_poll)(struct napi_struct *, int),
			 enum hbg_dir dir)
{
	struct hbg_buffer *buffer;
	u32 i, len;

	len = hbg_get_spec_fifo_max_num(priv, dir) + 1;
	ring->queue = dma_alloc_coherent(&priv->pdev->dev,
					 len * sizeof(*ring->queue),
					 &ring->queue_dma, GFP_KERNEL);
	if (!ring->queue)
		return -ENOMEM;

	for (i = 0; i < len; i++) {
		buffer = &ring->queue[i];
		buffer->skb_len = 0;
		buffer->dir = dir;
		buffer->ring = ring;
		buffer->priv = priv;
		buffer->state_dma = ring->queue_dma + (i * sizeof(*buffer));
	}

	ring->dir = dir;
	ring->priv = priv;
	ring->ntc = 0;
	ring->ntu = 0;
	ring->len = len;

	if (dir == HBG_DIR_TX)
		netif_napi_add_tx(priv->netdev, &ring->napi, napi_poll);
	else
		netif_napi_add(priv->netdev, &ring->napi, napi_poll);

	napi_enable(&ring->napi);
	return 0;
}

static int hbg_tx_ring_init(struct hbg_priv *priv)
{
	struct hbg_ring *tx_ring = &priv->tx_ring;

	if (!tx_ring->tout_log_buf)
		tx_ring->tout_log_buf = devm_kmalloc(&priv->pdev->dev,
						     HBG_TX_TIMEOUT_BUF_LEN,
						     GFP_KERNEL);

	if (!tx_ring->tout_log_buf)
		return -ENOMEM;

	return hbg_ring_init(priv, tx_ring, hbg_napi_tx_recycle, HBG_DIR_TX);
}

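/* Bring up the RX ring and pre-fill every usable slot (len - 1, since one
 * slot always stays unused) with a mapped buffer for the hardware.
 */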
static int hbg_rx_ring_init(struct hbg_priv *priv)
{
	int ret;
	u32 i;

	ret = hbg_ring_init(priv, &priv->rx_ring, hbg_napi_rx_poll, HBG_DIR_RX);
	if (ret)
		return ret;

	for (i = 0; i < priv->rx_ring.len - 1; i++) {
		ret = hbg_rx_fill_one_buffer(priv);
		if (ret) {
			hbg_ring_uninit(&priv->rx_ring);
			return ret;
		}
	}

	return 0;
}

int hbg_txrx_init(struct hbg_priv *priv)
{
	int ret;

	ret = hbg_tx_ring_init(priv);
	if (ret) {
		dev_err(&priv->pdev->dev,
			"failed to init tx ring, ret = %d\n", ret);
		return ret;
	}

	ret = hbg_rx_ring_init(priv);
	if (ret) {
		dev_err(&priv->pdev->dev,
			"failed to init rx ring, ret = %d\n", ret);
		hbg_ring_uninit(&priv->tx_ring);
	}

	return ret;
}

void hbg_txrx_uninit(struct hbg_priv *priv)
{
	hbg_ring_uninit(&priv->tx_ring);
	hbg_ring_uninit(&priv->rx_ring);
}