// SPDX-License-Identifier: GPL-2.0
// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <net/gro.h>
#include <net/page_pool/helpers.h>

#include "hinic3_hwdev.h"
#include "hinic3_nic_dev.h"
#include "hinic3_nic_io.h"
#include "hinic3_rx.h"

#define HINIC3_RX_HDR_SIZE              256
#define HINIC3_RX_BUFFER_WRITE          16

#define HINIC3_RX_TCP_PKT               0x3
#define HINIC3_RX_UDP_PKT               0x4
#define HINIC3_RX_SCTP_PKT              0x7

#define HINIC3_RX_IPV4_PKT              0
#define HINIC3_RX_IPV6_PKT              1
#define HINIC3_RX_INVALID_IP_TYPE       2

#define HINIC3_RX_PKT_FORMAT_NON_TUNNEL 0
#define HINIC3_RX_PKT_FORMAT_VXLAN      1

#define HINIC3_LRO_PKT_HDR_LEN_IPV4     66
#define HINIC3_LRO_PKT_HDR_LEN_IPV6     86
#define HINIC3_LRO_PKT_HDR_LEN(cqe) \
	(RQ_CQE_OFFOLAD_TYPE_GET((cqe)->offload_type, IP_TYPE) == \
	 HINIC3_RX_IPV6_PKT ? HINIC3_LRO_PKT_HDR_LEN_IPV6 : \
	 HINIC3_LRO_PKT_HDR_LEN_IPV4)

static void hinic3_rxq_clean_stats(struct hinic3_rxq_stats *rxq_stats)
{
	u64_stats_update_begin(&rxq_stats->syncp);
	rxq_stats->bytes = 0;
	rxq_stats->packets = 0;
	rxq_stats->errors = 0;
	rxq_stats->csum_errors = 0;
	rxq_stats->other_errors = 0;
	rxq_stats->dropped = 0;
	rxq_stats->rx_buf_empty = 0;

	rxq_stats->alloc_skb_err = 0;
	rxq_stats->alloc_rx_buf_err = 0;
	rxq_stats->restore_drop_sge = 0;
	u64_stats_update_end(&rxq_stats->syncp);
}

static void hinic3_rxq_stats_init(struct hinic3_rxq *rxq)
{
	struct hinic3_rxq_stats *rxq_stats = &rxq->rxq_stats;

	u64_stats_init(&rxq_stats->syncp);
	hinic3_rxq_clean_stats(rxq_stats);
}

int hinic3_alloc_rxqs(struct net_device *netdev)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	struct pci_dev *pdev = nic_dev->pdev;
	u16 num_rxqs = nic_dev->max_qps;
	struct hinic3_rxq *rxq;
	u16 q_id;

	nic_dev->rxqs = kcalloc(num_rxqs, sizeof(*nic_dev->rxqs), GFP_KERNEL);
	if (!nic_dev->rxqs)
		return -ENOMEM;

	for (q_id = 0; q_id < num_rxqs; q_id++) {
		rxq = &nic_dev->rxqs[q_id];
		rxq->netdev = netdev;
		rxq->dev = &pdev->dev;
		rxq->q_id = q_id;
		rxq->buf_len = nic_dev->rx_buf_len;
		rxq->buf_len_shift = ilog2(nic_dev->rx_buf_len);
		rxq->q_depth = nic_dev->q_params.rq_depth;
		rxq->q_mask = nic_dev->q_params.rq_depth - 1;

		hinic3_rxq_stats_init(rxq);
	}

	return 0;
}

void hinic3_free_rxqs(struct net_device *netdev)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);

	kfree(nic_dev->rxqs);
}

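/* Attach a page-pool fragment of buf_len bytes to the given rx_info slot.
 * Slots that still hold a page (a buffer not yet consumed) are left
 * untouched and reused.
 */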
static int rx_alloc_mapped_page(struct page_pool *page_pool,
				struct hinic3_rx_info *rx_info, u16 buf_len)
{
	struct page *page;
	u32 page_offset;

	if (likely(rx_info->page))
		return 0;

	page = page_pool_dev_alloc_frag(page_pool, &page_offset, buf_len);
	if (unlikely(!page))
		return -ENOMEM;

	rx_info->page = page;
	rx_info->page_offset = page_offset;

	return 0;
}

/* Associate a fixed completion element with every WQE in the RQ. Every RQ
 * WQE will always post its completion to the same place.
 */
static void rq_associate_cqes(struct hinic3_rxq *rxq)
{
	struct hinic3_queue_pages *qpages;
	struct hinic3_rq_wqe *rq_wqe;
	dma_addr_t cqe_dma;
	u32 i;

	qpages = &rxq->rq->wq.qpages;

	for (i = 0; i < rxq->q_depth; i++) {
		rq_wqe = get_q_element(qpages, i, NULL);
		cqe_dma = rxq->cqe_start_paddr +
			  i * sizeof(struct hinic3_rq_cqe);
		rq_wqe->cqe_hi_addr = cpu_to_le32(upper_32_bits(cqe_dma));
		rq_wqe->cqe_lo_addr = cpu_to_le32(lower_32_bits(cqe_dma));
	}
}

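/* Point one RQ WQE at a receive buffer. The 64-bit DMA address is split
 * into little-endian high/low 32-bit words, as the device expects.
 */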
static void rq_wqe_buf_set(struct hinic3_io_queue *rq, u32 wqe_idx,
			   dma_addr_t dma_addr, u16 len)
{
	struct hinic3_rq_wqe *rq_wqe;

	rq_wqe = get_q_element(&rq->wq.qpages, wqe_idx, NULL);
	rq_wqe->buf_hi_addr = cpu_to_le32(upper_32_bits(dma_addr));
	rq_wqe->buf_lo_addr = cpu_to_le32(lower_32_bits(dma_addr));
}

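/* Post fresh receive buffers for the free ring entries and ring the RQ
 * doorbell with the new producer index. One WQEBB is always left unused
 * (delta - 1), a common ring convention that keeps a completely full
 * queue distinguishable from an empty one. Returns the number of buffers
 * actually posted.
 */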
static u32 hinic3_rx_fill_buffers(struct hinic3_rxq *rxq)
{
	u32 i, free_wqebbs = rxq->delta - 1;
	struct hinic3_rx_info *rx_info;
	dma_addr_t dma_addr;
	int err;

	for (i = 0; i < free_wqebbs; i++) {
		rx_info = &rxq->rx_info[rxq->next_to_update];

		err = rx_alloc_mapped_page(rxq->page_pool, rx_info,
					   rxq->buf_len);
		if (unlikely(err))
			break;

		dma_addr = page_pool_get_dma_addr(rx_info->page) +
			rx_info->page_offset;
		rq_wqe_buf_set(rxq->rq, rxq->next_to_update, dma_addr,
			       rxq->buf_len);
		rxq->next_to_update = (rxq->next_to_update + 1) & rxq->q_mask;
	}

	if (likely(i)) {
		hinic3_write_db(rxq->rq, rxq->q_id & 3, DB_CFLAG_DP_RQ,
				rxq->next_to_update << HINIC3_NORMAL_RQ_WQE);
		rxq->delta -= i;
		rxq->next_to_alloc = rxq->next_to_update;
	}

	return i;
}

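/* Pre-allocate page-pool buffers for a queue that is being (re)configured;
 * returns how many of the rq_depth - 1 usable entries were populated.
 */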
static u32 hinic3_alloc_rx_buffers(struct hinic3_dyna_rxq_res *rqres,
				   u32 rq_depth, u16 buf_len)
{
	u32 free_wqebbs = rq_depth - 1;
	u32 idx;
	int err;

	for (idx = 0; idx < free_wqebbs; idx++) {
		err = rx_alloc_mapped_page(rqres->page_pool,
					   &rqres->rx_info[idx], buf_len);
		if (err)
			break;
	}

	return idx;
}

static void hinic3_free_rx_buffers(struct hinic3_dyna_rxq_res *rqres,
				   u32 q_depth)
{
	struct hinic3_rx_info *rx_info;
	u32 i;

	/* Free all the Rx ring buffers back to the page pool */
	for (i = 0; i < q_depth; i++) {
		rx_info = &rqres->rx_info[i];

		if (rx_info->page) {
			page_pool_put_full_page(rqres->page_pool,
						rx_info->page, false);
			rx_info->page = NULL;
		}
	}
}

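/* Merge one receive buffer into the skb being built. Small packets (up to
 * HINIC3_RX_HDR_SIZE) are copied into the linear area and the page goes
 * straight back to the pool; larger buffers are attached as a page frag
 * and the skb is marked so its pages recycle through the page pool.
 */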
static void hinic3_add_rx_frag(struct hinic3_rxq *rxq,
			       struct hinic3_rx_info *rx_info,
			       struct sk_buff *skb, u32 size)
{
	struct page *page;
	u8 *va;

	page = rx_info->page;
	va = (u8 *)page_address(page) + rx_info->page_offset;
	net_prefetch(va);

	page_pool_dma_sync_for_cpu(rxq->page_pool, page, rx_info->page_offset,
				   rxq->buf_len);

	if (size <= HINIC3_RX_HDR_SIZE && !skb_is_nonlinear(skb)) {
		memcpy(__skb_put(skb, size), va,
		       ALIGN(size, sizeof(long)));
		page_pool_put_full_page(rxq->page_pool, page, false);

		return;
	}

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			rx_info->page_offset, size, rxq->buf_len);
	skb_mark_for_recycle(skb);
}

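/* Gather the sge_num buffers of one packet, starting at the current
 * consumer index, into the skb. Each ring entry holds at most buf_len
 * bytes, so only the final fragment may be short.
 */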
static void packaging_skb(struct hinic3_rxq *rxq, struct sk_buff *skb,
			  u32 sge_num, u32 pkt_len)
{
	struct hinic3_rx_info *rx_info;
	u32 temp_pkt_len = pkt_len;
	u32 temp_sge_num = sge_num;
	u32 sw_ci;
	u32 size;

	sw_ci = rxq->cons_idx & rxq->q_mask;
	while (temp_sge_num) {
		rx_info = &rxq->rx_info[sw_ci];
		sw_ci = (sw_ci + 1) & rxq->q_mask;
		if (unlikely(temp_pkt_len > rxq->buf_len)) {
			size = rxq->buf_len;
			temp_pkt_len -= rxq->buf_len;
		} else {
			size = temp_pkt_len;
		}

		hinic3_add_rx_frag(rxq, rx_info, skb, size);

		/* clear contents of buffer_info */
		rx_info->page = NULL;
		temp_sge_num--;
	}
}

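/* Number of receive buffers (SGEs) a packet of pkt_len bytes spans, i.e.
 * DIV_ROUND_UP(pkt_len, buf_len) computed with a shift and mask since
 * buf_len is a power of two.
 */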
static u32 hinic3_get_sge_num(struct hinic3_rxq *rxq, u32 pkt_len)
{
	u32 sge_num;

	sge_num = pkt_len >> rxq->buf_len_shift;
	sge_num += (pkt_len & (rxq->buf_len - 1)) ? 1 : 0;

	return sge_num;
}

static struct sk_buff *hinic3_fetch_rx_buffer(struct hinic3_rxq *rxq,
					      u32 pkt_len)
{
	struct sk_buff *skb;
	u32 sge_num;

	skb = napi_alloc_skb(&rxq->irq_cfg->napi, HINIC3_RX_HDR_SIZE);
	if (unlikely(!skb))
		return NULL;

	sge_num = hinic3_get_sge_num(rxq, pkt_len);

	net_prefetchw(skb->data);
	packaging_skb(rxq, skb, sge_num, pkt_len);

	rxq->cons_idx += sge_num;
	rxq->delta += sge_num;

	return skb;
}

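/* Copy the packet headers out of the first page frag into the skb linear
 * area so the stack can parse them directly.
 */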
static void hinic3_pull_tail(struct sk_buff *skb)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int pull_len;
	unsigned char *va;

	va = skb_frag_address(frag);

	/* we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(skb->dev, va, HINIC3_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	skb_frag_size_sub(frag, pull_len);
	skb_frag_off_add(frag, pull_len);

	skb->data_len -= pull_len;
	skb->tail += pull_len;
}

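/* Set skb->ip_summed from the CQE offload flags. The hardware checksum is
 * trusted only for packet types the hardware fully parses: TCP/UDP/SCTP
 * over IPv4 or IPv6, either non-tunnelled or VXLAN-encapsulated. Anything
 * else falls back to CHECKSUM_NONE so the stack verifies it in software.
 */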
static void hinic3_rx_csum(struct hinic3_rxq *rxq, u32 offload_type,
			   u32 status, struct sk_buff *skb)
{
	u32 pkt_fmt = RQ_CQE_OFFOLAD_TYPE_GET(offload_type, TUNNEL_PKT_FORMAT);
	u32 pkt_type = RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_TYPE);
	u32 ip_type = RQ_CQE_OFFOLAD_TYPE_GET(offload_type, IP_TYPE);
	u32 csum_err = RQ_CQE_STATUS_GET(status, CSUM_ERR);
	struct net_device *netdev = rxq->netdev;

	if (!(netdev->features & NETIF_F_RXCSUM))
		return;

	if (unlikely(csum_err)) {
		/* pkt type is recognized by HW, and csum is wrong */
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	if (ip_type == HINIC3_RX_INVALID_IP_TYPE ||
	    !(pkt_fmt == HINIC3_RX_PKT_FORMAT_NON_TUNNEL ||
	      pkt_fmt == HINIC3_RX_PKT_FORMAT_VXLAN)) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	switch (pkt_type) {
	case HINIC3_RX_TCP_PKT:
	case HINIC3_RX_UDP_PKT:
	case HINIC3_RX_SCTP_PKT:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	default:
		skb->ip_summed = CHECKSUM_NONE;
		break;
	}
}

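/* Hardware LRO hands us one coalesced skb covering num_lro segments. Fill
 * in the GSO fields so the stack can resegment the aggregate later, e.g.
 * when the packet is forwarded.
 */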
static void hinic3_lro_set_gso_params(struct sk_buff *skb, u16 num_lro)
{
	struct ethhdr *eth = (struct ethhdr *)(skb->data);
	__be16 proto;

	proto = __vlan_get_protocol(skb, eth->h_proto, NULL);

	skb_shinfo(skb)->gso_size = DIV_ROUND_UP(skb->len - skb_headlen(skb),
						 num_lro);
	skb_shinfo(skb)->gso_type = proto == htons(ETH_P_IP) ?
				    SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
	skb_shinfo(skb)->gso_segs = num_lro;
}

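/* Build an skb for one completed packet and hand it to the stack. skbs
 * that ended up with a frag list bypass GRO; pending GRO packets are
 * flushed first so delivery order is preserved.
 */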
static int recv_one_pkt(struct hinic3_rxq *rxq, struct hinic3_rq_cqe *rx_cqe,
			u32 pkt_len, u32 vlan_len, u32 status)
{
	struct net_device *netdev = rxq->netdev;
	struct sk_buff *skb;
	u32 offload_type;
	u16 num_lro;

	skb = hinic3_fetch_rx_buffer(rxq, pkt_len);
	if (unlikely(!skb))
		return -ENOMEM;

	/* place header in linear portion of buffer */
	if (skb_is_nonlinear(skb))
		hinic3_pull_tail(skb);

	offload_type = le32_to_cpu(rx_cqe->offload_type);
	hinic3_rx_csum(rxq, offload_type, status, skb);

	num_lro = RQ_CQE_STATUS_GET(status, NUM_LRO);
	if (num_lro)
		hinic3_lro_set_gso_params(skb, num_lro);

	skb_record_rx_queue(skb, rxq->q_id);
	skb->protocol = eth_type_trans(skb, netdev);

	if (skb_has_frag_list(skb)) {
		napi_gro_flush(&rxq->irq_cfg->napi, false);
		netif_receive_skb(skb);
	} else {
		napi_gro_receive(&rxq->irq_cfg->napi, skb);
	}

	return 0;
}

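/* Allocate the per-queue resources used across a queue reconfiguration:
 * the rx_info array, the DMA-coherent CQE ring and a page pool sized to
 * back rq_depth receive buffers of rx_buf_len bytes each.
 */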
int hinic3_alloc_rxqs_res(struct net_device *netdev, u16 num_rq,
			  u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res)
{
	u64 cqe_mem_size = sizeof(struct hinic3_rq_cqe) * rq_depth;
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	struct page_pool_params pp_params = {};
	struct hinic3_dyna_rxq_res *rqres;
	u32 pkt_idx;
	int idx;

	for (idx = 0; idx < num_rq; idx++) {
		rqres = &rxqs_res[idx];
		rqres->rx_info = kcalloc(rq_depth, sizeof(*rqres->rx_info),
					 GFP_KERNEL);
		if (!rqres->rx_info)
			goto err_free_rqres;

		rqres->cqe_start_vaddr =
			dma_alloc_coherent(&nic_dev->pdev->dev, cqe_mem_size,
					   &rqres->cqe_start_paddr, GFP_KERNEL);
		if (!rqres->cqe_start_vaddr) {
			netdev_err(netdev, "Failed to alloc rxq%d rx cqe\n",
				   idx);
			goto err_free_rx_info;
		}

		pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
		pp_params.pool_size = rq_depth * nic_dev->rx_buf_len /
				      PAGE_SIZE;
		pp_params.nid = dev_to_node(&nic_dev->pdev->dev);
		pp_params.dev = &nic_dev->pdev->dev;
		pp_params.dma_dir = DMA_FROM_DEVICE;
		pp_params.max_len = PAGE_SIZE;
		rqres->page_pool = page_pool_create(&pp_params);
		if (IS_ERR(rqres->page_pool)) {
			netdev_err(netdev, "Failed to create rxq%d page pool\n",
				   idx);
			goto err_free_cqe;
		}

		pkt_idx = hinic3_alloc_rx_buffers(rqres, rq_depth,
						  nic_dev->rx_buf_len);
		if (!pkt_idx) {
			netdev_err(netdev, "Failed to alloc rxq%d rx buffers\n",
				   idx);
			goto err_destroy_page_pool;
		}
		rqres->next_to_alloc = pkt_idx;
	}

	return 0;

err_destroy_page_pool:
	page_pool_destroy(rqres->page_pool);
err_free_cqe:
	dma_free_coherent(&nic_dev->pdev->dev, cqe_mem_size,
			  rqres->cqe_start_vaddr,
			  rqres->cqe_start_paddr);
err_free_rx_info:
	kfree(rqres->rx_info);
err_free_rqres:
	hinic3_free_rxqs_res(netdev, idx, rq_depth, rxqs_res);

	return -ENOMEM;
}

void hinic3_free_rxqs_res(struct net_device *netdev, u16 num_rq,
			  u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res)
{
	u64 cqe_mem_size = sizeof(struct hinic3_rq_cqe) * rq_depth;
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	struct hinic3_dyna_rxq_res *rqres;
	int idx;

	for (idx = 0; idx < num_rq; idx++) {
		rqres = &rxqs_res[idx];

		hinic3_free_rx_buffers(rqres, rq_depth);
		page_pool_destroy(rqres->page_pool);
		dma_free_coherent(&nic_dev->pdev->dev, cqe_mem_size,
				  rqres->cqe_start_vaddr,
				  rqres->cqe_start_paddr);
		kfree(rqres->rx_info);
	}
}

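/* Wire previously allocated per-queue resources into the runtime rxq
 * structs, bind each RQ WQE to its fixed CQE and fill the ring with
 * receive buffers.
 */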
int hinic3_configure_rxqs(struct net_device *netdev, u16 num_rq,
			  u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	struct hinic3_dyna_rxq_res *rqres;
	struct msix_entry *msix_entry;
	struct hinic3_rxq *rxq;
	u16 q_id;
	u32 pkts;

	for (q_id = 0; q_id < num_rq; q_id++) {
		rxq = &nic_dev->rxqs[q_id];
		rqres = &rxqs_res[q_id];
		msix_entry = &nic_dev->qps_msix_entries[q_id];

		rxq->irq_id = msix_entry->vector;
		rxq->msix_entry_idx = msix_entry->entry;
		rxq->next_to_update = 0;
		rxq->next_to_alloc = rqres->next_to_alloc;
		rxq->q_depth = rq_depth;
		rxq->delta = rxq->q_depth;
		rxq->q_mask = rxq->q_depth - 1;
		rxq->cons_idx = 0;

		rxq->cqe_arr = rqres->cqe_start_vaddr;
		rxq->cqe_start_paddr = rqres->cqe_start_paddr;
		rxq->rx_info = rqres->rx_info;
		rxq->page_pool = rqres->page_pool;

		rxq->rq = &nic_dev->nic_io->rq[rxq->q_id];

		rq_associate_cqes(rxq);

		pkts = hinic3_rx_fill_buffers(rxq);
		if (!pkts) {
			netdev_err(netdev, "Failed to fill Rx buffer\n");
			return -ENOMEM;
		}
	}

	return 0;
}

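/* NAPI poll: consume up to budget completed packets from the CQE ring and
 * return the number delivered to the stack.
 */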
int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(rxq->netdev);
	u32 sw_ci, status, pkt_len, vlan_len;
	struct hinic3_rq_cqe *rx_cqe;
	u32 num_wqe = 0;
	int nr_pkts = 0;
	u16 num_lro;

	while (likely(nr_pkts < budget)) {
		sw_ci = rxq->cons_idx & rxq->q_mask;
		rx_cqe = rxq->cqe_arr + sw_ci;
		status = le32_to_cpu(rx_cqe->status);
		if (!RQ_CQE_STATUS_GET(status, RXDONE))
			break;

		/* make sure we read rx_done before packet length */
		rmb();

		vlan_len = le32_to_cpu(rx_cqe->vlan_len);
		pkt_len = RQ_CQE_SGE_GET(vlan_len, LEN);
		if (recv_one_pkt(rxq, rx_cqe, pkt_len, vlan_len, status))
			break;

		nr_pkts++;
		num_lro = RQ_CQE_STATUS_GET(status, NUM_LRO);
		if (num_lro)
			num_wqe += hinic3_get_sge_num(rxq, pkt_len);

		rx_cqe->status = 0;

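		/* LRO aggregates consume many RQ buffers per completion;
		 * bail out early once enough WQEs are used so the ring can
		 * be replenished before it runs dry.
		 */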
		if (num_wqe >= nic_dev->lro_replenish_thld)
			break;
	}

	if (rxq->delta >= HINIC3_RX_BUFFER_WRITE)
		hinic3_rx_fill_buffers(rxq);

	return nr_pkts;
}
578