xref: /linux/drivers/net/ethernet/huawei/hinic/hinic_rx.c (revision 13091aa30535b719e269f20a7bc34002bf5afae5)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include <linux/cpumask.h>
#include <asm/barrier.h>

#include "hinic_common.h"
#include "hinic_hw_if.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_rx.h"
#include "hinic_dev.h"

#define RX_IRQ_NO_PENDING               0
#define RX_IRQ_NO_COALESC               0
#define RX_IRQ_NO_LLI_TIMER             0
#define RX_IRQ_NO_CREDIT                0
#define RX_IRQ_NO_RESEND_TIMER          0
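/* replenish rx buffers when more than this many wqebbs are free */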
#define HINIC_RX_BUFFER_WRITE           16

#define HINIC_RX_IPV6_PKT		7
#define LRO_PKT_HDR_LEN_IPV4		66
#define LRO_PKT_HDR_LEN_IPV6		86
#define LRO_REPLENISH_THLD		256

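/* per-pkt header length of an LRO-coalesced frame, chosen by the ip
 * version recorded in the cqe's offload_type field
 */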
#define LRO_PKT_HDR_LEN(cqe)		\
	(HINIC_GET_RX_PKT_TYPE(be32_to_cpu((cqe)->offload_type)) == \
	 HINIC_RX_IPV6_PKT ? LRO_PKT_HDR_LEN_IPV6 : LRO_PKT_HDR_LEN_IPV4)

/**
 * hinic_rxq_clean_stats - Clean the statistics of a specific queue
 * @rxq: Logical Rx Queue
 **/
void hinic_rxq_clean_stats(struct hinic_rxq *rxq)
{
	struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;

	u64_stats_update_begin(&rxq_stats->syncp);
	rxq_stats->pkts  = 0;
	rxq_stats->bytes = 0;
	u64_stats_update_end(&rxq_stats->syncp);
}

/**
 * hinic_rxq_get_stats - get statistics of Rx Queue
 * @rxq: Logical Rx Queue
 * @stats: return updated stats here
 **/
void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
{
	struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
	unsigned int start;

	u64_stats_update_begin(&stats->syncp);
	do {
		start = u64_stats_fetch_begin(&rxq_stats->syncp);
		stats->pkts = rxq_stats->pkts;
		stats->bytes = rxq_stats->bytes;
	} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
	u64_stats_update_end(&stats->syncp);
}

/**
 * rxq_stats_init - Initialize the statistics of a specific queue
 * @rxq: Logical Rx Queue
 **/
static void rxq_stats_init(struct hinic_rxq *rxq)
{
	struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;

	u64_stats_init(&rxq_stats->syncp);
	hinic_rxq_clean_stats(rxq);
}

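/**
 * rx_csum - set the skb checksum state from the cqe status
 * @rxq: rx queue
 * @status: cqe status, in cpu byte order
 * @skb: received skb
 **/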
static void rx_csum(struct hinic_rxq *rxq, u32 status,
		    struct sk_buff *skb)
{
	struct net_device *netdev = rxq->netdev;
	u32 csum_err;

	csum_err = HINIC_RQ_CQE_STATUS_GET(status, CSUM_ERR);

	if (!(netdev->features & NETIF_F_RXCSUM))
		return;

	if (!csum_err)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 * rx_alloc_skb - allocate skb and map it to dma address
 * @rxq: rx queue
 * @dma_addr: returned dma address for the skb
 *
 * Return skb
 **/
static struct sk_buff *rx_alloc_skb(struct hinic_rxq *rxq,
				    dma_addr_t *dma_addr)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct sk_buff *skb;
	dma_addr_t addr;
	int err;

	skb = netdev_alloc_skb_ip_align(rxq->netdev, rxq->rq->buf_sz);
	if (!skb) {
		netdev_err(rxq->netdev, "Failed to allocate Rx SKB\n");
		return NULL;
	}

	addr = dma_map_single(&pdev->dev, skb->data, rxq->rq->buf_sz,
			      DMA_FROM_DEVICE);
	err = dma_mapping_error(&pdev->dev, addr);
	if (err) {
		dev_err(&pdev->dev, "Failed to map Rx DMA, err = %d\n", err);
		goto err_rx_map;
	}

	*dma_addr = addr;
	return skb;

err_rx_map:
	dev_kfree_skb_any(skb);
	return NULL;
}

/**
 * rx_unmap_skb - unmap the dma address of the skb
 * @rxq: rx queue
 * @dma_addr: dma address of the skb
 **/
static void rx_unmap_skb(struct hinic_rxq *rxq, dma_addr_t dma_addr)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;

	dma_unmap_single(&pdev->dev, dma_addr, rxq->rq->buf_sz,
			 DMA_FROM_DEVICE);
}

/**
 * rx_free_skb - unmap and free skb
 * @rxq: rx queue
 * @skb: skb to free
 * @dma_addr: dma address of the skb
 **/
static void rx_free_skb(struct hinic_rxq *rxq, struct sk_buff *skb,
			dma_addr_t dma_addr)
{
	rx_unmap_skb(rxq, dma_addr);
	dev_kfree_skb_any(skb);
}

/**
 * rx_alloc_pkts - allocate pkts in rx queue
 * @rxq: rx queue
 *
 * Return number of skbs allocated
 **/
static int rx_alloc_pkts(struct hinic_rxq *rxq)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_rq_wqe *rq_wqe;
	unsigned int free_wqebbs;
	struct hinic_sge sge;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	u16 prod_idx;
	int i;

	free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);

	/* Limit the allocation chunks */
	if (free_wqebbs > nic_dev->rx_weight)
		free_wqebbs = nic_dev->rx_weight;

	for (i = 0; i < free_wqebbs; i++) {
		skb = rx_alloc_skb(rxq, &dma_addr);
		if (!skb) {
			netdev_err(rxq->netdev, "Failed to alloc Rx skb\n");
			goto skb_out;
		}

		hinic_set_sge(&sge, dma_addr, skb->len);

		rq_wqe = hinic_rq_get_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
					  &prod_idx);
		if (!rq_wqe) {
			rx_free_skb(rxq, skb, dma_addr);
			goto skb_out;
		}

		hinic_rq_prepare_wqe(rxq->rq, prod_idx, rq_wqe, &sge);

		hinic_rq_write_wqe(rxq->rq, prod_idx, rq_wqe, skb);
	}

skb_out:
	if (i) {
		wmb();	/* write all the wqes before updating the producer idx */

		hinic_rq_update(rxq->rq, prod_idx);
	}

	return i;
}

/**
 * free_all_rx_skbs - free all skbs in rx queue
 * @rxq: rx queue
 **/
static void free_all_rx_skbs(struct hinic_rxq *rxq)
{
	struct hinic_rq *rq = rxq->rq;
	struct hinic_hw_wqe *hw_wqe;
	struct hinic_sge sge;
	u16 ci;

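	/* read each posted wqe to recover the buffer's dma address, then
	 * release the wqe and unmap/free the saved skb
	 */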
	while ((hw_wqe = hinic_read_wqe(rq->wq, HINIC_RQ_WQE_SIZE, &ci))) {
		if (IS_ERR(hw_wqe))
			break;

		hinic_rq_get_sge(rq, &hw_wqe->rq_wqe, ci, &sge);

		hinic_put_wqe(rq->wq, HINIC_RQ_WQE_SIZE);

		rx_free_skb(rxq, rq->saved_skb[ci], hinic_sge_to_dma(&sge));
	}
}

/**
 * rx_recv_jumbo_pkt - Rx handler for jumbo pkt
 * @rxq: rx queue
 * @head_skb: the first skb in the list
 * @left_pkt_len: remaining length of the pkt, excluding the head skb
 * @ci: consumer index
 *
 * Return number of wqes used for the remainder of the pkt
 **/
static int rx_recv_jumbo_pkt(struct hinic_rxq *rxq, struct sk_buff *head_skb,
			     unsigned int left_pkt_len, u16 ci)
{
	struct sk_buff *skb, *curr_skb = head_skb;
	struct hinic_rq_wqe *rq_wqe;
	unsigned int curr_len;
	struct hinic_sge sge;
	int num_wqes = 0;

	while (left_pkt_len > 0) {
		rq_wqe = hinic_rq_read_next_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
						&skb, &ci);

		num_wqes++;

		hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);

		rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));

		prefetch(skb->data);

		curr_len = (left_pkt_len > HINIC_RX_BUF_SZ) ? HINIC_RX_BUF_SZ :
			    left_pkt_len;

		left_pkt_len -= curr_len;

		__skb_put(skb, curr_len);

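		/* the first extra fragment hangs off the head skb's
		 * frag_list; later fragments chain via skb->next
		 */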
		if (curr_skb == head_skb)
			skb_shinfo(head_skb)->frag_list = skb;
		else
			curr_skb->next = skb;

		head_skb->len += skb->len;
		head_skb->data_len += skb->len;
		head_skb->truesize += skb->truesize;

		curr_skb = skb;
	}

	return num_wqes;
}

/**
 * rxq_recv - Rx handler
 * @rxq: rx queue
 * @budget: maximum pkts to process
 *
 * Return number of pkts received
 **/
static int rxq_recv(struct hinic_rxq *rxq, int budget)
{
	struct hinic_qp *qp = container_of(rxq->rq, struct hinic_qp, rq);
	u64 pkt_len = 0, rx_bytes = 0;
	struct hinic_rq *rq = rxq->rq;
	struct hinic_rq_wqe *rq_wqe;
	unsigned int free_wqebbs;
	struct hinic_rq_cqe *cqe;
	int num_wqes, pkts = 0;
	struct hinic_sge sge;
	unsigned int status;
	struct sk_buff *skb;
	u16 ci, num_lro;
	u16 num_wqe = 0;

	while (pkts < budget) {
		num_wqes = 0;

		rq_wqe = hinic_rq_read_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, &skb,
					   &ci);
		if (!rq_wqe)
			break;

		cqe = rq->cqe[ci];
		status = be32_to_cpu(cqe->status);
		hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);

		rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));

		rx_csum(rxq, status, skb);

		prefetch(skb->data);

		pkt_len = sge.len;

		if (pkt_len <= HINIC_RX_BUF_SZ) {
			__skb_put(skb, pkt_len);
		} else {
			__skb_put(skb, HINIC_RX_BUF_SZ);
			num_wqes = rx_recv_jumbo_pkt(rxq, skb, pkt_len -
						     HINIC_RX_BUF_SZ, ci);
		}

		hinic_rq_put_wqe(rq, ci,
				 (num_wqes + 1) * HINIC_RQ_WQE_SIZE);

		skb_record_rx_queue(skb, qp->q_id);
		skb->protocol = eth_type_trans(skb, rxq->netdev);

		napi_gro_receive(&rxq->napi, skb);

		pkts++;
		rx_bytes += pkt_len;

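		/* for an LRO-coalesced frame, account the headers of the
		 * extra merged pkts and track the wqes consumed, so the
		 * loop can break early and replenish the queue
		 */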
		num_lro = HINIC_GET_RX_NUM_LRO(status);
		if (num_lro) {
			rx_bytes += ((num_lro - 1) * LRO_PKT_HDR_LEN(cqe));

			num_wqe += (u16)(pkt_len >> rxq->rx_buff_shift) +
				   ((pkt_len & (rxq->buf_len - 1)) ? 1 : 0);
		}

		cqe->status = 0;

		if (num_wqe >= LRO_REPLENISH_THLD)
			break;
	}

	free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
	if (free_wqebbs > HINIC_RX_BUFFER_WRITE)
		rx_alloc_pkts(rxq);

	u64_stats_update_begin(&rxq->rxq_stats.syncp);
	rxq->rxq_stats.pkts += pkts;
	rxq->rxq_stats.bytes += rx_bytes;
	u64_stats_update_end(&rxq->rxq_stats.syncp);

	return pkts;
}

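/**
 * rx_poll - napi polling handler
 * @napi: napi context
 * @budget: maximum pkts to process
 *
 * Return number of pkts processed; re-enables the rx msix interrupt
 * when less than budget was used
 **/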
static int rx_poll(struct napi_struct *napi, int budget)
{
	struct hinic_rxq *rxq = container_of(napi, struct hinic_rxq, napi);
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_rq *rq = rxq->rq;
	int pkts;

	pkts = rxq_recv(rxq, budget);
	if (pkts >= budget)
		return budget;

	napi_complete(napi);
	hinic_hwdev_set_msix_state(nic_dev->hwdev,
				   rq->msix_entry,
				   HINIC_MSIX_ENABLE);

	return pkts;
}

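/**
 * rx_add_napi - register and enable napi for the rx queue
 * @rxq: rx queue
 **/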
static void rx_add_napi(struct hinic_rxq *rxq)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);

	netif_napi_add(rxq->netdev, &rxq->napi, rx_poll, nic_dev->rx_weight);
	napi_enable(&rxq->napi);
}

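/**
 * rx_del_napi - disable and unregister the rx queue's napi
 * @rxq: rx queue
 **/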
static void rx_del_napi(struct hinic_rxq *rxq)
{
	napi_disable(&rxq->napi);
	netif_napi_del(&rxq->napi);
}

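/**
 * rx_irq - rx msix interrupt handler, masks the interrupt and
 * schedules napi
 * @irq: irq number
 * @data: rx queue that raised the interrupt
 **/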
static irqreturn_t rx_irq(int irq, void *data)
{
	struct hinic_rxq *rxq = (struct hinic_rxq *)data;
	struct hinic_rq *rq = rxq->rq;
	struct hinic_dev *nic_dev;

	/* Disable the interrupt until napi is complete */
	nic_dev = netdev_priv(rxq->netdev);
	hinic_hwdev_set_msix_state(nic_dev->hwdev,
				   rq->msix_entry,
				   HINIC_MSIX_DISABLE);

	hinic_hwdev_msix_cnt_set(nic_dev->hwdev, rq->msix_entry);

	napi_schedule(&rxq->napi);
	return IRQ_HANDLED;
}

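/**
 * rx_request_irq - set up napi, msix and the irq for the rx queue
 * @rxq: rx queue
 *
 * Return 0 - Success, negative - Failure
 **/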
static int rx_request_irq(struct hinic_rxq *rxq)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_rq *rq = rxq->rq;
	struct hinic_qp *qp;
	int err;

	rx_add_napi(rxq);

	hinic_hwdev_msix_set(hwdev, rq->msix_entry,
			     RX_IRQ_NO_PENDING, RX_IRQ_NO_COALESC,
			     RX_IRQ_NO_LLI_TIMER, RX_IRQ_NO_CREDIT,
			     RX_IRQ_NO_RESEND_TIMER);

	err = request_irq(rq->irq, rx_irq, 0, rxq->irq_name, rxq);
	if (err) {
		rx_del_napi(rxq);
		return err;
	}

	/* spread rx irqs across the online cpus by queue id; cpumask_of()
	 * yields a static mask, so the pointer handed to the hint stays
	 * valid after this function returns
	 */
	qp = container_of(rq, struct hinic_qp, rq);
	err = irq_set_affinity_hint(rq->irq,
				    cpumask_of(qp->q_id % num_online_cpus()));
	if (err) {
		free_irq(rq->irq, rxq);
		rx_del_napi(rxq);
		return err;
	}

	return 0;
}

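/**
 * rx_free_irq - release the rx queue's irq and napi resources
 * @rxq: rx queue
 **/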
static void rx_free_irq(struct hinic_rxq *rxq)
{
	struct hinic_rq *rq = rxq->rq;

	irq_set_affinity_hint(rq->irq, NULL);
	free_irq(rq->irq, rxq);
	rx_del_napi(rxq);
}

/**
 * hinic_init_rxq - Initialize the Rx Queue
 * @rxq: Logical Rx Queue
 * @rq: Hardware Rx Queue to connect the Logical queue with
 * @netdev: network device to connect the Logical queue with
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
		   struct net_device *netdev)
{
	struct hinic_qp *qp = container_of(rq, struct hinic_qp, rq);
	int err, pkts;

	rxq->netdev = netdev;
	rxq->rq = rq;
	rxq->buf_len = HINIC_RX_BUF_SZ;
	rxq->rx_buff_shift = ilog2(HINIC_RX_BUF_SZ);

	rxq_stats_init(rxq);

	rxq->irq_name = devm_kasprintf(&netdev->dev, GFP_KERNEL,
				       "hinic_rxq%d", qp->q_id);
	if (!rxq->irq_name)
		return -ENOMEM;

	pkts = rx_alloc_pkts(rxq);
	if (!pkts) {
		err = -ENOMEM;
		goto err_rx_pkts;
	}

	err = rx_request_irq(rxq);
	if (err) {
		netdev_err(netdev, "Failed to request Rx irq\n");
		goto err_req_rx_irq;
	}

	return 0;

err_req_rx_irq:
err_rx_pkts:
	free_all_rx_skbs(rxq);
	devm_kfree(&netdev->dev, rxq->irq_name);
	return err;
}

/**
 * hinic_clean_rxq - Clean the Rx Queue
 * @rxq: Logical Rx Queue
 **/
void hinic_clean_rxq(struct hinic_rxq *rxq)
{
	struct net_device *netdev = rxq->netdev;

	rx_free_irq(rxq);

	free_all_rx_skbs(rxq);
	devm_kfree(&netdev->dev, rxq->irq_name);
}