xref: /linux/drivers/net/ethernet/intel/idpf/idpf_txrx.c (revision d5ffba0f254d29a13908d4510762b31d4247a94c)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2023 Intel Corporation */
3 
4 #include <net/libeth/rx.h>
5 #include <net/libeth/tx.h>
6 
7 #include "idpf.h"
8 #include "idpf_ptp.h"
9 #include "idpf_virtchnl.h"
10 
11 struct idpf_tx_stash {
12 	struct hlist_node hlist;
13 	struct libeth_sqe buf;
14 };
15 
16 #define idpf_tx_buf_compl_tag(buf)	(*(u32 *)&(buf)->priv)
17 LIBETH_SQE_CHECK_PRIV(u32);
18 
19 /**
20  * idpf_chk_linearize - Check if skb exceeds max descriptors per packet
21  * @skb: send buffer
22  * @max_bufs: maximum scatter gather buffers for single packet
23  * @count: number of buffers this packet needs
24  *
25  * Make sure we don't exceed maximum scatter gather buffers for a single
26  * packet.
27  * TSO case has been handled earlier from idpf_features_check().
28  */
29 static bool idpf_chk_linearize(const struct sk_buff *skb,
30 			       unsigned int max_bufs,
31 			       unsigned int count)
32 {
33 	if (likely(count <= max_bufs))
34 		return false;
35 
36 	if (skb_is_gso(skb))
37 		return false;
38 
39 	return true;
40 }
41 
42 /**
43  * idpf_buf_lifo_push - push a buffer pointer onto stack
44  * @stack: pointer to stack struct
45  * @buf: pointer to buf to push
46  *
47  * Returns 0 on success, negative on failure
48  **/
49 static int idpf_buf_lifo_push(struct idpf_buf_lifo *stack,
50 			      struct idpf_tx_stash *buf)
51 {
52 	if (unlikely(stack->top == stack->size))
53 		return -ENOSPC;
54 
55 	stack->bufs[stack->top++] = buf;
56 
57 	return 0;
58 }
59 
60 /**
61  * idpf_buf_lifo_pop - pop a buffer pointer from stack
62  * @stack: pointer to stack struct
63  **/
64 static struct idpf_tx_stash *idpf_buf_lifo_pop(struct idpf_buf_lifo *stack)
65 {
66 	if (unlikely(!stack->top))
67 		return NULL;
68 
69 	return stack->bufs[--stack->top];
70 }
71 
72 /**
73  * idpf_tx_timeout - Respond to a Tx Hang
74  * @netdev: network interface device structure
75  * @txqueue: TX queue
76  */
77 void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
78 {
79 	struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);
80 
81 	adapter->tx_timeout_count++;
82 
83 	netdev_err(netdev, "Detected Tx timeout: Count %d, Queue %d\n",
84 		   adapter->tx_timeout_count, txqueue);
85 	if (!idpf_is_reset_in_prog(adapter)) {
86 		set_bit(IDPF_HR_FUNC_RESET, adapter->flags);
87 		queue_delayed_work(adapter->vc_event_wq,
88 				   &adapter->vc_event_task,
89 				   msecs_to_jiffies(10));
90 	}
91 }
92 
93 /**
94  * idpf_tx_buf_rel_all - Free any empty Tx buffers
95  * @txq: queue to be cleaned
96  */
97 static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
98 {
99 	struct libeth_sq_napi_stats ss = { };
100 	struct idpf_buf_lifo *buf_stack;
101 	struct idpf_tx_stash *stash;
102 	struct libeth_cq_pp cp = {
103 		.dev	= txq->dev,
104 		.ss	= &ss,
105 	};
106 	struct hlist_node *tmp;
107 	u32 i, tag;
108 
109 	/* Buffers already cleared, nothing to do */
110 	if (!txq->tx_buf)
111 		return;
112 
113 	/* Free all the Tx buffer sk_buffs */
114 	for (i = 0; i < txq->desc_count; i++)
115 		libeth_tx_complete(&txq->tx_buf[i], &cp);
116 
117 	kfree(txq->tx_buf);
118 	txq->tx_buf = NULL;
119 
120 	if (!idpf_queue_has(FLOW_SCH_EN, txq))
121 		return;
122 
123 	buf_stack = &txq->stash->buf_stack;
124 	if (!buf_stack->bufs)
125 		return;
126 
127 	/*
128 	 * If a Tx timeout occurred, there are potentially still bufs in the
129 	 * hash table, free them here.
130 	 */
131 	hash_for_each_safe(txq->stash->sched_buf_hash, tag, tmp, stash,
132 			   hlist) {
133 		if (!stash)
134 			continue;
135 
136 		libeth_tx_complete(&stash->buf, &cp);
137 		hash_del(&stash->hlist);
138 		idpf_buf_lifo_push(buf_stack, stash);
139 	}
140 
141 	for (i = 0; i < buf_stack->size; i++)
142 		kfree(buf_stack->bufs[i]);
143 
144 	kfree(buf_stack->bufs);
145 	buf_stack->bufs = NULL;
146 }
147 
148 /**
149  * idpf_tx_desc_rel - Free Tx resources per queue
150  * @txq: Tx descriptor ring for a specific queue
151  *
152  * Free all transmit software resources
153  */
154 static void idpf_tx_desc_rel(struct idpf_tx_queue *txq)
155 {
156 	idpf_tx_buf_rel_all(txq);
157 	netdev_tx_reset_subqueue(txq->netdev, txq->idx);
158 
159 	if (!txq->desc_ring)
160 		return;
161 
162 	dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma);
163 	txq->desc_ring = NULL;
164 	txq->next_to_use = 0;
165 	txq->next_to_clean = 0;
166 }
167 
168 /**
169  * idpf_compl_desc_rel - Free completion resources per queue
170  * @complq: completion queue
171  *
172  * Free all completion software resources.
173  */
174 static void idpf_compl_desc_rel(struct idpf_compl_queue *complq)
175 {
176 	if (!complq->comp)
177 		return;
178 
179 	dma_free_coherent(complq->netdev->dev.parent, complq->size,
180 			  complq->comp, complq->dma);
181 	complq->comp = NULL;
182 	complq->next_to_use = 0;
183 	complq->next_to_clean = 0;
184 }
185 
186 /**
187  * idpf_tx_desc_rel_all - Free Tx Resources for All Queues
188  * @vport: virtual port structure
189  *
190  * Free all transmit software resources
191  */
192 static void idpf_tx_desc_rel_all(struct idpf_vport *vport)
193 {
194 	int i, j;
195 
196 	if (!vport->txq_grps)
197 		return;
198 
199 	for (i = 0; i < vport->num_txq_grp; i++) {
200 		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
201 
202 		for (j = 0; j < txq_grp->num_txq; j++)
203 			idpf_tx_desc_rel(txq_grp->txqs[j]);
204 
205 		if (idpf_is_queue_model_split(vport->txq_model))
206 			idpf_compl_desc_rel(txq_grp->complq);
207 	}
208 }
209 
210 /**
211  * idpf_tx_buf_alloc_all - Allocate memory for all buffer resources
212  * @tx_q: queue for which the buffers are allocated
213  *
214  * Returns 0 on success, negative on failure
215  */
216 static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q)
217 {
218 	struct idpf_buf_lifo *buf_stack;
219 	int buf_size;
220 	int i;
221 
222 	/* Allocate book keeping buffers only. Buffers to be supplied to HW
223 	 * are allocated by kernel network stack and received as part of skb
224 	 */
225 	buf_size = sizeof(struct idpf_tx_buf) * tx_q->desc_count;
226 	tx_q->tx_buf = kzalloc(buf_size, GFP_KERNEL);
227 	if (!tx_q->tx_buf)
228 		return -ENOMEM;
229 
230 	if (!idpf_queue_has(FLOW_SCH_EN, tx_q))
231 		return 0;
232 
233 	buf_stack = &tx_q->stash->buf_stack;
234 
235 	/* Initialize tx buf stack for out-of-order completions if
236 	 * flow scheduling offload is enabled
237 	 */
238 	buf_stack->bufs = kcalloc(tx_q->desc_count, sizeof(*buf_stack->bufs),
239 				  GFP_KERNEL);
240 	if (!buf_stack->bufs)
241 		return -ENOMEM;
242 
243 	buf_stack->size = tx_q->desc_count;
244 	buf_stack->top = tx_q->desc_count;
245 
246 	for (i = 0; i < tx_q->desc_count; i++) {
247 		buf_stack->bufs[i] = kzalloc(sizeof(*buf_stack->bufs[i]),
248 					     GFP_KERNEL);
249 		if (!buf_stack->bufs[i])
250 			return -ENOMEM;
251 	}
252 
253 	return 0;
254 }
255 
256 /**
257  * idpf_tx_desc_alloc - Allocate the Tx descriptors
258  * @vport: vport to allocate resources for
259  * @tx_q: the tx ring to set up
260  *
261  * Returns 0 on success, negative on failure
262  */
263 static int idpf_tx_desc_alloc(const struct idpf_vport *vport,
264 			      struct idpf_tx_queue *tx_q)
265 {
266 	struct device *dev = tx_q->dev;
267 	int err;
268 
269 	err = idpf_tx_buf_alloc_all(tx_q);
270 	if (err)
271 		goto err_alloc;
272 
273 	tx_q->size = tx_q->desc_count * sizeof(*tx_q->base_tx);
274 
275 	/* Allocate descriptors also round up to nearest 4K */
276 	tx_q->size = ALIGN(tx_q->size, 4096);
277 	tx_q->desc_ring = dmam_alloc_coherent(dev, tx_q->size, &tx_q->dma,
278 					      GFP_KERNEL);
279 	if (!tx_q->desc_ring) {
280 		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
281 			tx_q->size);
282 		err = -ENOMEM;
283 		goto err_alloc;
284 	}
285 
286 	tx_q->next_to_use = 0;
287 	tx_q->next_to_clean = 0;
288 	idpf_queue_set(GEN_CHK, tx_q);
289 
290 	return 0;
291 
292 err_alloc:
293 	idpf_tx_desc_rel(tx_q);
294 
295 	return err;
296 }
297 
298 /**
299  * idpf_compl_desc_alloc - allocate completion descriptors
300  * @vport: vport to allocate resources for
301  * @complq: completion queue to set up
302  *
303  * Return: 0 on success, -errno on failure.
304  */
305 static int idpf_compl_desc_alloc(const struct idpf_vport *vport,
306 				 struct idpf_compl_queue *complq)
307 {
308 	complq->size = array_size(complq->desc_count, sizeof(*complq->comp));
309 
310 	complq->comp = dma_alloc_coherent(complq->netdev->dev.parent,
311 					  complq->size, &complq->dma,
312 					  GFP_KERNEL);
313 	if (!complq->comp)
314 		return -ENOMEM;
315 
316 	complq->next_to_use = 0;
317 	complq->next_to_clean = 0;
318 	idpf_queue_set(GEN_CHK, complq);
319 
320 	return 0;
321 }
322 
323 /**
324  * idpf_tx_desc_alloc_all - allocate all queues Tx resources
325  * @vport: virtual port private structure
326  *
327  * Returns 0 on success, negative on failure
328  */
329 static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
330 {
331 	int err = 0;
332 	int i, j;
333 
334 	/* Setup buffer queues. In single queue model buffer queues and
335 	 * completion queues will be same
336 	 */
337 	for (i = 0; i < vport->num_txq_grp; i++) {
338 		for (j = 0; j < vport->txq_grps[i].num_txq; j++) {
339 			struct idpf_tx_queue *txq = vport->txq_grps[i].txqs[j];
340 			u8 gen_bits = 0;
341 			u16 bufidx_mask;
342 
343 			err = idpf_tx_desc_alloc(vport, txq);
344 			if (err) {
345 				pci_err(vport->adapter->pdev,
346 					"Allocation for Tx Queue %u failed\n",
347 					i);
348 				goto err_out;
349 			}
350 
351 			if (!idpf_is_queue_model_split(vport->txq_model))
352 				continue;
353 
354 			txq->compl_tag_cur_gen = 0;
355 
356 			/* Determine the number of bits in the bufid
357 			 * mask and add one to get the start of the
358 			 * generation bits
359 			 */
360 			bufidx_mask = txq->desc_count - 1;
361 			while (bufidx_mask >> 1) {
362 				txq->compl_tag_gen_s++;
363 				bufidx_mask = bufidx_mask >> 1;
364 			}
365 			txq->compl_tag_gen_s++;
366 
367 			gen_bits = IDPF_TX_SPLITQ_COMPL_TAG_WIDTH -
368 							txq->compl_tag_gen_s;
369 			txq->compl_tag_gen_max = GETMAXVAL(gen_bits);
370 
371 			/* Set bufid mask based on location of first
372 			 * gen bit; it cannot simply be the descriptor
373 			 * ring size-1 since we can have size values
374 			 * where not all of those bits are set.
375 			 */
376 			txq->compl_tag_bufid_m =
377 				GETMAXVAL(txq->compl_tag_gen_s);
378 		}
379 
380 		if (!idpf_is_queue_model_split(vport->txq_model))
381 			continue;
382 
383 		/* Setup completion queues */
384 		err = idpf_compl_desc_alloc(vport, vport->txq_grps[i].complq);
385 		if (err) {
386 			pci_err(vport->adapter->pdev,
387 				"Allocation for Tx Completion Queue %u failed\n",
388 				i);
389 			goto err_out;
390 		}
391 	}
392 
393 err_out:
394 	if (err)
395 		idpf_tx_desc_rel_all(vport);
396 
397 	return err;
398 }
399 
400 /**
401  * idpf_rx_page_rel - Release an rx buffer page
402  * @rx_buf: the buffer to free
403  */
404 static void idpf_rx_page_rel(struct libeth_fqe *rx_buf)
405 {
406 	if (unlikely(!rx_buf->netmem))
407 		return;
408 
409 	libeth_rx_recycle_slow(rx_buf->netmem);
410 
411 	rx_buf->netmem = 0;
412 	rx_buf->offset = 0;
413 }
414 
415 /**
416  * idpf_rx_hdr_buf_rel_all - Release header buffer memory
417  * @bufq: queue to use
418  */
419 static void idpf_rx_hdr_buf_rel_all(struct idpf_buf_queue *bufq)
420 {
421 	struct libeth_fq fq = {
422 		.fqes	= bufq->hdr_buf,
423 		.pp	= bufq->hdr_pp,
424 	};
425 
426 	for (u32 i = 0; i < bufq->desc_count; i++)
427 		idpf_rx_page_rel(&bufq->hdr_buf[i]);
428 
429 	libeth_rx_fq_destroy(&fq);
430 	bufq->hdr_buf = NULL;
431 	bufq->hdr_pp = NULL;
432 }
433 
434 /**
435  * idpf_rx_buf_rel_bufq - Free all Rx buffer resources for a buffer queue
436  * @bufq: queue to be cleaned
437  */
438 static void idpf_rx_buf_rel_bufq(struct idpf_buf_queue *bufq)
439 {
440 	struct libeth_fq fq = {
441 		.fqes	= bufq->buf,
442 		.pp	= bufq->pp,
443 	};
444 
445 	/* queue already cleared, nothing to do */
446 	if (!bufq->buf)
447 		return;
448 
449 	/* Free all the bufs allocated and given to hw on Rx queue */
450 	for (u32 i = 0; i < bufq->desc_count; i++)
451 		idpf_rx_page_rel(&bufq->buf[i]);
452 
453 	if (idpf_queue_has(HSPLIT_EN, bufq))
454 		idpf_rx_hdr_buf_rel_all(bufq);
455 
456 	libeth_rx_fq_destroy(&fq);
457 	bufq->buf = NULL;
458 	bufq->pp = NULL;
459 }
460 
461 /**
462  * idpf_rx_buf_rel_all - Free all Rx buffer resources for a receive queue
463  * @rxq: queue to be cleaned
464  */
465 static void idpf_rx_buf_rel_all(struct idpf_rx_queue *rxq)
466 {
467 	struct libeth_fq fq = {
468 		.fqes	= rxq->rx_buf,
469 		.pp	= rxq->pp,
470 	};
471 
472 	if (!rxq->rx_buf)
473 		return;
474 
475 	for (u32 i = 0; i < rxq->desc_count; i++)
476 		idpf_rx_page_rel(&rxq->rx_buf[i]);
477 
478 	libeth_rx_fq_destroy(&fq);
479 	rxq->rx_buf = NULL;
480 	rxq->pp = NULL;
481 }
482 
483 /**
484  * idpf_rx_desc_rel - Free a specific Rx q resources
485  * @rxq: queue to clean the resources from
486  * @dev: device to free DMA memory
487  * @model: single or split queue model
488  *
489  * Free a specific rx queue resources
490  */
491 static void idpf_rx_desc_rel(struct idpf_rx_queue *rxq, struct device *dev,
492 			     u32 model)
493 {
494 	if (!rxq)
495 		return;
496 
497 	if (rxq->skb) {
498 		dev_kfree_skb_any(rxq->skb);
499 		rxq->skb = NULL;
500 	}
501 
502 	if (!idpf_is_queue_model_split(model))
503 		idpf_rx_buf_rel_all(rxq);
504 
505 	rxq->next_to_alloc = 0;
506 	rxq->next_to_clean = 0;
507 	rxq->next_to_use = 0;
508 	if (!rxq->desc_ring)
509 		return;
510 
511 	dmam_free_coherent(dev, rxq->size, rxq->desc_ring, rxq->dma);
512 	rxq->desc_ring = NULL;
513 }
514 
515 /**
516  * idpf_rx_desc_rel_bufq - free buffer queue resources
517  * @bufq: buffer queue to clean the resources from
518  * @dev: device to free DMA memory
519  */
520 static void idpf_rx_desc_rel_bufq(struct idpf_buf_queue *bufq,
521 				  struct device *dev)
522 {
523 	if (!bufq)
524 		return;
525 
526 	idpf_rx_buf_rel_bufq(bufq);
527 
528 	bufq->next_to_alloc = 0;
529 	bufq->next_to_clean = 0;
530 	bufq->next_to_use = 0;
531 
532 	if (!bufq->split_buf)
533 		return;
534 
535 	dma_free_coherent(dev, bufq->size, bufq->split_buf, bufq->dma);
536 	bufq->split_buf = NULL;
537 }
538 
539 /**
540  * idpf_rx_desc_rel_all - Free Rx Resources for All Queues
541  * @vport: virtual port structure
542  *
543  * Free all rx queues resources
544  */
545 static void idpf_rx_desc_rel_all(struct idpf_vport *vport)
546 {
547 	struct device *dev = &vport->adapter->pdev->dev;
548 	struct idpf_rxq_group *rx_qgrp;
549 	u16 num_rxq;
550 	int i, j;
551 
552 	if (!vport->rxq_grps)
553 		return;
554 
555 	for (i = 0; i < vport->num_rxq_grp; i++) {
556 		rx_qgrp = &vport->rxq_grps[i];
557 
558 		if (!idpf_is_queue_model_split(vport->rxq_model)) {
559 			for (j = 0; j < rx_qgrp->singleq.num_rxq; j++)
560 				idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j], dev,
561 						 VIRTCHNL2_QUEUE_MODEL_SINGLE);
562 			continue;
563 		}
564 
565 		num_rxq = rx_qgrp->splitq.num_rxq_sets;
566 		for (j = 0; j < num_rxq; j++)
567 			idpf_rx_desc_rel(&rx_qgrp->splitq.rxq_sets[j]->rxq,
568 					 dev, VIRTCHNL2_QUEUE_MODEL_SPLIT);
569 
570 		if (!rx_qgrp->splitq.bufq_sets)
571 			continue;
572 
573 		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
574 			struct idpf_bufq_set *bufq_set =
575 				&rx_qgrp->splitq.bufq_sets[j];
576 
577 			idpf_rx_desc_rel_bufq(&bufq_set->bufq, dev);
578 		}
579 	}
580 }
581 
582 /**
583  * idpf_rx_buf_hw_update - Store the new tail and head values
584  * @bufq: queue to bump
585  * @val: new head index
586  */
587 static void idpf_rx_buf_hw_update(struct idpf_buf_queue *bufq, u32 val)
588 {
589 	bufq->next_to_use = val;
590 
591 	if (unlikely(!bufq->tail))
592 		return;
593 
594 	/* writel has an implicit memory barrier */
595 	writel(val, bufq->tail);
596 }
597 
598 /**
599  * idpf_rx_hdr_buf_alloc_all - Allocate memory for header buffers
600  * @bufq: ring to use
601  *
602  * Returns 0 on success, negative on failure.
603  */
604 static int idpf_rx_hdr_buf_alloc_all(struct idpf_buf_queue *bufq)
605 {
606 	struct libeth_fq fq = {
607 		.count	= bufq->desc_count,
608 		.type	= LIBETH_FQE_HDR,
609 		.nid	= idpf_q_vector_to_mem(bufq->q_vector),
610 	};
611 	int ret;
612 
613 	ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi);
614 	if (ret)
615 		return ret;
616 
617 	bufq->hdr_pp = fq.pp;
618 	bufq->hdr_buf = fq.fqes;
619 	bufq->hdr_truesize = fq.truesize;
620 	bufq->rx_hbuf_size = fq.buf_len;
621 
622 	return 0;
623 }
624 
625 /**
626  * idpf_rx_post_buf_refill - Post buffer id to refill queue
627  * @refillq: refill queue to post to
628  * @buf_id: buffer id to post
629  */
630 static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
631 {
632 	u32 nta = refillq->next_to_use;
633 
634 	/* store the buffer ID and the SW maintained GEN bit to the refillq */
635 	refillq->ring[nta] =
636 		FIELD_PREP(IDPF_RX_BI_BUFID_M, buf_id) |
637 		FIELD_PREP(IDPF_RX_BI_GEN_M,
638 			   idpf_queue_has(GEN_CHK, refillq));
639 
640 	if (unlikely(++nta == refillq->desc_count)) {
641 		nta = 0;
642 		idpf_queue_change(GEN_CHK, refillq);
643 	}
644 
645 	refillq->next_to_use = nta;
646 }
647 
648 /**
649  * idpf_rx_post_buf_desc - Post buffer to bufq descriptor ring
650  * @bufq: buffer queue to post to
651  * @buf_id: buffer id to post
652  *
653  * Returns false if buffer could not be allocated, true otherwise.
654  */
655 static bool idpf_rx_post_buf_desc(struct idpf_buf_queue *bufq, u16 buf_id)
656 {
657 	struct virtchnl2_splitq_rx_buf_desc *splitq_rx_desc = NULL;
658 	struct libeth_fq_fp fq = {
659 		.count	= bufq->desc_count,
660 	};
661 	u16 nta = bufq->next_to_alloc;
662 	dma_addr_t addr;
663 
664 	splitq_rx_desc = &bufq->split_buf[nta];
665 
666 	if (idpf_queue_has(HSPLIT_EN, bufq)) {
667 		fq.pp = bufq->hdr_pp;
668 		fq.fqes = bufq->hdr_buf;
669 		fq.truesize = bufq->hdr_truesize;
670 
671 		addr = libeth_rx_alloc(&fq, buf_id);
672 		if (addr == DMA_MAPPING_ERROR)
673 			return false;
674 
675 		splitq_rx_desc->hdr_addr = cpu_to_le64(addr);
676 	}
677 
678 	fq.pp = bufq->pp;
679 	fq.fqes = bufq->buf;
680 	fq.truesize = bufq->truesize;
681 
682 	addr = libeth_rx_alloc(&fq, buf_id);
683 	if (addr == DMA_MAPPING_ERROR)
684 		return false;
685 
686 	splitq_rx_desc->pkt_addr = cpu_to_le64(addr);
687 	splitq_rx_desc->qword0.buf_id = cpu_to_le16(buf_id);
688 
689 	nta++;
690 	if (unlikely(nta == bufq->desc_count))
691 		nta = 0;
692 	bufq->next_to_alloc = nta;
693 
694 	return true;
695 }
696 
697 /**
698  * idpf_rx_post_init_bufs - Post initial buffers to bufq
699  * @bufq: buffer queue to post working set to
700  * @working_set: number of buffers to put in working set
701  *
702  * Returns true if @working_set bufs were posted successfully, false otherwise.
703  */
704 static bool idpf_rx_post_init_bufs(struct idpf_buf_queue *bufq,
705 				   u16 working_set)
706 {
707 	int i;
708 
709 	for (i = 0; i < working_set; i++) {
710 		if (!idpf_rx_post_buf_desc(bufq, i))
711 			return false;
712 	}
713 
714 	idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq->next_to_alloc,
715 					       IDPF_RX_BUF_STRIDE));
716 
717 	return true;
718 }
719 
720 /**
721  * idpf_rx_buf_alloc_singleq - Allocate memory for all buffer resources
722  * @rxq: queue for which the buffers are allocated
723  *
724  * Return: 0 on success, -ENOMEM on failure.
725  */
726 static int idpf_rx_buf_alloc_singleq(struct idpf_rx_queue *rxq)
727 {
728 	if (idpf_rx_singleq_buf_hw_alloc_all(rxq, rxq->desc_count - 1))
729 		goto err;
730 
731 	return 0;
732 
733 err:
734 	idpf_rx_buf_rel_all(rxq);
735 
736 	return -ENOMEM;
737 }
738 
739 /**
740  * idpf_rx_bufs_init_singleq - Initialize page pool and allocate Rx bufs
741  * @rxq: buffer queue to create page pool for
742  *
743  * Return: 0 on success, -errno on failure.
744  */
745 static int idpf_rx_bufs_init_singleq(struct idpf_rx_queue *rxq)
746 {
747 	struct libeth_fq fq = {
748 		.count	= rxq->desc_count,
749 		.type	= LIBETH_FQE_MTU,
750 		.nid	= idpf_q_vector_to_mem(rxq->q_vector),
751 	};
752 	int ret;
753 
754 	ret = libeth_rx_fq_create(&fq, &rxq->q_vector->napi);
755 	if (ret)
756 		return ret;
757 
758 	rxq->pp = fq.pp;
759 	rxq->rx_buf = fq.fqes;
760 	rxq->truesize = fq.truesize;
761 	rxq->rx_buf_size = fq.buf_len;
762 
763 	return idpf_rx_buf_alloc_singleq(rxq);
764 }
765 
766 /**
767  * idpf_rx_buf_alloc_all - Allocate memory for all buffer resources
768  * @rxbufq: queue for which the buffers are allocated
769  *
770  * Returns 0 on success, negative on failure
771  */
772 static int idpf_rx_buf_alloc_all(struct idpf_buf_queue *rxbufq)
773 {
774 	int err = 0;
775 
776 	if (idpf_queue_has(HSPLIT_EN, rxbufq)) {
777 		err = idpf_rx_hdr_buf_alloc_all(rxbufq);
778 		if (err)
779 			goto rx_buf_alloc_all_out;
780 	}
781 
782 	/* Allocate buffers to be given to HW.	 */
783 	if (!idpf_rx_post_init_bufs(rxbufq, IDPF_RX_BUFQ_WORKING_SET(rxbufq)))
784 		err = -ENOMEM;
785 
786 rx_buf_alloc_all_out:
787 	if (err)
788 		idpf_rx_buf_rel_bufq(rxbufq);
789 
790 	return err;
791 }
792 
793 /**
794  * idpf_rx_bufs_init - Initialize page pool, allocate rx bufs, and post to HW
795  * @bufq: buffer queue to create page pool for
796  * @type: type of Rx buffers to allocate
797  *
798  * Returns 0 on success, negative on failure
799  */
800 static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq,
801 			     enum libeth_fqe_type type)
802 {
803 	struct libeth_fq fq = {
804 		.truesize	= bufq->truesize,
805 		.count		= bufq->desc_count,
806 		.type		= type,
807 		.hsplit		= idpf_queue_has(HSPLIT_EN, bufq),
808 		.nid		= idpf_q_vector_to_mem(bufq->q_vector),
809 	};
810 	int ret;
811 
812 	ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi);
813 	if (ret)
814 		return ret;
815 
816 	bufq->pp = fq.pp;
817 	bufq->buf = fq.fqes;
818 	bufq->truesize = fq.truesize;
819 	bufq->rx_buf_size = fq.buf_len;
820 
821 	return idpf_rx_buf_alloc_all(bufq);
822 }
823 
824 /**
825  * idpf_rx_bufs_init_all - Initialize all RX bufs
826  * @vport: virtual port struct
827  *
828  * Returns 0 on success, negative on failure
829  */
830 int idpf_rx_bufs_init_all(struct idpf_vport *vport)
831 {
832 	bool split = idpf_is_queue_model_split(vport->rxq_model);
833 	int i, j, err;
834 
835 	for (i = 0; i < vport->num_rxq_grp; i++) {
836 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
837 		u32 truesize = 0;
838 
839 		/* Allocate bufs for the rxq itself in singleq */
840 		if (!split) {
841 			int num_rxq = rx_qgrp->singleq.num_rxq;
842 
843 			for (j = 0; j < num_rxq; j++) {
844 				struct idpf_rx_queue *q;
845 
846 				q = rx_qgrp->singleq.rxqs[j];
847 				err = idpf_rx_bufs_init_singleq(q);
848 				if (err)
849 					return err;
850 			}
851 
852 			continue;
853 		}
854 
855 		/* Otherwise, allocate bufs for the buffer queues */
856 		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
857 			enum libeth_fqe_type type;
858 			struct idpf_buf_queue *q;
859 
860 			q = &rx_qgrp->splitq.bufq_sets[j].bufq;
861 			q->truesize = truesize;
862 
863 			type = truesize ? LIBETH_FQE_SHORT : LIBETH_FQE_MTU;
864 
865 			err = idpf_rx_bufs_init(q, type);
866 			if (err)
867 				return err;
868 
869 			truesize = q->truesize >> 1;
870 		}
871 	}
872 
873 	return 0;
874 }
875 
876 /**
877  * idpf_rx_desc_alloc - Allocate queue Rx resources
878  * @vport: vport to allocate resources for
879  * @rxq: Rx queue for which the resources are setup
880  *
881  * Returns 0 on success, negative on failure
882  */
883 static int idpf_rx_desc_alloc(const struct idpf_vport *vport,
884 			      struct idpf_rx_queue *rxq)
885 {
886 	struct device *dev = &vport->adapter->pdev->dev;
887 
888 	rxq->size = rxq->desc_count * sizeof(union virtchnl2_rx_desc);
889 
890 	/* Allocate descriptors and also round up to nearest 4K */
891 	rxq->size = ALIGN(rxq->size, 4096);
892 	rxq->desc_ring = dmam_alloc_coherent(dev, rxq->size,
893 					     &rxq->dma, GFP_KERNEL);
894 	if (!rxq->desc_ring) {
895 		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
896 			rxq->size);
897 		return -ENOMEM;
898 	}
899 
900 	rxq->next_to_alloc = 0;
901 	rxq->next_to_clean = 0;
902 	rxq->next_to_use = 0;
903 	idpf_queue_set(GEN_CHK, rxq);
904 
905 	return 0;
906 }
907 
908 /**
909  * idpf_bufq_desc_alloc - Allocate buffer queue descriptor ring
910  * @vport: vport to allocate resources for
911  * @bufq: buffer queue for which the resources are set up
912  *
913  * Return: 0 on success, -ENOMEM on failure.
914  */
915 static int idpf_bufq_desc_alloc(const struct idpf_vport *vport,
916 				struct idpf_buf_queue *bufq)
917 {
918 	struct device *dev = &vport->adapter->pdev->dev;
919 
920 	bufq->size = array_size(bufq->desc_count, sizeof(*bufq->split_buf));
921 
922 	bufq->split_buf = dma_alloc_coherent(dev, bufq->size, &bufq->dma,
923 					     GFP_KERNEL);
924 	if (!bufq->split_buf)
925 		return -ENOMEM;
926 
927 	bufq->next_to_alloc = 0;
928 	bufq->next_to_clean = 0;
929 	bufq->next_to_use = 0;
930 
931 	idpf_queue_set(GEN_CHK, bufq);
932 
933 	return 0;
934 }
935 
936 /**
937  * idpf_rx_desc_alloc_all - allocate all RX queues resources
938  * @vport: virtual port structure
939  *
940  * Returns 0 on success, negative on failure
941  */
942 static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
943 {
944 	struct idpf_rxq_group *rx_qgrp;
945 	int i, j, err;
946 	u16 num_rxq;
947 
948 	for (i = 0; i < vport->num_rxq_grp; i++) {
949 		rx_qgrp = &vport->rxq_grps[i];
950 		if (idpf_is_queue_model_split(vport->rxq_model))
951 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
952 		else
953 			num_rxq = rx_qgrp->singleq.num_rxq;
954 
955 		for (j = 0; j < num_rxq; j++) {
956 			struct idpf_rx_queue *q;
957 
958 			if (idpf_is_queue_model_split(vport->rxq_model))
959 				q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
960 			else
961 				q = rx_qgrp->singleq.rxqs[j];
962 
963 			err = idpf_rx_desc_alloc(vport, q);
964 			if (err) {
965 				pci_err(vport->adapter->pdev,
966 					"Memory allocation for Rx Queue %u failed\n",
967 					i);
968 				goto err_out;
969 			}
970 		}
971 
972 		if (!idpf_is_queue_model_split(vport->rxq_model))
973 			continue;
974 
975 		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
976 			struct idpf_buf_queue *q;
977 
978 			q = &rx_qgrp->splitq.bufq_sets[j].bufq;
979 
980 			err = idpf_bufq_desc_alloc(vport, q);
981 			if (err) {
982 				pci_err(vport->adapter->pdev,
983 					"Memory allocation for Rx Buffer Queue %u failed\n",
984 					i);
985 				goto err_out;
986 			}
987 		}
988 	}
989 
990 	return 0;
991 
992 err_out:
993 	idpf_rx_desc_rel_all(vport);
994 
995 	return err;
996 }
997 
998 /**
999  * idpf_txq_group_rel - Release all resources for txq groups
1000  * @vport: vport to release txq groups on
1001  */
1002 static void idpf_txq_group_rel(struct idpf_vport *vport)
1003 {
1004 	bool split, flow_sch_en;
1005 	int i, j;
1006 
1007 	if (!vport->txq_grps)
1008 		return;
1009 
1010 	split = idpf_is_queue_model_split(vport->txq_model);
1011 	flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
1012 				       VIRTCHNL2_CAP_SPLITQ_QSCHED);
1013 
1014 	for (i = 0; i < vport->num_txq_grp; i++) {
1015 		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
1016 
1017 		for (j = 0; j < txq_grp->num_txq; j++) {
1018 			kfree(txq_grp->txqs[j]);
1019 			txq_grp->txqs[j] = NULL;
1020 		}
1021 
1022 		if (!split)
1023 			continue;
1024 
1025 		kfree(txq_grp->complq);
1026 		txq_grp->complq = NULL;
1027 
1028 		if (flow_sch_en)
1029 			kfree(txq_grp->stashes);
1030 	}
1031 	kfree(vport->txq_grps);
1032 	vport->txq_grps = NULL;
1033 }
1034 
1035 /**
1036  * idpf_rxq_sw_queue_rel - Release software queue resources
1037  * @rx_qgrp: rx queue group with software queues
1038  */
1039 static void idpf_rxq_sw_queue_rel(struct idpf_rxq_group *rx_qgrp)
1040 {
1041 	int i, j;
1042 
1043 	for (i = 0; i < rx_qgrp->vport->num_bufqs_per_qgrp; i++) {
1044 		struct idpf_bufq_set *bufq_set = &rx_qgrp->splitq.bufq_sets[i];
1045 
1046 		for (j = 0; j < bufq_set->num_refillqs; j++) {
1047 			kfree(bufq_set->refillqs[j].ring);
1048 			bufq_set->refillqs[j].ring = NULL;
1049 		}
1050 		kfree(bufq_set->refillqs);
1051 		bufq_set->refillqs = NULL;
1052 	}
1053 }
1054 
1055 /**
1056  * idpf_rxq_group_rel - Release all resources for rxq groups
1057  * @vport: vport to release rxq groups on
1058  */
1059 static void idpf_rxq_group_rel(struct idpf_vport *vport)
1060 {
1061 	int i;
1062 
1063 	if (!vport->rxq_grps)
1064 		return;
1065 
1066 	for (i = 0; i < vport->num_rxq_grp; i++) {
1067 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1068 		u16 num_rxq;
1069 		int j;
1070 
1071 		if (idpf_is_queue_model_split(vport->rxq_model)) {
1072 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
1073 			for (j = 0; j < num_rxq; j++) {
1074 				kfree(rx_qgrp->splitq.rxq_sets[j]);
1075 				rx_qgrp->splitq.rxq_sets[j] = NULL;
1076 			}
1077 
1078 			idpf_rxq_sw_queue_rel(rx_qgrp);
1079 			kfree(rx_qgrp->splitq.bufq_sets);
1080 			rx_qgrp->splitq.bufq_sets = NULL;
1081 		} else {
1082 			num_rxq = rx_qgrp->singleq.num_rxq;
1083 			for (j = 0; j < num_rxq; j++) {
1084 				kfree(rx_qgrp->singleq.rxqs[j]);
1085 				rx_qgrp->singleq.rxqs[j] = NULL;
1086 			}
1087 		}
1088 	}
1089 	kfree(vport->rxq_grps);
1090 	vport->rxq_grps = NULL;
1091 }
1092 
1093 /**
1094  * idpf_vport_queue_grp_rel_all - Release all queue groups
1095  * @vport: vport to release queue groups for
1096  */
1097 static void idpf_vport_queue_grp_rel_all(struct idpf_vport *vport)
1098 {
1099 	idpf_txq_group_rel(vport);
1100 	idpf_rxq_group_rel(vport);
1101 }
1102 
1103 /**
1104  * idpf_vport_queues_rel - Free memory for all queues
1105  * @vport: virtual port
1106  *
1107  * Free the memory allocated for queues associated to a vport
1108  */
1109 void idpf_vport_queues_rel(struct idpf_vport *vport)
1110 {
1111 	idpf_tx_desc_rel_all(vport);
1112 	idpf_rx_desc_rel_all(vport);
1113 	idpf_vport_queue_grp_rel_all(vport);
1114 
1115 	kfree(vport->txqs);
1116 	vport->txqs = NULL;
1117 }
1118 
1119 /**
1120  * idpf_vport_init_fast_path_txqs - Initialize fast path txq array
1121  * @vport: vport to init txqs on
1122  *
1123  * We get a queue index from skb->queue_mapping and we need a fast way to
1124  * dereference the queue from queue groups.  This allows us to quickly pull a
1125  * txq based on a queue index.
1126  *
1127  * Returns 0 on success, negative on failure
1128  */
1129 static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
1130 {
1131 	struct idpf_ptp_vport_tx_tstamp_caps *caps = vport->tx_tstamp_caps;
1132 	struct work_struct *tstamp_task = &vport->tstamp_task;
1133 	int i, j, k = 0;
1134 
1135 	vport->txqs = kcalloc(vport->num_txq, sizeof(*vport->txqs),
1136 			      GFP_KERNEL);
1137 
1138 	if (!vport->txqs)
1139 		return -ENOMEM;
1140 
1141 	for (i = 0; i < vport->num_txq_grp; i++) {
1142 		struct idpf_txq_group *tx_grp = &vport->txq_grps[i];
1143 
1144 		for (j = 0; j < tx_grp->num_txq; j++, k++) {
1145 			vport->txqs[k] = tx_grp->txqs[j];
1146 			vport->txqs[k]->idx = k;
1147 
1148 			if (!caps)
1149 				continue;
1150 
1151 			vport->txqs[k]->cached_tstamp_caps = caps;
1152 			vport->txqs[k]->tstamp_task = tstamp_task;
1153 		}
1154 	}
1155 
1156 	return 0;
1157 }
1158 
1159 /**
1160  * idpf_vport_init_num_qs - Initialize number of queues
1161  * @vport: vport to initialize queues
1162  * @vport_msg: data to be filled into vport
1163  */
1164 void idpf_vport_init_num_qs(struct idpf_vport *vport,
1165 			    struct virtchnl2_create_vport *vport_msg)
1166 {
1167 	struct idpf_vport_user_config_data *config_data;
1168 	u16 idx = vport->idx;
1169 
1170 	config_data = &vport->adapter->vport_config[idx]->user_config;
1171 	vport->num_txq = le16_to_cpu(vport_msg->num_tx_q);
1172 	vport->num_rxq = le16_to_cpu(vport_msg->num_rx_q);
1173 	/* number of txqs and rxqs in config data will be zeros only in the
1174 	 * driver load path and we dont update them there after
1175 	 */
1176 	if (!config_data->num_req_tx_qs && !config_data->num_req_rx_qs) {
1177 		config_data->num_req_tx_qs = le16_to_cpu(vport_msg->num_tx_q);
1178 		config_data->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q);
1179 	}
1180 
1181 	if (idpf_is_queue_model_split(vport->txq_model))
1182 		vport->num_complq = le16_to_cpu(vport_msg->num_tx_complq);
1183 	if (idpf_is_queue_model_split(vport->rxq_model))
1184 		vport->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq);
1185 
1186 	/* Adjust number of buffer queues per Rx queue group. */
1187 	if (!idpf_is_queue_model_split(vport->rxq_model)) {
1188 		vport->num_bufqs_per_qgrp = 0;
1189 
1190 		return;
1191 	}
1192 
1193 	vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP;
1194 }
1195 
1196 /**
1197  * idpf_vport_calc_num_q_desc - Calculate number of queue groups
1198  * @vport: vport to calculate q groups for
1199  */
1200 void idpf_vport_calc_num_q_desc(struct idpf_vport *vport)
1201 {
1202 	struct idpf_vport_user_config_data *config_data;
1203 	int num_bufqs = vport->num_bufqs_per_qgrp;
1204 	u32 num_req_txq_desc, num_req_rxq_desc;
1205 	u16 idx = vport->idx;
1206 	int i;
1207 
1208 	config_data =  &vport->adapter->vport_config[idx]->user_config;
1209 	num_req_txq_desc = config_data->num_req_txq_desc;
1210 	num_req_rxq_desc = config_data->num_req_rxq_desc;
1211 
1212 	vport->complq_desc_count = 0;
1213 	if (num_req_txq_desc) {
1214 		vport->txq_desc_count = num_req_txq_desc;
1215 		if (idpf_is_queue_model_split(vport->txq_model)) {
1216 			vport->complq_desc_count = num_req_txq_desc;
1217 			if (vport->complq_desc_count < IDPF_MIN_TXQ_COMPLQ_DESC)
1218 				vport->complq_desc_count =
1219 					IDPF_MIN_TXQ_COMPLQ_DESC;
1220 		}
1221 	} else {
1222 		vport->txq_desc_count =	IDPF_DFLT_TX_Q_DESC_COUNT;
1223 		if (idpf_is_queue_model_split(vport->txq_model))
1224 			vport->complq_desc_count =
1225 				IDPF_DFLT_TX_COMPLQ_DESC_COUNT;
1226 	}
1227 
1228 	if (num_req_rxq_desc)
1229 		vport->rxq_desc_count = num_req_rxq_desc;
1230 	else
1231 		vport->rxq_desc_count = IDPF_DFLT_RX_Q_DESC_COUNT;
1232 
1233 	for (i = 0; i < num_bufqs; i++) {
1234 		if (!vport->bufq_desc_count[i])
1235 			vport->bufq_desc_count[i] =
1236 				IDPF_RX_BUFQ_DESC_COUNT(vport->rxq_desc_count,
1237 							num_bufqs);
1238 	}
1239 }
1240 
1241 /**
1242  * idpf_vport_calc_total_qs - Calculate total number of queues
1243  * @adapter: private data struct
1244  * @vport_idx: vport idx to retrieve vport pointer
1245  * @vport_msg: message to fill with data
1246  * @max_q: vport max queue info
1247  *
1248  * Return 0 on success, error value on failure.
1249  */
1250 int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx,
1251 			     struct virtchnl2_create_vport *vport_msg,
1252 			     struct idpf_vport_max_q *max_q)
1253 {
1254 	int dflt_splitq_txq_grps = 0, dflt_singleq_txqs = 0;
1255 	int dflt_splitq_rxq_grps = 0, dflt_singleq_rxqs = 0;
1256 	u16 num_req_tx_qs = 0, num_req_rx_qs = 0;
1257 	struct idpf_vport_config *vport_config;
1258 	u16 num_txq_grps, num_rxq_grps;
1259 	u32 num_qs;
1260 
1261 	vport_config = adapter->vport_config[vport_idx];
1262 	if (vport_config) {
1263 		num_req_tx_qs = vport_config->user_config.num_req_tx_qs;
1264 		num_req_rx_qs = vport_config->user_config.num_req_rx_qs;
1265 	} else {
1266 		int num_cpus;
1267 
1268 		/* Restrict num of queues to cpus online as a default
1269 		 * configuration to give best performance. User can always
1270 		 * override to a max number of queues via ethtool.
1271 		 */
1272 		num_cpus = num_online_cpus();
1273 
1274 		dflt_splitq_txq_grps = min_t(int, max_q->max_txq, num_cpus);
1275 		dflt_singleq_txqs = min_t(int, max_q->max_txq, num_cpus);
1276 		dflt_splitq_rxq_grps = min_t(int, max_q->max_rxq, num_cpus);
1277 		dflt_singleq_rxqs = min_t(int, max_q->max_rxq, num_cpus);
1278 	}
1279 
1280 	if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->txq_model))) {
1281 		num_txq_grps = num_req_tx_qs ? num_req_tx_qs : dflt_splitq_txq_grps;
1282 		vport_msg->num_tx_complq = cpu_to_le16(num_txq_grps *
1283 						       IDPF_COMPLQ_PER_GROUP);
1284 		vport_msg->num_tx_q = cpu_to_le16(num_txq_grps *
1285 						  IDPF_DFLT_SPLITQ_TXQ_PER_GROUP);
1286 	} else {
1287 		num_txq_grps = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
1288 		num_qs = num_txq_grps * (num_req_tx_qs ? num_req_tx_qs :
1289 					 dflt_singleq_txqs);
1290 		vport_msg->num_tx_q = cpu_to_le16(num_qs);
1291 		vport_msg->num_tx_complq = 0;
1292 	}
1293 	if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->rxq_model))) {
1294 		num_rxq_grps = num_req_rx_qs ? num_req_rx_qs : dflt_splitq_rxq_grps;
1295 		vport_msg->num_rx_bufq = cpu_to_le16(num_rxq_grps *
1296 						     IDPF_MAX_BUFQS_PER_RXQ_GRP);
1297 		vport_msg->num_rx_q = cpu_to_le16(num_rxq_grps *
1298 						  IDPF_DFLT_SPLITQ_RXQ_PER_GROUP);
1299 	} else {
1300 		num_rxq_grps = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
1301 		num_qs = num_rxq_grps * (num_req_rx_qs ? num_req_rx_qs :
1302 					 dflt_singleq_rxqs);
1303 		vport_msg->num_rx_q = cpu_to_le16(num_qs);
1304 		vport_msg->num_rx_bufq = 0;
1305 	}
1306 
1307 	return 0;
1308 }
1309 
1310 /**
1311  * idpf_vport_calc_num_q_groups - Calculate number of queue groups
1312  * @vport: vport to calculate q groups for
1313  */
1314 void idpf_vport_calc_num_q_groups(struct idpf_vport *vport)
1315 {
1316 	if (idpf_is_queue_model_split(vport->txq_model))
1317 		vport->num_txq_grp = vport->num_txq;
1318 	else
1319 		vport->num_txq_grp = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
1320 
1321 	if (idpf_is_queue_model_split(vport->rxq_model))
1322 		vport->num_rxq_grp = vport->num_rxq;
1323 	else
1324 		vport->num_rxq_grp = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
1325 }
1326 
1327 /**
1328  * idpf_vport_calc_numq_per_grp - Calculate number of queues per group
1329  * @vport: vport to calculate queues for
1330  * @num_txq: return parameter for number of TX queues
1331  * @num_rxq: return parameter for number of RX queues
1332  */
1333 static void idpf_vport_calc_numq_per_grp(struct idpf_vport *vport,
1334 					 u16 *num_txq, u16 *num_rxq)
1335 {
1336 	if (idpf_is_queue_model_split(vport->txq_model))
1337 		*num_txq = IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;
1338 	else
1339 		*num_txq = vport->num_txq;
1340 
1341 	if (idpf_is_queue_model_split(vport->rxq_model))
1342 		*num_rxq = IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
1343 	else
1344 		*num_rxq = vport->num_rxq;
1345 }
1346 
1347 /**
1348  * idpf_rxq_set_descids - set the descids supported by this queue
1349  * @vport: virtual port data structure
1350  * @q: rx queue for which descids are set
1351  *
1352  */
1353 static void idpf_rxq_set_descids(const struct idpf_vport *vport,
1354 				 struct idpf_rx_queue *q)
1355 {
1356 	if (idpf_is_queue_model_split(vport->rxq_model)) {
1357 		q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
1358 	} else {
1359 		if (vport->base_rxd)
1360 			q->rxdids = VIRTCHNL2_RXDID_1_32B_BASE_M;
1361 		else
1362 			q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
1363 	}
1364 }
1365 
1366 /**
1367  * idpf_txq_group_alloc - Allocate all txq group resources
1368  * @vport: vport to allocate txq groups for
1369  * @num_txq: number of txqs to allocate for each group
1370  *
1371  * Returns 0 on success, negative on failure
1372  */
1373 static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
1374 {
1375 	bool split, flow_sch_en;
1376 	int i;
1377 
1378 	vport->txq_grps = kcalloc(vport->num_txq_grp,
1379 				  sizeof(*vport->txq_grps), GFP_KERNEL);
1380 	if (!vport->txq_grps)
1381 		return -ENOMEM;
1382 
1383 	split = idpf_is_queue_model_split(vport->txq_model);
1384 	flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
1385 				       VIRTCHNL2_CAP_SPLITQ_QSCHED);
1386 
1387 	for (i = 0; i < vport->num_txq_grp; i++) {
1388 		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
1389 		struct idpf_adapter *adapter = vport->adapter;
1390 		struct idpf_txq_stash *stashes;
1391 		int j;
1392 
1393 		tx_qgrp->vport = vport;
1394 		tx_qgrp->num_txq = num_txq;
1395 
1396 		for (j = 0; j < tx_qgrp->num_txq; j++) {
1397 			tx_qgrp->txqs[j] = kzalloc(sizeof(*tx_qgrp->txqs[j]),
1398 						   GFP_KERNEL);
1399 			if (!tx_qgrp->txqs[j])
1400 				goto err_alloc;
1401 		}
1402 
1403 		if (split && flow_sch_en) {
1404 			stashes = kcalloc(num_txq, sizeof(*stashes),
1405 					  GFP_KERNEL);
1406 			if (!stashes)
1407 				goto err_alloc;
1408 
1409 			tx_qgrp->stashes = stashes;
1410 		}
1411 
1412 		for (j = 0; j < tx_qgrp->num_txq; j++) {
1413 			struct idpf_tx_queue *q = tx_qgrp->txqs[j];
1414 
1415 			q->dev = &adapter->pdev->dev;
1416 			q->desc_count = vport->txq_desc_count;
1417 			q->tx_max_bufs = idpf_get_max_tx_bufs(adapter);
1418 			q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter);
1419 			q->netdev = vport->netdev;
1420 			q->txq_grp = tx_qgrp;
1421 
1422 			if (!split) {
1423 				q->clean_budget = vport->compln_clean_budget;
1424 				idpf_queue_assign(CRC_EN, q,
1425 						  vport->crc_enable);
1426 			}
1427 
1428 			if (!flow_sch_en)
1429 				continue;
1430 
1431 			if (split) {
1432 				q->stash = &stashes[j];
1433 				hash_init(q->stash->sched_buf_hash);
1434 			}
1435 
1436 			idpf_queue_set(FLOW_SCH_EN, q);
1437 		}
1438 
1439 		if (!split)
1440 			continue;
1441 
1442 		tx_qgrp->complq = kcalloc(IDPF_COMPLQ_PER_GROUP,
1443 					  sizeof(*tx_qgrp->complq),
1444 					  GFP_KERNEL);
1445 		if (!tx_qgrp->complq)
1446 			goto err_alloc;
1447 
1448 		tx_qgrp->complq->desc_count = vport->complq_desc_count;
1449 		tx_qgrp->complq->txq_grp = tx_qgrp;
1450 		tx_qgrp->complq->netdev = vport->netdev;
1451 		tx_qgrp->complq->clean_budget = vport->compln_clean_budget;
1452 
1453 		if (flow_sch_en)
1454 			idpf_queue_set(FLOW_SCH_EN, tx_qgrp->complq);
1455 	}
1456 
1457 	return 0;
1458 
1459 err_alloc:
1460 	idpf_txq_group_rel(vport);
1461 
1462 	return -ENOMEM;
1463 }
1464 
1465 /**
1466  * idpf_rxq_group_alloc - Allocate all rxq group resources
1467  * @vport: vport to allocate rxq groups for
1468  * @num_rxq: number of rxqs to allocate for each group
1469  *
1470  * Returns 0 on success, negative on failure
1471  */
1472 static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
1473 {
1474 	int i, k, err = 0;
1475 	bool hs;
1476 
1477 	vport->rxq_grps = kcalloc(vport->num_rxq_grp,
1478 				  sizeof(struct idpf_rxq_group), GFP_KERNEL);
1479 	if (!vport->rxq_grps)
1480 		return -ENOMEM;
1481 
1482 	hs = idpf_vport_get_hsplit(vport) == ETHTOOL_TCP_DATA_SPLIT_ENABLED;
1483 
1484 	for (i = 0; i < vport->num_rxq_grp; i++) {
1485 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1486 		int j;
1487 
1488 		rx_qgrp->vport = vport;
1489 		if (!idpf_is_queue_model_split(vport->rxq_model)) {
1490 			rx_qgrp->singleq.num_rxq = num_rxq;
1491 			for (j = 0; j < num_rxq; j++) {
1492 				rx_qgrp->singleq.rxqs[j] =
1493 						kzalloc(sizeof(*rx_qgrp->singleq.rxqs[j]),
1494 							GFP_KERNEL);
1495 				if (!rx_qgrp->singleq.rxqs[j]) {
1496 					err = -ENOMEM;
1497 					goto err_alloc;
1498 				}
1499 			}
1500 			goto skip_splitq_rx_init;
1501 		}
1502 		rx_qgrp->splitq.num_rxq_sets = num_rxq;
1503 
1504 		for (j = 0; j < num_rxq; j++) {
1505 			rx_qgrp->splitq.rxq_sets[j] =
1506 				kzalloc(sizeof(struct idpf_rxq_set),
1507 					GFP_KERNEL);
1508 			if (!rx_qgrp->splitq.rxq_sets[j]) {
1509 				err = -ENOMEM;
1510 				goto err_alloc;
1511 			}
1512 		}
1513 
1514 		rx_qgrp->splitq.bufq_sets = kcalloc(vport->num_bufqs_per_qgrp,
1515 						    sizeof(struct idpf_bufq_set),
1516 						    GFP_KERNEL);
1517 		if (!rx_qgrp->splitq.bufq_sets) {
1518 			err = -ENOMEM;
1519 			goto err_alloc;
1520 		}
1521 
1522 		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
1523 			struct idpf_bufq_set *bufq_set =
1524 				&rx_qgrp->splitq.bufq_sets[j];
1525 			int swq_size = sizeof(struct idpf_sw_queue);
1526 			struct idpf_buf_queue *q;
1527 
1528 			q = &rx_qgrp->splitq.bufq_sets[j].bufq;
1529 			q->desc_count = vport->bufq_desc_count[j];
1530 			q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
1531 
1532 			idpf_queue_assign(HSPLIT_EN, q, hs);
1533 
1534 			bufq_set->num_refillqs = num_rxq;
1535 			bufq_set->refillqs = kcalloc(num_rxq, swq_size,
1536 						     GFP_KERNEL);
1537 			if (!bufq_set->refillqs) {
1538 				err = -ENOMEM;
1539 				goto err_alloc;
1540 			}
1541 			for (k = 0; k < bufq_set->num_refillqs; k++) {
1542 				struct idpf_sw_queue *refillq =
1543 					&bufq_set->refillqs[k];
1544 
1545 				refillq->desc_count =
1546 					vport->bufq_desc_count[j];
1547 				idpf_queue_set(GEN_CHK, refillq);
1548 				idpf_queue_set(RFL_GEN_CHK, refillq);
1549 				refillq->ring = kcalloc(refillq->desc_count,
1550 							sizeof(*refillq->ring),
1551 							GFP_KERNEL);
1552 				if (!refillq->ring) {
1553 					err = -ENOMEM;
1554 					goto err_alloc;
1555 				}
1556 			}
1557 		}
1558 
1559 skip_splitq_rx_init:
1560 		for (j = 0; j < num_rxq; j++) {
1561 			struct idpf_rx_queue *q;
1562 
1563 			if (!idpf_is_queue_model_split(vport->rxq_model)) {
1564 				q = rx_qgrp->singleq.rxqs[j];
1565 				goto setup_rxq;
1566 			}
1567 			q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
1568 			rx_qgrp->splitq.rxq_sets[j]->refillq[0] =
1569 			      &rx_qgrp->splitq.bufq_sets[0].refillqs[j];
1570 			if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP)
1571 				rx_qgrp->splitq.rxq_sets[j]->refillq[1] =
1572 				      &rx_qgrp->splitq.bufq_sets[1].refillqs[j];
1573 
1574 			idpf_queue_assign(HSPLIT_EN, q, hs);
1575 
1576 setup_rxq:
1577 			q->desc_count = vport->rxq_desc_count;
1578 			q->rx_ptype_lkup = vport->rx_ptype_lkup;
1579 			q->netdev = vport->netdev;
1580 			q->bufq_sets = rx_qgrp->splitq.bufq_sets;
1581 			q->idx = (i * num_rxq) + j;
1582 			q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
1583 			q->rx_max_pkt_size = vport->netdev->mtu +
1584 							LIBETH_RX_LL_LEN;
1585 			idpf_rxq_set_descids(vport, q);
1586 		}
1587 	}
1588 
1589 err_alloc:
1590 	if (err)
1591 		idpf_rxq_group_rel(vport);
1592 
1593 	return err;
1594 }
1595 
1596 /**
1597  * idpf_vport_queue_grp_alloc_all - Allocate all queue groups/resources
1598  * @vport: vport with qgrps to allocate
1599  *
1600  * Returns 0 on success, negative on failure
1601  */
1602 static int idpf_vport_queue_grp_alloc_all(struct idpf_vport *vport)
1603 {
1604 	u16 num_txq, num_rxq;
1605 	int err;
1606 
1607 	idpf_vport_calc_numq_per_grp(vport, &num_txq, &num_rxq);
1608 
1609 	err = idpf_txq_group_alloc(vport, num_txq);
1610 	if (err)
1611 		goto err_out;
1612 
1613 	err = idpf_rxq_group_alloc(vport, num_rxq);
1614 	if (err)
1615 		goto err_out;
1616 
1617 	return 0;
1618 
1619 err_out:
1620 	idpf_vport_queue_grp_rel_all(vport);
1621 
1622 	return err;
1623 }
1624 
1625 /**
1626  * idpf_vport_queues_alloc - Allocate memory for all queues
1627  * @vport: virtual port
1628  *
1629  * Allocate memory for queues associated with a vport.  Returns 0 on success,
1630  * negative on failure.
1631  */
1632 int idpf_vport_queues_alloc(struct idpf_vport *vport)
1633 {
1634 	int err;
1635 
1636 	err = idpf_vport_queue_grp_alloc_all(vport);
1637 	if (err)
1638 		goto err_out;
1639 
1640 	err = idpf_tx_desc_alloc_all(vport);
1641 	if (err)
1642 		goto err_out;
1643 
1644 	err = idpf_rx_desc_alloc_all(vport);
1645 	if (err)
1646 		goto err_out;
1647 
1648 	err = idpf_vport_init_fast_path_txqs(vport);
1649 	if (err)
1650 		goto err_out;
1651 
1652 	return 0;
1653 
1654 err_out:
1655 	idpf_vport_queues_rel(vport);
1656 
1657 	return err;
1658 }
1659 
1660 /**
1661  * idpf_tx_handle_sw_marker - Handle queue marker packet
1662  * @tx_q: tx queue to handle software marker
1663  */
1664 static void idpf_tx_handle_sw_marker(struct idpf_tx_queue *tx_q)
1665 {
1666 	struct idpf_netdev_priv *priv = netdev_priv(tx_q->netdev);
1667 	struct idpf_vport *vport = priv->vport;
1668 	int i;
1669 
1670 	idpf_queue_clear(SW_MARKER, tx_q);
1671 	/* Hardware must write marker packets to all queues associated with
1672 	 * completion queues. So check if all queues received marker packets
1673 	 */
1674 	for (i = 0; i < vport->num_txq; i++)
1675 		/* If we're still waiting on any other TXQ marker completions,
1676 		 * just return now since we cannot wake up the marker_wq yet.
1677 		 */
1678 		if (idpf_queue_has(SW_MARKER, vport->txqs[i]))
1679 			return;
1680 
1681 	/* Drain complete */
1682 	set_bit(IDPF_VPORT_SW_MARKER, vport->flags);
1683 	wake_up(&vport->sw_marker_wq);
1684 }
1685 
1686 /**
1687  * idpf_tx_read_tstamp - schedule a work to read Tx timestamp value
1688  * @txq: queue to read the timestamp from
1689  * @skb: socket buffer to provide Tx timestamp value
1690  *
1691  * Schedule a work to read Tx timestamp value generated once the packet is
1692  * transmitted.
1693  */
1694 static void idpf_tx_read_tstamp(struct idpf_tx_queue *txq, struct sk_buff *skb)
1695 {
1696 	struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
1697 	struct idpf_ptp_tx_tstamp_status *tx_tstamp_status;
1698 
1699 	tx_tstamp_caps = txq->cached_tstamp_caps;
1700 	spin_lock_bh(&tx_tstamp_caps->status_lock);
1701 
1702 	for (u32 i = 0; i < tx_tstamp_caps->num_entries; i++) {
1703 		tx_tstamp_status = &tx_tstamp_caps->tx_tstamp_status[i];
1704 		if (tx_tstamp_status->state != IDPF_PTP_FREE)
1705 			continue;
1706 
1707 		tx_tstamp_status->skb = skb;
1708 		tx_tstamp_status->state = IDPF_PTP_REQUEST;
1709 
1710 		/* Fetch timestamp from completion descriptor through
1711 		 * virtchnl msg to report to stack.
1712 		 */
1713 		queue_work(system_unbound_wq, txq->tstamp_task);
1714 		break;
1715 	}
1716 
1717 	spin_unlock_bh(&tx_tstamp_caps->status_lock);
1718 }
1719 
1720 /**
1721  * idpf_tx_clean_stashed_bufs - clean bufs that were stored for
1722  * out of order completions
1723  * @txq: queue to clean
1724  * @compl_tag: completion tag of packet to clean (from completion descriptor)
1725  * @cleaned: pointer to stats struct to track cleaned packets/bytes
1726  * @budget: Used to determine if we are in netpoll
1727  */
1728 static void idpf_tx_clean_stashed_bufs(struct idpf_tx_queue *txq,
1729 				       u16 compl_tag,
1730 				       struct libeth_sq_napi_stats *cleaned,
1731 				       int budget)
1732 {
1733 	struct idpf_tx_stash *stash;
1734 	struct hlist_node *tmp_buf;
1735 	struct libeth_cq_pp cp = {
1736 		.dev	= txq->dev,
1737 		.ss	= cleaned,
1738 		.napi	= budget,
1739 	};
1740 
1741 	/* Buffer completion */
1742 	hash_for_each_possible_safe(txq->stash->sched_buf_hash, stash, tmp_buf,
1743 				    hlist, compl_tag) {
1744 		if (unlikely(idpf_tx_buf_compl_tag(&stash->buf) != compl_tag))
1745 			continue;
1746 
1747 		hash_del(&stash->hlist);
1748 
1749 		if (stash->buf.type == LIBETH_SQE_SKB &&
1750 		    (skb_shinfo(stash->buf.skb)->tx_flags & SKBTX_IN_PROGRESS))
1751 			idpf_tx_read_tstamp(txq, stash->buf.skb);
1752 
1753 		libeth_tx_complete(&stash->buf, &cp);
1754 
1755 		/* Push shadow buf back onto stack */
1756 		idpf_buf_lifo_push(&txq->stash->buf_stack, stash);
1757 	}
1758 }
1759 
1760 /**
1761  * idpf_stash_flow_sch_buffers - store buffer parameters info to be freed at a
1762  * later time (only relevant for flow scheduling mode)
1763  * @txq: Tx queue to clean
1764  * @tx_buf: buffer to store
1765  */
1766 static int idpf_stash_flow_sch_buffers(struct idpf_tx_queue *txq,
1767 				       struct idpf_tx_buf *tx_buf)
1768 {
1769 	struct idpf_tx_stash *stash;
1770 
1771 	if (unlikely(tx_buf->type <= LIBETH_SQE_CTX))
1772 		return 0;
1773 
1774 	stash = idpf_buf_lifo_pop(&txq->stash->buf_stack);
1775 	if (unlikely(!stash)) {
1776 		net_err_ratelimited("%s: No out-of-order TX buffers left!\n",
1777 				    netdev_name(txq->netdev));
1778 
1779 		return -ENOMEM;
1780 	}
1781 
1782 	/* Store buffer params in shadow buffer */
1783 	stash->buf.skb = tx_buf->skb;
1784 	stash->buf.bytes = tx_buf->bytes;
1785 	stash->buf.packets = tx_buf->packets;
1786 	stash->buf.type = tx_buf->type;
1787 	stash->buf.nr_frags = tx_buf->nr_frags;
1788 	dma_unmap_addr_set(&stash->buf, dma, dma_unmap_addr(tx_buf, dma));
1789 	dma_unmap_len_set(&stash->buf, len, dma_unmap_len(tx_buf, len));
1790 	idpf_tx_buf_compl_tag(&stash->buf) = idpf_tx_buf_compl_tag(tx_buf);
1791 
1792 	/* Add buffer to buf_hash table to be freed later */
1793 	hash_add(txq->stash->sched_buf_hash, &stash->hlist,
1794 		 idpf_tx_buf_compl_tag(&stash->buf));
1795 
1796 	tx_buf->type = LIBETH_SQE_EMPTY;
1797 
1798 	return 0;
1799 }
1800 
1801 #define idpf_tx_splitq_clean_bump_ntc(txq, ntc, desc, buf)	\
1802 do {								\
1803 	if (unlikely(++(ntc) == (txq)->desc_count)) {		\
1804 		ntc = 0;					\
1805 		buf = (txq)->tx_buf;				\
1806 		desc = &(txq)->flex_tx[0];			\
1807 	} else {						\
1808 		(buf)++;					\
1809 		(desc)++;					\
1810 	}							\
1811 } while (0)
1812 
1813 /**
1814  * idpf_tx_splitq_clean - Reclaim resources from buffer queue
1815  * @tx_q: Tx queue to clean
1816  * @end: queue index until which it should be cleaned
1817  * @napi_budget: Used to determine if we are in netpoll
1818  * @cleaned: pointer to stats struct to track cleaned packets/bytes
1819  * @descs_only: true if queue is using flow-based scheduling and should
1820  * not clean buffers at this time
1821  *
1822  * Cleans the queue descriptor ring. If the queue is using queue-based
1823  * scheduling, the buffers will be cleaned as well. If the queue is using
1824  * flow-based scheduling, only the descriptors are cleaned at this time.
1825  * Separate packet completion events will be reported on the completion queue,
1826  * and the buffers will be cleaned separately. The stats are not updated from
1827  * this function when using flow-based scheduling.
1828  *
1829  * Furthermore, in flow scheduling mode, check to make sure there are enough
1830  * reserve buffers to stash the packet. If there are not, return early, which
1831  * will leave next_to_clean pointing to the packet that failed to be stashed.
1832  *
1833  * Return: false in the scenario above, true otherwise.
1834  */
1835 static bool idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
1836 				 int napi_budget,
1837 				 struct libeth_sq_napi_stats *cleaned,
1838 				 bool descs_only)
1839 {
1840 	union idpf_tx_flex_desc *next_pending_desc = NULL;
1841 	union idpf_tx_flex_desc *tx_desc;
1842 	u32 ntc = tx_q->next_to_clean;
1843 	struct libeth_cq_pp cp = {
1844 		.dev	= tx_q->dev,
1845 		.ss	= cleaned,
1846 		.napi	= napi_budget,
1847 	};
1848 	struct idpf_tx_buf *tx_buf;
1849 	bool clean_complete = true;
1850 
1851 	tx_desc = &tx_q->flex_tx[ntc];
1852 	next_pending_desc = &tx_q->flex_tx[end];
1853 	tx_buf = &tx_q->tx_buf[ntc];
1854 
1855 	while (tx_desc != next_pending_desc) {
1856 		u32 eop_idx;
1857 
1858 		/* If this entry in the ring was used as a context descriptor,
1859 		 * it's corresponding entry in the buffer ring is reserved. We
1860 		 * can skip this descriptor since there is no buffer to clean.
1861 		 */
1862 		if (tx_buf->type <= LIBETH_SQE_CTX)
1863 			goto fetch_next_txq_desc;
1864 
1865 		if (unlikely(tx_buf->type != LIBETH_SQE_SKB))
1866 			break;
1867 
1868 		eop_idx = tx_buf->rs_idx;
1869 
1870 		if (descs_only) {
1871 			if (IDPF_TX_BUF_RSV_UNUSED(tx_q) < tx_buf->nr_frags) {
1872 				clean_complete = false;
1873 				goto tx_splitq_clean_out;
1874 			}
1875 
1876 			idpf_stash_flow_sch_buffers(tx_q, tx_buf);
1877 
1878 			while (ntc != eop_idx) {
1879 				idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
1880 							      tx_desc, tx_buf);
1881 				idpf_stash_flow_sch_buffers(tx_q, tx_buf);
1882 			}
1883 		} else {
1884 			libeth_tx_complete(tx_buf, &cp);
1885 
1886 			/* unmap remaining buffers */
1887 			while (ntc != eop_idx) {
1888 				idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
1889 							      tx_desc, tx_buf);
1890 
1891 				/* unmap any remaining paged data */
1892 				libeth_tx_complete(tx_buf, &cp);
1893 			}
1894 		}
1895 
1896 fetch_next_txq_desc:
1897 		idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, tx_desc, tx_buf);
1898 	}
1899 
1900 tx_splitq_clean_out:
1901 	tx_q->next_to_clean = ntc;
1902 
1903 	return clean_complete;
1904 }
1905 
1906 #define idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, buf)	\
1907 do {							\
1908 	(buf)++;					\
1909 	(ntc)++;					\
1910 	if (unlikely((ntc) == (txq)->desc_count)) {	\
1911 		buf = (txq)->tx_buf;			\
1912 		ntc = 0;				\
1913 	}						\
1914 } while (0)
1915 
1916 /**
1917  * idpf_tx_clean_buf_ring - clean flow scheduling TX queue buffers
1918  * @txq: queue to clean
1919  * @compl_tag: completion tag of packet to clean (from completion descriptor)
1920  * @cleaned: pointer to stats struct to track cleaned packets/bytes
1921  * @budget: Used to determine if we are in netpoll
1922  *
1923  * Cleans all buffers associated with the input completion tag either from the
1924  * TX buffer ring or from the hash table if the buffers were previously
1925  * stashed. Returns the byte/segment count for the cleaned packet associated
1926  * this completion tag.
1927  */
1928 static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag,
1929 				   struct libeth_sq_napi_stats *cleaned,
1930 				   int budget)
1931 {
1932 	u16 idx = compl_tag & txq->compl_tag_bufid_m;
1933 	struct idpf_tx_buf *tx_buf = NULL;
1934 	struct libeth_cq_pp cp = {
1935 		.dev	= txq->dev,
1936 		.ss	= cleaned,
1937 		.napi	= budget,
1938 	};
1939 	u16 ntc, orig_idx = idx;
1940 
1941 	tx_buf = &txq->tx_buf[idx];
1942 
1943 	if (unlikely(tx_buf->type <= LIBETH_SQE_CTX ||
1944 		     idpf_tx_buf_compl_tag(tx_buf) != compl_tag))
1945 		return false;
1946 
1947 	if (tx_buf->type == LIBETH_SQE_SKB) {
1948 		if (skb_shinfo(tx_buf->skb)->tx_flags & SKBTX_IN_PROGRESS)
1949 			idpf_tx_read_tstamp(txq, tx_buf->skb);
1950 
1951 		libeth_tx_complete(tx_buf, &cp);
1952 	}
1953 
1954 	idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
1955 
1956 	while (idpf_tx_buf_compl_tag(tx_buf) == compl_tag) {
1957 		libeth_tx_complete(tx_buf, &cp);
1958 		idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
1959 	}
1960 
1961 	/*
1962 	 * It's possible the packet we just cleaned was an out of order
1963 	 * completion, which means we can stash the buffers starting from
1964 	 * the original next_to_clean and reuse the descriptors. We need
1965 	 * to compare the descriptor ring next_to_clean packet's "first" buffer
1966 	 * to the "first" buffer of the packet we just cleaned to determine if
1967 	 * this is the case. Howevever, next_to_clean can point to either a
1968 	 * reserved buffer that corresponds to a context descriptor used for the
1969 	 * next_to_clean packet (TSO packet) or the "first" buffer (single
1970 	 * packet). The orig_idx from the packet we just cleaned will always
1971 	 * point to the "first" buffer. If next_to_clean points to a reserved
1972 	 * buffer, let's bump ntc once and start the comparison from there.
1973 	 */
1974 	ntc = txq->next_to_clean;
1975 	tx_buf = &txq->tx_buf[ntc];
1976 
1977 	if (tx_buf->type == LIBETH_SQE_CTX)
1978 		idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, tx_buf);
1979 
1980 	/*
1981 	 * If ntc still points to a different "first" buffer, clean the
1982 	 * descriptor ring and stash all of the buffers for later cleaning. If
1983 	 * we cannot stash all of the buffers, next_to_clean will point to the
1984 	 * "first" buffer of the packet that could not be stashed and cleaning
1985 	 * will start there next time.
1986 	 */
1987 	if (unlikely(tx_buf != &txq->tx_buf[orig_idx] &&
1988 		     !idpf_tx_splitq_clean(txq, orig_idx, budget, cleaned,
1989 					   true)))
1990 		return true;
1991 
1992 	/*
1993 	 * Otherwise, update next_to_clean to reflect the cleaning that was
1994 	 * done above.
1995 	 */
1996 	txq->next_to_clean = idx;
1997 
1998 	return true;
1999 }
2000 
2001 /**
2002  * idpf_tx_handle_rs_completion - clean a single packet and all of its buffers
2003  * whether on the buffer ring or in the hash table
2004  * @txq: Tx ring to clean
2005  * @desc: pointer to completion queue descriptor to extract completion
2006  * information from
2007  * @cleaned: pointer to stats struct to track cleaned packets/bytes
2008  * @budget: Used to determine if we are in netpoll
2009  *
2010  * Returns bytes/packets cleaned
2011  */
2012 static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq,
2013 					 struct idpf_splitq_tx_compl_desc *desc,
2014 					 struct libeth_sq_napi_stats *cleaned,
2015 					 int budget)
2016 {
2017 	u16 compl_tag;
2018 
2019 	if (!idpf_queue_has(FLOW_SCH_EN, txq)) {
2020 		u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head);
2021 
2022 		idpf_tx_splitq_clean(txq, head, budget, cleaned, false);
2023 		return;
2024 	}
2025 
2026 	compl_tag = le16_to_cpu(desc->q_head_compl_tag.compl_tag);
2027 
2028 	/* If we didn't clean anything on the ring, this packet must be
2029 	 * in the hash table. Go clean it there.
2030 	 */
2031 	if (!idpf_tx_clean_buf_ring(txq, compl_tag, cleaned, budget))
2032 		idpf_tx_clean_stashed_bufs(txq, compl_tag, cleaned, budget);
2033 }
2034 
2035 /**
2036  * idpf_tx_clean_complq - Reclaim resources on completion queue
2037  * @complq: Tx ring to clean
2038  * @budget: Used to determine if we are in netpoll
2039  * @cleaned: returns number of packets cleaned
2040  *
2041  * Returns true if there's any budget left (e.g. the clean is finished)
2042  */
2043 static bool idpf_tx_clean_complq(struct idpf_compl_queue *complq, int budget,
2044 				 int *cleaned)
2045 {
2046 	struct idpf_splitq_tx_compl_desc *tx_desc;
2047 	s16 ntc = complq->next_to_clean;
2048 	struct idpf_netdev_priv *np;
2049 	unsigned int complq_budget;
2050 	bool complq_ok = true;
2051 	int i;
2052 
2053 	complq_budget = complq->clean_budget;
2054 	tx_desc = &complq->comp[ntc];
2055 	ntc -= complq->desc_count;
2056 
2057 	do {
2058 		struct libeth_sq_napi_stats cleaned_stats = { };
2059 		struct idpf_tx_queue *tx_q;
2060 		int rel_tx_qid;
2061 		u16 hw_head;
2062 		u8 ctype;	/* completion type */
2063 		u16 gen;
2064 
2065 		/* if the descriptor isn't done, no work yet to do */
2066 		gen = le16_get_bits(tx_desc->qid_comptype_gen,
2067 				    IDPF_TXD_COMPLQ_GEN_M);
2068 		if (idpf_queue_has(GEN_CHK, complq) != gen)
2069 			break;
2070 
2071 		/* Find necessary info of TX queue to clean buffers */
2072 		rel_tx_qid = le16_get_bits(tx_desc->qid_comptype_gen,
2073 					   IDPF_TXD_COMPLQ_QID_M);
2074 		if (rel_tx_qid >= complq->txq_grp->num_txq ||
2075 		    !complq->txq_grp->txqs[rel_tx_qid]) {
2076 			netdev_err(complq->netdev, "TxQ not found\n");
2077 			goto fetch_next_desc;
2078 		}
2079 		tx_q = complq->txq_grp->txqs[rel_tx_qid];
2080 
2081 		/* Determine completion type */
2082 		ctype = le16_get_bits(tx_desc->qid_comptype_gen,
2083 				      IDPF_TXD_COMPLQ_COMPL_TYPE_M);
2084 		switch (ctype) {
2085 		case IDPF_TXD_COMPLT_RE:
2086 			hw_head = le16_to_cpu(tx_desc->q_head_compl_tag.q_head);
2087 
2088 			idpf_tx_splitq_clean(tx_q, hw_head, budget,
2089 					     &cleaned_stats, true);
2090 			break;
2091 		case IDPF_TXD_COMPLT_RS:
2092 			idpf_tx_handle_rs_completion(tx_q, tx_desc,
2093 						     &cleaned_stats, budget);
2094 			break;
2095 		case IDPF_TXD_COMPLT_SW_MARKER:
2096 			idpf_tx_handle_sw_marker(tx_q);
2097 			break;
2098 		default:
2099 			netdev_err(tx_q->netdev,
2100 				   "Unknown TX completion type: %d\n", ctype);
2101 			goto fetch_next_desc;
2102 		}
2103 
2104 		u64_stats_update_begin(&tx_q->stats_sync);
2105 		u64_stats_add(&tx_q->q_stats.packets, cleaned_stats.packets);
2106 		u64_stats_add(&tx_q->q_stats.bytes, cleaned_stats.bytes);
2107 		tx_q->cleaned_pkts += cleaned_stats.packets;
2108 		tx_q->cleaned_bytes += cleaned_stats.bytes;
2109 		complq->num_completions++;
2110 		u64_stats_update_end(&tx_q->stats_sync);
2111 
2112 fetch_next_desc:
2113 		tx_desc++;
2114 		ntc++;
2115 		if (unlikely(!ntc)) {
2116 			ntc -= complq->desc_count;
2117 			tx_desc = &complq->comp[0];
2118 			idpf_queue_change(GEN_CHK, complq);
2119 		}
2120 
2121 		prefetch(tx_desc);
2122 
2123 		/* update budget accounting */
2124 		complq_budget--;
2125 	} while (likely(complq_budget));
2126 
2127 	/* Store the state of the complq to be used later in deciding if a
2128 	 * TXQ can be started again
2129 	 */
2130 	if (unlikely(IDPF_TX_COMPLQ_PENDING(complq->txq_grp) >
2131 		     IDPF_TX_COMPLQ_OVERFLOW_THRESH(complq)))
2132 		complq_ok = false;
2133 
2134 	np = netdev_priv(complq->netdev);
2135 	for (i = 0; i < complq->txq_grp->num_txq; ++i) {
2136 		struct idpf_tx_queue *tx_q = complq->txq_grp->txqs[i];
2137 		struct netdev_queue *nq;
2138 		bool dont_wake;
2139 
2140 		/* We didn't clean anything on this queue, move along */
2141 		if (!tx_q->cleaned_bytes)
2142 			continue;
2143 
2144 		*cleaned += tx_q->cleaned_pkts;
2145 
2146 		/* Update BQL */
2147 		nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
2148 
2149 		dont_wake = !complq_ok || IDPF_TX_BUF_RSV_LOW(tx_q) ||
2150 			    np->state != __IDPF_VPORT_UP ||
2151 			    !netif_carrier_ok(tx_q->netdev);
2152 		/* Check if the TXQ needs to and can be restarted */
2153 		__netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes,
2154 					   IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
2155 					   dont_wake);
2156 
2157 		/* Reset cleaned stats for the next time this queue is
2158 		 * cleaned
2159 		 */
2160 		tx_q->cleaned_bytes = 0;
2161 		tx_q->cleaned_pkts = 0;
2162 	}
2163 
2164 	ntc += complq->desc_count;
2165 	complq->next_to_clean = ntc;
2166 
2167 	return !!complq_budget;
2168 }
2169 
2170 /**
2171  * idpf_tx_splitq_build_ctb - populate command tag and size for queue
2172  * based scheduling descriptors
2173  * @desc: descriptor to populate
2174  * @params: pointer to tx params struct
2175  * @td_cmd: command to be filled in desc
2176  * @size: size of buffer
2177  */
2178 void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc,
2179 			      struct idpf_tx_splitq_params *params,
2180 			      u16 td_cmd, u16 size)
2181 {
2182 	desc->q.qw1.cmd_dtype =
2183 		le16_encode_bits(params->dtype, IDPF_FLEX_TXD_QW1_DTYPE_M);
2184 	desc->q.qw1.cmd_dtype |=
2185 		le16_encode_bits(td_cmd, IDPF_FLEX_TXD_QW1_CMD_M);
2186 	desc->q.qw1.buf_size = cpu_to_le16(size);
2187 	desc->q.qw1.l2tags.l2tag1 = cpu_to_le16(params->td_tag);
2188 }
2189 
2190 /**
2191  * idpf_tx_splitq_build_flow_desc - populate command tag and size for flow
2192  * scheduling descriptors
2193  * @desc: descriptor to populate
2194  * @params: pointer to tx params struct
2195  * @td_cmd: command to be filled in desc
2196  * @size: size of buffer
2197  */
2198 void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
2199 				    struct idpf_tx_splitq_params *params,
2200 				    u16 td_cmd, u16 size)
2201 {
2202 	*(u32 *)&desc->flow.qw1.cmd_dtype = (u8)(params->dtype | td_cmd);
2203 	desc->flow.qw1.rxr_bufsize = cpu_to_le16((u16)size);
2204 	desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag);
2205 }
2206 
2207 /* Global conditions to tell whether the txq (and related resources)
2208  * has room to allow the use of "size" descriptors.
2209  */
2210 static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 size)
2211 {
2212 	if (IDPF_DESC_UNUSED(tx_q) < size ||
2213 	    IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
2214 		IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq) ||
2215 	    IDPF_TX_BUF_RSV_LOW(tx_q))
2216 		return 0;
2217 	return 1;
2218 }
2219 
2220 /**
2221  * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
2222  * @tx_q: the queue to be checked
2223  * @descs_needed: number of descriptors required for this packet
2224  *
2225  * Returns 0 if stop is not needed
2226  */
2227 static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
2228 				     unsigned int descs_needed)
2229 {
2230 	if (netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
2231 				      idpf_txq_has_room(tx_q, descs_needed),
2232 				      1, 1))
2233 		return 0;
2234 
2235 	u64_stats_update_begin(&tx_q->stats_sync);
2236 	u64_stats_inc(&tx_q->q_stats.q_busy);
2237 	u64_stats_update_end(&tx_q->stats_sync);
2238 
2239 	return -EBUSY;
2240 }
2241 
2242 /**
2243  * idpf_tx_buf_hw_update - Store the new tail value
2244  * @tx_q: queue to bump
2245  * @val: new tail index
2246  * @xmit_more: more skb's pending
2247  *
2248  * The naming here is special in that 'hw' signals that this function is about
2249  * to do a register write to update our queue status. We know this can only
2250  * mean tail here as HW should be owning head for TX.
2251  */
2252 void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
2253 			   bool xmit_more)
2254 {
2255 	struct netdev_queue *nq;
2256 
2257 	nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
2258 	tx_q->next_to_use = val;
2259 
2260 	/* Force memory writes to complete before letting h/w
2261 	 * know there are new descriptors to fetch.  (Only
2262 	 * applicable for weak-ordered memory model archs,
2263 	 * such as IA-64).
2264 	 */
2265 	wmb();
2266 
2267 	/* notify HW of packet */
2268 	if (netif_xmit_stopped(nq) || !xmit_more)
2269 		writel(val, tx_q->tail);
2270 }
2271 
2272 /**
2273  * idpf_tx_desc_count_required - calculate number of Tx descriptors needed
2274  * @txq: queue to send buffer on
2275  * @skb: send buffer
2276  *
2277  * Returns number of data descriptors needed for this skb.
2278  */
2279 unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
2280 					 struct sk_buff *skb)
2281 {
2282 	const struct skb_shared_info *shinfo;
2283 	unsigned int count = 0, i;
2284 
2285 	count += !!skb_headlen(skb);
2286 
2287 	if (!skb_is_nonlinear(skb))
2288 		return count;
2289 
2290 	shinfo = skb_shinfo(skb);
2291 	for (i = 0; i < shinfo->nr_frags; i++) {
2292 		unsigned int size;
2293 
2294 		size = skb_frag_size(&shinfo->frags[i]);
2295 
2296 		/* We only need to use the idpf_size_to_txd_count check if the
2297 		 * fragment is going to span multiple descriptors,
2298 		 * i.e. size >= 16K.
2299 		 */
2300 		if (size >= SZ_16K)
2301 			count += idpf_size_to_txd_count(size);
2302 		else
2303 			count++;
2304 	}
2305 
2306 	if (idpf_chk_linearize(skb, txq->tx_max_bufs, count)) {
2307 		if (__skb_linearize(skb))
2308 			return 0;
2309 
2310 		count = idpf_size_to_txd_count(skb->len);
2311 		u64_stats_update_begin(&txq->stats_sync);
2312 		u64_stats_inc(&txq->q_stats.linearize);
2313 		u64_stats_update_end(&txq->stats_sync);
2314 	}
2315 
2316 	return count;
2317 }
2318 
2319 /**
2320  * idpf_tx_dma_map_error - handle TX DMA map errors
2321  * @txq: queue to send buffer on
2322  * @skb: send buffer
2323  * @first: original first buffer info buffer for packet
2324  * @idx: starting point on ring to unwind
2325  */
2326 void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
2327 			   struct idpf_tx_buf *first, u16 idx)
2328 {
2329 	struct libeth_sq_napi_stats ss = { };
2330 	struct libeth_cq_pp cp = {
2331 		.dev	= txq->dev,
2332 		.ss	= &ss,
2333 	};
2334 
2335 	u64_stats_update_begin(&txq->stats_sync);
2336 	u64_stats_inc(&txq->q_stats.dma_map_errs);
2337 	u64_stats_update_end(&txq->stats_sync);
2338 
2339 	/* clear dma mappings for failed tx_buf map */
2340 	for (;;) {
2341 		struct idpf_tx_buf *tx_buf;
2342 
2343 		tx_buf = &txq->tx_buf[idx];
2344 		libeth_tx_complete(tx_buf, &cp);
2345 		if (tx_buf == first)
2346 			break;
2347 		if (idx == 0)
2348 			idx = txq->desc_count;
2349 		idx--;
2350 	}
2351 
2352 	if (skb_is_gso(skb)) {
2353 		union idpf_tx_flex_desc *tx_desc;
2354 
2355 		/* If we failed a DMA mapping for a TSO packet, we will have
2356 		 * used one additional descriptor for a context
2357 		 * descriptor. Reset that here.
2358 		 */
2359 		tx_desc = &txq->flex_tx[idx];
2360 		memset(tx_desc, 0, sizeof(*tx_desc));
2361 		if (idx == 0)
2362 			idx = txq->desc_count;
2363 		idx--;
2364 	}
2365 
2366 	/* Update tail in case netdev_xmit_more was previously true */
2367 	idpf_tx_buf_hw_update(txq, idx, false);
2368 }
2369 
2370 /**
2371  * idpf_tx_splitq_bump_ntu - adjust NTU and generation
2372  * @txq: the tx ring to wrap
2373  * @ntu: ring index to bump
2374  */
2375 static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_tx_queue *txq, u16 ntu)
2376 {
2377 	ntu++;
2378 
2379 	if (ntu == txq->desc_count) {
2380 		ntu = 0;
2381 		txq->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(txq);
2382 	}
2383 
2384 	return ntu;
2385 }
2386 
2387 /**
2388  * idpf_tx_splitq_map - Build the Tx flex descriptor
2389  * @tx_q: queue to send buffer on
2390  * @params: pointer to splitq params struct
2391  * @first: first buffer info buffer to use
2392  *
2393  * This function loops over the skb data pointed to by *first
2394  * and gets a physical address for each memory location and programs
2395  * it and the length into the transmit flex descriptor.
2396  */
2397 static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
2398 			       struct idpf_tx_splitq_params *params,
2399 			       struct idpf_tx_buf *first)
2400 {
2401 	union idpf_tx_flex_desc *tx_desc;
2402 	unsigned int data_len, size;
2403 	struct idpf_tx_buf *tx_buf;
2404 	u16 i = tx_q->next_to_use;
2405 	struct netdev_queue *nq;
2406 	struct sk_buff *skb;
2407 	skb_frag_t *frag;
2408 	u16 td_cmd = 0;
2409 	dma_addr_t dma;
2410 
2411 	skb = first->skb;
2412 
2413 	td_cmd = params->offload.td_cmd;
2414 
2415 	data_len = skb->data_len;
2416 	size = skb_headlen(skb);
2417 
2418 	tx_desc = &tx_q->flex_tx[i];
2419 
2420 	dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
2421 
2422 	tx_buf = first;
2423 	first->nr_frags = 0;
2424 
2425 	params->compl_tag =
2426 		(tx_q->compl_tag_cur_gen << tx_q->compl_tag_gen_s) | i;
2427 
2428 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
2429 		unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
2430 
2431 		if (dma_mapping_error(tx_q->dev, dma))
2432 			return idpf_tx_dma_map_error(tx_q, skb, first, i);
2433 
2434 		first->nr_frags++;
2435 		idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag;
2436 		tx_buf->type = LIBETH_SQE_FRAG;
2437 
2438 		/* record length, and DMA address */
2439 		dma_unmap_len_set(tx_buf, len, size);
2440 		dma_unmap_addr_set(tx_buf, dma, dma);
2441 
2442 		/* buf_addr is in same location for both desc types */
2443 		tx_desc->q.buf_addr = cpu_to_le64(dma);
2444 
2445 		/* The stack can send us fragments that are too large for a
2446 		 * single descriptor i.e. frag size > 16K-1. We will need to
2447 		 * split the fragment across multiple descriptors in this case.
2448 		 * To adhere to HW alignment restrictions, the fragment needs
2449 		 * to be split such that the first chunk ends on a 4K boundary
2450 		 * and all subsequent chunks start on a 4K boundary. We still
2451 		 * want to send as much data as possible though, so our
2452 		 * intermediate descriptor chunk size will be 12K.
2453 		 *
2454 		 * For example, consider a 32K fragment mapped to DMA addr 2600.
2455 		 * ------------------------------------------------------------
2456 		 * |                    frag_size = 32K                       |
2457 		 * ------------------------------------------------------------
2458 		 * |2600		  |16384	    |28672
2459 		 *
2460 		 * 3 descriptors will be used for this fragment. The HW expects
2461 		 * the descriptors to contain the following:
2462 		 * ------------------------------------------------------------
2463 		 * | size = 13784         | size = 12K      | size = 6696     |
2464 		 * | dma = 2600           | dma = 16384     | dma = 28672     |
2465 		 * ------------------------------------------------------------
2466 		 *
2467 		 * We need to first adjust the max_data for the first chunk so
2468 		 * that it ends on a 4K boundary. By negating the value of the
2469 		 * DMA address and taking only the low order bits, we're
2470 		 * effectively calculating
2471 		 *	4K - (DMA addr lower order bits) =
2472 		 *				bytes to next boundary.
2473 		 *
2474 		 * Add that to our base aligned max_data (12K) and we have
2475 		 * our first chunk size. In the example above,
2476 		 *	13784 = 12K + (4096-2600)
2477 		 *
2478 		 * After guaranteeing the first chunk ends on a 4K boundary, we
2479 		 * will give the intermediate descriptors 12K chunks and
2480 		 * whatever is left to the final descriptor. This ensures that
2481 		 * all descriptors used for the remaining chunks of the
2482 		 * fragment start on a 4K boundary and we use as few
2483 		 * descriptors as possible.
2484 		 */
2485 		max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1);
2486 		while (unlikely(size > IDPF_TX_MAX_DESC_DATA)) {
2487 			idpf_tx_splitq_build_desc(tx_desc, params, td_cmd,
2488 						  max_data);
2489 
2490 			if (unlikely(++i == tx_q->desc_count)) {
2491 				tx_buf = tx_q->tx_buf;
2492 				tx_desc = &tx_q->flex_tx[0];
2493 				i = 0;
2494 				tx_q->compl_tag_cur_gen =
2495 					IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
2496 			} else {
2497 				tx_buf++;
2498 				tx_desc++;
2499 			}
2500 
2501 			/* Since this packet has a buffer that is going to span
2502 			 * multiple descriptors, it's going to leave holes in
2503 			 * to the TX buffer ring. To ensure these holes do not
2504 			 * cause issues in the cleaning routines, we will clear
2505 			 * them of any stale data and assign them the same
2506 			 * completion tag as the current packet. Then when the
2507 			 * packet is being cleaned, the cleaning routines will
2508 			 * simply pass over these holes and finish cleaning the
2509 			 * rest of the packet.
2510 			 */
2511 			tx_buf->type = LIBETH_SQE_EMPTY;
2512 			idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag;
2513 
2514 			/* Adjust the DMA offset and the remaining size of the
2515 			 * fragment.  On the first iteration of this loop,
2516 			 * max_data will be >= 12K and <= 16K-1.  On any
2517 			 * subsequent iteration of this loop, max_data will
2518 			 * always be 12K.
2519 			 */
2520 			dma += max_data;
2521 			size -= max_data;
2522 
2523 			/* Reset max_data since remaining chunks will be 12K
2524 			 * at most
2525 			 */
2526 			max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
2527 
2528 			/* buf_addr is in same location for both desc types */
2529 			tx_desc->q.buf_addr = cpu_to_le64(dma);
2530 		}
2531 
2532 		if (!data_len)
2533 			break;
2534 
2535 		idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
2536 
2537 		if (unlikely(++i == tx_q->desc_count)) {
2538 			tx_buf = tx_q->tx_buf;
2539 			tx_desc = &tx_q->flex_tx[0];
2540 			i = 0;
2541 			tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
2542 		} else {
2543 			tx_buf++;
2544 			tx_desc++;
2545 		}
2546 
2547 		size = skb_frag_size(frag);
2548 		data_len -= size;
2549 
2550 		dma = skb_frag_dma_map(tx_q->dev, frag, 0, size,
2551 				       DMA_TO_DEVICE);
2552 	}
2553 
2554 	/* record SW timestamp if HW timestamp is not available */
2555 	skb_tx_timestamp(skb);
2556 
2557 	first->type = LIBETH_SQE_SKB;
2558 
2559 	/* write last descriptor with RS and EOP bits */
2560 	first->rs_idx = i;
2561 	td_cmd |= params->eop_cmd;
2562 	idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
2563 	i = idpf_tx_splitq_bump_ntu(tx_q, i);
2564 
2565 	tx_q->txq_grp->num_completions_pending++;
2566 
2567 	/* record bytecount for BQL */
2568 	nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
2569 	netdev_tx_sent_queue(nq, first->bytes);
2570 
2571 	idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
2572 }
2573 
2574 /**
2575  * idpf_tso - computes mss and TSO length to prepare for TSO
2576  * @skb: pointer to skb
2577  * @off: pointer to struct that holds offload parameters
2578  *
2579  * Returns error (negative) if TSO was requested but cannot be applied to the
2580  * given skb, 0 if TSO does not apply to the given skb, or 1 otherwise.
2581  */
2582 int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off)
2583 {
2584 	const struct skb_shared_info *shinfo;
2585 	union {
2586 		struct iphdr *v4;
2587 		struct ipv6hdr *v6;
2588 		unsigned char *hdr;
2589 	} ip;
2590 	union {
2591 		struct tcphdr *tcp;
2592 		struct udphdr *udp;
2593 		unsigned char *hdr;
2594 	} l4;
2595 	u32 paylen, l4_start;
2596 	int err;
2597 
2598 	if (!skb_is_gso(skb))
2599 		return 0;
2600 
2601 	err = skb_cow_head(skb, 0);
2602 	if (err < 0)
2603 		return err;
2604 
2605 	shinfo = skb_shinfo(skb);
2606 
2607 	ip.hdr = skb_network_header(skb);
2608 	l4.hdr = skb_transport_header(skb);
2609 
2610 	/* initialize outer IP header fields */
2611 	if (ip.v4->version == 4) {
2612 		ip.v4->tot_len = 0;
2613 		ip.v4->check = 0;
2614 	} else if (ip.v6->version == 6) {
2615 		ip.v6->payload_len = 0;
2616 	}
2617 
2618 	l4_start = skb_transport_offset(skb);
2619 
2620 	/* remove payload length from checksum */
2621 	paylen = skb->len - l4_start;
2622 
2623 	switch (shinfo->gso_type & ~SKB_GSO_DODGY) {
2624 	case SKB_GSO_TCPV4:
2625 	case SKB_GSO_TCPV6:
2626 		csum_replace_by_diff(&l4.tcp->check,
2627 				     (__force __wsum)htonl(paylen));
2628 		off->tso_hdr_len = __tcp_hdrlen(l4.tcp) + l4_start;
2629 		break;
2630 	case SKB_GSO_UDP_L4:
2631 		csum_replace_by_diff(&l4.udp->check,
2632 				     (__force __wsum)htonl(paylen));
2633 		/* compute length of segmentation header */
2634 		off->tso_hdr_len = sizeof(struct udphdr) + l4_start;
2635 		l4.udp->len = htons(shinfo->gso_size + sizeof(struct udphdr));
2636 		break;
2637 	default:
2638 		return -EINVAL;
2639 	}
2640 
2641 	off->tso_len = skb->len - off->tso_hdr_len;
2642 	off->mss = shinfo->gso_size;
2643 	off->tso_segs = shinfo->gso_segs;
2644 
2645 	off->tx_flags |= IDPF_TX_FLAGS_TSO;
2646 
2647 	return 1;
2648 }
2649 
2650 
2651 /**
2652  * idpf_tx_splitq_get_ctx_desc - grab next desc and update buffer ring
2653  * @txq: queue to put context descriptor on
2654  *
2655  * Since the TX buffer rings mimics the descriptor ring, update the tx buffer
2656  * ring entry to reflect that this index is a context descriptor
2657  */
2658 static union idpf_flex_tx_ctx_desc *
2659 idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq)
2660 {
2661 	union idpf_flex_tx_ctx_desc *desc;
2662 	int i = txq->next_to_use;
2663 
2664 	txq->tx_buf[i].type = LIBETH_SQE_CTX;
2665 
2666 	/* grab the next descriptor */
2667 	desc = &txq->flex_ctx[i];
2668 	txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i);
2669 
2670 	return desc;
2671 }
2672 
2673 /**
2674  * idpf_tx_drop_skb - free the SKB and bump tail if necessary
2675  * @tx_q: queue to send buffer on
2676  * @skb: pointer to skb
2677  */
2678 netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb)
2679 {
2680 	u64_stats_update_begin(&tx_q->stats_sync);
2681 	u64_stats_inc(&tx_q->q_stats.skb_drops);
2682 	u64_stats_update_end(&tx_q->stats_sync);
2683 
2684 	idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
2685 
2686 	dev_kfree_skb(skb);
2687 
2688 	return NETDEV_TX_OK;
2689 }
2690 
2691 #if (IS_ENABLED(CONFIG_PTP_1588_CLOCK))
2692 /**
2693  * idpf_tx_tstamp - set up context descriptor for hardware timestamp
2694  * @tx_q: queue to send buffer on
2695  * @skb: pointer to the SKB we're sending
2696  * @off: pointer to the offload struct
2697  *
2698  * Return: Positive index number on success, negative otherwise.
2699  */
2700 static int idpf_tx_tstamp(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
2701 			  struct idpf_tx_offload_params *off)
2702 {
2703 	int err, idx;
2704 
2705 	/* only timestamp the outbound packet if the user has requested it */
2706 	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2707 		return -1;
2708 
2709 	if (!idpf_ptp_get_txq_tstamp_capability(tx_q))
2710 		return -1;
2711 
2712 	/* Tx timestamps cannot be sampled when doing TSO */
2713 	if (off->tx_flags & IDPF_TX_FLAGS_TSO)
2714 		return -1;
2715 
2716 	/* Grab an open timestamp slot */
2717 	err = idpf_ptp_request_ts(tx_q, skb, &idx);
2718 	if (err) {
2719 		u64_stats_update_begin(&tx_q->stats_sync);
2720 		u64_stats_inc(&tx_q->q_stats.tstamp_skipped);
2721 		u64_stats_update_end(&tx_q->stats_sync);
2722 
2723 		return -1;
2724 	}
2725 
2726 	off->tx_flags |= IDPF_TX_FLAGS_TSYN;
2727 
2728 	return idx;
2729 }
2730 
2731 /**
2732  * idpf_tx_set_tstamp_desc - Set the Tx descriptor fields needed to generate
2733  *			     PHY Tx timestamp
2734  * @ctx_desc: Context descriptor
2735  * @idx: Index of the Tx timestamp latch
2736  */
2737 static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc,
2738 				    u32 idx)
2739 {
2740 	ctx_desc->tsyn.qw1 = le64_encode_bits(IDPF_TX_DESC_DTYPE_CTX,
2741 					      IDPF_TX_CTX_DTYPE_M) |
2742 			     le64_encode_bits(IDPF_TX_CTX_DESC_TSYN,
2743 					      IDPF_TX_CTX_CMD_M) |
2744 			     le64_encode_bits(idx, IDPF_TX_CTX_TSYN_REG_M);
2745 }
2746 #else /* CONFIG_PTP_1588_CLOCK */
2747 static int idpf_tx_tstamp(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
2748 			  struct idpf_tx_offload_params *off)
2749 {
2750 	return -1;
2751 }
2752 
2753 static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc,
2754 				    u32 idx)
2755 { }
2756 #endif /* CONFIG_PTP_1588_CLOCK */
2757 
2758 /**
2759  * idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors
2760  * @skb: send buffer
2761  * @tx_q: queue to send buffer on
2762  *
2763  * Returns NETDEV_TX_OK if sent, else an error code
2764  */
2765 static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
2766 					struct idpf_tx_queue *tx_q)
2767 {
2768 	struct idpf_tx_splitq_params tx_params = { };
2769 	union idpf_flex_tx_ctx_desc *ctx_desc;
2770 	struct idpf_tx_buf *first;
2771 	unsigned int count;
2772 	int tso, idx;
2773 
2774 	count = idpf_tx_desc_count_required(tx_q, skb);
2775 	if (unlikely(!count))
2776 		return idpf_tx_drop_skb(tx_q, skb);
2777 
2778 	tso = idpf_tso(skb, &tx_params.offload);
2779 	if (unlikely(tso < 0))
2780 		return idpf_tx_drop_skb(tx_q, skb);
2781 
2782 	/* Check for splitq specific TX resources */
2783 	count += (IDPF_TX_DESCS_PER_CACHE_LINE + tso);
2784 	if (idpf_tx_maybe_stop_splitq(tx_q, count)) {
2785 		idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
2786 
2787 		return NETDEV_TX_BUSY;
2788 	}
2789 
2790 	if (tso) {
2791 		/* If tso is needed, set up context desc */
2792 		ctx_desc = idpf_tx_splitq_get_ctx_desc(tx_q);
2793 
2794 		ctx_desc->tso.qw1.cmd_dtype =
2795 				cpu_to_le16(IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX |
2796 					    IDPF_TX_FLEX_CTX_DESC_CMD_TSO);
2797 		ctx_desc->tso.qw0.flex_tlen =
2798 				cpu_to_le32(tx_params.offload.tso_len &
2799 					    IDPF_TXD_FLEX_CTX_TLEN_M);
2800 		ctx_desc->tso.qw0.mss_rt =
2801 				cpu_to_le16(tx_params.offload.mss &
2802 					    IDPF_TXD_FLEX_CTX_MSS_RT_M);
2803 		ctx_desc->tso.qw0.hdr_len = tx_params.offload.tso_hdr_len;
2804 
2805 		u64_stats_update_begin(&tx_q->stats_sync);
2806 		u64_stats_inc(&tx_q->q_stats.lso_pkts);
2807 		u64_stats_update_end(&tx_q->stats_sync);
2808 	}
2809 
2810 	idx = idpf_tx_tstamp(tx_q, skb, &tx_params.offload);
2811 	if (idx != -1) {
2812 		ctx_desc = idpf_tx_splitq_get_ctx_desc(tx_q);
2813 		idpf_tx_set_tstamp_desc(ctx_desc, idx);
2814 	}
2815 
2816 	/* record the location of the first descriptor for this packet */
2817 	first = &tx_q->tx_buf[tx_q->next_to_use];
2818 	first->skb = skb;
2819 
2820 	if (tso) {
2821 		first->packets = tx_params.offload.tso_segs;
2822 		first->bytes = skb->len +
2823 			((first->packets - 1) * tx_params.offload.tso_hdr_len);
2824 	} else {
2825 		first->packets = 1;
2826 		first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
2827 	}
2828 
2829 	if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
2830 		tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE;
2831 		tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP;
2832 		/* Set the RE bit to catch any packets that may have not been
2833 		 * stashed during RS completion cleaning. MIN_GAP is set to
2834 		 * MIN_RING size to ensure it will be set at least once each
2835 		 * time around the ring.
2836 		 */
2837 		if (!(tx_q->next_to_use % IDPF_TX_SPLITQ_RE_MIN_GAP)) {
2838 			tx_params.eop_cmd |= IDPF_TXD_FLEX_FLOW_CMD_RE;
2839 			tx_q->txq_grp->num_completions_pending++;
2840 		}
2841 
2842 		if (skb->ip_summed == CHECKSUM_PARTIAL)
2843 			tx_params.offload.td_cmd |= IDPF_TXD_FLEX_FLOW_CMD_CS_EN;
2844 
2845 	} else {
2846 		tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2;
2847 		tx_params.eop_cmd = IDPF_TXD_LAST_DESC_CMD;
2848 
2849 		if (skb->ip_summed == CHECKSUM_PARTIAL)
2850 			tx_params.offload.td_cmd |= IDPF_TX_FLEX_DESC_CMD_CS_EN;
2851 	}
2852 
2853 	idpf_tx_splitq_map(tx_q, &tx_params, first);
2854 
2855 	return NETDEV_TX_OK;
2856 }
2857 
2858 /**
2859  * idpf_tx_start - Selects the right Tx queue to send buffer
2860  * @skb: send buffer
2861  * @netdev: network interface device structure
2862  *
2863  * Returns NETDEV_TX_OK if sent, else an error code
2864  */
2865 netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev)
2866 {
2867 	struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
2868 	struct idpf_tx_queue *tx_q;
2869 
2870 	if (unlikely(skb_get_queue_mapping(skb) >= vport->num_txq)) {
2871 		dev_kfree_skb_any(skb);
2872 
2873 		return NETDEV_TX_OK;
2874 	}
2875 
2876 	tx_q = vport->txqs[skb_get_queue_mapping(skb)];
2877 
2878 	/* hardware can't handle really short frames, hardware padding works
2879 	 * beyond this point
2880 	 */
2881 	if (skb_put_padto(skb, tx_q->tx_min_pkt_len)) {
2882 		idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
2883 
2884 		return NETDEV_TX_OK;
2885 	}
2886 
2887 	if (idpf_is_queue_model_split(vport->txq_model))
2888 		return idpf_tx_splitq_frame(skb, tx_q);
2889 	else
2890 		return idpf_tx_singleq_frame(skb, tx_q);
2891 }
2892 
2893 /**
2894  * idpf_rx_hash - set the hash value in the skb
2895  * @rxq: Rx descriptor ring packet is being transacted on
2896  * @skb: pointer to current skb being populated
2897  * @rx_desc: Receive descriptor
2898  * @decoded: Decoded Rx packet type related fields
2899  */
2900 static void
2901 idpf_rx_hash(const struct idpf_rx_queue *rxq, struct sk_buff *skb,
2902 	     const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
2903 	     struct libeth_rx_pt decoded)
2904 {
2905 	u32 hash;
2906 
2907 	if (!libeth_rx_pt_has_hash(rxq->netdev, decoded))
2908 		return;
2909 
2910 	hash = le16_to_cpu(rx_desc->hash1) |
2911 	       (rx_desc->ff2_mirrid_hash2.hash2 << 16) |
2912 	       (rx_desc->hash3 << 24);
2913 
2914 	libeth_rx_pt_set_hash(skb, hash, decoded);
2915 }
2916 
2917 /**
2918  * idpf_rx_csum - Indicate in skb if checksum is good
2919  * @rxq: Rx descriptor ring packet is being transacted on
2920  * @skb: pointer to current skb being populated
2921  * @csum_bits: checksum fields extracted from the descriptor
2922  * @decoded: Decoded Rx packet type related fields
2923  *
2924  * skb->protocol must be set before this function is called
2925  */
2926 static void idpf_rx_csum(struct idpf_rx_queue *rxq, struct sk_buff *skb,
2927 			 struct libeth_rx_csum csum_bits,
2928 			 struct libeth_rx_pt decoded)
2929 {
2930 	bool ipv4, ipv6;
2931 
2932 	/* check if Rx checksum is enabled */
2933 	if (!libeth_rx_pt_has_checksum(rxq->netdev, decoded))
2934 		return;
2935 
2936 	/* check if HW has decoded the packet and checksum */
2937 	if (unlikely(!csum_bits.l3l4p))
2938 		return;
2939 
2940 	ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
2941 	ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
2942 
2943 	if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe)))
2944 		goto checksum_fail;
2945 
2946 	if (unlikely(ipv6 && csum_bits.ipv6exadd))
2947 		return;
2948 
2949 	/* check for L4 errors and handle packets that were not able to be
2950 	 * checksummed
2951 	 */
2952 	if (unlikely(csum_bits.l4e))
2953 		goto checksum_fail;
2954 
2955 	if (!csum_bits.raw_csum_valid ||
2956 	    decoded.inner_prot == LIBETH_RX_PT_INNER_SCTP) {
2957 		skb->ip_summed = CHECKSUM_UNNECESSARY;
2958 		return;
2959 	}
2960 
2961 	skb->csum = csum_unfold((__force __sum16)~swab16(csum_bits.raw_csum));
2962 	skb->ip_summed = CHECKSUM_COMPLETE;
2963 
2964 	return;
2965 
2966 checksum_fail:
2967 	u64_stats_update_begin(&rxq->stats_sync);
2968 	u64_stats_inc(&rxq->q_stats.hw_csum_err);
2969 	u64_stats_update_end(&rxq->stats_sync);
2970 }
2971 
2972 /**
2973  * idpf_rx_splitq_extract_csum_bits - Extract checksum bits from descriptor
2974  * @rx_desc: receive descriptor
2975  *
2976  * Return: parsed checksum status.
2977  **/
2978 static struct libeth_rx_csum
2979 idpf_rx_splitq_extract_csum_bits(const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
2980 {
2981 	struct libeth_rx_csum csum = { };
2982 	u8 qword0, qword1;
2983 
2984 	qword0 = rx_desc->status_err0_qw0;
2985 	qword1 = rx_desc->status_err0_qw1;
2986 
2987 	csum.ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M,
2988 			     qword1);
2989 	csum.eipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M,
2990 			      qword1);
2991 	csum.l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M,
2992 			     qword1);
2993 	csum.l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M,
2994 			       qword1);
2995 	csum.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M,
2996 				   qword0);
2997 	csum.raw_csum_valid =
2998 		!le16_get_bits(rx_desc->ptype_err_fflags0,
2999 			       VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M);
3000 	csum.raw_csum = le16_to_cpu(rx_desc->misc.raw_cs);
3001 
3002 	return csum;
3003 }
3004 
3005 /**
3006  * idpf_rx_rsc - Set the RSC fields in the skb
3007  * @rxq : Rx descriptor ring packet is being transacted on
3008  * @skb : pointer to current skb being populated
3009  * @rx_desc: Receive descriptor
3010  * @decoded: Decoded Rx packet type related fields
3011  *
3012  * Return 0 on success and error code on failure
3013  *
3014  * Populate the skb fields with the total number of RSC segments, RSC payload
3015  * length and packet type.
3016  */
3017 static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
3018 		       const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
3019 		       struct libeth_rx_pt decoded)
3020 {
3021 	u16 rsc_segments, rsc_seg_len;
3022 	bool ipv4, ipv6;
3023 	int len;
3024 
3025 	if (unlikely(libeth_rx_pt_get_ip_ver(decoded) ==
3026 		     LIBETH_RX_PT_OUTER_L2))
3027 		return -EINVAL;
3028 
3029 	rsc_seg_len = le16_to_cpu(rx_desc->misc.rscseglen);
3030 	if (unlikely(!rsc_seg_len))
3031 		return -EINVAL;
3032 
3033 	ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
3034 	ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
3035 
3036 	if (unlikely(!(ipv4 ^ ipv6)))
3037 		return -EINVAL;
3038 
3039 	rsc_segments = DIV_ROUND_UP(skb->data_len, rsc_seg_len);
3040 
3041 	NAPI_GRO_CB(skb)->count = rsc_segments;
3042 	skb_shinfo(skb)->gso_size = rsc_seg_len;
3043 
3044 	skb_reset_network_header(skb);
3045 
3046 	if (ipv4) {
3047 		struct iphdr *ipv4h = ip_hdr(skb);
3048 
3049 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
3050 
3051 		/* Reset and set transport header offset in skb */
3052 		skb_set_transport_header(skb, sizeof(struct iphdr));
3053 		len = skb->len - skb_transport_offset(skb);
3054 
3055 		/* Compute the TCP pseudo header checksum*/
3056 		tcp_hdr(skb)->check =
3057 			~tcp_v4_check(len, ipv4h->saddr, ipv4h->daddr, 0);
3058 	} else {
3059 		struct ipv6hdr *ipv6h = ipv6_hdr(skb);
3060 
3061 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
3062 		skb_set_transport_header(skb, sizeof(struct ipv6hdr));
3063 		len = skb->len - skb_transport_offset(skb);
3064 		tcp_hdr(skb)->check =
3065 			~tcp_v6_check(len, &ipv6h->saddr, &ipv6h->daddr, 0);
3066 	}
3067 
3068 	tcp_gro_complete(skb);
3069 
3070 	u64_stats_update_begin(&rxq->stats_sync);
3071 	u64_stats_inc(&rxq->q_stats.rsc_pkts);
3072 	u64_stats_update_end(&rxq->stats_sync);
3073 
3074 	return 0;
3075 }
3076 
3077 /**
3078  * idpf_rx_hwtstamp - check for an RX timestamp and pass up the stack
3079  * @rxq: pointer to the rx queue that receives the timestamp
3080  * @rx_desc: pointer to rx descriptor containing timestamp
3081  * @skb: skb to put timestamp in
3082  */
3083 static void
3084 idpf_rx_hwtstamp(const struct idpf_rx_queue *rxq,
3085 		 const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
3086 		 struct sk_buff *skb)
3087 {
3088 	u64 cached_time, ts_ns;
3089 	u32 ts_high;
3090 
3091 	if (!(rx_desc->ts_low & VIRTCHNL2_RX_FLEX_TSTAMP_VALID))
3092 		return;
3093 
3094 	cached_time = READ_ONCE(rxq->cached_phc_time);
3095 
3096 	ts_high = le32_to_cpu(rx_desc->ts_high);
3097 	ts_ns = idpf_ptp_tstamp_extend_32b_to_64b(cached_time, ts_high);
3098 
3099 	*skb_hwtstamps(skb) = (struct skb_shared_hwtstamps) {
3100 		.hwtstamp = ns_to_ktime(ts_ns),
3101 	};
3102 }
3103 
3104 /**
3105  * idpf_rx_process_skb_fields - Populate skb header fields from Rx descriptor
3106  * @rxq: Rx descriptor ring packet is being transacted on
3107  * @skb: pointer to current skb being populated
3108  * @rx_desc: Receive descriptor
3109  *
3110  * This function checks the ring, descriptor, and packet information in
3111  * order to populate the hash, checksum, protocol, and
3112  * other fields within the skb.
3113  */
3114 static int
3115 idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
3116 			   const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
3117 {
3118 	struct libeth_rx_csum csum_bits;
3119 	struct libeth_rx_pt decoded;
3120 	u16 rx_ptype;
3121 
3122 	rx_ptype = le16_get_bits(rx_desc->ptype_err_fflags0,
3123 				 VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M);
3124 	decoded = rxq->rx_ptype_lkup[rx_ptype];
3125 
3126 	/* process RSS/hash */
3127 	idpf_rx_hash(rxq, skb, rx_desc, decoded);
3128 
3129 	if (idpf_queue_has(PTP, rxq))
3130 		idpf_rx_hwtstamp(rxq, rx_desc, skb);
3131 
3132 	skb->protocol = eth_type_trans(skb, rxq->netdev);
3133 	skb_record_rx_queue(skb, rxq->idx);
3134 
3135 	if (le16_get_bits(rx_desc->hdrlen_flags,
3136 			  VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M))
3137 		return idpf_rx_rsc(rxq, skb, rx_desc, decoded);
3138 
3139 	csum_bits = idpf_rx_splitq_extract_csum_bits(rx_desc);
3140 	idpf_rx_csum(rxq, skb, csum_bits, decoded);
3141 
3142 	return 0;
3143 }
3144 
3145 /**
3146  * idpf_rx_add_frag - Add contents of Rx buffer to sk_buff as a frag
3147  * @rx_buf: buffer containing page to add
3148  * @skb: sk_buff to place the data into
3149  * @size: packet length from rx_desc
3150  *
3151  * This function will add the data contained in rx_buf->page to the skb.
3152  * It will just attach the page as a frag to the skb.
3153  * The function will then update the page offset.
3154  */
3155 void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
3156 		      unsigned int size)
3157 {
3158 	u32 hr = netmem_get_pp(rx_buf->netmem)->p.offset;
3159 
3160 	skb_add_rx_frag_netmem(skb, skb_shinfo(skb)->nr_frags, rx_buf->netmem,
3161 			       rx_buf->offset + hr, size, rx_buf->truesize);
3162 }
3163 
3164 /**
3165  * idpf_rx_hsplit_wa - handle header buffer overflows and split errors
3166  * @hdr: Rx buffer for the headers
3167  * @buf: Rx buffer for the payload
3168  * @data_len: number of bytes received to the payload buffer
3169  *
3170  * When a header buffer overflow occurs or the HW was unable do parse the
3171  * packet type to perform header split, the whole frame gets placed to the
3172  * payload buffer. We can't build a valid skb around a payload buffer when
3173  * the header split is active since it doesn't reserve any head- or tailroom.
3174  * In that case, copy either the whole frame when it's short or just the
3175  * Ethernet header to the header buffer to be able to build an skb and adjust
3176  * the data offset in the payload buffer, IOW emulate the header split.
3177  *
3178  * Return: number of bytes copied to the header buffer.
3179  */
3180 static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr,
3181 			     struct libeth_fqe *buf, u32 data_len)
3182 {
3183 	u32 copy = data_len <= L1_CACHE_BYTES ? data_len : ETH_HLEN;
3184 	struct page *hdr_page, *buf_page;
3185 	const void *src;
3186 	void *dst;
3187 
3188 	if (unlikely(netmem_is_net_iov(buf->netmem)) ||
3189 	    !libeth_rx_sync_for_cpu(buf, copy))
3190 		return 0;
3191 
3192 	hdr_page = __netmem_to_page(hdr->netmem);
3193 	buf_page = __netmem_to_page(buf->netmem);
3194 	dst = page_address(hdr_page) + hdr->offset +
3195 		pp_page_to_nmdesc(hdr_page)->pp->p.offset;
3196 	src = page_address(buf_page) + buf->offset +
3197 		pp_page_to_nmdesc(buf_page)->pp->p.offset;
3198 
3199 	memcpy(dst, src, LARGEST_ALIGN(copy));
3200 	buf->offset += copy;
3201 
3202 	return copy;
3203 }
3204 
3205 /**
3206  * idpf_rx_build_skb - Allocate skb and populate it from header buffer
3207  * @buf: Rx buffer to pull data from
3208  * @size: the length of the packet
3209  *
3210  * This function allocates an skb. It then populates it with the page data from
3211  * the current receive descriptor, taking care to set up the skb correctly.
3212  */
3213 struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size)
3214 {
3215 	struct page *buf_page = __netmem_to_page(buf->netmem);
3216 	u32 hr = pp_page_to_nmdesc(buf_page)->pp->p.offset;
3217 	struct sk_buff *skb;
3218 	void *va;
3219 
3220 	va = page_address(buf_page) + buf->offset;
3221 	prefetch(va + hr);
3222 
3223 	skb = napi_build_skb(va, buf->truesize);
3224 	if (unlikely(!skb))
3225 		return NULL;
3226 
3227 	skb_mark_for_recycle(skb);
3228 
3229 	skb_reserve(skb, hr);
3230 	__skb_put(skb, size);
3231 
3232 	return skb;
3233 }
3234 
3235 /**
3236  * idpf_rx_splitq_test_staterr - tests bits in Rx descriptor
3237  * status and error fields
3238  * @stat_err_field: field from descriptor to test bits in
3239  * @stat_err_bits: value to mask
3240  *
3241  */
3242 static bool idpf_rx_splitq_test_staterr(const u8 stat_err_field,
3243 					const u8 stat_err_bits)
3244 {
3245 	return !!(stat_err_field & stat_err_bits);
3246 }
3247 
3248 /**
3249  * idpf_rx_splitq_is_eop - process handling of EOP buffers
3250  * @rx_desc: Rx descriptor for current buffer
3251  *
3252  * If the buffer is an EOP buffer, this function exits returning true,
3253  * otherwise return false indicating that this is in fact a non-EOP buffer.
3254  */
3255 static bool idpf_rx_splitq_is_eop(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
3256 {
3257 	/* if we are the last buffer then there is nothing else to do */
3258 	return likely(idpf_rx_splitq_test_staterr(rx_desc->status_err0_qw1,
3259 						  IDPF_RXD_EOF_SPLITQ));
3260 }
3261 
3262 /**
3263  * idpf_rx_splitq_clean - Clean completed descriptors from Rx queue
3264  * @rxq: Rx descriptor queue to retrieve receive buffer queue
3265  * @budget: Total limit on number of packets to process
3266  *
3267  * This function provides a "bounce buffer" approach to Rx interrupt
3268  * processing. The advantage to this is that on systems that have
3269  * expensive overhead for IOMMU access this provides a means of avoiding
3270  * it by maintaining the mapping of the page to the system.
3271  *
3272  * Returns amount of work completed
3273  */
3274 static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
3275 {
3276 	int total_rx_bytes = 0, total_rx_pkts = 0;
3277 	struct idpf_buf_queue *rx_bufq = NULL;
3278 	struct sk_buff *skb = rxq->skb;
3279 	u16 ntc = rxq->next_to_clean;
3280 
3281 	/* Process Rx packets bounded by budget */
3282 	while (likely(total_rx_pkts < budget)) {
3283 		struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
3284 		struct libeth_fqe *hdr, *rx_buf = NULL;
3285 		struct idpf_sw_queue *refillq = NULL;
3286 		struct idpf_rxq_set *rxq_set = NULL;
3287 		unsigned int pkt_len = 0;
3288 		unsigned int hdr_len = 0;
3289 		u16 gen_id, buf_id = 0;
3290 		int bufq_id;
3291 		u8 rxdid;
3292 
3293 		/* get the Rx desc from Rx queue based on 'next_to_clean' */
3294 		rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb;
3295 
3296 		/* This memory barrier is needed to keep us from reading
3297 		 * any other fields out of the rx_desc
3298 		 */
3299 		dma_rmb();
3300 
3301 		/* if the descriptor isn't done, no work yet to do */
3302 		gen_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
3303 				       VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M);
3304 
3305 		if (idpf_queue_has(GEN_CHK, rxq) != gen_id)
3306 			break;
3307 
3308 		rxdid = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M,
3309 				  rx_desc->rxdid_ucast);
3310 		if (rxdid != VIRTCHNL2_RXDID_2_FLEX_SPLITQ) {
3311 			IDPF_RX_BUMP_NTC(rxq, ntc);
3312 			u64_stats_update_begin(&rxq->stats_sync);
3313 			u64_stats_inc(&rxq->q_stats.bad_descs);
3314 			u64_stats_update_end(&rxq->stats_sync);
3315 			continue;
3316 		}
3317 
3318 		pkt_len = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
3319 					VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M);
3320 
3321 		bufq_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
3322 					VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M);
3323 
3324 		rxq_set = container_of(rxq, struct idpf_rxq_set, rxq);
3325 		refillq = rxq_set->refillq[bufq_id];
3326 
3327 		/* retrieve buffer from the rxq */
3328 		rx_bufq = &rxq->bufq_sets[bufq_id].bufq;
3329 
3330 		buf_id = le16_to_cpu(rx_desc->buf_id);
3331 
3332 		rx_buf = &rx_bufq->buf[buf_id];
3333 
3334 		if (!rx_bufq->hdr_pp)
3335 			goto payload;
3336 
3337 #define __HBO_BIT	VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M
3338 #define __HDR_LEN_MASK	VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M
3339 		if (likely(!(rx_desc->status_err0_qw1 & __HBO_BIT)))
3340 			/* If a header buffer overflow, occurs, i.e. header is
3341 			 * too large to fit in the header split buffer, HW will
3342 			 * put the entire packet, including headers, in the
3343 			 * data/payload buffer.
3344 			 */
3345 			hdr_len = le16_get_bits(rx_desc->hdrlen_flags,
3346 						__HDR_LEN_MASK);
3347 #undef __HDR_LEN_MASK
3348 #undef __HBO_BIT
3349 
3350 		hdr = &rx_bufq->hdr_buf[buf_id];
3351 
3352 		if (unlikely(!hdr_len && !skb)) {
3353 			hdr_len = idpf_rx_hsplit_wa(hdr, rx_buf, pkt_len);
3354 			/* If failed, drop both buffers by setting len to 0 */
3355 			pkt_len -= hdr_len ? : pkt_len;
3356 
3357 			u64_stats_update_begin(&rxq->stats_sync);
3358 			u64_stats_inc(&rxq->q_stats.hsplit_buf_ovf);
3359 			u64_stats_update_end(&rxq->stats_sync);
3360 		}
3361 
3362 		if (libeth_rx_sync_for_cpu(hdr, hdr_len)) {
3363 			skb = idpf_rx_build_skb(hdr, hdr_len);
3364 			if (!skb)
3365 				break;
3366 
3367 			u64_stats_update_begin(&rxq->stats_sync);
3368 			u64_stats_inc(&rxq->q_stats.hsplit_pkts);
3369 			u64_stats_update_end(&rxq->stats_sync);
3370 		}
3371 
3372 		hdr->netmem = 0;
3373 
3374 payload:
3375 		if (!libeth_rx_sync_for_cpu(rx_buf, pkt_len))
3376 			goto skip_data;
3377 
3378 		if (skb)
3379 			idpf_rx_add_frag(rx_buf, skb, pkt_len);
3380 		else
3381 			skb = idpf_rx_build_skb(rx_buf, pkt_len);
3382 
3383 		/* exit if we failed to retrieve a buffer */
3384 		if (!skb)
3385 			break;
3386 
3387 skip_data:
3388 		rx_buf->netmem = 0;
3389 
3390 		idpf_rx_post_buf_refill(refillq, buf_id);
3391 		IDPF_RX_BUMP_NTC(rxq, ntc);
3392 
3393 		/* skip if it is non EOP desc */
3394 		if (!idpf_rx_splitq_is_eop(rx_desc) || unlikely(!skb))
3395 			continue;
3396 
3397 		/* pad skb if needed (to make valid ethernet frame) */
3398 		if (eth_skb_pad(skb)) {
3399 			skb = NULL;
3400 			continue;
3401 		}
3402 
3403 		/* probably a little skewed due to removing CRC */
3404 		total_rx_bytes += skb->len;
3405 
3406 		/* protocol */
3407 		if (unlikely(idpf_rx_process_skb_fields(rxq, skb, rx_desc))) {
3408 			dev_kfree_skb_any(skb);
3409 			skb = NULL;
3410 			continue;
3411 		}
3412 
3413 		/* send completed skb up the stack */
3414 		napi_gro_receive(rxq->napi, skb);
3415 		skb = NULL;
3416 
3417 		/* update budget accounting */
3418 		total_rx_pkts++;
3419 	}
3420 
3421 	rxq->next_to_clean = ntc;
3422 
3423 	rxq->skb = skb;
3424 	u64_stats_update_begin(&rxq->stats_sync);
3425 	u64_stats_add(&rxq->q_stats.packets, total_rx_pkts);
3426 	u64_stats_add(&rxq->q_stats.bytes, total_rx_bytes);
3427 	u64_stats_update_end(&rxq->stats_sync);
3428 
3429 	/* guarantee a trip back through this routine if there was a failure */
3430 	return total_rx_pkts;
3431 }
3432 
3433 /**
3434  * idpf_rx_update_bufq_desc - Update buffer queue descriptor
3435  * @bufq: Pointer to the buffer queue
3436  * @buf_id: buffer ID
3437  * @buf_desc: Buffer queue descriptor
3438  *
3439  * Return 0 on success and negative on failure.
3440  */
3441 static int idpf_rx_update_bufq_desc(struct idpf_buf_queue *bufq, u32 buf_id,
3442 				    struct virtchnl2_splitq_rx_buf_desc *buf_desc)
3443 {
3444 	struct libeth_fq_fp fq = {
3445 		.pp		= bufq->pp,
3446 		.fqes		= bufq->buf,
3447 		.truesize	= bufq->truesize,
3448 		.count		= bufq->desc_count,
3449 	};
3450 	dma_addr_t addr;
3451 
3452 	addr = libeth_rx_alloc(&fq, buf_id);
3453 	if (addr == DMA_MAPPING_ERROR)
3454 		return -ENOMEM;
3455 
3456 	buf_desc->pkt_addr = cpu_to_le64(addr);
3457 	buf_desc->qword0.buf_id = cpu_to_le16(buf_id);
3458 
3459 	if (!idpf_queue_has(HSPLIT_EN, bufq))
3460 		return 0;
3461 
3462 	fq.pp = bufq->hdr_pp;
3463 	fq.fqes = bufq->hdr_buf;
3464 	fq.truesize = bufq->hdr_truesize;
3465 
3466 	addr = libeth_rx_alloc(&fq, buf_id);
3467 	if (addr == DMA_MAPPING_ERROR)
3468 		return -ENOMEM;
3469 
3470 	buf_desc->hdr_addr = cpu_to_le64(addr);
3471 
3472 	return 0;
3473 }
3474 
3475 /**
3476  * idpf_rx_clean_refillq - Clean refill queue buffers
3477  * @bufq: buffer queue to post buffers back to
3478  * @refillq: refill queue to clean
3479  *
3480  * This function takes care of the buffer refill management
3481  */
3482 static void idpf_rx_clean_refillq(struct idpf_buf_queue *bufq,
3483 				  struct idpf_sw_queue *refillq)
3484 {
3485 	struct virtchnl2_splitq_rx_buf_desc *buf_desc;
3486 	u16 bufq_nta = bufq->next_to_alloc;
3487 	u16 ntc = refillq->next_to_clean;
3488 	int cleaned = 0;
3489 
3490 	buf_desc = &bufq->split_buf[bufq_nta];
3491 
3492 	/* make sure we stop at ring wrap in the unlikely case ring is full */
3493 	while (likely(cleaned < refillq->desc_count)) {
3494 		u32 buf_id, refill_desc = refillq->ring[ntc];
3495 		bool failure;
3496 
3497 		if (idpf_queue_has(RFL_GEN_CHK, refillq) !=
3498 		    !!(refill_desc & IDPF_RX_BI_GEN_M))
3499 			break;
3500 
3501 		buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc);
3502 		failure = idpf_rx_update_bufq_desc(bufq, buf_id, buf_desc);
3503 		if (failure)
3504 			break;
3505 
3506 		if (unlikely(++ntc == refillq->desc_count)) {
3507 			idpf_queue_change(RFL_GEN_CHK, refillq);
3508 			ntc = 0;
3509 		}
3510 
3511 		if (unlikely(++bufq_nta == bufq->desc_count)) {
3512 			buf_desc = &bufq->split_buf[0];
3513 			bufq_nta = 0;
3514 		} else {
3515 			buf_desc++;
3516 		}
3517 
3518 		cleaned++;
3519 	}
3520 
3521 	if (!cleaned)
3522 		return;
3523 
3524 	/* We want to limit how many transactions on the bus we trigger with
3525 	 * tail writes so we only do it in strides. It's also important we
3526 	 * align the write to a multiple of 8 as required by HW.
3527 	 */
3528 	if (((bufq->next_to_use <= bufq_nta ? 0 : bufq->desc_count) +
3529 	    bufq_nta - bufq->next_to_use) >= IDPF_RX_BUF_POST_STRIDE)
3530 		idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq_nta,
3531 						       IDPF_RX_BUF_POST_STRIDE));
3532 
3533 	/* update next to alloc since we have filled the ring */
3534 	refillq->next_to_clean = ntc;
3535 	bufq->next_to_alloc = bufq_nta;
3536 }
3537 
3538 /**
3539  * idpf_rx_clean_refillq_all - Clean all refill queues
3540  * @bufq: buffer queue with refill queues
3541  * @nid: ID of the closest NUMA node with memory
3542  *
3543  * Iterates through all refill queues assigned to the buffer queue assigned to
3544  * this vector.  Returns true if clean is complete within budget, false
3545  * otherwise.
3546  */
3547 static void idpf_rx_clean_refillq_all(struct idpf_buf_queue *bufq, int nid)
3548 {
3549 	struct idpf_bufq_set *bufq_set;
3550 	int i;
3551 
3552 	page_pool_nid_changed(bufq->pp, nid);
3553 	if (bufq->hdr_pp)
3554 		page_pool_nid_changed(bufq->hdr_pp, nid);
3555 
3556 	bufq_set = container_of(bufq, struct idpf_bufq_set, bufq);
3557 	for (i = 0; i < bufq_set->num_refillqs; i++)
3558 		idpf_rx_clean_refillq(bufq, &bufq_set->refillqs[i]);
3559 }
3560 
3561 /**
3562  * idpf_vport_intr_clean_queues - MSIX mode Interrupt Handler
3563  * @irq: interrupt number
3564  * @data: pointer to a q_vector
3565  *
3566  */
3567 static irqreturn_t idpf_vport_intr_clean_queues(int __always_unused irq,
3568 						void *data)
3569 {
3570 	struct idpf_q_vector *q_vector = (struct idpf_q_vector *)data;
3571 
3572 	q_vector->total_events++;
3573 	napi_schedule(&q_vector->napi);
3574 
3575 	return IRQ_HANDLED;
3576 }
3577 
3578 /**
3579  * idpf_vport_intr_napi_del_all - Unregister napi for all q_vectors in vport
3580  * @vport: virtual port structure
3581  *
3582  */
3583 static void idpf_vport_intr_napi_del_all(struct idpf_vport *vport)
3584 {
3585 	u16 v_idx;
3586 
3587 	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++)
3588 		netif_napi_del(&vport->q_vectors[v_idx].napi);
3589 }
3590 
3591 /**
3592  * idpf_vport_intr_napi_dis_all - Disable NAPI for all q_vectors in the vport
3593  * @vport: main vport structure
3594  */
3595 static void idpf_vport_intr_napi_dis_all(struct idpf_vport *vport)
3596 {
3597 	int v_idx;
3598 
3599 	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++)
3600 		napi_disable(&vport->q_vectors[v_idx].napi);
3601 }
3602 
3603 /**
3604  * idpf_vport_intr_rel - Free memory allocated for interrupt vectors
3605  * @vport: virtual port
3606  *
3607  * Free the memory allocated for interrupt vectors  associated to a vport
3608  */
3609 void idpf_vport_intr_rel(struct idpf_vport *vport)
3610 {
3611 	for (u32 v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
3612 		struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
3613 
3614 		kfree(q_vector->complq);
3615 		q_vector->complq = NULL;
3616 		kfree(q_vector->bufq);
3617 		q_vector->bufq = NULL;
3618 		kfree(q_vector->tx);
3619 		q_vector->tx = NULL;
3620 		kfree(q_vector->rx);
3621 		q_vector->rx = NULL;
3622 	}
3623 
3624 	kfree(vport->q_vectors);
3625 	vport->q_vectors = NULL;
3626 }
3627 
3628 /**
3629  * idpf_vport_intr_rel_irq - Free the IRQ association with the OS
3630  * @vport: main vport structure
3631  */
3632 static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
3633 {
3634 	struct idpf_adapter *adapter = vport->adapter;
3635 	int vector;
3636 
3637 	for (vector = 0; vector < vport->num_q_vectors; vector++) {
3638 		struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
3639 		int irq_num, vidx;
3640 
3641 		/* free only the irqs that were actually requested */
3642 		if (!q_vector)
3643 			continue;
3644 
3645 		vidx = vport->q_vector_idxs[vector];
3646 		irq_num = adapter->msix_entries[vidx].vector;
3647 
3648 		kfree(free_irq(irq_num, q_vector));
3649 	}
3650 }
3651 
3652 /**
3653  * idpf_vport_intr_dis_irq_all - Disable all interrupt
3654  * @vport: main vport structure
3655  */
3656 static void idpf_vport_intr_dis_irq_all(struct idpf_vport *vport)
3657 {
3658 	struct idpf_q_vector *q_vector = vport->q_vectors;
3659 	int q_idx;
3660 
3661 	for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++)
3662 		writel(0, q_vector[q_idx].intr_reg.dyn_ctl);
3663 }
3664 
3665 /**
3666  * idpf_vport_intr_buildreg_itr - Enable default interrupt generation settings
3667  * @q_vector: pointer to q_vector
3668  */
3669 static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector)
3670 {
3671 	u32 itr_val = q_vector->intr_reg.dyn_ctl_intena_m;
3672 	int type = IDPF_NO_ITR_UPDATE_IDX;
3673 	u16 itr = 0;
3674 
3675 	if (q_vector->wb_on_itr) {
3676 		/*
3677 		 * Trigger a software interrupt when exiting wb_on_itr, to make
3678 		 * sure we catch any pending write backs that might have been
3679 		 * missed due to interrupt state transition.
3680 		 */
3681 		itr_val |= q_vector->intr_reg.dyn_ctl_swint_trig_m |
3682 			   q_vector->intr_reg.dyn_ctl_sw_itridx_ena_m;
3683 		type = IDPF_SW_ITR_UPDATE_IDX;
3684 		itr = IDPF_ITR_20K;
3685 	}
3686 
3687 	itr &= IDPF_ITR_MASK;
3688 	/* Don't clear PBA because that can cause lost interrupts that
3689 	 * came in while we were cleaning/polling
3690 	 */
3691 	itr_val |= (type << q_vector->intr_reg.dyn_ctl_itridx_s) |
3692 		   (itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1));
3693 
3694 	return itr_val;
3695 }
3696 
3697 /**
3698  * idpf_update_dim_sample - Update dim sample with packets and bytes
3699  * @q_vector: the vector associated with the interrupt
3700  * @dim_sample: dim sample to update
3701  * @dim: dim instance structure
3702  * @packets: total packets
3703  * @bytes: total bytes
3704  *
3705  * Update the dim sample with the packets and bytes which are passed to this
3706  * function. Set the dim state appropriately if the dim settings gets stale.
3707  */
3708 static void idpf_update_dim_sample(struct idpf_q_vector *q_vector,
3709 				   struct dim_sample *dim_sample,
3710 				   struct dim *dim, u64 packets, u64 bytes)
3711 {
3712 	dim_update_sample(q_vector->total_events, packets, bytes, dim_sample);
3713 	dim_sample->comp_ctr = 0;
3714 
3715 	/* if dim settings get stale, like when not updated for 1 second or
3716 	 * longer, force it to start again. This addresses the frequent case
3717 	 * of an idle queue being switched to by the scheduler.
3718 	 */
3719 	if (ktime_ms_delta(dim_sample->time, dim->start_sample.time) >= HZ)
3720 		dim->state = DIM_START_MEASURE;
3721 }
3722 
3723 /**
3724  * idpf_net_dim - Update net DIM algorithm
3725  * @q_vector: the vector associated with the interrupt
3726  *
3727  * Create a DIM sample and notify net_dim() so that it can possibly decide
3728  * a new ITR value based on incoming packets, bytes, and interrupts.
3729  *
3730  * This function is a no-op if the queue is not configured to dynamic ITR.
3731  */
3732 static void idpf_net_dim(struct idpf_q_vector *q_vector)
3733 {
3734 	struct dim_sample dim_sample = { };
3735 	u64 packets, bytes;
3736 	u32 i;
3737 
3738 	if (!IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode))
3739 		goto check_rx_itr;
3740 
3741 	for (i = 0, packets = 0, bytes = 0; i < q_vector->num_txq; i++) {
3742 		struct idpf_tx_queue *txq = q_vector->tx[i];
3743 		unsigned int start;
3744 
3745 		do {
3746 			start = u64_stats_fetch_begin(&txq->stats_sync);
3747 			packets += u64_stats_read(&txq->q_stats.packets);
3748 			bytes += u64_stats_read(&txq->q_stats.bytes);
3749 		} while (u64_stats_fetch_retry(&txq->stats_sync, start));
3750 	}
3751 
3752 	idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->tx_dim,
3753 			       packets, bytes);
3754 	net_dim(&q_vector->tx_dim, &dim_sample);
3755 
3756 check_rx_itr:
3757 	if (!IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode))
3758 		return;
3759 
3760 	for (i = 0, packets = 0, bytes = 0; i < q_vector->num_rxq; i++) {
3761 		struct idpf_rx_queue *rxq = q_vector->rx[i];
3762 		unsigned int start;
3763 
3764 		do {
3765 			start = u64_stats_fetch_begin(&rxq->stats_sync);
3766 			packets += u64_stats_read(&rxq->q_stats.packets);
3767 			bytes += u64_stats_read(&rxq->q_stats.bytes);
3768 		} while (u64_stats_fetch_retry(&rxq->stats_sync, start));
3769 	}
3770 
3771 	idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->rx_dim,
3772 			       packets, bytes);
3773 	net_dim(&q_vector->rx_dim, &dim_sample);
3774 }
3775 
3776 /**
3777  * idpf_vport_intr_update_itr_ena_irq - Update itr and re-enable MSIX interrupt
3778  * @q_vector: q_vector for which itr is being updated and interrupt enabled
3779  *
3780  * Update the net_dim() algorithm and re-enable the interrupt associated with
3781  * this vector.
3782  */
3783 void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector)
3784 {
3785 	u32 intval;
3786 
3787 	/* net_dim() updates ITR out-of-band using a work item */
3788 	idpf_net_dim(q_vector);
3789 
3790 	intval = idpf_vport_intr_buildreg_itr(q_vector);
3791 	q_vector->wb_on_itr = false;
3792 
3793 	writel(intval, q_vector->intr_reg.dyn_ctl);
3794 }
3795 
3796 /**
3797  * idpf_vport_intr_req_irq - get MSI-X vectors from the OS for the vport
3798  * @vport: main vport structure
3799  */
3800 static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
3801 {
3802 	struct idpf_adapter *adapter = vport->adapter;
3803 	const char *drv_name, *if_name, *vec_name;
3804 	int vector, err, irq_num, vidx;
3805 
3806 	drv_name = dev_driver_string(&adapter->pdev->dev);
3807 	if_name = netdev_name(vport->netdev);
3808 
3809 	for (vector = 0; vector < vport->num_q_vectors; vector++) {
3810 		struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
3811 		char *name;
3812 
3813 		vidx = vport->q_vector_idxs[vector];
3814 		irq_num = adapter->msix_entries[vidx].vector;
3815 
3816 		if (q_vector->num_rxq && q_vector->num_txq)
3817 			vec_name = "TxRx";
3818 		else if (q_vector->num_rxq)
3819 			vec_name = "Rx";
3820 		else if (q_vector->num_txq)
3821 			vec_name = "Tx";
3822 		else
3823 			continue;
3824 
3825 		name = kasprintf(GFP_KERNEL, "%s-%s-%s-%d", drv_name, if_name,
3826 				 vec_name, vidx);
3827 
3828 		err = request_irq(irq_num, idpf_vport_intr_clean_queues, 0,
3829 				  name, q_vector);
3830 		if (err) {
3831 			netdev_err(vport->netdev,
3832 				   "Request_irq failed, error: %d\n", err);
3833 			goto free_q_irqs;
3834 		}
3835 	}
3836 
3837 	return 0;
3838 
3839 free_q_irqs:
3840 	while (--vector >= 0) {
3841 		vidx = vport->q_vector_idxs[vector];
3842 		irq_num = adapter->msix_entries[vidx].vector;
3843 		kfree(free_irq(irq_num, &vport->q_vectors[vector]));
3844 	}
3845 
3846 	return err;
3847 }
3848 
3849 /**
3850  * idpf_vport_intr_write_itr - Write ITR value to the ITR register
3851  * @q_vector: q_vector structure
3852  * @itr: Interrupt throttling rate
3853  * @tx: Tx or Rx ITR
3854  */
3855 void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector, u16 itr, bool tx)
3856 {
3857 	struct idpf_intr_reg *intr_reg;
3858 
3859 	if (tx && !q_vector->tx)
3860 		return;
3861 	else if (!tx && !q_vector->rx)
3862 		return;
3863 
3864 	intr_reg = &q_vector->intr_reg;
3865 	writel(ITR_REG_ALIGN(itr) >> IDPF_ITR_GRAN_S,
3866 	       tx ? intr_reg->tx_itr : intr_reg->rx_itr);
3867 }
3868 
3869 /**
3870  * idpf_vport_intr_ena_irq_all - Enable IRQ for the given vport
3871  * @vport: main vport structure
3872  */
3873 static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport)
3874 {
3875 	bool dynamic;
3876 	int q_idx;
3877 	u16 itr;
3878 
3879 	for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) {
3880 		struct idpf_q_vector *qv = &vport->q_vectors[q_idx];
3881 
3882 		/* Set the initial ITR values */
3883 		if (qv->num_txq) {
3884 			dynamic = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode);
3885 			itr = vport->tx_itr_profile[qv->tx_dim.profile_ix];
3886 			idpf_vport_intr_write_itr(qv, dynamic ?
3887 						  itr : qv->tx_itr_value,
3888 						  true);
3889 		}
3890 
3891 		if (qv->num_rxq) {
3892 			dynamic = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode);
3893 			itr = vport->rx_itr_profile[qv->rx_dim.profile_ix];
3894 			idpf_vport_intr_write_itr(qv, dynamic ?
3895 						  itr : qv->rx_itr_value,
3896 						  false);
3897 		}
3898 
3899 		if (qv->num_txq || qv->num_rxq)
3900 			idpf_vport_intr_update_itr_ena_irq(qv);
3901 	}
3902 }
3903 
3904 /**
3905  * idpf_vport_intr_deinit - Release all vector associations for the vport
3906  * @vport: main vport structure
3907  */
3908 void idpf_vport_intr_deinit(struct idpf_vport *vport)
3909 {
3910 	idpf_vport_intr_dis_irq_all(vport);
3911 	idpf_vport_intr_napi_dis_all(vport);
3912 	idpf_vport_intr_napi_del_all(vport);
3913 	idpf_vport_intr_rel_irq(vport);
3914 }
3915 
3916 /**
3917  * idpf_tx_dim_work - Call back from the stack
3918  * @work: work queue structure
3919  */
3920 static void idpf_tx_dim_work(struct work_struct *work)
3921 {
3922 	struct idpf_q_vector *q_vector;
3923 	struct idpf_vport *vport;
3924 	struct dim *dim;
3925 	u16 itr;
3926 
3927 	dim = container_of(work, struct dim, work);
3928 	q_vector = container_of(dim, struct idpf_q_vector, tx_dim);
3929 	vport = q_vector->vport;
3930 
3931 	if (dim->profile_ix >= ARRAY_SIZE(vport->tx_itr_profile))
3932 		dim->profile_ix = ARRAY_SIZE(vport->tx_itr_profile) - 1;
3933 
3934 	/* look up the values in our local table */
3935 	itr = vport->tx_itr_profile[dim->profile_ix];
3936 
3937 	idpf_vport_intr_write_itr(q_vector, itr, true);
3938 
3939 	dim->state = DIM_START_MEASURE;
3940 }
3941 
3942 /**
3943  * idpf_rx_dim_work - Call back from the stack
3944  * @work: work queue structure
3945  */
3946 static void idpf_rx_dim_work(struct work_struct *work)
3947 {
3948 	struct idpf_q_vector *q_vector;
3949 	struct idpf_vport *vport;
3950 	struct dim *dim;
3951 	u16 itr;
3952 
3953 	dim = container_of(work, struct dim, work);
3954 	q_vector = container_of(dim, struct idpf_q_vector, rx_dim);
3955 	vport = q_vector->vport;
3956 
3957 	if (dim->profile_ix >= ARRAY_SIZE(vport->rx_itr_profile))
3958 		dim->profile_ix = ARRAY_SIZE(vport->rx_itr_profile) - 1;
3959 
3960 	/* look up the values in our local table */
3961 	itr = vport->rx_itr_profile[dim->profile_ix];
3962 
3963 	idpf_vport_intr_write_itr(q_vector, itr, false);
3964 
3965 	dim->state = DIM_START_MEASURE;
3966 }
3967 
3968 /**
3969  * idpf_init_dim - Set up dynamic interrupt moderation
3970  * @qv: q_vector structure
3971  */
3972 static void idpf_init_dim(struct idpf_q_vector *qv)
3973 {
3974 	INIT_WORK(&qv->tx_dim.work, idpf_tx_dim_work);
3975 	qv->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
3976 	qv->tx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX;
3977 
3978 	INIT_WORK(&qv->rx_dim.work, idpf_rx_dim_work);
3979 	qv->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
3980 	qv->rx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX;
3981 }
3982 
3983 /**
3984  * idpf_vport_intr_napi_ena_all - Enable NAPI for all q_vectors in the vport
3985  * @vport: main vport structure
3986  */
3987 static void idpf_vport_intr_napi_ena_all(struct idpf_vport *vport)
3988 {
3989 	int q_idx;
3990 
3991 	for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) {
3992 		struct idpf_q_vector *q_vector = &vport->q_vectors[q_idx];
3993 
3994 		idpf_init_dim(q_vector);
3995 		napi_enable(&q_vector->napi);
3996 	}
3997 }
3998 
3999 /**
4000  * idpf_tx_splitq_clean_all- Clean completion queues
4001  * @q_vec: queue vector
4002  * @budget: Used to determine if we are in netpoll
4003  * @cleaned: returns number of packets cleaned
4004  *
4005  * Returns false if clean is not complete else returns true
4006  */
4007 static bool idpf_tx_splitq_clean_all(struct idpf_q_vector *q_vec,
4008 				     int budget, int *cleaned)
4009 {
4010 	u16 num_complq = q_vec->num_complq;
4011 	bool clean_complete = true;
4012 	int i, budget_per_q;
4013 
4014 	if (unlikely(!num_complq))
4015 		return true;
4016 
4017 	budget_per_q = DIV_ROUND_UP(budget, num_complq);
4018 
4019 	for (i = 0; i < num_complq; i++)
4020 		clean_complete &= idpf_tx_clean_complq(q_vec->complq[i],
4021 						       budget_per_q, cleaned);
4022 
4023 	return clean_complete;
4024 }
4025 
4026 /**
4027  * idpf_rx_splitq_clean_all- Clean completion queues
4028  * @q_vec: queue vector
4029  * @budget: Used to determine if we are in netpoll
4030  * @cleaned: returns number of packets cleaned
4031  *
4032  * Returns false if clean is not complete else returns true
4033  */
4034 static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
4035 				     int *cleaned)
4036 {
4037 	u16 num_rxq = q_vec->num_rxq;
4038 	bool clean_complete = true;
4039 	int pkts_cleaned = 0;
4040 	int i, budget_per_q;
4041 	int nid;
4042 
4043 	/* We attempt to distribute budget to each Rx queue fairly, but don't
4044 	 * allow the budget to go below 1 because that would exit polling early.
4045 	 */
4046 	budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0;
4047 	for (i = 0; i < num_rxq; i++) {
4048 		struct idpf_rx_queue *rxq = q_vec->rx[i];
4049 		int pkts_cleaned_per_q;
4050 
4051 		pkts_cleaned_per_q = idpf_rx_splitq_clean(rxq, budget_per_q);
4052 		/* if we clean as many as budgeted, we must not be done */
4053 		if (pkts_cleaned_per_q >= budget_per_q)
4054 			clean_complete = false;
4055 		pkts_cleaned += pkts_cleaned_per_q;
4056 	}
4057 	*cleaned = pkts_cleaned;
4058 
4059 	nid = numa_mem_id();
4060 
4061 	for (i = 0; i < q_vec->num_bufq; i++)
4062 		idpf_rx_clean_refillq_all(q_vec->bufq[i], nid);
4063 
4064 	return clean_complete;
4065 }
4066 
4067 /**
4068  * idpf_vport_splitq_napi_poll - NAPI handler
4069  * @napi: struct from which you get q_vector
4070  * @budget: budget provided by stack
4071  */
4072 static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
4073 {
4074 	struct idpf_q_vector *q_vector =
4075 				container_of(napi, struct idpf_q_vector, napi);
4076 	bool clean_complete;
4077 	int work_done = 0;
4078 
4079 	/* Handle case where we are called by netpoll with a budget of 0 */
4080 	if (unlikely(!budget)) {
4081 		idpf_tx_splitq_clean_all(q_vector, budget, &work_done);
4082 
4083 		return 0;
4084 	}
4085 
4086 	clean_complete = idpf_rx_splitq_clean_all(q_vector, budget, &work_done);
4087 	clean_complete &= idpf_tx_splitq_clean_all(q_vector, budget, &work_done);
4088 
4089 	/* If work not completed, return budget and polling will return */
4090 	if (!clean_complete) {
4091 		idpf_vport_intr_set_wb_on_itr(q_vector);
4092 		return budget;
4093 	}
4094 
4095 	/* Switch to poll mode in the tear-down path after sending disable
4096 	 * queues virtchnl message, as the interrupts will be disabled after
4097 	 * that.
4098 	 */
4099 	if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE,
4100 							 q_vector->tx[0])))
4101 		return budget;
4102 
4103 	work_done = min_t(int, work_done, budget - 1);
4104 
4105 	/* Exit the polling mode, but don't re-enable interrupts if stack might
4106 	 * poll us due to busy-polling
4107 	 */
4108 	if (likely(napi_complete_done(napi, work_done)))
4109 		idpf_vport_intr_update_itr_ena_irq(q_vector);
4110 	else
4111 		idpf_vport_intr_set_wb_on_itr(q_vector);
4112 
4113 	return work_done;
4114 }
4115 
4116 /**
4117  * idpf_vport_intr_map_vector_to_qs - Map vectors to queues
4118  * @vport: virtual port
4119  *
4120  * Mapping for vectors to queues
4121  */
4122 static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
4123 {
4124 	bool split = idpf_is_queue_model_split(vport->rxq_model);
4125 	u16 num_txq_grp = vport->num_txq_grp;
4126 	struct idpf_rxq_group *rx_qgrp;
4127 	struct idpf_txq_group *tx_qgrp;
4128 	u32 i, qv_idx, q_index;
4129 
4130 	for (i = 0, qv_idx = 0; i < vport->num_rxq_grp; i++) {
4131 		u16 num_rxq;
4132 
4133 		if (qv_idx >= vport->num_q_vectors)
4134 			qv_idx = 0;
4135 
4136 		rx_qgrp = &vport->rxq_grps[i];
4137 		if (split)
4138 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
4139 		else
4140 			num_rxq = rx_qgrp->singleq.num_rxq;
4141 
4142 		for (u32 j = 0; j < num_rxq; j++) {
4143 			struct idpf_rx_queue *q;
4144 
4145 			if (split)
4146 				q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
4147 			else
4148 				q = rx_qgrp->singleq.rxqs[j];
4149 			q->q_vector = &vport->q_vectors[qv_idx];
4150 			q_index = q->q_vector->num_rxq;
4151 			q->q_vector->rx[q_index] = q;
4152 			q->q_vector->num_rxq++;
4153 
4154 			if (split)
4155 				q->napi = &q->q_vector->napi;
4156 		}
4157 
4158 		if (split) {
4159 			for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
4160 				struct idpf_buf_queue *bufq;
4161 
4162 				bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
4163 				bufq->q_vector = &vport->q_vectors[qv_idx];
4164 				q_index = bufq->q_vector->num_bufq;
4165 				bufq->q_vector->bufq[q_index] = bufq;
4166 				bufq->q_vector->num_bufq++;
4167 			}
4168 		}
4169 
4170 		qv_idx++;
4171 	}
4172 
4173 	split = idpf_is_queue_model_split(vport->txq_model);
4174 
4175 	for (i = 0, qv_idx = 0; i < num_txq_grp; i++) {
4176 		u16 num_txq;
4177 
4178 		if (qv_idx >= vport->num_q_vectors)
4179 			qv_idx = 0;
4180 
4181 		tx_qgrp = &vport->txq_grps[i];
4182 		num_txq = tx_qgrp->num_txq;
4183 
4184 		for (u32 j = 0; j < num_txq; j++) {
4185 			struct idpf_tx_queue *q;
4186 
4187 			q = tx_qgrp->txqs[j];
4188 			q->q_vector = &vport->q_vectors[qv_idx];
4189 			q->q_vector->tx[q->q_vector->num_txq++] = q;
4190 		}
4191 
4192 		if (split) {
4193 			struct idpf_compl_queue *q = tx_qgrp->complq;
4194 
4195 			q->q_vector = &vport->q_vectors[qv_idx];
4196 			q->q_vector->complq[q->q_vector->num_complq++] = q;
4197 		}
4198 
4199 		qv_idx++;
4200 	}
4201 }
4202 
4203 /**
4204  * idpf_vport_intr_init_vec_idx - Initialize the vector indexes
4205  * @vport: virtual port
4206  *
4207  * Initialize vector indexes with values returened over mailbox
4208  */
4209 static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
4210 {
4211 	struct idpf_adapter *adapter = vport->adapter;
4212 	struct virtchnl2_alloc_vectors *ac;
4213 	u16 *vecids, total_vecs;
4214 	int i;
4215 
4216 	ac = adapter->req_vec_chunks;
4217 	if (!ac) {
4218 		for (i = 0; i < vport->num_q_vectors; i++)
4219 			vport->q_vectors[i].v_idx = vport->q_vector_idxs[i];
4220 
4221 		return 0;
4222 	}
4223 
4224 	total_vecs = idpf_get_reserved_vecs(adapter);
4225 	vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL);
4226 	if (!vecids)
4227 		return -ENOMEM;
4228 
4229 	idpf_get_vec_ids(adapter, vecids, total_vecs, &ac->vchunks);
4230 
4231 	for (i = 0; i < vport->num_q_vectors; i++)
4232 		vport->q_vectors[i].v_idx = vecids[vport->q_vector_idxs[i]];
4233 
4234 	kfree(vecids);
4235 
4236 	return 0;
4237 }
4238 
4239 /**
4240  * idpf_vport_intr_napi_add_all- Register napi handler for all qvectors
4241  * @vport: virtual port structure
4242  */
4243 static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
4244 {
4245 	int (*napi_poll)(struct napi_struct *napi, int budget);
4246 	u16 v_idx, qv_idx;
4247 	int irq_num;
4248 
4249 	if (idpf_is_queue_model_split(vport->txq_model))
4250 		napi_poll = idpf_vport_splitq_napi_poll;
4251 	else
4252 		napi_poll = idpf_vport_singleq_napi_poll;
4253 
4254 	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
4255 		struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
4256 		qv_idx = vport->q_vector_idxs[v_idx];
4257 		irq_num = vport->adapter->msix_entries[qv_idx].vector;
4258 
4259 		netif_napi_add_config(vport->netdev, &q_vector->napi,
4260 				      napi_poll, v_idx);
4261 		netif_napi_set_irq(&q_vector->napi, irq_num);
4262 	}
4263 }
4264 
4265 /**
4266  * idpf_vport_intr_alloc - Allocate memory for interrupt vectors
4267  * @vport: virtual port
4268  *
4269  * We allocate one q_vector per queue interrupt. If allocation fails we
4270  * return -ENOMEM.
4271  */
4272 int idpf_vport_intr_alloc(struct idpf_vport *vport)
4273 {
4274 	u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector;
4275 	struct idpf_vport_user_config_data *user_config;
4276 	struct idpf_q_vector *q_vector;
4277 	struct idpf_q_coalesce *q_coal;
4278 	u32 complqs_per_vector, v_idx;
4279 	u16 idx = vport->idx;
4280 
4281 	user_config = &vport->adapter->vport_config[idx]->user_config;
4282 	vport->q_vectors = kcalloc(vport->num_q_vectors,
4283 				   sizeof(struct idpf_q_vector), GFP_KERNEL);
4284 	if (!vport->q_vectors)
4285 		return -ENOMEM;
4286 
4287 	txqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp,
4288 				       vport->num_q_vectors);
4289 	rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq_grp,
4290 				       vport->num_q_vectors);
4291 	bufqs_per_vector = vport->num_bufqs_per_qgrp *
4292 			   DIV_ROUND_UP(vport->num_rxq_grp,
4293 					vport->num_q_vectors);
4294 	complqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp,
4295 					  vport->num_q_vectors);
4296 
4297 	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
4298 		q_vector = &vport->q_vectors[v_idx];
4299 		q_coal = &user_config->q_coalesce[v_idx];
4300 		q_vector->vport = vport;
4301 
4302 		q_vector->tx_itr_value = q_coal->tx_coalesce_usecs;
4303 		q_vector->tx_intr_mode = q_coal->tx_intr_mode;
4304 		q_vector->tx_itr_idx = VIRTCHNL2_ITR_IDX_1;
4305 
4306 		q_vector->rx_itr_value = q_coal->rx_coalesce_usecs;
4307 		q_vector->rx_intr_mode = q_coal->rx_intr_mode;
4308 		q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0;
4309 
4310 		q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx),
4311 				       GFP_KERNEL);
4312 		if (!q_vector->tx)
4313 			goto error;
4314 
4315 		q_vector->rx = kcalloc(rxqs_per_vector, sizeof(*q_vector->rx),
4316 				       GFP_KERNEL);
4317 		if (!q_vector->rx)
4318 			goto error;
4319 
4320 		if (!idpf_is_queue_model_split(vport->rxq_model))
4321 			continue;
4322 
4323 		q_vector->bufq = kcalloc(bufqs_per_vector,
4324 					 sizeof(*q_vector->bufq),
4325 					 GFP_KERNEL);
4326 		if (!q_vector->bufq)
4327 			goto error;
4328 
4329 		q_vector->complq = kcalloc(complqs_per_vector,
4330 					   sizeof(*q_vector->complq),
4331 					   GFP_KERNEL);
4332 		if (!q_vector->complq)
4333 			goto error;
4334 	}
4335 
4336 	return 0;
4337 
4338 error:
4339 	idpf_vport_intr_rel(vport);
4340 
4341 	return -ENOMEM;
4342 }
4343 
4344 /**
4345  * idpf_vport_intr_init - Setup all vectors for the given vport
4346  * @vport: virtual port
4347  *
4348  * Returns 0 on success or negative on failure
4349  */
4350 int idpf_vport_intr_init(struct idpf_vport *vport)
4351 {
4352 	int err;
4353 
4354 	err = idpf_vport_intr_init_vec_idx(vport);
4355 	if (err)
4356 		return err;
4357 
4358 	idpf_vport_intr_map_vector_to_qs(vport);
4359 	idpf_vport_intr_napi_add_all(vport);
4360 
4361 	err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport);
4362 	if (err)
4363 		goto unroll_vectors_alloc;
4364 
4365 	err = idpf_vport_intr_req_irq(vport);
4366 	if (err)
4367 		goto unroll_vectors_alloc;
4368 
4369 	return 0;
4370 
4371 unroll_vectors_alloc:
4372 	idpf_vport_intr_napi_del_all(vport);
4373 
4374 	return err;
4375 }
4376 
4377 void idpf_vport_intr_ena(struct idpf_vport *vport)
4378 {
4379 	idpf_vport_intr_napi_ena_all(vport);
4380 	idpf_vport_intr_ena_irq_all(vport);
4381 }
4382 
4383 /**
4384  * idpf_config_rss - Send virtchnl messages to configure RSS
4385  * @vport: virtual port
4386  *
4387  * Return 0 on success, negative on failure
4388  */
4389 int idpf_config_rss(struct idpf_vport *vport)
4390 {
4391 	int err;
4392 
4393 	err = idpf_send_get_set_rss_key_msg(vport, false);
4394 	if (err)
4395 		return err;
4396 
4397 	return idpf_send_get_set_rss_lut_msg(vport, false);
4398 }
4399 
4400 /**
4401  * idpf_fill_dflt_rss_lut - Fill the indirection table with the default values
4402  * @vport: virtual port structure
4403  */
4404 static void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
4405 {
4406 	struct idpf_adapter *adapter = vport->adapter;
4407 	u16 num_active_rxq = vport->num_rxq;
4408 	struct idpf_rss_data *rss_data;
4409 	int i;
4410 
4411 	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
4412 
4413 	for (i = 0; i < rss_data->rss_lut_size; i++) {
4414 		rss_data->rss_lut[i] = i % num_active_rxq;
4415 		rss_data->cached_lut[i] = rss_data->rss_lut[i];
4416 	}
4417 }
4418 
4419 /**
4420  * idpf_init_rss - Allocate and initialize RSS resources
4421  * @vport: virtual port
4422  *
4423  * Return 0 on success, negative on failure
4424  */
4425 int idpf_init_rss(struct idpf_vport *vport)
4426 {
4427 	struct idpf_adapter *adapter = vport->adapter;
4428 	struct idpf_rss_data *rss_data;
4429 	u32 lut_size;
4430 
4431 	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
4432 
4433 	lut_size = rss_data->rss_lut_size * sizeof(u32);
4434 	rss_data->rss_lut = kzalloc(lut_size, GFP_KERNEL);
4435 	if (!rss_data->rss_lut)
4436 		return -ENOMEM;
4437 
4438 	rss_data->cached_lut = kzalloc(lut_size, GFP_KERNEL);
4439 	if (!rss_data->cached_lut) {
4440 		kfree(rss_data->rss_lut);
4441 		rss_data->rss_lut = NULL;
4442 
4443 		return -ENOMEM;
4444 	}
4445 
4446 	/* Fill the default RSS lut values */
4447 	idpf_fill_dflt_rss_lut(vport);
4448 
4449 	return idpf_config_rss(vport);
4450 }
4451 
4452 /**
4453  * idpf_deinit_rss - Release RSS resources
4454  * @vport: virtual port
4455  */
4456 void idpf_deinit_rss(struct idpf_vport *vport)
4457 {
4458 	struct idpf_adapter *adapter = vport->adapter;
4459 	struct idpf_rss_data *rss_data;
4460 
4461 	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
4462 	kfree(rss_data->cached_lut);
4463 	rss_data->cached_lut = NULL;
4464 	kfree(rss_data->rss_lut);
4465 	rss_data->rss_lut = NULL;
4466 }
4467