xref: /linux/drivers/net/ethernet/intel/idpf/idpf_txrx.c (revision 50c374c6d1a43db9444cb74cc09552c817db2a9b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2023 Intel Corporation */
3 
4 #include <net/libeth/rx.h>
5 
6 #include "idpf.h"
7 #include "idpf_virtchnl.h"
8 
9 static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
10 			       unsigned int count);
11 
12 /**
13  * idpf_buf_lifo_push - push a buffer pointer onto stack
14  * @stack: pointer to stack struct
15  * @buf: pointer to buf to push
16  *
17  * Returns 0 on success, negative on failure
18  **/
19 static int idpf_buf_lifo_push(struct idpf_buf_lifo *stack,
20 			      struct idpf_tx_stash *buf)
21 {
22 	if (unlikely(stack->top == stack->size))
23 		return -ENOSPC;
24 
25 	stack->bufs[stack->top++] = buf;
26 
27 	return 0;
28 }
29 
30 /**
31  * idpf_buf_lifo_pop - pop a buffer pointer from stack
32  * @stack: pointer to stack struct
33  **/
34 static struct idpf_tx_stash *idpf_buf_lifo_pop(struct idpf_buf_lifo *stack)
35 {
36 	if (unlikely(!stack->top))
37 		return NULL;
38 
39 	return stack->bufs[--stack->top];
40 }
41 
42 /**
43  * idpf_tx_timeout - Respond to a Tx Hang
44  * @netdev: network interface device structure
45  * @txqueue: TX queue
46  */
47 void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
48 {
49 	struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);
50 
51 	adapter->tx_timeout_count++;
52 
53 	netdev_err(netdev, "Detected Tx timeout: Count %d, Queue %d\n",
54 		   adapter->tx_timeout_count, txqueue);
55 	if (!idpf_is_reset_in_prog(adapter)) {
56 		set_bit(IDPF_HR_FUNC_RESET, adapter->flags);
57 		queue_delayed_work(adapter->vc_event_wq,
58 				   &adapter->vc_event_task,
59 				   msecs_to_jiffies(10));
60 	}
61 }
62 
63 /**
64  * idpf_tx_buf_rel - Release a Tx buffer
65  * @tx_q: the queue that owns the buffer
66  * @tx_buf: the buffer to free
67  */
68 static void idpf_tx_buf_rel(struct idpf_tx_queue *tx_q,
69 			    struct idpf_tx_buf *tx_buf)
70 {
71 	if (tx_buf->skb) {
72 		if (dma_unmap_len(tx_buf, len))
73 			dma_unmap_single(tx_q->dev,
74 					 dma_unmap_addr(tx_buf, dma),
75 					 dma_unmap_len(tx_buf, len),
76 					 DMA_TO_DEVICE);
77 		dev_kfree_skb_any(tx_buf->skb);
78 	} else if (dma_unmap_len(tx_buf, len)) {
79 		dma_unmap_page(tx_q->dev,
80 			       dma_unmap_addr(tx_buf, dma),
81 			       dma_unmap_len(tx_buf, len),
82 			       DMA_TO_DEVICE);
83 	}
84 
85 	tx_buf->next_to_watch = NULL;
86 	tx_buf->skb = NULL;
87 	tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
88 	dma_unmap_len_set(tx_buf, len, 0);
89 }
90 
91 /**
92  * idpf_tx_buf_rel_all - Free all Tx buffer resources
93  * @txq: queue to be cleaned
94  */
95 static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
96 {
97 	struct idpf_buf_lifo *buf_stack;
98 	u16 i;
99 
100 	/* Buffers already cleared, nothing to do */
101 	if (!txq->tx_buf)
102 		return;
103 
104 	/* Free all the Tx buffer sk_buffs */
105 	for (i = 0; i < txq->desc_count; i++)
106 		idpf_tx_buf_rel(txq, &txq->tx_buf[i]);
107 
108 	kfree(txq->tx_buf);
109 	txq->tx_buf = NULL;
110 
111 	if (!idpf_queue_has(FLOW_SCH_EN, txq))
112 		return;
113 
114 	buf_stack = &txq->stash->buf_stack;
115 	if (!buf_stack->bufs)
116 		return;
117 
118 	for (i = 0; i < buf_stack->size; i++)
119 		kfree(buf_stack->bufs[i]);
120 
121 	kfree(buf_stack->bufs);
122 	buf_stack->bufs = NULL;
123 }
124 
125 /**
126  * idpf_tx_desc_rel - Free Tx resources per queue
127  * @txq: Tx descriptor ring for a specific queue
128  *
129  * Free all transmit software resources
130  */
131 static void idpf_tx_desc_rel(struct idpf_tx_queue *txq)
132 {
133 	idpf_tx_buf_rel_all(txq);
134 
135 	if (!txq->desc_ring)
136 		return;
137 
138 	dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma);
139 	txq->desc_ring = NULL;
140 	txq->next_to_use = 0;
141 	txq->next_to_clean = 0;
142 }
143 
144 /**
145  * idpf_compl_desc_rel - Free completion resources per queue
146  * @complq: completion queue
147  *
148  * Free all completion software resources.
149  */
150 static void idpf_compl_desc_rel(struct idpf_compl_queue *complq)
151 {
152 	if (!complq->comp)
153 		return;
154 
155 	dma_free_coherent(complq->netdev->dev.parent, complq->size,
156 			  complq->comp, complq->dma);
157 	complq->comp = NULL;
158 	complq->next_to_use = 0;
159 	complq->next_to_clean = 0;
160 }
161 
162 /**
163  * idpf_tx_desc_rel_all - Free Tx Resources for All Queues
164  * @vport: virtual port structure
165  *
166  * Free all transmit software resources
167  */
168 static void idpf_tx_desc_rel_all(struct idpf_vport *vport)
169 {
170 	int i, j;
171 
172 	if (!vport->txq_grps)
173 		return;
174 
175 	for (i = 0; i < vport->num_txq_grp; i++) {
176 		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
177 
178 		for (j = 0; j < txq_grp->num_txq; j++)
179 			idpf_tx_desc_rel(txq_grp->txqs[j]);
180 
181 		if (idpf_is_queue_model_split(vport->txq_model))
182 			idpf_compl_desc_rel(txq_grp->complq);
183 	}
184 }
185 
186 /**
187  * idpf_tx_buf_alloc_all - Allocate memory for all buffer resources
188  * @tx_q: queue for which the buffers are allocated
189  *
190  * Returns 0 on success, negative on failure
191  */
192 static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q)
193 {
194 	struct idpf_buf_lifo *buf_stack;
195 	int buf_size;
196 	int i;
197 
198 	/* Allocate bookkeeping buffers only. Buffers to be supplied to HW
199 	 * are allocated by kernel network stack and received as part of skb
200 	 */
201 	buf_size = sizeof(struct idpf_tx_buf) * tx_q->desc_count;
202 	tx_q->tx_buf = kzalloc(buf_size, GFP_KERNEL);
203 	if (!tx_q->tx_buf)
204 		return -ENOMEM;
205 
206 	/* Initialize tx_bufs with invalid completion tags */
207 	for (i = 0; i < tx_q->desc_count; i++)
208 		tx_q->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
209 
210 	if (!idpf_queue_has(FLOW_SCH_EN, tx_q))
211 		return 0;
212 
213 	buf_stack = &tx_q->stash->buf_stack;
214 
215 	/* Initialize tx buf stack for out-of-order completions if
216 	 * flow scheduling offload is enabled
217 	 */
218 	buf_stack->bufs = kcalloc(tx_q->desc_count, sizeof(*buf_stack->bufs),
219 				  GFP_KERNEL);
220 	if (!buf_stack->bufs)
221 		return -ENOMEM;
222 
223 	buf_stack->size = tx_q->desc_count;
224 	buf_stack->top = tx_q->desc_count;
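	/* The stack starts full: one preallocated stash entry per descriptor.
	 * Entries are popped when a buffer must be parked for an out-of-order
	 * completion (idpf_stash_flow_sch_buffers()) and pushed back once that
	 * completion arrives (idpf_tx_clean_stashed_bufs()).
	 */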
225 
226 	for (i = 0; i < tx_q->desc_count; i++) {
227 		buf_stack->bufs[i] = kzalloc(sizeof(*buf_stack->bufs[i]),
228 					     GFP_KERNEL);
229 		if (!buf_stack->bufs[i])
230 			return -ENOMEM;
231 	}
232 
233 	return 0;
234 }
235 
236 /**
237  * idpf_tx_desc_alloc - Allocate the Tx descriptors
238  * @vport: vport to allocate resources for
239  * @tx_q: the tx ring to set up
240  *
241  * Returns 0 on success, negative on failure
242  */
243 static int idpf_tx_desc_alloc(const struct idpf_vport *vport,
244 			      struct idpf_tx_queue *tx_q)
245 {
246 	struct device *dev = tx_q->dev;
247 	int err;
248 
249 	err = idpf_tx_buf_alloc_all(tx_q);
250 	if (err)
251 		goto err_alloc;
252 
253 	tx_q->size = tx_q->desc_count * sizeof(*tx_q->base_tx);
254 
255 	/* Allocate descriptors and also round up to nearest 4K */
256 	tx_q->size = ALIGN(tx_q->size, 4096);
257 	tx_q->desc_ring = dmam_alloc_coherent(dev, tx_q->size, &tx_q->dma,
258 					      GFP_KERNEL);
259 	if (!tx_q->desc_ring) {
260 		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
261 			tx_q->size);
262 		err = -ENOMEM;
263 		goto err_alloc;
264 	}
265 
266 	tx_q->next_to_use = 0;
267 	tx_q->next_to_clean = 0;
268 	idpf_queue_set(GEN_CHK, tx_q);
269 
270 	return 0;
271 
272 err_alloc:
273 	idpf_tx_desc_rel(tx_q);
274 
275 	return err;
276 }
277 
278 /**
279  * idpf_compl_desc_alloc - allocate completion descriptors
280  * @vport: vport to allocate resources for
281  * @complq: completion queue to set up
282  *
283  * Return: 0 on success, -errno on failure.
284  */
285 static int idpf_compl_desc_alloc(const struct idpf_vport *vport,
286 				 struct idpf_compl_queue *complq)
287 {
288 	complq->size = array_size(complq->desc_count, sizeof(*complq->comp));
289 
290 	complq->comp = dma_alloc_coherent(complq->netdev->dev.parent,
291 					  complq->size, &complq->dma,
292 					  GFP_KERNEL);
293 	if (!complq->comp)
294 		return -ENOMEM;
295 
296 	complq->next_to_use = 0;
297 	complq->next_to_clean = 0;
298 	idpf_queue_set(GEN_CHK, complq);
299 
300 	return 0;
301 }
302 
303 /**
304  * idpf_tx_desc_alloc_all - allocate all queues Tx resources
305  * @vport: virtual port private structure
306  *
307  * Returns 0 on success, negative on failure
308  */
309 static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
310 {
311 	int err = 0;
312 	int i, j;
313 
314 	/* Setup TX queues. In the single queue model, TX queues and
315 	 * completion queues are the same
316 	 */
317 	for (i = 0; i < vport->num_txq_grp; i++) {
318 		for (j = 0; j < vport->txq_grps[i].num_txq; j++) {
319 			struct idpf_tx_queue *txq = vport->txq_grps[i].txqs[j];
320 			u8 gen_bits = 0;
321 			u16 bufidx_mask;
322 
323 			err = idpf_tx_desc_alloc(vport, txq);
324 			if (err) {
325 				pci_err(vport->adapter->pdev,
326 					"Allocation for Tx Queue %u failed\n",
327 					i);
328 				goto err_out;
329 			}
330 
331 			if (!idpf_is_queue_model_split(vport->txq_model))
332 				continue;
333 
334 			txq->compl_tag_cur_gen = 0;
335 
336 			/* Determine the number of bits in the bufid
337 			 * mask and add one to get the start of the
338 			 * generation bits
339 			 */
340 			bufidx_mask = txq->desc_count - 1;
341 			while (bufidx_mask >> 1) {
342 				txq->compl_tag_gen_s++;
343 				bufidx_mask = bufidx_mask >> 1;
344 			}
345 			txq->compl_tag_gen_s++;
346 
347 			gen_bits = IDPF_TX_SPLITQ_COMPL_TAG_WIDTH -
348 							txq->compl_tag_gen_s;
349 			txq->compl_tag_gen_max = GETMAXVAL(gen_bits);
350 
351 			/* Set bufid mask based on location of first
352 			 * gen bit; it cannot simply be the descriptor
353 			 * ring size-1 since we can have size values
354 			 * where not all of those bits are set.
355 			 */
356 			txq->compl_tag_bufid_m =
357 				GETMAXVAL(txq->compl_tag_gen_s);
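			/* Worked example (a sketch, assuming a 16-bit
			 * completion tag and that GETMAXVAL(n) yields an
			 * n-bit mask): a 512-entry ring gives
			 * compl_tag_gen_s = 9, so bits 8:0 of the tag hold
			 * the buffer id (compl_tag_bufid_m = 0x1ff) and the
			 * remaining 7 bits hold the generation
			 * (compl_tag_gen_max = 0x7f).
			 */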
358 		}
359 
360 		if (!idpf_is_queue_model_split(vport->txq_model))
361 			continue;
362 
363 		/* Setup completion queues */
364 		err = idpf_compl_desc_alloc(vport, vport->txq_grps[i].complq);
365 		if (err) {
366 			pci_err(vport->adapter->pdev,
367 				"Allocation for Tx Completion Queue %u failed\n",
368 				i);
369 			goto err_out;
370 		}
371 	}
372 
373 err_out:
374 	if (err)
375 		idpf_tx_desc_rel_all(vport);
376 
377 	return err;
378 }
379 
380 /**
381  * idpf_rx_page_rel - Release an rx buffer page
382  * @rx_buf: the buffer to free
383  */
384 static void idpf_rx_page_rel(struct libeth_fqe *rx_buf)
385 {
386 	if (unlikely(!rx_buf->page))
387 		return;
388 
389 	page_pool_put_full_page(rx_buf->page->pp, rx_buf->page, false);
390 
391 	rx_buf->page = NULL;
392 	rx_buf->offset = 0;
393 }
394 
395 /**
396  * idpf_rx_hdr_buf_rel_all - Release header buffer memory
397  * @bufq: queue to use
398  */
399 static void idpf_rx_hdr_buf_rel_all(struct idpf_buf_queue *bufq)
400 {
401 	struct libeth_fq fq = {
402 		.fqes	= bufq->hdr_buf,
403 		.pp	= bufq->hdr_pp,
404 	};
405 
406 	for (u32 i = 0; i < bufq->desc_count; i++)
407 		idpf_rx_page_rel(&bufq->hdr_buf[i]);
408 
409 	libeth_rx_fq_destroy(&fq);
410 	bufq->hdr_buf = NULL;
411 	bufq->hdr_pp = NULL;
412 }
413 
414 /**
415  * idpf_rx_buf_rel_bufq - Free all Rx buffer resources for a buffer queue
416  * @bufq: queue to be cleaned
417  */
418 static void idpf_rx_buf_rel_bufq(struct idpf_buf_queue *bufq)
419 {
420 	struct libeth_fq fq = {
421 		.fqes	= bufq->buf,
422 		.pp	= bufq->pp,
423 	};
424 
425 	/* queue already cleared, nothing to do */
426 	if (!bufq->buf)
427 		return;
428 
429 	/* Free all the bufs allocated and given to hw on Rx queue */
430 	for (u32 i = 0; i < bufq->desc_count; i++)
431 		idpf_rx_page_rel(&bufq->buf[i]);
432 
433 	if (idpf_queue_has(HSPLIT_EN, bufq))
434 		idpf_rx_hdr_buf_rel_all(bufq);
435 
436 	libeth_rx_fq_destroy(&fq);
437 	bufq->buf = NULL;
438 	bufq->pp = NULL;
439 }
440 
441 /**
442  * idpf_rx_buf_rel_all - Free all Rx buffer resources for a receive queue
443  * @rxq: queue to be cleaned
444  */
445 static void idpf_rx_buf_rel_all(struct idpf_rx_queue *rxq)
446 {
447 	struct libeth_fq fq = {
448 		.fqes	= rxq->rx_buf,
449 		.pp	= rxq->pp,
450 	};
451 
452 	if (!rxq->rx_buf)
453 		return;
454 
455 	for (u32 i = 0; i < rxq->desc_count; i++)
456 		idpf_rx_page_rel(&rxq->rx_buf[i]);
457 
458 	libeth_rx_fq_destroy(&fq);
459 	rxq->rx_buf = NULL;
460 	rxq->pp = NULL;
461 }
462 
463 /**
464  * idpf_rx_desc_rel - Free a specific Rx q resources
465  * @rxq: queue to clean the resources from
466  * @dev: device to free DMA memory
467  * @model: single or split queue model
468  *
469  * Free a specific rx queue resources
470  */
471 static void idpf_rx_desc_rel(struct idpf_rx_queue *rxq, struct device *dev,
472 			     u32 model)
473 {
474 	if (!rxq)
475 		return;
476 
477 	if (rxq->skb) {
478 		dev_kfree_skb_any(rxq->skb);
479 		rxq->skb = NULL;
480 	}
481 
482 	if (!idpf_is_queue_model_split(model))
483 		idpf_rx_buf_rel_all(rxq);
484 
485 	rxq->next_to_alloc = 0;
486 	rxq->next_to_clean = 0;
487 	rxq->next_to_use = 0;
488 	if (!rxq->desc_ring)
489 		return;
490 
491 	dmam_free_coherent(dev, rxq->size, rxq->desc_ring, rxq->dma);
492 	rxq->desc_ring = NULL;
493 }
494 
495 /**
496  * idpf_rx_desc_rel_bufq - free buffer queue resources
497  * @bufq: buffer queue to clean the resources from
498  * @dev: device to free DMA memory
499  */
500 static void idpf_rx_desc_rel_bufq(struct idpf_buf_queue *bufq,
501 				  struct device *dev)
502 {
503 	if (!bufq)
504 		return;
505 
506 	idpf_rx_buf_rel_bufq(bufq);
507 
508 	bufq->next_to_alloc = 0;
509 	bufq->next_to_clean = 0;
510 	bufq->next_to_use = 0;
511 
512 	if (!bufq->split_buf)
513 		return;
514 
515 	dma_free_coherent(dev, bufq->size, bufq->split_buf, bufq->dma);
516 	bufq->split_buf = NULL;
517 }
518 
519 /**
520  * idpf_rx_desc_rel_all - Free Rx Resources for All Queues
521  * @vport: virtual port structure
522  *
523  * Free all rx queues resources
524  */
525 static void idpf_rx_desc_rel_all(struct idpf_vport *vport)
526 {
527 	struct device *dev = &vport->adapter->pdev->dev;
528 	struct idpf_rxq_group *rx_qgrp;
529 	u16 num_rxq;
530 	int i, j;
531 
532 	if (!vport->rxq_grps)
533 		return;
534 
535 	for (i = 0; i < vport->num_rxq_grp; i++) {
536 		rx_qgrp = &vport->rxq_grps[i];
537 
538 		if (!idpf_is_queue_model_split(vport->rxq_model)) {
539 			for (j = 0; j < rx_qgrp->singleq.num_rxq; j++)
540 				idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j], dev,
541 						 VIRTCHNL2_QUEUE_MODEL_SINGLE);
542 			continue;
543 		}
544 
545 		num_rxq = rx_qgrp->splitq.num_rxq_sets;
546 		for (j = 0; j < num_rxq; j++)
547 			idpf_rx_desc_rel(&rx_qgrp->splitq.rxq_sets[j]->rxq,
548 					 dev, VIRTCHNL2_QUEUE_MODEL_SPLIT);
549 
550 		if (!rx_qgrp->splitq.bufq_sets)
551 			continue;
552 
553 		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
554 			struct idpf_bufq_set *bufq_set =
555 				&rx_qgrp->splitq.bufq_sets[j];
556 
557 			idpf_rx_desc_rel_bufq(&bufq_set->bufq, dev);
558 		}
559 	}
560 }
561 
562 /**
563  * idpf_rx_buf_hw_update - Store the new tail and head values
564  * @bufq: queue to bump
565  * @val: new head index
566  */
567 static void idpf_rx_buf_hw_update(struct idpf_buf_queue *bufq, u32 val)
568 {
569 	bufq->next_to_use = val;
570 
571 	if (unlikely(!bufq->tail))
572 		return;
573 
574 	/* writel has an implicit memory barrier */
575 	writel(val, bufq->tail);
576 }
577 
578 /**
579  * idpf_rx_hdr_buf_alloc_all - Allocate memory for header buffers
580  * @bufq: ring to use
581  *
582  * Returns 0 on success, negative on failure.
583  */
584 static int idpf_rx_hdr_buf_alloc_all(struct idpf_buf_queue *bufq)
585 {
586 	struct libeth_fq fq = {
587 		.count	= bufq->desc_count,
588 		.type	= LIBETH_FQE_HDR,
589 		.nid	= idpf_q_vector_to_mem(bufq->q_vector),
590 	};
591 	int ret;
592 
593 	ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi);
594 	if (ret)
595 		return ret;
596 
597 	bufq->hdr_pp = fq.pp;
598 	bufq->hdr_buf = fq.fqes;
599 	bufq->hdr_truesize = fq.truesize;
600 	bufq->rx_hbuf_size = fq.buf_len;
601 
602 	return 0;
603 }
604 
605 /**
606  * idpf_rx_post_buf_refill - Post buffer id to refill queue
607  * @refillq: refill queue to post to
608  * @buf_id: buffer id to post
609  */
610 static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
611 {
612 	u32 nta = refillq->next_to_use;
613 
614 	/* store the buffer ID and the SW maintained GEN bit to the refillq */
615 	refillq->ring[nta] =
616 		FIELD_PREP(IDPF_RX_BI_BUFID_M, buf_id) |
617 		FIELD_PREP(IDPF_RX_BI_GEN_M,
618 			   idpf_queue_has(GEN_CHK, refillq));
619 
620 	if (unlikely(++nta == refillq->desc_count)) {
621 		nta = 0;
622 		idpf_queue_change(GEN_CHK, refillq);
623 	}
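	/* Flipping GEN on wraparound lets the refill consumer (the buffer
	 * queue fill path) tell freshly posted buffer ids from stale ring
	 * contents by comparing an entry's GEN bit with the value it expects.
	 */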
624 
625 	refillq->next_to_use = nta;
626 }
627 
628 /**
629  * idpf_rx_post_buf_desc - Post buffer to bufq descriptor ring
630  * @bufq: buffer queue to post to
631  * @buf_id: buffer id to post
632  *
633  * Returns false if buffer could not be allocated, true otherwise.
634  */
635 static bool idpf_rx_post_buf_desc(struct idpf_buf_queue *bufq, u16 buf_id)
636 {
637 	struct virtchnl2_splitq_rx_buf_desc *splitq_rx_desc = NULL;
638 	struct libeth_fq_fp fq = {
639 		.count	= bufq->desc_count,
640 	};
641 	u16 nta = bufq->next_to_alloc;
642 	dma_addr_t addr;
643 
644 	splitq_rx_desc = &bufq->split_buf[nta];
645 
646 	if (idpf_queue_has(HSPLIT_EN, bufq)) {
647 		fq.pp = bufq->hdr_pp;
648 		fq.fqes = bufq->hdr_buf;
649 		fq.truesize = bufq->hdr_truesize;
650 
651 		addr = libeth_rx_alloc(&fq, buf_id);
652 		if (addr == DMA_MAPPING_ERROR)
653 			return false;
654 
655 		splitq_rx_desc->hdr_addr = cpu_to_le64(addr);
656 	}
657 
658 	fq.pp = bufq->pp;
659 	fq.fqes = bufq->buf;
660 	fq.truesize = bufq->truesize;
661 
662 	addr = libeth_rx_alloc(&fq, buf_id);
663 	if (addr == DMA_MAPPING_ERROR)
664 		return false;
665 
666 	splitq_rx_desc->pkt_addr = cpu_to_le64(addr);
667 	splitq_rx_desc->qword0.buf_id = cpu_to_le16(buf_id);
668 
669 	nta++;
670 	if (unlikely(nta == bufq->desc_count))
671 		nta = 0;
672 	bufq->next_to_alloc = nta;
673 
674 	return true;
675 }
676 
677 /**
678  * idpf_rx_post_init_bufs - Post initial buffers to bufq
679  * @bufq: buffer queue to post working set to
680  * @working_set: number of buffers to put in working set
681  *
682  * Returns true if @working_set bufs were posted successfully, false otherwise.
683  */
684 static bool idpf_rx_post_init_bufs(struct idpf_buf_queue *bufq,
685 				   u16 working_set)
686 {
687 	int i;
688 
689 	for (i = 0; i < working_set; i++) {
690 		if (!idpf_rx_post_buf_desc(bufq, i))
691 			return false;
692 	}
693 
694 	idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq->next_to_alloc,
695 					       IDPF_RX_BUF_STRIDE));
696 
697 	return true;
698 }
699 
700 /**
701  * idpf_rx_buf_alloc_singleq - Allocate memory for all buffer resources
702  * @rxq: queue for which the buffers are allocated
703  *
704  * Return: 0 on success, -ENOMEM on failure.
705  */
706 static int idpf_rx_buf_alloc_singleq(struct idpf_rx_queue *rxq)
707 {
708 	if (idpf_rx_singleq_buf_hw_alloc_all(rxq, rxq->desc_count - 1))
709 		goto err;
710 
711 	return 0;
712 
713 err:
714 	idpf_rx_buf_rel_all(rxq);
715 
716 	return -ENOMEM;
717 }
718 
719 /**
720  * idpf_rx_bufs_init_singleq - Initialize page pool and allocate Rx bufs
721  * @rxq: buffer queue to create page pool for
722  *
723  * Return: 0 on success, -errno on failure.
724  */
725 static int idpf_rx_bufs_init_singleq(struct idpf_rx_queue *rxq)
726 {
727 	struct libeth_fq fq = {
728 		.count	= rxq->desc_count,
729 		.type	= LIBETH_FQE_MTU,
730 		.nid	= idpf_q_vector_to_mem(rxq->q_vector),
731 	};
732 	int ret;
733 
734 	ret = libeth_rx_fq_create(&fq, &rxq->q_vector->napi);
735 	if (ret)
736 		return ret;
737 
738 	rxq->pp = fq.pp;
739 	rxq->rx_buf = fq.fqes;
740 	rxq->truesize = fq.truesize;
741 	rxq->rx_buf_size = fq.buf_len;
742 
743 	return idpf_rx_buf_alloc_singleq(rxq);
744 }
745 
746 /**
747  * idpf_rx_buf_alloc_all - Allocate memory for all buffer resources
748  * @rxbufq: queue for which the buffers are allocated
749  *
750  * Returns 0 on success, negative on failure
751  */
752 static int idpf_rx_buf_alloc_all(struct idpf_buf_queue *rxbufq)
753 {
754 	int err = 0;
755 
756 	if (idpf_queue_has(HSPLIT_EN, rxbufq)) {
757 		err = idpf_rx_hdr_buf_alloc_all(rxbufq);
758 		if (err)
759 			goto rx_buf_alloc_all_out;
760 	}
761 
762 	/* Allocate buffers to be given to HW. */
763 	if (!idpf_rx_post_init_bufs(rxbufq, IDPF_RX_BUFQ_WORKING_SET(rxbufq)))
764 		err = -ENOMEM;
765 
766 rx_buf_alloc_all_out:
767 	if (err)
768 		idpf_rx_buf_rel_bufq(rxbufq);
769 
770 	return err;
771 }
772 
773 /**
774  * idpf_rx_bufs_init - Initialize page pool, allocate rx bufs, and post to HW
775  * @bufq: buffer queue to create page pool for
776  * @type: type of Rx buffers to allocate
777  *
778  * Returns 0 on success, negative on failure
779  */
780 static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq,
781 			     enum libeth_fqe_type type)
782 {
783 	struct libeth_fq fq = {
784 		.truesize	= bufq->truesize,
785 		.count		= bufq->desc_count,
786 		.type		= type,
787 		.hsplit		= idpf_queue_has(HSPLIT_EN, bufq),
788 		.nid		= idpf_q_vector_to_mem(bufq->q_vector),
789 	};
790 	int ret;
791 
792 	ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi);
793 	if (ret)
794 		return ret;
795 
796 	bufq->pp = fq.pp;
797 	bufq->buf = fq.fqes;
798 	bufq->truesize = fq.truesize;
799 	bufq->rx_buf_size = fq.buf_len;
800 
801 	return idpf_rx_buf_alloc_all(bufq);
802 }
803 
804 /**
805  * idpf_rx_bufs_init_all - Initialize all RX bufs
806  * @vport: virtual port struct
807  *
808  * Returns 0 on success, negative on failure
809  */
810 int idpf_rx_bufs_init_all(struct idpf_vport *vport)
811 {
812 	bool split = idpf_is_queue_model_split(vport->rxq_model);
813 	int i, j, err;
814 
815 	for (i = 0; i < vport->num_rxq_grp; i++) {
816 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
817 		u32 truesize = 0;
818 
819 		/* Allocate bufs for the rxq itself in singleq */
820 		if (!split) {
821 			int num_rxq = rx_qgrp->singleq.num_rxq;
822 
823 			for (j = 0; j < num_rxq; j++) {
824 				struct idpf_rx_queue *q;
825 
826 				q = rx_qgrp->singleq.rxqs[j];
827 				err = idpf_rx_bufs_init_singleq(q);
828 				if (err)
829 					return err;
830 			}
831 
832 			continue;
833 		}
834 
835 		/* Otherwise, allocate bufs for the buffer queues */
836 		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
837 			enum libeth_fqe_type type;
838 			struct idpf_buf_queue *q;
839 
840 			q = &rx_qgrp->splitq.bufq_sets[j].bufq;
841 			q->truesize = truesize;
842 
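			/* The first buffer queue in the group gets full
			 * MTU-sized buffers; each subsequent queue is seeded
			 * with half the previous truesize and is therefore
			 * filled with "short" entries.
			 */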
843 			type = truesize ? LIBETH_FQE_SHORT : LIBETH_FQE_MTU;
844 
845 			err = idpf_rx_bufs_init(q, type);
846 			if (err)
847 				return err;
848 
849 			truesize = q->truesize >> 1;
850 		}
851 	}
852 
853 	return 0;
854 }
855 
856 /**
857  * idpf_rx_desc_alloc - Allocate queue Rx resources
858  * @vport: vport to allocate resources for
859  * @rxq: Rx queue for which the resources are setup
860  *
861  * Returns 0 on success, negative on failure
862  */
863 static int idpf_rx_desc_alloc(const struct idpf_vport *vport,
864 			      struct idpf_rx_queue *rxq)
865 {
866 	struct device *dev = &vport->adapter->pdev->dev;
867 
868 	rxq->size = rxq->desc_count * sizeof(union virtchnl2_rx_desc);
869 
870 	/* Allocate descriptors and also round up to nearest 4K */
871 	rxq->size = ALIGN(rxq->size, 4096);
872 	rxq->desc_ring = dmam_alloc_coherent(dev, rxq->size,
873 					     &rxq->dma, GFP_KERNEL);
874 	if (!rxq->desc_ring) {
875 		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
876 			rxq->size);
877 		return -ENOMEM;
878 	}
879 
880 	rxq->next_to_alloc = 0;
881 	rxq->next_to_clean = 0;
882 	rxq->next_to_use = 0;
883 	idpf_queue_set(GEN_CHK, rxq);
884 
885 	return 0;
886 }
887 
888 /**
889  * idpf_bufq_desc_alloc - Allocate buffer queue descriptor ring
890  * @vport: vport to allocate resources for
891  * @bufq: buffer queue for which the resources are set up
892  *
893  * Return: 0 on success, -ENOMEM on failure.
894  */
895 static int idpf_bufq_desc_alloc(const struct idpf_vport *vport,
896 				struct idpf_buf_queue *bufq)
897 {
898 	struct device *dev = &vport->adapter->pdev->dev;
899 
900 	bufq->size = array_size(bufq->desc_count, sizeof(*bufq->split_buf));
901 
902 	bufq->split_buf = dma_alloc_coherent(dev, bufq->size, &bufq->dma,
903 					     GFP_KERNEL);
904 	if (!bufq->split_buf)
905 		return -ENOMEM;
906 
907 	bufq->next_to_alloc = 0;
908 	bufq->next_to_clean = 0;
909 	bufq->next_to_use = 0;
910 
911 	idpf_queue_set(GEN_CHK, bufq);
912 
913 	return 0;
914 }
915 
916 /**
917  * idpf_rx_desc_alloc_all - allocate all RX queues resources
918  * @vport: virtual port structure
919  *
920  * Returns 0 on success, negative on failure
921  */
922 static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
923 {
924 	struct idpf_rxq_group *rx_qgrp;
925 	int i, j, err;
926 	u16 num_rxq;
927 
928 	for (i = 0; i < vport->num_rxq_grp; i++) {
929 		rx_qgrp = &vport->rxq_grps[i];
930 		if (idpf_is_queue_model_split(vport->rxq_model))
931 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
932 		else
933 			num_rxq = rx_qgrp->singleq.num_rxq;
934 
935 		for (j = 0; j < num_rxq; j++) {
936 			struct idpf_rx_queue *q;
937 
938 			if (idpf_is_queue_model_split(vport->rxq_model))
939 				q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
940 			else
941 				q = rx_qgrp->singleq.rxqs[j];
942 
943 			err = idpf_rx_desc_alloc(vport, q);
944 			if (err) {
945 				pci_err(vport->adapter->pdev,
946 					"Memory allocation for Rx Queue %u failed\n",
947 					i);
948 				goto err_out;
949 			}
950 		}
951 
952 		if (!idpf_is_queue_model_split(vport->rxq_model))
953 			continue;
954 
955 		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
956 			struct idpf_buf_queue *q;
957 
958 			q = &rx_qgrp->splitq.bufq_sets[j].bufq;
959 
960 			err = idpf_bufq_desc_alloc(vport, q);
961 			if (err) {
962 				pci_err(vport->adapter->pdev,
963 					"Memory allocation for Rx Buffer Queue %u failed\n",
964 					i);
965 				goto err_out;
966 			}
967 		}
968 	}
969 
970 	return 0;
971 
972 err_out:
973 	idpf_rx_desc_rel_all(vport);
974 
975 	return err;
976 }
977 
978 /**
979  * idpf_txq_group_rel - Release all resources for txq groups
980  * @vport: vport to release txq groups on
981  */
982 static void idpf_txq_group_rel(struct idpf_vport *vport)
983 {
984 	bool split, flow_sch_en;
985 	int i, j;
986 
987 	if (!vport->txq_grps)
988 		return;
989 
990 	split = idpf_is_queue_model_split(vport->txq_model);
991 	flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
992 				       VIRTCHNL2_CAP_SPLITQ_QSCHED);
993 
994 	for (i = 0; i < vport->num_txq_grp; i++) {
995 		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
996 
997 		for (j = 0; j < txq_grp->num_txq; j++) {
998 			kfree(txq_grp->txqs[j]);
999 			txq_grp->txqs[j] = NULL;
1000 		}
1001 
1002 		if (!split)
1003 			continue;
1004 
1005 		kfree(txq_grp->complq);
1006 		txq_grp->complq = NULL;
1007 
1008 		if (flow_sch_en)
1009 			kfree(txq_grp->stashes);
1010 	}
1011 	kfree(vport->txq_grps);
1012 	vport->txq_grps = NULL;
1013 }
1014 
1015 /**
1016  * idpf_rxq_sw_queue_rel - Release software queue resources
1017  * @rx_qgrp: rx queue group with software queues
1018  */
1019 static void idpf_rxq_sw_queue_rel(struct idpf_rxq_group *rx_qgrp)
1020 {
1021 	int i, j;
1022 
1023 	for (i = 0; i < rx_qgrp->vport->num_bufqs_per_qgrp; i++) {
1024 		struct idpf_bufq_set *bufq_set = &rx_qgrp->splitq.bufq_sets[i];
1025 
1026 		for (j = 0; j < bufq_set->num_refillqs; j++) {
1027 			kfree(bufq_set->refillqs[j].ring);
1028 			bufq_set->refillqs[j].ring = NULL;
1029 		}
1030 		kfree(bufq_set->refillqs);
1031 		bufq_set->refillqs = NULL;
1032 	}
1033 }
1034 
1035 /**
1036  * idpf_rxq_group_rel - Release all resources for rxq groups
1037  * @vport: vport to release rxq groups on
1038  */
1039 static void idpf_rxq_group_rel(struct idpf_vport *vport)
1040 {
1041 	int i;
1042 
1043 	if (!vport->rxq_grps)
1044 		return;
1045 
1046 	for (i = 0; i < vport->num_rxq_grp; i++) {
1047 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1048 		u16 num_rxq;
1049 		int j;
1050 
1051 		if (idpf_is_queue_model_split(vport->rxq_model)) {
1052 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
1053 			for (j = 0; j < num_rxq; j++) {
1054 				kfree(rx_qgrp->splitq.rxq_sets[j]);
1055 				rx_qgrp->splitq.rxq_sets[j] = NULL;
1056 			}
1057 
1058 			idpf_rxq_sw_queue_rel(rx_qgrp);
1059 			kfree(rx_qgrp->splitq.bufq_sets);
1060 			rx_qgrp->splitq.bufq_sets = NULL;
1061 		} else {
1062 			num_rxq = rx_qgrp->singleq.num_rxq;
1063 			for (j = 0; j < num_rxq; j++) {
1064 				kfree(rx_qgrp->singleq.rxqs[j]);
1065 				rx_qgrp->singleq.rxqs[j] = NULL;
1066 			}
1067 		}
1068 	}
1069 	kfree(vport->rxq_grps);
1070 	vport->rxq_grps = NULL;
1071 }
1072 
1073 /**
1074  * idpf_vport_queue_grp_rel_all - Release all queue groups
1075  * @vport: vport to release queue groups for
1076  */
1077 static void idpf_vport_queue_grp_rel_all(struct idpf_vport *vport)
1078 {
1079 	idpf_txq_group_rel(vport);
1080 	idpf_rxq_group_rel(vport);
1081 }
1082 
1083 /**
1084  * idpf_vport_queues_rel - Free memory for all queues
1085  * @vport: virtual port
1086  *
1087  * Free the memory allocated for queues associated to a vport
1088  */
1089 void idpf_vport_queues_rel(struct idpf_vport *vport)
1090 {
1091 	idpf_tx_desc_rel_all(vport);
1092 	idpf_rx_desc_rel_all(vport);
1093 	idpf_vport_queue_grp_rel_all(vport);
1094 
1095 	kfree(vport->txqs);
1096 	vport->txqs = NULL;
1097 }
1098 
1099 /**
1100  * idpf_vport_init_fast_path_txqs - Initialize fast path txq array
1101  * @vport: vport to init txqs on
1102  *
1103  * We get a queue index from skb->queue_mapping and we need a fast way to
1104  * dereference the queue from queue groups.  This allows us to quickly pull a
1105  * txq based on a queue index.
1106  *
1107  * Returns 0 on success, negative on failure
1108  */
1109 static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
1110 {
1111 	int i, j, k = 0;
1112 
1113 	vport->txqs = kcalloc(vport->num_txq, sizeof(*vport->txqs),
1114 			      GFP_KERNEL);
1115 
1116 	if (!vport->txqs)
1117 		return -ENOMEM;
1118 
1119 	for (i = 0; i < vport->num_txq_grp; i++) {
1120 		struct idpf_txq_group *tx_grp = &vport->txq_grps[i];
1121 
1122 		for (j = 0; j < tx_grp->num_txq; j++, k++) {
1123 			vport->txqs[k] = tx_grp->txqs[j];
1124 			vport->txqs[k]->idx = k;
1125 		}
1126 	}
1127 
1128 	return 0;
1129 }
1130 
1131 /**
1132  * idpf_vport_init_num_qs - Initialize number of queues
1133  * @vport: vport to initialize queues
1134  * @vport_msg: data to be filled into vport
1135  */
1136 void idpf_vport_init_num_qs(struct idpf_vport *vport,
1137 			    struct virtchnl2_create_vport *vport_msg)
1138 {
1139 	struct idpf_vport_user_config_data *config_data;
1140 	u16 idx = vport->idx;
1141 
1142 	config_data = &vport->adapter->vport_config[idx]->user_config;
1143 	vport->num_txq = le16_to_cpu(vport_msg->num_tx_q);
1144 	vport->num_rxq = le16_to_cpu(vport_msg->num_rx_q);
1145 	/* number of txqs and rxqs in config data will be zeros only in the
1146 	 * driver load path and we don't update them thereafter
1147 	 */
1148 	if (!config_data->num_req_tx_qs && !config_data->num_req_rx_qs) {
1149 		config_data->num_req_tx_qs = le16_to_cpu(vport_msg->num_tx_q);
1150 		config_data->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q);
1151 	}
1152 
1153 	if (idpf_is_queue_model_split(vport->txq_model))
1154 		vport->num_complq = le16_to_cpu(vport_msg->num_tx_complq);
1155 	if (idpf_is_queue_model_split(vport->rxq_model))
1156 		vport->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq);
1157 
1158 	/* Adjust number of buffer queues per Rx queue group. */
1159 	if (!idpf_is_queue_model_split(vport->rxq_model)) {
1160 		vport->num_bufqs_per_qgrp = 0;
1161 
1162 		return;
1163 	}
1164 
1165 	vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP;
1166 }
1167 
1168 /**
1169  * idpf_vport_calc_num_q_desc - Calculate number of queue descriptors
1170  * @vport: vport to calculate queue descriptors for
1171  */
1172 void idpf_vport_calc_num_q_desc(struct idpf_vport *vport)
1173 {
1174 	struct idpf_vport_user_config_data *config_data;
1175 	int num_bufqs = vport->num_bufqs_per_qgrp;
1176 	u32 num_req_txq_desc, num_req_rxq_desc;
1177 	u16 idx = vport->idx;
1178 	int i;
1179 
1180 	config_data =  &vport->adapter->vport_config[idx]->user_config;
1181 	num_req_txq_desc = config_data->num_req_txq_desc;
1182 	num_req_rxq_desc = config_data->num_req_rxq_desc;
1183 
1184 	vport->complq_desc_count = 0;
1185 	if (num_req_txq_desc) {
1186 		vport->txq_desc_count = num_req_txq_desc;
1187 		if (idpf_is_queue_model_split(vport->txq_model)) {
1188 			vport->complq_desc_count = num_req_txq_desc;
1189 			if (vport->complq_desc_count < IDPF_MIN_TXQ_COMPLQ_DESC)
1190 				vport->complq_desc_count =
1191 					IDPF_MIN_TXQ_COMPLQ_DESC;
1192 		}
1193 	} else {
1194 		vport->txq_desc_count =	IDPF_DFLT_TX_Q_DESC_COUNT;
1195 		if (idpf_is_queue_model_split(vport->txq_model))
1196 			vport->complq_desc_count =
1197 				IDPF_DFLT_TX_COMPLQ_DESC_COUNT;
1198 	}
1199 
1200 	if (num_req_rxq_desc)
1201 		vport->rxq_desc_count = num_req_rxq_desc;
1202 	else
1203 		vport->rxq_desc_count = IDPF_DFLT_RX_Q_DESC_COUNT;
1204 
1205 	for (i = 0; i < num_bufqs; i++) {
1206 		if (!vport->bufq_desc_count[i])
1207 			vport->bufq_desc_count[i] =
1208 				IDPF_RX_BUFQ_DESC_COUNT(vport->rxq_desc_count,
1209 							num_bufqs);
1210 	}
1211 }
1212 
1213 /**
1214  * idpf_vport_calc_total_qs - Calculate total number of queues
1215  * @adapter: private data struct
1216  * @vport_idx: vport idx to retrieve vport pointer
1217  * @vport_msg: message to fill with data
1218  * @max_q: vport max queue info
1219  *
1220  * Return 0 on success, error value on failure.
1221  */
1222 int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx,
1223 			     struct virtchnl2_create_vport *vport_msg,
1224 			     struct idpf_vport_max_q *max_q)
1225 {
1226 	int dflt_splitq_txq_grps = 0, dflt_singleq_txqs = 0;
1227 	int dflt_splitq_rxq_grps = 0, dflt_singleq_rxqs = 0;
1228 	u16 num_req_tx_qs = 0, num_req_rx_qs = 0;
1229 	struct idpf_vport_config *vport_config;
1230 	u16 num_txq_grps, num_rxq_grps;
1231 	u32 num_qs;
1232 
1233 	vport_config = adapter->vport_config[vport_idx];
1234 	if (vport_config) {
1235 		num_req_tx_qs = vport_config->user_config.num_req_tx_qs;
1236 		num_req_rx_qs = vport_config->user_config.num_req_rx_qs;
1237 	} else {
1238 		int num_cpus;
1239 
1240 		/* Restrict num of queues to cpus online as a default
1241 		 * configuration to give best performance. User can always
1242 		 * override to a max number of queues via ethtool.
1243 		 */
1244 		num_cpus = num_online_cpus();
1245 
1246 		dflt_splitq_txq_grps = min_t(int, max_q->max_txq, num_cpus);
1247 		dflt_singleq_txqs = min_t(int, max_q->max_txq, num_cpus);
1248 		dflt_splitq_rxq_grps = min_t(int, max_q->max_rxq, num_cpus);
1249 		dflt_singleq_rxqs = min_t(int, max_q->max_rxq, num_cpus);
1250 	}
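	/* Example (a sketch, assuming the defaults of one txq/complq per TX
	 * group, one rxq per RX group, and that max_q is not the limiting
	 * factor): on an 8-CPU system with no user-requested counts, a splitq
	 * vport below asks for 8 TX queues, 8 completion queues, 8 RX queues
	 * and 8 * IDPF_MAX_BUFQS_PER_RXQ_GRP buffer queues.
	 */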
1251 
1252 	if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->txq_model))) {
1253 		num_txq_grps = num_req_tx_qs ? num_req_tx_qs : dflt_splitq_txq_grps;
1254 		vport_msg->num_tx_complq = cpu_to_le16(num_txq_grps *
1255 						       IDPF_COMPLQ_PER_GROUP);
1256 		vport_msg->num_tx_q = cpu_to_le16(num_txq_grps *
1257 						  IDPF_DFLT_SPLITQ_TXQ_PER_GROUP);
1258 	} else {
1259 		num_txq_grps = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
1260 		num_qs = num_txq_grps * (num_req_tx_qs ? num_req_tx_qs :
1261 					 dflt_singleq_txqs);
1262 		vport_msg->num_tx_q = cpu_to_le16(num_qs);
1263 		vport_msg->num_tx_complq = 0;
1264 	}
1265 	if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->rxq_model))) {
1266 		num_rxq_grps = num_req_rx_qs ? num_req_rx_qs : dflt_splitq_rxq_grps;
1267 		vport_msg->num_rx_bufq = cpu_to_le16(num_rxq_grps *
1268 						     IDPF_MAX_BUFQS_PER_RXQ_GRP);
1269 		vport_msg->num_rx_q = cpu_to_le16(num_rxq_grps *
1270 						  IDPF_DFLT_SPLITQ_RXQ_PER_GROUP);
1271 	} else {
1272 		num_rxq_grps = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
1273 		num_qs = num_rxq_grps * (num_req_rx_qs ? num_req_rx_qs :
1274 					 dflt_singleq_rxqs);
1275 		vport_msg->num_rx_q = cpu_to_le16(num_qs);
1276 		vport_msg->num_rx_bufq = 0;
1277 	}
1278 
1279 	return 0;
1280 }
1281 
1282 /**
1283  * idpf_vport_calc_num_q_groups - Calculate number of queue groups
1284  * @vport: vport to calculate q groups for
1285  */
1286 void idpf_vport_calc_num_q_groups(struct idpf_vport *vport)
1287 {
1288 	if (idpf_is_queue_model_split(vport->txq_model))
1289 		vport->num_txq_grp = vport->num_txq;
1290 	else
1291 		vport->num_txq_grp = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
1292 
1293 	if (idpf_is_queue_model_split(vport->rxq_model))
1294 		vport->num_rxq_grp = vport->num_rxq;
1295 	else
1296 		vport->num_rxq_grp = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
1297 }
1298 
1299 /**
1300  * idpf_vport_calc_numq_per_grp - Calculate number of queues per group
1301  * @vport: vport to calculate queues for
1302  * @num_txq: return parameter for number of TX queues
1303  * @num_rxq: return parameter for number of RX queues
1304  */
1305 static void idpf_vport_calc_numq_per_grp(struct idpf_vport *vport,
1306 					 u16 *num_txq, u16 *num_rxq)
1307 {
1308 	if (idpf_is_queue_model_split(vport->txq_model))
1309 		*num_txq = IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;
1310 	else
1311 		*num_txq = vport->num_txq;
1312 
1313 	if (idpf_is_queue_model_split(vport->rxq_model))
1314 		*num_rxq = IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
1315 	else
1316 		*num_rxq = vport->num_rxq;
1317 }
1318 
1319 /**
1320  * idpf_rxq_set_descids - set the descids supported by this queue
1321  * @vport: virtual port data structure
1322  * @q: rx queue for which descids are set
1323  *
1324  */
1325 static void idpf_rxq_set_descids(const struct idpf_vport *vport,
1326 				 struct idpf_rx_queue *q)
1327 {
1328 	if (idpf_is_queue_model_split(vport->rxq_model)) {
1329 		q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
1330 	} else {
1331 		if (vport->base_rxd)
1332 			q->rxdids = VIRTCHNL2_RXDID_1_32B_BASE_M;
1333 		else
1334 			q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
1335 	}
1336 }
1337 
1338 /**
1339  * idpf_txq_group_alloc - Allocate all txq group resources
1340  * @vport: vport to allocate txq groups for
1341  * @num_txq: number of txqs to allocate for each group
1342  *
1343  * Returns 0 on success, negative on failure
1344  */
1345 static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
1346 {
1347 	bool split, flow_sch_en;
1348 	int i;
1349 
1350 	vport->txq_grps = kcalloc(vport->num_txq_grp,
1351 				  sizeof(*vport->txq_grps), GFP_KERNEL);
1352 	if (!vport->txq_grps)
1353 		return -ENOMEM;
1354 
1355 	split = idpf_is_queue_model_split(vport->txq_model);
1356 	flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
1357 				       VIRTCHNL2_CAP_SPLITQ_QSCHED);
1358 
1359 	for (i = 0; i < vport->num_txq_grp; i++) {
1360 		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
1361 		struct idpf_adapter *adapter = vport->adapter;
1362 		struct idpf_txq_stash *stashes;
1363 		int j;
1364 
1365 		tx_qgrp->vport = vport;
1366 		tx_qgrp->num_txq = num_txq;
1367 
1368 		for (j = 0; j < tx_qgrp->num_txq; j++) {
1369 			tx_qgrp->txqs[j] = kzalloc(sizeof(*tx_qgrp->txqs[j]),
1370 						   GFP_KERNEL);
1371 			if (!tx_qgrp->txqs[j])
1372 				goto err_alloc;
1373 		}
1374 
1375 		if (split && flow_sch_en) {
1376 			stashes = kcalloc(num_txq, sizeof(*stashes),
1377 					  GFP_KERNEL);
1378 			if (!stashes)
1379 				goto err_alloc;
1380 
1381 			tx_qgrp->stashes = stashes;
1382 		}
1383 
1384 		for (j = 0; j < tx_qgrp->num_txq; j++) {
1385 			struct idpf_tx_queue *q = tx_qgrp->txqs[j];
1386 
1387 			q->dev = &adapter->pdev->dev;
1388 			q->desc_count = vport->txq_desc_count;
1389 			q->tx_max_bufs = idpf_get_max_tx_bufs(adapter);
1390 			q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter);
1391 			q->netdev = vport->netdev;
1392 			q->txq_grp = tx_qgrp;
1393 
1394 			if (!split) {
1395 				q->clean_budget = vport->compln_clean_budget;
1396 				idpf_queue_assign(CRC_EN, q,
1397 						  vport->crc_enable);
1398 			}
1399 
1400 			if (!flow_sch_en)
1401 				continue;
1402 
1403 			if (split) {
1404 				q->stash = &stashes[j];
1405 				hash_init(q->stash->sched_buf_hash);
1406 			}
1407 
1408 			idpf_queue_set(FLOW_SCH_EN, q);
1409 		}
1410 
1411 		if (!split)
1412 			continue;
1413 
1414 		tx_qgrp->complq = kcalloc(IDPF_COMPLQ_PER_GROUP,
1415 					  sizeof(*tx_qgrp->complq),
1416 					  GFP_KERNEL);
1417 		if (!tx_qgrp->complq)
1418 			goto err_alloc;
1419 
1420 		tx_qgrp->complq->desc_count = vport->complq_desc_count;
1421 		tx_qgrp->complq->txq_grp = tx_qgrp;
1422 		tx_qgrp->complq->netdev = vport->netdev;
1423 		tx_qgrp->complq->clean_budget = vport->compln_clean_budget;
1424 
1425 		if (flow_sch_en)
1426 			idpf_queue_set(FLOW_SCH_EN, tx_qgrp->complq);
1427 	}
1428 
1429 	return 0;
1430 
1431 err_alloc:
1432 	idpf_txq_group_rel(vport);
1433 
1434 	return -ENOMEM;
1435 }
1436 
1437 /**
1438  * idpf_rxq_group_alloc - Allocate all rxq group resources
1439  * @vport: vport to allocate rxq groups for
1440  * @num_rxq: number of rxqs to allocate for each group
1441  *
1442  * Returns 0 on success, negative on failure
1443  */
1444 static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
1445 {
1446 	int i, k, err = 0;
1447 	bool hs;
1448 
1449 	vport->rxq_grps = kcalloc(vport->num_rxq_grp,
1450 				  sizeof(struct idpf_rxq_group), GFP_KERNEL);
1451 	if (!vport->rxq_grps)
1452 		return -ENOMEM;
1453 
1454 	hs = idpf_vport_get_hsplit(vport) == ETHTOOL_TCP_DATA_SPLIT_ENABLED;
1455 
1456 	for (i = 0; i < vport->num_rxq_grp; i++) {
1457 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1458 		int j;
1459 
1460 		rx_qgrp->vport = vport;
1461 		if (!idpf_is_queue_model_split(vport->rxq_model)) {
1462 			rx_qgrp->singleq.num_rxq = num_rxq;
1463 			for (j = 0; j < num_rxq; j++) {
1464 				rx_qgrp->singleq.rxqs[j] =
1465 						kzalloc(sizeof(*rx_qgrp->singleq.rxqs[j]),
1466 							GFP_KERNEL);
1467 				if (!rx_qgrp->singleq.rxqs[j]) {
1468 					err = -ENOMEM;
1469 					goto err_alloc;
1470 				}
1471 			}
1472 			goto skip_splitq_rx_init;
1473 		}
1474 		rx_qgrp->splitq.num_rxq_sets = num_rxq;
1475 
1476 		for (j = 0; j < num_rxq; j++) {
1477 			rx_qgrp->splitq.rxq_sets[j] =
1478 				kzalloc(sizeof(struct idpf_rxq_set),
1479 					GFP_KERNEL);
1480 			if (!rx_qgrp->splitq.rxq_sets[j]) {
1481 				err = -ENOMEM;
1482 				goto err_alloc;
1483 			}
1484 		}
1485 
1486 		rx_qgrp->splitq.bufq_sets = kcalloc(vport->num_bufqs_per_qgrp,
1487 						    sizeof(struct idpf_bufq_set),
1488 						    GFP_KERNEL);
1489 		if (!rx_qgrp->splitq.bufq_sets) {
1490 			err = -ENOMEM;
1491 			goto err_alloc;
1492 		}
1493 
1494 		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
1495 			struct idpf_bufq_set *bufq_set =
1496 				&rx_qgrp->splitq.bufq_sets[j];
1497 			int swq_size = sizeof(struct idpf_sw_queue);
1498 			struct idpf_buf_queue *q;
1499 
1500 			q = &rx_qgrp->splitq.bufq_sets[j].bufq;
1501 			q->desc_count = vport->bufq_desc_count[j];
1502 			q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
1503 
1504 			idpf_queue_assign(HSPLIT_EN, q, hs);
1505 
1506 			bufq_set->num_refillqs = num_rxq;
1507 			bufq_set->refillqs = kcalloc(num_rxq, swq_size,
1508 						     GFP_KERNEL);
1509 			if (!bufq_set->refillqs) {
1510 				err = -ENOMEM;
1511 				goto err_alloc;
1512 			}
1513 			for (k = 0; k < bufq_set->num_refillqs; k++) {
1514 				struct idpf_sw_queue *refillq =
1515 					&bufq_set->refillqs[k];
1516 
1517 				refillq->desc_count =
1518 					vport->bufq_desc_count[j];
1519 				idpf_queue_set(GEN_CHK, refillq);
1520 				idpf_queue_set(RFL_GEN_CHK, refillq);
1521 				refillq->ring = kcalloc(refillq->desc_count,
1522 							sizeof(*refillq->ring),
1523 							GFP_KERNEL);
1524 				if (!refillq->ring) {
1525 					err = -ENOMEM;
1526 					goto err_alloc;
1527 				}
1528 			}
1529 		}
1530 
1531 skip_splitq_rx_init:
1532 		for (j = 0; j < num_rxq; j++) {
1533 			struct idpf_rx_queue *q;
1534 
1535 			if (!idpf_is_queue_model_split(vport->rxq_model)) {
1536 				q = rx_qgrp->singleq.rxqs[j];
1537 				goto setup_rxq;
1538 			}
1539 			q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
1540 			rx_qgrp->splitq.rxq_sets[j]->refillq[0] =
1541 			      &rx_qgrp->splitq.bufq_sets[0].refillqs[j];
1542 			if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP)
1543 				rx_qgrp->splitq.rxq_sets[j]->refillq[1] =
1544 				      &rx_qgrp->splitq.bufq_sets[1].refillqs[j];
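			/* Each rxq is wired to one refill queue per buffer
			 * queue in its group; buffer ids released by the rxq
			 * are handed back to the matching bufq through these.
			 */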
1545 
1546 			idpf_queue_assign(HSPLIT_EN, q, hs);
1547 
1548 setup_rxq:
1549 			q->desc_count = vport->rxq_desc_count;
1550 			q->rx_ptype_lkup = vport->rx_ptype_lkup;
1551 			q->netdev = vport->netdev;
1552 			q->bufq_sets = rx_qgrp->splitq.bufq_sets;
1553 			q->idx = (i * num_rxq) + j;
1554 			q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
1555 			q->rx_max_pkt_size = vport->netdev->mtu +
1556 							LIBETH_RX_LL_LEN;
1557 			idpf_rxq_set_descids(vport, q);
1558 		}
1559 	}
1560 
1561 err_alloc:
1562 	if (err)
1563 		idpf_rxq_group_rel(vport);
1564 
1565 	return err;
1566 }
1567 
1568 /**
1569  * idpf_vport_queue_grp_alloc_all - Allocate all queue groups/resources
1570  * @vport: vport with qgrps to allocate
1571  *
1572  * Returns 0 on success, negative on failure
1573  */
1574 static int idpf_vport_queue_grp_alloc_all(struct idpf_vport *vport)
1575 {
1576 	u16 num_txq, num_rxq;
1577 	int err;
1578 
1579 	idpf_vport_calc_numq_per_grp(vport, &num_txq, &num_rxq);
1580 
1581 	err = idpf_txq_group_alloc(vport, num_txq);
1582 	if (err)
1583 		goto err_out;
1584 
1585 	err = idpf_rxq_group_alloc(vport, num_rxq);
1586 	if (err)
1587 		goto err_out;
1588 
1589 	return 0;
1590 
1591 err_out:
1592 	idpf_vport_queue_grp_rel_all(vport);
1593 
1594 	return err;
1595 }
1596 
1597 /**
1598  * idpf_vport_queues_alloc - Allocate memory for all queues
1599  * @vport: virtual port
1600  *
1601  * Allocate memory for queues associated with a vport.  Returns 0 on success,
1602  * negative on failure.
1603  */
1604 int idpf_vport_queues_alloc(struct idpf_vport *vport)
1605 {
1606 	int err;
1607 
1608 	err = idpf_vport_queue_grp_alloc_all(vport);
1609 	if (err)
1610 		goto err_out;
1611 
1612 	err = idpf_tx_desc_alloc_all(vport);
1613 	if (err)
1614 		goto err_out;
1615 
1616 	err = idpf_rx_desc_alloc_all(vport);
1617 	if (err)
1618 		goto err_out;
1619 
1620 	err = idpf_vport_init_fast_path_txqs(vport);
1621 	if (err)
1622 		goto err_out;
1623 
1624 	return 0;
1625 
1626 err_out:
1627 	idpf_vport_queues_rel(vport);
1628 
1629 	return err;
1630 }
1631 
1632 /**
1633  * idpf_tx_handle_sw_marker - Handle queue marker packet
1634  * @tx_q: tx queue to handle software marker
1635  */
1636 static void idpf_tx_handle_sw_marker(struct idpf_tx_queue *tx_q)
1637 {
1638 	struct idpf_netdev_priv *priv = netdev_priv(tx_q->netdev);
1639 	struct idpf_vport *vport = priv->vport;
1640 	int i;
1641 
1642 	idpf_queue_clear(SW_MARKER, tx_q);
1643 	/* Hardware must write marker packets to all queues associated with
1644 	 * completion queues. So check if all queues received marker packets
1645 	 */
1646 	for (i = 0; i < vport->num_txq; i++)
1647 		/* If we're still waiting on any other TXQ marker completions,
1648 		 * just return now since we cannot wake up the marker_wq yet.
1649 		 */
1650 		if (idpf_queue_has(SW_MARKER, vport->txqs[i]))
1651 			return;
1652 
1653 	/* Drain complete */
1654 	set_bit(IDPF_VPORT_SW_MARKER, vport->flags);
1655 	wake_up(&vport->sw_marker_wq);
1656 }
1657 
1658 /**
1659  * idpf_tx_splitq_clean_hdr - Clean TX buffer resources for header portion of
1660  * packet
1661  * @tx_q: tx queue to clean buffer from
1662  * @tx_buf: buffer to be cleaned
1663  * @cleaned: pointer to stats struct to track cleaned packets/bytes
1664  * @napi_budget: Used to determine if we are in netpoll
1665  */
1666 static void idpf_tx_splitq_clean_hdr(struct idpf_tx_queue *tx_q,
1667 				     struct idpf_tx_buf *tx_buf,
1668 				     struct idpf_cleaned_stats *cleaned,
1669 				     int napi_budget)
1670 {
1671 	napi_consume_skb(tx_buf->skb, napi_budget);
1672 
1673 	if (dma_unmap_len(tx_buf, len)) {
1674 		dma_unmap_single(tx_q->dev,
1675 				 dma_unmap_addr(tx_buf, dma),
1676 				 dma_unmap_len(tx_buf, len),
1677 				 DMA_TO_DEVICE);
1678 
1679 		dma_unmap_len_set(tx_buf, len, 0);
1680 	}
1681 
1682 	/* clear tx_buf data */
1683 	tx_buf->skb = NULL;
1684 
1685 	cleaned->bytes += tx_buf->bytecount;
1686 	cleaned->packets += tx_buf->gso_segs;
1687 }
1688 
1689 /**
1690  * idpf_tx_clean_stashed_bufs - clean bufs that were stored for
1691  * out of order completions
1692  * @txq: queue to clean
1693  * @compl_tag: completion tag of packet to clean (from completion descriptor)
1694  * @cleaned: pointer to stats struct to track cleaned packets/bytes
1695  * @budget: Used to determine if we are in netpoll
1696  */
1697 static void idpf_tx_clean_stashed_bufs(struct idpf_tx_queue *txq,
1698 				       u16 compl_tag,
1699 				       struct idpf_cleaned_stats *cleaned,
1700 				       int budget)
1701 {
1702 	struct idpf_tx_stash *stash;
1703 	struct hlist_node *tmp_buf;
1704 
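	/* Several completion tags can hash to the same bucket, hence the
	 * explicit compl_tag check inside the walk below.
	 */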
1705 	/* Buffer completion */
1706 	hash_for_each_possible_safe(txq->stash->sched_buf_hash, stash, tmp_buf,
1707 				    hlist, compl_tag) {
1708 		if (unlikely(stash->buf.compl_tag != (int)compl_tag))
1709 			continue;
1710 
1711 		if (stash->buf.skb) {
1712 			idpf_tx_splitq_clean_hdr(txq, &stash->buf, cleaned,
1713 						 budget);
1714 		} else if (dma_unmap_len(&stash->buf, len)) {
1715 			dma_unmap_page(txq->dev,
1716 				       dma_unmap_addr(&stash->buf, dma),
1717 				       dma_unmap_len(&stash->buf, len),
1718 				       DMA_TO_DEVICE);
1719 			dma_unmap_len_set(&stash->buf, len, 0);
1720 		}
1721 
1722 		/* Push shadow buf back onto stack */
1723 		idpf_buf_lifo_push(&txq->stash->buf_stack, stash);
1724 
1725 		hash_del(&stash->hlist);
1726 	}
1727 }
1728 
1729 /**
1730  * idpf_stash_flow_sch_buffers - store buffer parameters to be freed at a
1731  * later time (only relevant for flow scheduling mode)
1732  * @txq: Tx queue to clean
1733  * @tx_buf: buffer to store
1734  */
1735 static int idpf_stash_flow_sch_buffers(struct idpf_tx_queue *txq,
1736 				       struct idpf_tx_buf *tx_buf)
1737 {
1738 	struct idpf_tx_stash *stash;
1739 
1740 	if (unlikely(!dma_unmap_addr(tx_buf, dma) &&
1741 		     !dma_unmap_len(tx_buf, len)))
1742 		return 0;
1743 
1744 	stash = idpf_buf_lifo_pop(&txq->stash->buf_stack);
1745 	if (unlikely(!stash)) {
1746 		net_err_ratelimited("%s: No out-of-order TX buffers left!\n",
1747 				    netdev_name(txq->netdev));
1748 
1749 		return -ENOMEM;
1750 	}
1751 
1752 	/* Store buffer params in shadow buffer */
1753 	stash->buf.skb = tx_buf->skb;
1754 	stash->buf.bytecount = tx_buf->bytecount;
1755 	stash->buf.gso_segs = tx_buf->gso_segs;
1756 	dma_unmap_addr_set(&stash->buf, dma, dma_unmap_addr(tx_buf, dma));
1757 	dma_unmap_len_set(&stash->buf, len, dma_unmap_len(tx_buf, len));
1758 	stash->buf.compl_tag = tx_buf->compl_tag;
1759 
1760 	/* Add buffer to buf_hash table to be freed later */
1761 	hash_add(txq->stash->sched_buf_hash, &stash->hlist,
1762 		 stash->buf.compl_tag);
1763 
1764 	memset(tx_buf, 0, sizeof(struct idpf_tx_buf));
1765 
1766 	/* Reinitialize buf_id portion of tag */
1767 	tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
1768 
1769 	return 0;
1770 }
1771 
1772 #define idpf_tx_splitq_clean_bump_ntc(txq, ntc, desc, buf)	\
1773 do {								\
1774 	(ntc)++;						\
1775 	if (unlikely(!(ntc))) {					\
1776 		ntc -= (txq)->desc_count;			\
1777 		buf = (txq)->tx_buf;				\
1778 		desc = &(txq)->flex_tx[0];			\
1779 	} else {						\
1780 		(buf)++;					\
1781 		(desc)++;					\
1782 	}							\
1783 } while (0)
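
/* Note on the macro above: idpf_tx_splitq_clean() biases ntc by -desc_count
 * before iterating, so the "!(ntc)" test doubles as the wraparound check:
 * ntc climbs from -desc_count toward zero and is rewound (with the buffer and
 * descriptor cursors reset to the ring base) exactly when it reaches zero.
 */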
1784 
1785 /**
1786  * idpf_tx_splitq_clean - Reclaim resources from buffer queue
1787  * @tx_q: Tx queue to clean
1788  * @end: queue index until which it should be cleaned
1789  * @napi_budget: Used to determine if we are in netpoll
1790  * @cleaned: pointer to stats struct to track cleaned packets/bytes
1791  * @descs_only: true if queue is using flow-based scheduling and should
1792  * not clean buffers at this time
1793  *
1794  * Cleans the queue descriptor ring. If the queue is using queue-based
1795  * scheduling, the buffers will be cleaned as well. If the queue is using
1796  * flow-based scheduling, only the descriptors are cleaned at this time.
1797  * Separate packet completion events will be reported on the completion queue,
1798  * and the buffers will be cleaned separately. The stats are not updated from
1799  * this function when using flow-based scheduling.
1800  */
1801 static void idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
1802 				 int napi_budget,
1803 				 struct idpf_cleaned_stats *cleaned,
1804 				 bool descs_only)
1805 {
1806 	union idpf_tx_flex_desc *next_pending_desc = NULL;
1807 	union idpf_tx_flex_desc *tx_desc;
1808 	s16 ntc = tx_q->next_to_clean;
1809 	struct idpf_tx_buf *tx_buf;
1810 
1811 	tx_desc = &tx_q->flex_tx[ntc];
1812 	next_pending_desc = &tx_q->flex_tx[end];
1813 	tx_buf = &tx_q->tx_buf[ntc];
1814 	ntc -= tx_q->desc_count;
1815 
1816 	while (tx_desc != next_pending_desc) {
1817 		union idpf_tx_flex_desc *eop_desc;
1818 
1819 		/* If this entry in the ring was used as a context descriptor,
1820 		 * its corresponding entry in the buffer ring will have an
1821 		 * invalid completion tag since no buffer was used.  We can
1822 		 * skip this descriptor since there is no buffer to clean.
1823 		 */
1824 		if (unlikely(tx_buf->compl_tag == IDPF_SPLITQ_TX_INVAL_COMPL_TAG))
1825 			goto fetch_next_txq_desc;
1826 
1827 		eop_desc = (union idpf_tx_flex_desc *)tx_buf->next_to_watch;
1828 
1829 		/* clear next_to_watch to prevent false hangs */
1830 		tx_buf->next_to_watch = NULL;
1831 
1832 		if (descs_only) {
1833 			if (idpf_stash_flow_sch_buffers(tx_q, tx_buf))
1834 				goto tx_splitq_clean_out;
1835 
1836 			while (tx_desc != eop_desc) {
1837 				idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
1838 							      tx_desc, tx_buf);
1839 
1840 				if (dma_unmap_len(tx_buf, len)) {
1841 					if (idpf_stash_flow_sch_buffers(tx_q,
1842 									tx_buf))
1843 						goto tx_splitq_clean_out;
1844 				}
1845 			}
1846 		} else {
1847 			idpf_tx_splitq_clean_hdr(tx_q, tx_buf, cleaned,
1848 						 napi_budget);
1849 
1850 			/* unmap remaining buffers */
1851 			while (tx_desc != eop_desc) {
1852 				idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
1853 							      tx_desc, tx_buf);
1854 
1855 				/* unmap any remaining paged data */
1856 				if (dma_unmap_len(tx_buf, len)) {
1857 					dma_unmap_page(tx_q->dev,
1858 						       dma_unmap_addr(tx_buf, dma),
1859 						       dma_unmap_len(tx_buf, len),
1860 						       DMA_TO_DEVICE);
1861 					dma_unmap_len_set(tx_buf, len, 0);
1862 				}
1863 			}
1864 		}
1865 
1866 fetch_next_txq_desc:
1867 		idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, tx_desc, tx_buf);
1868 	}
1869 
1870 tx_splitq_clean_out:
1871 	ntc += tx_q->desc_count;
1872 	tx_q->next_to_clean = ntc;
1873 }
1874 
1875 #define idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, buf)	\
1876 do {							\
1877 	(buf)++;					\
1878 	(ntc)++;					\
1879 	if (unlikely((ntc) == (txq)->desc_count)) {	\
1880 		buf = (txq)->tx_buf;			\
1881 		ntc = 0;				\
1882 	}						\
1883 } while (0)
1884 
1885 /**
1886  * idpf_tx_clean_buf_ring - clean flow scheduling TX queue buffers
1887  * @txq: queue to clean
1888  * @compl_tag: completion tag of packet to clean (from completion descriptor)
1889  * @cleaned: pointer to stats struct to track cleaned packets/bytes
1890  * @budget: Used to determine if we are in netpoll
1891  *
1892  * Cleans all buffers associated with the input completion tag either from the
1893  * TX buffer ring or from the hash table if the buffers were previously
1894  * stashed. Returns true if any buffers were cleaned on the ring; false if
1895  * the packet must instead be cleaned from the stash hash table.
1896  */
1897 static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag,
1898 				   struct idpf_cleaned_stats *cleaned,
1899 				   int budget)
1900 {
1901 	u16 idx = compl_tag & txq->compl_tag_bufid_m;
1902 	struct idpf_tx_buf *tx_buf = NULL;
1903 	u16 ntc = txq->next_to_clean;
1904 	u16 num_descs_cleaned = 0;
1905 	u16 orig_idx = idx;
1906 
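	/* The low-order bits of the completion tag are the ring index of the
	 * packet's first buffer, so cleaning can start there directly rather
	 * than scanning forward from next_to_clean.
	 */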
1907 	tx_buf = &txq->tx_buf[idx];
1908 
1909 	while (tx_buf->compl_tag == (int)compl_tag) {
1910 		if (tx_buf->skb) {
1911 			idpf_tx_splitq_clean_hdr(txq, tx_buf, cleaned, budget);
1912 		} else if (dma_unmap_len(tx_buf, len)) {
1913 			dma_unmap_page(txq->dev,
1914 				       dma_unmap_addr(tx_buf, dma),
1915 				       dma_unmap_len(tx_buf, len),
1916 				       DMA_TO_DEVICE);
1917 			dma_unmap_len_set(tx_buf, len, 0);
1918 		}
1919 
1920 		memset(tx_buf, 0, sizeof(struct idpf_tx_buf));
1921 		tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
1922 
1923 		num_descs_cleaned++;
1924 		idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
1925 	}
1926 
1927 	/* If we didn't clean anything on the ring for this completion, there's
1928 	 * nothing more to do.
1929 	 */
1930 	if (unlikely(!num_descs_cleaned))
1931 		return false;
1932 
1933 	/* Otherwise, if we did clean a packet on the ring directly, it's safe
1934 	 * to assume that the descriptors starting from the original
1935 	 * next_to_clean up until the previously cleaned packet can be reused.
1936 	 * Therefore, we will go back in the ring and stash any buffers still
1937 	 * in the ring into the hash table to be cleaned later.
1938 	 */
1939 	tx_buf = &txq->tx_buf[ntc];
1940 	while (tx_buf != &txq->tx_buf[orig_idx]) {
1941 		idpf_stash_flow_sch_buffers(txq, tx_buf);
1942 		idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, tx_buf);
1943 	}
1944 
1945 	/* Finally, update next_to_clean to reflect the work that was just done
1946 	 * on the ring, if any. If the packet was only cleaned from the hash
1947 	 * table, the ring will not be impacted, therefore we should not touch
1948 	 * next_to_clean. The updated idx is used here
1949 	 */
1950 	txq->next_to_clean = idx;
1951 
1952 	return true;
1953 }
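
/* Illustrative sketch (not part of the driver): how a flow-scheduling
 * completion tag such as the one cleaned above can be decomposed. The tag is
 * built in idpf_tx_splitq_map() as (compl_tag_cur_gen << compl_tag_gen_s) |
 * ring index, so masking with compl_tag_bufid_m recovers the starting buffer
 * index. The shift and mask widths below are made-up placeholders, not the
 * driver's actual configuration.
 */
#if 0	/* standalone example, not compiled with the driver */
#include <stdio.h>

int main(void)
{
	const unsigned int gen_s = 10;			/* assumed generation shift */
	const unsigned int bufid_m = (1u << gen_s) - 1;	/* assumed buf-id mask */
	unsigned int gen = 3, idx = 517;

	/* encode as in idpf_tx_splitq_map() */
	unsigned int tag = (gen << gen_s) | idx;

	printf("tag=0x%04x bufid=%u gen=%u\n",
	       tag, tag & bufid_m, tag >> gen_s);
	return 0;
}
#endif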
1954 
1955 /**
1956  * idpf_tx_handle_rs_completion - clean a single packet and all of its buffers
1957  * whether on the buffer ring or in the hash table
1958  * @txq: Tx ring to clean
1959  * @desc: pointer to completion queue descriptor to extract completion
1960  * information from
1961  * @cleaned: pointer to stats struct to track cleaned packets/bytes
1962  * @budget: Used to determine if we are in netpoll
1963  *
1964  * Returns bytes/packets cleaned
1965  */
1966 static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq,
1967 					 struct idpf_splitq_tx_compl_desc *desc,
1968 					 struct idpf_cleaned_stats *cleaned,
1969 					 int budget)
1970 {
1971 	u16 compl_tag;
1972 
1973 	if (!idpf_queue_has(FLOW_SCH_EN, txq)) {
1974 		u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head);
1975 
1976 		return idpf_tx_splitq_clean(txq, head, budget, cleaned, false);
1977 	}
1978 
1979 	compl_tag = le16_to_cpu(desc->q_head_compl_tag.compl_tag);
1980 
1981 	/* If we didn't clean anything on the ring, this packet must be
1982 	 * in the hash table. Go clean it there.
1983 	 */
1984 	if (!idpf_tx_clean_buf_ring(txq, compl_tag, cleaned, budget))
1985 		idpf_tx_clean_stashed_bufs(txq, compl_tag, cleaned, budget);
1986 }
1987 
1988 /**
1989  * idpf_tx_clean_complq - Reclaim resources on completion queue
1990  * @complq: Tx ring to clean
1991  * @budget: Used to determine if we are in netpoll
1992  * @cleaned: returns number of packets cleaned
1993  *
1994  * Returns true if there's any budget left (e.g. the clean is finished)
1995  */
1996 static bool idpf_tx_clean_complq(struct idpf_compl_queue *complq, int budget,
1997 				 int *cleaned)
1998 {
1999 	struct idpf_splitq_tx_compl_desc *tx_desc;
2000 	s16 ntc = complq->next_to_clean;
2001 	struct idpf_netdev_priv *np;
2002 	unsigned int complq_budget;
2003 	bool complq_ok = true;
2004 	int i;
2005 
2006 	complq_budget = complq->clean_budget;
2007 	tx_desc = &complq->comp[ntc];
2008 	ntc -= complq->desc_count;
2009 
2010 	do {
2011 		struct idpf_cleaned_stats cleaned_stats = { };
2012 		struct idpf_tx_queue *tx_q;
2013 		int rel_tx_qid;
2014 		u16 hw_head;
2015 		u8 ctype;	/* completion type */
2016 		u16 gen;
2017 
2018 		/* if the descriptor isn't done, no work yet to do */
2019 		gen = le16_get_bits(tx_desc->qid_comptype_gen,
2020 				    IDPF_TXD_COMPLQ_GEN_M);
2021 		if (idpf_queue_has(GEN_CHK, complq) != gen)
2022 			break;
2023 
2024 		/* Find necessary info of TX queue to clean buffers */
2025 		rel_tx_qid = le16_get_bits(tx_desc->qid_comptype_gen,
2026 					   IDPF_TXD_COMPLQ_QID_M);
2027 		if (rel_tx_qid >= complq->txq_grp->num_txq ||
2028 		    !complq->txq_grp->txqs[rel_tx_qid]) {
2029 			netdev_err(complq->netdev, "TxQ not found\n");
2030 			goto fetch_next_desc;
2031 		}
2032 		tx_q = complq->txq_grp->txqs[rel_tx_qid];
2033 
2034 		/* Determine completion type */
2035 		ctype = le16_get_bits(tx_desc->qid_comptype_gen,
2036 				      IDPF_TXD_COMPLQ_COMPL_TYPE_M);
2037 		switch (ctype) {
2038 		case IDPF_TXD_COMPLT_RE:
2039 			hw_head = le16_to_cpu(tx_desc->q_head_compl_tag.q_head);
2040 
2041 			idpf_tx_splitq_clean(tx_q, hw_head, budget,
2042 					     &cleaned_stats, true);
2043 			break;
2044 		case IDPF_TXD_COMPLT_RS:
2045 			idpf_tx_handle_rs_completion(tx_q, tx_desc,
2046 						     &cleaned_stats, budget);
2047 			break;
2048 		case IDPF_TXD_COMPLT_SW_MARKER:
2049 			idpf_tx_handle_sw_marker(tx_q);
2050 			break;
2051 		default:
2052 			netdev_err(tx_q->netdev,
2053 				   "Unknown TX completion type: %d\n", ctype);
2054 			goto fetch_next_desc;
2055 		}
2056 
2057 		u64_stats_update_begin(&tx_q->stats_sync);
2058 		u64_stats_add(&tx_q->q_stats.packets, cleaned_stats.packets);
2059 		u64_stats_add(&tx_q->q_stats.bytes, cleaned_stats.bytes);
2060 		tx_q->cleaned_pkts += cleaned_stats.packets;
2061 		tx_q->cleaned_bytes += cleaned_stats.bytes;
2062 		complq->num_completions++;
2063 		u64_stats_update_end(&tx_q->stats_sync);
2064 
2065 fetch_next_desc:
2066 		tx_desc++;
2067 		ntc++;
2068 		if (unlikely(!ntc)) {
2069 			ntc -= complq->desc_count;
2070 			tx_desc = &complq->comp[0];
2071 			idpf_queue_change(GEN_CHK, complq);
2072 		}
2073 
2074 		prefetch(tx_desc);
2075 
2076 		/* update budget accounting */
2077 		complq_budget--;
2078 	} while (likely(complq_budget));
2079 
2080 	/* Store the state of the complq to be used later in deciding if a
2081 	 * TXQ can be started again
2082 	 */
2083 	if (unlikely(IDPF_TX_COMPLQ_PENDING(complq->txq_grp) >
2084 		     IDPF_TX_COMPLQ_OVERFLOW_THRESH(complq)))
2085 		complq_ok = false;
2086 
2087 	np = netdev_priv(complq->netdev);
2088 	for (i = 0; i < complq->txq_grp->num_txq; ++i) {
2089 		struct idpf_tx_queue *tx_q = complq->txq_grp->txqs[i];
2090 		struct netdev_queue *nq;
2091 		bool dont_wake;
2092 
2093 		/* We didn't clean anything on this queue, move along */
2094 		if (!tx_q->cleaned_bytes)
2095 			continue;
2096 
2097 		*cleaned += tx_q->cleaned_pkts;
2098 
2099 		/* Update BQL */
2100 		nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
2101 
2102 		dont_wake = !complq_ok || IDPF_TX_BUF_RSV_LOW(tx_q) ||
2103 			    np->state != __IDPF_VPORT_UP ||
2104 			    !netif_carrier_ok(tx_q->netdev);
2105 		/* Check if the TXQ needs to and can be restarted */
2106 		__netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes,
2107 					   IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
2108 					   dont_wake);
2109 
2110 		/* Reset cleaned stats for the next time this queue is
2111 		 * cleaned
2112 		 */
2113 		tx_q->cleaned_bytes = 0;
2114 		tx_q->cleaned_pkts = 0;
2115 	}
2116 
2117 	ntc += complq->desc_count;
2118 	complq->next_to_clean = ntc;
2119 
2120 	return !!complq_budget;
2121 }
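
/* Illustrative sketch (not part of the driver): the generation-bit handshake
 * used above to detect completed descriptors. SW tracks an expected GEN value
 * (GEN_CHK) and flips it every time it wraps the ring; a descriptor counts as
 * done only while its GEN field matches the expected value. Ring size and
 * contents below are arbitrary.
 */
#if 0	/* standalone example, not compiled with the driver */
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 4

int main(void)
{
	bool desc_gen[RING_SIZE] = { true, true, false, false }; /* HW wrote 0,1 */
	bool expected_gen = true;	/* SW's GEN_CHK */
	int ntc = 0;

	while (desc_gen[ntc] == expected_gen) {
		printf("descriptor %d is done, cleaning\n", ntc);
		if (++ntc == RING_SIZE) {
			ntc = 0;
			expected_gen = !expected_gen; /* idpf_queue_change(GEN_CHK, ...) */
		}
	}
	printf("stop at %d: descriptor not yet written by HW\n", ntc);
	return 0;
}
#endif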
2122 
2123 /**
2124  * idpf_tx_splitq_build_ctb - populate command tag and size for queue-based
2125  * scheduling descriptors
2126  * @desc: descriptor to populate
2127  * @params: pointer to tx params struct
2128  * @td_cmd: command to be filled in desc
2129  * @size: size of buffer
2130  */
2131 void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc,
2132 			      struct idpf_tx_splitq_params *params,
2133 			      u16 td_cmd, u16 size)
2134 {
2135 	desc->q.qw1.cmd_dtype =
2136 		le16_encode_bits(params->dtype, IDPF_FLEX_TXD_QW1_DTYPE_M);
2137 	desc->q.qw1.cmd_dtype |=
2138 		le16_encode_bits(td_cmd, IDPF_FLEX_TXD_QW1_CMD_M);
2139 	desc->q.qw1.buf_size = cpu_to_le16(size);
2140 	desc->q.qw1.l2tags.l2tag1 = cpu_to_le16(params->td_tag);
2141 }
2142 
2143 /**
2144  * idpf_tx_splitq_build_flow_desc - populate command tag and size for flow
2145  * scheduling descriptors
2146  * @desc: descriptor to populate
2147  * @params: pointer to tx params struct
2148  * @td_cmd: command to be filled in desc
2149  * @size: size of buffer
2150  */
2151 void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
2152 				    struct idpf_tx_splitq_params *params,
2153 				    u16 td_cmd, u16 size)
2154 {
2155 	desc->flow.qw1.cmd_dtype = (u16)params->dtype | td_cmd;
2156 	desc->flow.qw1.rxr_bufsize = cpu_to_le16((u16)size);
2157 	desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag);
2158 }
2159 
2160 /**
2161  * idpf_tx_maybe_stop_common - 1st level check for common Tx stop conditions
2162  * @tx_q: the queue to be checked
2163  * @size: number of descriptors we want to ensure are available
2164  *
2165  * Returns 0 if stop is not needed
2166  */
2167 int idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, unsigned int size)
2168 {
2169 	struct netdev_queue *nq;
2170 
2171 	if (likely(IDPF_DESC_UNUSED(tx_q) >= size))
2172 		return 0;
2173 
2174 	u64_stats_update_begin(&tx_q->stats_sync);
2175 	u64_stats_inc(&tx_q->q_stats.q_busy);
2176 	u64_stats_update_end(&tx_q->stats_sync);
2177 
2178 	nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
2179 
2180 	return netif_txq_maybe_stop(nq, IDPF_DESC_UNUSED(tx_q), size, size);
2181 }
2182 
2183 /**
2184  * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
2185  * @tx_q: the queue to be checked
2186  * @descs_needed: number of descriptors required for this packet
2187  *
2188  * Returns 0 if stop is not needed
2189  */
2190 static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
2191 				     unsigned int descs_needed)
2192 {
2193 	if (idpf_tx_maybe_stop_common(tx_q, descs_needed))
2194 		goto splitq_stop;
2195 
2196 	/* If there are too many outstanding completions expected on the
2197 	 * completion queue, stop the TX queue to give the device some time to
2198 	 * catch up
2199 	 */
2200 	if (unlikely(IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
2201 		     IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq)))
2202 		goto splitq_stop;
2203 
2204 	/* Also check for available bookkeeping buffers; if we are low, stop
2205 	 * the queue to wait for more completions
2206 	 */
2207 	if (unlikely(IDPF_TX_BUF_RSV_LOW(tx_q)))
2208 		goto splitq_stop;
2209 
2210 	return 0;
2211 
2212 splitq_stop:
2213 	u64_stats_update_begin(&tx_q->stats_sync);
2214 	u64_stats_inc(&tx_q->q_stats.q_busy);
2215 	u64_stats_update_end(&tx_q->stats_sync);
2216 	netif_stop_subqueue(tx_q->netdev, tx_q->idx);
2217 
2218 	return -EBUSY;
2219 }
2220 
2221 /**
2222  * idpf_tx_buf_hw_update - Store the new tail value
2223  * @tx_q: queue to bump
2224  * @val: new tail index
2225  * @xmit_more: more skbs pending
2226  *
2227  * The naming here is special in that 'hw' signals that this function is about
2228  * to do a register write to update our queue status. We know this can only
2229  * mean tail here as HW should be owning head for TX.
2230  */
2231 void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
2232 			   bool xmit_more)
2233 {
2234 	struct netdev_queue *nq;
2235 
2236 	nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
2237 	tx_q->next_to_use = val;
2238 
2239 	idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED);
2240 
2241 	/* Force memory writes to complete before letting h/w
2242 	 * know there are new descriptors to fetch.  (Only
2243 	 * applicable for weak-ordered memory model archs,
2244 	 * such as IA-64).
2245 	 */
2246 	wmb();
2247 
2248 	/* notify HW of packet */
2249 	if (netif_xmit_stopped(nq) || !xmit_more)
2250 		writel(val, tx_q->tail);
2251 }
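
/* Illustrative sketch (not part of the driver): the doorbell batching policy
 * applied above. The tail register is only written when the stack indicates
 * no further frames are queued (!xmit_more) or when the queue has been
 * stopped, so a burst of skbs results in a single MMIO write. The helper name
 * below is hypothetical.
 */
#if 0	/* standalone example, not compiled with the driver */
#include <stdbool.h>
#include <stdio.h>

/* hypothetical stand-in for the writel(val, tail) decision */
static bool should_ring_doorbell(bool queue_stopped, bool xmit_more)
{
	return queue_stopped || !xmit_more;
}

int main(void)
{
	/* three frames of a burst: only the last one rings the doorbell */
	printf("%d %d %d\n",
	       should_ring_doorbell(false, true),
	       should_ring_doorbell(false, true),
	       should_ring_doorbell(false, false));
	return 0;
}
#endif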
2252 
2253 /**
2254  * idpf_tx_desc_count_required - calculate number of Tx descriptors needed
2255  * @txq: queue to send buffer on
2256  * @skb: send buffer
2257  *
2258  * Returns number of data descriptors needed for this skb.
2259  */
2260 unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
2261 					 struct sk_buff *skb)
2262 {
2263 	const struct skb_shared_info *shinfo;
2264 	unsigned int count = 0, i;
2265 
2266 	count += !!skb_headlen(skb);
2267 
2268 	if (!skb_is_nonlinear(skb))
2269 		return count;
2270 
2271 	shinfo = skb_shinfo(skb);
2272 	for (i = 0; i < shinfo->nr_frags; i++) {
2273 		unsigned int size;
2274 
2275 		size = skb_frag_size(&shinfo->frags[i]);
2276 
2277 		/* We only need to use the idpf_size_to_txd_count check if the
2278 		 * fragment is going to span multiple descriptors,
2279 		 * i.e. size >= 16K.
2280 		 */
2281 		if (size >= SZ_16K)
2282 			count += idpf_size_to_txd_count(size);
2283 		else
2284 			count++;
2285 	}
2286 
2287 	if (idpf_chk_linearize(skb, txq->tx_max_bufs, count)) {
2288 		if (__skb_linearize(skb))
2289 			return 0;
2290 
2291 		count = idpf_size_to_txd_count(skb->len);
2292 		u64_stats_update_begin(&txq->stats_sync);
2293 		u64_stats_inc(&txq->q_stats.linearize);
2294 		u64_stats_update_end(&txq->stats_sync);
2295 	}
2296 
2297 	return count;
2298 }
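
/* Illustrative sketch (not part of the driver): estimating how many data
 * descriptors a large fragment consumes, assuming (as described in
 * idpf_tx_splitq_map() below) that oversized fragments are carved into
 * 12K-sized chunks. The rounding here mirrors what idpf_size_to_txd_count()
 * is expected to do; the exact macro definition is not shown in this section.
 */
#if 0	/* standalone example, not compiled with the driver */
#include <stdio.h>

#define CHUNK_12K	(12 * 1024)

static unsigned int txd_count_estimate(unsigned int size)
{
	return (size + CHUNK_12K - 1) / CHUNK_12K;	/* DIV_ROUND_UP */
}

int main(void)
{
	/* a 32K fragment needs 3 descriptors, matching the worked example
	 * in idpf_tx_splitq_map()
	 */
	printf("32K -> %u descriptors\n", txd_count_estimate(32 * 1024));
	printf("8K  -> %u descriptor(s)\n", txd_count_estimate(8 * 1024));
	return 0;
}
#endif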
2299 
2300 /**
2301  * idpf_tx_dma_map_error - handle TX DMA map errors
2302  * @txq: queue to send buffer on
2303  * @skb: send buffer
2304  * @first: original first buffer info buffer for packet
2305  * @idx: starting point on ring to unwind
2306  */
2307 void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
2308 			   struct idpf_tx_buf *first, u16 idx)
2309 {
2310 	u64_stats_update_begin(&txq->stats_sync);
2311 	u64_stats_inc(&txq->q_stats.dma_map_errs);
2312 	u64_stats_update_end(&txq->stats_sync);
2313 
2314 	/* clear dma mappings for failed tx_buf map */
2315 	for (;;) {
2316 		struct idpf_tx_buf *tx_buf;
2317 
2318 		tx_buf = &txq->tx_buf[idx];
2319 		idpf_tx_buf_rel(txq, tx_buf);
2320 		if (tx_buf == first)
2321 			break;
2322 		if (idx == 0)
2323 			idx = txq->desc_count;
2324 		idx--;
2325 	}
2326 
2327 	if (skb_is_gso(skb)) {
2328 		union idpf_tx_flex_desc *tx_desc;
2329 
2330 		/* If we failed a DMA mapping for a TSO packet, we will have
2331 		 * used one additional descriptor for a context
2332 		 * descriptor. Reset that here.
2333 		 */
2334 		tx_desc = &txq->flex_tx[idx];
2335 		memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc));
2336 		if (idx == 0)
2337 			idx = txq->desc_count;
2338 		idx--;
2339 	}
2340 
2341 	/* Update tail in case netdev_xmit_more was previously true */
2342 	idpf_tx_buf_hw_update(txq, idx, false);
2343 }
2344 
2345 /**
2346  * idpf_tx_splitq_bump_ntu - adjust NTU and generation
2347  * @txq: the tx ring to wrap
2348  * @ntu: ring index to bump
2349  */
2350 static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_tx_queue *txq, u16 ntu)
2351 {
2352 	ntu++;
2353 
2354 	if (ntu == txq->desc_count) {
2355 		ntu = 0;
2356 		txq->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(txq);
2357 	}
2358 
2359 	return ntu;
2360 }
2361 
2362 /**
2363  * idpf_tx_splitq_map - Build the Tx flex descriptor
2364  * @tx_q: queue to send buffer on
2365  * @params: pointer to splitq params struct
2366  * @first: first buffer info buffer to use
2367  *
2368  * This function loops over the skb data pointed to by *first
2369  * and gets a physical address for each memory location and programs
2370  * it and the length into the transmit flex descriptor.
2371  */
2372 static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
2373 			       struct idpf_tx_splitq_params *params,
2374 			       struct idpf_tx_buf *first)
2375 {
2376 	union idpf_tx_flex_desc *tx_desc;
2377 	unsigned int data_len, size;
2378 	struct idpf_tx_buf *tx_buf;
2379 	u16 i = tx_q->next_to_use;
2380 	struct netdev_queue *nq;
2381 	struct sk_buff *skb;
2382 	skb_frag_t *frag;
2383 	u16 td_cmd = 0;
2384 	dma_addr_t dma;
2385 
2386 	skb = first->skb;
2387 
2388 	td_cmd = params->offload.td_cmd;
2389 
2390 	data_len = skb->data_len;
2391 	size = skb_headlen(skb);
2392 
2393 	tx_desc = &tx_q->flex_tx[i];
2394 
2395 	dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
2396 
2397 	tx_buf = first;
2398 
2399 	params->compl_tag =
2400 		(tx_q->compl_tag_cur_gen << tx_q->compl_tag_gen_s) | i;
2401 
2402 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
2403 		unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
2404 
2405 		if (dma_mapping_error(tx_q->dev, dma))
2406 			return idpf_tx_dma_map_error(tx_q, skb, first, i);
2407 
2408 		tx_buf->compl_tag = params->compl_tag;
2409 
2410 		/* record length, and DMA address */
2411 		dma_unmap_len_set(tx_buf, len, size);
2412 		dma_unmap_addr_set(tx_buf, dma, dma);
2413 
2414 		/* buf_addr is in same location for both desc types */
2415 		tx_desc->q.buf_addr = cpu_to_le64(dma);
2416 
2417 		/* The stack can send us fragments that are too large for a
2418 		 * single descriptor i.e. frag size > 16K-1. We will need to
2419 		 * split the fragment across multiple descriptors in this case.
2420 		 * To adhere to HW alignment restrictions, the fragment needs
2421 		 * to be split such that the first chunk ends on a 4K boundary
2422 		 * and all subsequent chunks start on a 4K boundary. We still
2423 		 * want to send as much data as possible though, so our
2424 		 * intermediate descriptor chunk size will be 12K.
2425 		 *
2426 		 * For example, consider a 32K fragment mapped to DMA addr 2600.
2427 		 * ------------------------------------------------------------
2428 		 * |                    frag_size = 32K                       |
2429 		 * ------------------------------------------------------------
2430 		 * |2600		  |16384	    |28672
2431 		 *
2432 		 * 3 descriptors will be used for this fragment. The HW expects
2433 		 * the descriptors to contain the following:
2434 		 * ------------------------------------------------------------
2435 		 * | size = 13784         | size = 12K      | size = 6696     |
2436 		 * | dma = 2600           | dma = 16384     | dma = 28672     |
2437 		 * ------------------------------------------------------------
2438 		 *
2439 		 * We need to first adjust the max_data for the first chunk so
2440 		 * that it ends on a 4K boundary. By negating the value of the
2441 		 * DMA address and taking only the low order bits, we're
2442 		 * effectively calculating
2443 		 *	4K - (DMA addr lower order bits) =
2444 		 *				bytes to next boundary.
2445 		 *
2446 		 * Add that to our base aligned max_data (12K) and we have
2447 		 * our first chunk size. In the example above,
2448 		 *	13784 = 12K + (4096-2600)
2449 		 *
2450 		 * After guaranteeing the first chunk ends on a 4K boundary, we
2451 		 * will give the intermediate descriptors 12K chunks and
2452 		 * whatever is left to the final descriptor. This ensures that
2453 		 * all descriptors used for the remaining chunks of the
2454 		 * fragment start on a 4K boundary and we use as few
2455 		 * descriptors as possible.
2456 		 */
2457 		max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1);
2458 		while (unlikely(size > IDPF_TX_MAX_DESC_DATA)) {
2459 			idpf_tx_splitq_build_desc(tx_desc, params, td_cmd,
2460 						  max_data);
2461 
2462 			tx_desc++;
2463 			i++;
2464 
2465 			if (i == tx_q->desc_count) {
2466 				tx_desc = &tx_q->flex_tx[0];
2467 				i = 0;
2468 				tx_q->compl_tag_cur_gen =
2469 					IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
2470 			}
2471 
2472 			/* Since this packet has a buffer that is going to span
2473 			 * multiple descriptors, it's going to leave holes in
2474 			 * the TX buffer ring. To ensure these holes do not
2475 			 * cause issues in the cleaning routines, we will clear
2476 			 * them of any stale data and assign them the same
2477 			 * completion tag as the current packet. Then when the
2478 			 * packet is being cleaned, the cleaning routines will
2479 			 * simply pass over these holes and finish cleaning the
2480 			 * rest of the packet.
2481 			 */
2482 			memset(&tx_q->tx_buf[i], 0, sizeof(struct idpf_tx_buf));
2483 			tx_q->tx_buf[i].compl_tag = params->compl_tag;
2484 
2485 			/* Adjust the DMA offset and the remaining size of the
2486 			 * fragment.  On the first iteration of this loop,
2487 			 * max_data will be >= 12K and <= 16K-1.  On any
2488 			 * subsequent iteration of this loop, max_data will
2489 			 * always be 12K.
2490 			 */
2491 			dma += max_data;
2492 			size -= max_data;
2493 
2494 			/* Reset max_data since remaining chunks will be 12K
2495 			 * at most
2496 			 */
2497 			max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
2498 
2499 			/* buf_addr is in same location for both desc types */
2500 			tx_desc->q.buf_addr = cpu_to_le64(dma);
2501 		}
2502 
2503 		if (!data_len)
2504 			break;
2505 
2506 		idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
2507 		tx_desc++;
2508 		i++;
2509 
2510 		if (i == tx_q->desc_count) {
2511 			tx_desc = &tx_q->flex_tx[0];
2512 			i = 0;
2513 			tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
2514 		}
2515 
2516 		size = skb_frag_size(frag);
2517 		data_len -= size;
2518 
2519 		dma = skb_frag_dma_map(tx_q->dev, frag, 0, size,
2520 				       DMA_TO_DEVICE);
2521 
2522 		tx_buf = &tx_q->tx_buf[i];
2523 	}
2524 
2525 	/* record SW timestamp if HW timestamp is not available */
2526 	skb_tx_timestamp(skb);
2527 
2528 	/* write last descriptor with RS and EOP bits */
2529 	td_cmd |= params->eop_cmd;
2530 	idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
2531 	i = idpf_tx_splitq_bump_ntu(tx_q, i);
2532 
2533 	/* set next_to_watch value indicating a packet is present */
2534 	first->next_to_watch = tx_desc;
2535 
2536 	tx_q->txq_grp->num_completions_pending++;
2537 
2538 	/* record bytecount for BQL */
2539 	nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
2540 	netdev_tx_sent_queue(nq, first->bytecount);
2541 
2542 	idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
2543 }
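
/* Illustrative sketch (not part of the driver): the chunking arithmetic from
 * the comment in idpf_tx_splitq_map() above. A fragment larger than a single
 * descriptor can carry (16K-1) gets a first chunk stretched to end on a 4K
 * boundary (12K base plus the distance from the DMA address to the next 4K
 * boundary), 12K intermediate chunks, and the leftover in the last
 * descriptor. The 32K/2600 numbers reproduce the worked example above.
 */
#if 0	/* standalone example, not compiled with the driver */
#include <stdio.h>

#define ALIGNED_12K	(12 * 1024)
#define READ_REQ_4K	(4 * 1024)
#define MAX_DESC_DATA	(16 * 1024 - 1)

int main(void)
{
	unsigned long dma = 2600;
	unsigned int size = 32 * 1024;
	unsigned int max_data = ALIGNED_12K;

	/* stretch the first chunk to the next 4K boundary: 13784 here */
	max_data += (unsigned int)(-dma & (READ_REQ_4K - 1));

	while (size > MAX_DESC_DATA) {
		printf("chunk of %u bytes at dma %lu\n", max_data, dma);
		dma += max_data;
		size -= max_data;
		max_data = ALIGNED_12K;	/* intermediate chunks are 12K */
	}
	printf("final chunk of %u bytes at dma %lu\n", size, dma);
	return 0;
}
#endif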
2544 
2545 /**
2546  * idpf_tso - computes mss and TSO length to prepare for TSO
2547  * @skb: pointer to skb
2548  * @off: pointer to struct that holds offload parameters
2549  *
2550  * Returns error (negative) if TSO was requested but cannot be applied to the
2551  * given skb, 0 if TSO does not apply to the given skb, or 1 otherwise.
2552  */
2553 int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off)
2554 {
2555 	const struct skb_shared_info *shinfo;
2556 	union {
2557 		struct iphdr *v4;
2558 		struct ipv6hdr *v6;
2559 		unsigned char *hdr;
2560 	} ip;
2561 	union {
2562 		struct tcphdr *tcp;
2563 		struct udphdr *udp;
2564 		unsigned char *hdr;
2565 	} l4;
2566 	u32 paylen, l4_start;
2567 	int err;
2568 
2569 	if (!skb_is_gso(skb))
2570 		return 0;
2571 
2572 	err = skb_cow_head(skb, 0);
2573 	if (err < 0)
2574 		return err;
2575 
2576 	shinfo = skb_shinfo(skb);
2577 
2578 	ip.hdr = skb_network_header(skb);
2579 	l4.hdr = skb_transport_header(skb);
2580 
2581 	/* initialize outer IP header fields */
2582 	if (ip.v4->version == 4) {
2583 		ip.v4->tot_len = 0;
2584 		ip.v4->check = 0;
2585 	} else if (ip.v6->version == 6) {
2586 		ip.v6->payload_len = 0;
2587 	}
2588 
2589 	l4_start = skb_transport_offset(skb);
2590 
2591 	/* remove payload length from checksum */
2592 	paylen = skb->len - l4_start;
2593 
2594 	switch (shinfo->gso_type & ~SKB_GSO_DODGY) {
2595 	case SKB_GSO_TCPV4:
2596 	case SKB_GSO_TCPV6:
2597 		csum_replace_by_diff(&l4.tcp->check,
2598 				     (__force __wsum)htonl(paylen));
2599 		off->tso_hdr_len = __tcp_hdrlen(l4.tcp) + l4_start;
2600 		break;
2601 	case SKB_GSO_UDP_L4:
2602 		csum_replace_by_diff(&l4.udp->check,
2603 				     (__force __wsum)htonl(paylen));
2604 		/* compute length of segmentation header */
2605 		off->tso_hdr_len = sizeof(struct udphdr) + l4_start;
2606 		l4.udp->len = htons(shinfo->gso_size + sizeof(struct udphdr));
2607 		break;
2608 	default:
2609 		return -EINVAL;
2610 	}
2611 
2612 	off->tso_len = skb->len - off->tso_hdr_len;
2613 	off->mss = shinfo->gso_size;
2614 	off->tso_segs = shinfo->gso_segs;
2615 
2616 	off->tx_flags |= IDPF_TX_FLAGS_TSO;
2617 
2618 	return 1;
2619 }
2620 
2621 /**
2622  * __idpf_chk_linearize - Check skb is not using too many buffers
2623  * @skb: send buffer
2624  * @max_bufs: maximum number of buffers
2625  *
2626  * For TSO we need to count the TSO header and segment payload separately.  As
2627  * such we need to check cases where we have max_bufs-1 fragments or more as we
2628  * can potentially require max_bufs+1 DMA transactions, 1 for the TSO header, 1
2629  * for the segment payload in the first descriptor, and another max_bufs-1 for
2630  * the fragments.
2631  */
2632 static bool __idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs)
2633 {
2634 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
2635 	const skb_frag_t *frag, *stale;
2636 	int nr_frags, sum;
2637 
2638 	/* no need to check if number of frags is less than max_bufs - 1 */
2639 	nr_frags = shinfo->nr_frags;
2640 	if (nr_frags < (max_bufs - 1))
2641 		return false;
2642 
2643 	/* We need to walk through the list and validate that each group
2644 	 * of max_bufs-2 fragments totals at least gso_size.
2645 	 */
2646 	nr_frags -= max_bufs - 2;
2647 	frag = &shinfo->frags[0];
2648 
2649 	/* Initialize sum to the negative value of gso_size minus 1.  We use
2650 	 * this as the worst case scenario in which the frag ahead of us only
2651 	 * provides one byte which is why we are limited to max_bufs-2
2652 	 * descriptors for a single transmit as the header and previous
2653 	 * fragment are already consuming 2 descriptors.
2654 	 */
2655 	sum = 1 - shinfo->gso_size;
2656 
2657 	/* Add size of frags 0 through 4 to create our initial sum */
2658 	sum += skb_frag_size(frag++);
2659 	sum += skb_frag_size(frag++);
2660 	sum += skb_frag_size(frag++);
2661 	sum += skb_frag_size(frag++);
2662 	sum += skb_frag_size(frag++);
2663 
2664 	/* Walk through fragments adding latest fragment, testing it, and
2665 	 * then removing stale fragments from the sum.
2666 	 */
2667 	for (stale = &shinfo->frags[0];; stale++) {
2668 		int stale_size = skb_frag_size(stale);
2669 
2670 		sum += skb_frag_size(frag++);
2671 
2672 		/* The stale fragment may present us with a smaller
2673 		 * descriptor than the actual fragment size. To account
2674 		 * for that we need to remove all the data on the front and
2675 		 * figure out what the remainder would be in the last
2676 		 * descriptor associated with the fragment.
2677 		 */
2678 		if (stale_size > IDPF_TX_MAX_DESC_DATA) {
2679 			int align_pad = -(skb_frag_off(stale)) &
2680 					(IDPF_TX_MAX_READ_REQ_SIZE - 1);
2681 
2682 			sum -= align_pad;
2683 			stale_size -= align_pad;
2684 
2685 			do {
2686 				sum -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
2687 				stale_size -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
2688 			} while (stale_size > IDPF_TX_MAX_DESC_DATA);
2689 		}
2690 
2691 		/* if sum is negative we failed to make sufficient progress */
2692 		if (sum < 0)
2693 			return true;
2694 
2695 		if (!nr_frags--)
2696 			break;
2697 
2698 		sum -= stale_size;
2699 	}
2700 
2701 	return false;
2702 }
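
/* Illustrative sketch (not part of the driver): the invariant the walk above
 * enforces, ignoring the per-descriptor alignment correction. Every window of
 * max_bufs - 2 consecutive fragments must contribute at least gso_size bytes;
 * otherwise one segment could need more than max_bufs descriptors and the skb
 * has to be linearized. The helper name and sample values are made up.
 */
#if 0	/* standalone example, not compiled with the driver */
#include <stdbool.h>
#include <stdio.h>

static bool needs_linearize(const unsigned int *frag_sz, int nr_frags,
			    int max_bufs, unsigned int gso_size)
{
	int win = max_bufs - 2;

	if (nr_frags < max_bufs - 1)
		return false;

	for (int start = 0; start + win <= nr_frags; start++) {
		unsigned int sum = 0;

		for (int i = start; i < start + win; i++)
			sum += frag_sz[i];
		if (sum < gso_size)
			return true;	/* window too small for one segment */
	}
	return false;
}

int main(void)
{
	unsigned int tiny[8] = { 256, 256, 256, 256, 256, 256, 256, 256 };

	/* eight 256-byte frags, 4K gso_size, 8 max bufs: must linearize */
	printf("%d\n", needs_linearize(tiny, 8, 8, 4096));
	return 0;
}
#endif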
2703 
2704 /**
2705  * idpf_chk_linearize - Check if skb exceeds max descriptors per packet
2706  * @skb: send buffer
2707  * @max_bufs: maximum scatter gather buffers for single packet
2708  * @count: number of buffers this packet needs
2709  *
2710  * Make sure we don't exceed maximum scatter gather buffers for a single
2711  * packet. We have to do some special checking around the boundary (max_bufs-1)
2712  * if TSO is on since we need to count the TSO header and payload separately.
2713  * E.g.: a packet with 7 fragments can require 9 DMA transactions; 1 for TSO
2714  * header, 1 for segment payload, and then 7 for the fragments.
2715  */
2716 static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
2717 			       unsigned int count)
2718 {
2719 	if (likely(count < max_bufs))
2720 		return false;
2721 	if (skb_is_gso(skb))
2722 		return __idpf_chk_linearize(skb, max_bufs);
2723 
2724 	return count > max_bufs;
2725 }
2726 
2727 /**
2728  * idpf_tx_splitq_get_ctx_desc - grab next desc and update buffer ring
2729  * @txq: queue to put context descriptor on
2730  *
2731  * Since the TX buffer ring mimics the descriptor ring, update the tx buffer
2732  * ring entry to reflect that this index is a context descriptor
2733  */
2734 static struct idpf_flex_tx_ctx_desc *
2735 idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq)
2736 {
2737 	struct idpf_flex_tx_ctx_desc *desc;
2738 	int i = txq->next_to_use;
2739 
2740 	memset(&txq->tx_buf[i], 0, sizeof(struct idpf_tx_buf));
2741 	txq->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
2742 
2743 	/* grab the next descriptor */
2744 	desc = &txq->flex_ctx[i];
2745 	txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i);
2746 
2747 	return desc;
2748 }
2749 
2750 /**
2751  * idpf_tx_drop_skb - free the SKB and bump tail if necessary
2752  * @tx_q: queue to send buffer on
2753  * @skb: pointer to skb
2754  */
2755 netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb)
2756 {
2757 	u64_stats_update_begin(&tx_q->stats_sync);
2758 	u64_stats_inc(&tx_q->q_stats.skb_drops);
2759 	u64_stats_update_end(&tx_q->stats_sync);
2760 
2761 	idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
2762 
2763 	dev_kfree_skb(skb);
2764 
2765 	return NETDEV_TX_OK;
2766 }
2767 
2768 /**
2769  * idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors
2770  * @skb: send buffer
2771  * @tx_q: queue to send buffer on
2772  *
2773  * Returns NETDEV_TX_OK if sent, else an error code
2774  */
2775 static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
2776 					struct idpf_tx_queue *tx_q)
2777 {
2778 	struct idpf_tx_splitq_params tx_params = { };
2779 	struct idpf_tx_buf *first;
2780 	unsigned int count;
2781 	int tso;
2782 
2783 	count = idpf_tx_desc_count_required(tx_q, skb);
2784 	if (unlikely(!count))
2785 		return idpf_tx_drop_skb(tx_q, skb);
2786 
2787 	tso = idpf_tso(skb, &tx_params.offload);
2788 	if (unlikely(tso < 0))
2789 		return idpf_tx_drop_skb(tx_q, skb);
2790 
2791 	/* Check for splitq specific TX resources */
2792 	count += (IDPF_TX_DESCS_PER_CACHE_LINE + tso);
2793 	if (idpf_tx_maybe_stop_splitq(tx_q, count)) {
2794 		idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
2795 
2796 		return NETDEV_TX_BUSY;
2797 	}
2798 
2799 	if (tso) {
2800 		/* If tso is needed, set up context desc */
2801 		struct idpf_flex_tx_ctx_desc *ctx_desc =
2802 			idpf_tx_splitq_get_ctx_desc(tx_q);
2803 
2804 		ctx_desc->tso.qw1.cmd_dtype =
2805 				cpu_to_le16(IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX |
2806 					    IDPF_TX_FLEX_CTX_DESC_CMD_TSO);
2807 		ctx_desc->tso.qw0.flex_tlen =
2808 				cpu_to_le32(tx_params.offload.tso_len &
2809 					    IDPF_TXD_FLEX_CTX_TLEN_M);
2810 		ctx_desc->tso.qw0.mss_rt =
2811 				cpu_to_le16(tx_params.offload.mss &
2812 					    IDPF_TXD_FLEX_CTX_MSS_RT_M);
2813 		ctx_desc->tso.qw0.hdr_len = tx_params.offload.tso_hdr_len;
2814 
2815 		u64_stats_update_begin(&tx_q->stats_sync);
2816 		u64_stats_inc(&tx_q->q_stats.lso_pkts);
2817 		u64_stats_update_end(&tx_q->stats_sync);
2818 	}
2819 
2820 	/* record the location of the first descriptor for this packet */
2821 	first = &tx_q->tx_buf[tx_q->next_to_use];
2822 	first->skb = skb;
2823 
2824 	if (tso) {
2825 		first->gso_segs = tx_params.offload.tso_segs;
2826 		first->bytecount = skb->len +
2827 			((first->gso_segs - 1) * tx_params.offload.tso_hdr_len);
2828 	} else {
2829 		first->gso_segs = 1;
2830 		first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
2831 	}
2832 
2833 	if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
2834 		tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE;
2835 		tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP;
2836 		/* Set the RE bit to catch any packets that may have not been
2837 		 * stashed during RS completion cleaning. MIN_GAP is set to
2838 		 * MIN_RING size to ensure it will be set at least once each
2839 		 * time around the ring.
2840 		 */
2841 		if (!(tx_q->next_to_use % IDPF_TX_SPLITQ_RE_MIN_GAP)) {
2842 			tx_params.eop_cmd |= IDPF_TXD_FLEX_FLOW_CMD_RE;
2843 			tx_q->txq_grp->num_completions_pending++;
2844 		}
2845 
2846 		if (skb->ip_summed == CHECKSUM_PARTIAL)
2847 			tx_params.offload.td_cmd |= IDPF_TXD_FLEX_FLOW_CMD_CS_EN;
2848 
2849 	} else {
2850 		tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2;
2851 		tx_params.eop_cmd = IDPF_TXD_LAST_DESC_CMD;
2852 
2853 		if (skb->ip_summed == CHECKSUM_PARTIAL)
2854 			tx_params.offload.td_cmd |= IDPF_TX_FLEX_DESC_CMD_CS_EN;
2855 	}
2856 
2857 	idpf_tx_splitq_map(tx_q, &tx_params, first);
2858 
2859 	return NETDEV_TX_OK;
2860 }
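
/* Illustrative sketch (not part of the driver): the BQL bytecount computed in
 * idpf_tx_splitq_frame() above for a TSO skb. Each segment after the first
 * re-transmits the header, so the on-wire byte count is skb->len plus
 * (gso_segs - 1) copies of the header. The numbers below are arbitrary.
 */
#if 0	/* standalone example, not compiled with the driver */
#include <stdio.h>

int main(void)
{
	unsigned int skb_len = 64000, hdr_len = 66, gso_segs = 45;

	printf("wire bytes = %u\n", skb_len + (gso_segs - 1) * hdr_len);
	return 0;
}
#endif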
2861 
2862 /**
2863  * idpf_tx_start - Selects the right Tx queue to send buffer
2864  * @skb: send buffer
2865  * @netdev: network interface device structure
2866  *
2867  * Returns NETDEV_TX_OK if sent, else an error code
2868  */
2869 netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev)
2870 {
2871 	struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
2872 	struct idpf_tx_queue *tx_q;
2873 
2874 	if (unlikely(skb_get_queue_mapping(skb) >= vport->num_txq)) {
2875 		dev_kfree_skb_any(skb);
2876 
2877 		return NETDEV_TX_OK;
2878 	}
2879 
2880 	tx_q = vport->txqs[skb_get_queue_mapping(skb)];
2881 
2882 	/* hardware can't handle really short frames; hardware padding works
2883 	 * beyond this point
2884 	 */
2885 	if (skb_put_padto(skb, tx_q->tx_min_pkt_len)) {
2886 		idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
2887 
2888 		return NETDEV_TX_OK;
2889 	}
2890 
2891 	if (idpf_is_queue_model_split(vport->txq_model))
2892 		return idpf_tx_splitq_frame(skb, tx_q);
2893 	else
2894 		return idpf_tx_singleq_frame(skb, tx_q);
2895 }
2896 
2897 /**
2898  * idpf_rx_hash - set the hash value in the skb
2899  * @rxq: Rx descriptor ring packet is being transacted on
2900  * @skb: pointer to current skb being populated
2901  * @rx_desc: Receive descriptor
2902  * @decoded: Decoded Rx packet type related fields
2903  */
2904 static void
2905 idpf_rx_hash(const struct idpf_rx_queue *rxq, struct sk_buff *skb,
2906 	     const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
2907 	     struct libeth_rx_pt decoded)
2908 {
2909 	u32 hash;
2910 
2911 	if (!libeth_rx_pt_has_hash(rxq->netdev, decoded))
2912 		return;
2913 
2914 	hash = le16_to_cpu(rx_desc->hash1) |
2915 	       (rx_desc->ff2_mirrid_hash2.hash2 << 16) |
2916 	       (rx_desc->hash3 << 24);
2917 
2918 	libeth_rx_pt_set_hash(skb, hash, decoded);
2919 }
2920 
2921 /**
2922  * idpf_rx_csum - Indicate in skb if checksum is good
2923  * @rxq: Rx descriptor ring packet is being transacted on
2924  * @skb: pointer to current skb being populated
2925  * @csum_bits: checksum fields extracted from the descriptor
2926  * @decoded: Decoded Rx packet type related fields
2927  *
2928  * skb->protocol must be set before this function is called
2929  */
2930 static void idpf_rx_csum(struct idpf_rx_queue *rxq, struct sk_buff *skb,
2931 			 struct idpf_rx_csum_decoded csum_bits,
2932 			 struct libeth_rx_pt decoded)
2933 {
2934 	bool ipv4, ipv6;
2935 
2936 	/* check if Rx checksum is enabled */
2937 	if (!libeth_rx_pt_has_checksum(rxq->netdev, decoded))
2938 		return;
2939 
2940 	/* check if HW has decoded the packet and checksum */
2941 	if (unlikely(!csum_bits.l3l4p))
2942 		return;
2943 
2944 	ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
2945 	ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
2946 
2947 	if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe)))
2948 		goto checksum_fail;
2949 
2950 	if (unlikely(ipv6 && csum_bits.ipv6exadd))
2951 		return;
2952 
2953 	/* check for L4 errors and handle packets that were not able to be
2954 	 * checksummed
2955 	 */
2956 	if (unlikely(csum_bits.l4e))
2957 		goto checksum_fail;
2958 
2959 	if (csum_bits.raw_csum_inv ||
2960 	    decoded.inner_prot == LIBETH_RX_PT_INNER_SCTP) {
2961 		skb->ip_summed = CHECKSUM_UNNECESSARY;
2962 		return;
2963 	}
2964 
2965 	skb->csum = csum_unfold((__force __sum16)~swab16(csum_bits.raw_csum));
2966 	skb->ip_summed = CHECKSUM_COMPLETE;
2967 
2968 	return;
2969 
2970 checksum_fail:
2971 	u64_stats_update_begin(&rxq->stats_sync);
2972 	u64_stats_inc(&rxq->q_stats.hw_csum_err);
2973 	u64_stats_update_end(&rxq->stats_sync);
2974 }
2975 
2976 /**
2977  * idpf_rx_splitq_extract_csum_bits - Extract checksum bits from descriptor
2978  * @rx_desc: receive descriptor
2979  *
2980  * Return: parsed checksum status.
2981  **/
2982 static struct idpf_rx_csum_decoded
2983 idpf_rx_splitq_extract_csum_bits(const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
2984 {
2985 	struct idpf_rx_csum_decoded csum = { };
2986 	u8 qword0, qword1;
2987 
2988 	qword0 = rx_desc->status_err0_qw0;
2989 	qword1 = rx_desc->status_err0_qw1;
2990 
2991 	csum.ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M,
2992 			     qword1);
2993 	csum.eipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M,
2994 			      qword1);
2995 	csum.l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M,
2996 			     qword1);
2997 	csum.l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M,
2998 			       qword1);
2999 	csum.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M,
3000 				   qword0);
3001 	csum.raw_csum_inv =
3002 		le16_get_bits(rx_desc->ptype_err_fflags0,
3003 			      VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M);
3004 	csum.raw_csum = le16_to_cpu(rx_desc->misc.raw_cs);
3005 
3006 	return csum;
3007 }
3008 
3009 /**
3010  * idpf_rx_rsc - Set the RSC fields in the skb
3011  * @rxq : Rx descriptor ring packet is being transacted on
3012  * @skb : pointer to current skb being populated
3013  * @rx_desc: Receive descriptor
3014  * @decoded: Decoded Rx packet type related fields
3015  *
3016  * Return 0 on success and error code on failure
3017  *
3018  * Populate the skb fields with the total number of RSC segments, RSC payload
3019  * length and packet type.
3020  */
3021 static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
3022 		       const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
3023 		       struct libeth_rx_pt decoded)
3024 {
3025 	u16 rsc_segments, rsc_seg_len;
3026 	bool ipv4, ipv6;
3027 	int len;
3028 
3029 	if (unlikely(libeth_rx_pt_get_ip_ver(decoded) ==
3030 		     LIBETH_RX_PT_OUTER_L2))
3031 		return -EINVAL;
3032 
3033 	rsc_seg_len = le16_to_cpu(rx_desc->misc.rscseglen);
3034 	if (unlikely(!rsc_seg_len))
3035 		return -EINVAL;
3036 
3037 	ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
3038 	ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
3039 
3040 	if (unlikely(!(ipv4 ^ ipv6)))
3041 		return -EINVAL;
3042 
3043 	rsc_segments = DIV_ROUND_UP(skb->data_len, rsc_seg_len);
3044 	if (unlikely(rsc_segments == 1))
3045 		return 0;
3046 
3047 	NAPI_GRO_CB(skb)->count = rsc_segments;
3048 	skb_shinfo(skb)->gso_size = rsc_seg_len;
3049 
3050 	skb_reset_network_header(skb);
3051 	len = skb->len - skb_transport_offset(skb);
3052 
3053 	if (ipv4) {
3054 		struct iphdr *ipv4h = ip_hdr(skb);
3055 
3056 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
3057 
3058 		/* Reset and set transport header offset in skb */
3059 		skb_set_transport_header(skb, sizeof(struct iphdr));
3060 
3061 		/* Compute the TCP pseudo header checksum */
3062 		tcp_hdr(skb)->check =
3063 			~tcp_v4_check(len, ipv4h->saddr, ipv4h->daddr, 0);
3064 	} else {
3065 		struct ipv6hdr *ipv6h = ipv6_hdr(skb);
3066 
3067 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
3068 		skb_set_transport_header(skb, sizeof(struct ipv6hdr));
3069 		tcp_hdr(skb)->check =
3070 			~tcp_v6_check(len, &ipv6h->saddr, &ipv6h->daddr, 0);
3071 	}
3072 
3073 	tcp_gro_complete(skb);
3074 
3075 	u64_stats_update_begin(&rxq->stats_sync);
3076 	u64_stats_inc(&rxq->q_stats.rsc_pkts);
3077 	u64_stats_update_end(&rxq->stats_sync);
3078 
3079 	return 0;
3080 }
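
/* Illustrative sketch (not part of the driver): how the RSC segment count is
 * derived in idpf_rx_rsc() above. The total coalesced payload length is
 * divided by the per-segment length reported in the descriptor, rounding up.
 * The sample lengths are arbitrary.
 */
#if 0	/* standalone example, not compiled with the driver */
#include <stdio.h>

int main(void)
{
	unsigned int data_len = 20000, seg_len = 1448;
	unsigned int segs = (data_len + seg_len - 1) / seg_len; /* DIV_ROUND_UP */

	printf("%u RSC segments\n", segs);	/* 14 */
	return 0;
}
#endif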
3081 
3082 /**
3083  * idpf_rx_process_skb_fields - Populate skb header fields from Rx descriptor
3084  * @rxq: Rx descriptor ring packet is being transacted on
3085  * @skb: pointer to current skb being populated
3086  * @rx_desc: Receive descriptor
3087  *
3088  * This function checks the ring, descriptor, and packet information in
3089  * order to populate the hash, checksum, protocol, and
3090  * other fields within the skb.
3091  */
3092 static int
3093 idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
3094 			   const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
3095 {
3096 	struct idpf_rx_csum_decoded csum_bits;
3097 	struct libeth_rx_pt decoded;
3098 	u16 rx_ptype;
3099 
3100 	rx_ptype = le16_get_bits(rx_desc->ptype_err_fflags0,
3101 				 VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M);
3102 	decoded = rxq->rx_ptype_lkup[rx_ptype];
3103 
3104 	/* process RSS/hash */
3105 	idpf_rx_hash(rxq, skb, rx_desc, decoded);
3106 
3107 	skb->protocol = eth_type_trans(skb, rxq->netdev);
3108 
3109 	if (le16_get_bits(rx_desc->hdrlen_flags,
3110 			  VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M))
3111 		return idpf_rx_rsc(rxq, skb, rx_desc, decoded);
3112 
3113 	csum_bits = idpf_rx_splitq_extract_csum_bits(rx_desc);
3114 	idpf_rx_csum(rxq, skb, csum_bits, decoded);
3115 
3116 	skb_record_rx_queue(skb, rxq->idx);
3117 
3118 	return 0;
3119 }
3120 
3121 /**
3122  * idpf_rx_add_frag - Add contents of Rx buffer to sk_buff as a frag
3123  * @rx_buf: buffer containing page to add
3124  * @skb: sk_buff to place the data into
3125  * @size: packet length from rx_desc
3126  *
3127  * This function will add the data contained in rx_buf->page to the skb.
3128  * It will just attach the page as a frag to the skb.
3129  * The function will then update the page offset.
3130  */
3131 void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
3132 		      unsigned int size)
3133 {
3134 	u32 hr = rx_buf->page->pp->p.offset;
3135 
3136 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
3137 			rx_buf->offset + hr, size, rx_buf->truesize);
3138 }
3139 
3140 /**
3141  * idpf_rx_hsplit_wa - handle header buffer overflows and split errors
3142  * @hdr: Rx buffer for the headers
3143  * @buf: Rx buffer for the payload
3144  * @data_len: number of bytes received to the payload buffer
3145  *
3146  * When a header buffer overflow occurs or the HW was unable to parse the
3147  * packet type to perform header split, the whole frame gets placed to the
3148  * payload buffer. We can't build a valid skb around a payload buffer when
3149  * the header split is active since it doesn't reserve any head- or tailroom.
3150  * In that case, copy either the whole frame when it's short or just the
3151  * Ethernet header to the header buffer to be able to build an skb and adjust
3152  * the data offset in the payload buffer, IOW emulate the header split.
3153  *
3154  * Return: number of bytes copied to the header buffer.
3155  */
3156 static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr,
3157 			     struct libeth_fqe *buf, u32 data_len)
3158 {
3159 	u32 copy = data_len <= L1_CACHE_BYTES ? data_len : ETH_HLEN;
3160 	const void *src;
3161 	void *dst;
3162 
3163 	if (!libeth_rx_sync_for_cpu(buf, copy))
3164 		return 0;
3165 
3166 	dst = page_address(hdr->page) + hdr->offset + hdr->page->pp->p.offset;
3167 	src = page_address(buf->page) + buf->offset + buf->page->pp->p.offset;
3168 	memcpy(dst, src, LARGEST_ALIGN(copy));
3169 
3170 	buf->offset += copy;
3171 
3172 	return copy;
3173 }
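
/* Illustrative sketch (not part of the driver): the copy-size policy of the
 * header-split workaround above. Short frames are copied whole into the
 * header buffer; longer ones only get the Ethernet header copied, and the
 * payload buffer offset is advanced by the copied amount. A 64-byte L1 cache
 * line is assumed here; the helper name is hypothetical.
 */
#if 0	/* standalone example, not compiled with the driver */
#include <stdio.h>

#define CACHE_LINE	64	/* assumed L1_CACHE_BYTES */
#define ETH_HDR_LEN	14	/* ETH_HLEN */

static unsigned int hsplit_copy_len(unsigned int data_len)
{
	return data_len <= CACHE_LINE ? data_len : ETH_HDR_LEN;
}

int main(void)
{
	printf("%u %u\n", hsplit_copy_len(60), hsplit_copy_len(1500));
	return 0;
}
#endif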
3174 
3175 /**
3176  * idpf_rx_build_skb - Allocate skb and populate it from header buffer
3177  * @buf: Rx buffer to pull data from
3178  * @size: the length of the packet
3179  *
3180  * This function allocates an skb. It then populates it with the page data from
3181  * the current receive descriptor, taking care to set up the skb correctly.
3182  */
3183 struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size)
3184 {
3185 	u32 hr = buf->page->pp->p.offset;
3186 	struct sk_buff *skb;
3187 	void *va;
3188 
3189 	va = page_address(buf->page) + buf->offset;
3190 	prefetch(va + hr);
3191 
3192 	skb = napi_build_skb(va, buf->truesize);
3193 	if (unlikely(!skb))
3194 		return NULL;
3195 
3196 	skb_mark_for_recycle(skb);
3197 
3198 	skb_reserve(skb, hr);
3199 	__skb_put(skb, size);
3200 
3201 	return skb;
3202 }
3203 
3204 /**
3205  * idpf_rx_splitq_test_staterr - tests bits in Rx descriptor
3206  * status and error fields
3207  * @stat_err_field: field from descriptor to test bits in
3208  * @stat_err_bits: value to mask
3209  * Return: true if any of @stat_err_bits are set in @stat_err_field.
3210  */
3211 static bool idpf_rx_splitq_test_staterr(const u8 stat_err_field,
3212 					const u8 stat_err_bits)
3213 {
3214 	return !!(stat_err_field & stat_err_bits);
3215 }
3216 
3217 /**
3218  * idpf_rx_splitq_is_eop - process handling of EOP buffers
3219  * @rx_desc: Rx descriptor for current buffer
3220  *
3221  * If the buffer is an EOP buffer, this function exits returning true,
3222  * otherwise it returns false, indicating that this is a non-EOP buffer.
3223  */
3224 static bool idpf_rx_splitq_is_eop(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
3225 {
3226 	/* if we are the last buffer then there is nothing else to do */
3227 	return likely(idpf_rx_splitq_test_staterr(rx_desc->status_err0_qw1,
3228 						  IDPF_RXD_EOF_SPLITQ));
3229 }
3230 
3231 /**
3232  * idpf_rx_splitq_clean - Clean completed descriptors from Rx queue
3233  * @rxq: Rx descriptor queue to retrieve receive buffer queue
3234  * @budget: Total limit on number of packets to process
3235  *
3236  * This function provides a "bounce buffer" approach to Rx interrupt
3237  * processing. The advantage to this is that on systems that have
3238  * expensive overhead for IOMMU access this provides a means of avoiding
3239  * it by maintaining the mapping of the page to the system.
3240  *
3241  * Returns amount of work completed
3242  */
3243 static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
3244 {
3245 	int total_rx_bytes = 0, total_rx_pkts = 0;
3246 	struct idpf_buf_queue *rx_bufq = NULL;
3247 	struct sk_buff *skb = rxq->skb;
3248 	u16 ntc = rxq->next_to_clean;
3249 
3250 	/* Process Rx packets bounded by budget */
3251 	while (likely(total_rx_pkts < budget)) {
3252 		struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
3253 		struct libeth_fqe *hdr, *rx_buf = NULL;
3254 		struct idpf_sw_queue *refillq = NULL;
3255 		struct idpf_rxq_set *rxq_set = NULL;
3256 		unsigned int pkt_len = 0;
3257 		unsigned int hdr_len = 0;
3258 		u16 gen_id, buf_id = 0;
3259 		int bufq_id;
3260 		u8 rxdid;
3261 
3262 		/* get the Rx desc from Rx queue based on 'next_to_clean' */
3263 		rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb;
3264 
3265 		/* This memory barrier is needed to keep us from reading
3266 		 * any other fields out of the rx_desc
3267 		 */
3268 		dma_rmb();
3269 
3270 		/* if the descriptor isn't done, no work yet to do */
3271 		gen_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
3272 				       VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M);
3273 
3274 		if (idpf_queue_has(GEN_CHK, rxq) != gen_id)
3275 			break;
3276 
3277 		rxdid = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M,
3278 				  rx_desc->rxdid_ucast);
3279 		if (rxdid != VIRTCHNL2_RXDID_2_FLEX_SPLITQ) {
3280 			IDPF_RX_BUMP_NTC(rxq, ntc);
3281 			u64_stats_update_begin(&rxq->stats_sync);
3282 			u64_stats_inc(&rxq->q_stats.bad_descs);
3283 			u64_stats_update_end(&rxq->stats_sync);
3284 			continue;
3285 		}
3286 
3287 		pkt_len = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
3288 					VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M);
3289 
3290 		bufq_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
3291 					VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M);
3292 
3293 		rxq_set = container_of(rxq, struct idpf_rxq_set, rxq);
3294 		refillq = rxq_set->refillq[bufq_id];
3295 
3296 		/* retrieve buffer from the rxq */
3297 		rx_bufq = &rxq->bufq_sets[bufq_id].bufq;
3298 
3299 		buf_id = le16_to_cpu(rx_desc->buf_id);
3300 
3301 		rx_buf = &rx_bufq->buf[buf_id];
3302 
3303 		if (!rx_bufq->hdr_pp)
3304 			goto payload;
3305 
3306 #define __HBO_BIT	VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M
3307 #define __HDR_LEN_MASK	VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M
3308 		if (likely(!(rx_desc->status_err0_qw1 & __HBO_BIT)))
3309 			/* If a header buffer overflow occurs, i.e. the header is
3310 			 * too large to fit in the header split buffer, HW will
3311 			 * put the entire packet, including headers, in the
3312 			 * data/payload buffer.
3313 			 */
3314 			hdr_len = le16_get_bits(rx_desc->hdrlen_flags,
3315 						__HDR_LEN_MASK);
3316 #undef __HDR_LEN_MASK
3317 #undef __HBO_BIT
3318 
3319 		hdr = &rx_bufq->hdr_buf[buf_id];
3320 
3321 		if (unlikely(!hdr_len && !skb)) {
3322 			hdr_len = idpf_rx_hsplit_wa(hdr, rx_buf, pkt_len);
3323 			pkt_len -= hdr_len;
3324 
3325 			u64_stats_update_begin(&rxq->stats_sync);
3326 			u64_stats_inc(&rxq->q_stats.hsplit_buf_ovf);
3327 			u64_stats_update_end(&rxq->stats_sync);
3328 		}
3329 
3330 		if (libeth_rx_sync_for_cpu(hdr, hdr_len)) {
3331 			skb = idpf_rx_build_skb(hdr, hdr_len);
3332 			if (!skb)
3333 				break;
3334 
3335 			u64_stats_update_begin(&rxq->stats_sync);
3336 			u64_stats_inc(&rxq->q_stats.hsplit_pkts);
3337 			u64_stats_update_end(&rxq->stats_sync);
3338 		}
3339 
3340 		hdr->page = NULL;
3341 
3342 payload:
3343 		if (!libeth_rx_sync_for_cpu(rx_buf, pkt_len))
3344 			goto skip_data;
3345 
3346 		if (skb)
3347 			idpf_rx_add_frag(rx_buf, skb, pkt_len);
3348 		else
3349 			skb = idpf_rx_build_skb(rx_buf, pkt_len);
3350 
3351 		/* exit if we failed to retrieve a buffer */
3352 		if (!skb)
3353 			break;
3354 
3355 skip_data:
3356 		rx_buf->page = NULL;
3357 
3358 		idpf_rx_post_buf_refill(refillq, buf_id);
3359 		IDPF_RX_BUMP_NTC(rxq, ntc);
3360 
3361 		/* skip if it is non EOP desc */
3362 		if (!idpf_rx_splitq_is_eop(rx_desc) || unlikely(!skb))
3363 			continue;
3364 
3365 		/* pad skb if needed (to make valid ethernet frame) */
3366 		if (eth_skb_pad(skb)) {
3367 			skb = NULL;
3368 			continue;
3369 		}
3370 
3371 		/* probably a little skewed due to removing CRC */
3372 		total_rx_bytes += skb->len;
3373 
3374 		/* protocol */
3375 		if (unlikely(idpf_rx_process_skb_fields(rxq, skb, rx_desc))) {
3376 			dev_kfree_skb_any(skb);
3377 			skb = NULL;
3378 			continue;
3379 		}
3380 
3381 		/* send completed skb up the stack */
3382 		napi_gro_receive(rxq->napi, skb);
3383 		skb = NULL;
3384 
3385 		/* update budget accounting */
3386 		total_rx_pkts++;
3387 	}
3388 
3389 	rxq->next_to_clean = ntc;
3390 
3391 	rxq->skb = skb;
3392 	u64_stats_update_begin(&rxq->stats_sync);
3393 	u64_stats_add(&rxq->q_stats.packets, total_rx_pkts);
3394 	u64_stats_add(&rxq->q_stats.bytes, total_rx_bytes);
3395 	u64_stats_update_end(&rxq->stats_sync);
3396 
3397 	/* guarantee a trip back through this routine if there was a failure */
3398 	return total_rx_pkts;
3399 }
3400 
3401 /**
3402  * idpf_rx_update_bufq_desc - Update buffer queue descriptor
3403  * @bufq: Pointer to the buffer queue
3404  * @buf_id: buffer ID
3405  * @buf_desc: Buffer queue descriptor
3406  *
3407  * Return 0 on success and negative on failure.
3408  */
3409 static int idpf_rx_update_bufq_desc(struct idpf_buf_queue *bufq, u32 buf_id,
3410 				    struct virtchnl2_splitq_rx_buf_desc *buf_desc)
3411 {
3412 	struct libeth_fq_fp fq = {
3413 		.pp		= bufq->pp,
3414 		.fqes		= bufq->buf,
3415 		.truesize	= bufq->truesize,
3416 		.count		= bufq->desc_count,
3417 	};
3418 	dma_addr_t addr;
3419 
3420 	addr = libeth_rx_alloc(&fq, buf_id);
3421 	if (addr == DMA_MAPPING_ERROR)
3422 		return -ENOMEM;
3423 
3424 	buf_desc->pkt_addr = cpu_to_le64(addr);
3425 	buf_desc->qword0.buf_id = cpu_to_le16(buf_id);
3426 
3427 	if (!idpf_queue_has(HSPLIT_EN, bufq))
3428 		return 0;
3429 
3430 	fq.pp = bufq->hdr_pp;
3431 	fq.fqes = bufq->hdr_buf;
3432 	fq.truesize = bufq->hdr_truesize;
3433 
3434 	addr = libeth_rx_alloc(&fq, buf_id);
3435 	if (addr == DMA_MAPPING_ERROR)
3436 		return -ENOMEM;
3437 
3438 	buf_desc->hdr_addr = cpu_to_le64(addr);
3439 
3440 	return 0;
3441 }
3442 
3443 /**
3444  * idpf_rx_clean_refillq - Clean refill queue buffers
3445  * @bufq: buffer queue to post buffers back to
3446  * @refillq: refill queue to clean
3447  *
3448  * This function takes care of the buffer refill management
3449  */
3450 static void idpf_rx_clean_refillq(struct idpf_buf_queue *bufq,
3451 				  struct idpf_sw_queue *refillq)
3452 {
3453 	struct virtchnl2_splitq_rx_buf_desc *buf_desc;
3454 	u16 bufq_nta = bufq->next_to_alloc;
3455 	u16 ntc = refillq->next_to_clean;
3456 	int cleaned = 0;
3457 
3458 	buf_desc = &bufq->split_buf[bufq_nta];
3459 
3460 	/* make sure we stop at ring wrap in the unlikely case ring is full */
3461 	while (likely(cleaned < refillq->desc_count)) {
3462 		u32 buf_id, refill_desc = refillq->ring[ntc];
3463 		bool failure;
3464 
3465 		if (idpf_queue_has(RFL_GEN_CHK, refillq) !=
3466 		    !!(refill_desc & IDPF_RX_BI_GEN_M))
3467 			break;
3468 
3469 		buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc);
3470 		failure = idpf_rx_update_bufq_desc(bufq, buf_id, buf_desc);
3471 		if (failure)
3472 			break;
3473 
3474 		if (unlikely(++ntc == refillq->desc_count)) {
3475 			idpf_queue_change(RFL_GEN_CHK, refillq);
3476 			ntc = 0;
3477 		}
3478 
3479 		if (unlikely(++bufq_nta == bufq->desc_count)) {
3480 			buf_desc = &bufq->split_buf[0];
3481 			bufq_nta = 0;
3482 		} else {
3483 			buf_desc++;
3484 		}
3485 
3486 		cleaned++;
3487 	}
3488 
3489 	if (!cleaned)
3490 		return;
3491 
3492 	/* We want to limit how many transactions on the bus we trigger with
3493 	 * tail writes so we only do it in strides. It's also important we
3494 	 * align the write to a multiple of 8 as required by HW.
3495 	 */
3496 	if (((bufq->next_to_use <= bufq_nta ? 0 : bufq->desc_count) +
3497 	    bufq_nta - bufq->next_to_use) >= IDPF_RX_BUF_POST_STRIDE)
3498 		idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq_nta,
3499 						       IDPF_RX_BUF_POST_STRIDE));
3500 
3501 	/* update next to alloc since we have filled the ring */
3502 	refillq->next_to_clean = ntc;
3503 	bufq->next_to_alloc = bufq_nta;
3504 }
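
/* Illustrative sketch (not part of the driver): the stride-gated tail write
 * at the end of idpf_rx_clean_refillq() above. The number of buffers posted
 * since the last tail bump is computed with wrap handling, and the tail
 * register is written only once at least a full stride is pending, aligned
 * down to the post stride. Ring size, stride, and indices are arbitrary.
 */
#if 0	/* standalone example, not compiled with the driver */
#include <stdio.h>

#define DESC_COUNT	512
#define POST_STRIDE	16	/* power of two, as assumed by the masking below */

int main(void)
{
	unsigned int next_to_use = 500;	/* last tail value written */
	unsigned int nta = 20;		/* next_to_alloc after refilling */
	unsigned int pending;

	/* account for the ring wrap between next_to_use and nta */
	pending = (next_to_use <= nta ? 0 : DESC_COUNT) + nta - next_to_use;

	if (pending >= POST_STRIDE)
		printf("write tail = %u (pending %u)\n",
		       nta & ~(POST_STRIDE - 1), pending);	/* ALIGN_DOWN */
	else
		printf("defer tail write (pending %u)\n", pending);
	return 0;
}
#endif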
3505 
3506 /**
3507  * idpf_rx_clean_refillq_all - Clean all refill queues
3508  * @bufq: buffer queue with refill queues
3509  * @nid: ID of the closest NUMA node with memory
3510  *
3511  * Iterates through all refill queues assigned to the buffer queue assigned to
3512  * this vector and posts the refilled buffers back to the buffer queue
3513  * hardware ring.
3514  */
3515 static void idpf_rx_clean_refillq_all(struct idpf_buf_queue *bufq, int nid)
3516 {
3517 	struct idpf_bufq_set *bufq_set;
3518 	int i;
3519 
3520 	page_pool_nid_changed(bufq->pp, nid);
3521 	if (bufq->hdr_pp)
3522 		page_pool_nid_changed(bufq->hdr_pp, nid);
3523 
3524 	bufq_set = container_of(bufq, struct idpf_bufq_set, bufq);
3525 	for (i = 0; i < bufq_set->num_refillqs; i++)
3526 		idpf_rx_clean_refillq(bufq, &bufq_set->refillqs[i]);
3527 }
3528 
3529 /**
3530  * idpf_vport_intr_clean_queues - MSIX mode Interrupt Handler
3531  * @irq: interrupt number
3532  * @data: pointer to a q_vector
3533  *
3534  */
3535 static irqreturn_t idpf_vport_intr_clean_queues(int __always_unused irq,
3536 						void *data)
3537 {
3538 	struct idpf_q_vector *q_vector = (struct idpf_q_vector *)data;
3539 
3540 	q_vector->total_events++;
3541 	napi_schedule(&q_vector->napi);
3542 
3543 	return IRQ_HANDLED;
3544 }
3545 
3546 /**
3547  * idpf_vport_intr_napi_del_all - Unregister napi for all q_vectors in vport
3548  * @vport: virtual port structure
3549  *
3550  */
3551 static void idpf_vport_intr_napi_del_all(struct idpf_vport *vport)
3552 {
3553 	u16 v_idx;
3554 
3555 	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++)
3556 		netif_napi_del(&vport->q_vectors[v_idx].napi);
3557 }
3558 
3559 /**
3560  * idpf_vport_intr_napi_dis_all - Disable NAPI for all q_vectors in the vport
3561  * @vport: main vport structure
3562  */
3563 static void idpf_vport_intr_napi_dis_all(struct idpf_vport *vport)
3564 {
3565 	int v_idx;
3566 
3567 	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++)
3568 		napi_disable(&vport->q_vectors[v_idx].napi);
3569 }
3570 
3571 /**
3572  * idpf_vport_intr_rel - Free memory allocated for interrupt vectors
3573  * @vport: virtual port
3574  *
3575  * Free the memory allocated for interrupt vectors associated with a vport
3576  */
3577 void idpf_vport_intr_rel(struct idpf_vport *vport)
3578 {
3579 	for (u32 v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
3580 		struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
3581 
3582 		kfree(q_vector->complq);
3583 		q_vector->complq = NULL;
3584 		kfree(q_vector->bufq);
3585 		q_vector->bufq = NULL;
3586 		kfree(q_vector->tx);
3587 		q_vector->tx = NULL;
3588 		kfree(q_vector->rx);
3589 		q_vector->rx = NULL;
3590 
3591 		free_cpumask_var(q_vector->affinity_mask);
3592 	}
3593 
3594 	kfree(vport->q_vectors);
3595 	vport->q_vectors = NULL;
3596 }
3597 
3598 /**
3599  * idpf_vport_intr_rel_irq - Free the IRQ association with the OS
3600  * @vport: main vport structure
3601  */
3602 static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
3603 {
3604 	struct idpf_adapter *adapter = vport->adapter;
3605 	int vector;
3606 
3607 	for (vector = 0; vector < vport->num_q_vectors; vector++) {
3608 		struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
3609 		int irq_num, vidx;
3610 
3611 		/* free only the irqs that were actually requested */
3612 		if (!q_vector)
3613 			continue;
3614 
3615 		vidx = vport->q_vector_idxs[vector];
3616 		irq_num = adapter->msix_entries[vidx].vector;
3617 
3618 		/* clear the affinity_mask in the IRQ descriptor */
3619 		irq_set_affinity_hint(irq_num, NULL);
3620 		kfree(free_irq(irq_num, q_vector));
3621 	}
3622 }
3623 
3624 /**
3625  * idpf_vport_intr_dis_irq_all - Disable all interrupts for the given vport
3626  * @vport: main vport structure
3627  */
3628 static void idpf_vport_intr_dis_irq_all(struct idpf_vport *vport)
3629 {
3630 	struct idpf_q_vector *q_vector = vport->q_vectors;
3631 	int q_idx;
3632 
3633 	for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++)
3634 		writel(0, q_vector[q_idx].intr_reg.dyn_ctl);
3635 }
3636 
3637 /**
3638  * idpf_vport_intr_buildreg_itr - Enable default interrupt generation settings
3639  * idpf_vport_intr_buildreg_itr - Build a value for the dynamic control register
3640  * @type: itr index
3641  * @itr: itr value
3642  */
3643 static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector,
3644 					const int type, u16 itr)
3645 {
3646 	u32 itr_val;
3647 
3648 	itr &= IDPF_ITR_MASK;
3649 	/* Don't clear PBA because that can cause lost interrupts that
3650 	 * came in while we were cleaning/polling
3651 	 */
3652 	itr_val = q_vector->intr_reg.dyn_ctl_intena_m |
3653 		  (type << q_vector->intr_reg.dyn_ctl_itridx_s) |
3654 		  (itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1));
3655 
3656 	return itr_val;
3657 }
3658 
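/*
 * A hedged illustration of the value built above: the actual field positions
 * live in q_vector->intr_reg and are filled in from the device register
 * layout at init time, so the breakdown below only shows the packing.
 *
 *   dyn_ctl_intena_m       - interrupt enable bit, always set when re-arming
 *   type << itridx_s       - selects which ITR index the write applies to
 *   itr << (intrvl_s - 1)  - new interval; the "- 1" appears to account for
 *                            the 2 usec ITR granularity (IDPF_ITR_GRAN_S)
 *
 * When called with IDPF_NO_ITR_UPDATE_IDX and itr == 0, the result simply
 * re-enables the interrupt without changing the throttling interval.
 */
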
3659 /**
3660  * idpf_update_dim_sample - Update dim sample with packets and bytes
3661  * @q_vector: the vector associated with the interrupt
3662  * @dim_sample: dim sample to update
3663  * @dim: dim instance structure
3664  * @packets: total packets
3665  * @bytes: total bytes
3666  *
3667  * Update the dim sample with the packets and bytes which are passed to this
3668  * function. Set the dim state appropriately if the dim settings gets stale.
3669  */
3670 static void idpf_update_dim_sample(struct idpf_q_vector *q_vector,
3671 				   struct dim_sample *dim_sample,
3672 				   struct dim *dim, u64 packets, u64 bytes)
3673 {
3674 	dim_update_sample(q_vector->total_events, packets, bytes, dim_sample);
3675 	dim_sample->comp_ctr = 0;
3676 
3677 	/* if dim settings get stale, like when not updated for 1 second or
3678 	 * longer, force it to start again. This addresses the frequent case
3679 	 * of an idle queue being switched to by the scheduler.
3680 	 */
3681 	if (ktime_ms_delta(dim_sample->time, dim->start_sample.time) >= MSEC_PER_SEC)
3682 		dim->state = DIM_START_MEASURE;
3683 }
3684 
3685 /**
3686  * idpf_net_dim - Update net DIM algorithm
3687  * @q_vector: the vector associated with the interrupt
3688  *
3689  * Create a DIM sample and notify net_dim() so that it can possibly decide
3690  * a new ITR value based on incoming packets, bytes, and interrupts.
3691  *
3692  * Any ITR (Tx or Rx) that is not configured as dynamic is left untouched.
3693  */
3694 static void idpf_net_dim(struct idpf_q_vector *q_vector)
3695 {
3696 	struct dim_sample dim_sample = { };
3697 	u64 packets, bytes;
3698 	u32 i;
3699 
3700 	if (!IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode))
3701 		goto check_rx_itr;
3702 
3703 	for (i = 0, packets = 0, bytes = 0; i < q_vector->num_txq; i++) {
3704 		struct idpf_tx_queue *txq = q_vector->tx[i];
3705 		unsigned int start;
3706 
3707 		do {
3708 			start = u64_stats_fetch_begin(&txq->stats_sync);
3709 			packets += u64_stats_read(&txq->q_stats.packets);
3710 			bytes += u64_stats_read(&txq->q_stats.bytes);
3711 		} while (u64_stats_fetch_retry(&txq->stats_sync, start));
3712 	}
3713 
3714 	idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->tx_dim,
3715 			       packets, bytes);
3716 	net_dim(&q_vector->tx_dim, dim_sample);
3717 
3718 check_rx_itr:
3719 	if (!IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode))
3720 		return;
3721 
3722 	for (i = 0, packets = 0, bytes = 0; i < q_vector->num_rxq; i++) {
3723 		struct idpf_rx_queue *rxq = q_vector->rx[i];
3724 		unsigned int start;
3725 
3726 		do {
3727 			start = u64_stats_fetch_begin(&rxq->stats_sync);
3728 			packets += u64_stats_read(&rxq->q_stats.packets);
3729 			bytes += u64_stats_read(&rxq->q_stats.bytes);
3730 		} while (u64_stats_fetch_retry(&rxq->stats_sync, start));
3731 	}
3732 
3733 	idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->rx_dim,
3734 			       packets, bytes);
3735 	net_dim(&q_vector->rx_dim, dim_sample);
3736 }
3737 
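/*
 * net_dim() consumes the sample built above and, when it decides the
 * moderation profile should change, schedules dim->work.  That work item runs
 * idpf_tx_dim_work()/idpf_rx_dim_work() below, which look up the new ITR
 * value in the vport's tx/rx_itr_profile tables and program it via
 * idpf_vport_intr_write_itr().
 */
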
3738 /**
3739  * idpf_vport_intr_update_itr_ena_irq - Update itr and re-enable MSIX interrupt
3740  * @q_vector: q_vector for which itr is being updated and interrupt enabled
3741  *
3742  * Update the net_dim() algorithm and re-enable the interrupt associated with
3743  * this vector.
3744  */
3745 void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector)
3746 {
3747 	u32 intval;
3748 
3749 	/* net_dim() updates ITR out-of-band using a work item */
3750 	idpf_net_dim(q_vector);
3751 
3752 	intval = idpf_vport_intr_buildreg_itr(q_vector,
3753 					      IDPF_NO_ITR_UPDATE_IDX, 0);
3754 
3755 	writel(intval, q_vector->intr_reg.dyn_ctl);
3756 }
3757 
3758 /**
3759  * idpf_vport_intr_req_irq - Request MSI-X IRQs from the OS for the vport
3760  * @vport: main vport structure
3761  */
3762 static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
3763 {
3764 	struct idpf_adapter *adapter = vport->adapter;
3765 	const char *drv_name, *if_name, *vec_name;
3766 	int vector, err, irq_num, vidx;
3767 
3768 	drv_name = dev_driver_string(&adapter->pdev->dev);
3769 	if_name = netdev_name(vport->netdev);
3770 
3771 	for (vector = 0; vector < vport->num_q_vectors; vector++) {
3772 		struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
3773 		char *name;
3774 
3775 		vidx = vport->q_vector_idxs[vector];
3776 		irq_num = adapter->msix_entries[vidx].vector;
3777 
3778 		if (q_vector->num_rxq && q_vector->num_txq)
3779 			vec_name = "TxRx";
3780 		else if (q_vector->num_rxq)
3781 			vec_name = "Rx";
3782 		else if (q_vector->num_txq)
3783 			vec_name = "Tx";
3784 		else
3785 			continue;
3786 
3787 		name = kasprintf(GFP_KERNEL, "%s-%s-%s-%d", drv_name, if_name,
3788 				 vec_name, vidx);
3789 
3790 		err = request_irq(irq_num, idpf_vport_intr_clean_queues, 0,
3791 				  name, q_vector);
3792 		if (err) {
3793 			netdev_err(vport->netdev,
3794 				   "Request_irq failed, error: %d\n", err);
3795 			goto free_q_irqs;
3796 		}
3797 		/* assign the mask for this irq */
3798 		irq_set_affinity_hint(irq_num, q_vector->affinity_mask);
3799 	}
3800 
3801 	return 0;
3802 
3803 free_q_irqs:
3804 	while (--vector >= 0) {
3805 		vidx = vport->q_vector_idxs[vector];
3806 		irq_num = adapter->msix_entries[vidx].vector;
3807 		kfree(free_irq(irq_num, &vport->q_vectors[vector]));
3808 	}
3809 
3810 	return err;
3811 }
3812 
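/*
 * The kasprintf() format above produces interrupt names such as
 * "idpf-eth0-TxRx-3" (driver name, netdev name, vector type, vector index) in
 * /proc/interrupts; the netdev name and index shown here are only examples.
 */
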
3813 /**
3814  * idpf_vport_intr_write_itr - Write ITR value to the ITR register
3815  * @q_vector: q_vector structure
3816  * @itr: Interrupt throttling rate
3817  * @tx: true to write the Tx ITR, false to write the Rx ITR
3818  */
3819 void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector, u16 itr, bool tx)
3820 {
3821 	struct idpf_intr_reg *intr_reg;
3822 
3823 	if (tx && !q_vector->tx)
3824 		return;
3825 	if (!tx && !q_vector->rx)
3826 		return;
3827 
3828 	intr_reg = &q_vector->intr_reg;
3829 	writel(ITR_REG_ALIGN(itr) >> IDPF_ITR_GRAN_S,
3830 	       tx ? intr_reg->tx_itr : intr_reg->rx_itr);
3831 }
3832 
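/*
 * A hedged worked example of the conversion above, assuming the usual Intel
 * 2 usec ITR granularity (IDPF_ITR_GRAN_S == 1 with the alignment mask
 * clearing bit 0):
 *
 *   itr == 50 usec -> ITR_REG_ALIGN(50) == 50 -> 50 >> 1 == 25 register units
 *   itr == 51 usec -> ITR_REG_ALIGN(51) == 50 -> 25 register units
 *
 * The register is programmed in 2 usec ticks, so the written value is half of
 * the (aligned) microsecond setting.
 */
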
3833 /**
3834  * idpf_vport_intr_ena_irq_all - Enable IRQ for the given vport
3835  * @vport: main vport structure
3836  */
3837 static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport)
3838 {
3839 	bool dynamic;
3840 	int q_idx;
3841 	u16 itr;
3842 
3843 	for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) {
3844 		struct idpf_q_vector *qv = &vport->q_vectors[q_idx];
3845 
3846 		/* Set the initial ITR values */
3847 		if (qv->num_txq) {
3848 			dynamic = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode);
3849 			itr = vport->tx_itr_profile[qv->tx_dim.profile_ix];
3850 			idpf_vport_intr_write_itr(qv, dynamic ?
3851 						  itr : qv->tx_itr_value,
3852 						  true);
3853 		}
3854 
3855 		if (qv->num_rxq) {
3856 			dynamic = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode);
3857 			itr = vport->rx_itr_profile[qv->rx_dim.profile_ix];
3858 			idpf_vport_intr_write_itr(qv, dynamic ?
3859 						  itr : qv->rx_itr_value,
3860 						  false);
3861 		}
3862 
3863 		if (qv->num_txq || qv->num_rxq)
3864 			idpf_vport_intr_update_itr_ena_irq(qv);
3865 	}
3866 }
3867 
3868 /**
3869  * idpf_vport_intr_deinit - Release all vector associations for the vport
3870  * @vport: main vport structure
3871  */
3872 void idpf_vport_intr_deinit(struct idpf_vport *vport)
3873 {
3874 	idpf_vport_intr_dis_irq_all(vport);
3875 	idpf_vport_intr_napi_dis_all(vport);
3876 	idpf_vport_intr_napi_del_all(vport);
3877 	idpf_vport_intr_rel_irq(vport);
3878 }
3879 
3880 /**
3881  * idpf_tx_dim_work - Deferred work to apply a new Tx ITR value
3882  * @work: work queue structure
3883  */
3884 static void idpf_tx_dim_work(struct work_struct *work)
3885 {
3886 	struct idpf_q_vector *q_vector;
3887 	struct idpf_vport *vport;
3888 	struct dim *dim;
3889 	u16 itr;
3890 
3891 	dim = container_of(work, struct dim, work);
3892 	q_vector = container_of(dim, struct idpf_q_vector, tx_dim);
3893 	vport = q_vector->vport;
3894 
3895 	if (dim->profile_ix >= ARRAY_SIZE(vport->tx_itr_profile))
3896 		dim->profile_ix = ARRAY_SIZE(vport->tx_itr_profile) - 1;
3897 
3898 	/* look up the values in our local table */
3899 	itr = vport->tx_itr_profile[dim->profile_ix];
3900 
3901 	idpf_vport_intr_write_itr(q_vector, itr, true);
3902 
3903 	dim->state = DIM_START_MEASURE;
3904 }
3905 
3906 /**
3907  * idpf_rx_dim_work - Deferred work to apply a new Rx ITR value
3908  * @work: work queue structure
3909  */
3910 static void idpf_rx_dim_work(struct work_struct *work)
3911 {
3912 	struct idpf_q_vector *q_vector;
3913 	struct idpf_vport *vport;
3914 	struct dim *dim;
3915 	u16 itr;
3916 
3917 	dim = container_of(work, struct dim, work);
3918 	q_vector = container_of(dim, struct idpf_q_vector, rx_dim);
3919 	vport = q_vector->vport;
3920 
3921 	if (dim->profile_ix >= ARRAY_SIZE(vport->rx_itr_profile))
3922 		dim->profile_ix = ARRAY_SIZE(vport->rx_itr_profile) - 1;
3923 
3924 	/* look up the values in our local table */
3925 	itr = vport->rx_itr_profile[dim->profile_ix];
3926 
3927 	idpf_vport_intr_write_itr(q_vector, itr, false);
3928 
3929 	dim->state = DIM_START_MEASURE;
3930 }
3931 
3932 /**
3933  * idpf_init_dim - Set up dynamic interrupt moderation
3934  * @qv: q_vector structure
3935  */
3936 static void idpf_init_dim(struct idpf_q_vector *qv)
3937 {
3938 	INIT_WORK(&qv->tx_dim.work, idpf_tx_dim_work);
3939 	qv->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
3940 	qv->tx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX;
3941 
3942 	INIT_WORK(&qv->rx_dim.work, idpf_rx_dim_work);
3943 	qv->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
3944 	qv->rx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX;
3945 }
3946 
3947 /**
3948  * idpf_vport_intr_napi_ena_all - Enable NAPI for all q_vectors in the vport
3949  * @vport: main vport structure
3950  */
3951 static void idpf_vport_intr_napi_ena_all(struct idpf_vport *vport)
3952 {
3953 	int q_idx;
3954 
3955 	for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) {
3956 		struct idpf_q_vector *q_vector = &vport->q_vectors[q_idx];
3957 
3958 		idpf_init_dim(q_vector);
3959 		napi_enable(&q_vector->napi);
3960 	}
3961 }
3962 
3963 /**
3964  * idpf_tx_splitq_clean_all - Clean completion queues
3965  * @q_vec: queue vector
3966  * @budget: Used to determine if we are in netpoll
3967  * @cleaned: returns number of packets cleaned
3968  *
3969  * Returns false if clean is not complete else returns true
3970  */
3971 static bool idpf_tx_splitq_clean_all(struct idpf_q_vector *q_vec,
3972 				     int budget, int *cleaned)
3973 {
3974 	u16 num_complq = q_vec->num_complq;
3975 	bool clean_complete = true;
3976 	int i, budget_per_q;
3977 
3978 	if (unlikely(!num_complq))
3979 		return true;
3980 
3981 	budget_per_q = DIV_ROUND_UP(budget, num_complq);
3982 
3983 	for (i = 0; i < num_complq; i++)
3984 		clean_complete &= idpf_tx_clean_complq(q_vec->complq[i],
3985 						       budget_per_q, cleaned);
3986 
3987 	return clean_complete;
3988 }
3989 
3990 /**
3991  * idpf_rx_splitq_clean_all - Clean all Rx queues on the vector
3992  * @q_vec: queue vector
3993  * @budget: Used to determine if we are in netpoll
3994  * @cleaned: returns number of packets cleaned
3995  *
3996  * Returns false if clean is not complete else returns true
3997  */
3998 static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
3999 				     int *cleaned)
4000 {
4001 	u16 num_rxq = q_vec->num_rxq;
4002 	bool clean_complete = true;
4003 	int pkts_cleaned = 0;
4004 	int i, budget_per_q;
4005 	int nid;
4006 
4007 	/* We attempt to distribute budget to each Rx queue fairly, but don't
4008 	 * allow the budget to go below 1 because that would exit polling early.
4009 	 */
4010 	budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0;
4011 	for (i = 0; i < num_rxq; i++) {
4012 		struct idpf_rx_queue *rxq = q_vec->rx[i];
4013 		int pkts_cleaned_per_q;
4014 
4015 		pkts_cleaned_per_q = idpf_rx_splitq_clean(rxq, budget_per_q);
4016 		/* if we clean as many as budgeted, we must not be done */
4017 		if (pkts_cleaned_per_q >= budget_per_q)
4018 			clean_complete = false;
4019 		pkts_cleaned += pkts_cleaned_per_q;
4020 	}
4021 	*cleaned = pkts_cleaned;
4022 
4023 	nid = numa_mem_id();
4024 
4025 	for (i = 0; i < q_vec->num_bufq; i++)
4026 		idpf_rx_clean_refillq_all(q_vec->bufq[i], nid);
4027 
4028 	return clean_complete;
4029 }
4030 
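/*
 * A worked example of the budget split above: with the default NAPI budget of
 * 64 and three Rx queues on the vector, budget_per_q == max(64 / 3, 1) == 21,
 * and any queue that cleans all 21 of its packets marks the poll as
 * incomplete so NAPI keeps polling.  The max(..., 1) guard matters when a
 * vector carries more than 64 Rx queues, where plain integer division would
 * hand out a budget of 0 and end polling before any packets were cleaned.
 */
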
4031 /**
4032  * idpf_vport_splitq_napi_poll - NAPI handler
4033  * @napi: struct from which you get q_vector
4034  * @budget: budget provided by stack
4035  */
4036 static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
4037 {
4038 	struct idpf_q_vector *q_vector =
4039 				container_of(napi, struct idpf_q_vector, napi);
4040 	bool clean_complete;
4041 	int work_done = 0;
4042 
4043 	/* Handle case where we are called by netpoll with a budget of 0 */
4044 	if (unlikely(!budget)) {
4045 		idpf_tx_splitq_clean_all(q_vector, budget, &work_done);
4046 
4047 		return 0;
4048 	}
4049 
4050 	clean_complete = idpf_rx_splitq_clean_all(q_vector, budget, &work_done);
4051 	clean_complete &= idpf_tx_splitq_clean_all(q_vector, budget, &work_done);
4052 
4053 	/* If work not completed, return budget and polling will return */
4054 	/* If work not completed, return budget so NAPI will poll us again */
4055 		return budget;
4056 
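	/* NAPI requires that a poll which completes via napi_complete_done()
	 * reports strictly less than the full budget, so cap the count here.
	 */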
4057 	work_done = min_t(int, work_done, budget - 1);
4058 
4059 	/* Exit the polling mode, but don't re-enable interrupts if stack might
4060 	 * poll us due to busy-polling
4061 	 */
4062 	if (likely(napi_complete_done(napi, work_done)))
4063 		idpf_vport_intr_update_itr_ena_irq(q_vector);
4064 
4065 	/* Switch to poll mode in the tear-down path after sending disable
4066 	 * queues virtchnl message, as the interrupts will be disabled after
4067 	 * that
4068 	 */
4069 	if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE,
4070 							 q_vector->tx[0])))
4071 		return budget;
4072 
4073 	return work_done;
4074 }
4075 
4076 /**
4077  * idpf_vport_intr_map_vector_to_qs - Map vectors to queues
4078  * @vport: virtual port
4079  *
4080  * Assign Rx and Tx queue groups to the q_vectors in round-robin order
4081  */
4082 static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
4083 {
4084 	bool split = idpf_is_queue_model_split(vport->rxq_model);
4085 	u16 num_txq_grp = vport->num_txq_grp;
4086 	struct idpf_rxq_group *rx_qgrp;
4087 	struct idpf_txq_group *tx_qgrp;
4088 	u32 i, qv_idx, q_index;
4089 
4090 	for (i = 0, qv_idx = 0; i < vport->num_rxq_grp; i++) {
4091 		u16 num_rxq;
4092 
4093 		if (qv_idx >= vport->num_q_vectors)
4094 			qv_idx = 0;
4095 
4096 		rx_qgrp = &vport->rxq_grps[i];
4097 		if (split)
4098 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
4099 		else
4100 			num_rxq = rx_qgrp->singleq.num_rxq;
4101 
4102 		for (u32 j = 0; j < num_rxq; j++) {
4103 			struct idpf_rx_queue *q;
4104 
4105 			if (split)
4106 				q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
4107 			else
4108 				q = rx_qgrp->singleq.rxqs[j];
4109 			q->q_vector = &vport->q_vectors[qv_idx];
4110 			q_index = q->q_vector->num_rxq;
4111 			q->q_vector->rx[q_index] = q;
4112 			q->q_vector->num_rxq++;
4113 
4114 			if (split)
4115 				q->napi = &q->q_vector->napi;
4116 		}
4117 
4118 		if (split) {
4119 			for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
4120 				struct idpf_buf_queue *bufq;
4121 
4122 				bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
4123 				bufq->q_vector = &vport->q_vectors[qv_idx];
4124 				q_index = bufq->q_vector->num_bufq;
4125 				bufq->q_vector->bufq[q_index] = bufq;
4126 				bufq->q_vector->num_bufq++;
4127 			}
4128 		}
4129 
4130 		qv_idx++;
4131 	}
4132 
4133 	split = idpf_is_queue_model_split(vport->txq_model);
4134 
4135 	for (i = 0, qv_idx = 0; i < num_txq_grp; i++) {
4136 		u16 num_txq;
4137 
4138 		if (qv_idx >= vport->num_q_vectors)
4139 			qv_idx = 0;
4140 
4141 		tx_qgrp = &vport->txq_grps[i];
4142 		num_txq = tx_qgrp->num_txq;
4143 
4144 		for (u32 j = 0; j < num_txq; j++) {
4145 			struct idpf_tx_queue *q;
4146 
4147 			q = tx_qgrp->txqs[j];
4148 			q->q_vector = &vport->q_vectors[qv_idx];
4149 			q->q_vector->tx[q->q_vector->num_txq++] = q;
4150 		}
4151 
4152 		if (split) {
4153 			struct idpf_compl_queue *q = tx_qgrp->complq;
4154 
4155 			q->q_vector = &vport->q_vectors[qv_idx];
4156 			q->q_vector->complq[q->q_vector->num_complq++] = q;
4157 		}
4158 
4159 		qv_idx++;
4160 	}
4161 }
4162 
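/*
 * A worked example of the round-robin assignment above, for a hypothetical
 * vport with four Rx queue groups and two q_vectors:
 *
 *   rxq group 0 -> q_vector 0
 *   rxq group 1 -> q_vector 1
 *   rxq group 2 -> q_vector 0	(qv_idx wrapped back to 0)
 *   rxq group 3 -> q_vector 1
 *
 * In the split queue model the group's buffer queues follow it onto the same
 * vector, and Tx queue groups are spread the same way in the second loop, so
 * a single vector may service both Rx and Tx queues ("TxRx" vectors in
 * idpf_vport_intr_req_irq()).
 */
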
4163 /**
4164  * idpf_vport_intr_init_vec_idx - Initialize the vector indexes
4165  * @vport: virtual port
4166  *
4167  * Initialize vector indexes with values returned over the mailbox
4168  */
4169 static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
4170 {
4171 	struct idpf_adapter *adapter = vport->adapter;
4172 	struct virtchnl2_alloc_vectors *ac;
4173 	u16 *vecids, total_vecs;
4174 	int i;
4175 
4176 	ac = adapter->req_vec_chunks;
4177 	if (!ac) {
4178 		for (i = 0; i < vport->num_q_vectors; i++)
4179 			vport->q_vectors[i].v_idx = vport->q_vector_idxs[i];
4180 
4181 		return 0;
4182 	}
4183 
4184 	total_vecs = idpf_get_reserved_vecs(adapter);
4185 	vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL);
4186 	if (!vecids)
4187 		return -ENOMEM;
4188 
4189 	idpf_get_vec_ids(adapter, vecids, total_vecs, &ac->vchunks);
4190 
4191 	for (i = 0; i < vport->num_q_vectors; i++)
4192 		vport->q_vectors[i].v_idx = vecids[vport->q_vector_idxs[i]];
4193 
4194 	kfree(vecids);
4195 
4196 	return 0;
4197 }
4198 
4199 /**
4200  * idpf_vport_intr_napi_add_all - Register NAPI handler for all q_vectors
4201  * @vport: virtual port structure
4202  */
4203 static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
4204 {
4205 	int (*napi_poll)(struct napi_struct *napi, int budget);
4206 	u16 v_idx;
4207 
4208 	if (idpf_is_queue_model_split(vport->txq_model))
4209 		napi_poll = idpf_vport_splitq_napi_poll;
4210 	else
4211 		napi_poll = idpf_vport_singleq_napi_poll;
4212 
4213 	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
4214 		struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
4215 
4216 		netif_napi_add(vport->netdev, &q_vector->napi, napi_poll);
4217 
4218 		/* only set affinity_mask if the CPU is online */
4219 		if (cpu_online(v_idx))
4220 			cpumask_set_cpu(v_idx, q_vector->affinity_mask);
4221 	}
4222 }
4223 
4224 /**
4225  * idpf_vport_intr_alloc - Allocate memory for interrupt vectors
4226  * @vport: virtual port
4227  *
4228  * We allocate one q_vector per queue interrupt. If allocation fails we
4229  * return -ENOMEM.
4230  */
4231 int idpf_vport_intr_alloc(struct idpf_vport *vport)
4232 {
4233 	u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector;
4234 	struct idpf_q_vector *q_vector;
4235 	u32 complqs_per_vector, v_idx;
4236 
4237 	vport->q_vectors = kcalloc(vport->num_q_vectors,
4238 				   sizeof(struct idpf_q_vector), GFP_KERNEL);
4239 	if (!vport->q_vectors)
4240 		return -ENOMEM;
4241 
4242 	txqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp,
4243 				       vport->num_q_vectors);
4244 	rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq_grp,
4245 				       vport->num_q_vectors);
4246 	bufqs_per_vector = vport->num_bufqs_per_qgrp *
4247 			   DIV_ROUND_UP(vport->num_rxq_grp,
4248 					vport->num_q_vectors);
4249 	complqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp,
4250 					  vport->num_q_vectors);
4251 
4252 	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
4253 		q_vector = &vport->q_vectors[v_idx];
4254 		q_vector->vport = vport;
4255 
4256 		q_vector->tx_itr_value = IDPF_ITR_TX_DEF;
4257 		q_vector->tx_intr_mode = IDPF_ITR_DYNAMIC;
4258 		q_vector->tx_itr_idx = VIRTCHNL2_ITR_IDX_1;
4259 
4260 		q_vector->rx_itr_value = IDPF_ITR_RX_DEF;
4261 		q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC;
4262 		q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0;
4263 
4264 		if (!zalloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL))
4265 			goto error;
4266 
4267 		q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx),
4268 				       GFP_KERNEL);
4269 		if (!q_vector->tx)
4270 			goto error;
4271 
4272 		q_vector->rx = kcalloc(rxqs_per_vector, sizeof(*q_vector->rx),
4273 				       GFP_KERNEL);
4274 		if (!q_vector->rx)
4275 			goto error;
4276 
4277 		if (!idpf_is_queue_model_split(vport->rxq_model))
4278 			continue;
4279 
4280 		q_vector->bufq = kcalloc(bufqs_per_vector,
4281 					 sizeof(*q_vector->bufq),
4282 					 GFP_KERNEL);
4283 		if (!q_vector->bufq)
4284 			goto error;
4285 
4286 		q_vector->complq = kcalloc(complqs_per_vector,
4287 					   sizeof(*q_vector->complq),
4288 					   GFP_KERNEL);
4289 		if (!q_vector->complq)
4290 			goto error;
4291 	}
4292 
4293 	return 0;
4294 
4295 error:
4296 	idpf_vport_intr_rel(vport);
4297 
4298 	return -ENOMEM;
4299 }
4300 
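/*
 * A worked example of the per-vector array sizing above, for a hypothetical
 * vport with 8 Tx queue groups, 8 Rx queue groups, 2 buffer queues per group
 * and 3 q_vectors:
 *
 *   txqs_per_vector    = DIV_ROUND_UP(8, 3)     = 3
 *   rxqs_per_vector    = DIV_ROUND_UP(8, 3)     = 3
 *   bufqs_per_vector   = 2 * DIV_ROUND_UP(8, 3) = 6
 *   complqs_per_vector = DIV_ROUND_UP(8, 3)     = 3
 *
 * Rounding up covers the case where the queue groups do not divide evenly
 * across the vectors.
 */
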
4301 /**
4302  * idpf_vport_intr_init - Setup all vectors for the given vport
4303  * @vport: virtual port
4304  *
4305  * Returns 0 on success or negative on failure
4306  */
4307 int idpf_vport_intr_init(struct idpf_vport *vport)
4308 {
4309 	int err;
4310 
4311 	err = idpf_vport_intr_init_vec_idx(vport);
4312 	if (err)
4313 		return err;
4314 
4315 	idpf_vport_intr_map_vector_to_qs(vport);
4316 	idpf_vport_intr_napi_add_all(vport);
4317 
4318 	err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport);
4319 	if (err)
4320 		goto unroll_vectors_alloc;
4321 
4322 	err = idpf_vport_intr_req_irq(vport);
4323 	if (err)
4324 		goto unroll_vectors_alloc;
4325 
4326 	return 0;
4327 
4328 unroll_vectors_alloc:
4329 	idpf_vport_intr_napi_del_all(vport);
4330 
4331 	return err;
4332 }
4333 
/**
 * idpf_vport_intr_ena - Enable NAPI and interrupts for the vport
 * @vport: main vport structure
 */
4334 void idpf_vport_intr_ena(struct idpf_vport *vport)
4335 {
4336 	idpf_vport_intr_napi_ena_all(vport);
4337 	idpf_vport_intr_ena_irq_all(vport);
4338 }
4339 
4340 /**
4341  * idpf_config_rss - Send virtchnl messages to configure RSS
4342  * @vport: virtual port
4343  *
4344  * Return 0 on success, negative on failure
4345  */
4346 int idpf_config_rss(struct idpf_vport *vport)
4347 {
4348 	int err;
4349 
4350 	err = idpf_send_get_set_rss_key_msg(vport, false);
4351 	if (err)
4352 		return err;
4353 
4354 	return idpf_send_get_set_rss_lut_msg(vport, false);
4355 }
4356 
4357 /**
4358  * idpf_fill_dflt_rss_lut - Fill the indirection table with the default values
4359  * @vport: virtual port structure
4360  */
4361 static void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
4362 {
4363 	struct idpf_adapter *adapter = vport->adapter;
4364 	u16 num_active_rxq = vport->num_rxq;
4365 	struct idpf_rss_data *rss_data;
4366 	int i;
4367 
4368 	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
4369 
4370 	for (i = 0; i < rss_data->rss_lut_size; i++) {
4371 		rss_data->rss_lut[i] = i % num_active_rxq;
4372 		rss_data->cached_lut[i] = rss_data->rss_lut[i];
4373 	}
4374 }
4375 
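/*
 * A worked example of the default LUT fill above: with a hypothetical
 * 16-entry LUT and 4 active Rx queues, the modulo pattern spreads hash
 * buckets evenly across the queues:
 *
 *   rss_lut[] = { 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3 }
 *
 * cached_lut starts out as an identical copy of the table.
 */
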
4376 /**
4377  * idpf_init_rss - Allocate and initialize RSS resources
4378  * @vport: virtual port
4379  *
4380  * Return 0 on success, negative on failure
4381  */
4382 int idpf_init_rss(struct idpf_vport *vport)
4383 {
4384 	struct idpf_adapter *adapter = vport->adapter;
4385 	struct idpf_rss_data *rss_data;
4386 	u32 lut_size;
4387 
4388 	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
4389 
4390 	lut_size = rss_data->rss_lut_size * sizeof(u32);
4391 	rss_data->rss_lut = kzalloc(lut_size, GFP_KERNEL);
4392 	if (!rss_data->rss_lut)
4393 		return -ENOMEM;
4394 
4395 	rss_data->cached_lut = kzalloc(lut_size, GFP_KERNEL);
4396 	if (!rss_data->cached_lut) {
4397 		kfree(rss_data->rss_lut);
4398 		rss_data->rss_lut = NULL;
4399 
4400 		return -ENOMEM;
4401 	}
4402 
4403 	/* Fill the default RSS lut values */
4404 	idpf_fill_dflt_rss_lut(vport);
4405 
4406 	return idpf_config_rss(vport);
4407 }
4408 
4409 /**
4410  * idpf_deinit_rss - Release RSS resources
4411  * @vport: virtual port
4412  */
4413 void idpf_deinit_rss(struct idpf_vport *vport)
4414 {
4415 	struct idpf_adapter *adapter = vport->adapter;
4416 	struct idpf_rss_data *rss_data;
4417 
4418 	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
4419 	kfree(rss_data->cached_lut);
4420 	rss_data->cached_lut = NULL;
4421 	kfree(rss_data->rss_lut);
4422 	rss_data->rss_lut = NULL;
4423 }
4424