/* xref: /linux/drivers/net/ethernet/intel/idpf/idpf_txrx.c (revision 7f71507851fc7764b36a3221839607d3a45c2025) */
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2023 Intel Corporation */
3 
4 #include <net/libeth/rx.h>
5 #include <net/libeth/tx.h>
6 
7 #include "idpf.h"
8 #include "idpf_virtchnl.h"
9 
10 struct idpf_tx_stash {
11 	struct hlist_node hlist;
12 	struct libeth_sqe buf;
13 };
14 
15 #define idpf_tx_buf_compl_tag(buf)	(*(u32 *)&(buf)->priv)
16 LIBETH_SQE_CHECK_PRIV(u32);
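
/* The completion tag rides in the libeth_sqe private area:
 * idpf_tx_buf_compl_tag() reinterprets ->priv as a u32, and
 * LIBETH_SQE_CHECK_PRIV(u32) fails the build if a u32 does not fit there.
 * Illustrative use (hypothetical tag value):
 *
 *	idpf_tx_buf_compl_tag(tx_buf) = 0x1234;		(stash the tag)
 *	tag = idpf_tx_buf_compl_tag(tx_buf);		(read it back)
 */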
17 
18 static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
19 			       unsigned int count);
20 
21 /**
22  * idpf_buf_lifo_push - push a buffer pointer onto stack
23  * @stack: pointer to stack struct
24  * @buf: pointer to buf to push
25  *
26  * Returns 0 on success, negative on failure
27  **/
28 static int idpf_buf_lifo_push(struct idpf_buf_lifo *stack,
29 			      struct idpf_tx_stash *buf)
30 {
31 	if (unlikely(stack->top == stack->size))
32 		return -ENOSPC;
33 
34 	stack->bufs[stack->top++] = buf;
35 
36 	return 0;
37 }
38 
39 /**
40  * idpf_buf_lifo_pop - pop a buffer pointer from stack
41  * @stack: pointer to stack struct
42  **/
43 static struct idpf_tx_stash *idpf_buf_lifo_pop(struct idpf_buf_lifo *stack)
44 {
45 	if (unlikely(!stack->top))
46 		return NULL;
47 
48 	return stack->bufs[--stack->top];
49 }
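
/* Illustrative round trip (a sketch of the flow used further below, not
 * verbatim driver code): a stash entry is popped when a packet's buffers must
 * be parked for an out-of-order completion, and pushed back once that
 * completion arrives:
 *
 *	stash = idpf_buf_lifo_pop(&txq->stash->buf_stack);
 *	hash_add(txq->stash->sched_buf_hash, &stash->hlist, tag);
 *	...completion for tag arrives...
 *	hash_del(&stash->hlist);
 *	idpf_buf_lifo_push(&txq->stash->buf_stack, stash);
 */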
50 
51 /**
52  * idpf_tx_timeout - Respond to a Tx Hang
53  * @netdev: network interface device structure
54  * @txqueue: TX queue
55  */
56 void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
57 {
58 	struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);
59 
60 	adapter->tx_timeout_count++;
61 
62 	netdev_err(netdev, "Detected Tx timeout: Count %d, Queue %u\n",
63 		   adapter->tx_timeout_count, txqueue);
64 	if (!idpf_is_reset_in_prog(adapter)) {
65 		set_bit(IDPF_HR_FUNC_RESET, adapter->flags);
66 		queue_delayed_work(adapter->vc_event_wq,
67 				   &adapter->vc_event_task,
68 				   msecs_to_jiffies(10));
69 	}
70 }
71 
72 /**
73  * idpf_tx_buf_rel_all - Free all Tx buffer resources
74  * @txq: queue to be cleaned
75  */
76 static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
77 {
78 	struct libeth_sq_napi_stats ss = { };
79 	struct idpf_buf_lifo *buf_stack;
80 	struct idpf_tx_stash *stash;
81 	struct libeth_cq_pp cp = {
82 		.dev	= txq->dev,
83 		.ss	= &ss,
84 	};
85 	struct hlist_node *tmp;
86 	u32 i, tag;
87 
88 	/* Buffers already cleared, nothing to do */
89 	if (!txq->tx_buf)
90 		return;
91 
92 	/* Complete all Tx buffers: unmap DMA and free any attached skbs */
93 	for (i = 0; i < txq->desc_count; i++)
94 		libeth_tx_complete(&txq->tx_buf[i], &cp);
95 
96 	kfree(txq->tx_buf);
97 	txq->tx_buf = NULL;
98 
99 	if (!idpf_queue_has(FLOW_SCH_EN, txq))
100 		return;
101 
102 	buf_stack = &txq->stash->buf_stack;
103 	if (!buf_stack->bufs)
104 		return;
105 
106 	/*
107 	 * If a Tx timeout occurred, there are potentially still bufs in the
108 	 * hash table; free them here.
109 	 */
110 	hash_for_each_safe(txq->stash->sched_buf_hash, tag, tmp, stash,
111 			   hlist) {
112 		if (!stash)
113 			continue;
114 
115 		libeth_tx_complete(&stash->buf, &cp);
116 		hash_del(&stash->hlist);
117 		idpf_buf_lifo_push(buf_stack, stash);
118 	}
119 
120 	for (i = 0; i < buf_stack->size; i++)
121 		kfree(buf_stack->bufs[i]);
122 
123 	kfree(buf_stack->bufs);
124 	buf_stack->bufs = NULL;
125 }
126 
127 /**
128  * idpf_tx_desc_rel - Free Tx resources per queue
129  * @txq: Tx descriptor ring for a specific queue
130  *
131  * Free all transmit software resources
132  */
133 static void idpf_tx_desc_rel(struct idpf_tx_queue *txq)
134 {
135 	idpf_tx_buf_rel_all(txq);
136 	netdev_tx_reset_subqueue(txq->netdev, txq->idx);
137 
138 	if (!txq->desc_ring)
139 		return;
140 
141 	dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma);
142 	txq->desc_ring = NULL;
143 	txq->next_to_use = 0;
144 	txq->next_to_clean = 0;
145 }
146 
147 /**
148  * idpf_compl_desc_rel - Free completion resources per queue
149  * @complq: completion queue
150  *
151  * Free all completion software resources.
152  */
153 static void idpf_compl_desc_rel(struct idpf_compl_queue *complq)
154 {
155 	if (!complq->comp)
156 		return;
157 
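	/* Unlike the Tx descriptor ring, which uses devres-managed
	 * dmam_alloc_coherent(), the completion ring comes from plain
	 * dma_alloc_coherent() and must be freed explicitly here.
	 */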
158 	dma_free_coherent(complq->netdev->dev.parent, complq->size,
159 			  complq->comp, complq->dma);
160 	complq->comp = NULL;
161 	complq->next_to_use = 0;
162 	complq->next_to_clean = 0;
163 }
164 
165 /**
166  * idpf_tx_desc_rel_all - Free Tx Resources for All Queues
167  * @vport: virtual port structure
168  *
169  * Free all transmit software resources
170  */
171 static void idpf_tx_desc_rel_all(struct idpf_vport *vport)
172 {
173 	int i, j;
174 
175 	if (!vport->txq_grps)
176 		return;
177 
178 	for (i = 0; i < vport->num_txq_grp; i++) {
179 		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
180 
181 		for (j = 0; j < txq_grp->num_txq; j++)
182 			idpf_tx_desc_rel(txq_grp->txqs[j]);
183 
184 		if (idpf_is_queue_model_split(vport->txq_model))
185 			idpf_compl_desc_rel(txq_grp->complq);
186 	}
187 }
188 
189 /**
190  * idpf_tx_buf_alloc_all - Allocate memory for all buffer resources
191  * @tx_q: queue for which the buffers are allocated
192  *
193  * Returns 0 on success, negative on failure
194  */
195 static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q)
196 {
197 	struct idpf_buf_lifo *buf_stack;
198 	int buf_size;
199 	int i;
200 
201 	/* Allocate bookkeeping buffers only. Buffers to be supplied to HW
202 	 * are allocated by the kernel network stack and received as part of the skb
203 	 */
204 	buf_size = sizeof(struct idpf_tx_buf) * tx_q->desc_count;
205 	tx_q->tx_buf = kzalloc(buf_size, GFP_KERNEL);
206 	if (!tx_q->tx_buf)
207 		return -ENOMEM;
208 
209 	if (!idpf_queue_has(FLOW_SCH_EN, tx_q))
210 		return 0;
211 
212 	buf_stack = &tx_q->stash->buf_stack;
213 
214 	/* Initialize tx buf stack for out-of-order completions if
215 	 * flow scheduling offload is enabled
216 	 */
217 	buf_stack->bufs = kcalloc(tx_q->desc_count, sizeof(*buf_stack->bufs),
218 				  GFP_KERNEL);
219 	if (!buf_stack->bufs)
220 		return -ENOMEM;
221 
222 	buf_stack->size = tx_q->desc_count;
223 	buf_stack->top = tx_q->desc_count;
224 
225 	for (i = 0; i < tx_q->desc_count; i++) {
226 		buf_stack->bufs[i] = kzalloc(sizeof(*buf_stack->bufs[i]),
227 					     GFP_KERNEL);
228 		if (!buf_stack->bufs[i])
229 			return -ENOMEM;
230 	}
231 
232 	return 0;
233 }
234 
235 /**
236  * idpf_tx_desc_alloc - Allocate the Tx descriptors
237  * @vport: vport to allocate resources for
238  * @tx_q: the tx ring to set up
239  *
240  * Returns 0 on success, negative on failure
241  */
242 static int idpf_tx_desc_alloc(const struct idpf_vport *vport,
243 			      struct idpf_tx_queue *tx_q)
244 {
245 	struct device *dev = tx_q->dev;
246 	int err;
247 
248 	err = idpf_tx_buf_alloc_all(tx_q);
249 	if (err)
250 		goto err_alloc;
251 
252 	tx_q->size = tx_q->desc_count * sizeof(*tx_q->base_tx);
253 
254 	/* Allocate descriptors, rounding the size up to the nearest 4K */
255 	tx_q->size = ALIGN(tx_q->size, 4096);
256 	tx_q->desc_ring = dmam_alloc_coherent(dev, tx_q->size, &tx_q->dma,
257 					      GFP_KERNEL);
258 	if (!tx_q->desc_ring) {
259 		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%u\n",
260 			tx_q->size);
261 		err = -ENOMEM;
262 		goto err_alloc;
263 	}
264 
265 	tx_q->next_to_use = 0;
266 	tx_q->next_to_clean = 0;
267 	idpf_queue_set(GEN_CHK, tx_q);
268 
269 	return 0;
270 
271 err_alloc:
272 	idpf_tx_desc_rel(tx_q);
273 
274 	return err;
275 }
276 
277 /**
278  * idpf_compl_desc_alloc - allocate completion descriptors
279  * @vport: vport to allocate resources for
280  * @complq: completion queue to set up
281  *
282  * Return: 0 on success, -errno on failure.
283  */
284 static int idpf_compl_desc_alloc(const struct idpf_vport *vport,
285 				 struct idpf_compl_queue *complq)
286 {
287 	complq->size = array_size(complq->desc_count, sizeof(*complq->comp));
288 
289 	complq->comp = dma_alloc_coherent(complq->netdev->dev.parent,
290 					  complq->size, &complq->dma,
291 					  GFP_KERNEL);
292 	if (!complq->comp)
293 		return -ENOMEM;
294 
295 	complq->next_to_use = 0;
296 	complq->next_to_clean = 0;
297 	idpf_queue_set(GEN_CHK, complq);
298 
299 	return 0;
300 }
301 
302 /**
303  * idpf_tx_desc_alloc_all - allocate all queues Tx resources
304  * @vport: virtual port private structure
305  *
306  * Returns 0 on success, negative on failure
307  */
308 static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
309 {
310 	int err = 0;
311 	int i, j;
312 
313 	/* Set up the Tx queues. In the single queue model there is no
314 	 * separate completion queue; the Tx queue is completed in place
315 	 */
316 	for (i = 0; i < vport->num_txq_grp; i++) {
317 		for (j = 0; j < vport->txq_grps[i].num_txq; j++) {
318 			struct idpf_tx_queue *txq = vport->txq_grps[i].txqs[j];
319 			u8 gen_bits = 0;
320 			u16 bufidx_mask;
321 
322 			err = idpf_tx_desc_alloc(vport, txq);
323 			if (err) {
324 				pci_err(vport->adapter->pdev,
325 					"Allocation for Tx Queue %u failed\n",
326 					i);
327 				goto err_out;
328 			}
329 
330 			if (!idpf_is_queue_model_split(vport->txq_model))
331 				continue;
332 
333 			txq->compl_tag_cur_gen = 0;
334 
335 			/* Determine the number of bits in the bufid
336 			 * mask and add one to get the start of the
337 			 * generation bits
338 			 */
339 			bufidx_mask = txq->desc_count - 1;
340 			while (bufidx_mask >> 1) {
341 				txq->compl_tag_gen_s++;
342 				bufidx_mask = bufidx_mask >> 1;
343 			}
344 			txq->compl_tag_gen_s++;
345 
346 			gen_bits = IDPF_TX_SPLITQ_COMPL_TAG_WIDTH -
347 							txq->compl_tag_gen_s;
348 			txq->compl_tag_gen_max = GETMAXVAL(gen_bits);
349 
350 			/* Set bufid mask based on location of first
351 			 * gen bit; it cannot simply be the descriptor
352 			 * ring size-1 since we can have size values
353 			 * where not all of those bits are set.
354 			 */
355 			txq->compl_tag_bufid_m =
356 				GETMAXVAL(txq->compl_tag_gen_s);
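
			/* Worked example, assuming a 16-bit completion tag
			 * (IDPF_TX_SPLITQ_COMPL_TAG_WIDTH == 16): for
			 * desc_count == 512, bufidx_mask starts at 511 (nine
			 * set bits), so compl_tag_gen_s ends up 9. That
			 * leaves 7 generation bits: compl_tag_gen_max ==
			 * 0x7f and compl_tag_bufid_m == 0x1ff.
			 */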
357 		}
358 
359 		if (!idpf_is_queue_model_split(vport->txq_model))
360 			continue;
361 
362 		/* Setup completion queues */
363 		err = idpf_compl_desc_alloc(vport, vport->txq_grps[i].complq);
364 		if (err) {
365 			pci_err(vport->adapter->pdev,
366 				"Allocation for Tx Completion Queue %u failed\n",
367 				i);
368 			goto err_out;
369 		}
370 	}
371 
372 err_out:
373 	if (err)
374 		idpf_tx_desc_rel_all(vport);
375 
376 	return err;
377 }
378 
379 /**
380  * idpf_rx_page_rel - Release an rx buffer page
381  * @rx_buf: the buffer to free
382  */
383 static void idpf_rx_page_rel(struct libeth_fqe *rx_buf)
384 {
385 	if (unlikely(!rx_buf->page))
386 		return;
387 
388 	page_pool_put_full_page(rx_buf->page->pp, rx_buf->page, false);
389 
390 	rx_buf->page = NULL;
391 	rx_buf->offset = 0;
392 }
393 
394 /**
395  * idpf_rx_hdr_buf_rel_all - Release header buffer memory
396  * @bufq: queue to use
397  */
398 static void idpf_rx_hdr_buf_rel_all(struct idpf_buf_queue *bufq)
399 {
400 	struct libeth_fq fq = {
401 		.fqes	= bufq->hdr_buf,
402 		.pp	= bufq->hdr_pp,
403 	};
404 
405 	for (u32 i = 0; i < bufq->desc_count; i++)
406 		idpf_rx_page_rel(&bufq->hdr_buf[i]);
407 
408 	libeth_rx_fq_destroy(&fq);
409 	bufq->hdr_buf = NULL;
410 	bufq->hdr_pp = NULL;
411 }
412 
413 /**
414  * idpf_rx_buf_rel_bufq - Free all Rx buffer resources for a buffer queue
415  * @bufq: queue to be cleaned
416  */
417 static void idpf_rx_buf_rel_bufq(struct idpf_buf_queue *bufq)
418 {
419 	struct libeth_fq fq = {
420 		.fqes	= bufq->buf,
421 		.pp	= bufq->pp,
422 	};
423 
424 	/* queue already cleared, nothing to do */
425 	if (!bufq->buf)
426 		return;
427 
428 	/* Free all the bufs allocated and given to hw on Rx queue */
429 	for (u32 i = 0; i < bufq->desc_count; i++)
430 		idpf_rx_page_rel(&bufq->buf[i]);
431 
432 	if (idpf_queue_has(HSPLIT_EN, bufq))
433 		idpf_rx_hdr_buf_rel_all(bufq);
434 
435 	libeth_rx_fq_destroy(&fq);
436 	bufq->buf = NULL;
437 	bufq->pp = NULL;
438 }
439 
440 /**
441  * idpf_rx_buf_rel_all - Free all Rx buffer resources for a receive queue
442  * @rxq: queue to be cleaned
443  */
444 static void idpf_rx_buf_rel_all(struct idpf_rx_queue *rxq)
445 {
446 	struct libeth_fq fq = {
447 		.fqes	= rxq->rx_buf,
448 		.pp	= rxq->pp,
449 	};
450 
451 	if (!rxq->rx_buf)
452 		return;
453 
454 	for (u32 i = 0; i < rxq->desc_count; i++)
455 		idpf_rx_page_rel(&rxq->rx_buf[i]);
456 
457 	libeth_rx_fq_destroy(&fq);
458 	rxq->rx_buf = NULL;
459 	rxq->pp = NULL;
460 }
461 
462 /**
463  * idpf_rx_desc_rel - Free a specific Rx q resources
464  * @rxq: queue to clean the resources from
465  * @dev: device to free DMA memory
466  * @model: single or split queue model
467  *
468  * Free a specific rx queue resources
469  */
470 static void idpf_rx_desc_rel(struct idpf_rx_queue *rxq, struct device *dev,
471 			     u32 model)
472 {
473 	if (!rxq)
474 		return;
475 
476 	if (rxq->skb) {
477 		dev_kfree_skb_any(rxq->skb);
478 		rxq->skb = NULL;
479 	}
480 
481 	if (!idpf_is_queue_model_split(model))
482 		idpf_rx_buf_rel_all(rxq);
483 
484 	rxq->next_to_alloc = 0;
485 	rxq->next_to_clean = 0;
486 	rxq->next_to_use = 0;
487 	if (!rxq->desc_ring)
488 		return;
489 
490 	dmam_free_coherent(dev, rxq->size, rxq->desc_ring, rxq->dma);
491 	rxq->desc_ring = NULL;
492 }
493 
494 /**
495  * idpf_rx_desc_rel_bufq - free buffer queue resources
496  * @bufq: buffer queue to clean the resources from
497  * @dev: device to free DMA memory
498  */
499 static void idpf_rx_desc_rel_bufq(struct idpf_buf_queue *bufq,
500 				  struct device *dev)
501 {
502 	if (!bufq)
503 		return;
504 
505 	idpf_rx_buf_rel_bufq(bufq);
506 
507 	bufq->next_to_alloc = 0;
508 	bufq->next_to_clean = 0;
509 	bufq->next_to_use = 0;
510 
511 	if (!bufq->split_buf)
512 		return;
513 
514 	dma_free_coherent(dev, bufq->size, bufq->split_buf, bufq->dma);
515 	bufq->split_buf = NULL;
516 }
517 
518 /**
519  * idpf_rx_desc_rel_all - Free Rx Resources for All Queues
520  * @vport: virtual port structure
521  *
522  * Free all rx queues resources
523  */
524 static void idpf_rx_desc_rel_all(struct idpf_vport *vport)
525 {
526 	struct device *dev = &vport->adapter->pdev->dev;
527 	struct idpf_rxq_group *rx_qgrp;
528 	u16 num_rxq;
529 	int i, j;
530 
531 	if (!vport->rxq_grps)
532 		return;
533 
534 	for (i = 0; i < vport->num_rxq_grp; i++) {
535 		rx_qgrp = &vport->rxq_grps[i];
536 
537 		if (!idpf_is_queue_model_split(vport->rxq_model)) {
538 			for (j = 0; j < rx_qgrp->singleq.num_rxq; j++)
539 				idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j], dev,
540 						 VIRTCHNL2_QUEUE_MODEL_SINGLE);
541 			continue;
542 		}
543 
544 		num_rxq = rx_qgrp->splitq.num_rxq_sets;
545 		for (j = 0; j < num_rxq; j++)
546 			idpf_rx_desc_rel(&rx_qgrp->splitq.rxq_sets[j]->rxq,
547 					 dev, VIRTCHNL2_QUEUE_MODEL_SPLIT);
548 
549 		if (!rx_qgrp->splitq.bufq_sets)
550 			continue;
551 
552 		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
553 			struct idpf_bufq_set *bufq_set =
554 				&rx_qgrp->splitq.bufq_sets[j];
555 
556 			idpf_rx_desc_rel_bufq(&bufq_set->bufq, dev);
557 		}
558 	}
559 }
560 
561 /**
562  * idpf_rx_buf_hw_update - Store the new tail value
563  * @bufq: queue to bump
564  * @val: new tail index (also saved as next_to_use)
565  */
566 static void idpf_rx_buf_hw_update(struct idpf_buf_queue *bufq, u32 val)
567 {
568 	bufq->next_to_use = val;
569 
570 	if (unlikely(!bufq->tail))
571 		return;
572 
573 	/* writel has an implicit memory barrier */
574 	writel(val, bufq->tail);
575 }
576 
577 /**
578  * idpf_rx_hdr_buf_alloc_all - Allocate memory for header buffers
579  * @bufq: ring to use
580  *
581  * Returns 0 on success, negative on failure.
582  */
583 static int idpf_rx_hdr_buf_alloc_all(struct idpf_buf_queue *bufq)
584 {
585 	struct libeth_fq fq = {
586 		.count	= bufq->desc_count,
587 		.type	= LIBETH_FQE_HDR,
588 		.nid	= idpf_q_vector_to_mem(bufq->q_vector),
589 	};
590 	int ret;
591 
592 	ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi);
593 	if (ret)
594 		return ret;
595 
596 	bufq->hdr_pp = fq.pp;
597 	bufq->hdr_buf = fq.fqes;
598 	bufq->hdr_truesize = fq.truesize;
599 	bufq->rx_hbuf_size = fq.buf_len;
600 
601 	return 0;
602 }
603 
604 /**
605  * idpf_rx_post_buf_refill - Post buffer id to refill queue
606  * @refillq: refill queue to post to
607  * @buf_id: buffer id to post
608  */
609 static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
610 {
611 	u32 nta = refillq->next_to_use;
612 
613 	/* store the buffer ID and the SW maintained GEN bit to the refillq */
614 	refillq->ring[nta] =
615 		FIELD_PREP(IDPF_RX_BI_BUFID_M, buf_id) |
616 		FIELD_PREP(IDPF_RX_BI_GEN_M,
617 			   idpf_queue_has(GEN_CHK, refillq));
618 
619 	if (unlikely(++nta == refillq->desc_count)) {
620 		nta = 0;
621 		idpf_queue_change(GEN_CHK, refillq);
622 	}
623 
624 	refillq->next_to_use = nta;
625 }
626 
627 /**
628  * idpf_rx_post_buf_desc - Post buffer to bufq descriptor ring
629  * @bufq: buffer queue to post to
630  * @buf_id: buffer id to post
631  *
632  * Returns false if a buffer could not be allocated, true otherwise.
633  */
634 static bool idpf_rx_post_buf_desc(struct idpf_buf_queue *bufq, u16 buf_id)
635 {
636 	struct virtchnl2_splitq_rx_buf_desc *splitq_rx_desc = NULL;
637 	struct libeth_fq_fp fq = {
638 		.count	= bufq->desc_count,
639 	};
640 	u16 nta = bufq->next_to_alloc;
641 	dma_addr_t addr;
642 
643 	splitq_rx_desc = &bufq->split_buf[nta];
644 
645 	if (idpf_queue_has(HSPLIT_EN, bufq)) {
646 		fq.pp = bufq->hdr_pp;
647 		fq.fqes = bufq->hdr_buf;
648 		fq.truesize = bufq->hdr_truesize;
649 
650 		addr = libeth_rx_alloc(&fq, buf_id);
651 		if (addr == DMA_MAPPING_ERROR)
652 			return false;
653 
654 		splitq_rx_desc->hdr_addr = cpu_to_le64(addr);
655 	}
656 
657 	fq.pp = bufq->pp;
658 	fq.fqes = bufq->buf;
659 	fq.truesize = bufq->truesize;
660 
661 	addr = libeth_rx_alloc(&fq, buf_id);
662 	if (addr == DMA_MAPPING_ERROR)
663 		return false;
664 
665 	splitq_rx_desc->pkt_addr = cpu_to_le64(addr);
666 	splitq_rx_desc->qword0.buf_id = cpu_to_le16(buf_id);
667 
668 	nta++;
669 	if (unlikely(nta == bufq->desc_count))
670 		nta = 0;
671 	bufq->next_to_alloc = nta;
672 
673 	return true;
674 }
675 
676 /**
677  * idpf_rx_post_init_bufs - Post initial buffers to bufq
678  * @bufq: buffer queue to post working set to
679  * @working_set: number of buffers to put in working set
680  *
681  * Returns true if @working_set bufs were posted successfully, false otherwise.
682  */
683 static bool idpf_rx_post_init_bufs(struct idpf_buf_queue *bufq,
684 				   u16 working_set)
685 {
686 	int i;
687 
688 	for (i = 0; i < working_set; i++) {
689 		if (!idpf_rx_post_buf_desc(bufq, i))
690 			return false;
691 	}
692 
693 	idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq->next_to_alloc,
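	/* Tail may only be bumped in whole strides; round down so any partial
	 * stride stays unposted until more buffers are filled in.
	 */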
694 					       IDPF_RX_BUF_STRIDE));
695 
696 	return true;
697 }
698 
699 /**
700  * idpf_rx_buf_alloc_singleq - Allocate memory for all buffer resources
701  * @rxq: queue for which the buffers are allocated
702  *
703  * Return: 0 on success, -ENOMEM on failure.
704  */
705 static int idpf_rx_buf_alloc_singleq(struct idpf_rx_queue *rxq)
706 {
707 	if (idpf_rx_singleq_buf_hw_alloc_all(rxq, rxq->desc_count - 1))
708 		goto err;
709 
710 	return 0;
711 
712 err:
713 	idpf_rx_buf_rel_all(rxq);
714 
715 	return -ENOMEM;
716 }
717 
718 /**
719  * idpf_rx_bufs_init_singleq - Initialize page pool and allocate Rx bufs
720  * @rxq: buffer queue to create page pool for
721  *
722  * Return: 0 on success, -errno on failure.
723  */
724 static int idpf_rx_bufs_init_singleq(struct idpf_rx_queue *rxq)
725 {
726 	struct libeth_fq fq = {
727 		.count	= rxq->desc_count,
728 		.type	= LIBETH_FQE_MTU,
729 		.nid	= idpf_q_vector_to_mem(rxq->q_vector),
730 	};
731 	int ret;
732 
733 	ret = libeth_rx_fq_create(&fq, &rxq->q_vector->napi);
734 	if (ret)
735 		return ret;
736 
737 	rxq->pp = fq.pp;
738 	rxq->rx_buf = fq.fqes;
739 	rxq->truesize = fq.truesize;
740 	rxq->rx_buf_size = fq.buf_len;
741 
742 	return idpf_rx_buf_alloc_singleq(rxq);
743 }
744 
745 /**
746  * idpf_rx_buf_alloc_all - Allocate memory for all buffer resources
747  * @rxbufq: queue for which the buffers are allocated
748  *
749  * Returns 0 on success, negative on failure
750  */
751 static int idpf_rx_buf_alloc_all(struct idpf_buf_queue *rxbufq)
752 {
753 	int err = 0;
754 
755 	if (idpf_queue_has(HSPLIT_EN, rxbufq)) {
756 		err = idpf_rx_hdr_buf_alloc_all(rxbufq);
757 		if (err)
758 			goto rx_buf_alloc_all_out;
759 	}
760 
761 	/* Allocate buffers to be given to HW. */
762 	if (!idpf_rx_post_init_bufs(rxbufq, IDPF_RX_BUFQ_WORKING_SET(rxbufq)))
763 		err = -ENOMEM;
764 
765 rx_buf_alloc_all_out:
766 	if (err)
767 		idpf_rx_buf_rel_bufq(rxbufq);
768 
769 	return err;
770 }
771 
772 /**
773  * idpf_rx_bufs_init - Initialize page pool, allocate rx bufs, and post to HW
774  * @bufq: buffer queue to create page pool for
775  * @type: type of Rx buffers to allocate
776  *
777  * Returns 0 on success, negative on failure
778  */
779 static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq,
780 			     enum libeth_fqe_type type)
781 {
782 	struct libeth_fq fq = {
783 		.truesize	= bufq->truesize,
784 		.count		= bufq->desc_count,
785 		.type		= type,
786 		.hsplit		= idpf_queue_has(HSPLIT_EN, bufq),
787 		.nid		= idpf_q_vector_to_mem(bufq->q_vector),
788 	};
789 	int ret;
790 
791 	ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi);
792 	if (ret)
793 		return ret;
794 
795 	bufq->pp = fq.pp;
796 	bufq->buf = fq.fqes;
797 	bufq->truesize = fq.truesize;
798 	bufq->rx_buf_size = fq.buf_len;
799 
800 	return idpf_rx_buf_alloc_all(bufq);
801 }
802 
803 /**
804  * idpf_rx_bufs_init_all - Initialize all RX bufs
805  * @vport: virtual port struct
806  *
807  * Returns 0 on success, negative on failure
808  */
809 int idpf_rx_bufs_init_all(struct idpf_vport *vport)
810 {
811 	bool split = idpf_is_queue_model_split(vport->rxq_model);
812 	int i, j, err;
813 
814 	for (i = 0; i < vport->num_rxq_grp; i++) {
815 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
816 		u32 truesize = 0;
817 
818 		/* Allocate bufs for the rxq itself in singleq */
819 		if (!split) {
820 			int num_rxq = rx_qgrp->singleq.num_rxq;
821 
822 			for (j = 0; j < num_rxq; j++) {
823 				struct idpf_rx_queue *q;
824 
825 				q = rx_qgrp->singleq.rxqs[j];
826 				err = idpf_rx_bufs_init_singleq(q);
827 				if (err)
828 					return err;
829 			}
830 
831 			continue;
832 		}
833 
834 		/* Otherwise, allocate bufs for the buffer queues */
835 		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
836 			enum libeth_fqe_type type;
837 			struct idpf_buf_queue *q;
838 
839 			q = &rx_qgrp->splitq.bufq_sets[j].bufq;
840 			q->truesize = truesize;
841 
842 			type = truesize ? LIBETH_FQE_SHORT : LIBETH_FQE_MTU;
843 
844 			err = idpf_rx_bufs_init(q, type);
845 			if (err)
846 				return err;
847 
848 			truesize = q->truesize >> 1;
849 		}
850 	}
851 
852 	return 0;
853 }
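
/* Resulting splitq layout (an illustration; actual sizes are chosen by
 * libeth): bufq 0 is created as LIBETH_FQE_MTU with some truesize T, and each
 * subsequent bufq in the group is created as LIBETH_FQE_SHORT with half the
 * previous queue's truesize, so every rxq can pick between large and small
 * buffers.
 */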
854 
855 /**
856  * idpf_rx_desc_alloc - Allocate queue Rx resources
857  * @vport: vport to allocate resources for
858  * @rxq: Rx queue for which the resources are setup
859  *
860  * Returns 0 on success, negative on failure
861  */
862 static int idpf_rx_desc_alloc(const struct idpf_vport *vport,
863 			      struct idpf_rx_queue *rxq)
864 {
865 	struct device *dev = &vport->adapter->pdev->dev;
866 
867 	rxq->size = rxq->desc_count * sizeof(union virtchnl2_rx_desc);
868 
869 	/* Allocate descriptors, rounding the size up to the nearest 4K */
870 	rxq->size = ALIGN(rxq->size, 4096);
871 	rxq->desc_ring = dmam_alloc_coherent(dev, rxq->size,
872 					     &rxq->dma, GFP_KERNEL);
873 	if (!rxq->desc_ring) {
874 		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%u\n",
875 			rxq->size);
876 		return -ENOMEM;
877 	}
878 
879 	rxq->next_to_alloc = 0;
880 	rxq->next_to_clean = 0;
881 	rxq->next_to_use = 0;
882 	idpf_queue_set(GEN_CHK, rxq);
883 
884 	return 0;
885 }
886 
887 /**
888  * idpf_bufq_desc_alloc - Allocate buffer queue descriptor ring
889  * @vport: vport to allocate resources for
890  * @bufq: buffer queue for which the resources are set up
891  *
892  * Return: 0 on success, -ENOMEM on failure.
893  */
894 static int idpf_bufq_desc_alloc(const struct idpf_vport *vport,
895 				struct idpf_buf_queue *bufq)
896 {
897 	struct device *dev = &vport->adapter->pdev->dev;
898 
899 	bufq->size = array_size(bufq->desc_count, sizeof(*bufq->split_buf));
900 
901 	bufq->split_buf = dma_alloc_coherent(dev, bufq->size, &bufq->dma,
902 					     GFP_KERNEL);
903 	if (!bufq->split_buf)
904 		return -ENOMEM;
905 
906 	bufq->next_to_alloc = 0;
907 	bufq->next_to_clean = 0;
908 	bufq->next_to_use = 0;
909 
910 	idpf_queue_set(GEN_CHK, bufq);
911 
912 	return 0;
913 }
914 
915 /**
916  * idpf_rx_desc_alloc_all - allocate all RX queues resources
917  * @vport: virtual port structure
918  *
919  * Returns 0 on success, negative on failure
920  */
921 static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
922 {
923 	struct idpf_rxq_group *rx_qgrp;
924 	int i, j, err;
925 	u16 num_rxq;
926 
927 	for (i = 0; i < vport->num_rxq_grp; i++) {
928 		rx_qgrp = &vport->rxq_grps[i];
929 		if (idpf_is_queue_model_split(vport->rxq_model))
930 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
931 		else
932 			num_rxq = rx_qgrp->singleq.num_rxq;
933 
934 		for (j = 0; j < num_rxq; j++) {
935 			struct idpf_rx_queue *q;
936 
937 			if (idpf_is_queue_model_split(vport->rxq_model))
938 				q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
939 			else
940 				q = rx_qgrp->singleq.rxqs[j];
941 
942 			err = idpf_rx_desc_alloc(vport, q);
943 			if (err) {
944 				pci_err(vport->adapter->pdev,
945 					"Memory allocation for Rx Queue %u failed\n",
946 					i);
947 				goto err_out;
948 			}
949 		}
950 
951 		if (!idpf_is_queue_model_split(vport->rxq_model))
952 			continue;
953 
954 		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
955 			struct idpf_buf_queue *q;
956 
957 			q = &rx_qgrp->splitq.bufq_sets[j].bufq;
958 
959 			err = idpf_bufq_desc_alloc(vport, q);
960 			if (err) {
961 				pci_err(vport->adapter->pdev,
962 					"Memory allocation for Rx Buffer Queue %u failed\n",
963 					i);
964 				goto err_out;
965 			}
966 		}
967 	}
968 
969 	return 0;
970 
971 err_out:
972 	idpf_rx_desc_rel_all(vport);
973 
974 	return err;
975 }
976 
977 /**
978  * idpf_txq_group_rel - Release all resources for txq groups
979  * @vport: vport to release txq groups on
980  */
981 static void idpf_txq_group_rel(struct idpf_vport *vport)
982 {
983 	bool split, flow_sch_en;
984 	int i, j;
985 
986 	if (!vport->txq_grps)
987 		return;
988 
989 	split = idpf_is_queue_model_split(vport->txq_model);
990 	flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
991 				       VIRTCHNL2_CAP_SPLITQ_QSCHED);
992 
993 	for (i = 0; i < vport->num_txq_grp; i++) {
994 		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
995 
996 		for (j = 0; j < txq_grp->num_txq; j++) {
997 			kfree(txq_grp->txqs[j]);
998 			txq_grp->txqs[j] = NULL;
999 		}
1000 
1001 		if (!split)
1002 			continue;
1003 
1004 		kfree(txq_grp->complq);
1005 		txq_grp->complq = NULL;
1006 
1007 		if (flow_sch_en)
1008 			kfree(txq_grp->stashes);
1009 	}
1010 	kfree(vport->txq_grps);
1011 	vport->txq_grps = NULL;
1012 }
1013 
1014 /**
1015  * idpf_rxq_sw_queue_rel - Release software queue resources
1016  * @rx_qgrp: rx queue group with software queues
1017  */
1018 static void idpf_rxq_sw_queue_rel(struct idpf_rxq_group *rx_qgrp)
1019 {
1020 	int i, j;
1021 
1022 	for (i = 0; i < rx_qgrp->vport->num_bufqs_per_qgrp; i++) {
1023 		struct idpf_bufq_set *bufq_set = &rx_qgrp->splitq.bufq_sets[i];
1024 
1025 		for (j = 0; j < bufq_set->num_refillqs; j++) {
1026 			kfree(bufq_set->refillqs[j].ring);
1027 			bufq_set->refillqs[j].ring = NULL;
1028 		}
1029 		kfree(bufq_set->refillqs);
1030 		bufq_set->refillqs = NULL;
1031 	}
1032 }
1033 
1034 /**
1035  * idpf_rxq_group_rel - Release all resources for rxq groups
1036  * @vport: vport to release rxq groups on
1037  */
1038 static void idpf_rxq_group_rel(struct idpf_vport *vport)
1039 {
1040 	int i;
1041 
1042 	if (!vport->rxq_grps)
1043 		return;
1044 
1045 	for (i = 0; i < vport->num_rxq_grp; i++) {
1046 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1047 		u16 num_rxq;
1048 		int j;
1049 
1050 		if (idpf_is_queue_model_split(vport->rxq_model)) {
1051 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
1052 			for (j = 0; j < num_rxq; j++) {
1053 				kfree(rx_qgrp->splitq.rxq_sets[j]);
1054 				rx_qgrp->splitq.rxq_sets[j] = NULL;
1055 			}
1056 
1057 			idpf_rxq_sw_queue_rel(rx_qgrp);
1058 			kfree(rx_qgrp->splitq.bufq_sets);
1059 			rx_qgrp->splitq.bufq_sets = NULL;
1060 		} else {
1061 			num_rxq = rx_qgrp->singleq.num_rxq;
1062 			for (j = 0; j < num_rxq; j++) {
1063 				kfree(rx_qgrp->singleq.rxqs[j]);
1064 				rx_qgrp->singleq.rxqs[j] = NULL;
1065 			}
1066 		}
1067 	}
1068 	kfree(vport->rxq_grps);
1069 	vport->rxq_grps = NULL;
1070 }
1071 
1072 /**
1073  * idpf_vport_queue_grp_rel_all - Release all queue groups
1074  * @vport: vport to release queue groups for
1075  */
1076 static void idpf_vport_queue_grp_rel_all(struct idpf_vport *vport)
1077 {
1078 	idpf_txq_group_rel(vport);
1079 	idpf_rxq_group_rel(vport);
1080 }
1081 
1082 /**
1083  * idpf_vport_queues_rel - Free memory for all queues
1084  * @vport: virtual port
1085  *
1086  * Free the memory allocated for queues associated to a vport
1087  */
1088 void idpf_vport_queues_rel(struct idpf_vport *vport)
1089 {
1090 	idpf_tx_desc_rel_all(vport);
1091 	idpf_rx_desc_rel_all(vport);
1092 	idpf_vport_queue_grp_rel_all(vport);
1093 
1094 	kfree(vport->txqs);
1095 	vport->txqs = NULL;
1096 }
1097 
1098 /**
1099  * idpf_vport_init_fast_path_txqs - Initialize fast path txq array
1100  * @vport: vport to init txqs on
1101  *
1102  * We get a queue index from skb->queue_mapping and need a fast way to
1103  * dereference the queue from its queue group.  This flat array lets us
1104  * pull a txq directly from that index.
1105  *
1106  * Returns 0 on success, negative on failure
1107  */
1108 static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
1109 {
1110 	int i, j, k = 0;
1111 
1112 	vport->txqs = kcalloc(vport->num_txq, sizeof(*vport->txqs),
1113 			      GFP_KERNEL);
1114 
1115 	if (!vport->txqs)
1116 		return -ENOMEM;
1117 
1118 	for (i = 0; i < vport->num_txq_grp; i++) {
1119 		struct idpf_txq_group *tx_grp = &vport->txq_grps[i];
1120 
1121 		for (j = 0; j < tx_grp->num_txq; j++, k++) {
1122 			vport->txqs[k] = tx_grp->txqs[j];
1123 			vport->txqs[k]->idx = k;
1124 		}
1125 	}
1126 
1127 	return 0;
1128 }
1129 
1130 /**
1131  * idpf_vport_init_num_qs - Initialize number of queues
1132  * @vport: vport to initialize queues
1133  * @vport_msg: data to be filled into vport
1134  */
1135 void idpf_vport_init_num_qs(struct idpf_vport *vport,
1136 			    struct virtchnl2_create_vport *vport_msg)
1137 {
1138 	struct idpf_vport_user_config_data *config_data;
1139 	u16 idx = vport->idx;
1140 
1141 	config_data = &vport->adapter->vport_config[idx]->user_config;
1142 	vport->num_txq = le16_to_cpu(vport_msg->num_tx_q);
1143 	vport->num_rxq = le16_to_cpu(vport_msg->num_rx_q);
1144 	/* The number of txqs and rxqs in config data will be zero only in the
1145 	 * driver load path, and we don't update them thereafter
1146 	 */
1147 	if (!config_data->num_req_tx_qs && !config_data->num_req_rx_qs) {
1148 		config_data->num_req_tx_qs = le16_to_cpu(vport_msg->num_tx_q);
1149 		config_data->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q);
1150 	}
1151 
1152 	if (idpf_is_queue_model_split(vport->txq_model))
1153 		vport->num_complq = le16_to_cpu(vport_msg->num_tx_complq);
1154 	if (idpf_is_queue_model_split(vport->rxq_model))
1155 		vport->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq);
1156 
1157 	/* Adjust number of buffer queues per Rx queue group. */
1158 	if (!idpf_is_queue_model_split(vport->rxq_model)) {
1159 		vport->num_bufqs_per_qgrp = 0;
1160 
1161 		return;
1162 	}
1163 
1164 	vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP;
1165 }
1166 
1167 /**
1168  * idpf_vport_calc_num_q_desc - Calculate number of queue descriptors
1169  * @vport: vport whose descriptor counts to calculate
1170  */
1171 void idpf_vport_calc_num_q_desc(struct idpf_vport *vport)
1172 {
1173 	struct idpf_vport_user_config_data *config_data;
1174 	int num_bufqs = vport->num_bufqs_per_qgrp;
1175 	u32 num_req_txq_desc, num_req_rxq_desc;
1176 	u16 idx = vport->idx;
1177 	int i;
1178 
1179 	config_data =  &vport->adapter->vport_config[idx]->user_config;
1180 	num_req_txq_desc = config_data->num_req_txq_desc;
1181 	num_req_rxq_desc = config_data->num_req_rxq_desc;
1182 
1183 	vport->complq_desc_count = 0;
1184 	if (num_req_txq_desc) {
1185 		vport->txq_desc_count = num_req_txq_desc;
1186 		if (idpf_is_queue_model_split(vport->txq_model)) {
1187 			vport->complq_desc_count = num_req_txq_desc;
1188 			if (vport->complq_desc_count < IDPF_MIN_TXQ_COMPLQ_DESC)
1189 				vport->complq_desc_count =
1190 					IDPF_MIN_TXQ_COMPLQ_DESC;
1191 		}
1192 	} else {
1193 		vport->txq_desc_count = IDPF_DFLT_TX_Q_DESC_COUNT;
1194 		if (idpf_is_queue_model_split(vport->txq_model))
1195 			vport->complq_desc_count =
1196 				IDPF_DFLT_TX_COMPLQ_DESC_COUNT;
1197 	}
1198 
1199 	if (num_req_rxq_desc)
1200 		vport->rxq_desc_count = num_req_rxq_desc;
1201 	else
1202 		vport->rxq_desc_count = IDPF_DFLT_RX_Q_DESC_COUNT;
1203 
1204 	for (i = 0; i < num_bufqs; i++) {
1205 		if (!vport->bufq_desc_count[i])
1206 			vport->bufq_desc_count[i] =
1207 				IDPF_RX_BUFQ_DESC_COUNT(vport->rxq_desc_count,
1208 							num_bufqs);
1209 	}
1210 }
1211 
1212 /**
1213  * idpf_vport_calc_total_qs - Calculate total number of queues
1214  * @adapter: private data struct
1215  * @vport_idx: vport idx to retrieve vport pointer
1216  * @vport_msg: message to fill with data
1217  * @max_q: vport max queue info
1218  *
1219  * Return 0 on success, error value on failure.
1220  */
1221 int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx,
1222 			     struct virtchnl2_create_vport *vport_msg,
1223 			     struct idpf_vport_max_q *max_q)
1224 {
1225 	int dflt_splitq_txq_grps = 0, dflt_singleq_txqs = 0;
1226 	int dflt_splitq_rxq_grps = 0, dflt_singleq_rxqs = 0;
1227 	u16 num_req_tx_qs = 0, num_req_rx_qs = 0;
1228 	struct idpf_vport_config *vport_config;
1229 	u16 num_txq_grps, num_rxq_grps;
1230 	u32 num_qs;
1231 
1232 	vport_config = adapter->vport_config[vport_idx];
1233 	if (vport_config) {
1234 		num_req_tx_qs = vport_config->user_config.num_req_tx_qs;
1235 		num_req_rx_qs = vport_config->user_config.num_req_rx_qs;
1236 	} else {
1237 		int num_cpus;
1238 
1239 		/* Restrict num of queues to cpus online as a default
1240 		 * configuration to give best performance. User can always
1241 		 * override that, up to the max number of queues, via ethtool.
1242 		 */
1243 		num_cpus = num_online_cpus();
1244 
1245 		dflt_splitq_txq_grps = min_t(int, max_q->max_txq, num_cpus);
1246 		dflt_singleq_txqs = min_t(int, max_q->max_txq, num_cpus);
1247 		dflt_splitq_rxq_grps = min_t(int, max_q->max_rxq, num_cpus);
1248 		dflt_singleq_rxqs = min_t(int, max_q->max_rxq, num_cpus);
1249 	}
1250 
1251 	if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->txq_model))) {
1252 		num_txq_grps = num_req_tx_qs ? num_req_tx_qs : dflt_splitq_txq_grps;
1253 		vport_msg->num_tx_complq = cpu_to_le16(num_txq_grps *
1254 						       IDPF_COMPLQ_PER_GROUP);
1255 		vport_msg->num_tx_q = cpu_to_le16(num_txq_grps *
1256 						  IDPF_DFLT_SPLITQ_TXQ_PER_GROUP);
1257 	} else {
1258 		num_txq_grps = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
1259 		num_qs = num_txq_grps * (num_req_tx_qs ? num_req_tx_qs :
1260 					 dflt_singleq_txqs);
1261 		vport_msg->num_tx_q = cpu_to_le16(num_qs);
1262 		vport_msg->num_tx_complq = 0;
1263 	}
1264 	if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->rxq_model))) {
1265 		num_rxq_grps = num_req_rx_qs ? num_req_rx_qs : dflt_splitq_rxq_grps;
1266 		vport_msg->num_rx_bufq = cpu_to_le16(num_rxq_grps *
1267 						     IDPF_MAX_BUFQS_PER_RXQ_GRP);
1268 		vport_msg->num_rx_q = cpu_to_le16(num_rxq_grps *
1269 						  IDPF_DFLT_SPLITQ_RXQ_PER_GROUP);
1270 	} else {
1271 		num_rxq_grps = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
1272 		num_qs = num_rxq_grps * (num_req_rx_qs ? num_req_rx_qs :
1273 					 dflt_singleq_rxqs);
1274 		vport_msg->num_rx_q = cpu_to_le16(num_qs);
1275 		vport_msg->num_rx_bufq = 0;
1276 	}
1277 
1278 	return 0;
1279 }
1280 
1281 /**
1282  * idpf_vport_calc_num_q_groups - Calculate number of queue groups
1283  * @vport: vport to calculate q groups for
1284  */
1285 void idpf_vport_calc_num_q_groups(struct idpf_vport *vport)
1286 {
1287 	if (idpf_is_queue_model_split(vport->txq_model))
1288 		vport->num_txq_grp = vport->num_txq;
1289 	else
1290 		vport->num_txq_grp = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
1291 
1292 	if (idpf_is_queue_model_split(vport->rxq_model))
1293 		vport->num_rxq_grp = vport->num_rxq;
1294 	else
1295 		vport->num_rxq_grp = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
1296 }
1297 
1298 /**
1299  * idpf_vport_calc_numq_per_grp - Calculate number of queues per group
1300  * @vport: vport to calculate queues for
1301  * @num_txq: return parameter for number of TX queues
1302  * @num_rxq: return parameter for number of RX queues
1303  */
1304 static void idpf_vport_calc_numq_per_grp(struct idpf_vport *vport,
1305 					 u16 *num_txq, u16 *num_rxq)
1306 {
1307 	if (idpf_is_queue_model_split(vport->txq_model))
1308 		*num_txq = IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;
1309 	else
1310 		*num_txq = vport->num_txq;
1311 
1312 	if (idpf_is_queue_model_split(vport->rxq_model))
1313 		*num_rxq = IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
1314 	else
1315 		*num_rxq = vport->num_rxq;
1316 }
1317 
1318 /**
1319  * idpf_rxq_set_descids - set the descids supported by this queue
1320  * @vport: virtual port data structure
1321  * @q: rx queue for which descids are set
1322  *
1323  */
1324 static void idpf_rxq_set_descids(const struct idpf_vport *vport,
1325 				 struct idpf_rx_queue *q)
1326 {
1327 	if (idpf_is_queue_model_split(vport->rxq_model)) {
1328 		q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
1329 	} else {
1330 		if (vport->base_rxd)
1331 			q->rxdids = VIRTCHNL2_RXDID_1_32B_BASE_M;
1332 		else
1333 			q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
1334 	}
1335 }
1336 
1337 /**
1338  * idpf_txq_group_alloc - Allocate all txq group resources
1339  * @vport: vport to allocate txq groups for
1340  * @num_txq: number of txqs to allocate for each group
1341  *
1342  * Returns 0 on success, negative on failure
1343  */
1344 static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
1345 {
1346 	bool split, flow_sch_en;
1347 	int i;
1348 
1349 	vport->txq_grps = kcalloc(vport->num_txq_grp,
1350 				  sizeof(*vport->txq_grps), GFP_KERNEL);
1351 	if (!vport->txq_grps)
1352 		return -ENOMEM;
1353 
1354 	split = idpf_is_queue_model_split(vport->txq_model);
1355 	flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
1356 				       VIRTCHNL2_CAP_SPLITQ_QSCHED);
1357 
1358 	for (i = 0; i < vport->num_txq_grp; i++) {
1359 		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
1360 		struct idpf_adapter *adapter = vport->adapter;
1361 		struct idpf_txq_stash *stashes;
1362 		int j;
1363 
1364 		tx_qgrp->vport = vport;
1365 		tx_qgrp->num_txq = num_txq;
1366 
1367 		for (j = 0; j < tx_qgrp->num_txq; j++) {
1368 			tx_qgrp->txqs[j] = kzalloc(sizeof(*tx_qgrp->txqs[j]),
1369 						   GFP_KERNEL);
1370 			if (!tx_qgrp->txqs[j])
1371 				goto err_alloc;
1372 		}
1373 
1374 		if (split && flow_sch_en) {
1375 			stashes = kcalloc(num_txq, sizeof(*stashes),
1376 					  GFP_KERNEL);
1377 			if (!stashes)
1378 				goto err_alloc;
1379 
1380 			tx_qgrp->stashes = stashes;
1381 		}
1382 
1383 		for (j = 0; j < tx_qgrp->num_txq; j++) {
1384 			struct idpf_tx_queue *q = tx_qgrp->txqs[j];
1385 
1386 			q->dev = &adapter->pdev->dev;
1387 			q->desc_count = vport->txq_desc_count;
1388 			q->tx_max_bufs = idpf_get_max_tx_bufs(adapter);
1389 			q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter);
1390 			q->netdev = vport->netdev;
1391 			q->txq_grp = tx_qgrp;
1392 
1393 			if (!split) {
1394 				q->clean_budget = vport->compln_clean_budget;
1395 				idpf_queue_assign(CRC_EN, q,
1396 						  vport->crc_enable);
1397 			}
1398 
1399 			if (!flow_sch_en)
1400 				continue;
1401 
1402 			if (split) {
1403 				q->stash = &stashes[j];
1404 				hash_init(q->stash->sched_buf_hash);
1405 			}
1406 
1407 			idpf_queue_set(FLOW_SCH_EN, q);
1408 		}
1409 
1410 		if (!split)
1411 			continue;
1412 
1413 		tx_qgrp->complq = kcalloc(IDPF_COMPLQ_PER_GROUP,
1414 					  sizeof(*tx_qgrp->complq),
1415 					  GFP_KERNEL);
1416 		if (!tx_qgrp->complq)
1417 			goto err_alloc;
1418 
1419 		tx_qgrp->complq->desc_count = vport->complq_desc_count;
1420 		tx_qgrp->complq->txq_grp = tx_qgrp;
1421 		tx_qgrp->complq->netdev = vport->netdev;
1422 		tx_qgrp->complq->clean_budget = vport->compln_clean_budget;
1423 
1424 		if (flow_sch_en)
1425 			idpf_queue_set(FLOW_SCH_EN, tx_qgrp->complq);
1426 	}
1427 
1428 	return 0;
1429 
1430 err_alloc:
1431 	idpf_txq_group_rel(vport);
1432 
1433 	return -ENOMEM;
1434 }
1435 
1436 /**
1437  * idpf_rxq_group_alloc - Allocate all rxq group resources
1438  * @vport: vport to allocate rxq groups for
1439  * @num_rxq: number of rxqs to allocate for each group
1440  *
1441  * Returns 0 on success, negative on failure
1442  */
1443 static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
1444 {
1445 	int i, k, err = 0;
1446 	bool hs;
1447 
1448 	vport->rxq_grps = kcalloc(vport->num_rxq_grp,
1449 				  sizeof(struct idpf_rxq_group), GFP_KERNEL);
1450 	if (!vport->rxq_grps)
1451 		return -ENOMEM;
1452 
1453 	hs = idpf_vport_get_hsplit(vport) == ETHTOOL_TCP_DATA_SPLIT_ENABLED;
1454 
1455 	for (i = 0; i < vport->num_rxq_grp; i++) {
1456 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1457 		int j;
1458 
1459 		rx_qgrp->vport = vport;
1460 		if (!idpf_is_queue_model_split(vport->rxq_model)) {
1461 			rx_qgrp->singleq.num_rxq = num_rxq;
1462 			for (j = 0; j < num_rxq; j++) {
1463 				rx_qgrp->singleq.rxqs[j] =
1464 						kzalloc(sizeof(*rx_qgrp->singleq.rxqs[j]),
1465 							GFP_KERNEL);
1466 				if (!rx_qgrp->singleq.rxqs[j]) {
1467 					err = -ENOMEM;
1468 					goto err_alloc;
1469 				}
1470 			}
1471 			goto skip_splitq_rx_init;
1472 		}
1473 		rx_qgrp->splitq.num_rxq_sets = num_rxq;
1474 
1475 		for (j = 0; j < num_rxq; j++) {
1476 			rx_qgrp->splitq.rxq_sets[j] =
1477 				kzalloc(sizeof(struct idpf_rxq_set),
1478 					GFP_KERNEL);
1479 			if (!rx_qgrp->splitq.rxq_sets[j]) {
1480 				err = -ENOMEM;
1481 				goto err_alloc;
1482 			}
1483 		}
1484 
1485 		rx_qgrp->splitq.bufq_sets = kcalloc(vport->num_bufqs_per_qgrp,
1486 						    sizeof(struct idpf_bufq_set),
1487 						    GFP_KERNEL);
1488 		if (!rx_qgrp->splitq.bufq_sets) {
1489 			err = -ENOMEM;
1490 			goto err_alloc;
1491 		}
1492 
1493 		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
1494 			struct idpf_bufq_set *bufq_set =
1495 				&rx_qgrp->splitq.bufq_sets[j];
1496 			int swq_size = sizeof(struct idpf_sw_queue);
1497 			struct idpf_buf_queue *q;
1498 
1499 			q = &rx_qgrp->splitq.bufq_sets[j].bufq;
1500 			q->desc_count = vport->bufq_desc_count[j];
1501 			q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
1502 
1503 			idpf_queue_assign(HSPLIT_EN, q, hs);
1504 
1505 			bufq_set->num_refillqs = num_rxq;
1506 			bufq_set->refillqs = kcalloc(num_rxq, swq_size,
1507 						     GFP_KERNEL);
1508 			if (!bufq_set->refillqs) {
1509 				err = -ENOMEM;
1510 				goto err_alloc;
1511 			}
1512 			for (k = 0; k < bufq_set->num_refillqs; k++) {
1513 				struct idpf_sw_queue *refillq =
1514 					&bufq_set->refillqs[k];
1515 
1516 				refillq->desc_count =
1517 					vport->bufq_desc_count[j];
1518 				idpf_queue_set(GEN_CHK, refillq);
1519 				idpf_queue_set(RFL_GEN_CHK, refillq);
1520 				refillq->ring = kcalloc(refillq->desc_count,
1521 							sizeof(*refillq->ring),
1522 							GFP_KERNEL);
1523 				if (!refillq->ring) {
1524 					err = -ENOMEM;
1525 					goto err_alloc;
1526 				}
1527 			}
1528 		}
1529 
1530 skip_splitq_rx_init:
1531 		for (j = 0; j < num_rxq; j++) {
1532 			struct idpf_rx_queue *q;
1533 
1534 			if (!idpf_is_queue_model_split(vport->rxq_model)) {
1535 				q = rx_qgrp->singleq.rxqs[j];
1536 				goto setup_rxq;
1537 			}
1538 			q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
1539 			rx_qgrp->splitq.rxq_sets[j]->refillq[0] =
1540 			      &rx_qgrp->splitq.bufq_sets[0].refillqs[j];
1541 			if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP)
1542 				rx_qgrp->splitq.rxq_sets[j]->refillq[1] =
1543 				      &rx_qgrp->splitq.bufq_sets[1].refillqs[j];
1544 
1545 			idpf_queue_assign(HSPLIT_EN, q, hs);
1546 
1547 setup_rxq:
1548 			q->desc_count = vport->rxq_desc_count;
1549 			q->rx_ptype_lkup = vport->rx_ptype_lkup;
1550 			q->netdev = vport->netdev;
1551 			q->bufq_sets = rx_qgrp->splitq.bufq_sets;
1552 			q->idx = (i * num_rxq) + j;
1553 			q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
1554 			q->rx_max_pkt_size = vport->netdev->mtu +
1555 							LIBETH_RX_LL_LEN;
1556 			idpf_rxq_set_descids(vport, q);
1557 		}
1558 	}
1559 
1560 err_alloc:
1561 	if (err)
1562 		idpf_rxq_group_rel(vport);
1563 
1564 	return err;
1565 }
1566 
1567 /**
1568  * idpf_vport_queue_grp_alloc_all - Allocate all queue groups/resources
1569  * @vport: vport with qgrps to allocate
1570  *
1571  * Returns 0 on success, negative on failure
1572  */
1573 static int idpf_vport_queue_grp_alloc_all(struct idpf_vport *vport)
1574 {
1575 	u16 num_txq, num_rxq;
1576 	int err;
1577 
1578 	idpf_vport_calc_numq_per_grp(vport, &num_txq, &num_rxq);
1579 
1580 	err = idpf_txq_group_alloc(vport, num_txq);
1581 	if (err)
1582 		goto err_out;
1583 
1584 	err = idpf_rxq_group_alloc(vport, num_rxq);
1585 	if (err)
1586 		goto err_out;
1587 
1588 	return 0;
1589 
1590 err_out:
1591 	idpf_vport_queue_grp_rel_all(vport);
1592 
1593 	return err;
1594 }
1595 
1596 /**
1597  * idpf_vport_queues_alloc - Allocate memory for all queues
1598  * @vport: virtual port
1599  *
1600  * Allocate memory for queues associated with a vport.  Returns 0 on success,
1601  * negative on failure.
1602  */
1603 int idpf_vport_queues_alloc(struct idpf_vport *vport)
1604 {
1605 	int err;
1606 
1607 	err = idpf_vport_queue_grp_alloc_all(vport);
1608 	if (err)
1609 		goto err_out;
1610 
1611 	err = idpf_tx_desc_alloc_all(vport);
1612 	if (err)
1613 		goto err_out;
1614 
1615 	err = idpf_rx_desc_alloc_all(vport);
1616 	if (err)
1617 		goto err_out;
1618 
1619 	err = idpf_vport_init_fast_path_txqs(vport);
1620 	if (err)
1621 		goto err_out;
1622 
1623 	return 0;
1624 
1625 err_out:
1626 	idpf_vport_queues_rel(vport);
1627 
1628 	return err;
1629 }
1630 
1631 /**
1632  * idpf_tx_handle_sw_marker - Handle queue marker packet
1633  * @tx_q: tx queue to handle software marker
1634  */
1635 static void idpf_tx_handle_sw_marker(struct idpf_tx_queue *tx_q)
1636 {
1637 	struct idpf_netdev_priv *priv = netdev_priv(tx_q->netdev);
1638 	struct idpf_vport *vport = priv->vport;
1639 	int i;
1640 
1641 	idpf_queue_clear(SW_MARKER, tx_q);
1642 	/* Hardware must write marker packets to all queues associated with
1643 	 * completion queues. So check if all queues received marker packets
1644 	 */
1645 	for (i = 0; i < vport->num_txq; i++)
1646 		/* If we're still waiting on any other TXQ marker completions,
1647 		 * just return now since we cannot wake up the marker_wq yet.
1648 		 */
1649 		if (idpf_queue_has(SW_MARKER, vport->txqs[i]))
1650 			return;
1651 
1652 	/* Drain complete */
1653 	set_bit(IDPF_VPORT_SW_MARKER, vport->flags);
1654 	wake_up(&vport->sw_marker_wq);
1655 }
1656 
1657 /**
1658  * idpf_tx_clean_stashed_bufs - clean bufs that were stored for
1659  * out of order completions
1660  * @txq: queue to clean
1661  * @compl_tag: completion tag of packet to clean (from completion descriptor)
1662  * @cleaned: pointer to stats struct to track cleaned packets/bytes
1663  * @budget: Used to determine if we are in netpoll
1664  */
1665 static void idpf_tx_clean_stashed_bufs(struct idpf_tx_queue *txq,
1666 				       u16 compl_tag,
1667 				       struct libeth_sq_napi_stats *cleaned,
1668 				       int budget)
1669 {
1670 	struct idpf_tx_stash *stash;
1671 	struct hlist_node *tmp_buf;
1672 	struct libeth_cq_pp cp = {
1673 		.dev	= txq->dev,
1674 		.ss	= cleaned,
1675 		.napi	= budget,
1676 	};
1677 
1678 	/* Buffer completion */
1679 	hash_for_each_possible_safe(txq->stash->sched_buf_hash, stash, tmp_buf,
1680 				    hlist, compl_tag) {
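		/* A bucket can also hold entries whose tags merely collide in
		 * the hash; free only those that carry this exact tag.
		 */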
1681 		if (unlikely(idpf_tx_buf_compl_tag(&stash->buf) != compl_tag))
1682 			continue;
1683 
1684 		hash_del(&stash->hlist);
1685 		libeth_tx_complete(&stash->buf, &cp);
1686 
1687 		/* Push shadow buf back onto stack */
1688 		idpf_buf_lifo_push(&txq->stash->buf_stack, stash);
1689 	}
1690 }
1691 
1692 /**
1693  * idpf_stash_flow_sch_buffers - store buffer parameters to be freed at a
1694  * later time (only relevant for flow scheduling mode)
1695  * @txq: Tx queue to clean
1696  * @tx_buf: buffer to store
1697  */
1698 static int idpf_stash_flow_sch_buffers(struct idpf_tx_queue *txq,
1699 				       struct idpf_tx_buf *tx_buf)
1700 {
1701 	struct idpf_tx_stash *stash;
1702 
1703 	if (unlikely(tx_buf->type <= LIBETH_SQE_CTX))
1704 		return 0;
1705 
1706 	stash = idpf_buf_lifo_pop(&txq->stash->buf_stack);
1707 	if (unlikely(!stash)) {
1708 		net_err_ratelimited("%s: No out-of-order TX buffers left!\n",
1709 				    netdev_name(txq->netdev));
1710 
1711 		return -ENOMEM;
1712 	}
1713 
1714 	/* Store buffer params in shadow buffer */
1715 	stash->buf.skb = tx_buf->skb;
1716 	stash->buf.bytes = tx_buf->bytes;
1717 	stash->buf.packets = tx_buf->packets;
1718 	stash->buf.type = tx_buf->type;
1719 	stash->buf.nr_frags = tx_buf->nr_frags;
1720 	dma_unmap_addr_set(&stash->buf, dma, dma_unmap_addr(tx_buf, dma));
1721 	dma_unmap_len_set(&stash->buf, len, dma_unmap_len(tx_buf, len));
1722 	idpf_tx_buf_compl_tag(&stash->buf) = idpf_tx_buf_compl_tag(tx_buf);
1723 
1724 	/* Add buffer to buf_hash table to be freed later */
1725 	hash_add(txq->stash->sched_buf_hash, &stash->hlist,
1726 		 idpf_tx_buf_compl_tag(&stash->buf));
1727 
1728 	tx_buf->type = LIBETH_SQE_EMPTY;
1729 
1730 	return 0;
1731 }
1732 
1733 #define idpf_tx_splitq_clean_bump_ntc(txq, ntc, desc, buf)	\
1734 do {								\
1735 	if (unlikely(++(ntc) == (txq)->desc_count)) {		\
1736 		ntc = 0;					\
1737 		buf = (txq)->tx_buf;				\
1738 		desc = &(txq)->flex_tx[0];			\
1739 	} else {						\
1740 		(buf)++;					\
1741 		(desc)++;					\
1742 	}							\
1743 } while (0)
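
/* On wrap-around the cursors are re-seeded from the ring base rather than
 * being incremented past the end of the flex_tx and tx_buf arrays.
 */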
1744 
1745 /**
1746  * idpf_tx_splitq_clean - Reclaim resources from buffer queue
1747  * @tx_q: Tx queue to clean
1748  * @end: queue index until which it should be cleaned
1749  * @napi_budget: Used to determine if we are in netpoll
1750  * @cleaned: pointer to stats struct to track cleaned packets/bytes
1751  * @descs_only: true if queue is using flow-based scheduling and should
1752  * not clean buffers at this time
1753  *
1754  * Cleans the queue descriptor ring. If the queue is using queue-based
1755  * scheduling, the buffers will be cleaned as well. If the queue is using
1756  * flow-based scheduling, only the descriptors are cleaned at this time.
1757  * Separate packet completion events will be reported on the completion queue,
1758  * and the buffers will be cleaned separately. The stats are not updated from
1759  * this function when using flow-based scheduling.
1760  *
1761  * Furthermore, in flow scheduling mode, check to make sure there are enough
1762  * reserve buffers to stash the packet. If there are not, return early, which
1763  * will leave next_to_clean pointing to the packet that failed to be stashed.
1764  *
1765  * Return: false in the scenario above, true otherwise.
1766  */
1767 static bool idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
1768 				 int napi_budget,
1769 				 struct libeth_sq_napi_stats *cleaned,
1770 				 bool descs_only)
1771 {
1772 	union idpf_tx_flex_desc *next_pending_desc = NULL;
1773 	union idpf_tx_flex_desc *tx_desc;
1774 	u32 ntc = tx_q->next_to_clean;
1775 	struct libeth_cq_pp cp = {
1776 		.dev	= tx_q->dev,
1777 		.ss	= cleaned,
1778 		.napi	= napi_budget,
1779 	};
1780 	struct idpf_tx_buf *tx_buf;
1781 	bool clean_complete = true;
1782 
1783 	tx_desc = &tx_q->flex_tx[ntc];
1784 	next_pending_desc = &tx_q->flex_tx[end];
1785 	tx_buf = &tx_q->tx_buf[ntc];
1786 
1787 	while (tx_desc != next_pending_desc) {
1788 		u32 eop_idx;
1789 
1790 		/* If this entry in the ring was used as a context descriptor,
1791 		 * its corresponding entry in the buffer ring is reserved. We
1792 		 * can skip this descriptor since there is no buffer to clean.
1793 		 */
1794 		if (tx_buf->type <= LIBETH_SQE_CTX)
1795 			goto fetch_next_txq_desc;
1796 
1797 		if (unlikely(tx_buf->type != LIBETH_SQE_SKB))
1798 			break;
1799 
1800 		eop_idx = tx_buf->rs_idx;
1801 
1802 		if (descs_only) {
1803 			if (IDPF_TX_BUF_RSV_UNUSED(tx_q) < tx_buf->nr_frags) {
1804 				clean_complete = false;
1805 				goto tx_splitq_clean_out;
1806 			}
1807 
1808 			idpf_stash_flow_sch_buffers(tx_q, tx_buf);
1809 
1810 			while (ntc != eop_idx) {
1811 				idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
1812 							      tx_desc, tx_buf);
1813 				idpf_stash_flow_sch_buffers(tx_q, tx_buf);
1814 			}
1815 		} else {
1816 			libeth_tx_complete(tx_buf, &cp);
1817 
1818 			/* unmap remaining buffers */
1819 			while (ntc != eop_idx) {
1820 				idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
1821 							      tx_desc, tx_buf);
1822 
1823 				/* unmap any remaining paged data */
1824 				libeth_tx_complete(tx_buf, &cp);
1825 			}
1826 		}
1827 
1828 fetch_next_txq_desc:
1829 		idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, tx_desc, tx_buf);
1830 	}
1831 
1832 tx_splitq_clean_out:
1833 	tx_q->next_to_clean = ntc;
1834 
1835 	return clean_complete;
1836 }
1837 
1838 #define idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, buf)	\
1839 do {							\
1840 	(buf)++;					\
1841 	(ntc)++;					\
1842 	if (unlikely((ntc) == (txq)->desc_count)) {	\
1843 		buf = (txq)->tx_buf;			\
1844 		ntc = 0;				\
1845 	}						\
1846 } while (0)
1847 
1848 /**
1849  * idpf_tx_clean_buf_ring - clean flow scheduling TX queue buffers
1850  * @txq: queue to clean
1851  * @compl_tag: completion tag of packet to clean (from completion descriptor)
1852  * @cleaned: pointer to stats struct to track cleaned packets/bytes
1853  * @budget: Used to determine if we are in netpoll
1854  *
1855  * Cleans all buffers associated with the input completion tag either from the
1856  * TX buffer ring or from the hash table if the buffers were previously
1857  * stashed. The byte/segment counts are added to @cleaned. Returns false if
1858  * no on-ring buffers match @compl_tag (they were stashed), true otherwise.
1859  */
1860 static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag,
1861 				   struct libeth_sq_napi_stats *cleaned,
1862 				   int budget)
1863 {
1864 	u16 idx = compl_tag & txq->compl_tag_bufid_m;
1865 	struct idpf_tx_buf *tx_buf = NULL;
1866 	struct libeth_cq_pp cp = {
1867 		.dev	= txq->dev,
1868 		.ss	= cleaned,
1869 		.napi	= budget,
1870 	};
1871 	u16 ntc, orig_idx = idx;
1872 
1873 	tx_buf = &txq->tx_buf[idx];
1874 
1875 	if (unlikely(tx_buf->type <= LIBETH_SQE_CTX ||
1876 		     idpf_tx_buf_compl_tag(tx_buf) != compl_tag))
1877 		return false;
1878 
1879 	if (tx_buf->type == LIBETH_SQE_SKB)
1880 		libeth_tx_complete(tx_buf, &cp);
1881 
1882 	idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
1883 
1884 	while (idpf_tx_buf_compl_tag(tx_buf) == compl_tag) {
1885 		libeth_tx_complete(tx_buf, &cp);
1886 		idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
1887 	}
1888 
1889 	/*
1890 	 * It's possible the packet we just cleaned was an out of order
1891 	 * completion, which means we can stash the buffers starting from
1892 	 * the original next_to_clean and reuse the descriptors. We need
1893 	 * to compare the descriptor ring next_to_clean packet's "first" buffer
1894 	 * to the "first" buffer of the packet we just cleaned to determine if
1895 	 * this is the case. However, next_to_clean can point to either a
1896 	 * reserved buffer that corresponds to a context descriptor used for the
1897 	 * next_to_clean packet (TSO packet) or the "first" buffer (single
1898 	 * packet). The orig_idx from the packet we just cleaned will always
1899 	 * point to the "first" buffer. If next_to_clean points to a reserved
1900 	 * buffer, let's bump ntc once and start the comparison from there.
1901 	 */
1902 	ntc = txq->next_to_clean;
1903 	tx_buf = &txq->tx_buf[ntc];
1904 
1905 	if (tx_buf->type == LIBETH_SQE_CTX)
1906 		idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, tx_buf);
1907 
1908 	/*
1909 	 * If ntc still points to a different "first" buffer, clean the
1910 	 * descriptor ring and stash all of the buffers for later cleaning. If
1911 	 * we cannot stash all of the buffers, next_to_clean will point to the
1912 	 * "first" buffer of the packet that could not be stashed and cleaning
1913 	 * will start there next time.
1914 	 */
1915 	if (unlikely(tx_buf != &txq->tx_buf[orig_idx] &&
1916 		     !idpf_tx_splitq_clean(txq, orig_idx, budget, cleaned,
1917 					   true)))
1918 		return true;
1919 
1920 	/*
1921 	 * Otherwise, update next_to_clean to reflect the cleaning that was
1922 	 * done above.
1923 	 */
1924 	txq->next_to_clean = idx;
1925 
1926 	return true;
1927 }
1928 
1929 /**
1930  * idpf_tx_handle_rs_completion - clean a single packet and all of its buffers
1931  * whether on the buffer ring or in the hash table
1932  * @txq: Tx ring to clean
1933  * @desc: pointer to completion queue descriptor to extract completion
1934  * information from
1935  * @cleaned: pointer to stats struct to track cleaned packets/bytes
1936  * @budget: Used to determine if we are in netpoll
1937  *
1938  * The packet/byte counts cleaned are accumulated into @cleaned.
1939  */
1940 static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq,
1941 					 struct idpf_splitq_tx_compl_desc *desc,
1942 					 struct libeth_sq_napi_stats *cleaned,
1943 					 int budget)
1944 {
1945 	u16 compl_tag;
1946 
1947 	if (!idpf_queue_has(FLOW_SCH_EN, txq)) {
1948 		u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head);
1949 
1950 		idpf_tx_splitq_clean(txq, head, budget, cleaned, false);
1951 		return;
1952 	}
1953 
1954 	compl_tag = le16_to_cpu(desc->q_head_compl_tag.compl_tag);
1955 
1956 	/* If we didn't clean anything on the ring, this packet must be
1957 	 * in the hash table. Go clean it there.
1958 	 */
1959 	if (!idpf_tx_clean_buf_ring(txq, compl_tag, cleaned, budget))
1960 		idpf_tx_clean_stashed_bufs(txq, compl_tag, cleaned, budget);
1961 }
1962 
1963 /**
1964  * idpf_tx_clean_complq - Reclaim resources on completion queue
1965  * @complq: Tx ring to clean
1966  * @budget: Used to determine if we are in netpoll
1967  * @cleaned: returns number of packets cleaned
1968  *
1969  * Returns true if there's any budget left (i.e. the clean is finished)
1970  */
1971 static bool idpf_tx_clean_complq(struct idpf_compl_queue *complq, int budget,
1972 				 int *cleaned)
1973 {
1974 	struct idpf_splitq_tx_compl_desc *tx_desc;
1975 	s16 ntc = complq->next_to_clean;
1976 	struct idpf_netdev_priv *np;
1977 	unsigned int complq_budget;
1978 	bool complq_ok = true;
1979 	int i;
1980 
1981 	complq_budget = complq->clean_budget;
1982 	tx_desc = &complq->comp[ntc];
1983 	ntc -= complq->desc_count;
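	/* ntc is kept as a negative offset from the end of the ring so that
	 * the wrap check in the loop below is a simple test against zero;
	 * the real index is restored by adding desc_count back at the end.
	 */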
1984 
1985 	do {
1986 		struct libeth_sq_napi_stats cleaned_stats = { };
1987 		struct idpf_tx_queue *tx_q;
1988 		int rel_tx_qid;
1989 		u16 hw_head;
1990 		u8 ctype;	/* completion type */
1991 		u16 gen;
1992 
1993 		/* if the descriptor isn't done, no work yet to do */
1994 		gen = le16_get_bits(tx_desc->qid_comptype_gen,
1995 				    IDPF_TXD_COMPLQ_GEN_M);
1996 		if (idpf_queue_has(GEN_CHK, complq) != gen)
1997 			break;
1998 
1999 		/* Find necessary info of TX queue to clean buffers */
2000 		rel_tx_qid = le16_get_bits(tx_desc->qid_comptype_gen,
2001 					   IDPF_TXD_COMPLQ_QID_M);
2002 		if (rel_tx_qid >= complq->txq_grp->num_txq ||
2003 		    !complq->txq_grp->txqs[rel_tx_qid]) {
2004 			netdev_err(complq->netdev, "TxQ not found\n");
2005 			goto fetch_next_desc;
2006 		}
2007 		tx_q = complq->txq_grp->txqs[rel_tx_qid];
2008 
2009 		/* Determine completion type */
2010 		ctype = le16_get_bits(tx_desc->qid_comptype_gen,
2011 				      IDPF_TXD_COMPLQ_COMPL_TYPE_M);
2012 		switch (ctype) {
2013 		case IDPF_TXD_COMPLT_RE:
2014 			hw_head = le16_to_cpu(tx_desc->q_head_compl_tag.q_head);
2015 
2016 			idpf_tx_splitq_clean(tx_q, hw_head, budget,
2017 					     &cleaned_stats, true);
2018 			break;
2019 		case IDPF_TXD_COMPLT_RS:
2020 			idpf_tx_handle_rs_completion(tx_q, tx_desc,
2021 						     &cleaned_stats, budget);
2022 			break;
2023 		case IDPF_TXD_COMPLT_SW_MARKER:
2024 			idpf_tx_handle_sw_marker(tx_q);
2025 			break;
2026 		default:
2027 			netdev_err(tx_q->netdev,
2028 				   "Unknown TX completion type: %d\n", ctype);
2029 			goto fetch_next_desc;
2030 		}
2031 
2032 		u64_stats_update_begin(&tx_q->stats_sync);
2033 		u64_stats_add(&tx_q->q_stats.packets, cleaned_stats.packets);
2034 		u64_stats_add(&tx_q->q_stats.bytes, cleaned_stats.bytes);
2035 		tx_q->cleaned_pkts += cleaned_stats.packets;
2036 		tx_q->cleaned_bytes += cleaned_stats.bytes;
2037 		complq->num_completions++;
2038 		u64_stats_update_end(&tx_q->stats_sync);
2039 
2040 fetch_next_desc:
2041 		tx_desc++;
2042 		ntc++;
2043 		if (unlikely(!ntc)) {
2044 			ntc -= complq->desc_count;
2045 			tx_desc = &complq->comp[0];
2046 			idpf_queue_change(GEN_CHK, complq);
2047 		}
2048 
2049 		prefetch(tx_desc);
2050 
2051 		/* update budget accounting */
2052 		complq_budget--;
2053 	} while (likely(complq_budget));
2054 
2055 	/* Store the state of the complq to be used later in deciding if a
2056 	 * TXQ can be started again
2057 	 */
2058 	if (unlikely(IDPF_TX_COMPLQ_PENDING(complq->txq_grp) >
2059 		     IDPF_TX_COMPLQ_OVERFLOW_THRESH(complq)))
2060 		complq_ok = false;
2061 
2062 	np = netdev_priv(complq->netdev);
2063 	for (i = 0; i < complq->txq_grp->num_txq; ++i) {
2064 		struct idpf_tx_queue *tx_q = complq->txq_grp->txqs[i];
2065 		struct netdev_queue *nq;
2066 		bool dont_wake;
2067 
2068 		/* We didn't clean anything on this queue, move along */
2069 		if (!tx_q->cleaned_bytes)
2070 			continue;
2071 
2072 		*cleaned += tx_q->cleaned_pkts;
2073 
2074 		/* Update BQL */
2075 		nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
2076 
2077 		dont_wake = !complq_ok || IDPF_TX_BUF_RSV_LOW(tx_q) ||
2078 			    np->state != __IDPF_VPORT_UP ||
2079 			    !netif_carrier_ok(tx_q->netdev);
2080 		/* Check if the TXQ needs to and can be restarted */
2081 		__netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes,
2082 					   IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
2083 					   dont_wake);
2084 
2085 		/* Reset cleaned stats for the next time this queue is
2086 		 * cleaned
2087 		 */
2088 		tx_q->cleaned_bytes = 0;
2089 		tx_q->cleaned_pkts = 0;
2090 	}
2091 
2092 	ntc += complq->desc_count;
2093 	complq->next_to_clean = ntc;
2094 
2095 	return !!complq_budget;
2096 }
2097 
2098 /**
2099  * idpf_tx_splitq_build_ctb - populate command tag and size for queue
2100  * based scheduling descriptors
2101  * @desc: descriptor to populate
2102  * @params: pointer to tx params struct
2103  * @td_cmd: command to be filled in desc
2104  * @size: size of buffer
2105  */
2106 void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc,
2107 			      struct idpf_tx_splitq_params *params,
2108 			      u16 td_cmd, u16 size)
2109 {
2110 	desc->q.qw1.cmd_dtype =
2111 		le16_encode_bits(params->dtype, IDPF_FLEX_TXD_QW1_DTYPE_M);
2112 	desc->q.qw1.cmd_dtype |=
2113 		le16_encode_bits(td_cmd, IDPF_FLEX_TXD_QW1_CMD_M);
2114 	desc->q.qw1.buf_size = cpu_to_le16(size);
2115 	desc->q.qw1.l2tags.l2tag1 = cpu_to_le16(params->td_tag);
2116 }
2117 
2118 /**
2119  * idpf_tx_splitq_build_flow_desc - populate command tag and size for flow
2120  * scheduling descriptors
2121  * @desc: descriptor to populate
2122  * @params: pointer to tx params struct
2123  * @td_cmd: command to be filled in desc
2124  * @size: size of buffer
2125  */
2126 void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
2127 				    struct idpf_tx_splitq_params *params,
2128 				    u16 td_cmd, u16 size)
2129 {
2130 	desc->flow.qw1.cmd_dtype = (u16)params->dtype | td_cmd;
2131 	desc->flow.qw1.rxr_bufsize = cpu_to_le16((u16)size);
2132 	desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag);
2133 }
2134 
2135 /**
2136  * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
2137  * @tx_q: the queue to be checked
2138  * @descs_needed: number of descriptors required for this packet
2139  *
2140  * Returns 0 if stop is not needed
2141  */
2142 static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
2143 				     unsigned int descs_needed)
2144 {
2145 	if (idpf_tx_maybe_stop_common(tx_q, descs_needed))
2146 		goto out;
2147 
2148 	/* If there are too many outstanding completions expected on the
2149 	 * completion queue, stop the TX queue to give the device some time to
2150 	 * catch up
2151 	 */
2152 	if (unlikely(IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
2153 		     IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq)))
2154 		goto splitq_stop;
2155 
2156 	/* Also check for available bookkeeping buffers; if we are low, stop
2157 	 * the queue to wait for more completions
2158 	 */
2159 	if (unlikely(IDPF_TX_BUF_RSV_LOW(tx_q)))
2160 		goto splitq_stop;
2161 
2162 	return 0;
2163 
2164 splitq_stop:
2165 	netif_stop_subqueue(tx_q->netdev, tx_q->idx);
2166 
2167 out:
2168 	u64_stats_update_begin(&tx_q->stats_sync);
2169 	u64_stats_inc(&tx_q->q_stats.q_busy);
2170 	u64_stats_update_end(&tx_q->stats_sync);
2171 
2172 	return -EBUSY;
2173 }
2174 
2175 /**
2176  * idpf_tx_buf_hw_update - Store the new tail value
2177  * @tx_q: queue to bump
2178  * @val: new tail index
2179  * @xmit_more: more skbs pending
2180  *
2181  * The naming here is special in that 'hw' signals that this function is about
2182  * to do a register write to update our queue status. We know this can only
2183  * mean tail here as HW should be owning head for TX.
2184  */
2185 void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
2186 			   bool xmit_more)
2187 {
2188 	struct netdev_queue *nq;
2189 
2190 	nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
2191 	tx_q->next_to_use = val;
2192 
2193 	if (idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED)) {
2194 		u64_stats_update_begin(&tx_q->stats_sync);
2195 		u64_stats_inc(&tx_q->q_stats.q_busy);
2196 		u64_stats_update_end(&tx_q->stats_sync);
2197 	}
2198 
2199 	/* Force memory writes to complete before letting h/w
2200 	 * know there are new descriptors to fetch.  (Only
2201 	 * applicable for weak-ordered memory model archs,
2202 	 * such as IA-64).
2203 	 */
2204 	wmb();
2205 
2206 	/* notify HW of packet */
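	/* netdev_xmit_more() defers the doorbell to a later frame; if the
	 * stack has stopped the queue in the meantime, no such frame will
	 * arrive, so write the tail immediately in that case.
	 */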
2207 	if (netif_xmit_stopped(nq) || !xmit_more)
2208 		writel(val, tx_q->tail);
2209 }
2210 
2211 /**
2212  * idpf_tx_desc_count_required - calculate number of Tx descriptors needed
2213  * @txq: queue to send buffer on
2214  * @skb: send buffer
2215  *
2216  * Returns number of data descriptors needed for this skb.
2217  */
2218 unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
2219 					 struct sk_buff *skb)
2220 {
2221 	const struct skb_shared_info *shinfo;
2222 	unsigned int count = 0, i;
2223 
2224 	count += !!skb_headlen(skb);
2225 
2226 	if (!skb_is_nonlinear(skb))
2227 		return count;
2228 
2229 	shinfo = skb_shinfo(skb);
2230 	for (i = 0; i < shinfo->nr_frags; i++) {
2231 		unsigned int size;
2232 
2233 		size = skb_frag_size(&shinfo->frags[i]);
2234 
2235 		/* We only need to use the idpf_size_to_txd_count check if the
2236 		 * fragment is going to span multiple descriptors,
2237 		 * i.e. size >= 16K.
2238 		 */
2239 		if (size >= SZ_16K)
2240 			count += idpf_size_to_txd_count(size);
2241 		else
2242 			count++;
2243 	}
2244 
2245 	if (idpf_chk_linearize(skb, txq->tx_max_bufs, count)) {
2246 		if (__skb_linearize(skb))
2247 			return 0;
2248 
2249 		count = idpf_size_to_txd_count(skb->len);
2250 		u64_stats_update_begin(&txq->stats_sync);
2251 		u64_stats_inc(&txq->q_stats.linearize);
2252 		u64_stats_update_end(&txq->stats_sync);
2253 	}
2254 
2255 	return count;
2256 }
2257 
2258 /**
2259  * idpf_tx_dma_map_error - handle TX DMA map errors
2260  * @txq: queue to send buffer on
2261  * @skb: send buffer
2262  * @first: original first buffer info buffer for packet
2263  * @idx: starting point on ring to unwind
2264  */
2265 void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
2266 			   struct idpf_tx_buf *first, u16 idx)
2267 {
2268 	struct libeth_sq_napi_stats ss = { };
2269 	struct libeth_cq_pp cp = {
2270 		.dev	= txq->dev,
2271 		.ss	= &ss,
2272 	};
2273 
2274 	u64_stats_update_begin(&txq->stats_sync);
2275 	u64_stats_inc(&txq->q_stats.dma_map_errs);
2276 	u64_stats_update_end(&txq->stats_sync);
2277 
2278 	/* clear dma mappings for failed tx_buf map */
2279 	for (;;) {
2280 		struct idpf_tx_buf *tx_buf;
2281 
2282 		tx_buf = &txq->tx_buf[idx];
2283 		libeth_tx_complete(tx_buf, &cp);
2284 		if (tx_buf == first)
2285 			break;
2286 		if (idx == 0)
2287 			idx = txq->desc_count;
2288 		idx--;
2289 	}
2290 
2291 	if (skb_is_gso(skb)) {
2292 		union idpf_tx_flex_desc *tx_desc;
2293 
2294 		/* If we failed a DMA mapping for a TSO packet, we will have
2295 		 * used one additional descriptor for a context
2296 		 * descriptor. Reset that here.
2297 		 */
2298 		tx_desc = &txq->flex_tx[idx];
2299 		memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc));
2300 		if (idx == 0)
2301 			idx = txq->desc_count;
2302 		idx--;
2303 	}
2304 
2305 	/* Update tail in case netdev_xmit_more was previously true */
2306 	idpf_tx_buf_hw_update(txq, idx, false);
2307 }
2308 
2309 /**
2310  * idpf_tx_splitq_bump_ntu - adjust NTU and generation
2311  * @txq: the tx ring to wrap
2312  * @ntu: ring index to bump
2313  */
2314 static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_tx_queue *txq, u16 ntu)
2315 {
2316 	ntu++;
2317 
2318 	if (ntu == txq->desc_count) {
2319 		ntu = 0;
2320 		txq->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(txq);
2321 	}
2322 
2323 	return ntu;
2324 }
2325 
2326 /**
2327  * idpf_tx_splitq_map - Build the Tx flex descriptor
2328  * @tx_q: queue to send buffer on
2329  * @params: pointer to splitq params struct
2330  * @first: first buffer info buffer to use
2331  *
2332  * This function loops over the skb data pointed to by *first
2333  * and gets a physical address for each memory location and programs
2334  * it and the length into the transmit flex descriptor.
2335  */
2336 static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
2337 			       struct idpf_tx_splitq_params *params,
2338 			       struct idpf_tx_buf *first)
2339 {
2340 	union idpf_tx_flex_desc *tx_desc;
2341 	unsigned int data_len, size;
2342 	struct idpf_tx_buf *tx_buf;
2343 	u16 i = tx_q->next_to_use;
2344 	struct netdev_queue *nq;
2345 	struct sk_buff *skb;
2346 	skb_frag_t *frag;
2347 	u16 td_cmd = 0;
2348 	dma_addr_t dma;
2349 
2350 	skb = first->skb;
2351 
2352 	td_cmd = params->offload.td_cmd;
2353 
2354 	data_len = skb->data_len;
2355 	size = skb_headlen(skb);
2356 
2357 	tx_desc = &tx_q->flex_tx[i];
2358 
2359 	dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
2360 
2361 	tx_buf = first;
2362 	first->nr_frags = 0;
2363 
2364 	params->compl_tag =
2365 		(tx_q->compl_tag_cur_gen << tx_q->compl_tag_gen_s) | i;
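	/* Tag layout: [ generation | ring index of first buffer ]. The
	 * generation is bumped each time the ring wraps (see
	 * idpf_tx_splitq_bump_ntu()), so a reused index yields a new,
	 * unambiguous tag.
	 */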
2366 
2367 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
2368 		unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
2369 
2370 		if (dma_mapping_error(tx_q->dev, dma))
2371 			return idpf_tx_dma_map_error(tx_q, skb, first, i);
2372 
2373 		first->nr_frags++;
2374 		idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag;
2375 		tx_buf->type = LIBETH_SQE_FRAG;
2376 
2377 		/* record length, and DMA address */
2378 		dma_unmap_len_set(tx_buf, len, size);
2379 		dma_unmap_addr_set(tx_buf, dma, dma);
2380 
2381 		/* buf_addr is in same location for both desc types */
2382 		tx_desc->q.buf_addr = cpu_to_le64(dma);
2383 
2384 		/* The stack can send us fragments that are too large for a
2385 		 * single descriptor i.e. frag size > 16K-1. We will need to
2386 		 * split the fragment across multiple descriptors in this case.
2387 		 * To adhere to HW alignment restrictions, the fragment needs
2388 		 * to be split such that the first chunk ends on a 4K boundary
2389 		 * and all subsequent chunks start on a 4K boundary. We still
2390 		 * want to send as much data as possible though, so our
2391 		 * intermediate descriptor chunk size will be 12K.
2392 		 *
2393 		 * For example, consider a 32K fragment mapped to DMA addr 2600.
2394 		 * ------------------------------------------------------------
2395 		 * |                    frag_size = 32K                       |
2396 		 * ------------------------------------------------------------
2397 		 * |2600		  |16384	    |28672
2398 		 *
2399 		 * 3 descriptors will be used for this fragment. The HW expects
2400 		 * the descriptors to contain the following:
2401 		 * ------------------------------------------------------------
2402 		 * | size = 13784         | size = 12K      | size = 6696     |
2403 		 * | dma = 2600           | dma = 16384     | dma = 28672     |
2404 		 * ------------------------------------------------------------
2405 		 *
2406 		 * We need to first adjust the max_data for the first chunk so
2407 		 * that it ends on a 4K boundary. By negating the value of the
2408 		 * DMA address and taking only the low order bits, we're
2409 		 * effectively calculating
2410 		 *	4K - (DMA addr lower order bits) =
2411 		 *				bytes to next boundary.
2412 		 *
2413 		 * Add that to our base aligned max_data (12K) and we have
2414 		 * our first chunk size. In the example above,
2415 		 *	13784 = 12K + (4096-2600)
2416 		 *
2417 		 * After guaranteeing the first chunk ends on a 4K boundary, we
2418 		 * will give the intermediate descriptors 12K chunks and
2419 		 * whatever is left to the final descriptor. This ensures that
2420 		 * all descriptors used for the remaining chunks of the
2421 		 * fragment start on a 4K boundary and we use as few
2422 		 * descriptors as possible.
2423 		 */
2424 		max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1);
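		/* With the example above: -2600 & 4095 == 1496, so
		 * max_data = 12288 + 1496 = 13784 for the first chunk.
		 */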
2425 		while (unlikely(size > IDPF_TX_MAX_DESC_DATA)) {
2426 			idpf_tx_splitq_build_desc(tx_desc, params, td_cmd,
2427 						  max_data);
2428 
2429 			if (unlikely(++i == tx_q->desc_count)) {
2430 				tx_buf = tx_q->tx_buf;
2431 				tx_desc = &tx_q->flex_tx[0];
2432 				i = 0;
2433 				tx_q->compl_tag_cur_gen =
2434 					IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
2435 			} else {
2436 				tx_buf++;
2437 				tx_desc++;
2438 			}
2439 
2440 			/* Since this packet has a buffer that is going to span
2441 			 * multiple descriptors, it's going to leave holes in
2442 			 * the TX buffer ring. To ensure these holes do not
2443 			 * cause issues in the cleaning routines, we will clear
2444 			 * them of any stale data and assign them the same
2445 			 * completion tag as the current packet. Then when the
2446 			 * packet is being cleaned, the cleaning routines will
2447 			 * simply pass over these holes and finish cleaning the
2448 			 * rest of the packet.
2449 			 */
2450 			tx_buf->type = LIBETH_SQE_EMPTY;
2451 
2452 			/* Adjust the DMA offset and the remaining size of the
2453 			 * fragment.  On the first iteration of this loop,
2454 			 * max_data will be >= 12K and <= 16K-1.  On any
2455 			 * subsequent iteration of this loop, max_data will
2456 			 * always be 12K.
2457 			 */
2458 			dma += max_data;
2459 			size -= max_data;
2460 
2461 			/* Reset max_data since remaining chunks will be 12K
2462 			 * at most
2463 			 */
2464 			max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
2465 
2466 			/* buf_addr is in same location for both desc types */
2467 			tx_desc->q.buf_addr = cpu_to_le64(dma);
2468 		}
2469 
2470 		if (!data_len)
2471 			break;
2472 
2473 		idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
2474 
2475 		if (unlikely(++i == tx_q->desc_count)) {
2476 			tx_buf = tx_q->tx_buf;
2477 			tx_desc = &tx_q->flex_tx[0];
2478 			i = 0;
2479 			tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
2480 		} else {
2481 			tx_buf++;
2482 			tx_desc++;
2483 		}
2484 
2485 		size = skb_frag_size(frag);
2486 		data_len -= size;
2487 
2488 		dma = skb_frag_dma_map(tx_q->dev, frag, 0, size,
2489 				       DMA_TO_DEVICE);
2490 	}
2491 
2492 	/* record SW timestamp if HW timestamp is not available */
2493 	skb_tx_timestamp(skb);
2494 
2495 	first->type = LIBETH_SQE_SKB;
2496 
2497 	/* write last descriptor with RS and EOP bits */
2498 	first->rs_idx = i;
2499 	td_cmd |= params->eop_cmd;
2500 	idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
2501 	i = idpf_tx_splitq_bump_ntu(tx_q, i);
2502 
2503 	tx_q->txq_grp->num_completions_pending++;
2504 
2505 	/* record bytecount for BQL */
2506 	nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
2507 	netdev_tx_sent_queue(nq, first->bytes);
2508 
2509 	idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
2510 }
2511 
2512 /**
2513  * idpf_tso - computes mss and TSO length to prepare for TSO
2514  * @skb: pointer to skb
2515  * @off: pointer to struct that holds offload parameters
2516  *
2517  * Returns error (negative) if TSO was requested but cannot be applied to the
2518  * given skb, 0 if TSO does not apply to the given skb, or 1 otherwise.
2519  */
2520 int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off)
2521 {
2522 	const struct skb_shared_info *shinfo;
2523 	union {
2524 		struct iphdr *v4;
2525 		struct ipv6hdr *v6;
2526 		unsigned char *hdr;
2527 	} ip;
2528 	union {
2529 		struct tcphdr *tcp;
2530 		struct udphdr *udp;
2531 		unsigned char *hdr;
2532 	} l4;
2533 	u32 paylen, l4_start;
2534 	int err;
2535 
2536 	if (!skb_is_gso(skb))
2537 		return 0;
2538 
2539 	err = skb_cow_head(skb, 0);
2540 	if (err < 0)
2541 		return err;
2542 
2543 	shinfo = skb_shinfo(skb);
2544 
2545 	ip.hdr = skb_network_header(skb);
2546 	l4.hdr = skb_transport_header(skb);
2547 
2548 	/* initialize outer IP header fields */
2549 	if (ip.v4->version == 4) {
2550 		ip.v4->tot_len = 0;
2551 		ip.v4->check = 0;
2552 	} else if (ip.v6->version == 6) {
2553 		ip.v6->payload_len = 0;
2554 	}
2555 
2556 	l4_start = skb_transport_offset(skb);
2557 
2558 	/* remove payload length from checksum */
2559 	paylen = skb->len - l4_start;
2560 
2561 	switch (shinfo->gso_type & ~SKB_GSO_DODGY) {
2562 	case SKB_GSO_TCPV4:
2563 	case SKB_GSO_TCPV6:
2564 		csum_replace_by_diff(&l4.tcp->check,
2565 				     (__force __wsum)htonl(paylen));
2566 		off->tso_hdr_len = __tcp_hdrlen(l4.tcp) + l4_start;
2567 		break;
2568 	case SKB_GSO_UDP_L4:
2569 		csum_replace_by_diff(&l4.udp->check,
2570 				     (__force __wsum)htonl(paylen));
2571 		/* compute length of segmentation header */
2572 		off->tso_hdr_len = sizeof(struct udphdr) + l4_start;
2573 		l4.udp->len = htons(shinfo->gso_size + sizeof(struct udphdr));
2574 		break;
2575 	default:
2576 		return -EINVAL;
2577 	}
2578 
2579 	off->tso_len = skb->len - off->tso_hdr_len;
2580 	off->mss = shinfo->gso_size;
2581 	off->tso_segs = shinfo->gso_segs;
2582 
2583 	off->tx_flags |= IDPF_TX_FLAGS_TSO;
2584 
2585 	return 1;
2586 }
2587 
2588 /**
2589  * __idpf_chk_linearize - Check skb is not using too many buffers
2590  * @skb: send buffer
2591  * @max_bufs: maximum number of buffers
2592  *
2593  * For TSO we need to count the TSO header and segment payload separately.  As
2594  * such we need to check cases where we have max_bufs-1 fragments or more as we
2595  * can potentially require max_bufs+1 DMA transactions, 1 for the TSO header, 1
2596  * for the segment payload in the first descriptor, and another max_bufs-1 for
2597  * the fragments.
2598  */
2599 static bool __idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs)
2600 {
2601 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
2602 	const skb_frag_t *frag, *stale;
2603 	int nr_frags, sum;
2604 
2605 	/* no need to check if number of frags is less than max_bufs - 1 */
2606 	nr_frags = shinfo->nr_frags;
2607 	if (nr_frags < (max_bufs - 1))
2608 		return false;
2609 
2610 	/* We need to walk through the list and validate that each group
2611 	 * of max_bufs-2 fragments totals at least gso_size.
2612 	 */
2613 	nr_frags -= max_bufs - 2;
2614 	frag = &shinfo->frags[0];
2615 
2616 	/* Initialize size to the negative value of gso_size minus 1.  We use
2617 	 * this as the worst case scenario in which the frag ahead of us only
2618 	 * provides one byte which is why we are limited to max_bufs-2
2619 	 * descriptors for a single transmit as the header and previous
2620 	 * fragment are already consuming 2 descriptors.
2621 	 */
2622 	sum = 1 - shinfo->gso_size;
2623 
2624 	/* Add size of frags 0 through 4 to create our initial sum */
2625 	sum += skb_frag_size(frag++);
2626 	sum += skb_frag_size(frag++);
2627 	sum += skb_frag_size(frag++);
2628 	sum += skb_frag_size(frag++);
2629 	sum += skb_frag_size(frag++);
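	/* sum now holds the size of frags 0-4 minus (gso_size - 1); the loop
	 * below slides this window one frag at a time, and it must never go
	 * negative: every max_bufs-2 consecutive frags must together carry at
	 * least gso_size bytes, or the skb has to be linearized.
	 */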
2630 
2631 	/* Walk through fragments adding latest fragment, testing it, and
2632 	 * then removing stale fragments from the sum.
2633 	 */
2634 	for (stale = &shinfo->frags[0];; stale++) {
2635 		int stale_size = skb_frag_size(stale);
2636 
2637 		sum += skb_frag_size(frag++);
2638 
2639 		/* The stale fragment may present us with a smaller
2640 		 * descriptor than the actual fragment size. To account
2641 		 * for that we need to remove all the data on the front and
2642 		 * figure out what the remainder would be in the last
2643 		 * descriptor associated with the fragment.
2644 		 */
2645 		if (stale_size > IDPF_TX_MAX_DESC_DATA) {
2646 			int align_pad = -(skb_frag_off(stale)) &
2647 					(IDPF_TX_MAX_READ_REQ_SIZE - 1);
2648 
2649 			sum -= align_pad;
2650 			stale_size -= align_pad;
2651 
2652 			do {
2653 				sum -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
2654 				stale_size -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
2655 			} while (stale_size > IDPF_TX_MAX_DESC_DATA);
2656 		}
2657 
2658 		/* if sum is negative we failed to make sufficient progress */
2659 		if (sum < 0)
2660 			return true;
2661 
2662 		if (!nr_frags--)
2663 			break;
2664 
2665 		sum -= stale_size;
2666 	}
2667 
2668 	return false;
2669 }
2670 
2671 /**
2672  * idpf_chk_linearize - Check if skb exceeds max descriptors per packet
2673  * @skb: send buffer
2674  * @max_bufs: maximum scatter gather buffers for single packet
2675  * @count: number of buffers this packet needs
2676  *
2677  * Make sure we don't exceed maximum scatter gather buffers for a single
2678  * packet. We have to do some special checking around the boundary (max_bufs-1)
2679  * if TSO is on since we need count the TSO header and payload separately.
2680  * E.g.: a packet with 7 fragments can require 9 DMA transactions; 1 for TSO
2681  * header, 1 for segment payload, and then 7 for the fragments.
2682  */
2683 static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
2684 			       unsigned int count)
2685 {
2686 	if (likely(count < max_bufs))
2687 		return false;
2688 	if (skb_is_gso(skb))
2689 		return __idpf_chk_linearize(skb, max_bufs);
2690 
2691 	return count > max_bufs;
2692 }
2693 
2694 /**
2695  * idpf_tx_splitq_get_ctx_desc - grab next desc and update buffer ring
2696  * @txq: queue to put context descriptor on
2697  *
2698  * Since the TX buffer ring mimics the descriptor ring, update the tx buffer
2699  * ring entry to reflect that this index is a context descriptor
2700  */
2701 static struct idpf_flex_tx_ctx_desc *
2702 idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq)
2703 {
2704 	struct idpf_flex_tx_ctx_desc *desc;
2705 	int i = txq->next_to_use;
2706 
2707 	txq->tx_buf[i].type = LIBETH_SQE_CTX;
2708 
2709 	/* grab the next descriptor */
2710 	desc = &txq->flex_ctx[i];
2711 	txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i);
2712 
2713 	return desc;
2714 }
2715 
2716 /**
2717  * idpf_tx_drop_skb - free the SKB and bump tail if necessary
2718  * @tx_q: queue to send buffer on
2719  * @skb: pointer to skb
2720  */
2721 netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb)
2722 {
2723 	u64_stats_update_begin(&tx_q->stats_sync);
2724 	u64_stats_inc(&tx_q->q_stats.skb_drops);
2725 	u64_stats_update_end(&tx_q->stats_sync);
2726 
2727 	idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
2728 
2729 	dev_kfree_skb(skb);
2730 
2731 	return NETDEV_TX_OK;
2732 }
2733 
2734 /**
2735  * idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors
2736  * @skb: send buffer
2737  * @tx_q: queue to send buffer on
2738  *
2739  * Returns NETDEV_TX_OK if sent, else an error code
2740  */
2741 static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
2742 					struct idpf_tx_queue *tx_q)
2743 {
2744 	struct idpf_tx_splitq_params tx_params = { };
2745 	struct idpf_tx_buf *first;
2746 	unsigned int count;
2747 	int tso;
2748 
2749 	count = idpf_tx_desc_count_required(tx_q, skb);
2750 	if (unlikely(!count))
2751 		return idpf_tx_drop_skb(tx_q, skb);
2752 
2753 	tso = idpf_tso(skb, &tx_params.offload);
2754 	if (unlikely(tso < 0))
2755 		return idpf_tx_drop_skb(tx_q, skb);
2756 
2757 	/* Check for splitq specific TX resources */
2758 	count += (IDPF_TX_DESCS_PER_CACHE_LINE + tso);
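	/* tso is 1 when a TSO context descriptor will be used, 0 otherwise;
	 * a cache line worth of descriptors is reserved on top of that as
	 * headroom.
	 */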
2759 	if (idpf_tx_maybe_stop_splitq(tx_q, count)) {
2760 		idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
2761 
2762 		return NETDEV_TX_BUSY;
2763 	}
2764 
2765 	if (tso) {
2766 		/* If tso is needed, set up context desc */
2767 		struct idpf_flex_tx_ctx_desc *ctx_desc =
2768 			idpf_tx_splitq_get_ctx_desc(tx_q);
2769 
2770 		ctx_desc->tso.qw1.cmd_dtype =
2771 				cpu_to_le16(IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX |
2772 					    IDPF_TX_FLEX_CTX_DESC_CMD_TSO);
2773 		ctx_desc->tso.qw0.flex_tlen =
2774 				cpu_to_le32(tx_params.offload.tso_len &
2775 					    IDPF_TXD_FLEX_CTX_TLEN_M);
2776 		ctx_desc->tso.qw0.mss_rt =
2777 				cpu_to_le16(tx_params.offload.mss &
2778 					    IDPF_TXD_FLEX_CTX_MSS_RT_M);
2779 		ctx_desc->tso.qw0.hdr_len = tx_params.offload.tso_hdr_len;
2780 
2781 		u64_stats_update_begin(&tx_q->stats_sync);
2782 		u64_stats_inc(&tx_q->q_stats.lso_pkts);
2783 		u64_stats_update_end(&tx_q->stats_sync);
2784 	}
2785 
2786 	/* record the location of the first descriptor for this packet */
2787 	first = &tx_q->tx_buf[tx_q->next_to_use];
2788 	first->skb = skb;
2789 
2790 	if (tso) {
2791 		first->packets = tx_params.offload.tso_segs;
2792 		first->bytes = skb->len +
2793 			((first->packets - 1) * tx_params.offload.tso_hdr_len);
2794 	} else {
2795 		first->packets = 1;
2796 		first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
2797 	}
2798 
2799 	if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
2800 		tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE;
2801 		tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP;
2802 		/* Set the RE bit to catch any packets that may not have been
2803 		 * stashed during RS completion cleaning. MIN_GAP is set to
2804 		 * MIN_RING size to ensure it will be set at least once each
2805 		 * time around the ring.
2806 		 */
2807 		if (!(tx_q->next_to_use % IDPF_TX_SPLITQ_RE_MIN_GAP)) {
2808 			tx_params.eop_cmd |= IDPF_TXD_FLEX_FLOW_CMD_RE;
2809 			tx_q->txq_grp->num_completions_pending++;
2810 		}
2811 
2812 		if (skb->ip_summed == CHECKSUM_PARTIAL)
2813 			tx_params.offload.td_cmd |= IDPF_TXD_FLEX_FLOW_CMD_CS_EN;
2814 
2815 	} else {
2816 		tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2;
2817 		tx_params.eop_cmd = IDPF_TXD_LAST_DESC_CMD;
2818 
2819 		if (skb->ip_summed == CHECKSUM_PARTIAL)
2820 			tx_params.offload.td_cmd |= IDPF_TX_FLEX_DESC_CMD_CS_EN;
2821 	}
2822 
2823 	idpf_tx_splitq_map(tx_q, &tx_params, first);
2824 
2825 	return NETDEV_TX_OK;
2826 }
2827 
2828 /**
2829  * idpf_tx_start - Selects the right Tx queue to send buffer
2830  * @skb: send buffer
2831  * @netdev: network interface device structure
2832  *
2833  * Returns NETDEV_TX_OK if sent, else an error code
2834  */
2835 netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev)
2836 {
2837 	struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
2838 	struct idpf_tx_queue *tx_q;
2839 
2840 	if (unlikely(skb_get_queue_mapping(skb) >= vport->num_txq)) {
2841 		dev_kfree_skb_any(skb);
2842 
2843 		return NETDEV_TX_OK;
2844 	}
2845 
2846 	tx_q = vport->txqs[skb_get_queue_mapping(skb)];
2847 
2848 	/* hardware can't handle really short frames, hardware padding works
2849 	 * beyond this point
2850 	 */
2851 	if (skb_put_padto(skb, tx_q->tx_min_pkt_len)) {
2852 		idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
2853 
2854 		return NETDEV_TX_OK;
2855 	}
2856 
2857 	if (idpf_is_queue_model_split(vport->txq_model))
2858 		return idpf_tx_splitq_frame(skb, tx_q);
2859 	else
2860 		return idpf_tx_singleq_frame(skb, tx_q);
2861 }
2862 
2863 /**
2864  * idpf_rx_hash - set the hash value in the skb
2865  * @rxq: Rx descriptor ring packet is being transacted on
2866  * @skb: pointer to current skb being populated
2867  * @rx_desc: Receive descriptor
2868  * @decoded: Decoded Rx packet type related fields
2869  */
2870 static void
2871 idpf_rx_hash(const struct idpf_rx_queue *rxq, struct sk_buff *skb,
2872 	     const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
2873 	     struct libeth_rx_pt decoded)
2874 {
2875 	u32 hash;
2876 
2877 	if (!libeth_rx_pt_has_hash(rxq->netdev, decoded))
2878 		return;
2879 
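	/* The 32-bit hash is split across three descriptor fields:
	 * hash1 carries bits 0-15, hash2 bits 16-23, hash3 bits 24-31.
	 */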
2880 	hash = le16_to_cpu(rx_desc->hash1) |
2881 	       (rx_desc->ff2_mirrid_hash2.hash2 << 16) |
2882 	       (rx_desc->hash3 << 24);
2883 
2884 	libeth_rx_pt_set_hash(skb, hash, decoded);
2885 }
2886 
2887 /**
2888  * idpf_rx_csum - Indicate in skb if checksum is good
2889  * @rxq: Rx descriptor ring packet is being transacted on
2890  * @skb: pointer to current skb being populated
2891  * @csum_bits: checksum fields extracted from the descriptor
2892  * @decoded: Decoded Rx packet type related fields
2893  *
2894  * skb->protocol must be set before this function is called
2895  */
2896 static void idpf_rx_csum(struct idpf_rx_queue *rxq, struct sk_buff *skb,
2897 			 struct idpf_rx_csum_decoded csum_bits,
2898 			 struct libeth_rx_pt decoded)
2899 {
2900 	bool ipv4, ipv6;
2901 
2902 	/* check if Rx checksum is enabled */
2903 	if (!libeth_rx_pt_has_checksum(rxq->netdev, decoded))
2904 		return;
2905 
2906 	/* check if HW has decoded the packet and checksum */
2907 	if (unlikely(!csum_bits.l3l4p))
2908 		return;
2909 
2910 	ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
2911 	ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
2912 
2913 	if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe)))
2914 		goto checksum_fail;
2915 
2916 	if (unlikely(ipv6 && csum_bits.ipv6exadd))
2917 		return;
2918 
2919 	/* check for L4 errors and handle packets that were not able to be
2920 	 * checksummed
2921 	 */
2922 	if (unlikely(csum_bits.l4e))
2923 		goto checksum_fail;
2924 
2925 	if (csum_bits.raw_csum_inv ||
2926 	    decoded.inner_prot == LIBETH_RX_PT_INNER_SCTP) {
2927 		skb->ip_summed = CHECKSUM_UNNECESSARY;
2928 		return;
2929 	}
2930 
2931 	skb->csum = csum_unfold((__force __sum16)~swab16(csum_bits.raw_csum));
2932 	skb->ip_summed = CHECKSUM_COMPLETE;
2933 
2934 	return;
2935 
2936 checksum_fail:
2937 	u64_stats_update_begin(&rxq->stats_sync);
2938 	u64_stats_inc(&rxq->q_stats.hw_csum_err);
2939 	u64_stats_update_end(&rxq->stats_sync);
2940 }
2941 
2942 /**
2943  * idpf_rx_splitq_extract_csum_bits - Extract checksum bits from descriptor
2944  * @rx_desc: receive descriptor
2945  *
2946  * Return: parsed checksum status.
2947  **/
2948 static struct idpf_rx_csum_decoded
2949 idpf_rx_splitq_extract_csum_bits(const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
2950 {
2951 	struct idpf_rx_csum_decoded csum = { };
2952 	u8 qword0, qword1;
2953 
2954 	qword0 = rx_desc->status_err0_qw0;
2955 	qword1 = rx_desc->status_err0_qw1;
2956 
2957 	csum.ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M,
2958 			     qword1);
2959 	csum.eipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M,
2960 			      qword1);
2961 	csum.l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M,
2962 			     qword1);
2963 	csum.l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M,
2964 			       qword1);
2965 	csum.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M,
2966 				   qword0);
2967 	csum.raw_csum_inv =
2968 		le16_get_bits(rx_desc->ptype_err_fflags0,
2969 			      VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M);
2970 	csum.raw_csum = le16_to_cpu(rx_desc->misc.raw_cs);
2971 
2972 	return csum;
2973 }
2974 
2975 /**
2976  * idpf_rx_rsc - Set the RSC fields in the skb
2977  * @rxq : Rx descriptor ring packet is being transacted on
2978  * @skb : pointer to current skb being populated
2979  * @rx_desc: Receive descriptor
2980  * @decoded: Decoded Rx packet type related fields
2981  *
2982  * Return 0 on success and error code on failure
2983  *
2984  * Populate the skb fields with the total number of RSC segments, RSC payload
2985  * length and packet type.
2986  */
2987 static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
2988 		       const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
2989 		       struct libeth_rx_pt decoded)
2990 {
2991 	u16 rsc_segments, rsc_seg_len;
2992 	bool ipv4, ipv6;
2993 	int len;
2994 
2995 	if (unlikely(libeth_rx_pt_get_ip_ver(decoded) ==
2996 		     LIBETH_RX_PT_OUTER_L2))
2997 		return -EINVAL;
2998 
2999 	rsc_seg_len = le16_to_cpu(rx_desc->misc.rscseglen);
3000 	if (unlikely(!rsc_seg_len))
3001 		return -EINVAL;
3002 
3003 	ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
3004 	ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
3005 
3006 	if (unlikely(!(ipv4 ^ ipv6)))
3007 		return -EINVAL;
3008 
3009 	rsc_segments = DIV_ROUND_UP(skb->data_len, rsc_seg_len);
3010 	if (unlikely(rsc_segments == 1))
3011 		return 0;
3012 
3013 	NAPI_GRO_CB(skb)->count = rsc_segments;
3014 	skb_shinfo(skb)->gso_size = rsc_seg_len;
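	/* Make the coalesced frame look like it came out of SW GRO: segment
	 * count and MSS let the stack resegment it later if needed.
	 */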
3015 
3016 	skb_reset_network_header(skb);
3017 	len = skb->len - skb_transport_offset(skb);
3018 
3019 	if (ipv4) {
3020 		struct iphdr *ipv4h = ip_hdr(skb);
3021 
3022 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
3023 
3024 		/* Reset and set transport header offset in skb */
3025 		skb_set_transport_header(skb, sizeof(struct iphdr));
3026 
3027 		/* Compute the TCP pseudo-header checksum */
3028 		tcp_hdr(skb)->check =
3029 			~tcp_v4_check(len, ipv4h->saddr, ipv4h->daddr, 0);
3030 	} else {
3031 		struct ipv6hdr *ipv6h = ipv6_hdr(skb);
3032 
3033 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
3034 		skb_set_transport_header(skb, sizeof(struct ipv6hdr));
3035 		tcp_hdr(skb)->check =
3036 			~tcp_v6_check(len, &ipv6h->saddr, &ipv6h->daddr, 0);
3037 	}
3038 
3039 	tcp_gro_complete(skb);
3040 
3041 	u64_stats_update_begin(&rxq->stats_sync);
3042 	u64_stats_inc(&rxq->q_stats.rsc_pkts);
3043 	u64_stats_update_end(&rxq->stats_sync);
3044 
3045 	return 0;
3046 }
3047 
3048 /**
3049  * idpf_rx_process_skb_fields - Populate skb header fields from Rx descriptor
3050  * @rxq: Rx descriptor ring packet is being transacted on
3051  * @skb: pointer to current skb being populated
3052  * @rx_desc: Receive descriptor
3053  *
3054  * This function checks the ring, descriptor, and packet information in
3055  * order to populate the hash, checksum, protocol, and
3056  * other fields within the skb.
3057  */
3058 static int
3059 idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
3060 			   const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
3061 {
3062 	struct idpf_rx_csum_decoded csum_bits;
3063 	struct libeth_rx_pt decoded;
3064 	u16 rx_ptype;
3065 
3066 	rx_ptype = le16_get_bits(rx_desc->ptype_err_fflags0,
3067 				 VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M);
3068 	decoded = rxq->rx_ptype_lkup[rx_ptype];
3069 
3070 	/* process RSS/hash */
3071 	idpf_rx_hash(rxq, skb, rx_desc, decoded);
3072 
3073 	skb->protocol = eth_type_trans(skb, rxq->netdev);
3074 
3075 	if (le16_get_bits(rx_desc->hdrlen_flags,
3076 			  VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M))
3077 		return idpf_rx_rsc(rxq, skb, rx_desc, decoded);
3078 
3079 	csum_bits = idpf_rx_splitq_extract_csum_bits(rx_desc);
3080 	idpf_rx_csum(rxq, skb, csum_bits, decoded);
3081 
3082 	skb_record_rx_queue(skb, rxq->idx);
3083 
3084 	return 0;
3085 }
3086 
3087 /**
3088  * idpf_rx_add_frag - Add contents of Rx buffer to sk_buff as a frag
3089  * @rx_buf: buffer containing page to add
3090  * @skb: sk_buff to place the data into
3091  * @size: packet length from rx_desc
3092  *
3093  * This function will add the data contained in rx_buf->page to the skb.
3094  * It will just attach the page as a frag to the skb.
3095  * The function will then update the page offset.
3096  */
3097 void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
3098 		      unsigned int size)
3099 {
3100 	u32 hr = rx_buf->page->pp->p.offset;
3101 
3102 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
3103 			rx_buf->offset + hr, size, rx_buf->truesize);
3104 }
3105 
3106 /**
3107  * idpf_rx_hsplit_wa - handle header buffer overflows and split errors
3108  * @hdr: Rx buffer for the headers
3109  * @buf: Rx buffer for the payload
3110  * @data_len: number of bytes received to the payload buffer
3111  *
3112  * When a header buffer overflow occurs or the HW was unable to parse the
3113  * packet type to perform header split, the whole frame gets placed to the
3114  * payload buffer. We can't build a valid skb around a payload buffer when
3115  * the header split is active since it doesn't reserve any head- or tailroom.
3116  * In that case, copy either the whole frame when it's short or just the
3117  * Ethernet header to the header buffer to be able to build an skb and adjust
3118  * the data offset in the payload buffer, IOW emulate the header split.
3119  *
3120  * Return: number of bytes copied to the header buffer.
3121  */
3122 static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr,
3123 			     struct libeth_fqe *buf, u32 data_len)
3124 {
3125 	u32 copy = data_len <= L1_CACHE_BYTES ? data_len : ETH_HLEN;
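	/* Frames no longer than a cacheline are copied to the header buffer
	 * whole; for anything bigger only the Ethernet header is moved.
	 */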
3126 	const void *src;
3127 	void *dst;
3128 
3129 	if (!libeth_rx_sync_for_cpu(buf, copy))
3130 		return 0;
3131 
3132 	dst = page_address(hdr->page) + hdr->offset + hdr->page->pp->p.offset;
3133 	src = page_address(buf->page) + buf->offset + buf->page->pp->p.offset;
3134 	memcpy(dst, src, LARGEST_ALIGN(copy));
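	/* Rounding the length up to the cacheline size presumably lets
	 * memcpy() use full-width loads/stores; the buffers are sized
	 * generously enough for the overcopy.
	 */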
3135 
3136 	buf->offset += copy;
3137 
3138 	return copy;
3139 }
3140 
3141 /**
3142  * idpf_rx_build_skb - Allocate skb and populate it from header buffer
3143  * @buf: Rx buffer to pull data from
3144  * @size: the length of the packet
3145  *
3146  * This function allocates an skb. It then populates it with the page data from
3147  * the current receive descriptor, taking care to set up the skb correctly.
3148  */
3149 struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size)
3150 {
3151 	u32 hr = buf->page->pp->p.offset;
3152 	struct sk_buff *skb;
3153 	void *va;
3154 
3155 	va = page_address(buf->page) + buf->offset;
3156 	prefetch(va + hr);
3157 
3158 	skb = napi_build_skb(va, buf->truesize);
3159 	if (unlikely(!skb))
3160 		return NULL;
3161 
3162 	skb_mark_for_recycle(skb);
3163 
3164 	skb_reserve(skb, hr);
3165 	__skb_put(skb, size);
3166 
3167 	return skb;
3168 }
3169 
3170 /**
3171  * idpf_rx_splitq_test_staterr - tests bits in Rx descriptor
3172  * status and error fields
3173  * @stat_err_field: field from descriptor to test bits in
3174  * @stat_err_bits: value to mask
3175  *
3176  */
3177 static bool idpf_rx_splitq_test_staterr(const u8 stat_err_field,
3178 					const u8 stat_err_bits)
3179 {
3180 	return !!(stat_err_field & stat_err_bits);
3181 }
3182 
3183 /**
3184  * idpf_rx_splitq_is_eop - process handling of EOP buffers
3185  * @rx_desc: Rx descriptor for current buffer
3186  *
3187  * If the buffer is an EOP buffer, this function exits returning true,
3188  * otherwise it returns false, indicating that this is a non-EOP buffer.
3189  */
3190 static bool idpf_rx_splitq_is_eop(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
3191 {
3192 	/* if we are the last buffer then there is nothing else to do */
3193 	return likely(idpf_rx_splitq_test_staterr(rx_desc->status_err0_qw1,
3194 						  IDPF_RXD_EOF_SPLITQ));
3195 }
3196 
3197 /**
3198  * idpf_rx_splitq_clean - Clean completed descriptors from Rx queue
3199  * @rxq: Rx descriptor queue to retrieve receive buffer queue
3200  * @budget: Total limit on number of packets to process
3201  *
3202  * This function provides a "bounce buffer" approach to Rx interrupt
3203  * processing. The advantage to this is that on systems that have
3204  * expensive overhead for IOMMU access this provides a means of avoiding
3205  * it by maintaining the mapping of the page to the system.
3206  *
3207  * Returns amount of work completed
3208  */
3209 static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
3210 {
3211 	int total_rx_bytes = 0, total_rx_pkts = 0;
3212 	struct idpf_buf_queue *rx_bufq = NULL;
3213 	struct sk_buff *skb = rxq->skb;
3214 	u16 ntc = rxq->next_to_clean;
3215 
3216 	/* Process Rx packets bounded by budget */
3217 	while (likely(total_rx_pkts < budget)) {
3218 		struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
3219 		struct libeth_fqe *hdr, *rx_buf = NULL;
3220 		struct idpf_sw_queue *refillq = NULL;
3221 		struct idpf_rxq_set *rxq_set = NULL;
3222 		unsigned int pkt_len = 0;
3223 		unsigned int hdr_len = 0;
3224 		u16 gen_id, buf_id = 0;
3225 		int bufq_id;
3226 		u8 rxdid;
3227 
3228 		/* get the Rx desc from Rx queue based on 'next_to_clean' */
3229 		rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb;
3230 
3231 		/* This memory barrier is needed to keep us from reading
3232 		 * any other fields out of the rx_desc
3233 		 */
3234 		dma_rmb();
3235 
3236 		/* if the descriptor isn't done, no work yet to do */
3237 		gen_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
3238 				       VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M);
3239 
3240 		if (idpf_queue_has(GEN_CHK, rxq) != gen_id)
3241 			break;
3242 
3243 		rxdid = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M,
3244 				  rx_desc->rxdid_ucast);
3245 		if (rxdid != VIRTCHNL2_RXDID_2_FLEX_SPLITQ) {
3246 			IDPF_RX_BUMP_NTC(rxq, ntc);
3247 			u64_stats_update_begin(&rxq->stats_sync);
3248 			u64_stats_inc(&rxq->q_stats.bad_descs);
3249 			u64_stats_update_end(&rxq->stats_sync);
3250 			continue;
3251 		}
3252 
3253 		pkt_len = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
3254 					VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M);
3255 
3256 		bufq_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
3257 					VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M);
3258 
3259 		rxq_set = container_of(rxq, struct idpf_rxq_set, rxq);
3260 		refillq = rxq_set->refillq[bufq_id];
3261 
3262 		/* retrieve buffer from the rxq */
3263 		rx_bufq = &rxq->bufq_sets[bufq_id].bufq;
3264 
3265 		buf_id = le16_to_cpu(rx_desc->buf_id);
3266 
3267 		rx_buf = &rx_bufq->buf[buf_id];
3268 
3269 		if (!rx_bufq->hdr_pp)
3270 			goto payload;
3271 
3272 #define __HBO_BIT	VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M
3273 #define __HDR_LEN_MASK	VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M
3274 		if (likely(!(rx_desc->status_err0_qw1 & __HBO_BIT)))
3275 			/* If a header buffer overflow occurs, i.e. the header is
3276 			 * too large to fit in the header split buffer, HW will
3277 			 * put the entire packet, including headers, in the
3278 			 * data/payload buffer.
3279 			 */
3280 			hdr_len = le16_get_bits(rx_desc->hdrlen_flags,
3281 						__HDR_LEN_MASK);
3282 #undef __HDR_LEN_MASK
3283 #undef __HBO_BIT
3284 
3285 		hdr = &rx_bufq->hdr_buf[buf_id];
3286 
3287 		if (unlikely(!hdr_len && !skb)) {
3288 			hdr_len = idpf_rx_hsplit_wa(hdr, rx_buf, pkt_len);
3289 			pkt_len -= hdr_len;
3290 
3291 			u64_stats_update_begin(&rxq->stats_sync);
3292 			u64_stats_inc(&rxq->q_stats.hsplit_buf_ovf);
3293 			u64_stats_update_end(&rxq->stats_sync);
3294 		}
3295 
3296 		if (libeth_rx_sync_for_cpu(hdr, hdr_len)) {
3297 			skb = idpf_rx_build_skb(hdr, hdr_len);
3298 			if (!skb)
3299 				break;
3300 
3301 			u64_stats_update_begin(&rxq->stats_sync);
3302 			u64_stats_inc(&rxq->q_stats.hsplit_pkts);
3303 			u64_stats_update_end(&rxq->stats_sync);
3304 		}
3305 
3306 		hdr->page = NULL;
3307 
3308 payload:
3309 		if (!libeth_rx_sync_for_cpu(rx_buf, pkt_len))
3310 			goto skip_data;
3311 
3312 		if (skb)
3313 			idpf_rx_add_frag(rx_buf, skb, pkt_len);
3314 		else
3315 			skb = idpf_rx_build_skb(rx_buf, pkt_len);
3316 
3317 		/* exit if we failed to retrieve a buffer */
3318 		if (!skb)
3319 			break;
3320 
3321 skip_data:
3322 		rx_buf->page = NULL;
3323 
3324 		idpf_rx_post_buf_refill(refillq, buf_id);
3325 		IDPF_RX_BUMP_NTC(rxq, ntc);
3326 
3327 		/* skip if it is non EOP desc */
3328 		if (!idpf_rx_splitq_is_eop(rx_desc) || unlikely(!skb))
3329 			continue;
3330 
3331 		/* pad skb if needed (to make valid ethernet frame) */
3332 		if (eth_skb_pad(skb)) {
3333 			skb = NULL;
3334 			continue;
3335 		}
3336 
3337 		/* probably a little skewed due to removing CRC */
3338 		total_rx_bytes += skb->len;
3339 
3340 		/* protocol */
3341 		if (unlikely(idpf_rx_process_skb_fields(rxq, skb, rx_desc))) {
3342 			dev_kfree_skb_any(skb);
3343 			skb = NULL;
3344 			continue;
3345 		}
3346 
3347 		/* send completed skb up the stack */
3348 		napi_gro_receive(rxq->napi, skb);
3349 		skb = NULL;
3350 
3351 		/* update budget accounting */
3352 		total_rx_pkts++;
3353 	}
3354 
3355 	rxq->next_to_clean = ntc;
3356 
3357 	rxq->skb = skb;
3358 	u64_stats_update_begin(&rxq->stats_sync);
3359 	u64_stats_add(&rxq->q_stats.packets, total_rx_pkts);
3360 	u64_stats_add(&rxq->q_stats.bytes, total_rx_bytes);
3361 	u64_stats_update_end(&rxq->stats_sync);
3362 
3363 	/* guarantee a trip back through this routine if there was a failure */
3364 	return total_rx_pkts;
3365 }
3366 
3367 /**
3368  * idpf_rx_update_bufq_desc - Update buffer queue descriptor
3369  * @bufq: Pointer to the buffer queue
3370  * @buf_id: buffer ID
3371  * @buf_desc: Buffer queue descriptor
3372  *
3373  * Return 0 on success and negative on failure.
3374  */
3375 static int idpf_rx_update_bufq_desc(struct idpf_buf_queue *bufq, u32 buf_id,
3376 				    struct virtchnl2_splitq_rx_buf_desc *buf_desc)
3377 {
3378 	struct libeth_fq_fp fq = {
3379 		.pp		= bufq->pp,
3380 		.fqes		= bufq->buf,
3381 		.truesize	= bufq->truesize,
3382 		.count		= bufq->desc_count,
3383 	};
3384 	dma_addr_t addr;
3385 
3386 	addr = libeth_rx_alloc(&fq, buf_id);
3387 	if (addr == DMA_MAPPING_ERROR)
3388 		return -ENOMEM;
3389 
3390 	buf_desc->pkt_addr = cpu_to_le64(addr);
3391 	buf_desc->qword0.buf_id = cpu_to_le16(buf_id);
3392 
3393 	if (!idpf_queue_has(HSPLIT_EN, bufq))
3394 		return 0;
3395 
3396 	fq.pp = bufq->hdr_pp;
3397 	fq.fqes = bufq->hdr_buf;
3398 	fq.truesize = bufq->hdr_truesize;
3399 
3400 	addr = libeth_rx_alloc(&fq, buf_id);
3401 	if (addr == DMA_MAPPING_ERROR)
3402 		return -ENOMEM;
3403 
3404 	buf_desc->hdr_addr = cpu_to_le64(addr);
3405 
3406 	return 0;
3407 }
3408 
3409 /**
3410  * idpf_rx_clean_refillq - Clean refill queue buffers
3411  * @bufq: buffer queue to post buffers back to
3412  * @refillq: refill queue to clean
3413  *
3414  * This function takes care of the buffer refill management
3415  */
3416 static void idpf_rx_clean_refillq(struct idpf_buf_queue *bufq,
3417 				  struct idpf_sw_queue *refillq)
3418 {
3419 	struct virtchnl2_splitq_rx_buf_desc *buf_desc;
3420 	u16 bufq_nta = bufq->next_to_alloc;
3421 	u16 ntc = refillq->next_to_clean;
3422 	int cleaned = 0;
3423 
3424 	buf_desc = &bufq->split_buf[bufq_nta];
3425 
3426 	/* make sure we stop at ring wrap in the unlikely case ring is full */
3427 	while (likely(cleaned < refillq->desc_count)) {
3428 		u32 buf_id, refill_desc = refillq->ring[ntc];
3429 		bool failure;
3430 
3431 		if (idpf_queue_has(RFL_GEN_CHK, refillq) !=
3432 		    !!(refill_desc & IDPF_RX_BI_GEN_M))
3433 			break;
3434 
3435 		buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc);
3436 		failure = idpf_rx_update_bufq_desc(bufq, buf_id, buf_desc);
3437 		if (failure)
3438 			break;
3439 
3440 		if (unlikely(++ntc == refillq->desc_count)) {
3441 			idpf_queue_change(RFL_GEN_CHK, refillq);
3442 			ntc = 0;
3443 		}
3444 
3445 		if (unlikely(++bufq_nta == bufq->desc_count)) {
3446 			buf_desc = &bufq->split_buf[0];
3447 			bufq_nta = 0;
3448 		} else {
3449 			buf_desc++;
3450 		}
3451 
3452 		cleaned++;
3453 	}
3454 
3455 	if (!cleaned)
3456 		return;
3457 
3458 	/* We want to limit how many transactions on the bus we trigger with
3459 	 * tail writes so we only do it in strides. It's also important we
3460 	 * align the write to a multiple of 8 as required by HW.
3461 	 */
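	/* E.g. on a 512-entry ring with next_to_use == 500 and bufq_nta == 10,
	 * the expression below yields (512 + 10 - 500) == 22 pending buffers.
	 */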
3462 	if (((bufq->next_to_use <= bufq_nta ? 0 : bufq->desc_count) +
3463 	    bufq_nta - bufq->next_to_use) >= IDPF_RX_BUF_POST_STRIDE)
3464 		idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq_nta,
3465 						       IDPF_RX_BUF_POST_STRIDE));
3466 
3467 	/* update next to alloc since we have filled the ring */
3468 	refillq->next_to_clean = ntc;
3469 	bufq->next_to_alloc = bufq_nta;
3470 }
3471 
3472 /**
3473  * idpf_rx_clean_refillq_all - Clean all refill queues
3474  * @bufq: buffer queue with refill queues
3475  * @nid: ID of the closest NUMA node with memory
3476  *
3477  * Iterates through all refill queues assigned to the buffer queue on this
3478  * vector and, for each, posts the buffers it carries back to the buffer
3479  * queue.
3480  */
3481 static void idpf_rx_clean_refillq_all(struct idpf_buf_queue *bufq, int nid)
3482 {
3483 	struct idpf_bufq_set *bufq_set;
3484 	int i;
3485 
3486 	page_pool_nid_changed(bufq->pp, nid);
3487 	if (bufq->hdr_pp)
3488 		page_pool_nid_changed(bufq->hdr_pp, nid);
3489 
3490 	bufq_set = container_of(bufq, struct idpf_bufq_set, bufq);
3491 	for (i = 0; i < bufq_set->num_refillqs; i++)
3492 		idpf_rx_clean_refillq(bufq, &bufq_set->refillqs[i]);
3493 }
3494 
3495 /**
3496  * idpf_vport_intr_clean_queues - MSIX mode Interrupt Handler
3497  * @irq: interrupt number
3498  * @data: pointer to a q_vector
3499  *
3500  */
3501 static irqreturn_t idpf_vport_intr_clean_queues(int __always_unused irq,
3502 						void *data)
3503 {
3504 	struct idpf_q_vector *q_vector = (struct idpf_q_vector *)data;
3505 
3506 	q_vector->total_events++;
3507 	napi_schedule(&q_vector->napi);
3508 
3509 	return IRQ_HANDLED;
3510 }
3511 
3512 /**
3513  * idpf_vport_intr_napi_del_all - Unregister napi for all q_vectors in vport
3514  * @vport: virtual port structure
3515  *
3516  */
3517 static void idpf_vport_intr_napi_del_all(struct idpf_vport *vport)
3518 {
3519 	u16 v_idx;
3520 
3521 	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++)
3522 		netif_napi_del(&vport->q_vectors[v_idx].napi);
3523 }
3524 
3525 /**
3526  * idpf_vport_intr_napi_dis_all - Disable NAPI for all q_vectors in the vport
3527  * @vport: main vport structure
3528  */
3529 static void idpf_vport_intr_napi_dis_all(struct idpf_vport *vport)
3530 {
3531 	int v_idx;
3532 
3533 	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++)
3534 		napi_disable(&vport->q_vectors[v_idx].napi);
3535 }
3536 
3537 /**
3538  * idpf_vport_intr_rel - Free memory allocated for interrupt vectors
3539  * @vport: virtual port
3540  *
3541  * Free the memory allocated for interrupt vectors associated with a vport
3542  */
3543 void idpf_vport_intr_rel(struct idpf_vport *vport)
3544 {
3545 	for (u32 v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
3546 		struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
3547 
3548 		kfree(q_vector->complq);
3549 		q_vector->complq = NULL;
3550 		kfree(q_vector->bufq);
3551 		q_vector->bufq = NULL;
3552 		kfree(q_vector->tx);
3553 		q_vector->tx = NULL;
3554 		kfree(q_vector->rx);
3555 		q_vector->rx = NULL;
3556 
3557 		free_cpumask_var(q_vector->affinity_mask);
3558 	}
3559 
3560 	kfree(vport->q_vectors);
3561 	vport->q_vectors = NULL;
3562 }
3563 
3564 /**
3565  * idpf_vport_intr_rel_irq - Free the IRQ association with the OS
3566  * @vport: main vport structure
3567  */
3568 static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
3569 {
3570 	struct idpf_adapter *adapter = vport->adapter;
3571 	int vector;
3572 
3573 	for (vector = 0; vector < vport->num_q_vectors; vector++) {
3574 		struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
3575 		int irq_num, vidx;
3576 
3577 		/* free only the irqs that were actually requested */
3578 		if (!q_vector)
3579 			continue;
3580 
3581 		vidx = vport->q_vector_idxs[vector];
3582 		irq_num = adapter->msix_entries[vidx].vector;
3583 
3584 		/* clear the affinity_mask in the IRQ descriptor */
3585 		irq_set_affinity_hint(irq_num, NULL);
3586 		kfree(free_irq(irq_num, q_vector));
3587 	}
3588 }
3589 
3590 /**
3591  * idpf_vport_intr_dis_irq_all - Disable all interrupt
3592  * @vport: main vport structure
3593  */
3594 static void idpf_vport_intr_dis_irq_all(struct idpf_vport *vport)
3595 {
3596 	struct idpf_q_vector *q_vector = vport->q_vectors;
3597 	int q_idx;
3598 
3599 	for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++)
3600 		writel(0, q_vector[q_idx].intr_reg.dyn_ctl);
3601 }
3602 
3603 /**
3604  * idpf_vport_intr_buildreg_itr - Build the value to write to the dynamic ITR control register
3605  * @q_vector: pointer to q_vector
3606  * @type: itr index
3607  * @itr: itr value
3608  */
3609 static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector,
3610 					const int type, u16 itr)
3611 {
3612 	u32 itr_val;
3613 
3614 	itr &= IDPF_ITR_MASK;
3615 	/* Don't clear PBA because that can cause lost interrupts that
3616 	 * came in while we were cleaning/polling
3617 	 */
3618 	itr_val = q_vector->intr_reg.dyn_ctl_intena_m |
3619 		  (type << q_vector->intr_reg.dyn_ctl_itridx_s) |
3620 		  (itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1));
3621 
3622 	return itr_val;
3623 }
3624 
3625 /**
3626  * idpf_update_dim_sample - Update dim sample with packets and bytes
3627  * @q_vector: the vector associated with the interrupt
3628  * @dim_sample: dim sample to update
3629  * @dim: dim instance structure
3630  * @packets: total packets
3631  * @bytes: total bytes
3632  *
3633  * Update the dim sample with the packets and bytes which are passed to this
3634  * function. Set the dim state appropriately if the dim settings gets stale.
3635  */
3636 static void idpf_update_dim_sample(struct idpf_q_vector *q_vector,
3637 				   struct dim_sample *dim_sample,
3638 				   struct dim *dim, u64 packets, u64 bytes)
3639 {
3640 	dim_update_sample(q_vector->total_events, packets, bytes, dim_sample);
3641 	dim_sample->comp_ctr = 0;
3642 
	/* If the dim settings get stale, like when not updated for 1 second
	 * or longer, force it to start again. This addresses the frequent
	 * case of an idle queue being switched to by the scheduler.
	 * ktime_ms_delta() returns milliseconds, so compare against
	 * MSEC_PER_SEC rather than HZ.
	 */
	if (ktime_ms_delta(dim_sample->time, dim->start_sample.time) >=
	    MSEC_PER_SEC)
3648 		dim->state = DIM_START_MEASURE;
3649 }
3650 
3651 /**
3652  * idpf_net_dim - Update net DIM algorithm
3653  * @q_vector: the vector associated with the interrupt
3654  *
3655  * Create a DIM sample and notify net_dim() so that it can possibly decide
3656  * a new ITR value based on incoming packets, bytes, and interrupts.
3657  *
3658  * This function is a no-op if the queue is not configured to dynamic ITR.
3659  */
3660 static void idpf_net_dim(struct idpf_q_vector *q_vector)
3661 {
3662 	struct dim_sample dim_sample = { };
3663 	u64 packets, bytes;
3664 	u32 i;
3665 
3666 	if (!IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode))
3667 		goto check_rx_itr;
3668 
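	/* Snapshot the per-queue counters under the u64_stats seqcount so
	 * the 64-bit reads are consistent even on 32-bit hosts where the
	 * writer may be running concurrently.
	 */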
3669 	for (i = 0, packets = 0, bytes = 0; i < q_vector->num_txq; i++) {
3670 		struct idpf_tx_queue *txq = q_vector->tx[i];
3671 		unsigned int start;
3672 
3673 		do {
3674 			start = u64_stats_fetch_begin(&txq->stats_sync);
3675 			packets += u64_stats_read(&txq->q_stats.packets);
3676 			bytes += u64_stats_read(&txq->q_stats.bytes);
3677 		} while (u64_stats_fetch_retry(&txq->stats_sync, start));
3678 	}
3679 
3680 	idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->tx_dim,
3681 			       packets, bytes);
3682 	net_dim(&q_vector->tx_dim, &dim_sample);
3683 
3684 check_rx_itr:
3685 	if (!IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode))
3686 		return;
3687 
3688 	for (i = 0, packets = 0, bytes = 0; i < q_vector->num_rxq; i++) {
3689 		struct idpf_rx_queue *rxq = q_vector->rx[i];
3690 		unsigned int start;
3691 
3692 		do {
3693 			start = u64_stats_fetch_begin(&rxq->stats_sync);
3694 			packets += u64_stats_read(&rxq->q_stats.packets);
3695 			bytes += u64_stats_read(&rxq->q_stats.bytes);
3696 		} while (u64_stats_fetch_retry(&rxq->stats_sync, start));
3697 	}
3698 
3699 	idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->rx_dim,
3700 			       packets, bytes);
3701 	net_dim(&q_vector->rx_dim, &dim_sample);
3702 }
3703 
3704 /**
3705  * idpf_vport_intr_update_itr_ena_irq - Update itr and re-enable MSIX interrupt
3706  * @q_vector: q_vector for which itr is being updated and interrupt enabled
3707  *
3708  * Update the net_dim() algorithm and re-enable the interrupt associated with
3709  * this vector.
3710  */
3711 void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector)
3712 {
3713 	u32 intval;
3714 
3715 	/* net_dim() updates ITR out-of-band using a work item */
3716 	idpf_net_dim(q_vector);
3717 
3718 	q_vector->wb_on_itr = false;
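	/* IDPF_NO_ITR_UPDATE_IDX selects the "no update" ITR index, so this
	 * write re-enables the interrupt without changing the interval.
	 */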
3719 	intval = idpf_vport_intr_buildreg_itr(q_vector,
3720 					      IDPF_NO_ITR_UPDATE_IDX, 0);
3721 
3722 	writel(intval, q_vector->intr_reg.dyn_ctl);
3723 }
3724 
3725 /**
3726  * idpf_vport_intr_req_irq - get MSI-X vectors from the OS for the vport
3727  * @vport: main vport structure
3728  */
3729 static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
3730 {
3731 	struct idpf_adapter *adapter = vport->adapter;
3732 	const char *drv_name, *if_name, *vec_name;
3733 	int vector, err, irq_num, vidx;
3734 
3735 	drv_name = dev_driver_string(&adapter->pdev->dev);
3736 	if_name = netdev_name(vport->netdev);
3737 
3738 	for (vector = 0; vector < vport->num_q_vectors; vector++) {
3739 		struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
3740 		char *name;
3741 
3742 		vidx = vport->q_vector_idxs[vector];
3743 		irq_num = adapter->msix_entries[vidx].vector;
3744 
3745 		if (q_vector->num_rxq && q_vector->num_txq)
3746 			vec_name = "TxRx";
3747 		else if (q_vector->num_rxq)
3748 			vec_name = "Rx";
3749 		else if (q_vector->num_txq)
3750 			vec_name = "Tx";
3751 		else
3752 			continue;
3753 
		name = kasprintf(GFP_KERNEL, "%s-%s-%s-%d", drv_name, if_name,
				 vec_name, vidx);
		if (!name) {
			err = -ENOMEM;
			goto free_q_irqs;
		}

		err = request_irq(irq_num, idpf_vport_intr_clean_queues, 0,
				  name, q_vector);
		if (err) {
			netdev_err(vport->netdev,
				   "request_irq failed, error: %d\n", err);
			kfree(name);
			goto free_q_irqs;
		}
3764 		/* assign the mask for this irq */
3765 		irq_set_affinity_hint(irq_num, q_vector->affinity_mask);
3766 	}
3767 
3768 	return 0;
3769 
3770 free_q_irqs:
3771 	while (--vector >= 0) {
3772 		vidx = vport->q_vector_idxs[vector];
3773 		irq_num = adapter->msix_entries[vidx].vector;
3774 		kfree(free_irq(irq_num, &vport->q_vectors[vector]));
3775 	}
3776 
3777 	return err;
3778 }
3779 
3780 /**
3781  * idpf_vport_intr_write_itr - Write ITR value to the ITR register
3782  * @q_vector: q_vector structure
3783  * @itr: Interrupt throttling rate
 * @tx: true to write the Tx ITR, false to write the Rx ITR
3785  */
3786 void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector, u16 itr, bool tx)
3787 {
3788 	struct idpf_intr_reg *intr_reg;
3789 
	if (tx && !q_vector->tx)
		return;
	if (!tx && !q_vector->rx)
		return;
3794 
3795 	intr_reg = &q_vector->intr_reg;
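	/* Align the usec value to the ITR granularity and shift it into
	 * hardware units before writing the Tx or Rx ITR register.
	 */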
3796 	writel(ITR_REG_ALIGN(itr) >> IDPF_ITR_GRAN_S,
3797 	       tx ? intr_reg->tx_itr : intr_reg->rx_itr);
3798 }
3799 
3800 /**
3801  * idpf_vport_intr_ena_irq_all - Enable IRQ for the given vport
3802  * @vport: main vport structure
3803  */
3804 static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport)
3805 {
3806 	bool dynamic;
3807 	int q_idx;
3808 	u16 itr;
3809 
3810 	for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) {
3811 		struct idpf_q_vector *qv = &vport->q_vectors[q_idx];
3812 
3813 		/* Set the initial ITR values */
3814 		if (qv->num_txq) {
3815 			dynamic = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode);
3816 			itr = vport->tx_itr_profile[qv->tx_dim.profile_ix];
3817 			idpf_vport_intr_write_itr(qv, dynamic ?
3818 						  itr : qv->tx_itr_value,
3819 						  true);
3820 		}
3821 
3822 		if (qv->num_rxq) {
3823 			dynamic = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode);
3824 			itr = vport->rx_itr_profile[qv->rx_dim.profile_ix];
3825 			idpf_vport_intr_write_itr(qv, dynamic ?
3826 						  itr : qv->rx_itr_value,
3827 						  false);
3828 		}
3829 
3830 		if (qv->num_txq || qv->num_rxq)
3831 			idpf_vport_intr_update_itr_ena_irq(qv);
3832 	}
3833 }
3834 
3835 /**
3836  * idpf_vport_intr_deinit - Release all vector associations for the vport
3837  * @vport: main vport structure
3838  */
3839 void idpf_vport_intr_deinit(struct idpf_vport *vport)
3840 {
3841 	idpf_vport_intr_dis_irq_all(vport);
3842 	idpf_vport_intr_napi_dis_all(vport);
3843 	idpf_vport_intr_napi_del_all(vport);
3844 	idpf_vport_intr_rel_irq(vport);
3845 }
3846 
3847 /**
 * idpf_tx_dim_work - Handle Tx DIM work to apply a new ITR value
3849  * @work: work queue structure
3850  */
3851 static void idpf_tx_dim_work(struct work_struct *work)
3852 {
3853 	struct idpf_q_vector *q_vector;
3854 	struct idpf_vport *vport;
3855 	struct dim *dim;
3856 	u16 itr;
3857 
3858 	dim = container_of(work, struct dim, work);
3859 	q_vector = container_of(dim, struct idpf_q_vector, tx_dim);
3860 	vport = q_vector->vport;
3861 
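	/* net_dim() can step the profile index past the driver's table, so
	 * clamp it to the last entry before indexing.
	 */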
3862 	if (dim->profile_ix >= ARRAY_SIZE(vport->tx_itr_profile))
3863 		dim->profile_ix = ARRAY_SIZE(vport->tx_itr_profile) - 1;
3864 
3865 	/* look up the values in our local table */
3866 	itr = vport->tx_itr_profile[dim->profile_ix];
3867 
3868 	idpf_vport_intr_write_itr(q_vector, itr, true);
3869 
3870 	dim->state = DIM_START_MEASURE;
3871 }
3872 
3873 /**
 * idpf_rx_dim_work - Handle Rx DIM work to apply a new ITR value
3875  * @work: work queue structure
3876  */
3877 static void idpf_rx_dim_work(struct work_struct *work)
3878 {
3879 	struct idpf_q_vector *q_vector;
3880 	struct idpf_vport *vport;
3881 	struct dim *dim;
3882 	u16 itr;
3883 
3884 	dim = container_of(work, struct dim, work);
3885 	q_vector = container_of(dim, struct idpf_q_vector, rx_dim);
3886 	vport = q_vector->vport;
3887 
3888 	if (dim->profile_ix >= ARRAY_SIZE(vport->rx_itr_profile))
3889 		dim->profile_ix = ARRAY_SIZE(vport->rx_itr_profile) - 1;
3890 
3891 	/* look up the values in our local table */
3892 	itr = vport->rx_itr_profile[dim->profile_ix];
3893 
3894 	idpf_vport_intr_write_itr(q_vector, itr, false);
3895 
3896 	dim->state = DIM_START_MEASURE;
3897 }
3898 
3899 /**
3900  * idpf_init_dim - Set up dynamic interrupt moderation
3901  * @qv: q_vector structure
3902  */
3903 static void idpf_init_dim(struct idpf_q_vector *qv)
3904 {
3905 	INIT_WORK(&qv->tx_dim.work, idpf_tx_dim_work);
3906 	qv->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
3907 	qv->tx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX;
3908 
3909 	INIT_WORK(&qv->rx_dim.work, idpf_rx_dim_work);
3910 	qv->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
3911 	qv->rx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX;
3912 }
3913 
3914 /**
3915  * idpf_vport_intr_napi_ena_all - Enable NAPI for all q_vectors in the vport
3916  * @vport: main vport structure
3917  */
3918 static void idpf_vport_intr_napi_ena_all(struct idpf_vport *vport)
3919 {
3920 	int q_idx;
3921 
3922 	for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) {
3923 		struct idpf_q_vector *q_vector = &vport->q_vectors[q_idx];
3924 
3925 		idpf_init_dim(q_vector);
3926 		napi_enable(&q_vector->napi);
3927 	}
3928 }
3929 
3930 /**
 * idpf_tx_splitq_clean_all - Clean completion queues
3932  * @q_vec: queue vector
3933  * @budget: Used to determine if we are in netpoll
3934  * @cleaned: returns number of packets cleaned
3935  *
3936  * Returns false if clean is not complete else returns true
3937  */
3938 static bool idpf_tx_splitq_clean_all(struct idpf_q_vector *q_vec,
3939 				     int budget, int *cleaned)
3940 {
3941 	u16 num_complq = q_vec->num_complq;
3942 	bool clean_complete = true;
3943 	int i, budget_per_q;
3944 
3945 	if (unlikely(!num_complq))
3946 		return true;
3947 
3948 	budget_per_q = DIV_ROUND_UP(budget, num_complq);
3949 
3950 	for (i = 0; i < num_complq; i++)
3951 		clean_complete &= idpf_tx_clean_complq(q_vec->complq[i],
3952 						       budget_per_q, cleaned);
3953 
3954 	return clean_complete;
3955 }
3956 
3957 /**
 * idpf_rx_splitq_clean_all - Clean all Rx queues on the vector
3959  * @q_vec: queue vector
3960  * @budget: Used to determine if we are in netpoll
3961  * @cleaned: returns number of packets cleaned
3962  *
3963  * Returns false if clean is not complete else returns true
3964  */
3965 static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
3966 				     int *cleaned)
3967 {
3968 	u16 num_rxq = q_vec->num_rxq;
3969 	bool clean_complete = true;
3970 	int pkts_cleaned = 0;
3971 	int i, budget_per_q;
3972 	int nid;
3973 
3974 	/* We attempt to distribute budget to each Rx queue fairly, but don't
3975 	 * allow the budget to go below 1 because that would exit polling early.
3976 	 */
3977 	budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0;
3978 	for (i = 0; i < num_rxq; i++) {
3979 		struct idpf_rx_queue *rxq = q_vec->rx[i];
3980 		int pkts_cleaned_per_q;
3981 
3982 		pkts_cleaned_per_q = idpf_rx_splitq_clean(rxq, budget_per_q);
		/* if we cleaned as many as budgeted, assume there is more
		 * work to do
		 */
3984 		if (pkts_cleaned_per_q >= budget_per_q)
3985 			clean_complete = false;
3986 		pkts_cleaned += pkts_cleaned_per_q;
3987 	}
3988 	*cleaned = pkts_cleaned;
3989 
3990 	nid = numa_mem_id();
3991 
3992 	for (i = 0; i < q_vec->num_bufq; i++)
3993 		idpf_rx_clean_refillq_all(q_vec->bufq[i], nid);
3994 
3995 	return clean_complete;
3996 }
3997 
3998 /**
3999  * idpf_vport_splitq_napi_poll - NAPI handler
4000  * @napi: struct from which you get q_vector
4001  * @budget: budget provided by stack
4002  */
4003 static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
4004 {
4005 	struct idpf_q_vector *q_vector =
4006 				container_of(napi, struct idpf_q_vector, napi);
4007 	bool clean_complete;
4008 	int work_done = 0;
4009 
4010 	/* Handle case where we are called by netpoll with a budget of 0 */
4011 	if (unlikely(!budget)) {
4012 		idpf_tx_splitq_clean_all(q_vector, budget, &work_done);
4013 
4014 		return 0;
4015 	}
4016 
4017 	clean_complete = idpf_rx_splitq_clean_all(q_vector, budget, &work_done);
4018 	clean_complete &= idpf_tx_splitq_clean_all(q_vector, budget, &work_done);
4019 
4020 	/* If work not completed, return budget and polling will return */
4021 	if (!clean_complete) {
4022 		idpf_vport_intr_set_wb_on_itr(q_vector);
4023 		return budget;
4024 	}
4025 
4026 	work_done = min_t(int, work_done, budget - 1);
4027 
4028 	/* Exit the polling mode, but don't re-enable interrupts if stack might
4029 	 * poll us due to busy-polling
4030 	 */
4031 	if (likely(napi_complete_done(napi, work_done)))
4032 		idpf_vport_intr_update_itr_ena_irq(q_vector);
4033 	else
4034 		idpf_vport_intr_set_wb_on_itr(q_vector);
4035 
4036 	/* Switch to poll mode in the tear-down path after sending disable
4037 	 * queues virtchnl message, as the interrupts will be disabled after
4038 	 * that
4039 	 */
	if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE,
							 q_vector->tx[0])))
		return budget;

	return work_done;
4045 }
4046 
4047 /**
4048  * idpf_vport_intr_map_vector_to_qs - Map vectors to queues
4049  * @vport: virtual port
4050  *
 * Assign Rx and Tx queue groups to q_vectors in round-robin order
4052  */
4053 static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
4054 {
4055 	bool split = idpf_is_queue_model_split(vport->rxq_model);
4056 	u16 num_txq_grp = vport->num_txq_grp;
4057 	struct idpf_rxq_group *rx_qgrp;
4058 	struct idpf_txq_group *tx_qgrp;
4059 	u32 i, qv_idx, q_index;
4060 
4061 	for (i = 0, qv_idx = 0; i < vport->num_rxq_grp; i++) {
4062 		u16 num_rxq;
4063 
4064 		if (qv_idx >= vport->num_q_vectors)
4065 			qv_idx = 0;
4066 
4067 		rx_qgrp = &vport->rxq_grps[i];
4068 		if (split)
4069 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
4070 		else
4071 			num_rxq = rx_qgrp->singleq.num_rxq;
4072 
4073 		for (u32 j = 0; j < num_rxq; j++) {
4074 			struct idpf_rx_queue *q;
4075 
4076 			if (split)
4077 				q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
4078 			else
4079 				q = rx_qgrp->singleq.rxqs[j];
4080 			q->q_vector = &vport->q_vectors[qv_idx];
4081 			q_index = q->q_vector->num_rxq;
4082 			q->q_vector->rx[q_index] = q;
4083 			q->q_vector->num_rxq++;
4084 
4085 			if (split)
4086 				q->napi = &q->q_vector->napi;
4087 		}
4088 
4089 		if (split) {
4090 			for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
4091 				struct idpf_buf_queue *bufq;
4092 
4093 				bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
4094 				bufq->q_vector = &vport->q_vectors[qv_idx];
4095 				q_index = bufq->q_vector->num_bufq;
4096 				bufq->q_vector->bufq[q_index] = bufq;
4097 				bufq->q_vector->num_bufq++;
4098 			}
4099 		}
4100 
4101 		qv_idx++;
4102 	}
4103 
4104 	split = idpf_is_queue_model_split(vport->txq_model);
4105 
4106 	for (i = 0, qv_idx = 0; i < num_txq_grp; i++) {
4107 		u16 num_txq;
4108 
4109 		if (qv_idx >= vport->num_q_vectors)
4110 			qv_idx = 0;
4111 
4112 		tx_qgrp = &vport->txq_grps[i];
4113 		num_txq = tx_qgrp->num_txq;
4114 
4115 		for (u32 j = 0; j < num_txq; j++) {
4116 			struct idpf_tx_queue *q;
4117 
4118 			q = tx_qgrp->txqs[j];
4119 			q->q_vector = &vport->q_vectors[qv_idx];
4120 			q->q_vector->tx[q->q_vector->num_txq++] = q;
4121 		}
4122 
4123 		if (split) {
4124 			struct idpf_compl_queue *q = tx_qgrp->complq;
4125 
4126 			q->q_vector = &vport->q_vectors[qv_idx];
4127 			q->q_vector->complq[q->q_vector->num_complq++] = q;
4128 		}
4129 
4130 		qv_idx++;
4131 	}
4132 }
4133 
4134 /**
4135  * idpf_vport_intr_init_vec_idx - Initialize the vector indexes
4136  * @vport: virtual port
4137  *
 * Initialize vector indexes with values returned over mailbox
4139  */
4140 static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
4141 {
4142 	struct idpf_adapter *adapter = vport->adapter;
4143 	struct virtchnl2_alloc_vectors *ac;
4144 	u16 *vecids, total_vecs;
4145 	int i;
4146 
4147 	ac = adapter->req_vec_chunks;
4148 	if (!ac) {
4149 		for (i = 0; i < vport->num_q_vectors; i++)
4150 			vport->q_vectors[i].v_idx = vport->q_vector_idxs[i];
4151 
4152 		return 0;
4153 	}
4154 
4155 	total_vecs = idpf_get_reserved_vecs(adapter);
4156 	vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL);
4157 	if (!vecids)
4158 		return -ENOMEM;
4159 
4160 	idpf_get_vec_ids(adapter, vecids, total_vecs, &ac->vchunks);
4161 
4162 	for (i = 0; i < vport->num_q_vectors; i++)
4163 		vport->q_vectors[i].v_idx = vecids[vport->q_vector_idxs[i]];
4164 
4165 	kfree(vecids);
4166 
4167 	return 0;
4168 }
4169 
4170 /**
 * idpf_vport_intr_napi_add_all - Register NAPI handler for all q_vectors
4172  * @vport: virtual port structure
4173  */
4174 static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
4175 {
4176 	int (*napi_poll)(struct napi_struct *napi, int budget);
4177 	u16 v_idx;
4178 
4179 	if (idpf_is_queue_model_split(vport->txq_model))
4180 		napi_poll = idpf_vport_splitq_napi_poll;
4181 	else
4182 		napi_poll = idpf_vport_singleq_napi_poll;
4183 
4184 	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
4185 		struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
4186 
4187 		netif_napi_add(vport->netdev, &q_vector->napi, napi_poll);
4188 
4189 		/* only set affinity_mask if the CPU is online */
4190 		if (cpu_online(v_idx))
4191 			cpumask_set_cpu(v_idx, q_vector->affinity_mask);
4192 	}
4193 }
4194 
4195 /**
4196  * idpf_vport_intr_alloc - Allocate memory for interrupt vectors
4197  * @vport: virtual port
4198  *
4199  * We allocate one q_vector per queue interrupt. If allocation fails we
4200  * return -ENOMEM.
4201  */
4202 int idpf_vport_intr_alloc(struct idpf_vport *vport)
4203 {
4204 	u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector;
4205 	struct idpf_q_vector *q_vector;
4206 	u32 complqs_per_vector, v_idx;
4207 
4208 	vport->q_vectors = kcalloc(vport->num_q_vectors,
4209 				   sizeof(struct idpf_q_vector), GFP_KERNEL);
4210 	if (!vport->q_vectors)
4211 		return -ENOMEM;
4212 
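	/* Queue groups are spread round-robin across the vectors, so each
	 * vector needs room for at most ceil(groups / vectors) queues of
	 * each type, e.g. 10 Tx queue groups on 4 vectors -> 3 slots each.
	 */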
4213 	txqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp,
4214 				       vport->num_q_vectors);
4215 	rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq_grp,
4216 				       vport->num_q_vectors);
4217 	bufqs_per_vector = vport->num_bufqs_per_qgrp *
4218 			   DIV_ROUND_UP(vport->num_rxq_grp,
4219 					vport->num_q_vectors);
4220 	complqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp,
4221 					  vport->num_q_vectors);
4222 
4223 	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
4224 		q_vector = &vport->q_vectors[v_idx];
4225 		q_vector->vport = vport;
4226 
4227 		q_vector->tx_itr_value = IDPF_ITR_TX_DEF;
4228 		q_vector->tx_intr_mode = IDPF_ITR_DYNAMIC;
4229 		q_vector->tx_itr_idx = VIRTCHNL2_ITR_IDX_1;
4230 
4231 		q_vector->rx_itr_value = IDPF_ITR_RX_DEF;
4232 		q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC;
4233 		q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0;
4234 
4235 		if (!zalloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL))
4236 			goto error;
4237 
4238 		q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx),
4239 				       GFP_KERNEL);
4240 		if (!q_vector->tx)
4241 			goto error;
4242 
4243 		q_vector->rx = kcalloc(rxqs_per_vector, sizeof(*q_vector->rx),
4244 				       GFP_KERNEL);
4245 		if (!q_vector->rx)
4246 			goto error;
4247 
4248 		if (!idpf_is_queue_model_split(vport->rxq_model))
4249 			continue;
4250 
4251 		q_vector->bufq = kcalloc(bufqs_per_vector,
4252 					 sizeof(*q_vector->bufq),
4253 					 GFP_KERNEL);
4254 		if (!q_vector->bufq)
4255 			goto error;
4256 
4257 		q_vector->complq = kcalloc(complqs_per_vector,
4258 					   sizeof(*q_vector->complq),
4259 					   GFP_KERNEL);
4260 		if (!q_vector->complq)
4261 			goto error;
4262 	}
4263 
4264 	return 0;
4265 
4266 error:
4267 	idpf_vport_intr_rel(vport);
4268 
4269 	return -ENOMEM;
4270 }
4271 
4272 /**
4273  * idpf_vport_intr_init - Setup all vectors for the given vport
4274  * @vport: virtual port
4275  *
4276  * Returns 0 on success or negative on failure
4277  */
4278 int idpf_vport_intr_init(struct idpf_vport *vport)
4279 {
4280 	int err;
4281 
4282 	err = idpf_vport_intr_init_vec_idx(vport);
4283 	if (err)
4284 		return err;
4285 
4286 	idpf_vport_intr_map_vector_to_qs(vport);
4287 	idpf_vport_intr_napi_add_all(vport);
4288 
4289 	err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport);
4290 	if (err)
4291 		goto unroll_vectors_alloc;
4292 
4293 	err = idpf_vport_intr_req_irq(vport);
4294 	if (err)
4295 		goto unroll_vectors_alloc;
4296 
4297 	return 0;
4298 
4299 unroll_vectors_alloc:
4300 	idpf_vport_intr_napi_del_all(vport);
4301 
4302 	return err;
4303 }
4304 
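/**
 * idpf_vport_intr_ena - Enable NAPI and interrupts for the vport
 * @vport: main vport structure
 */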
4305 void idpf_vport_intr_ena(struct idpf_vport *vport)
4306 {
4307 	idpf_vport_intr_napi_ena_all(vport);
4308 	idpf_vport_intr_ena_irq_all(vport);
4309 }
4310 
4311 /**
4312  * idpf_config_rss - Send virtchnl messages to configure RSS
4313  * @vport: virtual port
4314  *
4315  * Return 0 on success, negative on failure
4316  */
4317 int idpf_config_rss(struct idpf_vport *vport)
4318 {
4319 	int err;
4320 
4321 	err = idpf_send_get_set_rss_key_msg(vport, false);
4322 	if (err)
4323 		return err;
4324 
4325 	return idpf_send_get_set_rss_lut_msg(vport, false);
4326 }
4327 
4328 /**
4329  * idpf_fill_dflt_rss_lut - Fill the indirection table with the default values
4330  * @vport: virtual port structure
4331  */
4332 static void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
4333 {
4334 	struct idpf_adapter *adapter = vport->adapter;
4335 	u16 num_active_rxq = vport->num_rxq;
4336 	struct idpf_rss_data *rss_data;
4337 	int i;
4338 
4339 	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
4340 
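	/* Spread the LUT evenly across the active Rx queues: entry i maps
	 * to queue (i % num_active_rxq), e.g. a 256-entry LUT over 4 queues
	 * repeats the pattern 0, 1, 2, 3, ...
	 */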
4341 	for (i = 0; i < rss_data->rss_lut_size; i++) {
4342 		rss_data->rss_lut[i] = i % num_active_rxq;
4343 		rss_data->cached_lut[i] = rss_data->rss_lut[i];
4344 	}
4345 }
4346 
4347 /**
4348  * idpf_init_rss - Allocate and initialize RSS resources
4349  * @vport: virtual port
4350  *
4351  * Return 0 on success, negative on failure
4352  */
4353 int idpf_init_rss(struct idpf_vport *vport)
4354 {
4355 	struct idpf_adapter *adapter = vport->adapter;
4356 	struct idpf_rss_data *rss_data;
4357 	u32 lut_size;
4358 
4359 	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
4360 
4361 	lut_size = rss_data->rss_lut_size * sizeof(u32);
4362 	rss_data->rss_lut = kzalloc(lut_size, GFP_KERNEL);
4363 	if (!rss_data->rss_lut)
4364 		return -ENOMEM;
4365 
4366 	rss_data->cached_lut = kzalloc(lut_size, GFP_KERNEL);
4367 	if (!rss_data->cached_lut) {
4368 		kfree(rss_data->rss_lut);
4369 		rss_data->rss_lut = NULL;
4370 
4371 		return -ENOMEM;
4372 	}
4373 
4374 	/* Fill the default RSS lut values */
4375 	idpf_fill_dflt_rss_lut(vport);
4376 
4377 	return idpf_config_rss(vport);
4378 }
4379 
4380 /**
4381  * idpf_deinit_rss - Release RSS resources
4382  * @vport: virtual port
4383  */
4384 void idpf_deinit_rss(struct idpf_vport *vport)
4385 {
4386 	struct idpf_adapter *adapter = vport->adapter;
4387 	struct idpf_rss_data *rss_data;
4388 
4389 	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
4390 	kfree(rss_data->cached_lut);
4391 	rss_data->cached_lut = NULL;
4392 	kfree(rss_data->rss_lut);
4393 	rss_data->rss_lut = NULL;
4394 }
4395