xref: /linux/drivers/net/ethernet/intel/idpf/idpf_txrx.c (revision 3d0fe49454652117522f60bfbefb978ba0e5300b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2023 Intel Corporation */
3 
4 #include "idpf.h"
5 
6 /**
7  * idpf_buf_lifo_push - push a buffer pointer onto stack
8  * @stack: pointer to stack struct
9  * @buf: pointer to buf to push
10  *
11  * Returns 0 on success, negative on failure
12  **/
13 static int idpf_buf_lifo_push(struct idpf_buf_lifo *stack,
14 			      struct idpf_tx_stash *buf)
15 {
16 	if (unlikely(stack->top == stack->size))
17 		return -ENOSPC;
18 
19 	stack->bufs[stack->top++] = buf;
20 
21 	return 0;
22 }
23 
24 /**
25  * idpf_buf_lifo_pop - pop a buffer pointer from stack
26  * @stack: pointer to stack struct
27  **/
28 static struct idpf_tx_stash *idpf_buf_lifo_pop(struct idpf_buf_lifo *stack)
29 {
30 	if (unlikely(!stack->top))
31 		return NULL;
32 
33 	return stack->bufs[--stack->top];
34 }
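
/* A minimal usage sketch of the two LIFO helpers above; the real call
 * sites are idpf_buf_lifo_pop() in idpf_stash_flow_sch_buffers() and
 * idpf_buf_lifo_push() in idpf_tx_clean_stashed_bufs() further below:
 *
 *	struct idpf_tx_stash *stash = idpf_buf_lifo_pop(&txq->buf_stack);
 *
 *	if (stash) {
 *		... save the deferred buffer's state in stash ...
 *		... then, once its completion arrives:
 *		idpf_buf_lifo_push(&txq->buf_stack, stash);
 *	}
 *
 * Pop returns NULL on an empty stack and push returns -ENOSPC on a full
 * one, so callers check both.
 */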
35 
36 /**
37  * idpf_tx_timeout - Respond to a Tx Hang
38  * @netdev: network interface device structure
39  * @txqueue: TX queue
40  */
41 void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
42 {
43 	struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);
44 
45 	adapter->tx_timeout_count++;
46 
47 	netdev_err(netdev, "Detected Tx timeout: Count %u, Queue %u\n",
48 		   adapter->tx_timeout_count, txqueue);
49 	if (!idpf_is_reset_in_prog(adapter)) {
50 		set_bit(IDPF_HR_FUNC_RESET, adapter->flags);
51 		queue_delayed_work(adapter->vc_event_wq,
52 				   &adapter->vc_event_task,
53 				   msecs_to_jiffies(10));
54 	}
55 }
56 
57 /**
58  * idpf_tx_buf_rel - Release a Tx buffer
59  * @tx_q: the queue that owns the buffer
60  * @tx_buf: the buffer to free
61  */
62 static void idpf_tx_buf_rel(struct idpf_queue *tx_q, struct idpf_tx_buf *tx_buf)
63 {
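	/* A buffer that still owns an skb maps the skb head, which the Tx
	 * send path is assumed to map with dma_map_single(); skb-less
	 * buffers hold paged frag data, hence dma_unmap_page() below.
	 */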
64 	if (tx_buf->skb) {
65 		if (dma_unmap_len(tx_buf, len))
66 			dma_unmap_single(tx_q->dev,
67 					 dma_unmap_addr(tx_buf, dma),
68 					 dma_unmap_len(tx_buf, len),
69 					 DMA_TO_DEVICE);
70 		dev_kfree_skb_any(tx_buf->skb);
71 	} else if (dma_unmap_len(tx_buf, len)) {
72 		dma_unmap_page(tx_q->dev,
73 			       dma_unmap_addr(tx_buf, dma),
74 			       dma_unmap_len(tx_buf, len),
75 			       DMA_TO_DEVICE);
76 	}
77 
78 	tx_buf->next_to_watch = NULL;
79 	tx_buf->skb = NULL;
80 	tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
81 	dma_unmap_len_set(tx_buf, len, 0);
82 }
83 
84 /**
85  * idpf_tx_buf_rel_all - Free all Tx buffer resources for a queue
86  * @txq: queue to be cleaned
87  */
88 static void idpf_tx_buf_rel_all(struct idpf_queue *txq)
89 {
90 	u16 i;
91 
92 	/* Buffers already cleared, nothing to do */
93 	if (!txq->tx_buf)
94 		return;
95 
96 	/* Free all the Tx buffer sk_buffs */
97 	for (i = 0; i < txq->desc_count; i++)
98 		idpf_tx_buf_rel(txq, &txq->tx_buf[i]);
99 
100 	kfree(txq->tx_buf);
101 	txq->tx_buf = NULL;
102 
103 	if (!txq->buf_stack.bufs)
104 		return;
105 
106 	for (i = 0; i < txq->buf_stack.size; i++)
107 		kfree(txq->buf_stack.bufs[i]);
108 
109 	kfree(txq->buf_stack.bufs);
110 	txq->buf_stack.bufs = NULL;
111 }
112 
113 /**
114  * idpf_tx_desc_rel - Free Tx resources per queue
115  * @txq: Tx descriptor ring for a specific queue
116  * @bufq: buffer q or completion q
117  *
118  * Free all transmit software resources
119  */
120 static void idpf_tx_desc_rel(struct idpf_queue *txq, bool bufq)
121 {
122 	if (bufq)
123 		idpf_tx_buf_rel_all(txq);
124 
125 	if (!txq->desc_ring)
126 		return;
127 
128 	dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma);
129 	txq->desc_ring = NULL;
130 	txq->next_to_alloc = 0;
131 	txq->next_to_use = 0;
132 	txq->next_to_clean = 0;
133 }
134 
135 /**
136  * idpf_tx_desc_rel_all - Free Tx Resources for All Queues
137  * @vport: virtual port structure
138  *
139  * Free all transmit software resources
140  */
141 static void idpf_tx_desc_rel_all(struct idpf_vport *vport)
142 {
143 	int i, j;
144 
145 	if (!vport->txq_grps)
146 		return;
147 
148 	for (i = 0; i < vport->num_txq_grp; i++) {
149 		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
150 
151 		for (j = 0; j < txq_grp->num_txq; j++)
152 			idpf_tx_desc_rel(txq_grp->txqs[j], true);
153 
154 		if (idpf_is_queue_model_split(vport->txq_model))
155 			idpf_tx_desc_rel(txq_grp->complq, false);
156 	}
157 }
158 
159 /**
160  * idpf_tx_buf_alloc_all - Allocate memory for all buffer resources
161  * @tx_q: queue for which the buffers are allocated
162  *
163  * Returns 0 on success, negative on failure
164  */
165 static int idpf_tx_buf_alloc_all(struct idpf_queue *tx_q)
166 {
167 	int buf_size;
168 	int i;
169 
170 	/* Allocate bookkeeping buffers only. Buffers to be supplied to HW
171 	 * are allocated by the kernel network stack and received as part of the skb
172 	 */
173 	buf_size = sizeof(struct idpf_tx_buf) * tx_q->desc_count;
174 	tx_q->tx_buf = kzalloc(buf_size, GFP_KERNEL);
175 	if (!tx_q->tx_buf)
176 		return -ENOMEM;
177 
178 	/* Initialize tx_bufs with invalid completion tags */
179 	for (i = 0; i < tx_q->desc_count; i++)
180 		tx_q->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
181 
182 	/* Initialize tx buf stack for out-of-order completions if
183 	 * flow scheduling offload is enabled
184 	 */
185 	tx_q->buf_stack.bufs =
186 		kcalloc(tx_q->desc_count, sizeof(struct idpf_tx_stash *),
187 			GFP_KERNEL);
188 	if (!tx_q->buf_stack.bufs)
189 		return -ENOMEM;
190 
191 	tx_q->buf_stack.size = tx_q->desc_count;
192 	tx_q->buf_stack.top = tx_q->desc_count;
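	/* top == size marks the stack as full: every stash entry allocated
	 * below starts out available to pop.
	 */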
193 
194 	for (i = 0; i < tx_q->desc_count; i++) {
195 		tx_q->buf_stack.bufs[i] = kzalloc(sizeof(*tx_q->buf_stack.bufs[i]),
196 						  GFP_KERNEL);
197 		if (!tx_q->buf_stack.bufs[i])
198 			return -ENOMEM;
199 	}
200 
201 	return 0;
202 }
203 
204 /**
205  * idpf_tx_desc_alloc - Allocate the Tx descriptors
206  * @tx_q: the tx ring to set up
207  * @bufq: buffer or completion queue
208  *
209  * Returns 0 on success, negative on failure
210  */
211 static int idpf_tx_desc_alloc(struct idpf_queue *tx_q, bool bufq)
212 {
213 	struct device *dev = tx_q->dev;
214 	u32 desc_sz;
215 	int err;
216 
217 	if (bufq) {
218 		err = idpf_tx_buf_alloc_all(tx_q);
219 		if (err)
220 			goto err_alloc;
221 
222 		desc_sz = sizeof(struct idpf_base_tx_desc);
223 	} else {
224 		desc_sz = sizeof(struct idpf_splitq_tx_compl_desc);
225 	}
226 
227 	tx_q->size = tx_q->desc_count * desc_sz;
228 
229 	/* Allocate descriptors and round up to the nearest 4K */
230 	tx_q->size = ALIGN(tx_q->size, 4096);
231 	tx_q->desc_ring = dmam_alloc_coherent(dev, tx_q->size, &tx_q->dma,
232 					      GFP_KERNEL);
233 	if (!tx_q->desc_ring) {
234 		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
235 			tx_q->size);
236 		err = -ENOMEM;
237 		goto err_alloc;
238 	}
239 
240 	tx_q->next_to_alloc = 0;
241 	tx_q->next_to_use = 0;
242 	tx_q->next_to_clean = 0;
243 	set_bit(__IDPF_Q_GEN_CHK, tx_q->flags);
244 
245 	return 0;
246 
247 err_alloc:
248 	idpf_tx_desc_rel(tx_q, bufq);
249 
250 	return err;
251 }
252 
253 /**
254  * idpf_tx_desc_alloc_all - allocate all queues Tx resources
255  * @vport: virtual port private structure
256  *
257  * Returns 0 on success, negative on failure
258  */
259 static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
260 {
261 	struct device *dev = &vport->adapter->pdev->dev;
262 	int err = 0;
263 	int i, j;
264 
265 	/* Set up buffer queues. In the single queue model, buffer queues
266 	 * and completion queues are the same
267 	 */
268 	for (i = 0; i < vport->num_txq_grp; i++) {
269 		for (j = 0; j < vport->txq_grps[i].num_txq; j++) {
270 			struct idpf_queue *txq = vport->txq_grps[i].txqs[j];
271 			u8 gen_bits = 0;
272 			u16 bufidx_mask;
273 
274 			err = idpf_tx_desc_alloc(txq, true);
275 			if (err) {
276 				dev_err(dev, "Allocation for Tx Queue %u failed\n",
277 					i);
278 				goto err_out;
279 			}
280 
281 			if (!idpf_is_queue_model_split(vport->txq_model))
282 				continue;
283 
284 			txq->compl_tag_cur_gen = 0;
285 
286 			/* Determine the number of bits in the bufid
287 			 * mask and add one to get the start of the
288 			 * generation bits
289 			 */
290 			bufidx_mask = txq->desc_count - 1;
291 			while (bufidx_mask >> 1) {
292 				txq->compl_tag_gen_s++;
293 				bufidx_mask = bufidx_mask >> 1;
294 			}
295 			txq->compl_tag_gen_s++;
296 
297 			gen_bits = IDPF_TX_SPLITQ_COMPL_TAG_WIDTH -
298 							txq->compl_tag_gen_s;
299 			txq->compl_tag_gen_max = GETMAXVAL(gen_bits);
300 
301 			/* Set bufid mask based on location of first
302 			 * gen bit; it cannot simply be the descriptor
303 			 * ring size-1 since we can have size values
304 			 * where not all of those bits are set.
305 			 */
306 			txq->compl_tag_bufid_m =
307 				GETMAXVAL(txq->compl_tag_gen_s);
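
			/* For illustration, assuming a 16-bit completion tag
			 * (i.e. IDPF_TX_SPLITQ_COMPL_TAG_WIDTH == 16): a
			 * 512-entry ring gives bufidx_mask = 511, so
			 * compl_tag_gen_s = 9 and the tag splits as [15:9]
			 * generation / [8:0] buffer id, i.e.
			 * compl_tag_gen_max = 127 and
			 * compl_tag_bufid_m = 0x1FF.
			 */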
308 		}
309 
310 		if (!idpf_is_queue_model_split(vport->txq_model))
311 			continue;
312 
313 		/* Setup completion queues */
314 		err = idpf_tx_desc_alloc(vport->txq_grps[i].complq, false);
315 		if (err) {
316 			dev_err(dev, "Allocation for Tx Completion Queue %u failed\n",
317 				i);
318 			goto err_out;
319 		}
320 	}
321 
322 err_out:
323 	if (err)
324 		idpf_tx_desc_rel_all(vport);
325 
326 	return err;
327 }
328 
329 /**
330  * idpf_rx_page_rel - Release an rx buffer page
331  * @rxq: the queue that owns the buffer
332  * @rx_buf: the buffer to free
333  */
334 static void idpf_rx_page_rel(struct idpf_queue *rxq, struct idpf_rx_buf *rx_buf)
335 {
336 	if (unlikely(!rx_buf->page))
337 		return;
338 
339 	page_pool_put_full_page(rxq->pp, rx_buf->page, false);
340 
341 	rx_buf->page = NULL;
342 	rx_buf->page_offset = 0;
343 }
344 
345 /**
346  * idpf_rx_hdr_buf_rel_all - Release header buffer memory
347  * @rxq: queue to use
348  */
349 static void idpf_rx_hdr_buf_rel_all(struct idpf_queue *rxq)
350 {
351 	struct idpf_adapter *adapter = rxq->vport->adapter;
352 
353 	dma_free_coherent(&adapter->pdev->dev,
354 			  rxq->desc_count * IDPF_HDR_BUF_SIZE,
355 			  rxq->rx_buf.hdr_buf_va,
356 			  rxq->rx_buf.hdr_buf_pa);
357 	rxq->rx_buf.hdr_buf_va = NULL;
358 }
359 
360 /**
361  * idpf_rx_buf_rel_all - Free all Rx buffer resources for a queue
362  * @rxq: queue to be cleaned
363  */
364 static void idpf_rx_buf_rel_all(struct idpf_queue *rxq)
365 {
366 	u16 i;
367 
368 	/* queue already cleared, nothing to do */
369 	if (!rxq->rx_buf.buf)
370 		return;
371 
372 	/* Free all the bufs allocated and given to hw on Rx queue */
373 	for (i = 0; i < rxq->desc_count; i++)
374 		idpf_rx_page_rel(rxq, &rxq->rx_buf.buf[i]);
375 
376 	if (rxq->rx_hsplit_en)
377 		idpf_rx_hdr_buf_rel_all(rxq);
378 
379 	page_pool_destroy(rxq->pp);
380 	rxq->pp = NULL;
381 
382 	kfree(rxq->rx_buf.buf);
383 	rxq->rx_buf.buf = NULL;
384 }
385 
386 /**
387  * idpf_rx_desc_rel - Free a specific Rx q resources
388  * @rxq: queue to clean the resources from
389  * @bufq: buffer q or completion q
390  * @q_model: single or split q model
391  *
392  * Free a specific rx queue resources
393  */
394 static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model)
395 {
396 	if (!rxq)
397 		return;
398 
399 	if (!bufq && idpf_is_queue_model_split(q_model) && rxq->skb) {
400 		dev_kfree_skb_any(rxq->skb);
401 		rxq->skb = NULL;
402 	}
403 
404 	if (bufq || !idpf_is_queue_model_split(q_model))
405 		idpf_rx_buf_rel_all(rxq);
406 
407 	rxq->next_to_alloc = 0;
408 	rxq->next_to_clean = 0;
409 	rxq->next_to_use = 0;
410 	if (!rxq->desc_ring)
411 		return;
412 
413 	dmam_free_coherent(rxq->dev, rxq->size, rxq->desc_ring, rxq->dma);
414 	rxq->desc_ring = NULL;
415 }
416 
417 /**
418  * idpf_rx_desc_rel_all - Free Rx Resources for All Queues
419  * @vport: virtual port structure
420  *
421  * Free all rx queues resources
422  */
423 static void idpf_rx_desc_rel_all(struct idpf_vport *vport)
424 {
425 	struct idpf_rxq_group *rx_qgrp;
426 	u16 num_rxq;
427 	int i, j;
428 
429 	if (!vport->rxq_grps)
430 		return;
431 
432 	for (i = 0; i < vport->num_rxq_grp; i++) {
433 		rx_qgrp = &vport->rxq_grps[i];
434 
435 		if (!idpf_is_queue_model_split(vport->rxq_model)) {
436 			for (j = 0; j < rx_qgrp->singleq.num_rxq; j++)
437 				idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j],
438 						 false, vport->rxq_model);
439 			continue;
440 		}
441 
442 		num_rxq = rx_qgrp->splitq.num_rxq_sets;
443 		for (j = 0; j < num_rxq; j++)
444 			idpf_rx_desc_rel(&rx_qgrp->splitq.rxq_sets[j]->rxq,
445 					 false, vport->rxq_model);
446 
447 		if (!rx_qgrp->splitq.bufq_sets)
448 			continue;
449 
450 		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
451 			struct idpf_bufq_set *bufq_set =
452 				&rx_qgrp->splitq.bufq_sets[j];
453 
454 			idpf_rx_desc_rel(&bufq_set->bufq, true,
455 					 vport->rxq_model);
456 		}
457 	}
458 }
459 
460 /**
461  * idpf_rx_buf_hw_update - Store the new tail and head values
462  * @rxq: queue to bump
463  * @val: new head index; also written to the HW tail register
464  */
465 void idpf_rx_buf_hw_update(struct idpf_queue *rxq, u32 val)
466 {
467 	rxq->next_to_use = val;
468 
469 	if (unlikely(!rxq->tail))
470 		return;
471 
472 	/* writel has an implicit memory barrier */
473 	writel(val, rxq->tail);
474 }
475 
476 /**
477  * idpf_rx_hdr_buf_alloc_all - Allocate memory for header buffers
478  * @rxq: ring to use
479  *
480  * Returns 0 on success, negative on failure.
481  */
482 static int idpf_rx_hdr_buf_alloc_all(struct idpf_queue *rxq)
483 {
484 	struct idpf_adapter *adapter = rxq->vport->adapter;
485 
486 	rxq->rx_buf.hdr_buf_va =
487 		dma_alloc_coherent(&adapter->pdev->dev,
488 				   IDPF_HDR_BUF_SIZE * rxq->desc_count,
489 				   &rxq->rx_buf.hdr_buf_pa,
490 				   GFP_KERNEL);
491 	if (!rxq->rx_buf.hdr_buf_va)
492 		return -ENOMEM;
493 
494 	return 0;
495 }
496 
497 /**
498  * idpf_rx_post_buf_refill - Post buffer id to refill queue
499  * @refillq: refill queue to post to
500  * @buf_id: buffer id to post
501  */
502 static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
503 {
504 	u16 nta = refillq->next_to_alloc;
505 
506 	/* store the buffer ID and the SW maintained GEN bit to the refillq */
507 	refillq->ring[nta] =
508 		((buf_id << IDPF_RX_BI_BUFID_S) & IDPF_RX_BI_BUFID_M) |
509 		(!!(test_bit(__IDPF_Q_GEN_CHK, refillq->flags)) <<
510 		 IDPF_RX_BI_GEN_S);
511 
512 	if (unlikely(++nta == refillq->desc_count)) {
513 		nta = 0;
514 		change_bit(__IDPF_Q_GEN_CHK, refillq->flags);
515 	}
516 	refillq->next_to_alloc = nta;
517 }
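
/* A sketch of the consumer side of this encoding, using the same field
 * macros (the splitq Rx clean path that does this is not part of this
 * excerpt):
 *
 *	u16 refill_desc = refillq->ring[refillq->next_to_clean];
 *	u16 buf_id = (refill_desc & IDPF_RX_BI_BUFID_M) >> IDPF_RX_BI_BUFID_S;
 *	bool gen = !!(refill_desc & BIT(IDPF_RX_BI_GEN_S));
 *
 * An entry is treated as valid only while its GEN bit matches the
 * reader's expected generation, which flips each time the ring wraps.
 */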
518 
519 /**
520  * idpf_rx_post_buf_desc - Post buffer to bufq descriptor ring
521  * @bufq: buffer queue to post to
522  * @buf_id: buffer id to post
523  *
524  * Returns false if buffer could not be allocated, true otherwise.
525  */
526 static bool idpf_rx_post_buf_desc(struct idpf_queue *bufq, u16 buf_id)
527 {
528 	struct virtchnl2_splitq_rx_buf_desc *splitq_rx_desc = NULL;
529 	u16 nta = bufq->next_to_alloc;
530 	struct idpf_rx_buf *buf;
531 	dma_addr_t addr;
532 
533 	splitq_rx_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, nta);
534 	buf = &bufq->rx_buf.buf[buf_id];
535 
536 	if (bufq->rx_hsplit_en) {
537 		splitq_rx_desc->hdr_addr =
538 			cpu_to_le64(bufq->rx_buf.hdr_buf_pa +
539 				    (u32)buf_id * IDPF_HDR_BUF_SIZE);
540 	}
541 
542 	addr = idpf_alloc_page(bufq->pp, buf, bufq->rx_buf_size);
543 	if (unlikely(addr == DMA_MAPPING_ERROR))
544 		return false;
545 
546 	splitq_rx_desc->pkt_addr = cpu_to_le64(addr);
547 	splitq_rx_desc->qword0.buf_id = cpu_to_le16(buf_id);
548 
549 	nta++;
550 	if (unlikely(nta == bufq->desc_count))
551 		nta = 0;
552 	bufq->next_to_alloc = nta;
553 
554 	return true;
555 }
556 
557 /**
558  * idpf_rx_post_init_bufs - Post initial buffers to bufq
559  * @bufq: buffer queue to post working set to
560  * @working_set: number of buffers to put in working set
561  *
562  * Returns true if @working_set bufs were posted successfully, false otherwise.
563  */
564 static bool idpf_rx_post_init_bufs(struct idpf_queue *bufq, u16 working_set)
565 {
566 	int i;
567 
568 	for (i = 0; i < working_set; i++) {
569 		if (!idpf_rx_post_buf_desc(bufq, i))
570 			return false;
571 	}
572 
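	/* The mask below rounds next_to_alloc down to a multiple of
	 * rx_buf_stride, since the bufq tail may only be advanced in
	 * stride-sized steps; e.g. with a stride of 32 and
	 * next_to_alloc == 100, the value written is 96.
	 */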
573 	idpf_rx_buf_hw_update(bufq,
574 			      bufq->next_to_alloc & ~(bufq->rx_buf_stride - 1));
575 
576 	return true;
577 }
578 
579 /**
580  * idpf_rx_create_page_pool - Create a page pool
581  * @rxbufq: RX queue to create page pool for
582  *
583  * Returns &page_pool on success, an ERR_PTR-encoded -errno on failure
584  */
585 static struct page_pool *idpf_rx_create_page_pool(struct idpf_queue *rxbufq)
586 {
587 	struct page_pool_params pp = {
588 		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
589 		.order		= 0,
590 		.pool_size	= rxbufq->desc_count,
591 		.nid		= NUMA_NO_NODE,
592 		.dev		= rxbufq->vport->netdev->dev.parent,
593 		.max_len	= PAGE_SIZE,
594 		.dma_dir	= DMA_FROM_DEVICE,
595 		.offset		= 0,
596 	};
597 
598 	return page_pool_create(&pp);
599 }
600 
601 /**
602  * idpf_rx_buf_alloc_all - Allocate memory for all buffer resources
603  * @rxbufq: queue for which the buffers are allocated; equivalent to
604  * rxq when operating in singleq mode
605  *
606  * Returns 0 on success, negative on failure
607  */
608 static int idpf_rx_buf_alloc_all(struct idpf_queue *rxbufq)
609 {
610 	int err = 0;
611 
612 	/* Allocate bookkeeping buffers */
613 	rxbufq->rx_buf.buf = kcalloc(rxbufq->desc_count,
614 				     sizeof(struct idpf_rx_buf), GFP_KERNEL);
615 	if (!rxbufq->rx_buf.buf) {
616 		err = -ENOMEM;
617 		goto rx_buf_alloc_all_out;
618 	}
619 
620 	if (rxbufq->rx_hsplit_en) {
621 		err = idpf_rx_hdr_buf_alloc_all(rxbufq);
622 		if (err)
623 			goto rx_buf_alloc_all_out;
624 	}
625 
626 	/* Allocate buffers to be given to HW */
627 	if (idpf_is_queue_model_split(rxbufq->vport->rxq_model)) {
628 		int working_set = IDPF_RX_BUFQ_WORKING_SET(rxbufq);
629 
630 		if (!idpf_rx_post_init_bufs(rxbufq, working_set))
631 			err = -ENOMEM;
632 	} else {
633 		if (idpf_rx_singleq_buf_hw_alloc_all(rxbufq,
634 						     rxbufq->desc_count - 1))
635 			err = -ENOMEM;
636 	}
637 
638 rx_buf_alloc_all_out:
639 	if (err)
640 		idpf_rx_buf_rel_all(rxbufq);
641 
642 	return err;
643 }
644 
645 /**
646  * idpf_rx_bufs_init - Initialize page pool, allocate rx bufs, and post to HW
647  * @rxbufq: RX queue to create page pool for
648  *
649  * Returns 0 on success, negative on failure
650  */
651 static int idpf_rx_bufs_init(struct idpf_queue *rxbufq)
652 {
653 	struct page_pool *pool;
654 
655 	pool = idpf_rx_create_page_pool(rxbufq);
656 	if (IS_ERR(pool))
657 		return PTR_ERR(pool);
658 
659 	rxbufq->pp = pool;
660 
661 	return idpf_rx_buf_alloc_all(rxbufq);
662 }
663 
664 /**
665  * idpf_rx_bufs_init_all - Initialize all RX bufs
666  * @vport: virtual port struct
667  *
668  * Returns 0 on success, negative on failure
669  */
670 int idpf_rx_bufs_init_all(struct idpf_vport *vport)
671 {
672 	struct idpf_rxq_group *rx_qgrp;
673 	struct idpf_queue *q;
674 	int i, j, err;
675 
676 	for (i = 0; i < vport->num_rxq_grp; i++) {
677 		rx_qgrp = &vport->rxq_grps[i];
678 
679 		/* Allocate bufs for the rxq itself in singleq */
680 		if (!idpf_is_queue_model_split(vport->rxq_model)) {
681 			int num_rxq = rx_qgrp->singleq.num_rxq;
682 
683 			for (j = 0; j < num_rxq; j++) {
684 				q = rx_qgrp->singleq.rxqs[j];
685 				err = idpf_rx_bufs_init(q);
686 				if (err)
687 					return err;
688 			}
689 
690 			continue;
691 		}
692 
693 		/* Otherwise, allocate bufs for the buffer queues */
694 		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
695 			q = &rx_qgrp->splitq.bufq_sets[j].bufq;
696 			err = idpf_rx_bufs_init(q);
697 			if (err)
698 				return err;
699 		}
700 	}
701 
702 	return 0;
703 }
704 
705 /**
706  * idpf_rx_desc_alloc - Allocate queue Rx resources
707  * @rxq: Rx queue for which the resources are setup
708  * @bufq: buffer or completion queue
709  * @q_model: single or split queue model
710  *
711  * Returns 0 on success, negative on failure
712  */
713 static int idpf_rx_desc_alloc(struct idpf_queue *rxq, bool bufq, s32 q_model)
714 {
715 	struct device *dev = rxq->dev;
716 
717 	if (bufq)
718 		rxq->size = rxq->desc_count *
719 			sizeof(struct virtchnl2_splitq_rx_buf_desc);
720 	else
721 		rxq->size = rxq->desc_count *
722 			sizeof(union virtchnl2_rx_desc);
723 
724 	/* Allocate descriptors and also round up to nearest 4K */
725 	rxq->size = ALIGN(rxq->size, 4096);
726 	rxq->desc_ring = dmam_alloc_coherent(dev, rxq->size,
727 					     &rxq->dma, GFP_KERNEL);
728 	if (!rxq->desc_ring) {
729 		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
730 			rxq->size);
731 		return -ENOMEM;
732 	}
733 
734 	rxq->next_to_alloc = 0;
735 	rxq->next_to_clean = 0;
736 	rxq->next_to_use = 0;
737 	set_bit(__IDPF_Q_GEN_CHK, rxq->flags);
738 
739 	return 0;
740 }
741 
742 /**
743  * idpf_rx_desc_alloc_all - allocate all RX queues resources
744  * @vport: virtual port structure
745  *
746  * Returns 0 on success, negative on failure
747  */
748 static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
749 {
750 	struct device *dev = &vport->adapter->pdev->dev;
751 	struct idpf_rxq_group *rx_qgrp;
752 	struct idpf_queue *q;
753 	int i, j, err;
754 	u16 num_rxq;
755 
756 	for (i = 0; i < vport->num_rxq_grp; i++) {
757 		rx_qgrp = &vport->rxq_grps[i];
758 		if (idpf_is_queue_model_split(vport->rxq_model))
759 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
760 		else
761 			num_rxq = rx_qgrp->singleq.num_rxq;
762 
763 		for (j = 0; j < num_rxq; j++) {
764 			if (idpf_is_queue_model_split(vport->rxq_model))
765 				q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
766 			else
767 				q = rx_qgrp->singleq.rxqs[j];
768 			err = idpf_rx_desc_alloc(q, false, vport->rxq_model);
769 			if (err) {
770 				dev_err(dev, "Memory allocation for Rx Queue %u failed\n",
771 					i);
772 				goto err_out;
773 			}
774 		}
775 
776 		if (!idpf_is_queue_model_split(vport->rxq_model))
777 			continue;
778 
779 		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
780 			q = &rx_qgrp->splitq.bufq_sets[j].bufq;
781 			err = idpf_rx_desc_alloc(q, true, vport->rxq_model);
782 			if (err) {
783 				dev_err(dev, "Memory allocation for Rx Buffer Queue %u failed\n",
784 					i);
785 				goto err_out;
786 			}
787 		}
788 	}
789 
790 	return 0;
791 
792 err_out:
793 	idpf_rx_desc_rel_all(vport);
794 
795 	return err;
796 }
797 
798 /**
799  * idpf_txq_group_rel - Release all resources for txq groups
800  * @vport: vport to release txq groups on
801  */
802 static void idpf_txq_group_rel(struct idpf_vport *vport)
803 {
804 	int i, j;
805 
806 	if (!vport->txq_grps)
807 		return;
808 
809 	for (i = 0; i < vport->num_txq_grp; i++) {
810 		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
811 
812 		for (j = 0; j < txq_grp->num_txq; j++) {
813 			kfree(txq_grp->txqs[j]);
814 			txq_grp->txqs[j] = NULL;
815 		}
816 		kfree(txq_grp->complq);
817 		txq_grp->complq = NULL;
818 	}
819 	kfree(vport->txq_grps);
820 	vport->txq_grps = NULL;
821 }
822 
823 /**
824  * idpf_rxq_sw_queue_rel - Release software queue resources
825  * @rx_qgrp: rx queue group with software queues
826  */
827 static void idpf_rxq_sw_queue_rel(struct idpf_rxq_group *rx_qgrp)
828 {
829 	int i, j;
830 
831 	for (i = 0; i < rx_qgrp->vport->num_bufqs_per_qgrp; i++) {
832 		struct idpf_bufq_set *bufq_set = &rx_qgrp->splitq.bufq_sets[i];
833 
834 		for (j = 0; j < bufq_set->num_refillqs; j++) {
835 			kfree(bufq_set->refillqs[j].ring);
836 			bufq_set->refillqs[j].ring = NULL;
837 		}
838 		kfree(bufq_set->refillqs);
839 		bufq_set->refillqs = NULL;
840 	}
841 }
842 
843 /**
844  * idpf_rxq_group_rel - Release all resources for rxq groups
845  * @vport: vport to release rxq groups on
846  */
847 static void idpf_rxq_group_rel(struct idpf_vport *vport)
848 {
849 	int i;
850 
851 	if (!vport->rxq_grps)
852 		return;
853 
854 	for (i = 0; i < vport->num_rxq_grp; i++) {
855 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
856 		u16 num_rxq;
857 		int j;
858 
859 		if (idpf_is_queue_model_split(vport->rxq_model)) {
860 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
861 			for (j = 0; j < num_rxq; j++) {
862 				kfree(rx_qgrp->splitq.rxq_sets[j]);
863 				rx_qgrp->splitq.rxq_sets[j] = NULL;
864 			}
865 
866 			idpf_rxq_sw_queue_rel(rx_qgrp);
867 			kfree(rx_qgrp->splitq.bufq_sets);
868 			rx_qgrp->splitq.bufq_sets = NULL;
869 		} else {
870 			num_rxq = rx_qgrp->singleq.num_rxq;
871 			for (j = 0; j < num_rxq; j++) {
872 				kfree(rx_qgrp->singleq.rxqs[j]);
873 				rx_qgrp->singleq.rxqs[j] = NULL;
874 			}
875 		}
876 	}
877 	kfree(vport->rxq_grps);
878 	vport->rxq_grps = NULL;
879 }
880 
881 /**
882  * idpf_vport_queue_grp_rel_all - Release all queue groups
883  * @vport: vport to release queue groups for
884  */
885 static void idpf_vport_queue_grp_rel_all(struct idpf_vport *vport)
886 {
887 	idpf_txq_group_rel(vport);
888 	idpf_rxq_group_rel(vport);
889 }
890 
891 /**
892  * idpf_vport_queues_rel - Free memory for all queues
893  * @vport: virtual port
894  *
895  * Free the memory allocated for queues associated to a vport
896  */
897 void idpf_vport_queues_rel(struct idpf_vport *vport)
898 {
899 	idpf_tx_desc_rel_all(vport);
900 	idpf_rx_desc_rel_all(vport);
901 	idpf_vport_queue_grp_rel_all(vport);
902 
903 	kfree(vport->txqs);
904 	vport->txqs = NULL;
905 }
906 
907 /**
908  * idpf_vport_init_fast_path_txqs - Initialize fast path txq array
909  * @vport: vport to init txqs on
910  *
911  * We get a queue index from skb->queue_mapping and we need a fast way to
912  * dereference the queue from queue groups.  This allows us to quickly pull a
913  * txq based on a queue index.
914  *
915  * Returns 0 on success, negative on failure
916  */
917 static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
918 {
919 	int i, j, k = 0;
920 
921 	vport->txqs = kcalloc(vport->num_txq, sizeof(struct idpf_queue *),
922 			      GFP_KERNEL);
923 
924 	if (!vport->txqs)
925 		return -ENOMEM;
926 
927 	for (i = 0; i < vport->num_txq_grp; i++) {
928 		struct idpf_txq_group *tx_grp = &vport->txq_grps[i];
929 
930 		for (j = 0; j < tx_grp->num_txq; j++, k++) {
931 			vport->txqs[k] = tx_grp->txqs[j];
932 			vport->txqs[k]->idx = k;
933 		}
934 	}
935 
936 	return 0;
937 }
938 
939 /**
940  * idpf_vport_init_num_qs - Initialize number of queues
941  * @vport: vport to initialize queues
942  * @vport_msg: data to be filled into vport
943  */
944 void idpf_vport_init_num_qs(struct idpf_vport *vport,
945 			    struct virtchnl2_create_vport *vport_msg)
946 {
947 	struct idpf_vport_user_config_data *config_data;
948 	u16 idx = vport->idx;
949 
950 	config_data = &vport->adapter->vport_config[idx]->user_config;
951 	vport->num_txq = le16_to_cpu(vport_msg->num_tx_q);
952 	vport->num_rxq = le16_to_cpu(vport_msg->num_rx_q);
953 	/* The numbers of txqs and rxqs in config data will be zero only in
954 	 * the driver load path, and we don't update them thereafter
955 	 */
956 	if (!config_data->num_req_tx_qs && !config_data->num_req_rx_qs) {
957 		config_data->num_req_tx_qs = le16_to_cpu(vport_msg->num_tx_q);
958 		config_data->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q);
959 	}
960 
961 	if (idpf_is_queue_model_split(vport->txq_model))
962 		vport->num_complq = le16_to_cpu(vport_msg->num_tx_complq);
963 	if (idpf_is_queue_model_split(vport->rxq_model))
964 		vport->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq);
965 
966 	/* Adjust number of buffer queues per Rx queue group. */
967 	if (!idpf_is_queue_model_split(vport->rxq_model)) {
968 		vport->num_bufqs_per_qgrp = 0;
969 		vport->bufq_size[0] = IDPF_RX_BUF_2048;
970 
971 		return;
972 	}
973 
974 	vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP;
975 	/* Bufq[0] default buffer size is 4K
976 	 * Bufq[1] default buffer size is 2K
977 	 */
978 	vport->bufq_size[0] = IDPF_RX_BUF_4096;
979 	vport->bufq_size[1] = IDPF_RX_BUF_2048;
980 }
981 
982 /**
983  * idpf_vport_calc_num_q_desc - Calculate number of queue descriptors
984  * @vport: vport to calculate q descriptors for
985  */
986 void idpf_vport_calc_num_q_desc(struct idpf_vport *vport)
987 {
988 	struct idpf_vport_user_config_data *config_data;
989 	int num_bufqs = vport->num_bufqs_per_qgrp;
990 	u32 num_req_txq_desc, num_req_rxq_desc;
991 	u16 idx = vport->idx;
992 	int i;
993 
994 	config_data =  &vport->adapter->vport_config[idx]->user_config;
995 	num_req_txq_desc = config_data->num_req_txq_desc;
996 	num_req_rxq_desc = config_data->num_req_rxq_desc;
997 
998 	vport->complq_desc_count = 0;
999 	if (num_req_txq_desc) {
1000 		vport->txq_desc_count = num_req_txq_desc;
1001 		if (idpf_is_queue_model_split(vport->txq_model)) {
1002 			vport->complq_desc_count = num_req_txq_desc;
1003 			if (vport->complq_desc_count < IDPF_MIN_TXQ_COMPLQ_DESC)
1004 				vport->complq_desc_count =
1005 					IDPF_MIN_TXQ_COMPLQ_DESC;
1006 		}
1007 	} else {
1008 		vport->txq_desc_count =	IDPF_DFLT_TX_Q_DESC_COUNT;
1009 		if (idpf_is_queue_model_split(vport->txq_model))
1010 			vport->complq_desc_count =
1011 				IDPF_DFLT_TX_COMPLQ_DESC_COUNT;
1012 	}
1013 
1014 	if (num_req_rxq_desc)
1015 		vport->rxq_desc_count = num_req_rxq_desc;
1016 	else
1017 		vport->rxq_desc_count = IDPF_DFLT_RX_Q_DESC_COUNT;
1018 
1019 	for (i = 0; i < num_bufqs; i++) {
1020 		if (!vport->bufq_desc_count[i])
1021 			vport->bufq_desc_count[i] =
1022 				IDPF_RX_BUFQ_DESC_COUNT(vport->rxq_desc_count,
1023 							num_bufqs);
1024 	}
1025 }
1026 
1027 /**
1028  * idpf_vport_calc_total_qs - Calculate total number of queues
1029  * @adapter: private data struct
1030  * @vport_idx: vport idx to retrieve vport pointer
1031  * @vport_msg: message to fill with data
1032  * @max_q: vport max queue info
1033  *
1034  * Return 0 on success, error value on failure.
1035  */
1036 int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx,
1037 			     struct virtchnl2_create_vport *vport_msg,
1038 			     struct idpf_vport_max_q *max_q)
1039 {
1040 	int dflt_splitq_txq_grps = 0, dflt_singleq_txqs = 0;
1041 	int dflt_splitq_rxq_grps = 0, dflt_singleq_rxqs = 0;
1042 	u16 num_req_tx_qs = 0, num_req_rx_qs = 0;
1043 	struct idpf_vport_config *vport_config;
1044 	u16 num_txq_grps, num_rxq_grps;
1045 	u32 num_qs;
1046 
1047 	vport_config = adapter->vport_config[vport_idx];
1048 	if (vport_config) {
1049 		num_req_tx_qs = vport_config->user_config.num_req_tx_qs;
1050 		num_req_rx_qs = vport_config->user_config.num_req_rx_qs;
1051 	} else {
1052 		int num_cpus;
1053 
1054 		/* Restrict the number of queues to the online CPU count as a
1055 		 * default configuration for best performance. The user can
1056 		 * always override this up to the max queue count via ethtool.
1057 		 */
1058 		num_cpus = num_online_cpus();
1059 
1060 		dflt_splitq_txq_grps = min_t(int, max_q->max_txq, num_cpus);
1061 		dflt_singleq_txqs = min_t(int, max_q->max_txq, num_cpus);
1062 		dflt_splitq_rxq_grps = min_t(int, max_q->max_rxq, num_cpus);
1063 		dflt_singleq_rxqs = min_t(int, max_q->max_rxq, num_cpus);
1064 	}
1065 
1066 	if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->txq_model))) {
1067 		num_txq_grps = num_req_tx_qs ? num_req_tx_qs : dflt_splitq_txq_grps;
1068 		vport_msg->num_tx_complq = cpu_to_le16(num_txq_grps *
1069 						       IDPF_COMPLQ_PER_GROUP);
1070 		vport_msg->num_tx_q = cpu_to_le16(num_txq_grps *
1071 						  IDPF_DFLT_SPLITQ_TXQ_PER_GROUP);
1072 	} else {
1073 		num_txq_grps = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
1074 		num_qs = num_txq_grps * (num_req_tx_qs ? num_req_tx_qs :
1075 					 dflt_singleq_txqs);
1076 		vport_msg->num_tx_q = cpu_to_le16(num_qs);
1077 		vport_msg->num_tx_complq = 0;
1078 	}
1079 	if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->rxq_model))) {
1080 		num_rxq_grps = num_req_rx_qs ? num_req_rx_qs : dflt_splitq_rxq_grps;
1081 		vport_msg->num_rx_bufq = cpu_to_le16(num_rxq_grps *
1082 						     IDPF_MAX_BUFQS_PER_RXQ_GRP);
1083 		vport_msg->num_rx_q = cpu_to_le16(num_rxq_grps *
1084 						  IDPF_DFLT_SPLITQ_RXQ_PER_GROUP);
1085 	} else {
1086 		num_rxq_grps = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
1087 		num_qs = num_rxq_grps * (num_req_rx_qs ? num_req_rx_qs :
1088 					 dflt_singleq_rxqs);
1089 		vport_msg->num_rx_q = cpu_to_le16(num_qs);
1090 		vport_msg->num_rx_bufq = 0;
1091 	}
1092 
1093 	return 0;
1094 }
1095 
1096 /**
1097  * idpf_vport_calc_num_q_groups - Calculate number of queue groups
1098  * @vport: vport to calculate q groups for
1099  */
1100 void idpf_vport_calc_num_q_groups(struct idpf_vport *vport)
1101 {
1102 	if (idpf_is_queue_model_split(vport->txq_model))
1103 		vport->num_txq_grp = vport->num_txq;
1104 	else
1105 		vport->num_txq_grp = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
1106 
1107 	if (idpf_is_queue_model_split(vport->rxq_model))
1108 		vport->num_rxq_grp = vport->num_rxq;
1109 	else
1110 		vport->num_rxq_grp = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
1111 }
1112 
1113 /**
1114  * idpf_vport_calc_numq_per_grp - Calculate number of queues per group
1115  * @vport: vport to calculate queues for
1116  * @num_txq: return parameter for number of TX queues
1117  * @num_rxq: return parameter for number of RX queues
1118  */
1119 static void idpf_vport_calc_numq_per_grp(struct idpf_vport *vport,
1120 					 u16 *num_txq, u16 *num_rxq)
1121 {
1122 	if (idpf_is_queue_model_split(vport->txq_model))
1123 		*num_txq = IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;
1124 	else
1125 		*num_txq = vport->num_txq;
1126 
1127 	if (idpf_is_queue_model_split(vport->rxq_model))
1128 		*num_rxq = IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
1129 	else
1130 		*num_rxq = vport->num_rxq;
1131 }
1132 
1133 /**
1134  * idpf_rxq_set_descids - set the descids supported by this queue
1135  * @vport: virtual port data structure
1136  * @q: rx queue for which descids are set
1137  *
1138  */
1139 static void idpf_rxq_set_descids(struct idpf_vport *vport, struct idpf_queue *q)
1140 {
1141 	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
1142 		q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
1143 	} else {
1144 		if (vport->base_rxd)
1145 			q->rxdids = VIRTCHNL2_RXDID_1_32B_BASE_M;
1146 		else
1147 			q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
1148 	}
1149 }
1150 
1151 /**
1152  * idpf_txq_group_alloc - Allocate all txq group resources
1153  * @vport: vport to allocate txq groups for
1154  * @num_txq: number of txqs to allocate for each group
1155  *
1156  * Returns 0 on success, negative on failure
1157  */
1158 static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
1159 {
1160 	bool flow_sch_en;
1161 	int err, i;
1162 
1163 	vport->txq_grps = kcalloc(vport->num_txq_grp,
1164 				  sizeof(*vport->txq_grps), GFP_KERNEL);
1165 	if (!vport->txq_grps)
1166 		return -ENOMEM;
1167 
1168 	flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
1169 				       VIRTCHNL2_CAP_SPLITQ_QSCHED);
1170 
1171 	for (i = 0; i < vport->num_txq_grp; i++) {
1172 		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
1173 		struct idpf_adapter *adapter = vport->adapter;
1174 		int j;
1175 
1176 		tx_qgrp->vport = vport;
1177 		tx_qgrp->num_txq = num_txq;
1178 
1179 		for (j = 0; j < tx_qgrp->num_txq; j++) {
1180 			tx_qgrp->txqs[j] = kzalloc(sizeof(*tx_qgrp->txqs[j]),
1181 						   GFP_KERNEL);
1182 			if (!tx_qgrp->txqs[j]) {
1183 				err = -ENOMEM;
1184 				goto err_alloc;
1185 			}
1186 		}
1187 
1188 		for (j = 0; j < tx_qgrp->num_txq; j++) {
1189 			struct idpf_queue *q = tx_qgrp->txqs[j];
1190 
1191 			q->dev = &adapter->pdev->dev;
1192 			q->desc_count = vport->txq_desc_count;
1193 			q->tx_max_bufs = idpf_get_max_tx_bufs(adapter);
1194 			q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter);
1195 			q->vport = vport;
1196 			q->txq_grp = tx_qgrp;
1197 			hash_init(q->sched_buf_hash);
1198 
1199 			if (flow_sch_en)
1200 				set_bit(__IDPF_Q_FLOW_SCH_EN, q->flags);
1201 		}
1202 
1203 		if (!idpf_is_queue_model_split(vport->txq_model))
1204 			continue;
1205 
1206 		tx_qgrp->complq = kcalloc(IDPF_COMPLQ_PER_GROUP,
1207 					  sizeof(*tx_qgrp->complq),
1208 					  GFP_KERNEL);
1209 		if (!tx_qgrp->complq) {
1210 			err = -ENOMEM;
1211 			goto err_alloc;
1212 		}
1213 
1214 		tx_qgrp->complq->dev = &adapter->pdev->dev;
1215 		tx_qgrp->complq->desc_count = vport->complq_desc_count;
1216 		tx_qgrp->complq->vport = vport;
1217 		tx_qgrp->complq->txq_grp = tx_qgrp;
1218 
1219 		if (flow_sch_en)
1220 			__set_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags);
1221 	}
1222 
1223 	return 0;
1224 
1225 err_alloc:
1226 	idpf_txq_group_rel(vport);
1227 
1228 	return err;
1229 }
1230 
1231 /**
1232  * idpf_rxq_group_alloc - Allocate all rxq group resources
1233  * @vport: vport to allocate rxq groups for
1234  * @num_rxq: number of rxqs to allocate for each group
1235  *
1236  * Returns 0 on success, negative on failure
1237  */
1238 static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
1239 {
1240 	struct idpf_adapter *adapter = vport->adapter;
1241 	struct idpf_queue *q;
1242 	int i, k, err = 0;
1243 
1244 	vport->rxq_grps = kcalloc(vport->num_rxq_grp,
1245 				  sizeof(struct idpf_rxq_group), GFP_KERNEL);
1246 	if (!vport->rxq_grps)
1247 		return -ENOMEM;
1248 
1249 	for (i = 0; i < vport->num_rxq_grp; i++) {
1250 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1251 		int j;
1252 
1253 		rx_qgrp->vport = vport;
1254 		if (!idpf_is_queue_model_split(vport->rxq_model)) {
1255 			rx_qgrp->singleq.num_rxq = num_rxq;
1256 			for (j = 0; j < num_rxq; j++) {
1257 				rx_qgrp->singleq.rxqs[j] =
1258 						kzalloc(sizeof(*rx_qgrp->singleq.rxqs[j]),
1259 							GFP_KERNEL);
1260 				if (!rx_qgrp->singleq.rxqs[j]) {
1261 					err = -ENOMEM;
1262 					goto err_alloc;
1263 				}
1264 			}
1265 			goto skip_splitq_rx_init;
1266 		}
1267 		rx_qgrp->splitq.num_rxq_sets = num_rxq;
1268 
1269 		for (j = 0; j < num_rxq; j++) {
1270 			rx_qgrp->splitq.rxq_sets[j] =
1271 				kzalloc(sizeof(struct idpf_rxq_set),
1272 					GFP_KERNEL);
1273 			if (!rx_qgrp->splitq.rxq_sets[j]) {
1274 				err = -ENOMEM;
1275 				goto err_alloc;
1276 			}
1277 		}
1278 
1279 		rx_qgrp->splitq.bufq_sets = kcalloc(vport->num_bufqs_per_qgrp,
1280 						    sizeof(struct idpf_bufq_set),
1281 						    GFP_KERNEL);
1282 		if (!rx_qgrp->splitq.bufq_sets) {
1283 			err = -ENOMEM;
1284 			goto err_alloc;
1285 		}
1286 
1287 		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
1288 			struct idpf_bufq_set *bufq_set =
1289 				&rx_qgrp->splitq.bufq_sets[j];
1290 			int swq_size = sizeof(struct idpf_sw_queue);
1291 
1292 			q = &rx_qgrp->splitq.bufq_sets[j].bufq;
1293 			q->dev = &adapter->pdev->dev;
1294 			q->desc_count = vport->bufq_desc_count[j];
1295 			q->vport = vport;
1296 			q->rxq_grp = rx_qgrp;
1297 			q->idx = j;
1298 			q->rx_buf_size = vport->bufq_size[j];
1299 			q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
1300 			q->rx_buf_stride = IDPF_RX_BUF_STRIDE;
1301 			if (idpf_is_cap_ena_all(adapter, IDPF_HSPLIT_CAPS,
1302 						IDPF_CAP_HSPLIT) &&
1303 			    idpf_is_queue_model_split(vport->rxq_model)) {
1304 				q->rx_hsplit_en = true;
1305 				q->rx_hbuf_size = IDPF_HDR_BUF_SIZE;
1306 			}
1307 
1308 			bufq_set->num_refillqs = num_rxq;
1309 			bufq_set->refillqs = kcalloc(num_rxq, swq_size,
1310 						     GFP_KERNEL);
1311 			if (!bufq_set->refillqs) {
1312 				err = -ENOMEM;
1313 				goto err_alloc;
1314 			}
1315 			for (k = 0; k < bufq_set->num_refillqs; k++) {
1316 				struct idpf_sw_queue *refillq =
1317 					&bufq_set->refillqs[k];
1318 
1319 				refillq->dev = &vport->adapter->pdev->dev;
1320 				refillq->desc_count =
1321 					vport->bufq_desc_count[j];
1322 				set_bit(__IDPF_Q_GEN_CHK, refillq->flags);
1323 				set_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags);
1324 				refillq->ring = kcalloc(refillq->desc_count,
1325 							sizeof(u16),
1326 							GFP_KERNEL);
1327 				if (!refillq->ring) {
1328 					err = -ENOMEM;
1329 					goto err_alloc;
1330 				}
1331 			}
1332 		}
1333 
1334 skip_splitq_rx_init:
1335 		for (j = 0; j < num_rxq; j++) {
1336 			if (!idpf_is_queue_model_split(vport->rxq_model)) {
1337 				q = rx_qgrp->singleq.rxqs[j];
1338 				goto setup_rxq;
1339 			}
1340 			q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
1341 			rx_qgrp->splitq.rxq_sets[j]->refillq0 =
1342 			      &rx_qgrp->splitq.bufq_sets[0].refillqs[j];
1343 			if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP)
1344 				rx_qgrp->splitq.rxq_sets[j]->refillq1 =
1345 				      &rx_qgrp->splitq.bufq_sets[1].refillqs[j];
1346 
1347 			if (idpf_is_cap_ena_all(adapter, IDPF_HSPLIT_CAPS,
1348 						IDPF_CAP_HSPLIT) &&
1349 			    idpf_is_queue_model_split(vport->rxq_model)) {
1350 				q->rx_hsplit_en = true;
1351 				q->rx_hbuf_size = IDPF_HDR_BUF_SIZE;
1352 			}
1353 
1354 setup_rxq:
1355 			q->dev = &adapter->pdev->dev;
1356 			q->desc_count = vport->rxq_desc_count;
1357 			q->vport = vport;
1358 			q->rxq_grp = rx_qgrp;
1359 			q->idx = (i * num_rxq) + j;
1360 			/* In splitq mode, RXQ buffer size should be
1361 			 * set to that of the first buffer queue
1362 			 * associated with this RXQ
1363 			 */
1364 			q->rx_buf_size = vport->bufq_size[0];
1365 			q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
1366 			q->rx_max_pkt_size = vport->netdev->mtu +
1367 							IDPF_PACKET_HDR_PAD;
1368 			idpf_rxq_set_descids(vport, q);
1369 		}
1370 	}
1371 
1372 err_alloc:
1373 	if (err)
1374 		idpf_rxq_group_rel(vport);
1375 
1376 	return err;
1377 }
1378 
1379 /**
1380  * idpf_vport_queue_grp_alloc_all - Allocate all queue groups/resources
1381  * @vport: vport with qgrps to allocate
1382  *
1383  * Returns 0 on success, negative on failure
1384  */
1385 static int idpf_vport_queue_grp_alloc_all(struct idpf_vport *vport)
1386 {
1387 	u16 num_txq, num_rxq;
1388 	int err;
1389 
1390 	idpf_vport_calc_numq_per_grp(vport, &num_txq, &num_rxq);
1391 
1392 	err = idpf_txq_group_alloc(vport, num_txq);
1393 	if (err)
1394 		goto err_out;
1395 
1396 	err = idpf_rxq_group_alloc(vport, num_rxq);
1397 	if (err)
1398 		goto err_out;
1399 
1400 	return 0;
1401 
1402 err_out:
1403 	idpf_vport_queue_grp_rel_all(vport);
1404 
1405 	return err;
1406 }
1407 
1408 /**
1409  * idpf_vport_queues_alloc - Allocate memory for all queues
1410  * @vport: virtual port
1411  *
1412  * Allocate memory for queues associated with a vport.  Returns 0 on success,
1413  * negative on failure.
1414  */
1415 int idpf_vport_queues_alloc(struct idpf_vport *vport)
1416 {
1417 	int err;
1418 
1419 	err = idpf_vport_queue_grp_alloc_all(vport);
1420 	if (err)
1421 		goto err_out;
1422 
1423 	err = idpf_tx_desc_alloc_all(vport);
1424 	if (err)
1425 		goto err_out;
1426 
1427 	err = idpf_rx_desc_alloc_all(vport);
1428 	if (err)
1429 		goto err_out;
1430 
1431 	err = idpf_vport_init_fast_path_txqs(vport);
1432 	if (err)
1433 		goto err_out;
1434 
1435 	return 0;
1436 
1437 err_out:
1438 	idpf_vport_queues_rel(vport);
1439 
1440 	return err;
1441 }
1442 
1443 /**
1444  * idpf_tx_handle_sw_marker - Handle queue marker packet
1445  * @tx_q: tx queue to handle software marker
1446  */
1447 static void idpf_tx_handle_sw_marker(struct idpf_queue *tx_q)
1448 {
1449 	struct idpf_vport *vport = tx_q->vport;
1450 	int i;
1451 
1452 	clear_bit(__IDPF_Q_SW_MARKER, tx_q->flags);
1453 	/* Hardware must write marker packets to all queues associated with
1454 	 * completion queues. So check if all queues received marker packets
1455 	 */
1456 	for (i = 0; i < vport->num_txq; i++)
1457 		/* If we're still waiting on any other TXQ marker completions,
1458 		 * just return now since we cannot wake up the marker_wq yet.
1459 		 */
1460 		if (test_bit(__IDPF_Q_SW_MARKER, vport->txqs[i]->flags))
1461 			return;
1462 
1463 	/* Drain complete */
1464 	set_bit(IDPF_VPORT_SW_MARKER, vport->flags);
1465 	wake_up(&vport->sw_marker_wq);
1466 }
1467 
1468 /**
1469  * idpf_tx_splitq_clean_hdr - Clean TX buffer resources for header portion of
1470  * packet
1471  * @tx_q: tx queue to clean buffer from
1472  * @tx_buf: buffer to be cleaned
1473  * @cleaned: pointer to stats struct to track cleaned packets/bytes
1474  * @napi_budget: Used to determine if we are in netpoll
1475  */
1476 static void idpf_tx_splitq_clean_hdr(struct idpf_queue *tx_q,
1477 				     struct idpf_tx_buf *tx_buf,
1478 				     struct idpf_cleaned_stats *cleaned,
1479 				     int napi_budget)
1480 {
1481 	napi_consume_skb(tx_buf->skb, napi_budget);
1482 
1483 	if (dma_unmap_len(tx_buf, len)) {
1484 		dma_unmap_single(tx_q->dev,
1485 				 dma_unmap_addr(tx_buf, dma),
1486 				 dma_unmap_len(tx_buf, len),
1487 				 DMA_TO_DEVICE);
1488 
1489 		dma_unmap_len_set(tx_buf, len, 0);
1490 	}
1491 
1492 	/* clear tx_buf data */
1493 	tx_buf->skb = NULL;
1494 
1495 	cleaned->bytes += tx_buf->bytecount;
1496 	cleaned->packets += tx_buf->gso_segs;
1497 }
1498 
1499 /**
1500  * idpf_tx_clean_stashed_bufs - clean bufs that were stored for
1501  * out-of-order completions
1502  * @txq: queue to clean
1503  * @compl_tag: completion tag of packet to clean (from completion descriptor)
1504  * @cleaned: pointer to stats struct to track cleaned packets/bytes
1505  * @budget: Used to determine if we are in netpoll
1506  */
1507 static void idpf_tx_clean_stashed_bufs(struct idpf_queue *txq, u16 compl_tag,
1508 				       struct idpf_cleaned_stats *cleaned,
1509 				       int budget)
1510 {
1511 	struct idpf_tx_stash *stash;
1512 	struct hlist_node *tmp_buf;
1513 
1514 	/* Buffer completion */
1515 	hash_for_each_possible_safe(txq->sched_buf_hash, stash, tmp_buf,
1516 				    hlist, compl_tag) {
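		/* Distinct tags can land in the same hash bucket, so each
		 * stashed buffer's tag must be compared against the
		 * completion tag; non-matching entries are skipped.
		 */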
1517 		if (unlikely(stash->buf.compl_tag != (int)compl_tag))
1518 			continue;
1519 
1520 		if (stash->buf.skb) {
1521 			idpf_tx_splitq_clean_hdr(txq, &stash->buf, cleaned,
1522 						 budget);
1523 		} else if (dma_unmap_len(&stash->buf, len)) {
1524 			dma_unmap_page(txq->dev,
1525 				       dma_unmap_addr(&stash->buf, dma),
1526 				       dma_unmap_len(&stash->buf, len),
1527 				       DMA_TO_DEVICE);
1528 			dma_unmap_len_set(&stash->buf, len, 0);
1529 		}
1530 
1531 		/* Push shadow buf back onto stack */
1532 		idpf_buf_lifo_push(&txq->buf_stack, stash);
1533 
1534 		hash_del(&stash->hlist);
1535 	}
1536 }
1537 
1538 /**
1539  * idpf_stash_flow_sch_buffers - store buffer parameters to be freed at a
1540  * later time (only relevant for flow scheduling mode)
1541  * @txq: Tx queue to clean
1542  * @tx_buf: buffer to store
1543  */
1544 static int idpf_stash_flow_sch_buffers(struct idpf_queue *txq,
1545 				       struct idpf_tx_buf *tx_buf)
1546 {
1547 	struct idpf_tx_stash *stash;
1548 
1549 	if (unlikely(!dma_unmap_addr(tx_buf, dma) &&
1550 		     !dma_unmap_len(tx_buf, len)))
1551 		return 0;
1552 
1553 	stash = idpf_buf_lifo_pop(&txq->buf_stack);
1554 	if (unlikely(!stash)) {
1555 		net_err_ratelimited("%s: No out-of-order TX buffers left!\n",
1556 				    txq->vport->netdev->name);
1557 
1558 		return -ENOMEM;
1559 	}
1560 
1561 	/* Store buffer params in shadow buffer */
1562 	stash->buf.skb = tx_buf->skb;
1563 	stash->buf.bytecount = tx_buf->bytecount;
1564 	stash->buf.gso_segs = tx_buf->gso_segs;
1565 	dma_unmap_addr_set(&stash->buf, dma, dma_unmap_addr(tx_buf, dma));
1566 	dma_unmap_len_set(&stash->buf, len, dma_unmap_len(tx_buf, len));
1567 	stash->buf.compl_tag = tx_buf->compl_tag;
1568 
1569 	/* Add buffer to buf_hash table to be freed later */
1570 	hash_add(txq->sched_buf_hash, &stash->hlist, stash->buf.compl_tag);
1571 
1572 	memset(tx_buf, 0, sizeof(struct idpf_tx_buf));
1573 
1574 	/* Mark the slot's completion tag as invalid/unused */
1575 	tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
1576 
1577 	return 0;
1578 }
1579 
1580 #define idpf_tx_splitq_clean_bump_ntc(txq, ntc, desc, buf)	\
1581 do {								\
1582 	(ntc)++;						\
1583 	if (unlikely(!(ntc))) {					\
1584 		ntc -= (txq)->desc_count;			\
1585 		buf = (txq)->tx_buf;				\
1586 		desc = IDPF_FLEX_TX_DESC(txq, 0);		\
1587 	} else {						\
1588 		(buf)++;					\
1589 		(desc)++;					\
1590 	}							\
1591 } while (0)
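
/* The helper above expects ntc to have been biased negative by the caller
 * (ntc -= desc_count in idpf_tx_splitq_clean() below), so it counts from
 * -desc_count up toward zero and "!(ntc)" detects the wrap. For example,
 * with desc_count == 512 and a starting index of 5, ntc begins at -507;
 * once it reaches 0 it is rewound to -512 and the buffer and descriptor
 * pointers are reset to the ring base.
 */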
1592 
1593 /**
1594  * idpf_tx_splitq_clean - Reclaim resources from buffer queue
1595  * @tx_q: Tx queue to clean
1596  * @end: queue index until which it should be cleaned
1597  * @napi_budget: Used to determine if we are in netpoll
1598  * @cleaned: pointer to stats struct to track cleaned packets/bytes
1599  * @descs_only: true if queue is using flow-based scheduling and should
1600  * not clean buffers at this time
1601  *
1602  * Cleans the queue descriptor ring. If the queue is using queue-based
1603  * scheduling, the buffers will be cleaned as well. If the queue is using
1604  * flow-based scheduling, only the descriptors are cleaned at this time.
1605  * Separate packet completion events will be reported on the completion queue,
1606  * and the buffers will be cleaned separately. The stats are not updated from
1607  * this function when using flow-based scheduling.
1608  */
1609 static void idpf_tx_splitq_clean(struct idpf_queue *tx_q, u16 end,
1610 				 int napi_budget,
1611 				 struct idpf_cleaned_stats *cleaned,
1612 				 bool descs_only)
1613 {
1614 	union idpf_tx_flex_desc *next_pending_desc = NULL;
1615 	union idpf_tx_flex_desc *tx_desc;
1616 	s16 ntc = tx_q->next_to_clean;
1617 	struct idpf_tx_buf *tx_buf;
1618 
1619 	tx_desc = IDPF_FLEX_TX_DESC(tx_q, ntc);
1620 	next_pending_desc = IDPF_FLEX_TX_DESC(tx_q, end);
1621 	tx_buf = &tx_q->tx_buf[ntc];
1622 	ntc -= tx_q->desc_count;
1623 
1624 	while (tx_desc != next_pending_desc) {
1625 		union idpf_tx_flex_desc *eop_desc;
1626 
1627 		/* If this entry in the ring was used as a context descriptor,
1628 		 * its corresponding entry in the buffer ring will have an
1629 		 * invalid completion tag since no buffer was used.  We can
1630 		 * skip this descriptor since there is no buffer to clean.
1631 		 */
1632 		if (unlikely(tx_buf->compl_tag == IDPF_SPLITQ_TX_INVAL_COMPL_TAG))
1633 			goto fetch_next_txq_desc;
1634 
1635 		eop_desc = (union idpf_tx_flex_desc *)tx_buf->next_to_watch;
1636 
1637 		/* clear next_to_watch to prevent false hangs */
1638 		tx_buf->next_to_watch = NULL;
1639 
1640 		if (descs_only) {
1641 			if (idpf_stash_flow_sch_buffers(tx_q, tx_buf))
1642 				goto tx_splitq_clean_out;
1643 
1644 			while (tx_desc != eop_desc) {
1645 				idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
1646 							      tx_desc, tx_buf);
1647 
1648 				if (dma_unmap_len(tx_buf, len)) {
1649 					if (idpf_stash_flow_sch_buffers(tx_q,
1650 									tx_buf))
1651 						goto tx_splitq_clean_out;
1652 				}
1653 			}
1654 		} else {
1655 			idpf_tx_splitq_clean_hdr(tx_q, tx_buf, cleaned,
1656 						 napi_budget);
1657 
1658 			/* unmap remaining buffers */
1659 			while (tx_desc != eop_desc) {
1660 				idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
1661 							      tx_desc, tx_buf);
1662 
1663 				/* unmap any remaining paged data */
1664 				if (dma_unmap_len(tx_buf, len)) {
1665 					dma_unmap_page(tx_q->dev,
1666 						       dma_unmap_addr(tx_buf, dma),
1667 						       dma_unmap_len(tx_buf, len),
1668 						       DMA_TO_DEVICE);
1669 					dma_unmap_len_set(tx_buf, len, 0);
1670 				}
1671 			}
1672 		}
1673 
1674 fetch_next_txq_desc:
1675 		idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, tx_desc, tx_buf);
1676 	}
1677 
1678 tx_splitq_clean_out:
1679 	ntc += tx_q->desc_count;
1680 	tx_q->next_to_clean = ntc;
1681 }
1682 
1683 #define idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, buf)	\
1684 do {							\
1685 	(buf)++;					\
1686 	(ntc)++;					\
1687 	if (unlikely((ntc) == (txq)->desc_count)) {	\
1688 		buf = (txq)->tx_buf;			\
1689 		ntc = 0;				\
1690 	}						\
1691 } while (0)
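
/* Unlike idpf_tx_splitq_clean_bump_ntc() above, this helper keeps ntc as a
 * plain zero-based index and wraps when it reaches desc_count, since it
 * only walks the buffer ring and has no descriptor pointer to reset.
 */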
1692 
1693 /**
1694  * idpf_tx_clean_buf_ring - clean flow scheduling TX queue buffers
1695  * @txq: queue to clean
1696  * @compl_tag: completion tag of packet to clean (from completion descriptor)
1697  * @cleaned: pointer to stats struct to track cleaned packets/bytes
1698  * @budget: Used to determine if we are in netpoll
1699  *
1700  * Cleans all buffers associated with the input completion tag either from the
1701  * TX buffer ring or from the hash table if the buffers were previously
1702  * stashed. Returns true if any buffers were cleaned on the ring itself;
1703  * the byte/segment counts for the cleaned packet are returned via @cleaned.
1704  */
1705 static bool idpf_tx_clean_buf_ring(struct idpf_queue *txq, u16 compl_tag,
1706 				   struct idpf_cleaned_stats *cleaned,
1707 				   int budget)
1708 {
1709 	u16 idx = compl_tag & txq->compl_tag_bufid_m;
1710 	struct idpf_tx_buf *tx_buf = NULL;
1711 	u16 ntc = txq->next_to_clean;
1712 	u16 num_descs_cleaned = 0;
1713 	u16 orig_idx = idx;
1714 
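	/* The low compl_tag_bufid_m bits of the completion tag encode the
	 * ring index of the packet's first buffer (see the tag layout set
	 * up in idpf_tx_desc_alloc_all()), so the tag can index the buffer
	 * ring directly.
	 */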
1715 	tx_buf = &txq->tx_buf[idx];
1716 
1717 	while (tx_buf->compl_tag == (int)compl_tag) {
1718 		if (tx_buf->skb) {
1719 			idpf_tx_splitq_clean_hdr(txq, tx_buf, cleaned, budget);
1720 		} else if (dma_unmap_len(tx_buf, len)) {
1721 			dma_unmap_page(txq->dev,
1722 				       dma_unmap_addr(tx_buf, dma),
1723 				       dma_unmap_len(tx_buf, len),
1724 				       DMA_TO_DEVICE);
1725 			dma_unmap_len_set(tx_buf, len, 0);
1726 		}
1727 
1728 		memset(tx_buf, 0, sizeof(struct idpf_tx_buf));
1729 		tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
1730 
1731 		num_descs_cleaned++;
1732 		idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
1733 	}
1734 
1735 	/* If we didn't clean anything on the ring for this completion, there's
1736 	 * nothing more to do.
1737 	 */
1738 	if (unlikely(!num_descs_cleaned))
1739 		return false;
1740 
1741 	/* Otherwise, if we did clean a packet on the ring directly, it's safe
1742 	 * to assume that the descriptors starting from the original
1743 	 * next_to_clean up until the previously cleaned packet can be reused.
1744 	 * Therefore, we will go back in the ring and stash any buffers still
1745 	 * in the ring into the hash table to be cleaned later.
1746 	 */
1747 	tx_buf = &txq->tx_buf[ntc];
1748 	while (tx_buf != &txq->tx_buf[orig_idx]) {
1749 		idpf_stash_flow_sch_buffers(txq, tx_buf);
1750 		idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, tx_buf);
1751 	}
1752 
1753 	/* Finally, update next_to_clean to reflect the work that was just done
1754 	 * on the ring, if any. If the packet was only cleaned from the hash
1755 	 * table, the ring will not be impacted, therefore we should not touch
1756 	 * next_to_clean; the updated idx computed above is used here
1757 	 */
1758 	txq->next_to_clean = idx;
1759 
1760 	return true;
1761 }
1762 
1763 /**
1764  * idpf_tx_handle_rs_completion - clean a single packet and all of its buffers
1765  * whether on the buffer ring or in the hash table
1766  * @txq: Tx ring to clean
1767  * @desc: pointer to completion queue descriptor to extract completion
1768  * information from
1769  * @cleaned: pointer to stats struct to track cleaned packets/bytes
1770  * @budget: Used to determine if we are in netpoll
1771  *
1772  * The cleaned bytes/packets are returned through @cleaned
1773  */
1774 static void idpf_tx_handle_rs_completion(struct idpf_queue *txq,
1775 					 struct idpf_splitq_tx_compl_desc *desc,
1776 					 struct idpf_cleaned_stats *cleaned,
1777 					 int budget)
1778 {
1779 	u16 compl_tag;
1780 
1781 	if (!test_bit(__IDPF_Q_FLOW_SCH_EN, txq->flags)) {
1782 		u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head);
1783 
1784 		return idpf_tx_splitq_clean(txq, head, budget, cleaned, false);
1785 	}
1786 
1787 	compl_tag = le16_to_cpu(desc->q_head_compl_tag.compl_tag);
1788 
1789 	/* If we didn't clean anything on the ring, this packet must be
1790 	 * in the hash table. Go clean it there.
1791 	 */
1792 	if (!idpf_tx_clean_buf_ring(txq, compl_tag, cleaned, budget))
1793 		idpf_tx_clean_stashed_bufs(txq, compl_tag, cleaned, budget);
1794 }
1795 
1796 /**
1797  * idpf_tx_clean_complq - Reclaim resources on completion queue
1798  * @complq: Tx ring to clean
1799  * @budget: Used to determine if we are in netpoll
1800  * @cleaned: returns number of packets cleaned
1801  *
1802  * Returns true if there's any budget left (i.e. the clean is finished)
1803  */
1804 static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
1805 				 int *cleaned)
1806 {
1807 	struct idpf_splitq_tx_compl_desc *tx_desc;
1808 	struct idpf_vport *vport = complq->vport;
1809 	s16 ntc = complq->next_to_clean;
1810 	struct idpf_netdev_priv *np;
1811 	unsigned int complq_budget;
1812 	bool complq_ok = true;
1813 	int i;
1814 
1815 	complq_budget = vport->compln_clean_budget;
1816 	tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, ntc);
1817 	ntc -= complq->desc_count;
1818 
1819 	do {
1820 		struct idpf_cleaned_stats cleaned_stats = { };
1821 		struct idpf_queue *tx_q;
1822 		int rel_tx_qid;
1823 		u16 hw_head;
1824 		u8 ctype;	/* completion type */
1825 		u16 gen;
1826 
1827 		/* if the descriptor isn't done, no work yet to do */
1828 		gen = (le16_to_cpu(tx_desc->qid_comptype_gen) &
1829 		      IDPF_TXD_COMPLQ_GEN_M) >> IDPF_TXD_COMPLQ_GEN_S;
1830 		if (test_bit(__IDPF_Q_GEN_CHK, complq->flags) != gen)
1831 			break;
1832 
1833 		/* Look up the TX queue this completion belongs to */
1834 		rel_tx_qid = (le16_to_cpu(tx_desc->qid_comptype_gen) &
1835 			 IDPF_TXD_COMPLQ_QID_M) >> IDPF_TXD_COMPLQ_QID_S;
1836 		if (rel_tx_qid >= complq->txq_grp->num_txq ||
1837 		    !complq->txq_grp->txqs[rel_tx_qid]) {
1838 			dev_err(&complq->vport->adapter->pdev->dev,
1839 				"TxQ not found\n");
1840 			goto fetch_next_desc;
1841 		}
1842 		tx_q = complq->txq_grp->txqs[rel_tx_qid];
1843 
1844 		/* Determine completion type */
1845 		ctype = (le16_to_cpu(tx_desc->qid_comptype_gen) &
1846 			IDPF_TXD_COMPLQ_COMPL_TYPE_M) >>
1847 			IDPF_TXD_COMPLQ_COMPL_TYPE_S;
1848 		switch (ctype) {
1849 		case IDPF_TXD_COMPLT_RE:
1850 			hw_head = le16_to_cpu(tx_desc->q_head_compl_tag.q_head);
1851 
1852 			idpf_tx_splitq_clean(tx_q, hw_head, budget,
1853 					     &cleaned_stats, true);
1854 			break;
1855 		case IDPF_TXD_COMPLT_RS:
1856 			idpf_tx_handle_rs_completion(tx_q, tx_desc,
1857 						     &cleaned_stats, budget);
1858 			break;
1859 		case IDPF_TXD_COMPLT_SW_MARKER:
1860 			idpf_tx_handle_sw_marker(tx_q);
1861 			break;
1862 		default:
1863 			dev_err(&tx_q->vport->adapter->pdev->dev,
1864 				"Unknown TX completion type: %d\n",
1865 				ctype);
1866 			goto fetch_next_desc;
1867 		}
1868 
1869 		u64_stats_update_begin(&tx_q->stats_sync);
1870 		u64_stats_add(&tx_q->q_stats.tx.packets, cleaned_stats.packets);
1871 		u64_stats_add(&tx_q->q_stats.tx.bytes, cleaned_stats.bytes);
1872 		tx_q->cleaned_pkts += cleaned_stats.packets;
1873 		tx_q->cleaned_bytes += cleaned_stats.bytes;
1874 		complq->num_completions++;
1875 		u64_stats_update_end(&tx_q->stats_sync);
1876 
1877 fetch_next_desc:
1878 		tx_desc++;
1879 		ntc++;
1880 		if (unlikely(!ntc)) {
1881 			ntc -= complq->desc_count;
1882 			tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, 0);
1883 			change_bit(__IDPF_Q_GEN_CHK, complq->flags);
1884 		}
1885 
1886 		prefetch(tx_desc);
1887 
1888 		/* update budget accounting */
1889 		complq_budget--;
1890 	} while (likely(complq_budget));
1891 
1892 	/* Store the state of the complq to be used later in deciding if a
1893 	 * TXQ can be started again
1894 	 */
1895 	if (unlikely(IDPF_TX_COMPLQ_PENDING(complq->txq_grp) >
1896 		     IDPF_TX_COMPLQ_OVERFLOW_THRESH(complq)))
1897 		complq_ok = false;
1898 
1899 	np = netdev_priv(complq->vport->netdev);
1900 	for (i = 0; i < complq->txq_grp->num_txq; ++i) {
1901 		struct idpf_queue *tx_q = complq->txq_grp->txqs[i];
1902 		struct netdev_queue *nq;
1903 		bool dont_wake;
1904 
1905 		/* We didn't clean anything on this queue, move along */
1906 		if (!tx_q->cleaned_bytes)
1907 			continue;
1908 
1909 		*cleaned += tx_q->cleaned_pkts;
1910 
1911 		/* Update BQL */
1912 		nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
1913 
1914 		dont_wake = !complq_ok || IDPF_TX_BUF_RSV_LOW(tx_q) ||
1915 			    np->state != __IDPF_VPORT_UP ||
1916 			    !netif_carrier_ok(tx_q->vport->netdev);
1917 		/* Check if the TXQ needs to and can be restarted */
1918 		__netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes,
1919 					   IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
1920 					   dont_wake);
1921 
1922 		/* Reset cleaned stats for the next time this queue is
1923 		 * cleaned
1924 		 */
1925 		tx_q->cleaned_bytes = 0;
1926 		tx_q->cleaned_pkts = 0;
1927 	}
1928 
1929 	ntc += complq->desc_count;
1930 	complq->next_to_clean = ntc;
1931 
1932 	return !!complq_budget;
1933 }
1934 
1935 /**
1936  * idpf_tx_splitq_build_ctb - populate command tag and size for queue
1937  * based scheduling descriptors
1938  * @desc: descriptor to populate
1939  * @params: pointer to tx params struct
1940  * @td_cmd: command to be filled in desc
1941  * @size: size of buffer
1942  */
1943 void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc,
1944 			      struct idpf_tx_splitq_params *params,
1945 			      u16 td_cmd, u16 size)
1946 {
1947 	desc->q.qw1.cmd_dtype =
1948 		cpu_to_le16(params->dtype & IDPF_FLEX_TXD_QW1_DTYPE_M);
1949 	desc->q.qw1.cmd_dtype |=
1950 		cpu_to_le16((td_cmd << IDPF_FLEX_TXD_QW1_CMD_S) &
1951 			    IDPF_FLEX_TXD_QW1_CMD_M);
1952 	desc->q.qw1.buf_size = cpu_to_le16((u16)size);
1953 	desc->q.qw1.l2tags.l2tag1 = cpu_to_le16(params->td_tag);
1954 }
1955 
1956 /**
1957  * idpf_tx_splitq_build_flow_desc - populate command tag and size for flow
1958  * scheduling descriptors
1959  * @desc: descriptor to populate
1960  * @params: pointer to tx params struct
1961  * @td_cmd: command to be filled in desc
1962  * @size: size of buffer
1963  */
1964 void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
1965 				    struct idpf_tx_splitq_params *params,
1966 				    u16 td_cmd, u16 size)
1967 {
1968 	desc->flow.qw1.cmd_dtype = cpu_to_le16((u16)params->dtype | td_cmd);
1969 	desc->flow.qw1.rxr_bufsize = cpu_to_le16((u16)size);
1970 	desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag);
1971 }
1972 
1973 /**
1974  * idpf_tx_maybe_stop_common - 1st level check for common Tx stop conditions
1975  * @tx_q: the queue to be checked
1976  * @size: number of descriptors we want to assure is available
1977  *
1978  * Returns 0 if stop is not needed
1979  */
1980 int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size)
1981 {
1982 	struct netdev_queue *nq;
1983 
1984 	if (likely(IDPF_DESC_UNUSED(tx_q) >= size))
1985 		return 0;
1986 
1987 	u64_stats_update_begin(&tx_q->stats_sync);
1988 	u64_stats_inc(&tx_q->q_stats.tx.q_busy);
1989 	u64_stats_update_end(&tx_q->stats_sync);
1990 
1991 	nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
1992 
1993 	return netif_txq_maybe_stop(nq, IDPF_DESC_UNUSED(tx_q), size, size);
1994 }
1995 
1996 /**
1997  * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
1998  * @tx_q: the queue to be checked
1999  * @descs_needed: number of descriptors required for this packet
2000  *
2001  * Returns 0 if stop is not needed
2002  */
2003 static int idpf_tx_maybe_stop_splitq(struct idpf_queue *tx_q,
2004 				     unsigned int descs_needed)
2005 {
2006 	if (idpf_tx_maybe_stop_common(tx_q, descs_needed))
2007 		goto splitq_stop;
2008 
2009 	/* If there are too many outstanding completions expected on the
2010 	 * completion queue, stop the TX queue to give the device some time to
2011 	 * catch up
2012 	 */
2013 	if (unlikely(IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
2014 		     IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq)))
2015 		goto splitq_stop;
2016 
2017 	/* Also check for available bookkeeping buffers; if we are low, stop
2018 	 * the queue to wait for more completions
2019 	 */
2020 	if (unlikely(IDPF_TX_BUF_RSV_LOW(tx_q)))
2021 		goto splitq_stop;
2022 
2023 	return 0;
2024 
2025 splitq_stop:
2026 	u64_stats_update_begin(&tx_q->stats_sync);
2027 	u64_stats_inc(&tx_q->q_stats.tx.q_busy);
2028 	u64_stats_update_end(&tx_q->stats_sync);
2029 	netif_stop_subqueue(tx_q->vport->netdev, tx_q->idx);
2030 
2031 	return -EBUSY;
2032 }
2033 
2034 /**
2035  * idpf_tx_buf_hw_update - Store the new tail value
2036  * @tx_q: queue to bump
2037  * @xmit_more: more skbs pending
2038  * @xmit_more: more skb's pending
2039  *
2040  * The naming here is special in that 'hw' signals that this function is about
2041  * to do a register write to update our queue status. We know this can only
2042  * mean tail here as HW should be owning head for TX.
2043  */
2044 void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val,
2045 			   bool xmit_more)
2046 {
2047 	struct netdev_queue *nq;
2048 
2049 	nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
2050 	tx_q->next_to_use = val;
2051 
2052 	idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED);
2053 
2054 	/* Force memory writes to complete before letting h/w
2055 	 * know there are new descriptors to fetch.  (Only
2056 	 * applicable for weak-ordered memory model archs,
2057 	 * such as IA-64).
2058 	 */
2059 	wmb();
2060 
2061 	/* notify HW of packet */
2062 	if (netif_xmit_stopped(nq) || !xmit_more)
2063 		writel(val, tx_q->tail);
2064 }
2065 
2066 /**
2067  * idpf_tx_desc_count_required - calculate number of Tx descriptors needed
2068  * @txq: queue to send buffer on
2069  * @skb: send buffer
2070  *
2071  * Returns number of data descriptors needed for this skb.
2072  */
2073 unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq,
2074 					 struct sk_buff *skb)
2075 {
2076 	const struct skb_shared_info *shinfo;
2077 	unsigned int count = 0, i;
2078 
2079 	count += !!skb_headlen(skb);
2080 
2081 	if (!skb_is_nonlinear(skb))
2082 		return count;
2083 
2084 	shinfo = skb_shinfo(skb);
2085 	for (i = 0; i < shinfo->nr_frags; i++) {
2086 		unsigned int size;
2087 
2088 		size = skb_frag_size(&shinfo->frags[i]);
2089 
2090 		/* We only need to use the idpf_size_to_txd_count check if the
2091 		 * fragment is going to span multiple descriptors,
2092 		 * i.e. size >= 16K.
2093 		 */
2094 		if (size >= SZ_16K)
2095 			count += idpf_size_to_txd_count(size);
2096 		else
2097 			count++;
2098 	}
2099 
2100 	if (idpf_chk_linearize(skb, txq->tx_max_bufs, count)) {
2101 		if (__skb_linearize(skb))
2102 			return 0;
2103 
2104 		count = idpf_size_to_txd_count(skb->len);
2105 		u64_stats_update_begin(&txq->stats_sync);
2106 		u64_stats_inc(&txq->q_stats.tx.linearize);
2107 		u64_stats_update_end(&txq->stats_sync);
2108 	}
2109 
2110 	return count;
2111 }
2112 
2113 /**
2114  * idpf_tx_dma_map_error - handle TX DMA map errors
2115  * @txq: queue to send buffer on
2116  * @skb: send buffer
2117  * @first: original first buffer info buffer for packet
2118  * @idx: starting point on ring to unwind
2119  */
2120 void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb,
2121 			   struct idpf_tx_buf *first, u16 idx)
2122 {
2123 	u64_stats_update_begin(&txq->stats_sync);
2124 	u64_stats_inc(&txq->q_stats.tx.dma_map_errs);
2125 	u64_stats_update_end(&txq->stats_sync);
2126 
2127 	/* clear DMA mappings from the failed index back to the first buffer */
2128 	for (;;) {
2129 		struct idpf_tx_buf *tx_buf;
2130 
2131 		tx_buf = &txq->tx_buf[idx];
2132 		idpf_tx_buf_rel(txq, tx_buf);
2133 		if (tx_buf == first)
2134 			break;
2135 		if (idx == 0)
2136 			idx = txq->desc_count;
2137 		idx--;
2138 	}
2139 
2140 	if (skb_is_gso(skb)) {
2141 		union idpf_tx_flex_desc *tx_desc;
2142 
2143 		/* If we failed a DMA mapping for a TSO packet, we will have
2144 		 * used one additional descriptor for a context
2145 		 * descriptor. Reset that here.
2146 		 */
2147 		tx_desc = IDPF_FLEX_TX_DESC(txq, idx);
2148 		memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc));
2149 		if (idx == 0)
2150 			idx = txq->desc_count;
2151 		idx--;
2152 	}
2153 
2154 	/* Update tail in case netdev_xmit_more was previously true */
2155 	idpf_tx_buf_hw_update(txq, idx, false);
2156 }
2157 
2158 /**
2159  * idpf_tx_splitq_bump_ntu - adjust NTU and generation
2160  * @txq: the tx ring to wrap
2161  * @ntu: ring index to bump
2162  */
2163 static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_queue *txq, u16 ntu)
2164 {
2165 	ntu++;
2166 
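	/* On wrap, advance the completion tag generation so tags handed out
	 * on this pass around the ring differ from those of the last pass.
	 */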
2167 	if (ntu == txq->desc_count) {
2168 		ntu = 0;
2169 		txq->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(txq);
2170 	}
2171 
2172 	return ntu;
2173 }
2174 
2175 /**
2176  * idpf_tx_splitq_map - Build the Tx flex descriptor
2177  * @tx_q: queue to send buffer on
2178  * @params: pointer to splitq params struct
2179  * @first: first buffer info buffer to use
2180  *
2181  * This function loops over the skb data pointed to by *first
2182  * and gets a physical address for each memory location and programs
2183  * it and the length into the transmit flex descriptor.
2184  */
2185 static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
2186 			       struct idpf_tx_splitq_params *params,
2187 			       struct idpf_tx_buf *first)
2188 {
2189 	union idpf_tx_flex_desc *tx_desc;
2190 	unsigned int data_len, size;
2191 	struct idpf_tx_buf *tx_buf;
2192 	u16 i = tx_q->next_to_use;
2193 	struct netdev_queue *nq;
2194 	struct sk_buff *skb;
2195 	skb_frag_t *frag;
2196 	u16 td_cmd = 0;
2197 	dma_addr_t dma;
2198 
2199 	skb = first->skb;
2200 
2201 	td_cmd = params->offload.td_cmd;
2202 
2203 	data_len = skb->data_len;
2204 	size = skb_headlen(skb);
2205 
2206 	tx_desc = IDPF_FLEX_TX_DESC(tx_q, i);
2207 
2208 	dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
2209 
2210 	tx_buf = first;
2211 
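	/* The completion tag pairs the current tag generation (upper bits)
	 * with the ring index of the packet's first descriptor (lower bits)
	 * so an out-of-order completion can find every buffer belonging to
	 * this packet.
	 */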
2212 	params->compl_tag =
2213 		(tx_q->compl_tag_cur_gen << tx_q->compl_tag_gen_s) | i;
2214 
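	/* The first pass of this loop handles the linear head mapped above;
	 * each later pass builds descriptors for one frag, which is mapped
	 * at the bottom of the loop.
	 */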
2215 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
2216 		unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
2217 
2218 		if (dma_mapping_error(tx_q->dev, dma))
2219 			return idpf_tx_dma_map_error(tx_q, skb, first, i);
2220 
2221 		tx_buf->compl_tag = params->compl_tag;
2222 
2223 		/* record length, and DMA address */
2224 		dma_unmap_len_set(tx_buf, len, size);
2225 		dma_unmap_addr_set(tx_buf, dma, dma);
2226 
2227 		/* buf_addr is in same location for both desc types */
2228 		tx_desc->q.buf_addr = cpu_to_le64(dma);
2229 
2230 		/* The stack can send us fragments that are too large for a
2231 		 * single descriptor i.e. frag size > 16K-1. We will need to
2232 		 * split the fragment across multiple descriptors in this case.
2233 		 * To adhere to HW alignment restrictions, the fragment needs
2234 		 * to be split such that the first chunk ends on a 4K boundary
2235 		 * and all subsequent chunks start on a 4K boundary. We still
2236 		 * want to send as much data as possible though, so our
2237 		 * intermediate descriptor chunk size will be 12K.
2238 		 *
2239 		 * For example, consider a 32K fragment mapped to DMA addr 2600.
2240 		 * ------------------------------------------------------------
2241 		 * |                    frag_size = 32K                       |
2242 		 * ------------------------------------------------------------
2243 		 * |2600		  |16384	    |28672
2244 		 *
2245 		 * 3 descriptors will be used for this fragment. The HW expects
2246 		 * the descriptors to contain the following:
2247 		 * ------------------------------------------------------------
2248 		 * | size = 13784         | size = 12K      | size = 6696     |
2249 		 * | dma = 2600           | dma = 16384     | dma = 28672     |
2250 		 * ------------------------------------------------------------
2251 		 *
2252 		 * We need to first adjust the max_data for the first chunk so
2253 		 * that it ends on a 4K boundary. By negating the value of the
2254 		 * DMA address and taking only the low order bits, we're
2255 		 * effectively calculating
2256 		 *	4K - (DMA addr lower order bits) =
2257 		 *				bytes to next boundary.
2258 		 *
2259 		 * Add that to our base aligned max_data (12K) and we have
2260 		 * our first chunk size. In the example above,
2261 		 *	13784 = 12K + (4096-2600)
2262 		 *
2263 		 * After guaranteeing the first chunk ends on a 4K boundary, we
2264 		 * will give the intermediate descriptors 12K chunks and
2265 		 * whatever is left to the final descriptor. This ensures that
2266 		 * all descriptors used for the remaining chunks of the
2267 		 * fragment start on a 4K boundary and we use as few
2268 		 * descriptors as possible.
2269 		 */
2270 		max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1);
2271 		while (unlikely(size > IDPF_TX_MAX_DESC_DATA)) {
2272 			idpf_tx_splitq_build_desc(tx_desc, params, td_cmd,
2273 						  max_data);
2274 
2275 			tx_desc++;
2276 			i++;
2277 
2278 			if (i == tx_q->desc_count) {
2279 				tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0);
2280 				i = 0;
2281 				tx_q->compl_tag_cur_gen =
2282 					IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
2283 			}
2284 
2285 			/* Since this packet has a buffer that is going to span
2286 			 * multiple descriptors, it's going to leave holes in
2287 			 * the TX buffer ring. To ensure these holes do not
2288 			 * cause issues in the cleaning routines, we will clear
2289 			 * them of any stale data and assign them the same
2290 			 * completion tag as the current packet. Then when the
2291 			 * packet is being cleaned, the cleaning routines will
2292 			 * simply pass over these holes and finish cleaning the
2293 			 * rest of the packet.
2294 			 */
2295 			memset(&tx_q->tx_buf[i], 0, sizeof(struct idpf_tx_buf));
2296 			tx_q->tx_buf[i].compl_tag = params->compl_tag;
2297 
2298 			/* Adjust the DMA offset and the remaining size of the
2299 			 * fragment.  On the first iteration of this loop,
2300 			 * max_data will be >= 12K and <= 16K-1.  On any
2301 			 * subsequent iteration of this loop, max_data will
2302 			 * always be 12K.
2303 			 */
2304 			dma += max_data;
2305 			size -= max_data;
2306 
2307 			/* Reset max_data since remaining chunks will be 12K
2308 			 * at most
2309 			 */
2310 			max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
2311 
2312 			/* buf_addr is in same location for both desc types */
2313 			tx_desc->q.buf_addr = cpu_to_le64(dma);
2314 		}
2315 
2316 		if (!data_len)
2317 			break;
2318 
2319 		idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
2320 		tx_desc++;
2321 		i++;
2322 
2323 		if (i == tx_q->desc_count) {
2324 			tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0);
2325 			i = 0;
2326 			tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
2327 		}
2328 
2329 		size = skb_frag_size(frag);
2330 		data_len -= size;
2331 
2332 		dma = skb_frag_dma_map(tx_q->dev, frag, 0, size,
2333 				       DMA_TO_DEVICE);
2334 
2335 		tx_buf = &tx_q->tx_buf[i];
2336 	}
2337 
2338 	/* record SW timestamp if HW timestamp is not available */
2339 	skb_tx_timestamp(skb);
2340 
2341 	/* write last descriptor with RS and EOP bits */
2342 	td_cmd |= params->eop_cmd;
2343 	idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
2344 	i = idpf_tx_splitq_bump_ntu(tx_q, i);
2345 
2346 	/* set next_to_watch value indicating a packet is present */
2347 	first->next_to_watch = tx_desc;
2348 
2349 	tx_q->txq_grp->num_completions_pending++;
2350 
2351 	/* record bytecount for BQL */
2352 	nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
2353 	netdev_tx_sent_queue(nq, first->bytecount);
2354 
2355 	idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
2356 }
2357 
2358 /**
2359  * idpf_tso - computes mss and TSO length to prepare for TSO
2360  * @skb: pointer to skb
2361  * @off: pointer to struct that holds offload parameters
2362  *
2363  * Returns error (negative) if TSO was requested but cannot be applied to the
2364  * given skb, 0 if TSO does not apply to the given skb, or 1 otherwise.
2365  */
2366 int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off)
2367 {
2368 	const struct skb_shared_info *shinfo;
2369 	union {
2370 		struct iphdr *v4;
2371 		struct ipv6hdr *v6;
2372 		unsigned char *hdr;
2373 	} ip;
2374 	union {
2375 		struct tcphdr *tcp;
2376 		struct udphdr *udp;
2377 		unsigned char *hdr;
2378 	} l4;
2379 	u32 paylen, l4_start;
2380 	int err;
2381 
2382 	if (!skb_is_gso(skb))
2383 		return 0;
2384 
2385 	err = skb_cow_head(skb, 0);
2386 	if (err < 0)
2387 		return err;
2388 
2389 	shinfo = skb_shinfo(skb);
2390 
2391 	ip.hdr = skb_network_header(skb);
2392 	l4.hdr = skb_transport_header(skb);
2393 
2394 	/* initialize outer IP header fields */
2395 	if (ip.v4->version == 4) {
2396 		ip.v4->tot_len = 0;
2397 		ip.v4->check = 0;
2398 	} else if (ip.v6->version == 6) {
2399 		ip.v6->payload_len = 0;
2400 	}
2401 
2402 	l4_start = skb_transport_offset(skb);
2403 
2404 	/* remove payload length from checksum */
2405 	paylen = skb->len - l4_start;
2406 
2407 	switch (shinfo->gso_type & ~SKB_GSO_DODGY) {
2408 	case SKB_GSO_TCPV4:
2409 	case SKB_GSO_TCPV6:
2410 		csum_replace_by_diff(&l4.tcp->check,
2411 				     (__force __wsum)htonl(paylen));
2412 		off->tso_hdr_len = __tcp_hdrlen(l4.tcp) + l4_start;
2413 		break;
2414 	case SKB_GSO_UDP_L4:
2415 		csum_replace_by_diff(&l4.udp->check,
2416 				     (__force __wsum)htonl(paylen));
2417 		/* compute length of segmentation header */
2418 		off->tso_hdr_len = sizeof(struct udphdr) + l4_start;
2419 		l4.udp->len = htons(shinfo->gso_size + sizeof(struct udphdr));
2420 		break;
2421 	default:
2422 		return -EINVAL;
2423 	}
2424 
2425 	off->tso_len = skb->len - off->tso_hdr_len;
2426 	off->mss = shinfo->gso_size;
2427 	off->tso_segs = shinfo->gso_segs;
2428 
2429 	off->tx_flags |= IDPF_TX_FLAGS_TSO;
2430 
2431 	return 1;
2432 }
2433 
2434 /**
2435  * __idpf_chk_linearize - Check skb is not using too many buffers
2436  * @skb: send buffer
2437  * @max_bufs: maximum number of buffers
2438  *
2439  * For TSO we need to count the TSO header and segment payload separately.  As
2440  * such we need to check cases where we have max_bufs-1 fragments or more as we
2441  * can potentially require max_bufs+1 DMA transactions, 1 for the TSO header, 1
2442  * for the segment payload in the first descriptor, and another max_buf-1 for
2443  * for the segment payload in the first descriptor, and another max_bufs-1 for
2444  */
2445 static bool __idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs)
2446 {
2447 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
2448 	const skb_frag_t *frag, *stale;
2449 	int nr_frags, sum;
2450 
2451 	/* no need to check if number of frags is less than max_bufs - 1 */
2452 	nr_frags = shinfo->nr_frags;
2453 	if (nr_frags < (max_bufs - 1))
2454 		return false;
2455 
2456 	/* We need to walk through the list and validate that each group
2457 	 * of max_bufs-2 fragments totals at least gso_size.
2458 	 */
2459 	nr_frags -= max_bufs - 2;
2460 	frag = &shinfo->frags[0];
2461 
2462 	/* Initialize sum to the negative of (gso_size - 1).  We use
2463 	 * this as the worst case scenario in which the frag ahead of us only
2464 	 * provides one byte which is why we are limited to max_bufs-2
2465 	 * descriptors for a single transmit as the header and previous
2466 	 * fragment are already consuming 2 descriptors.
2467 	 */
2468 	sum = 1 - shinfo->gso_size;
2469 
2470 	/* Add size of frags 0 through 4 to create our initial sum */
2471 	sum += skb_frag_size(frag++);
2472 	sum += skb_frag_size(frag++);
2473 	sum += skb_frag_size(frag++);
2474 	sum += skb_frag_size(frag++);
2475 	sum += skb_frag_size(frag++);
2476 
2477 	/* Walk through fragments adding latest fragment, testing it, and
2478 	 * then removing stale fragments from the sum.
2479 	 */
2480 	for (stale = &shinfo->frags[0];; stale++) {
2481 		int stale_size = skb_frag_size(stale);
2482 
2483 		sum += skb_frag_size(frag++);
2484 
2485 		/* The stale fragment may present us with a smaller
2486 		 * descriptor than the actual fragment size. To account
2487 		 * for that we need to remove all the data on the front and
2488 		 * figure out what the remainder would be in the last
2489 		 * descriptor associated with the fragment.
2490 		 */
2491 		if (stale_size > IDPF_TX_MAX_DESC_DATA) {
2492 			int align_pad = -(skb_frag_off(stale)) &
2493 					(IDPF_TX_MAX_READ_REQ_SIZE - 1);
2494 
2495 			sum -= align_pad;
2496 			stale_size -= align_pad;
2497 
2498 			do {
2499 				sum -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
2500 				stale_size -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
2501 			} while (stale_size > IDPF_TX_MAX_DESC_DATA);
2502 		}
2503 
2504 		/* if sum is negative we failed to make sufficient progress */
2505 		if (sum < 0)
2506 			return true;
2507 
2508 		if (!nr_frags--)
2509 			break;
2510 
2511 		sum -= stale_size;
2512 	}
2513 
2514 	return false;
2515 }
2516 
2517 /**
2518  * idpf_chk_linearize - Check if skb exceeds max descriptors per packet
2519  * @skb: send buffer
2520  * @max_bufs: maximum scatter gather buffers for single packet
2521  * @count: number of buffers this packet needs
2522  *
2523  * Make sure we don't exceed maximum scatter gather buffers for a single
2524  * packet. We have to do some special checking around the boundary (max_bufs-1)
2525  * if TSO is on since we need to count the TSO header and payload separately.
2526  * E.g.: a packet with 7 fragments can require 9 DMA transactions; 1 for TSO
2527  * header, 1 for segment payload, and then 7 for the fragments.
2528  */
2529 bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
2530 			unsigned int count)
2531 {
2532 	if (likely(count < max_bufs))
2533 		return false;
2534 	if (skb_is_gso(skb))
2535 		return __idpf_chk_linearize(skb, max_bufs);
2536 
2537 	return count > max_bufs;
2538 }
2539 
2540 /**
2541  * idpf_tx_splitq_get_ctx_desc - grab next desc and update buffer ring
2542  * @txq: queue to put context descriptor on
2543  *
2544  * Since the TX buffer ring mimics the descriptor ring, update the tx buffer
2545  * ring entry to reflect that this index is a context descriptor
2546  */
2547 static struct idpf_flex_tx_ctx_desc *
2548 idpf_tx_splitq_get_ctx_desc(struct idpf_queue *txq)
2549 {
2550 	struct idpf_flex_tx_ctx_desc *desc;
2551 	int i = txq->next_to_use;
2552 
2553 	memset(&txq->tx_buf[i], 0, sizeof(struct idpf_tx_buf));
2554 	txq->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
2555 
2556 	/* grab the next descriptor */
2557 	desc = IDPF_FLEX_TX_CTX_DESC(txq, i);
2558 	txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i);
2559 
2560 	return desc;
2561 }
2562 
2563 /**
2564  * idpf_tx_drop_skb - free the SKB and bump tail if necessary
2565  * @tx_q: queue to send buffer on
2566  * @skb: pointer to skb
2567  */
2568 netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb)
2569 {
2570 	u64_stats_update_begin(&tx_q->stats_sync);
2571 	u64_stats_inc(&tx_q->q_stats.tx.skb_drops);
2572 	u64_stats_update_end(&tx_q->stats_sync);
2573 
2574 	idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
2575 
2576 	dev_kfree_skb(skb);
2577 
2578 	return NETDEV_TX_OK;
2579 }
2580 
2581 /**
2582  * idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors
2583  * @skb: send buffer
2584  * @tx_q: queue to send buffer on
2585  *
2586  * Returns NETDEV_TX_OK if sent, else an error code
2587  */
2588 static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
2589 					struct idpf_queue *tx_q)
2590 {
2591 	struct idpf_tx_splitq_params tx_params = { };
2592 	struct idpf_tx_buf *first;
2593 	unsigned int count;
2594 	int tso;
2595 
2596 	count = idpf_tx_desc_count_required(tx_q, skb);
2597 	if (unlikely(!count))
2598 		return idpf_tx_drop_skb(tx_q, skb);
2599 
2600 	tso = idpf_tso(skb, &tx_params.offload);
2601 	if (unlikely(tso < 0))
2602 		return idpf_tx_drop_skb(tx_q, skb);
2603 
2604 	/* Check for splitq specific TX resources: a cache line's worth of
	 * extra descriptors, plus one context descriptor when doing TSO
	 */
2605 	count += (IDPF_TX_DESCS_PER_CACHE_LINE + tso);
2606 	if (idpf_tx_maybe_stop_splitq(tx_q, count)) {
2607 		idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
2608 
2609 		return NETDEV_TX_BUSY;
2610 	}
2611 
2612 	if (tso) {
2613 		/* If tso is needed, set up context desc */
2614 		struct idpf_flex_tx_ctx_desc *ctx_desc =
2615 			idpf_tx_splitq_get_ctx_desc(tx_q);
2616 
2617 		ctx_desc->tso.qw1.cmd_dtype =
2618 				cpu_to_le16(IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX |
2619 					    IDPF_TX_FLEX_CTX_DESC_CMD_TSO);
2620 		ctx_desc->tso.qw0.flex_tlen =
2621 				cpu_to_le32(tx_params.offload.tso_len &
2622 					    IDPF_TXD_FLEX_CTX_TLEN_M);
2623 		ctx_desc->tso.qw0.mss_rt =
2624 				cpu_to_le16(tx_params.offload.mss &
2625 					    IDPF_TXD_FLEX_CTX_MSS_RT_M);
2626 		ctx_desc->tso.qw0.hdr_len = tx_params.offload.tso_hdr_len;
2627 
2628 		u64_stats_update_begin(&tx_q->stats_sync);
2629 		u64_stats_inc(&tx_q->q_stats.tx.lso_pkts);
2630 		u64_stats_update_end(&tx_q->stats_sync);
2631 	}
2632 
2633 	/* record the location of the first descriptor for this packet */
2634 	first = &tx_q->tx_buf[tx_q->next_to_use];
2635 	first->skb = skb;
2636 
2637 	if (tso) {
2638 		first->gso_segs = tx_params.offload.tso_segs;
2639 		first->bytecount = skb->len +
2640 			((first->gso_segs - 1) * tx_params.offload.tso_hdr_len);
2641 	} else {
2642 		first->gso_segs = 1;
2643 		first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
2644 	}
2645 
2646 	if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_q->flags)) {
2647 		tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE;
2648 		tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP;
2649 		/* Set the RE bit to catch any packets that may have not been
2650 		 * stashed during RS completion cleaning. MIN_GAP is set to
2651 		 * MIN_RING size to ensure it will be set at least once each
2652 		 * time around the ring.
2653 		 */
2654 		if (!(tx_q->next_to_use % IDPF_TX_SPLITQ_RE_MIN_GAP)) {
2655 			tx_params.eop_cmd |= IDPF_TXD_FLEX_FLOW_CMD_RE;
2656 			tx_q->txq_grp->num_completions_pending++;
2657 		}
2658 
2659 		if (skb->ip_summed == CHECKSUM_PARTIAL)
2660 			tx_params.offload.td_cmd |= IDPF_TXD_FLEX_FLOW_CMD_CS_EN;
2661 
2662 	} else {
2663 		tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2;
2664 		tx_params.eop_cmd = IDPF_TXD_LAST_DESC_CMD;
2665 
2666 		if (skb->ip_summed == CHECKSUM_PARTIAL)
2667 			tx_params.offload.td_cmd |= IDPF_TX_FLEX_DESC_CMD_CS_EN;
2668 	}
2669 
2670 	idpf_tx_splitq_map(tx_q, &tx_params, first);
2671 
2672 	return NETDEV_TX_OK;
2673 }
2674 
2675 /**
2676  * idpf_tx_splitq_start - Selects the right Tx queue to send buffer
2677  * @skb: send buffer
2678  * @netdev: network interface device structure
2679  *
2680  * Returns NETDEV_TX_OK if sent, else an error code
2681  */
2682 netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb,
2683 				 struct net_device *netdev)
2684 {
2685 	struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
2686 	struct idpf_queue *tx_q;
2687 
2688 	if (unlikely(skb_get_queue_mapping(skb) >= vport->num_txq)) {
2689 		dev_kfree_skb_any(skb);
2690 
2691 		return NETDEV_TX_OK;
2692 	}
2693 
2694 	tx_q = vport->txqs[skb_get_queue_mapping(skb)];
2695 
2696 	/* hardware can't handle really short frames; hardware padding works
2697 	 * beyond this point
2698 	 */
2699 	if (skb_put_padto(skb, tx_q->tx_min_pkt_len)) {
2700 		idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
2701 
2702 		return NETDEV_TX_OK;
2703 	}
2704 
2705 	return idpf_tx_splitq_frame(skb, tx_q);
2706 }
2707 
2708 /**
2709  * idpf_ptype_to_htype - get a hash type
2710  * @decoded: Decoded Rx packet type related fields
2711  *
2712  * Returns appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be used by
2713  * skb_set_hash, based on the PTYPE parsed by the HW Rx pipeline and carried
2714  * in the Rx descriptor.
2715  */
2716 enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded)
2717 {
2718 	if (!decoded->known)
2719 		return PKT_HASH_TYPE_NONE;
2720 	if (decoded->payload_layer == IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 &&
2721 	    decoded->inner_prot)
2722 		return PKT_HASH_TYPE_L4;
2723 	if (decoded->payload_layer == IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 &&
2724 	    decoded->outer_ip)
2725 		return PKT_HASH_TYPE_L3;
2726 	if (decoded->outer_ip == IDPF_RX_PTYPE_OUTER_L2)
2727 		return PKT_HASH_TYPE_L2;
2728 
2729 	return PKT_HASH_TYPE_NONE;
2730 }
2731 
2732 /**
2733  * idpf_rx_hash - set the hash value in the skb
2734  * @rxq: Rx descriptor ring packet is being transacted on
2735  * @skb: pointer to current skb being populated
2736  * @rx_desc: Receive descriptor
2737  * @decoded: Decoded Rx packet type related fields
2738  */
2739 static void idpf_rx_hash(struct idpf_queue *rxq, struct sk_buff *skb,
2740 			 struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
2741 			 struct idpf_rx_ptype_decoded *decoded)
2742 {
2743 	u32 hash;
2744 
2745 	if (unlikely(!idpf_is_feature_ena(rxq->vport, NETIF_F_RXHASH)))
2746 		return;
2747 
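	/* The 32-bit hash is split across three descriptor fields: hash1
	 * holds the low 16 bits, hash2 bits 16-23, and hash3 bits 24-31.
	 */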
2748 	hash = le16_to_cpu(rx_desc->hash1) |
2749 	       (rx_desc->ff2_mirrid_hash2.hash2 << 16) |
2750 	       (rx_desc->hash3 << 24);
2751 
2752 	skb_set_hash(skb, hash, idpf_ptype_to_htype(decoded));
2753 }
2754 
2755 /**
2756  * idpf_rx_csum - Indicate in skb if checksum is good
2757  * @rxq: Rx descriptor ring packet is being transacted on
2758  * @skb: pointer to current skb being populated
2759  * @csum_bits: checksum fields extracted from the descriptor
2760  * @decoded: Decoded Rx packet type related fields
2761  *
2762  * skb->protocol must be set before this function is called
2763  */
2764 static void idpf_rx_csum(struct idpf_queue *rxq, struct sk_buff *skb,
2765 			 struct idpf_rx_csum_decoded *csum_bits,
2766 			 struct idpf_rx_ptype_decoded *decoded)
2767 {
2768 	bool ipv4, ipv6;
2769 
2770 	/* check if Rx checksum is enabled */
2771 	if (unlikely(!idpf_is_feature_ena(rxq->vport, NETIF_F_RXCSUM)))
2772 		return;
2773 
2774 	/* check if HW has decoded the packet and checksum */
2775 	if (!(csum_bits->l3l4p))
2776 		return;
2777 
2778 	ipv4 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV4);
2779 	ipv6 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV6);
2780 
2781 	if (ipv4 && (csum_bits->ipe || csum_bits->eipe))
2782 		goto checksum_fail;
2783 
2784 	if (ipv6 && csum_bits->ipv6exadd)
2785 		return;
2786 
2787 	/* check for L4 errors and handle packets that were not able to be
2788 	 * checksummed
2789 	 */
2790 	if (csum_bits->l4e)
2791 		goto checksum_fail;
2792 
2793 	/* Only report checksum unnecessary for ICMP, TCP, UDP, or SCTP */
2794 	switch (decoded->inner_prot) {
2795 	case IDPF_RX_PTYPE_INNER_PROT_ICMP:
2796 	case IDPF_RX_PTYPE_INNER_PROT_TCP:
2797 	case IDPF_RX_PTYPE_INNER_PROT_UDP:
2798 		if (!csum_bits->raw_csum_inv) {
2799 			u16 csum = csum_bits->raw_csum;
2800 
2801 			skb->csum = csum_unfold((__force __sum16)~swab16(csum));
2802 			skb->ip_summed = CHECKSUM_COMPLETE;
2803 		} else {
2804 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2805 		}
2806 		break;
2807 	case IDPF_RX_PTYPE_INNER_PROT_SCTP:
2808 		skb->ip_summed = CHECKSUM_UNNECESSARY;
2809 		break;
2810 	default:
2811 		break;
2812 	}
2813 
2814 	return;
2815 
2816 checksum_fail:
2817 	u64_stats_update_begin(&rxq->stats_sync);
2818 	u64_stats_inc(&rxq->q_stats.rx.hw_csum_err);
2819 	u64_stats_update_end(&rxq->stats_sync);
2820 }
2821 
2822 /**
2823  * idpf_rx_splitq_extract_csum_bits - Extract checksum bits from descriptor
2824  * @rx_desc: receive descriptor
2825  * @csum: structure to store the extracted checksum fields
2826  *
2827  **/
2828 static void idpf_rx_splitq_extract_csum_bits(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
2829 					     struct idpf_rx_csum_decoded *csum)
2830 {
2831 	u8 qword0, qword1;
2832 
2833 	qword0 = rx_desc->status_err0_qw0;
2834 	qword1 = rx_desc->status_err0_qw1;
2835 
2836 	csum->ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M,
2837 			      qword1);
2838 	csum->eipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M,
2839 			       qword1);
2840 	csum->l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M,
2841 			      qword1);
2842 	csum->l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M,
2843 				qword1);
2844 	csum->ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M,
2845 				    qword0);
2846 	csum->raw_csum_inv = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M,
2847 				       le16_to_cpu(rx_desc->ptype_err_fflags0));
2848 	csum->raw_csum = le16_to_cpu(rx_desc->misc.raw_cs);
2849 }
2850 
2851 /**
2852  * idpf_rx_rsc - Set the RSC fields in the skb
2853  * @rxq : Rx descriptor ring packet is being transacted on
2854  * @skb : pointer to current skb being populated
2855  * @rx_desc: Receive descriptor
2856  * @decoded: Decoded Rx packet type related fields
2857  *
2858  * Return 0 on success and error code on failure
2859  *
2860  * Populate the skb fields with the total number of RSC segments, RSC payload
2861  * length and packet type.
2862  */
2863 static int idpf_rx_rsc(struct idpf_queue *rxq, struct sk_buff *skb,
2864 		       struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
2865 		       struct idpf_rx_ptype_decoded *decoded)
2866 {
2867 	u16 rsc_segments, rsc_seg_len;
2868 	bool ipv4, ipv6;
2869 	int len;
2870 
2871 	if (unlikely(!decoded->outer_ip))
2872 		return -EINVAL;
2873 
2874 	rsc_seg_len = le16_to_cpu(rx_desc->misc.rscseglen);
2875 	if (unlikely(!rsc_seg_len))
2876 		return -EINVAL;
2877 
2878 	ipv4 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV4);
2879 	ipv6 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV6);
2880 
2881 	if (unlikely(!(ipv4 ^ ipv6)))
2882 		return -EINVAL;
2883 
2884 	rsc_segments = DIV_ROUND_UP(skb->data_len, rsc_seg_len);
2885 	if (unlikely(rsc_segments == 1))
2886 		return 0;
2887 
2888 	NAPI_GRO_CB(skb)->count = rsc_segments;
2889 	skb_shinfo(skb)->gso_size = rsc_seg_len;
2890 
2891 	skb_reset_network_header(skb);
2892 	len = skb->len - skb_transport_offset(skb);
2893 
2894 	if (ipv4) {
2895 		struct iphdr *ipv4h = ip_hdr(skb);
2896 
2897 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2898 
2899 		/* Reset and set transport header offset in skb */
2900 		skb_set_transport_header(skb, sizeof(struct iphdr));
2901 
2902 		/* Compute the TCP pseudo header checksum */
2903 		tcp_hdr(skb)->check =
2904 			~tcp_v4_check(len, ipv4h->saddr, ipv4h->daddr, 0);
2905 	} else {
2906 		struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2907 
2908 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
2909 		skb_set_transport_header(skb, sizeof(struct ipv6hdr));
2910 		tcp_hdr(skb)->check =
2911 			~tcp_v6_check(len, &ipv6h->saddr, &ipv6h->daddr, 0);
2912 	}
2913 
2914 	tcp_gro_complete(skb);
2915 
2916 	u64_stats_update_begin(&rxq->stats_sync);
2917 	u64_stats_inc(&rxq->q_stats.rx.rsc_pkts);
2918 	u64_stats_update_end(&rxq->stats_sync);
2919 
2920 	return 0;
2921 }
2922 
2923 /**
2924  * idpf_rx_process_skb_fields - Populate skb header fields from Rx descriptor
2925  * @rxq: Rx descriptor ring packet is being transacted on
2926  * @skb: pointer to current skb being populated
2927  * @rx_desc: Receive descriptor
2928  *
2929  * This function checks the ring, descriptor, and packet information in
2930  * order to populate the hash, checksum, protocol, and
2931  * other fields within the skb.
2932  */
2933 static int idpf_rx_process_skb_fields(struct idpf_queue *rxq,
2934 				      struct sk_buff *skb,
2935 				      struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
2936 {
2937 	struct idpf_rx_csum_decoded csum_bits = { };
2938 	struct idpf_rx_ptype_decoded decoded;
2939 	u16 rx_ptype;
2940 
2941 	rx_ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M,
2942 			     le16_to_cpu(rx_desc->ptype_err_fflags0));
2943 
2944 	decoded = rxq->vport->rx_ptype_lkup[rx_ptype];
2945 	/* If we don't know the ptype we can't do anything else with it. Just
2946 	 * pass it up the stack as-is.
2947 	 */
2948 	if (!decoded.known)
2949 		return 0;
2950 
2951 	/* process RSS/hash */
2952 	idpf_rx_hash(rxq, skb, rx_desc, &decoded);
2953 
2954 	skb->protocol = eth_type_trans(skb, rxq->vport->netdev);
2955 
2956 	if (FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M,
2957 		      le16_to_cpu(rx_desc->hdrlen_flags)))
2958 		return idpf_rx_rsc(rxq, skb, rx_desc, &decoded);
2959 
2960 	idpf_rx_splitq_extract_csum_bits(rx_desc, &csum_bits);
2961 	idpf_rx_csum(rxq, skb, &csum_bits, &decoded);
2962 
2963 	return 0;
2964 }
2965 
2966 /**
2967  * idpf_rx_add_frag - Add contents of Rx buffer to sk_buff as a frag
2968  * @rx_buf: buffer containing page to add
2969  * @skb: sk_buff to place the data into
2970  * @size: packet length from rx_desc
2971  *
2972  * This function will add the data contained in rx_buf->page to the skb.
2973  * It will just attach the page as a frag to the skb.
2974  * The function will then update the page offset.
2975  */
2976 void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
2977 		      unsigned int size)
2978 {
2979 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
2980 			rx_buf->page_offset, size, rx_buf->truesize);
2981 
2982 	rx_buf->page = NULL;
2983 }
2984 
2985 /**
2986  * idpf_rx_construct_skb - Allocate skb and populate it
2987  * @rxq: Rx descriptor queue
2988  * @rx_buf: Rx buffer to pull data from
2989  * @size: the length of the packet
2990  *
2991  * This function allocates an skb. It then populates it with the page
2992  * data from the current receive descriptor, taking care to set up the
2993  * skb correctly.
2994  */
2995 struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq,
2996 				      struct idpf_rx_buf *rx_buf,
2997 				      unsigned int size)
2998 {
2999 	unsigned int headlen;
3000 	struct sk_buff *skb;
3001 	void *va;
3002 
3003 	va = page_address(rx_buf->page) + rx_buf->page_offset;
3004 
3005 	/* prefetch first cache line of first page */
3006 	net_prefetch(va);
3007 	/* allocate a skb to store the frags */
3008 	skb = __napi_alloc_skb(&rxq->q_vector->napi, IDPF_RX_HDR_SIZE,
3009 			       GFP_ATOMIC);
3010 	if (unlikely(!skb)) {
3011 		idpf_rx_put_page(rx_buf);
3012 
3013 		return NULL;
3014 	}
3015 
3016 	skb_record_rx_queue(skb, rxq->idx);
3017 	skb_mark_for_recycle(skb);
3018 
3019 	/* Determine available headroom for copy */
3020 	headlen = size;
3021 	if (headlen > IDPF_RX_HDR_SIZE)
3022 		headlen = eth_get_headlen(skb->dev, va, IDPF_RX_HDR_SIZE);
3023 
3024 	/* align pull length to size of long to optimize memcpy performance */
3025 	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
3026 
3027 	/* if we exhaust the linear part then add what is left as a frag */
3028 	size -= headlen;
3029 	if (!size) {
3030 		idpf_rx_put_page(rx_buf);
3031 
3032 		return skb;
3033 	}
3034 
3035 	skb_add_rx_frag(skb, 0, rx_buf->page, rx_buf->page_offset + headlen,
3036 			size, rx_buf->truesize);
3037 
3038 	/* Since we're giving the page to the stack, clear our reference to it.
3039 	 * We'll get a new one during buffer posting.
3040 	 */
3041 	rx_buf->page = NULL;
3042 
3043 	return skb;
3044 }
3045 
3046 /**
3047  * idpf_rx_hdr_construct_skb - Allocate skb and populate it from header buffer
3048  * @rxq: Rx descriptor queue
3049  * @va: Rx buffer to pull data from
3050  * @size: the length of the packet
3051  *
3052  * This function allocates an skb. It then populates it with the page data from
3053  * the current receive descriptor, taking care to set up the skb correctly.
3054  * This specifically uses a header buffer to start building the skb.
3055  */
3056 static struct sk_buff *idpf_rx_hdr_construct_skb(struct idpf_queue *rxq,
3057 						 const void *va,
3058 						 unsigned int size)
3059 {
3060 	struct sk_buff *skb;
3061 
3062 	/* allocate a skb to store the frags */
3063 	skb = __napi_alloc_skb(&rxq->q_vector->napi, size, GFP_ATOMIC);
3064 	if (unlikely(!skb))
3065 		return NULL;
3066 
3067 	skb_record_rx_queue(skb, rxq->idx);
3068 
3069 	memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
3070 
3071 	/* More than likely, a payload fragment using a page from page_pool
3072 	 * will be added to the SKB, so preemptively mark it for recycle.
3073 	 * If not, it's inconsequential.
3074 	 */
3075 	skb_mark_for_recycle(skb);
3076 
3077 	return skb;
3078 }
3079 
3080 /**
3081  * idpf_rx_splitq_test_staterr - tests bits in Rx descriptor
3082  * status and error fields
3083  * @stat_err_field: field from descriptor to test bits in
3084  * @stat_err_bits: value to mask
3085  *
3086  */
3087 static bool idpf_rx_splitq_test_staterr(const u8 stat_err_field,
3088 					const u8 stat_err_bits)
3089 {
3090 	return !!(stat_err_field & stat_err_bits);
3091 }
3092 
3093 /**
3094  * idpf_rx_splitq_is_eop - process handling of EOP buffers
3095  * @rx_desc: Rx descriptor for current buffer
3096  *
3097  * If the buffer is an EOP buffer, this function exits returning true,
3098  * otherwise return false indicating that this is in fact a non-EOP buffer.
3099  */
3100 static bool idpf_rx_splitq_is_eop(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
3101 {
3102 	/* if we are the last buffer then there is nothing else to do */
3103 	return likely(idpf_rx_splitq_test_staterr(rx_desc->status_err0_qw1,
3104 						  IDPF_RXD_EOF_SPLITQ));
3105 }
3106 
3107 /**
3108  * idpf_rx_splitq_clean - Clean completed descriptors from Rx queue
3109  * @rxq: Rx descriptor queue to retrieve receive buffer queue
3110  * @budget: Total limit on number of packets to process
3111  *
3112  * This function provides a "bounce buffer" approach to Rx interrupt
3113  * processing. The advantage to this is that on systems that have
3114  * expensive overhead for IOMMU access this provides a means of avoiding
3115  * it by maintaining the mapping of the page to the system.
3116  *
3117  * Returns amount of work completed
3118  */
3119 static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
3120 {
3121 	int total_rx_bytes = 0, total_rx_pkts = 0;
3122 	struct idpf_queue *rx_bufq = NULL;
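	/* rxq->skb carries a partially built packet across NAPI polls when a
	 * multi-buffer frame straddles the budget boundary.
	 */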
3123 	struct sk_buff *skb = rxq->skb;
3124 	u16 ntc = rxq->next_to_clean;
3125 
3126 	/* Process Rx packets bounded by budget */
3127 	while (likely(total_rx_pkts < budget)) {
3128 		struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
3129 		struct idpf_sw_queue *refillq = NULL;
3130 		struct idpf_rxq_set *rxq_set = NULL;
3131 		struct idpf_rx_buf *rx_buf = NULL;
3132 		union virtchnl2_rx_desc *desc;
3133 		unsigned int pkt_len = 0;
3134 		unsigned int hdr_len = 0;
3135 		u16 gen_id, buf_id = 0;
3136 		/* Header buffer overflow only valid for header split */
3137 		bool hbo = false;
3138 		int bufq_id;
3139 		u8 rxdid;
3140 
3141 		/* get the Rx desc from Rx queue based on 'next_to_clean' */
3142 		desc = IDPF_RX_DESC(rxq, ntc);
3143 		rx_desc = (struct virtchnl2_rx_flex_desc_adv_nic_3 *)desc;
3144 
3145 		/* This memory barrier is needed to keep us from reading
3146 		 * any other fields out of the rx_desc
3147 		 */
3148 		dma_rmb();
3149 
3150 		/* if the descriptor isn't done, no work yet to do */
3151 		gen_id = le16_to_cpu(rx_desc->pktlen_gen_bufq_id);
3152 		gen_id = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M, gen_id);
3153 
3154 		if (test_bit(__IDPF_Q_GEN_CHK, rxq->flags) != gen_id)
3155 			break;
3156 
3157 		rxdid = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M,
3158 				  rx_desc->rxdid_ucast);
3159 		if (rxdid != VIRTCHNL2_RXDID_2_FLEX_SPLITQ) {
3160 			IDPF_RX_BUMP_NTC(rxq, ntc);
3161 			u64_stats_update_begin(&rxq->stats_sync);
3162 			u64_stats_inc(&rxq->q_stats.rx.bad_descs);
3163 			u64_stats_update_end(&rxq->stats_sync);
3164 			continue;
3165 		}
3166 
3167 		pkt_len = le16_to_cpu(rx_desc->pktlen_gen_bufq_id);
3168 		pkt_len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M,
3169 				    pkt_len);
3170 
3171 		hbo = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M,
3172 				rx_desc->status_err0_qw1);
3173 
3174 		if (unlikely(hbo)) {
3175 			/* If a header buffer overflow occurs, i.e. the header is
3176 			 * too large to fit in the header split buffer, HW will
3177 			 * put the entire packet, including headers, in the
3178 			 * data/payload buffer.
3179 			 */
3180 			u64_stats_update_begin(&rxq->stats_sync);
3181 			u64_stats_inc(&rxq->q_stats.rx.hsplit_buf_ovf);
3182 			u64_stats_update_end(&rxq->stats_sync);
3183 			goto bypass_hsplit;
3184 		}
3185 
3186 		hdr_len = le16_to_cpu(rx_desc->hdrlen_flags);
3187 		hdr_len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M,
3188 				    hdr_len);
3189 
3190 bypass_hsplit:
3191 		bufq_id = le16_to_cpu(rx_desc->pktlen_gen_bufq_id);
3192 		bufq_id = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M,
3193 				    bufq_id);
3194 
3195 		rxq_set = container_of(rxq, struct idpf_rxq_set, rxq);
3196 		if (!bufq_id)
3197 			refillq = rxq_set->refillq0;
3198 		else
3199 			refillq = rxq_set->refillq1;
3200 
3201 		/* retrieve buffer from the rxq */
3202 		rx_bufq = &rxq->rxq_grp->splitq.bufq_sets[bufq_id].bufq;
3203 
3204 		buf_id = le16_to_cpu(rx_desc->buf_id);
3205 
3206 		rx_buf = &rx_bufq->rx_buf.buf[buf_id];
3207 
3208 		if (hdr_len) {
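			/* Header buffers live in one contiguous DMA region,
			 * indexed by buf_id in IDPF_HDR_BUF_SIZE strides.
			 */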
3209 			const void *va = (u8 *)rx_bufq->rx_buf.hdr_buf_va +
3210 						(u32)buf_id * IDPF_HDR_BUF_SIZE;
3211 
3212 			skb = idpf_rx_hdr_construct_skb(rxq, va, hdr_len);
3213 			u64_stats_update_begin(&rxq->stats_sync);
3214 			u64_stats_inc(&rxq->q_stats.rx.hsplit_pkts);
3215 			u64_stats_update_end(&rxq->stats_sync);
3216 		}
3217 
3218 		if (pkt_len) {
3219 			idpf_rx_sync_for_cpu(rx_buf, pkt_len);
3220 			if (skb)
3221 				idpf_rx_add_frag(rx_buf, skb, pkt_len);
3222 			else
3223 				skb = idpf_rx_construct_skb(rxq, rx_buf,
3224 							    pkt_len);
3225 		} else {
3226 			idpf_rx_put_page(rx_buf);
3227 		}
3228 
3229 		/* exit if we failed to retrieve a buffer */
3230 		if (!skb)
3231 			break;
3232 
3233 		idpf_rx_post_buf_refill(refillq, buf_id);
3234 
3235 		IDPF_RX_BUMP_NTC(rxq, ntc);
3236 		/* skip if it is non EOP desc */
3237 		if (!idpf_rx_splitq_is_eop(rx_desc))
3238 			continue;
3239 
3240 		/* pad skb if needed (to make a valid ethernet frame) */
3241 		if (eth_skb_pad(skb)) {
3242 			skb = NULL;
3243 			continue;
3244 		}
3245 
3246 		/* probably a little skewed due to removing CRC */
3247 		total_rx_bytes += skb->len;
3248 
3249 		/* protocol */
3250 		if (unlikely(idpf_rx_process_skb_fields(rxq, skb, rx_desc))) {
3251 			dev_kfree_skb_any(skb);
3252 			skb = NULL;
3253 			continue;
3254 		}
3255 
3256 		/* send completed skb up the stack */
3257 		napi_gro_receive(&rxq->q_vector->napi, skb);
3258 		skb = NULL;
3259 
3260 		/* update budget accounting */
3261 		total_rx_pkts++;
3262 	}
3263 
3264 	rxq->next_to_clean = ntc;
3265 
3266 	rxq->skb = skb;
3267 	u64_stats_update_begin(&rxq->stats_sync);
3268 	u64_stats_add(&rxq->q_stats.rx.packets, total_rx_pkts);
3269 	u64_stats_add(&rxq->q_stats.rx.bytes, total_rx_bytes);
3270 	u64_stats_update_end(&rxq->stats_sync);
3271 
3272 	/* guarantee a trip back through this routine if there was a failure */
3273 	return total_rx_pkts;
3274 }
3275 
3276 /**
3277  * idpf_rx_update_bufq_desc - Update buffer queue descriptor
3278  * @bufq: Pointer to the buffer queue
3279  * @refill_desc: SW Refill queue descriptor containing buffer ID
3280  * @buf_desc: Buffer queue descriptor
3281  *
3282  * Return 0 on success and negative on failure.
3283  */
3284 static int idpf_rx_update_bufq_desc(struct idpf_queue *bufq, u16 refill_desc,
3285 				    struct virtchnl2_splitq_rx_buf_desc *buf_desc)
3286 {
3287 	struct idpf_rx_buf *buf;
3288 	dma_addr_t addr;
3289 	u16 buf_id;
3290 
3291 	buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc);
3292 
3293 	buf = &bufq->rx_buf.buf[buf_id];
3294 
3295 	addr = idpf_alloc_page(bufq->pp, buf, bufq->rx_buf_size);
3296 	if (unlikely(addr == DMA_MAPPING_ERROR))
3297 		return -ENOMEM;
3298 
3299 	buf_desc->pkt_addr = cpu_to_le64(addr);
3300 	buf_desc->qword0.buf_id = cpu_to_le16(buf_id);
3301 
3302 	if (!bufq->rx_hsplit_en)
3303 		return 0;
3304 
3305 	buf_desc->hdr_addr = cpu_to_le64(bufq->rx_buf.hdr_buf_pa +
3306 					 (u32)buf_id * IDPF_HDR_BUF_SIZE);
3307 
3308 	return 0;
3309 }
3310 
3311 /**
3312  * idpf_rx_clean_refillq - Clean refill queue buffers
3313  * @bufq: buffer queue to post buffers back to
3314  * @refillq: refill queue to clean
3315  *
3316  * This function takes care of the buffer refill management
3317  */
3318 static void idpf_rx_clean_refillq(struct idpf_queue *bufq,
3319 				  struct idpf_sw_queue *refillq)
3320 {
3321 	struct virtchnl2_splitq_rx_buf_desc *buf_desc;
3322 	u16 bufq_nta = bufq->next_to_alloc;
3323 	u16 ntc = refillq->next_to_clean;
3324 	int cleaned = 0;
3325 	u16 gen;
3326 
3327 	buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, bufq_nta);
3328 
3329 	/* make sure we stop at ring wrap in the unlikely case ring is full */
3330 	while (likely(cleaned < refillq->desc_count)) {
3331 		u16 refill_desc = IDPF_SPLITQ_RX_BI_DESC(refillq, ntc);
3332 		bool failure;
3333 
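		/* The SW refill queue uses the same generation bit scheme as
		 * the HW rings: stop once the entry's gen bit no longer
		 * matches the phase being tracked in the queue flags.
		 */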
3334 		gen = FIELD_GET(IDPF_RX_BI_GEN_M, refill_desc);
3335 		if (test_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags) != gen)
3336 			break;
3337 
3338 		failure = idpf_rx_update_bufq_desc(bufq, refill_desc,
3339 						   buf_desc);
3340 		if (failure)
3341 			break;
3342 
3343 		if (unlikely(++ntc == refillq->desc_count)) {
3344 			change_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags);
3345 			ntc = 0;
3346 		}
3347 
3348 		if (unlikely(++bufq_nta == bufq->desc_count)) {
3349 			buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, 0);
3350 			bufq_nta = 0;
3351 		} else {
3352 			buf_desc++;
3353 		}
3354 
3355 		cleaned++;
3356 	}
3357 
3358 	if (!cleaned)
3359 		return;
3360 
3361 	/* We want to limit how many transactions on the bus we trigger with
3362 	 * tail writes so we only do it in strides. It's also important we
3363 	 * align the write to a multiple of 8 as required by HW.
3364 	 */
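	/* The ternary below handles ring wrap: the expression yields the
	 * number of buffers filled since the last tail bump whether or not
	 * bufq_nta has wrapped past next_to_use.
	 */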
3365 	if (((bufq->next_to_use <= bufq_nta ? 0 : bufq->desc_count) +
3366 	    bufq_nta - bufq->next_to_use) >= IDPF_RX_BUF_POST_STRIDE)
3367 		idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq_nta,
3368 						       IDPF_RX_BUF_POST_STRIDE));
3369 
3370 	/* update next to alloc since we have filled the ring */
3371 	refillq->next_to_clean = ntc;
3372 	bufq->next_to_alloc = bufq_nta;
3373 }
3374 
3375 /**
3376  * idpf_rx_clean_refillq_all - Clean all refill queues
3377  * @bufq: buffer queue with refill queues
3378  *
3379  * Iterates through all refill queues assigned to this vector's buffer
3380  * queue and posts as many buffers back to the hardware as each refill
3381  * queue provides.
3382  */
3383 static void idpf_rx_clean_refillq_all(struct idpf_queue *bufq)
3384 {
3385 	struct idpf_bufq_set *bufq_set;
3386 	int i;
3387 
3388 	bufq_set = container_of(bufq, struct idpf_bufq_set, bufq);
3389 	for (i = 0; i < bufq_set->num_refillqs; i++)
3390 		idpf_rx_clean_refillq(bufq, &bufq_set->refillqs[i]);
3391 }
3392 
3393 /**
3394  * idpf_vport_intr_clean_queues - MSIX mode Interrupt Handler
3395  * @irq: interrupt number
3396  * @data: pointer to a q_vector
3397  *
3398  */
3399 static irqreturn_t idpf_vport_intr_clean_queues(int __always_unused irq,
3400 						void *data)
3401 {
3402 	struct idpf_q_vector *q_vector = (struct idpf_q_vector *)data;
3403 
3404 	q_vector->total_events++;
3405 	napi_schedule(&q_vector->napi);
3406 
3407 	return IRQ_HANDLED;
3408 }
3409 
3410 /**
3411  * idpf_vport_intr_napi_del_all - Unregister napi for all q_vectors in vport
3412  * @vport: virtual port structure
3413  *
3414  */
3415 static void idpf_vport_intr_napi_del_all(struct idpf_vport *vport)
3416 {
3417 	u16 v_idx;
3418 
3419 	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++)
3420 		netif_napi_del(&vport->q_vectors[v_idx].napi);
3421 }
3422 
3423 /**
3424  * idpf_vport_intr_napi_dis_all - Disable NAPI for all q_vectors in the vport
3425  * @vport: main vport structure
3426  */
3427 static void idpf_vport_intr_napi_dis_all(struct idpf_vport *vport)
3428 {
3429 	int v_idx;
3430 
3431 	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++)
3432 		napi_disable(&vport->q_vectors[v_idx].napi);
3433 }
3434 
3435 /**
3436  * idpf_vport_intr_rel - Free memory allocated for interrupt vectors
3437  * @vport: virtual port
3438  *
3439  * Free the memory allocated for the interrupt vectors associated with a vport
3440  */
3441 void idpf_vport_intr_rel(struct idpf_vport *vport)
3442 {
3443 	int i, j, v_idx;
3444 
3445 	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
3446 		struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
3447 
3448 		kfree(q_vector->bufq);
3449 		q_vector->bufq = NULL;
3450 		kfree(q_vector->tx);
3451 		q_vector->tx = NULL;
3452 		kfree(q_vector->rx);
3453 		q_vector->rx = NULL;
3454 	}
3455 
3456 	/* Clean up the mapping of queues to vectors */
3457 	for (i = 0; i < vport->num_rxq_grp; i++) {
3458 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
3459 
3460 		if (idpf_is_queue_model_split(vport->rxq_model))
3461 			for (j = 0; j < rx_qgrp->splitq.num_rxq_sets; j++)
3462 				rx_qgrp->splitq.rxq_sets[j]->rxq.q_vector = NULL;
3463 		else
3464 			for (j = 0; j < rx_qgrp->singleq.num_rxq; j++)
3465 				rx_qgrp->singleq.rxqs[j]->q_vector = NULL;
3466 	}
3467 
3468 	if (idpf_is_queue_model_split(vport->txq_model))
3469 		for (i = 0; i < vport->num_txq_grp; i++)
3470 			vport->txq_grps[i].complq->q_vector = NULL;
3471 	else
3472 		for (i = 0; i < vport->num_txq_grp; i++)
3473 			for (j = 0; j < vport->txq_grps[i].num_txq; j++)
3474 				vport->txq_grps[i].txqs[j]->q_vector = NULL;
3475 
3476 	kfree(vport->q_vectors);
3477 	vport->q_vectors = NULL;
3478 }
3479 
3480 /**
3481  * idpf_vport_intr_rel_irq - Free the IRQ association with the OS
3482  * @vport: main vport structure
3483  */
3484 static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
3485 {
3486 	struct idpf_adapter *adapter = vport->adapter;
3487 	int vector;
3488 
3489 	for (vector = 0; vector < vport->num_q_vectors; vector++) {
3490 		struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
3491 		int irq_num, vidx;
3492 
3493 		/* free only the irqs that were actually requested */
3494 		if (!q_vector)
3495 			continue;
3496 
3497 		vidx = vport->q_vector_idxs[vector];
3498 		irq_num = adapter->msix_entries[vidx].vector;
3499 
3500 		/* clear the affinity_mask in the IRQ descriptor */
3501 		irq_set_affinity_hint(irq_num, NULL);
3502 		free_irq(irq_num, q_vector);
3503 	}
3504 }
3505 
3506 /**
3507  * idpf_vport_intr_dis_irq_all - Disable all interrupts
3508  * @vport: main vport structure
3509  */
3510 static void idpf_vport_intr_dis_irq_all(struct idpf_vport *vport)
3511 {
3512 	struct idpf_q_vector *q_vector = vport->q_vectors;
3513 	int q_idx;
3514 
3515 	for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++)
3516 		writel(0, q_vector[q_idx].intr_reg.dyn_ctl);
3517 }
3518 
3519 /**
3520  * idpf_vport_intr_buildreg_itr - Build a dyn_ctl value to re-enable the interrupt
3521  * @q_vector: pointer to q_vector
3522  * @type: itr index
3523  * @itr: itr value
3524  */
3525 static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector,
3526 					const int type, u16 itr)
3527 {
3528 	u32 itr_val;
3529 
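	/* ITR values are in usecs while the register interval field assumes
	 * a 2 usec granularity, so shifting by one bit less than the field
	 * offset below effectively stores itr / 2.
	 */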
3530 	itr &= IDPF_ITR_MASK;
3531 	/* Don't clear PBA because that can cause lost interrupts that
3532 	 * came in while we were cleaning/polling
3533 	 */
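	/* The dynamic control value packs three fields: the enable bit, the
	 * ITR index selecting which throttling timer to update, and the new
	 * interval. Shifting by (intrvl_s - 1) rather than intrvl_s divides
	 * the microsecond ITR value by two, converting it to the register's
	 * assumed 2 usec units; the exact field offsets come from the
	 * device-specific intr_reg setup.
	 */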
3534 	itr_val = q_vector->intr_reg.dyn_ctl_intena_m |
3535 		  (type << q_vector->intr_reg.dyn_ctl_itridx_s) |
3536 		  (itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1));
3537 
3538 	return itr_val;
3539 }
3540 
3541 /**
3542  * idpf_update_dim_sample - Update dim sample with packets and bytes
3543  * @q_vector: the vector associated with the interrupt
3544  * @dim_sample: dim sample to update
3545  * @dim: dim instance structure
3546  * @packets: total packets
3547  * @bytes: total bytes
3548  *
3549  * Update the dim sample with the packets and bytes which are passed to this
3550  * function. Set the dim state appropriately if the dim settings get stale.
3551  */
3552 static void idpf_update_dim_sample(struct idpf_q_vector *q_vector,
3553 				   struct dim_sample *dim_sample,
3554 				   struct dim *dim, u64 packets, u64 bytes)
3555 {
3556 	dim_update_sample(q_vector->total_events, packets, bytes, dim_sample);
3557 	dim_sample->comp_ctr = 0;
3558 
3559 	/* if dim settings get stale, like when not updated for 1 second or
3560 	 * longer, force it to start again. This addresses the frequent case
3561 	 * of an idle queue being switched to by the scheduler.
3562 	 */
3563 	if (ktime_ms_delta(dim_sample->time, dim->start_sample.time) >= HZ)
3564 		dim->state = DIM_START_MEASURE;
3565 }
3566 
3567 /**
3568  * idpf_net_dim - Update net DIM algorithm
3569  * @q_vector: the vector associated with the interrupt
3570  *
3571  * Create a DIM sample and notify net_dim() so that it can possibly decide
3572  * a new ITR value based on incoming packets, bytes, and interrupts.
3573  *
3574  * This function is a no-op if the queue is not configured for dynamic ITR.
3575  */
3576 static void idpf_net_dim(struct idpf_q_vector *q_vector)
3577 {
3578 	struct dim_sample dim_sample = { };
3579 	u64 packets, bytes;
3580 	u32 i;
3581 
3582 	if (!IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode))
3583 		goto check_rx_itr;
3584 
3585 	for (i = 0, packets = 0, bytes = 0; i < q_vector->num_txq; i++) {
3586 		struct idpf_queue *txq = q_vector->tx[i];
3587 		unsigned int start;
3588 
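		/* The datapath updates these counters concurrently; the
		 * fetch/retry pair rereads them until a consistent snapshot
		 * is observed.
		 */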
3589 		do {
3590 			start = u64_stats_fetch_begin(&txq->stats_sync);
3591 			packets += u64_stats_read(&txq->q_stats.tx.packets);
3592 			bytes += u64_stats_read(&txq->q_stats.tx.bytes);
3593 		} while (u64_stats_fetch_retry(&txq->stats_sync, start));
3594 	}
3595 
3596 	idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->tx_dim,
3597 			       packets, bytes);
3598 	net_dim(&q_vector->tx_dim, dim_sample);
3599 
3600 check_rx_itr:
3601 	if (!IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode))
3602 		return;
3603 
3604 	for (i = 0, packets = 0, bytes = 0; i < q_vector->num_rxq; i++) {
3605 		struct idpf_queue *rxq = q_vector->rx[i];
3606 		unsigned int start;
3607 
3608 		do {
3609 			start = u64_stats_fetch_begin(&rxq->stats_sync);
3610 			packets += u64_stats_read(&rxq->q_stats.rx.packets);
3611 			bytes += u64_stats_read(&rxq->q_stats.rx.bytes);
3612 		} while (u64_stats_fetch_retry(&rxq->stats_sync, start));
3613 	}
3614 
3615 	idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->rx_dim,
3616 			       packets, bytes);
3617 	net_dim(&q_vector->rx_dim, dim_sample);
3618 }
3619 
3620 /**
3621  * idpf_vport_intr_update_itr_ena_irq - Update ITR and re-enable the MSI-X interrupt
3622  * @q_vector: q_vector for which itr is being updated and interrupt enabled
3623  *
3624  * Update the net_dim() algorithm and re-enable the interrupt associated with
3625  * this vector.
3626  */
3627 void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector)
3628 {
3629 	u32 intval;
3630 
3631 	/* net_dim() updates ITR out-of-band using a work item */
3632 	idpf_net_dim(q_vector);
3633 
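	/* Re-arm the vector without modifying the interval: the no-update
	 * ITR index asks the hardware to leave the current throttling
	 * values alone.
	 */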
3634 	intval = idpf_vport_intr_buildreg_itr(q_vector,
3635 					      IDPF_NO_ITR_UPDATE_IDX, 0);
3636 
3637 	writel(intval, q_vector->intr_reg.dyn_ctl);
3638 }
3639 
3640 /**
3641  * idpf_vport_intr_req_irq - Request IRQs from the OS for the vport's MSI-X vectors
3642  * @vport: main vport structure
3643  * @basename: name for the vector
3644  */
3645 static int idpf_vport_intr_req_irq(struct idpf_vport *vport, char *basename)
3646 {
3647 	struct idpf_adapter *adapter = vport->adapter;
3648 	int vector, err, irq_num, vidx;
3649 	const char *vec_name;
3650 
3651 	for (vector = 0; vector < vport->num_q_vectors; vector++) {
3652 		struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
3653 
3654 		vidx = vport->q_vector_idxs[vector];
3655 		irq_num = adapter->msix_entries[vidx].vector;
3656 
3657 		if (q_vector->num_rxq && q_vector->num_txq)
3658 			vec_name = "TxRx";
3659 		else if (q_vector->num_rxq)
3660 			vec_name = "Rx";
3661 		else if (q_vector->num_txq)
3662 			vec_name = "Tx";
3663 		else
3664 			continue;
3665 
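		/* Build a per-vector name such as "idpf-eth0-TxRx-3" (an
		 * illustrative example); vidx is the device vector index, so
		 * the name lines up with its MSI-X entry.
		 */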
		q_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d",
					   basename, vec_name, vidx);
		if (!q_vector->name) {
			err = -ENOMEM;
			goto free_q_irqs;
		}
3668 
3669 		err = request_irq(irq_num, idpf_vport_intr_clean_queues, 0,
3670 				  q_vector->name, q_vector);
3671 		if (err) {
3672 			netdev_err(vport->netdev,
3673 				   "Request_irq failed, error: %d\n", err);
3674 			goto free_q_irqs;
3675 		}
3676 		/* assign the mask for this irq */
3677 		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
3678 	}
3679 
3680 	return 0;
3681 
3682 free_q_irqs:
3683 	while (--vector >= 0) {
3684 		vidx = vport->q_vector_idxs[vector];
3685 		irq_num = adapter->msix_entries[vidx].vector;
3686 		free_irq(irq_num, &vport->q_vectors[vector]);
3687 	}
3688 
3689 	return err;
3690 }
3691 
3692 /**
3693  * idpf_vport_intr_write_itr - Write ITR value to the ITR register
3694  * @q_vector: q_vector structure
3695  * @itr: Interrupt throttling rate
3696  * @tx: true to write the Tx ITR, false to write the Rx ITR
3697  */
3698 void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector, u16 itr, bool tx)
3699 {
3700 	struct idpf_intr_reg *intr_reg;
3701 
3702 	if (tx && !q_vector->tx)
3703 		return;
3704 	if (!tx && !q_vector->rx)
3705 		return;
3706 
3707 	intr_reg = &q_vector->intr_reg;
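	/* ITR_REG_ALIGN() drops the low bit so the microsecond value is
	 * aligned to the assumed 2 usec granularity, and the shift then
	 * converts it to hardware register units before the write.
	 */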
3708 	writel(ITR_REG_ALIGN(itr) >> IDPF_ITR_GRAN_S,
3709 	       tx ? intr_reg->tx_itr : intr_reg->rx_itr);
3710 }
3711 
3712 /**
3713  * idpf_vport_intr_ena_irq_all - Enable IRQs for the given vport
3714  * @vport: main vport structure
3715  */
3716 static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport)
3717 {
3718 	bool dynamic;
3719 	int q_idx;
3720 	u16 itr;
3721 
3722 	for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) {
3723 		struct idpf_q_vector *qv = &vport->q_vectors[q_idx];
3724 
3725 		/* Set the initial ITR values */
3726 		if (qv->num_txq) {
3727 			dynamic = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode);
3728 			itr = vport->tx_itr_profile[qv->tx_dim.profile_ix];
3729 			idpf_vport_intr_write_itr(qv, dynamic ?
3730 						  itr : qv->tx_itr_value,
3731 						  true);
3732 		}
3733 
3734 		if (qv->num_rxq) {
3735 			dynamic = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode);
3736 			itr = vport->rx_itr_profile[qv->rx_dim.profile_ix];
3737 			idpf_vport_intr_write_itr(qv, dynamic ?
3738 						  itr : qv->rx_itr_value,
3739 						  false);
3740 		}
3741 
3742 		if (qv->num_txq || qv->num_rxq)
3743 			idpf_vport_intr_update_itr_ena_irq(qv);
3744 	}
3745 }
3746 
3747 /**
3748  * idpf_vport_intr_deinit - Release all vector associations for the vport
3749  * @vport: main vport structure
3750  */
3751 void idpf_vport_intr_deinit(struct idpf_vport *vport)
3752 {
3753 	idpf_vport_intr_napi_dis_all(vport);
3754 	idpf_vport_intr_napi_del_all(vport);
3755 	idpf_vport_intr_dis_irq_all(vport);
3756 	idpf_vport_intr_rel_irq(vport);
3757 }
3758 
3759 /**
3760  * idpf_tx_dim_work - Apply a new Tx ITR value from the DIM work item
3761  * @work: work queue structure
3762  */
3763 static void idpf_tx_dim_work(struct work_struct *work)
3764 {
3765 	struct idpf_q_vector *q_vector;
3766 	struct idpf_vport *vport;
3767 	struct dim *dim;
3768 	u16 itr;
3769 
3770 	dim = container_of(work, struct dim, work);
3771 	q_vector = container_of(dim, struct idpf_q_vector, tx_dim);
3772 	vport = q_vector->vport;
3773 
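	/* Defensive clamp in case the DIM profile index ever outruns the
	 * driver's ITR profile table.
	 */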
3774 	if (dim->profile_ix >= ARRAY_SIZE(vport->tx_itr_profile))
3775 		dim->profile_ix = ARRAY_SIZE(vport->tx_itr_profile) - 1;
3776 
3777 	/* look up the values in our local table */
3778 	itr = vport->tx_itr_profile[dim->profile_ix];
3779 
3780 	idpf_vport_intr_write_itr(q_vector, itr, true);
3781 
3782 	dim->state = DIM_START_MEASURE;
3783 }
3784 
3785 /**
3786  * idpf_rx_dim_work - Apply a new Rx ITR value from the DIM work item
3787  * @work: work queue structure
3788  */
3789 static void idpf_rx_dim_work(struct work_struct *work)
3790 {
3791 	struct idpf_q_vector *q_vector;
3792 	struct idpf_vport *vport;
3793 	struct dim *dim;
3794 	u16 itr;
3795 
3796 	dim = container_of(work, struct dim, work);
3797 	q_vector = container_of(dim, struct idpf_q_vector, rx_dim);
3798 	vport = q_vector->vport;
3799 
3800 	if (dim->profile_ix >= ARRAY_SIZE(vport->rx_itr_profile))
3801 		dim->profile_ix = ARRAY_SIZE(vport->rx_itr_profile) - 1;
3802 
3803 	/* look up the values in our local table */
3804 	itr = vport->rx_itr_profile[dim->profile_ix];
3805 
3806 	idpf_vport_intr_write_itr(q_vector, itr, false);
3807 
3808 	dim->state = DIM_START_MEASURE;
3809 }
3810 
3811 /**
3812  * idpf_init_dim - Set up dynamic interrupt moderation
3813  * @qv: q_vector structure
3814  */
3815 static void idpf_init_dim(struct idpf_q_vector *qv)
3816 {
3817 	INIT_WORK(&qv->tx_dim.work, idpf_tx_dim_work);
3818 	qv->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
3819 	qv->tx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX;
3820 
3821 	INIT_WORK(&qv->rx_dim.work, idpf_rx_dim_work);
3822 	qv->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
3823 	qv->rx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX;
3824 }
3825 
3826 /**
3827  * idpf_vport_intr_napi_ena_all - Enable NAPI for all q_vectors in the vport
3828  * @vport: main vport structure
3829  */
3830 static void idpf_vport_intr_napi_ena_all(struct idpf_vport *vport)
3831 {
3832 	int q_idx;
3833 
3834 	for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) {
3835 		struct idpf_q_vector *q_vector = &vport->q_vectors[q_idx];
3836 
3837 		idpf_init_dim(q_vector);
3838 		napi_enable(&q_vector->napi);
3839 	}
3840 }
3841 
3842 /**
3843  * idpf_tx_splitq_clean_all - Clean all Tx completion queues
3844  * @q_vec: queue vector
3845  * @budget: Used to determine if we are in netpoll
3846  * @cleaned: returns number of packets cleaned
3847  *
3848  * Returns true if the clean is complete, false otherwise
3849  */
3850 static bool idpf_tx_splitq_clean_all(struct idpf_q_vector *q_vec,
3851 				     int budget, int *cleaned)
3852 {
3853 	u16 num_txq = q_vec->num_txq;
3854 	bool clean_complete = true;
3855 	int i, budget_per_q;
3856 
3857 	if (unlikely(!num_txq))
3858 		return true;
3859 
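	/* Split the NAPI budget evenly across the completion queues,
	 * rounding up so that every queue receives a non-zero share.
	 */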
3860 	budget_per_q = DIV_ROUND_UP(budget, num_txq);
3861 	for (i = 0; i < num_txq; i++)
3862 		clean_complete &= idpf_tx_clean_complq(q_vec->tx[i],
3863 						       budget_per_q, cleaned);
3864 
3865 	return clean_complete;
3866 }
3867 
3868 /**
3869  * idpf_rx_splitq_clean_all - Clean all Rx queues
3870  * @q_vec: queue vector
3871  * @budget: Used to determine if we are in netpoll
3872  * @cleaned: returns number of packets cleaned
3873  *
3874  * Returns true if the clean is complete, false otherwise
3875  */
3876 static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
3877 				     int *cleaned)
3878 {
3879 	u16 num_rxq = q_vec->num_rxq;
3880 	bool clean_complete = true;
3881 	int pkts_cleaned = 0;
3882 	int i, budget_per_q;
3883 
3884 	/* We attempt to distribute budget to each Rx queue fairly, but don't
3885 	 * allow the budget to go below 1 because that would exit polling early.
3886 	 */
3887 	budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0;
3888 	for (i = 0; i < num_rxq; i++) {
3889 		struct idpf_queue *rxq = q_vec->rx[i];
3890 		int pkts_cleaned_per_q;
3891 
3892 		pkts_cleaned_per_q = idpf_rx_splitq_clean(rxq, budget_per_q);
3893 		/* if we clean as many as budgeted, we must not be done */
3894 		if (pkts_cleaned_per_q >= budget_per_q)
3895 			clean_complete = false;
3896 		pkts_cleaned += pkts_cleaned_per_q;
3897 	}
3898 	*cleaned = pkts_cleaned;
3899 
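	/* With the Rx queues drained, return the consumed buffers to the
	 * hardware by flushing each buffer queue's refill queues.
	 */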
3900 	for (i = 0; i < q_vec->num_bufq; i++)
3901 		idpf_rx_clean_refillq_all(q_vec->bufq[i]);
3902 
3903 	return clean_complete;
3904 }
3905 
3906 /**
3907  * idpf_vport_splitq_napi_poll - NAPI handler
3908  * @napi: struct from which you get q_vector
3909  * @budget: budget provided by stack
3910  */
3911 static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
3912 {
3913 	struct idpf_q_vector *q_vector =
3914 				container_of(napi, struct idpf_q_vector, napi);
3915 	bool clean_complete;
3916 	int work_done = 0;
3917 
3918 	/* Handle case where we are called by netpoll with a budget of 0 */
3919 	if (unlikely(!budget)) {
3920 		idpf_tx_splitq_clean_all(q_vector, budget, &work_done);
3921 
3922 		return 0;
3923 	}
3924 
3925 	clean_complete = idpf_rx_splitq_clean_all(q_vector, budget, &work_done);
3926 	clean_complete &= idpf_tx_splitq_clean_all(q_vector, budget, &work_done);
3927 
3928 	/* If work not completed, return budget and polling will return */
3929 	if (!clean_complete)
3930 		return budget;
3931 
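	/* Report strictly less than the full budget so napi_complete_done()
	 * may actually complete the poll, even when the Tx and Rx cleanups
	 * together consumed everything we were given.
	 */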
3932 	work_done = min_t(int, work_done, budget - 1);
3933 
3934 	/* Exit the polling mode, but don't re-enable interrupts if stack might
3935 	 * poll us due to busy-polling
3936 	 */
3937 	if (likely(napi_complete_done(napi, work_done)))
3938 		idpf_vport_intr_update_itr_ena_irq(q_vector);
3939 
3940 	/* Switch to poll mode in the tear-down path after sending disable
3941 	 * queues virtchnl message, as the interrupts will be disabled after
3942 	 * that
3943 	 */
3944 	if (unlikely(q_vector->num_txq && test_bit(__IDPF_Q_POLL_MODE,
3945 						   q_vector->tx[0]->flags)))
3946 		return budget;
3947 
3948 	return work_done;
3949 }
3950 
3951 /**
3952  * idpf_vport_intr_map_vector_to_qs - Map vectors to queues
3953  * @vport: virtual port
3954  *
3955  * Distribute the vport's Rx, buffer, and Tx queues across its q_vectors
3956  */
3957 static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
3958 {
3959 	u16 num_txq_grp = vport->num_txq_grp;
3960 	int i, j, qv_idx, bufq_vidx = 0;
3961 	struct idpf_rxq_group *rx_qgrp;
3962 	struct idpf_txq_group *tx_qgrp;
3963 	struct idpf_queue *q, *bufq;
3964 	u16 q_index;
3965 
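	/* Queues are handed out round-robin: qv_idx (and bufq_vidx for the
	 * buffer queues) wraps back to zero on reaching num_q_vectors, so a
	 * vector may service several queues when queues outnumber vectors.
	 */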
3966 	for (i = 0, qv_idx = 0; i < vport->num_rxq_grp; i++) {
3967 		u16 num_rxq;
3968 
3969 		rx_qgrp = &vport->rxq_grps[i];
3970 		if (idpf_is_queue_model_split(vport->rxq_model))
3971 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
3972 		else
3973 			num_rxq = rx_qgrp->singleq.num_rxq;
3974 
3975 		for (j = 0; j < num_rxq; j++) {
3976 			if (qv_idx >= vport->num_q_vectors)
3977 				qv_idx = 0;
3978 
3979 			if (idpf_is_queue_model_split(vport->rxq_model))
3980 				q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
3981 			else
3982 				q = rx_qgrp->singleq.rxqs[j];
3983 			q->q_vector = &vport->q_vectors[qv_idx];
3984 			q_index = q->q_vector->num_rxq;
3985 			q->q_vector->rx[q_index] = q;
3986 			q->q_vector->num_rxq++;
3987 			qv_idx++;
3988 		}
3989 
3990 		if (idpf_is_queue_model_split(vport->rxq_model)) {
3991 			for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
3992 				bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
3993 				bufq->q_vector = &vport->q_vectors[bufq_vidx];
3994 				q_index = bufq->q_vector->num_bufq;
3995 				bufq->q_vector->bufq[q_index] = bufq;
3996 				bufq->q_vector->num_bufq++;
3997 			}
3998 			if (++bufq_vidx >= vport->num_q_vectors)
3999 				bufq_vidx = 0;
4000 		}
4001 	}
4002 
4003 	for (i = 0, qv_idx = 0; i < num_txq_grp; i++) {
4004 		u16 num_txq;
4005 
4006 		tx_qgrp = &vport->txq_grps[i];
4007 		num_txq = tx_qgrp->num_txq;
4008 
4009 		if (idpf_is_queue_model_split(vport->txq_model)) {
4010 			if (qv_idx >= vport->num_q_vectors)
4011 				qv_idx = 0;
4012 
4013 			q = tx_qgrp->complq;
4014 			q->q_vector = &vport->q_vectors[qv_idx];
4015 			q_index = q->q_vector->num_txq;
4016 			q->q_vector->tx[q_index] = q;
4017 			q->q_vector->num_txq++;
4018 			qv_idx++;
4019 		} else {
4020 			for (j = 0; j < num_txq; j++) {
4021 				if (qv_idx >= vport->num_q_vectors)
4022 					qv_idx = 0;
4023 
4024 				q = tx_qgrp->txqs[j];
4025 				q->q_vector = &vport->q_vectors[qv_idx];
4026 				q_index = q->q_vector->num_txq;
4027 				q->q_vector->tx[q_index] = q;
4028 				q->q_vector->num_txq++;
4029 
4030 				qv_idx++;
4031 			}
4032 		}
4033 	}
4034 }
4035 
4036 /**
4037  * idpf_vport_intr_init_vec_idx - Initialize the vector indexes
4038  * @vport: virtual port
4039  *
4040  * Initialize vector indexes with the values returned over the mailbox
4041  */
4042 static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
4043 {
4044 	struct idpf_adapter *adapter = vport->adapter;
4045 	struct virtchnl2_alloc_vectors *ac;
4046 	u16 *vecids, total_vecs;
4047 	int i;
4048 
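	/* Without a vector chunk response from the control plane, fall back
	 * to the locally assigned indexes; otherwise translate each local
	 * index through the vector IDs returned over the mailbox.
	 */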
4049 	ac = adapter->req_vec_chunks;
4050 	if (!ac) {
4051 		for (i = 0; i < vport->num_q_vectors; i++)
4052 			vport->q_vectors[i].v_idx = vport->q_vector_idxs[i];
4053 
4054 		return 0;
4055 	}
4056 
4057 	total_vecs = idpf_get_reserved_vecs(adapter);
4058 	vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL);
4059 	if (!vecids)
4060 		return -ENOMEM;
4061 
4062 	idpf_get_vec_ids(adapter, vecids, total_vecs, &ac->vchunks);
4063 
4064 	for (i = 0; i < vport->num_q_vectors; i++)
4065 		vport->q_vectors[i].v_idx = vecids[vport->q_vector_idxs[i]];
4066 
4067 	kfree(vecids);
4068 
4069 	return 0;
4070 }
4071 
4072 /**
4073  * idpf_vport_intr_napi_add_all - Register NAPI handler for all q_vectors
4074  * @vport: virtual port structure
4075  */
4076 static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
4077 {
4078 	int (*napi_poll)(struct napi_struct *napi, int budget);
4079 	u16 v_idx;
4080 
4081 	if (idpf_is_queue_model_split(vport->txq_model))
4082 		napi_poll = idpf_vport_splitq_napi_poll;
4083 	else
4084 		napi_poll = idpf_vport_singleq_napi_poll;
4085 
4086 	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
4087 		struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
4088 
4089 		netif_napi_add(vport->netdev, &q_vector->napi, napi_poll);
4090 
4091 		/* only set affinity_mask if the CPU is online */
4092 		if (cpu_online(v_idx))
4093 			cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
4094 	}
4095 }
4096 
4097 /**
4098  * idpf_vport_intr_alloc - Allocate memory for interrupt vectors
4099  * @vport: virtual port
4100  *
4101  * We allocate one q_vector per queue interrupt. If allocation fails we
4102  * return -ENOMEM.
4103  */
4104 int idpf_vport_intr_alloc(struct idpf_vport *vport)
4105 {
4106 	u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector;
4107 	struct idpf_q_vector *q_vector;
4108 	int v_idx, err;
4109 
4110 	vport->q_vectors = kcalloc(vport->num_q_vectors,
4111 				   sizeof(struct idpf_q_vector), GFP_KERNEL);
4112 	if (!vport->q_vectors)
4113 		return -ENOMEM;
4114 
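	/* Size the per-vector queue arrays for the worst case of the
	 * round-robin distribution performed later by
	 * idpf_vport_intr_map_vector_to_qs().
	 */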
4115 	txqs_per_vector = DIV_ROUND_UP(vport->num_txq, vport->num_q_vectors);
4116 	rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq, vport->num_q_vectors);
4117 	bufqs_per_vector = vport->num_bufqs_per_qgrp *
4118 			   DIV_ROUND_UP(vport->num_rxq_grp,
4119 					vport->num_q_vectors);
4120 
4121 	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
4122 		q_vector = &vport->q_vectors[v_idx];
4123 		q_vector->vport = vport;
4124 
4125 		q_vector->tx_itr_value = IDPF_ITR_TX_DEF;
4126 		q_vector->tx_intr_mode = IDPF_ITR_DYNAMIC;
4127 		q_vector->tx_itr_idx = VIRTCHNL2_ITR_IDX_1;
4128 
4129 		q_vector->rx_itr_value = IDPF_ITR_RX_DEF;
4130 		q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC;
4131 		q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0;
4132 
4133 		q_vector->tx = kcalloc(txqs_per_vector,
4134 				       sizeof(struct idpf_queue *),
4135 				       GFP_KERNEL);
4136 		if (!q_vector->tx) {
4137 			err = -ENOMEM;
4138 			goto error;
4139 		}
4140 
4141 		q_vector->rx = kcalloc(rxqs_per_vector,
4142 				       sizeof(struct idpf_queue *),
4143 				       GFP_KERNEL);
4144 		if (!q_vector->rx) {
4145 			err = -ENOMEM;
4146 			goto error;
4147 		}
4148 
4149 		if (!idpf_is_queue_model_split(vport->rxq_model))
4150 			continue;
4151 
4152 		q_vector->bufq = kcalloc(bufqs_per_vector,
4153 					 sizeof(struct idpf_queue *),
4154 					 GFP_KERNEL);
4155 		if (!q_vector->bufq) {
4156 			err = -ENOMEM;
4157 			goto error;
4158 		}
4159 	}
4160 
4161 	return 0;
4162 
4163 error:
4164 	idpf_vport_intr_rel(vport);
4165 
4166 	return err;
4167 }
4168 
4169 /**
4170  * idpf_vport_intr_init - Setup all vectors for the given vport
4171  * @vport: virtual port
4172  *
4173  * Returns 0 on success or negative on failure
4174  */
4175 int idpf_vport_intr_init(struct idpf_vport *vport)
4176 {
4177 	char *int_name;
4178 	int err;
4179 
4180 	err = idpf_vport_intr_init_vec_idx(vport);
4181 	if (err)
4182 		return err;
4183 
4184 	idpf_vport_intr_map_vector_to_qs(vport);
4185 	idpf_vport_intr_napi_add_all(vport);
4186 	idpf_vport_intr_napi_ena_all(vport);
4187 
4188 	err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport);
4189 	if (err)
4190 		goto unroll_vectors_alloc;
4191 
	int_name = kasprintf(GFP_KERNEL, "%s-%s",
			     dev_driver_string(&vport->adapter->pdev->dev),
			     vport->netdev->name);
	if (!int_name) {
		err = -ENOMEM;
		goto unroll_vectors_alloc;
	}

	/* The per-vector names are duplicated via kasprintf() inside
	 * idpf_vport_intr_req_irq(), so the basename can be freed once the
	 * IRQs have been requested.
	 */
	err = idpf_vport_intr_req_irq(vport, int_name);
	kfree(int_name);
	if (err)
		goto unroll_vectors_alloc;
4199 
4200 	idpf_vport_intr_ena_irq_all(vport);
4201 
4202 	return 0;
4203 
4204 unroll_vectors_alloc:
4205 	idpf_vport_intr_napi_dis_all(vport);
4206 	idpf_vport_intr_napi_del_all(vport);
4207 
4208 	return err;
4209 }
4210 
4211 /**
4212  * idpf_config_rss - Send virtchnl messages to configure RSS
4213  * @vport: virtual port
4214  *
4215  * Return 0 on success, negative on failure
4216  */
4217 int idpf_config_rss(struct idpf_vport *vport)
4218 {
4219 	int err;
4220 
4221 	err = idpf_send_get_set_rss_key_msg(vport, false);
4222 	if (err)
4223 		return err;
4224 
4225 	return idpf_send_get_set_rss_lut_msg(vport, false);
4226 }
4227 
4228 /**
4229  * idpf_fill_dflt_rss_lut - Fill the indirection table with the default values
4230  * @vport: virtual port structure
4231  */
4232 static void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
4233 {
4234 	struct idpf_adapter *adapter = vport->adapter;
4235 	u16 num_active_rxq = vport->num_rxq;
4236 	struct idpf_rss_data *rss_data;
4237 	int i;
4238 
4239 	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
4240 
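	/* Spread flows evenly by walking the LUT modulo the active Rx queue
	 * count, e.g. with 4 queues the table repeats 0, 1, 2, 3, 0, 1, ...
	 */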
4241 	for (i = 0; i < rss_data->rss_lut_size; i++) {
4242 		rss_data->rss_lut[i] = i % num_active_rxq;
4243 		rss_data->cached_lut[i] = rss_data->rss_lut[i];
4244 	}
4245 }
4246 
4247 /**
4248  * idpf_init_rss - Allocate and initialize RSS resources
4249  * @vport: virtual port
4250  *
4251  * Return 0 on success, negative on failure
4252  */
4253 int idpf_init_rss(struct idpf_vport *vport)
4254 {
4255 	struct idpf_adapter *adapter = vport->adapter;
4256 	struct idpf_rss_data *rss_data;
4257 	u32 lut_size;
4258 
4259 	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
4260 
4261 	lut_size = rss_data->rss_lut_size * sizeof(u32);
4262 	rss_data->rss_lut = kzalloc(lut_size, GFP_KERNEL);
4263 	if (!rss_data->rss_lut)
4264 		return -ENOMEM;
4265 
4266 	rss_data->cached_lut = kzalloc(lut_size, GFP_KERNEL);
4267 	if (!rss_data->cached_lut) {
4268 		kfree(rss_data->rss_lut);
4269 		rss_data->rss_lut = NULL;
4270 
4271 		return -ENOMEM;
4272 	}
4273 
4274 	/* Fill the default RSS lut values */
4275 	idpf_fill_dflt_rss_lut(vport);
4276 
4277 	return idpf_config_rss(vport);
4278 }
4279 
4280 /**
4281  * idpf_deinit_rss - Release RSS resources
4282  * @vport: virtual port
4283  */
4284 void idpf_deinit_rss(struct idpf_vport *vport)
4285 {
4286 	struct idpf_adapter *adapter = vport->adapter;
4287 	struct idpf_rss_data *rss_data;
4288 
4289 	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
4290 	kfree(rss_data->cached_lut);
4291 	rss_data->cached_lut = NULL;
4292 	kfree(rss_data->rss_lut);
4293 	rss_data->rss_lut = NULL;
4294 }
4295