xref: /linux/drivers/net/ethernet/intel/idpf/idpf_txrx.c (revision 116af5f2d015da415c036b3bc992c04522d0ae80)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2023 Intel Corporation */
3 
4 #include "idpf.h"
5 #include "idpf_ptp.h"
6 #include "idpf_virtchnl.h"
7 #include "xdp.h"
8 #include "xsk.h"
9 
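/* Stash a u32 "next buffer" index in the libeth SQE ->priv field;
 * LIBETH_SQE_CHECK_PRIV() verifies at build time that ->priv is large
 * enough to hold it.
 */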
10 #define idpf_tx_buf_next(buf)		(*(u32 *)&(buf)->priv)
11 LIBETH_SQE_CHECK_PRIV(u32);
12 
13 /**
14  * idpf_chk_linearize - Check if skb exceeds max descriptors per packet
15  * @skb: send buffer
16  * @max_bufs: maximum scatter gather buffers for single packet
17  * @count: number of buffers this packet needs
18  *
19  * Make sure we don't exceed maximum scatter gather buffers for a single
20  * packet.
21  * The TSO case has already been handled by idpf_features_check().
22  *
23  * Return: %true if skb exceeds max descriptors per packet, %false otherwise.
24  */
25 static bool idpf_chk_linearize(const struct sk_buff *skb,
26 			       unsigned int max_bufs,
27 			       unsigned int count)
28 {
29 	if (likely(count <= max_bufs))
30 		return false;
31 
32 	if (skb_is_gso(skb))
33 		return false;
34 
35 	return true;
36 }
37 
38 /**
39  * idpf_tx_timeout - Respond to a Tx Hang
40  * @netdev: network interface device structure
41  * @txqueue: TX queue
42  */
43 void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
44 {
45 	struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);
46 
47 	adapter->tx_timeout_count++;
48 
49 	netdev_err(netdev, "Detected Tx timeout: Count %d, Queue %d\n",
50 		   adapter->tx_timeout_count, txqueue);
51 	if (!idpf_is_reset_in_prog(adapter)) {
52 		set_bit(IDPF_HR_FUNC_RESET, adapter->flags);
53 		queue_delayed_work(adapter->vc_event_wq,
54 				   &adapter->vc_event_task,
55 				   msecs_to_jiffies(10));
56 	}
57 }
58 
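/**
 * idpf_tx_buf_clean - Free all Tx buffers on a queue
 * @txq: queue whose buffers are to be released
 *
 * Complete every entry in the Tx buffer pool, returning skbs, XDP frames
 * and pages via the libeth completion helpers.
 */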
59 static void idpf_tx_buf_clean(struct idpf_tx_queue *txq)
60 {
61 	struct libeth_sq_napi_stats ss = { };
62 	struct xdp_frame_bulk bq;
63 	struct libeth_cq_pp cp = {
64 		.dev	= txq->dev,
65 		.bq	= &bq,
66 		.ss	= &ss,
67 	};
68 
69 	xdp_frame_bulk_init(&bq);
70 
71 	/* Free all the Tx buffer sk_buffs */
72 	for (u32 i = 0; i < txq->buf_pool_size; i++)
73 		libeth_tx_complete_any(&txq->tx_buf[i], &cp);
74 
75 	xdp_flush_frame_bulk(&bq);
76 }
77 
78 /**
79  * idpf_tx_buf_rel_all - Free all Tx buffer resources
80  * @txq: queue to be cleaned
81  */
82 static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
83 {
84 	/* Buffers already cleared, nothing to do */
85 	if (!txq->tx_buf)
86 		return;
87 
88 	if (idpf_queue_has(XSK, txq))
89 		idpf_xsksq_clean(txq);
90 	else
91 		idpf_tx_buf_clean(txq);
92 
93 	kfree(txq->tx_buf);
94 	txq->tx_buf = NULL;
95 }
96 
97 /**
98  * idpf_tx_desc_rel - Free Tx resources per queue
99  * @txq: Tx descriptor ring for a specific queue
100  *
101  * Free all transmit software resources
102  */
103 static void idpf_tx_desc_rel(struct idpf_tx_queue *txq)
104 {
105 	bool xdp = idpf_queue_has(XDP, txq);
106 
107 	if (xdp)
108 		libeth_xdpsq_deinit_timer(txq->timer);
109 
110 	idpf_tx_buf_rel_all(txq);
111 
112 	if (!xdp)
113 		netdev_tx_reset_subqueue(txq->netdev, txq->idx);
114 
115 	idpf_xsk_clear_queue(txq, VIRTCHNL2_QUEUE_TYPE_TX);
116 
117 	if (!txq->desc_ring)
118 		return;
119 
120 	if (!xdp && txq->refillq)
121 		kfree(txq->refillq->ring);
122 
123 	dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma);
124 	txq->desc_ring = NULL;
125 	txq->next_to_use = 0;
126 	txq->next_to_clean = 0;
127 }
128 
129 /**
130  * idpf_compl_desc_rel - Free completion resources per queue
131  * @complq: completion queue
132  *
133  * Free all completion software resources.
134  */
135 static void idpf_compl_desc_rel(struct idpf_compl_queue *complq)
136 {
137 	idpf_xsk_clear_queue(complq, VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
138 
139 	if (!complq->desc_ring)
140 		return;
141 
142 	dma_free_coherent(complq->netdev->dev.parent, complq->size,
143 			  complq->desc_ring, complq->dma);
144 	complq->desc_ring = NULL;
145 	complq->next_to_use = 0;
146 	complq->next_to_clean = 0;
147 }
148 
149 /**
150  * idpf_tx_desc_rel_all - Free Tx Resources for All Queues
151  * @rsrc: pointer to queue and vector resources
152  *
153  * Free all transmit software resources
154  */
155 static void idpf_tx_desc_rel_all(struct idpf_q_vec_rsrc *rsrc)
156 {
157 	if (!rsrc->txq_grps)
158 		return;
159 
160 	for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) {
161 		struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i];
162 
163 		for (unsigned int j = 0; j < txq_grp->num_txq; j++)
164 			idpf_tx_desc_rel(txq_grp->txqs[j]);
165 
166 		if (idpf_is_queue_model_split(rsrc->txq_model))
167 			idpf_compl_desc_rel(txq_grp->complq);
168 	}
169 }
170 
171 /**
172  * idpf_tx_buf_alloc_all - Allocate memory for all buffer resources
173  * @tx_q: queue for which the buffers are allocated
174  *
175  * Return: 0 on success, negative on failure
176  */
177 static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q)
178 {
179 	/* Allocate bookkeeping buffers only. Buffers supplied to HW are
180 	 * allocated by the kernel network stack and received as part of the skb.
181 	 */
182 	if (idpf_queue_has(FLOW_SCH_EN, tx_q))
183 		tx_q->buf_pool_size = U16_MAX;
184 	else
185 		tx_q->buf_pool_size = tx_q->desc_count;
186 	tx_q->tx_buf = kcalloc(tx_q->buf_pool_size, sizeof(*tx_q->tx_buf),
187 			       GFP_KERNEL);
188 	if (!tx_q->tx_buf)
189 		return -ENOMEM;
190 
191 	return 0;
192 }
193 
194 /**
195  * idpf_tx_desc_alloc - Allocate the Tx descriptors
196  * @vport: vport to allocate resources for
197  * @tx_q: the tx ring to set up
198  *
199  * Return: 0 on success, negative on failure
200  */
201 static int idpf_tx_desc_alloc(const struct idpf_vport *vport,
202 			      struct idpf_tx_queue *tx_q)
203 {
204 	struct device *dev = tx_q->dev;
205 	struct idpf_sw_queue *refillq;
206 	int err;
207 
208 	err = idpf_tx_buf_alloc_all(tx_q);
209 	if (err)
210 		goto err_alloc;
211 
212 	tx_q->size = tx_q->desc_count * sizeof(*tx_q->base_tx);
213 
214 	/* Allocate descriptors and also round up to nearest 4K */
215 	tx_q->size = ALIGN(tx_q->size, 4096);
216 	tx_q->desc_ring = dmam_alloc_coherent(dev, tx_q->size, &tx_q->dma,
217 					      GFP_KERNEL);
218 	if (!tx_q->desc_ring) {
219 		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
220 			tx_q->size);
221 		err = -ENOMEM;
222 		goto err_alloc;
223 	}
224 
225 	tx_q->next_to_use = 0;
226 	tx_q->next_to_clean = 0;
227 	idpf_queue_set(GEN_CHK, tx_q);
228 
229 	idpf_xsk_setup_queue(vport, tx_q, VIRTCHNL2_QUEUE_TYPE_TX);
230 
231 	if (!idpf_queue_has(FLOW_SCH_EN, tx_q))
232 		return 0;
233 
234 	refillq = tx_q->refillq;
235 	refillq->desc_count = tx_q->buf_pool_size;
236 	refillq->ring = kcalloc(refillq->desc_count, sizeof(u32),
237 				GFP_KERNEL);
238 	if (!refillq->ring) {
239 		err = -ENOMEM;
240 		goto err_alloc;
241 	}
242 
243 	for (unsigned int i = 0; i < refillq->desc_count; i++)
244 		refillq->ring[i] =
245 			FIELD_PREP(IDPF_RFL_BI_BUFID_M, i) |
246 			FIELD_PREP(IDPF_RFL_BI_GEN_M,
247 				   idpf_queue_has(GEN_CHK, refillq));
248 
249 	/* Go ahead and flip the GEN bit since this counts as filling
250 	 * up the ring, i.e. the ring has already wrapped.
251 	 */
252 	idpf_queue_change(GEN_CHK, refillq);
253 
254 	tx_q->last_re = tx_q->desc_count - IDPF_TX_SPLITQ_RE_MIN_GAP;
255 
256 	return 0;
257 
258 err_alloc:
259 	idpf_tx_desc_rel(tx_q);
260 
261 	return err;
262 }
263 
264 /**
265  * idpf_compl_desc_alloc - allocate completion descriptors
266  * @vport: virtual port private structure
267  * @complq: completion queue to set up
268  *
269  * Return: 0 on success, -errno on failure.
270  */
271 static int idpf_compl_desc_alloc(const struct idpf_vport *vport,
272 				 struct idpf_compl_queue *complq)
273 {
274 	u32 desc_size;
275 
276 	desc_size = idpf_queue_has(FLOW_SCH_EN, complq) ?
277 		    sizeof(*complq->comp) : sizeof(*complq->comp_4b);
278 	complq->size = array_size(complq->desc_count, desc_size);
279 
280 	complq->desc_ring = dma_alloc_coherent(complq->netdev->dev.parent,
281 					       complq->size, &complq->dma,
282 					       GFP_KERNEL);
283 	if (!complq->desc_ring)
284 		return -ENOMEM;
285 
286 	complq->next_to_use = 0;
287 	complq->next_to_clean = 0;
288 	idpf_queue_set(GEN_CHK, complq);
289 
290 	idpf_xsk_setup_queue(vport, complq,
291 			     VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
292 
293 	return 0;
294 }
295 
296 /**
297  * idpf_tx_desc_alloc_all - allocate Tx resources for all queues
298  * @vport: virtual port private structure
299  * @rsrc: pointer to queue and vector resources
300  *
301  * Return: 0 on success, negative on failure
302  */
303 static int idpf_tx_desc_alloc_all(struct idpf_vport *vport,
304 				  struct idpf_q_vec_rsrc *rsrc)
305 {
306 	int err = 0;
307 
308 	/* Set up the Tx descriptor queues. In the single queue model there
309 	 * are no separate completion queues to allocate.
310 	 */
311 	for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) {
312 		for (unsigned int j = 0; j < rsrc->txq_grps[i].num_txq; j++) {
313 			struct idpf_tx_queue *txq = rsrc->txq_grps[i].txqs[j];
314 
315 			err = idpf_tx_desc_alloc(vport, txq);
316 			if (err) {
317 				pci_err(vport->adapter->pdev,
318 					"Allocation for Tx Queue %u failed\n",
319 					i);
320 				goto err_out;
321 			}
322 		}
323 
324 		if (!idpf_is_queue_model_split(rsrc->txq_model))
325 			continue;
326 
327 		/* Setup completion queues */
328 		err = idpf_compl_desc_alloc(vport, rsrc->txq_grps[i].complq);
329 		if (err) {
330 			pci_err(vport->adapter->pdev,
331 				"Allocation for Tx Completion Queue %u failed\n",
332 				i);
333 			goto err_out;
334 		}
335 	}
336 
337 err_out:
338 	if (err)
339 		idpf_tx_desc_rel_all(rsrc);
340 
341 	return err;
342 }
343 
344 /**
345  * idpf_rx_page_rel - Release an rx buffer page
346  * @rx_buf: the buffer to free
347  */
348 static void idpf_rx_page_rel(struct libeth_fqe *rx_buf)
349 {
350 	if (unlikely(!rx_buf->netmem))
351 		return;
352 
353 	libeth_rx_recycle_slow(rx_buf->netmem);
354 
355 	rx_buf->netmem = 0;
356 	rx_buf->offset = 0;
357 }
358 
359 /**
360  * idpf_rx_hdr_buf_rel_all - Release header buffer memory
361  * @bufq: queue to use
362  */
363 static void idpf_rx_hdr_buf_rel_all(struct idpf_buf_queue *bufq)
364 {
365 	struct libeth_fq fq = {
366 		.fqes	= bufq->hdr_buf,
367 		.pp	= bufq->hdr_pp,
368 	};
369 
370 	for (u32 i = 0; i < bufq->desc_count; i++)
371 		idpf_rx_page_rel(&bufq->hdr_buf[i]);
372 
373 	libeth_rx_fq_destroy(&fq);
374 	bufq->hdr_buf = NULL;
375 	bufq->hdr_pp = NULL;
376 }
377 
378 /**
379  * idpf_rx_buf_rel_bufq - Free all Rx buffer resources for a buffer queue
380  * @bufq: queue to be cleaned
381  */
382 static void idpf_rx_buf_rel_bufq(struct idpf_buf_queue *bufq)
383 {
384 	struct libeth_fq fq = {
385 		.fqes	= bufq->buf,
386 		.pp	= bufq->pp,
387 	};
388 
389 	/* queue already cleared, nothing to do */
390 	if (!bufq->buf)
391 		return;
392 
393 	if (idpf_queue_has(XSK, bufq)) {
394 		idpf_xskfq_rel(bufq);
395 		return;
396 	}
397 
398 	/* Free all the bufs allocated and given to hw on Rx queue */
399 	for (u32 i = 0; i < bufq->desc_count; i++)
400 		idpf_rx_page_rel(&bufq->buf[i]);
401 
402 	if (idpf_queue_has(HSPLIT_EN, bufq))
403 		idpf_rx_hdr_buf_rel_all(bufq);
404 
405 	libeth_rx_fq_destroy(&fq);
406 	bufq->buf = NULL;
407 	bufq->pp = NULL;
408 }
409 
410 /**
411  * idpf_rx_buf_rel_all - Free all Rx buffer resources for a receive queue
412  * @rxq: queue to be cleaned
413  */
414 static void idpf_rx_buf_rel_all(struct idpf_rx_queue *rxq)
415 {
416 	struct libeth_fq fq = {
417 		.fqes	= rxq->rx_buf,
418 		.pp	= rxq->pp,
419 	};
420 
421 	if (!rxq->rx_buf)
422 		return;
423 
424 	for (u32 i = 0; i < rxq->desc_count; i++)
425 		idpf_rx_page_rel(&rxq->rx_buf[i]);
426 
427 	libeth_rx_fq_destroy(&fq);
428 	rxq->rx_buf = NULL;
429 	rxq->pp = NULL;
430 }
431 
432 /**
433  * idpf_rx_desc_rel - Free a specific Rx q resources
434  * @rxq: queue to clean the resources from
435  * @dev: device to free DMA memory
436  * @model: single or split queue model
437  *
438  * Free a specific rx queue resources
439  */
440 static void idpf_rx_desc_rel(struct idpf_rx_queue *rxq, struct device *dev,
441 			     u32 model)
442 {
443 	if (!rxq)
444 		return;
445 
446 	if (!idpf_queue_has(XSK, rxq))
447 		libeth_xdp_return_stash(&rxq->xdp);
448 
449 	if (!idpf_is_queue_model_split(model))
450 		idpf_rx_buf_rel_all(rxq);
451 
452 	idpf_xsk_clear_queue(rxq, VIRTCHNL2_QUEUE_TYPE_RX);
453 
454 	rxq->next_to_alloc = 0;
455 	rxq->next_to_clean = 0;
456 	rxq->next_to_use = 0;
457 	if (!rxq->desc_ring)
458 		return;
459 
460 	dmam_free_coherent(dev, rxq->size, rxq->desc_ring, rxq->dma);
461 	rxq->desc_ring = NULL;
462 }
463 
464 /**
465  * idpf_rx_desc_rel_bufq - free buffer queue resources
466  * @bufq: buffer queue to clean the resources from
467  * @dev: device to free DMA memory
468  */
469 static void idpf_rx_desc_rel_bufq(struct idpf_buf_queue *bufq,
470 				  struct device *dev)
471 {
472 	if (!bufq)
473 		return;
474 
475 	idpf_rx_buf_rel_bufq(bufq);
476 	idpf_xsk_clear_queue(bufq, VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
477 
478 	bufq->next_to_alloc = 0;
479 	bufq->next_to_clean = 0;
480 	bufq->next_to_use = 0;
481 
482 	if (!bufq->split_buf)
483 		return;
484 
485 	dma_free_coherent(dev, bufq->size, bufq->split_buf, bufq->dma);
486 	bufq->split_buf = NULL;
487 }
488 
489 /**
490  * idpf_rx_desc_rel_all - Free Rx Resources for All Queues
491  * @rsrc: pointer to queue and vector resources
492  *
493  * Free all Rx queue resources (descriptor rings and buffers) for all
494  * queue groups.
495  */
496 static void idpf_rx_desc_rel_all(struct idpf_q_vec_rsrc *rsrc)
497 {
498 	struct device *dev = rsrc->dev;
499 	struct idpf_rxq_group *rx_qgrp;
500 	u16 num_rxq;
501 
502 	if (!rsrc->rxq_grps)
503 		return;
504 
505 	for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
506 		rx_qgrp = &rsrc->rxq_grps[i];
507 
508 		if (!idpf_is_queue_model_split(rsrc->rxq_model)) {
509 			for (unsigned int j = 0; j < rx_qgrp->singleq.num_rxq; j++)
510 				idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j], dev,
511 						 VIRTCHNL2_QUEUE_MODEL_SINGLE);
512 			continue;
513 		}
514 
515 		num_rxq = rx_qgrp->splitq.num_rxq_sets;
516 		for (unsigned int j = 0; j < num_rxq; j++)
517 			idpf_rx_desc_rel(&rx_qgrp->splitq.rxq_sets[j]->rxq,
518 					 dev, VIRTCHNL2_QUEUE_MODEL_SPLIT);
519 
520 		if (!rx_qgrp->splitq.bufq_sets)
521 			continue;
522 
523 		for (unsigned int j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
524 			struct idpf_bufq_set *bufq_set =
525 				&rx_qgrp->splitq.bufq_sets[j];
526 
527 			idpf_rx_desc_rel_bufq(&bufq_set->bufq, dev);
528 		}
529 	}
530 }
531 
532 /**
533  * idpf_rx_buf_hw_update - Store the new tail value and write it to HW
534  * @bufq: queue to bump
535  * @val: new tail index (next_to_use)
536  */
537 static void idpf_rx_buf_hw_update(struct idpf_buf_queue *bufq, u32 val)
538 {
539 	bufq->next_to_use = val;
540 
541 	if (unlikely(!bufq->tail))
542 		return;
543 
544 	/* writel has an implicit memory barrier */
545 	writel(val, bufq->tail);
546 }
547 
548 /**
549  * idpf_rx_hdr_buf_alloc_all - Allocate memory for header buffers
550  * @bufq: ring to use
551  *
552  * Return: 0 on success, negative on failure.
553  */
554 static int idpf_rx_hdr_buf_alloc_all(struct idpf_buf_queue *bufq)
555 {
556 	struct libeth_fq fq = {
557 		.count	= bufq->desc_count,
558 		.type	= LIBETH_FQE_HDR,
559 		.xdp	= idpf_xdp_enabled(bufq->q_vector->vport),
560 		.nid	= idpf_q_vector_to_mem(bufq->q_vector),
561 	};
562 	int ret;
563 
564 	ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi);
565 	if (ret)
566 		return ret;
567 
568 	bufq->hdr_pp = fq.pp;
569 	bufq->hdr_buf = fq.fqes;
570 	bufq->hdr_truesize = fq.truesize;
571 	bufq->rx_hbuf_size = fq.buf_len;
572 
573 	return 0;
574 }
575 
576 /**
577  * idpf_post_buf_refill - Post buffer id to refill queue
578  * @refillq: refill queue to post to
579  * @buf_id: buffer id to post
580  */
581 static void idpf_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
582 {
583 	u32 nta = refillq->next_to_use;
584 
585 	/* store the buffer ID and the SW maintained GEN bit to the refillq */
586 	refillq->ring[nta] =
587 		FIELD_PREP(IDPF_RFL_BI_BUFID_M, buf_id) |
588 		FIELD_PREP(IDPF_RFL_BI_GEN_M,
589 			   idpf_queue_has(GEN_CHK, refillq));
590 
591 	if (unlikely(++nta == refillq->desc_count)) {
592 		nta = 0;
593 		idpf_queue_change(GEN_CHK, refillq);
594 	}
595 
596 	refillq->next_to_use = nta;
597 }
598 
599 /**
600  * idpf_rx_post_buf_desc - Post buffer to bufq descriptor ring
601  * @bufq: buffer queue to post to
602  * @buf_id: buffer id to post
603  *
604  * Return: %false if buffer could not be allocated, %true otherwise.
605  */
606 static bool idpf_rx_post_buf_desc(struct idpf_buf_queue *bufq, u16 buf_id)
607 {
608 	struct virtchnl2_splitq_rx_buf_desc *splitq_rx_desc = NULL;
609 	struct libeth_fq_fp fq = {
610 		.count	= bufq->desc_count,
611 	};
612 	u16 nta = bufq->next_to_alloc;
613 	dma_addr_t addr;
614 
615 	splitq_rx_desc = &bufq->split_buf[nta];
616 
617 	if (idpf_queue_has(HSPLIT_EN, bufq)) {
618 		fq.pp = bufq->hdr_pp;
619 		fq.fqes = bufq->hdr_buf;
620 		fq.truesize = bufq->hdr_truesize;
621 
622 		addr = libeth_rx_alloc(&fq, buf_id);
623 		if (addr == DMA_MAPPING_ERROR)
624 			return false;
625 
626 		splitq_rx_desc->hdr_addr = cpu_to_le64(addr);
627 	}
628 
629 	fq.pp = bufq->pp;
630 	fq.fqes = bufq->buf;
631 	fq.truesize = bufq->truesize;
632 
633 	addr = libeth_rx_alloc(&fq, buf_id);
634 	if (addr == DMA_MAPPING_ERROR)
635 		return false;
636 
637 	splitq_rx_desc->pkt_addr = cpu_to_le64(addr);
638 	splitq_rx_desc->qword0.buf_id = cpu_to_le16(buf_id);
639 
640 	nta++;
641 	if (unlikely(nta == bufq->desc_count))
642 		nta = 0;
643 	bufq->next_to_alloc = nta;
644 
645 	return true;
646 }
647 
648 /**
649  * idpf_rx_post_init_bufs - Post initial buffers to bufq
650  * @bufq: buffer queue to post working set to
651  * @working_set: number of buffers to put in working set
652  *
653  * Return: %true if @working_set bufs were posted successfully, %false otherwise.
654  */
655 static bool idpf_rx_post_init_bufs(struct idpf_buf_queue *bufq,
656 				   u16 working_set)
657 {
658 	int i;
659 
660 	for (i = 0; i < working_set; i++) {
661 		if (!idpf_rx_post_buf_desc(bufq, i))
662 			return false;
663 	}
664 
665 	idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq->next_to_alloc,
666 					       IDPF_RX_BUF_STRIDE));
667 
668 	return true;
669 }
670 
671 /**
672  * idpf_rx_buf_alloc_singleq - Allocate memory for all buffer resources
673  * @rxq: queue for which the buffers are allocated
674  *
675  * Return: 0 on success, -ENOMEM on failure.
676  */
677 static int idpf_rx_buf_alloc_singleq(struct idpf_rx_queue *rxq)
678 {
679 	if (idpf_rx_singleq_buf_hw_alloc_all(rxq, rxq->desc_count - 1))
680 		goto err;
681 
682 	return 0;
683 
684 err:
685 	idpf_rx_buf_rel_all(rxq);
686 
687 	return -ENOMEM;
688 }
689 
690 /**
691  * idpf_rx_bufs_init_singleq - Initialize page pool and allocate Rx bufs
692  * @rxq: buffer queue to create page pool for
693  *
694  * Return: 0 on success, -errno on failure.
695  */
696 static int idpf_rx_bufs_init_singleq(struct idpf_rx_queue *rxq)
697 {
698 	struct libeth_fq fq = {
699 		.count		= rxq->desc_count,
700 		.type		= LIBETH_FQE_MTU,
701 		.buf_len	= IDPF_RX_MAX_BUF_SZ,
702 		.nid		= idpf_q_vector_to_mem(rxq->q_vector),
703 	};
704 	int ret;
705 
706 	ret = libeth_rx_fq_create(&fq, &rxq->q_vector->napi);
707 	if (ret)
708 		return ret;
709 
710 	rxq->pp = fq.pp;
711 	rxq->rx_buf = fq.fqes;
712 	rxq->truesize = fq.truesize;
713 	rxq->rx_buf_size = fq.buf_len;
714 
715 	return idpf_rx_buf_alloc_singleq(rxq);
716 }
717 
718 /**
719  * idpf_rx_buf_alloc_all - Allocate memory for all buffer resources
720  * @rxbufq: queue for which the buffers are allocated
721  *
722  * Return: 0 on success, negative on failure
723  */
724 static int idpf_rx_buf_alloc_all(struct idpf_buf_queue *rxbufq)
725 {
726 	int err = 0;
727 
728 	if (idpf_queue_has(HSPLIT_EN, rxbufq)) {
729 		err = idpf_rx_hdr_buf_alloc_all(rxbufq);
730 		if (err)
731 			goto rx_buf_alloc_all_out;
732 	}
733 
734 	/* Allocate buffers to be given to HW. */
735 	if (!idpf_rx_post_init_bufs(rxbufq, IDPF_RX_BUFQ_WORKING_SET(rxbufq)))
736 		err = -ENOMEM;
737 
738 rx_buf_alloc_all_out:
739 	if (err)
740 		idpf_rx_buf_rel_bufq(rxbufq);
741 
742 	return err;
743 }
744 
745 /**
746  * idpf_rx_bufs_init - Initialize page pool, allocate rx bufs, and post to HW
747  * @bufq: buffer queue to create page pool for
748  * @type: type of Rx buffers to allocate
749  *
750  * Return: 0 on success, negative on failure
751  */
752 static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq,
753 			     enum libeth_fqe_type type)
754 {
755 	struct libeth_fq fq = {
756 		.truesize	= bufq->truesize,
757 		.count		= bufq->desc_count,
758 		.type		= type,
759 		.buf_len	= IDPF_RX_MAX_BUF_SZ,
760 		.hsplit		= idpf_queue_has(HSPLIT_EN, bufq),
761 		.xdp		= idpf_xdp_enabled(bufq->q_vector->vport),
762 		.nid		= idpf_q_vector_to_mem(bufq->q_vector),
763 	};
764 	int ret;
765 
766 	if (idpf_queue_has(XSK, bufq))
767 		return idpf_xskfq_init(bufq);
768 
769 	ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi);
770 	if (ret)
771 		return ret;
772 
773 	bufq->pp = fq.pp;
774 	bufq->buf = fq.fqes;
775 	bufq->truesize = fq.truesize;
776 	bufq->rx_buf_size = fq.buf_len;
777 
778 	return idpf_rx_buf_alloc_all(bufq);
779 }
780 
781 /**
782  * idpf_rx_bufs_init_all - Initialize all RX bufs
783  * @vport: pointer to vport struct
784  * @rsrc: pointer to queue and vector resources
785  *
786  * Return: 0 on success, negative on failure
787  */
788 int idpf_rx_bufs_init_all(struct idpf_vport *vport,
789 			  struct idpf_q_vec_rsrc *rsrc)
790 {
791 	bool split = idpf_is_queue_model_split(rsrc->rxq_model);
792 	int err;
793 
794 	idpf_xdp_copy_prog_to_rqs(rsrc, vport->xdp_prog);
795 
796 	for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
797 		struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
798 		u32 truesize = 0;
799 
800 		/* Allocate bufs for the rxq itself in singleq */
801 		if (!split) {
802 			int num_rxq = rx_qgrp->singleq.num_rxq;
803 
804 			for (unsigned int j = 0; j < num_rxq; j++) {
805 				struct idpf_rx_queue *q;
806 
807 				q = rx_qgrp->singleq.rxqs[j];
808 				err = idpf_rx_bufs_init_singleq(q);
809 				if (err)
810 					return err;
811 			}
812 
813 			continue;
814 		}
815 
816 		/* Otherwise, allocate bufs for the buffer queues */
817 		for (unsigned int j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
818 			enum libeth_fqe_type type;
819 			struct idpf_buf_queue *q;
820 
821 			q = &rx_qgrp->splitq.bufq_sets[j].bufq;
822 			q->truesize = truesize;
823 
824 			type = truesize ? LIBETH_FQE_SHORT : LIBETH_FQE_MTU;
825 
826 			err = idpf_rx_bufs_init(q, type);
827 			if (err)
828 				return err;
829 
830 			truesize = q->truesize >> 1;
831 		}
832 	}
833 
834 	return 0;
835 }
836 
837 /**
838  * idpf_rx_desc_alloc - Allocate queue Rx resources
839  * @vport: vport to allocate resources for
840  * @rxq: Rx queue for which the resources are setup
841  *
842  * Return: 0 on success, negative on failure
843  */
844 static int idpf_rx_desc_alloc(const struct idpf_vport *vport,
845 			      struct idpf_rx_queue *rxq)
846 {
847 	struct device *dev = &vport->adapter->pdev->dev;
848 
849 	rxq->size = rxq->desc_count * sizeof(union virtchnl2_rx_desc);
850 
851 	/* Allocate descriptors and also round up to nearest 4K */
852 	rxq->size = ALIGN(rxq->size, 4096);
853 	rxq->desc_ring = dmam_alloc_coherent(dev, rxq->size,
854 					     &rxq->dma, GFP_KERNEL);
855 	if (!rxq->desc_ring) {
856 		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
857 			rxq->size);
858 		return -ENOMEM;
859 	}
860 
861 	rxq->next_to_alloc = 0;
862 	rxq->next_to_clean = 0;
863 	rxq->next_to_use = 0;
864 	idpf_queue_set(GEN_CHK, rxq);
865 
866 	idpf_xsk_setup_queue(vport, rxq, VIRTCHNL2_QUEUE_TYPE_RX);
867 
868 	return 0;
869 }
870 
871 /**
872  * idpf_bufq_desc_alloc - Allocate buffer queue descriptor ring
873  * @vport: vport to allocate resources for
874  * @bufq: buffer queue for which the resources are set up
875  *
876  * Return: 0 on success, -ENOMEM on failure.
877  */
878 static int idpf_bufq_desc_alloc(const struct idpf_vport *vport,
879 				struct idpf_buf_queue *bufq)
880 {
881 	struct device *dev = &vport->adapter->pdev->dev;
882 
883 	bufq->size = array_size(bufq->desc_count, sizeof(*bufq->split_buf));
884 
885 	bufq->split_buf = dma_alloc_coherent(dev, bufq->size, &bufq->dma,
886 					     GFP_KERNEL);
887 	if (!bufq->split_buf)
888 		return -ENOMEM;
889 
890 	bufq->next_to_alloc = 0;
891 	bufq->next_to_clean = 0;
892 	bufq->next_to_use = 0;
893 	idpf_queue_set(GEN_CHK, bufq);
894 
895 	idpf_xsk_setup_queue(vport, bufq, VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
896 
897 	return 0;
898 }
899 
900 /**
901  * idpf_rx_desc_alloc_all - allocate Rx resources for all queues
902  * @vport: virtual port structure
903  * @rsrc: pointer to queue and vector resources
904  *
905  * Return: 0 on success, negative on failure
906  */
907 static int idpf_rx_desc_alloc_all(struct idpf_vport *vport,
908 				  struct idpf_q_vec_rsrc *rsrc)
909 {
910 	struct idpf_rxq_group *rx_qgrp;
911 	u16 num_rxq;
912 	int err;
913 
914 	for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
915 		rx_qgrp = &rsrc->rxq_grps[i];
916 		if (idpf_is_queue_model_split(rsrc->rxq_model))
917 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
918 		else
919 			num_rxq = rx_qgrp->singleq.num_rxq;
920 
921 		for (unsigned int j = 0; j < num_rxq; j++) {
922 			struct idpf_rx_queue *q;
923 
924 			if (idpf_is_queue_model_split(rsrc->rxq_model))
925 				q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
926 			else
927 				q = rx_qgrp->singleq.rxqs[j];
928 
929 			err = idpf_rx_desc_alloc(vport, q);
930 			if (err) {
931 				pci_err(vport->adapter->pdev,
932 					"Memory allocation for Rx queue %u from queue group %u failed\n",
933 					j, i);
934 				goto err_out;
935 			}
936 		}
937 
938 		if (!idpf_is_queue_model_split(rsrc->rxq_model))
939 			continue;
940 
941 		for (unsigned int j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
942 			struct idpf_buf_queue *q;
943 
944 			q = &rx_qgrp->splitq.bufq_sets[j].bufq;
945 
946 			err = idpf_bufq_desc_alloc(vport, q);
947 			if (err) {
948 				pci_err(vport->adapter->pdev,
949 					"Memory allocation for Rx Buffer Queue %u from queue group %u failed\n",
950 					j, i);
951 				goto err_out;
952 			}
953 		}
954 	}
955 
956 	return 0;
957 
958 err_out:
959 	idpf_rx_desc_rel_all(rsrc);
960 
961 	return err;
962 }
963 
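/**
 * idpf_init_queue_set - allocate resources for the queues in a queue set
 * @vport: vport the queue set belongs to
 * @qs: set of queues to initialize
 *
 * Allocate descriptor rings, XDP Rx queue info and Rx buffers for every
 * queue in the set, depending on the queue type.
 *
 * Return: 0 on success, -errno on failure.
 */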
964 static int idpf_init_queue_set(const struct idpf_vport *vport,
965 			       const struct idpf_queue_set *qs)
966 {
967 	bool splitq;
968 	int err;
969 
970 	splitq = idpf_is_queue_model_split(qs->qv_rsrc->rxq_model);
971 
972 	for (u32 i = 0; i < qs->num; i++) {
973 		const struct idpf_queue_ptr *q = &qs->qs[i];
974 		struct idpf_buf_queue *bufq;
975 
976 		switch (q->type) {
977 		case VIRTCHNL2_QUEUE_TYPE_RX:
978 			err = idpf_rx_desc_alloc(vport, q->rxq);
979 			if (err)
980 				break;
981 
982 			err = idpf_xdp_rxq_info_init(q->rxq);
983 			if (err)
984 				break;
985 
986 			if (!splitq)
987 				err = idpf_rx_bufs_init_singleq(q->rxq);
988 
989 			break;
990 		case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
991 			bufq = q->bufq;
992 
993 			err = idpf_bufq_desc_alloc(vport, bufq);
994 			if (err)
995 				break;
996 
997 			for (u32 j = 0; j < bufq->q_vector->num_bufq; j++) {
998 				struct idpf_buf_queue * const *bufqs;
999 				enum libeth_fqe_type type;
1000 				u32 ts;
1001 
1002 				bufqs = bufq->q_vector->bufq;
1003 				if (bufqs[j] != bufq)
1004 					continue;
1005 
1006 				if (j) {
1007 					type = LIBETH_FQE_SHORT;
1008 					ts = bufqs[j - 1]->truesize >> 1;
1009 				} else {
1010 					type = LIBETH_FQE_MTU;
1011 					ts = 0;
1012 				}
1013 
1014 				bufq->truesize = ts;
1015 
1016 				err = idpf_rx_bufs_init(bufq, type);
1017 				break;
1018 			}
1019 
1020 			break;
1021 		case VIRTCHNL2_QUEUE_TYPE_TX:
1022 			err = idpf_tx_desc_alloc(vport, q->txq);
1023 			break;
1024 		case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
1025 			err = idpf_compl_desc_alloc(vport, q->complq);
1026 			break;
1027 		default:
1028 			continue;
1029 		}
1030 
1031 		if (err)
1032 			return err;
1033 	}
1034 
1035 	return 0;
1036 }
1037 
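/**
 * idpf_clean_queue_set - release resources for the queues in a queue set
 * @qs: set of queues to clean
 *
 * Free descriptor rings and buffers for every queue in the set and reset
 * the related bookkeeping counters.
 */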
1038 static void idpf_clean_queue_set(const struct idpf_queue_set *qs)
1039 {
1040 	const struct idpf_q_vec_rsrc *rsrc = qs->qv_rsrc;
1041 
1042 	for (u32 i = 0; i < qs->num; i++) {
1043 		const struct idpf_queue_ptr *q = &qs->qs[i];
1044 
1045 		switch (q->type) {
1046 		case VIRTCHNL2_QUEUE_TYPE_RX:
1047 			idpf_xdp_rxq_info_deinit(q->rxq, rsrc->rxq_model);
1048 			idpf_rx_desc_rel(q->rxq, rsrc->dev, rsrc->rxq_model);
1049 			break;
1050 		case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
1051 			idpf_rx_desc_rel_bufq(q->bufq, rsrc->dev);
1052 			break;
1053 		case VIRTCHNL2_QUEUE_TYPE_TX:
1054 			idpf_tx_desc_rel(q->txq);
1055 
1056 			if (idpf_queue_has(XDP, q->txq)) {
1057 				q->txq->pending = 0;
1058 				q->txq->xdp_tx = 0;
1059 			} else {
1060 				q->txq->txq_grp->num_completions_pending = 0;
1061 			}
1062 
1063 			writel(q->txq->next_to_use, q->txq->tail);
1064 			break;
1065 		case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
1066 			idpf_compl_desc_rel(q->complq);
1067 			q->complq->num_completions = 0;
1068 			break;
1069 		default:
1070 			break;
1071 		}
1072 	}
1073 }
1074 
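/**
 * idpf_qvec_ena_irq - write the current ITR values and enable the interrupt
 * @qv: queue vector to enable the interrupt for
 */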
1075 static void idpf_qvec_ena_irq(struct idpf_q_vector *qv)
1076 {
1077 	if (qv->num_txq) {
1078 		u32 itr;
1079 
1080 		if (IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode))
1081 			itr = qv->vport->tx_itr_profile[qv->tx_dim.profile_ix];
1082 		else
1083 			itr = qv->tx_itr_value;
1084 
1085 		idpf_vport_intr_write_itr(qv, itr, true);
1086 	}
1087 
1088 	if (qv->num_rxq) {
1089 		u32 itr;
1090 
1091 		if (IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode))
1092 			itr = qv->vport->rx_itr_profile[qv->rx_dim.profile_ix];
1093 		else
1094 			itr = qv->rx_itr_value;
1095 
1096 		idpf_vport_intr_write_itr(qv, itr, false);
1097 	}
1098 
1099 	if (qv->num_txq || qv->num_rxq)
1100 		idpf_vport_intr_update_itr_ena_irq(qv);
1101 }
1102 
1103 /**
1104  * idpf_vector_to_queue_set - create a queue set associated with the given
1105  *			      queue vector
1106  * @qv: queue vector corresponding to the queue pair
1107  *
1108  * Build a dynamically allocated set of pointers to all queues associated
1109  * with the given queue vector (@qv).
1110  * Note that the caller is responsible for freeing the memory allocated by
1111  * this function using kfree().
1112  *
1113  * Return: &idpf_queue_set on success, %NULL in case of error.
1114  */
1115 static struct idpf_queue_set *
1116 idpf_vector_to_queue_set(struct idpf_q_vector *qv)
1117 {
1118 	u32 xdp_txq_offset = qv->vport->dflt_qv_rsrc.xdp_txq_offset;
1119 	bool xdp = xdp_txq_offset && !qv->num_xsksq;
1120 	struct idpf_vport *vport = qv->vport;
1121 	struct idpf_queue_set *qs;
1122 	u32 num;
1123 
1124 	num = qv->num_rxq + qv->num_bufq + qv->num_txq + qv->num_complq;
1125 	num += xdp ? qv->num_rxq * 2 : qv->num_xsksq * 2;
1126 	if (!num)
1127 		return NULL;
1128 
1129 	qs = idpf_alloc_queue_set(vport->adapter, &vport->dflt_qv_rsrc,
1130 				  vport->vport_id, num);
1131 	if (!qs)
1132 		return NULL;
1133 
1134 	num = 0;
1135 
1136 	for (u32 i = 0; i < qv->num_bufq; i++) {
1137 		qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
1138 		qs->qs[num++].bufq = qv->bufq[i];
1139 	}
1140 
1141 	for (u32 i = 0; i < qv->num_rxq; i++) {
1142 		qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_RX;
1143 		qs->qs[num++].rxq = qv->rx[i];
1144 	}
1145 
1146 	for (u32 i = 0; i < qv->num_txq; i++) {
1147 		qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX;
1148 		qs->qs[num++].txq = qv->tx[i];
1149 	}
1150 
1151 	for (u32 i = 0; i < qv->num_complq; i++) {
1152 		qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
1153 		qs->qs[num++].complq = qv->complq[i];
1154 	}
1155 
1156 	if (!xdp_txq_offset)
1157 		goto finalize;
1158 
1159 	if (xdp) {
1160 		for (u32 i = 0; i < qv->num_rxq; i++) {
1161 			u32 idx = xdp_txq_offset + qv->rx[i]->idx;
1162 
1163 			qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX;
1164 			qs->qs[num++].txq = vport->txqs[idx];
1165 
1166 			qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
1167 			qs->qs[num++].complq = vport->txqs[idx]->complq;
1168 		}
1169 	} else {
1170 		for (u32 i = 0; i < qv->num_xsksq; i++) {
1171 			qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX;
1172 			qs->qs[num++].txq = qv->xsksq[i];
1173 
1174 			qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
1175 			qs->qs[num++].complq = qv->xsksq[i]->complq;
1176 		}
1177 	}
1178 
1179 finalize:
1180 	if (num != qs->num) {
1181 		kfree(qs);
1182 		return NULL;
1183 	}
1184 
1185 	return qs;
1186 }
1187 
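/**
 * idpf_qp_enable - bring up the queues associated with a queue pair
 * @vport: vport the queue pair belongs to
 * @qs: set of queues corresponding to the pair
 * @qid: index of the queue pair
 *
 * Allocate and configure the queues in the set, enable them in HW,
 * re-attach any XSk Tx queues to the vector, then re-enable NAPI, the
 * interrupt and the netdev subqueue.
 *
 * Return: 0 on success, -errno on failure.
 */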
1188 static int idpf_qp_enable(const struct idpf_vport *vport,
1189 			  const struct idpf_queue_set *qs, u32 qid)
1190 {
1191 	const struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
1192 	struct idpf_q_vector *q_vector;
1193 	int err;
1194 
1195 	q_vector = idpf_find_rxq_vec(vport, qid);
1196 
1197 	err = idpf_init_queue_set(vport, qs);
1198 	if (err) {
1199 		netdev_err(vport->netdev, "Could not initialize queues in pair %u: %pe\n",
1200 			   qid, ERR_PTR(err));
1201 		return err;
1202 	}
1203 
1204 	if (!rsrc->xdp_txq_offset)
1205 		goto config;
1206 
1207 	q_vector->xsksq = kcalloc(DIV_ROUND_UP(rsrc->num_rxq_grp,
1208 					       rsrc->num_q_vectors),
1209 				  sizeof(*q_vector->xsksq), GFP_KERNEL);
1210 	if (!q_vector->xsksq)
1211 		return -ENOMEM;
1212 
1213 	for (u32 i = 0; i < qs->num; i++) {
1214 		const struct idpf_queue_ptr *q = &qs->qs[i];
1215 
1216 		if (q->type != VIRTCHNL2_QUEUE_TYPE_TX)
1217 			continue;
1218 
1219 		if (!idpf_queue_has(XSK, q->txq))
1220 			continue;
1221 
1222 		idpf_xsk_init_wakeup(q_vector);
1223 
1224 		q->txq->q_vector = q_vector;
1225 		q_vector->xsksq[q_vector->num_xsksq++] = q->txq;
1226 	}
1227 
1228 config:
1229 	err = idpf_send_config_queue_set_msg(qs);
1230 	if (err) {
1231 		netdev_err(vport->netdev, "Could not configure queues in pair %u: %pe\n",
1232 			   qid, ERR_PTR(err));
1233 		return err;
1234 	}
1235 
1236 	err = idpf_send_enable_queue_set_msg(qs);
1237 	if (err) {
1238 		netdev_err(vport->netdev, "Could not enable queues in pair %u: %pe\n",
1239 			   qid, ERR_PTR(err));
1240 		return err;
1241 	}
1242 
1243 	napi_enable(&q_vector->napi);
1244 	idpf_qvec_ena_irq(q_vector);
1245 
1246 	netif_start_subqueue(vport->netdev, qid);
1247 
1248 	return 0;
1249 }
1250 
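/**
 * idpf_qp_disable - bring down the queues associated with a queue pair
 * @vport: vport the queue pair belongs to
 * @qs: set of queues corresponding to the pair
 * @qid: index of the queue pair
 *
 * Stop the netdev subqueue, mask the interrupt and disable NAPI, then
 * disable the queues in HW and release their resources.
 *
 * Return: 0 on success, -errno on failure.
 */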
1251 static int idpf_qp_disable(const struct idpf_vport *vport,
1252 			   const struct idpf_queue_set *qs, u32 qid)
1253 {
1254 	struct idpf_q_vector *q_vector;
1255 	int err;
1256 
1257 	q_vector = idpf_find_rxq_vec(vport, qid);
1258 	netif_stop_subqueue(vport->netdev, qid);
1259 
1260 	writel(0, q_vector->intr_reg.dyn_ctl);
1261 	napi_disable(&q_vector->napi);
1262 
1263 	err = idpf_send_disable_queue_set_msg(qs);
1264 	if (err) {
1265 		netdev_err(vport->netdev, "Could not disable queues in pair %u: %pe\n",
1266 			   qid, ERR_PTR(err));
1267 		return err;
1268 	}
1269 
1270 	idpf_clean_queue_set(qs);
1271 
1272 	kfree(q_vector->xsksq);
1273 	q_vector->num_xsksq = 0;
1274 
1275 	return 0;
1276 }
1277 
1278 /**
1279  * idpf_qp_switch - enable or disable queues associated with queue pair
1280  * @vport: vport to switch the pair for
1281  * @qid: index of the queue pair to switch
1282  * @en: whether to enable or disable the pair
1283  *
1284  * Return: 0 on success, -errno on failure.
1285  */
1286 int idpf_qp_switch(struct idpf_vport *vport, u32 qid, bool en)
1287 {
1288 	struct idpf_q_vector *q_vector = idpf_find_rxq_vec(vport, qid);
1289 	struct idpf_queue_set *qs __free(kfree) = NULL;
1290 
1291 	if (idpf_find_txq_vec(vport, qid) != q_vector)
1292 		return -EINVAL;
1293 
1294 	qs = idpf_vector_to_queue_set(q_vector);
1295 	if (!qs)
1296 		return -ENOMEM;
1297 
1298 	return en ? idpf_qp_enable(vport, qs, qid) :
1299 		    idpf_qp_disable(vport, qs, qid);
1300 }
1301 
1302 /**
1303  * idpf_txq_group_rel - Release all resources for txq groups
1304  * @rsrc: pointer to queue and vector resources
1305  */
1306 static void idpf_txq_group_rel(struct idpf_q_vec_rsrc *rsrc)
1307 {
1308 	bool split;
1309 
1310 	if (!rsrc->txq_grps)
1311 		return;
1312 
1313 	split = idpf_is_queue_model_split(rsrc->txq_model);
1314 
1315 	for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) {
1316 		struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i];
1317 
1318 		for (unsigned int j = 0; j < txq_grp->num_txq; j++) {
1319 			if (idpf_queue_has(FLOW_SCH_EN, txq_grp->txqs[j])) {
1320 				kfree(txq_grp->txqs[j]->refillq);
1321 				txq_grp->txqs[j]->refillq = NULL;
1322 			}
1323 
1324 			kfree(txq_grp->txqs[j]);
1325 			txq_grp->txqs[j] = NULL;
1326 		}
1327 
1328 		if (!split)
1329 			continue;
1330 
1331 		kfree(txq_grp->complq);
1332 		txq_grp->complq = NULL;
1333 	}
1334 	kfree(rsrc->txq_grps);
1335 	rsrc->txq_grps = NULL;
1336 }
1337 
1338 /**
1339  * idpf_rxq_sw_queue_rel - Release software queue resources
1340  * @rx_qgrp: rx queue group with software queues
1341  */
1342 static void idpf_rxq_sw_queue_rel(struct idpf_rxq_group *rx_qgrp)
1343 {
1344 	for (unsigned int i = 0; i < rx_qgrp->splitq.num_bufq_sets; i++) {
1345 		struct idpf_bufq_set *bufq_set = &rx_qgrp->splitq.bufq_sets[i];
1346 
1347 		for (unsigned int j = 0; j < bufq_set->num_refillqs; j++) {
1348 			kfree(bufq_set->refillqs[j].ring);
1349 			bufq_set->refillqs[j].ring = NULL;
1350 		}
1351 		kfree(bufq_set->refillqs);
1352 		bufq_set->refillqs = NULL;
1353 	}
1354 }
1355 
1356 /**
1357  * idpf_rxq_group_rel - Release all resources for rxq groups
1358  * @rsrc: pointer to queue and vector resources
1359  */
1360 static void idpf_rxq_group_rel(struct idpf_q_vec_rsrc *rsrc)
1361 {
1362 	if (!rsrc->rxq_grps)
1363 		return;
1364 
1365 	for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
1366 		struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
1367 		u16 num_rxq;
1368 
1369 		if (idpf_is_queue_model_split(rsrc->rxq_model)) {
1370 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
1371 			for (unsigned int j = 0; j < num_rxq; j++) {
1372 				kfree(rx_qgrp->splitq.rxq_sets[j]);
1373 				rx_qgrp->splitq.rxq_sets[j] = NULL;
1374 			}
1375 
1376 			idpf_rxq_sw_queue_rel(rx_qgrp);
1377 			kfree(rx_qgrp->splitq.bufq_sets);
1378 			rx_qgrp->splitq.bufq_sets = NULL;
1379 		} else {
1380 			num_rxq = rx_qgrp->singleq.num_rxq;
1381 			for (unsigned int j = 0; j < num_rxq; j++) {
1382 				kfree(rx_qgrp->singleq.rxqs[j]);
1383 				rx_qgrp->singleq.rxqs[j] = NULL;
1384 			}
1385 		}
1386 	}
1387 	kfree(rsrc->rxq_grps);
1388 	rsrc->rxq_grps = NULL;
1389 }
1390 
1391 /**
1392  * idpf_vport_queue_grp_rel_all - Release all queue groups
1393  * @rsrc: pointer to queue and vector resources holding the Tx and Rx
1394  *	  queue groups to be released
1395  */
1396 static void idpf_vport_queue_grp_rel_all(struct idpf_q_vec_rsrc *rsrc)
1397 {
1398 	idpf_txq_group_rel(rsrc);
1399 	idpf_rxq_group_rel(rsrc);
1400 }
1401 
1402 /**
1403  * idpf_vport_queues_rel - Free memory for all queues
1404  * @vport: virtual port
1405  * @rsrc: pointer to queue and vector resources
1406  *
1407  * Free the memory allocated for queues associated to a vport
1408  */
1409 void idpf_vport_queues_rel(struct idpf_vport *vport,
1410 			   struct idpf_q_vec_rsrc *rsrc)
1411 {
1412 	idpf_xdp_copy_prog_to_rqs(rsrc, NULL);
1413 
1414 	idpf_tx_desc_rel_all(rsrc);
1415 	idpf_rx_desc_rel_all(rsrc);
1416 
1417 	idpf_xdpsqs_put(vport);
1418 	idpf_vport_queue_grp_rel_all(rsrc);
1419 
1420 	kfree(vport->txqs);
1421 	vport->txqs = NULL;
1422 }
1423 
1424 /**
1425  * idpf_vport_init_fast_path_txqs - Initialize fast path txq array
1426  * @vport: vport to init txqs on
1427  * @rsrc: pointer to queue and vector resources
1428  *
1429  * We get a queue index from skb->queue_mapping and need a fast way to get
1430  * from it to the corresponding txq without walking the queue groups; the
1431  * flat txqs array provides that lookup.
1432  *
1433  * Return: 0 on success, negative on failure
1434  */
1435 static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport,
1436 					  struct idpf_q_vec_rsrc *rsrc)
1437 {
1438 	struct idpf_ptp_vport_tx_tstamp_caps *caps = vport->tx_tstamp_caps;
1439 	struct work_struct *tstamp_task = &vport->tstamp_task;
1440 	int k = 0;
1441 
1442 	vport->txqs = kcalloc(rsrc->num_txq, sizeof(*vport->txqs),
1443 			      GFP_KERNEL);
1444 	if (!vport->txqs)
1445 		return -ENOMEM;
1446 
1447 	vport->num_txq = rsrc->num_txq;
1448 	for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) {
1449 		struct idpf_txq_group *tx_grp = &rsrc->txq_grps[i];
1450 
1451 		for (unsigned int j = 0; j < tx_grp->num_txq; j++, k++) {
1452 			vport->txqs[k] = tx_grp->txqs[j];
1453 			vport->txqs[k]->idx = k;
1454 
1455 			if (!caps)
1456 				continue;
1457 
1458 			vport->txqs[k]->cached_tstamp_caps = caps;
1459 			vport->txqs[k]->tstamp_task = tstamp_task;
1460 		}
1461 	}
1462 
1463 	return 0;
1464 }
1465 
1466 /**
1467  * idpf_vport_init_num_qs - Initialize number of queues
1468  * @vport: vport to initialize queues
1469  * @vport_msg: data to be filled into vport
1470  * @rsrc: pointer to queue and vector resources
1471  */
1472 void idpf_vport_init_num_qs(struct idpf_vport *vport,
1473 			    struct virtchnl2_create_vport *vport_msg,
1474 			    struct idpf_q_vec_rsrc *rsrc)
1475 {
1476 	struct idpf_vport_user_config_data *config_data;
1477 	u16 idx = vport->idx;
1478 
1479 	config_data = &vport->adapter->vport_config[idx]->user_config;
1480 	rsrc->num_txq = le16_to_cpu(vport_msg->num_tx_q);
1481 	rsrc->num_rxq = le16_to_cpu(vport_msg->num_rx_q);
1482 	/* The number of txqs and rxqs in config data will be zero only in the
1483 	 * driver load path, and we don't update them thereafter.
1484 	 */
1485 	if (!config_data->num_req_tx_qs && !config_data->num_req_rx_qs) {
1486 		config_data->num_req_tx_qs = le16_to_cpu(vport_msg->num_tx_q);
1487 		config_data->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q);
1488 	}
1489 
1490 	if (idpf_is_queue_model_split(rsrc->txq_model))
1491 		rsrc->num_complq = le16_to_cpu(vport_msg->num_tx_complq);
1492 	if (idpf_is_queue_model_split(rsrc->rxq_model))
1493 		rsrc->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq);
1494 
1495 	vport->xdp_prog = config_data->xdp_prog;
1496 	if (idpf_xdp_enabled(vport)) {
1497 		rsrc->xdp_txq_offset = config_data->num_req_tx_qs;
1498 		vport->num_xdp_txq = le16_to_cpu(vport_msg->num_tx_q) -
1499 				     rsrc->xdp_txq_offset;
1500 		vport->xdpsq_share = libeth_xdpsq_shared(vport->num_xdp_txq);
1501 	} else {
1502 		rsrc->xdp_txq_offset = 0;
1503 		vport->num_xdp_txq = 0;
1504 		vport->xdpsq_share = false;
1505 	}
1506 
1507 	/* Adjust number of buffer queues per Rx queue group. */
1508 	if (!idpf_is_queue_model_split(rsrc->rxq_model)) {
1509 		rsrc->num_bufqs_per_qgrp = 0;
1510 
1511 		return;
1512 	}
1513 
1514 	rsrc->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP;
1515 }
1516 
1517 /**
1518  * idpf_vport_calc_num_q_desc - Calculate number of queue descriptors
1519  * @vport: vport to calculate queue descriptor counts for
1520  * @rsrc: pointer to queue and vector resources
1521  */
1522 void idpf_vport_calc_num_q_desc(struct idpf_vport *vport,
1523 				struct idpf_q_vec_rsrc *rsrc)
1524 {
1525 	struct idpf_vport_user_config_data *config_data;
1526 	u8 num_bufqs = rsrc->num_bufqs_per_qgrp;
1527 	u32 num_req_txq_desc, num_req_rxq_desc;
1528 	u16 idx = vport->idx;
1529 
1530 	config_data = &vport->adapter->vport_config[idx]->user_config;
1531 	num_req_txq_desc = config_data->num_req_txq_desc;
1532 	num_req_rxq_desc = config_data->num_req_rxq_desc;
1533 
1534 	rsrc->complq_desc_count = 0;
1535 	if (num_req_txq_desc) {
1536 		rsrc->txq_desc_count = num_req_txq_desc;
1537 		if (idpf_is_queue_model_split(rsrc->txq_model)) {
1538 			rsrc->complq_desc_count = num_req_txq_desc;
1539 			if (rsrc->complq_desc_count < IDPF_MIN_TXQ_COMPLQ_DESC)
1540 				rsrc->complq_desc_count =
1541 					IDPF_MIN_TXQ_COMPLQ_DESC;
1542 		}
1543 	} else {
1544 		rsrc->txq_desc_count = IDPF_DFLT_TX_Q_DESC_COUNT;
1545 		if (idpf_is_queue_model_split(rsrc->txq_model))
1546 			rsrc->complq_desc_count =
1547 				IDPF_DFLT_TX_COMPLQ_DESC_COUNT;
1548 	}
1549 
1550 	if (num_req_rxq_desc)
1551 		rsrc->rxq_desc_count = num_req_rxq_desc;
1552 	else
1553 		rsrc->rxq_desc_count = IDPF_DFLT_RX_Q_DESC_COUNT;
1554 
1555 	for (unsigned int i = 0; i < num_bufqs; i++) {
1556 		if (!rsrc->bufq_desc_count[i])
1557 			rsrc->bufq_desc_count[i] =
1558 				IDPF_RX_BUFQ_DESC_COUNT(rsrc->rxq_desc_count,
1559 							num_bufqs);
1560 	}
1561 }
1562 
1563 /**
1564  * idpf_vport_calc_total_qs - Calculate total number of queues
1565  * @adapter: private data struct
1566  * @vport_idx: vport idx to retrieve vport pointer
1567  * @vport_msg: message to fill with data
1568  * @max_q: vport max queue info
1569  *
1570  * Return: 0 on success, error value on failure.
1571  */
1572 int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx,
1573 			     struct virtchnl2_create_vport *vport_msg,
1574 			     struct idpf_vport_max_q *max_q)
1575 {
1576 	int dflt_splitq_txq_grps = 0, dflt_singleq_txqs = 0;
1577 	int dflt_splitq_rxq_grps = 0, dflt_singleq_rxqs = 0;
1578 	u16 num_req_tx_qs = 0, num_req_rx_qs = 0;
1579 	struct idpf_vport_user_config_data *user;
1580 	struct idpf_vport_config *vport_config;
1581 	u16 num_txq_grps, num_rxq_grps;
1582 	u32 num_qs, num_xdpsq;
1583 
1584 	vport_config = adapter->vport_config[vport_idx];
1585 	if (vport_config) {
1586 		num_req_tx_qs = vport_config->user_config.num_req_tx_qs;
1587 		num_req_rx_qs = vport_config->user_config.num_req_rx_qs;
1588 	} else {
1589 		u32 num_cpus = netif_get_num_default_rss_queues();
1590 
1591 		dflt_splitq_txq_grps = min_t(int, max_q->max_txq, num_cpus);
1592 		dflt_singleq_txqs = min_t(int, max_q->max_txq, num_cpus);
1593 		dflt_splitq_rxq_grps = min_t(int, max_q->max_rxq, num_cpus);
1594 		dflt_singleq_rxqs = min_t(int, max_q->max_rxq, num_cpus);
1595 	}
1596 
1597 	if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->txq_model))) {
1598 		num_txq_grps = num_req_tx_qs ? num_req_tx_qs : dflt_splitq_txq_grps;
1599 		vport_msg->num_tx_complq = cpu_to_le16(num_txq_grps *
1600 						       IDPF_COMPLQ_PER_GROUP);
1601 		vport_msg->num_tx_q = cpu_to_le16(num_txq_grps *
1602 						  IDPF_DFLT_SPLITQ_TXQ_PER_GROUP);
1603 	} else {
1604 		num_txq_grps = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
1605 		num_qs = num_txq_grps * (num_req_tx_qs ? num_req_tx_qs :
1606 					 dflt_singleq_txqs);
1607 		vport_msg->num_tx_q = cpu_to_le16(num_qs);
1608 		vport_msg->num_tx_complq = 0;
1609 	}
1610 	if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->rxq_model))) {
1611 		num_rxq_grps = num_req_rx_qs ? num_req_rx_qs : dflt_splitq_rxq_grps;
1612 		vport_msg->num_rx_bufq = cpu_to_le16(num_rxq_grps *
1613 						     IDPF_MAX_BUFQS_PER_RXQ_GRP);
1614 		vport_msg->num_rx_q = cpu_to_le16(num_rxq_grps *
1615 						  IDPF_DFLT_SPLITQ_RXQ_PER_GROUP);
1616 	} else {
1617 		num_rxq_grps = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
1618 		num_qs = num_rxq_grps * (num_req_rx_qs ? num_req_rx_qs :
1619 					 dflt_singleq_rxqs);
1620 		vport_msg->num_rx_q = cpu_to_le16(num_qs);
1621 		vport_msg->num_rx_bufq = 0;
1622 	}
1623 
1624 	if (!vport_config)
1625 		return 0;
1626 
1627 	user = &vport_config->user_config;
1628 	user->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q);
1629 	user->num_req_tx_qs = le16_to_cpu(vport_msg->num_tx_q);
1630 
1631 	if (vport_config->user_config.xdp_prog)
1632 		num_xdpsq = libeth_xdpsq_num(user->num_req_rx_qs,
1633 					     user->num_req_tx_qs,
1634 					     vport_config->max_q.max_txq);
1635 	else
1636 		num_xdpsq = 0;
1637 
1638 	vport_msg->num_tx_q = cpu_to_le16(user->num_req_tx_qs + num_xdpsq);
1639 	if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->txq_model)))
1640 		vport_msg->num_tx_complq = vport_msg->num_tx_q;
1641 
1642 	return 0;
1643 }
1644 
1645 /**
1646  * idpf_vport_calc_num_q_groups - Calculate number of queue groups
1647  * @rsrc: pointer to queue and vector resources
1648  */
1649 void idpf_vport_calc_num_q_groups(struct idpf_q_vec_rsrc *rsrc)
1650 {
1651 	if (idpf_is_queue_model_split(rsrc->txq_model))
1652 		rsrc->num_txq_grp = rsrc->num_txq;
1653 	else
1654 		rsrc->num_txq_grp = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
1655 
1656 	if (idpf_is_queue_model_split(rsrc->rxq_model))
1657 		rsrc->num_rxq_grp = rsrc->num_rxq;
1658 	else
1659 		rsrc->num_rxq_grp = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
1660 }
1661 
1662 /**
1663  * idpf_vport_calc_numq_per_grp - Calculate number of queues per group
1664  * @rsrc: pointer to queue and vector resources
1665  * @num_txq: return parameter for number of TX queues
1666  * @num_rxq: return parameter for number of RX queues
1667  */
1668 static void idpf_vport_calc_numq_per_grp(struct idpf_q_vec_rsrc *rsrc,
1669 					 u16 *num_txq, u16 *num_rxq)
1670 {
1671 	if (idpf_is_queue_model_split(rsrc->txq_model))
1672 		*num_txq = IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;
1673 	else
1674 		*num_txq = rsrc->num_txq;
1675 
1676 	if (idpf_is_queue_model_split(rsrc->rxq_model))
1677 		*num_rxq = IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
1678 	else
1679 		*num_rxq = rsrc->num_rxq;
1680 }
1681 
1682 /**
1683  * idpf_rxq_set_descids - set the descids supported by this queue
1684  * @rsrc: pointer to queue and vector resources
1685  * @q: Rx queue for which descids are set
1686  */
1688 static void idpf_rxq_set_descids(struct idpf_q_vec_rsrc *rsrc,
1689 				 struct idpf_rx_queue *q)
1690 {
1691 	if (idpf_is_queue_model_split(rsrc->rxq_model))
1692 		return;
1693 
1694 	if (rsrc->base_rxd)
1695 		q->rxdids = VIRTCHNL2_RXDID_1_32B_BASE_M;
1696 	else
1697 		q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
1698 }
1699 
1700 /**
1701  * idpf_txq_group_alloc - Allocate all txq group resources
1702  * @vport: vport to allocate txq groups for
1703  * @rsrc: pointer to queue and vector resources
1704  * @num_txq: number of txqs to allocate for each group
1705  *
1706  * Return: 0 on success, negative on failure
1707  */
1708 static int idpf_txq_group_alloc(struct idpf_vport *vport,
1709 				struct idpf_q_vec_rsrc *rsrc,
1710 				u16 num_txq)
1711 {
1712 	bool split, flow_sch_en;
1713 
1714 	rsrc->txq_grps = kcalloc(rsrc->num_txq_grp,
1715 				 sizeof(*rsrc->txq_grps), GFP_KERNEL);
1716 	if (!rsrc->txq_grps)
1717 		return -ENOMEM;
1718 
1719 	split = idpf_is_queue_model_split(rsrc->txq_model);
1720 	flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
1721 				       VIRTCHNL2_CAP_SPLITQ_QSCHED);
1722 
1723 	for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) {
1724 		struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
1725 		struct idpf_adapter *adapter = vport->adapter;
1726 
1727 		tx_qgrp->vport = vport;
1728 		tx_qgrp->num_txq = num_txq;
1729 
1730 		for (unsigned int j = 0; j < tx_qgrp->num_txq; j++) {
1731 			tx_qgrp->txqs[j] = kzalloc(sizeof(*tx_qgrp->txqs[j]),
1732 						   GFP_KERNEL);
1733 			if (!tx_qgrp->txqs[j])
1734 				goto err_alloc;
1735 		}
1736 
1737 		for (unsigned int j = 0; j < tx_qgrp->num_txq; j++) {
1738 			struct idpf_tx_queue *q = tx_qgrp->txqs[j];
1739 
1740 			q->dev = &adapter->pdev->dev;
1741 			q->desc_count = rsrc->txq_desc_count;
1742 			q->tx_max_bufs = idpf_get_max_tx_bufs(adapter);
1743 			q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter);
1744 			q->netdev = vport->netdev;
1745 			q->txq_grp = tx_qgrp;
1746 			q->rel_q_id = j;
1747 
1748 			if (!split) {
1749 				q->clean_budget = vport->compln_clean_budget;
1750 				idpf_queue_assign(CRC_EN, q,
1751 						  vport->crc_enable);
1752 			}
1753 
1754 			if (!flow_sch_en)
1755 				continue;
1756 
1757 			idpf_queue_set(FLOW_SCH_EN, q);
1758 
1759 			q->refillq = kzalloc(sizeof(*q->refillq), GFP_KERNEL);
1760 			if (!q->refillq)
1761 				goto err_alloc;
1762 
1763 			idpf_queue_set(GEN_CHK, q->refillq);
1764 			idpf_queue_set(RFL_GEN_CHK, q->refillq);
1765 		}
1766 
1767 		if (!split)
1768 			continue;
1769 
1770 		tx_qgrp->complq = kcalloc(IDPF_COMPLQ_PER_GROUP,
1771 					  sizeof(*tx_qgrp->complq),
1772 					  GFP_KERNEL);
1773 		if (!tx_qgrp->complq)
1774 			goto err_alloc;
1775 
1776 		tx_qgrp->complq->desc_count = rsrc->complq_desc_count;
1777 		tx_qgrp->complq->txq_grp = tx_qgrp;
1778 		tx_qgrp->complq->netdev = vport->netdev;
1779 		tx_qgrp->complq->clean_budget = vport->compln_clean_budget;
1780 
1781 		if (flow_sch_en)
1782 			idpf_queue_set(FLOW_SCH_EN, tx_qgrp->complq);
1783 	}
1784 
1785 	return 0;
1786 
1787 err_alloc:
1788 	idpf_txq_group_rel(rsrc);
1789 
1790 	return -ENOMEM;
1791 }
1792 
1793 /**
1794  * idpf_rxq_group_alloc - Allocate all rxq group resources
1795  * @vport: vport to allocate rxq groups for
1796  * @rsrc: pointer to queue and vector resources
1797  * @num_rxq: number of rxqs to allocate for each group
1798  *
1799  * Return: 0 on success, negative on failure
1800  */
1801 static int idpf_rxq_group_alloc(struct idpf_vport *vport,
1802 				struct idpf_q_vec_rsrc *rsrc,
1803 				u16 num_rxq)
1804 {
1805 	struct idpf_adapter *adapter = vport->adapter;
1806 	bool hs, rsc;
1807 	int err = 0;
1808 
1809 	rsrc->rxq_grps = kcalloc(rsrc->num_rxq_grp,
1810 				 sizeof(struct idpf_rxq_group), GFP_KERNEL);
1811 	if (!rsrc->rxq_grps)
1812 		return -ENOMEM;
1813 
1814 	hs = idpf_vport_get_hsplit(vport) == ETHTOOL_TCP_DATA_SPLIT_ENABLED;
1815 	rsc = idpf_is_feature_ena(vport, NETIF_F_GRO_HW);
1816 
1817 	for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
1818 		struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
1819 
1820 		rx_qgrp->vport = vport;
1821 		if (!idpf_is_queue_model_split(rsrc->rxq_model)) {
1822 			rx_qgrp->singleq.num_rxq = num_rxq;
1823 			for (unsigned int j = 0; j < num_rxq; j++) {
1824 				rx_qgrp->singleq.rxqs[j] =
1825 						kzalloc(sizeof(*rx_qgrp->singleq.rxqs[j]),
1826 							GFP_KERNEL);
1827 				if (!rx_qgrp->singleq.rxqs[j]) {
1828 					err = -ENOMEM;
1829 					goto err_alloc;
1830 				}
1831 			}
1832 			goto skip_splitq_rx_init;
1833 		}
1834 		rx_qgrp->splitq.num_rxq_sets = num_rxq;
1835 
1836 		for (unsigned int j = 0; j < num_rxq; j++) {
1837 			rx_qgrp->splitq.rxq_sets[j] =
1838 				kzalloc(sizeof(struct idpf_rxq_set),
1839 					GFP_KERNEL);
1840 			if (!rx_qgrp->splitq.rxq_sets[j]) {
1841 				err = -ENOMEM;
1842 				goto err_alloc;
1843 			}
1844 		}
1845 
1846 		rx_qgrp->splitq.bufq_sets = kcalloc(rsrc->num_bufqs_per_qgrp,
1847 						    sizeof(struct idpf_bufq_set),
1848 						    GFP_KERNEL);
1849 		if (!rx_qgrp->splitq.bufq_sets) {
1850 			err = -ENOMEM;
1851 			goto err_alloc;
1852 		}
1853 		rx_qgrp->splitq.num_bufq_sets = rsrc->num_bufqs_per_qgrp;
1854 
1855 		for (unsigned int j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
1856 			struct idpf_bufq_set *bufq_set =
1857 				&rx_qgrp->splitq.bufq_sets[j];
1858 			int swq_size = sizeof(struct idpf_sw_queue);
1859 			struct idpf_buf_queue *q;
1860 
1861 			q = &rx_qgrp->splitq.bufq_sets[j].bufq;
1862 			q->desc_count = rsrc->bufq_desc_count[j];
1863 			q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
1864 
1865 			idpf_queue_assign(HSPLIT_EN, q, hs);
1866 			idpf_queue_assign(RSC_EN, q, rsc);
1867 
1868 			bufq_set->num_refillqs = num_rxq;
1869 			bufq_set->refillqs = kcalloc(num_rxq, swq_size,
1870 						     GFP_KERNEL);
1871 			if (!bufq_set->refillqs) {
1872 				err = -ENOMEM;
1873 				goto err_alloc;
1874 			}
1875 			for (unsigned int k = 0; k < bufq_set->num_refillqs; k++) {
1876 				struct idpf_sw_queue *refillq =
1877 					&bufq_set->refillqs[k];
1878 
1879 				refillq->desc_count =
1880 					rsrc->bufq_desc_count[j];
1881 				idpf_queue_set(GEN_CHK, refillq);
1882 				idpf_queue_set(RFL_GEN_CHK, refillq);
1883 				refillq->ring = kcalloc(refillq->desc_count,
1884 							sizeof(*refillq->ring),
1885 							GFP_KERNEL);
1886 				if (!refillq->ring) {
1887 					err = -ENOMEM;
1888 					goto err_alloc;
1889 				}
1890 			}
1891 		}
1892 
1893 skip_splitq_rx_init:
1894 		for (unsigned int j = 0; j < num_rxq; j++) {
1895 			struct idpf_rx_queue *q;
1896 
1897 			if (!idpf_is_queue_model_split(rsrc->rxq_model)) {
1898 				q = rx_qgrp->singleq.rxqs[j];
1899 				q->rx_ptype_lkup = adapter->singleq_pt_lkup;
1900 				goto setup_rxq;
1901 			}
1902 			q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
1903 			rx_qgrp->splitq.rxq_sets[j]->refillq[0] =
1904 			      &rx_qgrp->splitq.bufq_sets[0].refillqs[j];
1905 			if (rsrc->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP)
1906 				rx_qgrp->splitq.rxq_sets[j]->refillq[1] =
1907 				      &rx_qgrp->splitq.bufq_sets[1].refillqs[j];
1908 
1909 			idpf_queue_assign(HSPLIT_EN, q, hs);
1910 			idpf_queue_assign(RSC_EN, q, rsc);
1911 			q->rx_ptype_lkup = adapter->splitq_pt_lkup;
1912 
1913 setup_rxq:
1914 			q->desc_count = rsrc->rxq_desc_count;
1915 			q->bufq_sets = rx_qgrp->splitq.bufq_sets;
1916 			q->idx = (i * num_rxq) + j;
1917 			q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
1918 			q->rx_max_pkt_size = vport->netdev->mtu +
1919 							LIBETH_RX_LL_LEN;
1920 			idpf_rxq_set_descids(rsrc, q);
1921 		}
1922 	}
1923 
1924 err_alloc:
1925 	if (err)
1926 		idpf_rxq_group_rel(rsrc);
1927 
1928 	return err;
1929 }
1930 
1931 /**
1932  * idpf_vport_queue_grp_alloc_all - Allocate all queue groups/resources
1933  * @vport: vport with qgrps to allocate
1934  * @rsrc: pointer to queue and vector resources
1935  *
1936  * Return: 0 on success, negative on failure
1937  */
1938 static int idpf_vport_queue_grp_alloc_all(struct idpf_vport *vport,
1939 					  struct idpf_q_vec_rsrc *rsrc)
1940 {
1941 	u16 num_txq, num_rxq;
1942 	int err;
1943 
1944 	idpf_vport_calc_numq_per_grp(rsrc, &num_txq, &num_rxq);
1945 
1946 	err = idpf_txq_group_alloc(vport, rsrc, num_txq);
1947 	if (err)
1948 		goto err_out;
1949 
1950 	err = idpf_rxq_group_alloc(vport, rsrc, num_rxq);
1951 	if (err)
1952 		goto err_out;
1953 
1954 	return 0;
1955 
1956 err_out:
1957 	idpf_vport_queue_grp_rel_all(rsrc);
1958 
1959 	return err;
1960 }
1961 
1962 /**
1963  * idpf_vport_queues_alloc - Allocate memory for all queues
1964  * @vport: virtual port
1965  * @rsrc: pointer to queue and vector resources
1966  *
1967  * Allocate memory for queues associated with a vport.
1968  *
1969  * Return: 0 on success, negative on failure.
1970  */
1971 int idpf_vport_queues_alloc(struct idpf_vport *vport,
1972 			    struct idpf_q_vec_rsrc *rsrc)
1973 {
1974 	int err;
1975 
1976 	err = idpf_vport_queue_grp_alloc_all(vport, rsrc);
1977 	if (err)
1978 		goto err_out;
1979 
1980 	err = idpf_vport_init_fast_path_txqs(vport, rsrc);
1981 	if (err)
1982 		goto err_out;
1983 
1984 	err = idpf_xdpsqs_get(vport);
1985 	if (err)
1986 		goto err_out;
1987 
1988 	err = idpf_tx_desc_alloc_all(vport, rsrc);
1989 	if (err)
1990 		goto err_out;
1991 
1992 	err = idpf_rx_desc_alloc_all(vport, rsrc);
1993 	if (err)
1994 		goto err_out;
1995 
1996 	return 0;
1997 
1998 err_out:
1999 	idpf_vport_queues_rel(vport, rsrc);
2000 
2001 	return err;
2002 }
2003 
2004 /**
2005  * idpf_tx_read_tstamp - schedule a work to read Tx timestamp value
2006  * @txq: queue to read the timestamp from
2007  * @skb: socket buffer to provide Tx timestamp value
2008  *
2009  * Schedule a work to read Tx timestamp value generated once the packet is
2010  * transmitted.
2011  */
2012 static void idpf_tx_read_tstamp(struct idpf_tx_queue *txq, struct sk_buff *skb)
2013 {
2014 	struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
2015 	struct idpf_ptp_tx_tstamp_status *tx_tstamp_status;
2016 
2017 	tx_tstamp_caps = txq->cached_tstamp_caps;
2018 	spin_lock_bh(&tx_tstamp_caps->status_lock);
2019 
2020 	for (u32 i = 0; i < tx_tstamp_caps->num_entries; i++) {
2021 		tx_tstamp_status = &tx_tstamp_caps->tx_tstamp_status[i];
2022 		if (tx_tstamp_status->state != IDPF_PTP_FREE)
2023 			continue;
2024 
2025 		tx_tstamp_status->skb = skb;
2026 		tx_tstamp_status->state = IDPF_PTP_REQUEST;
2027 
2028 		/* Fetch timestamp from completion descriptor through
2029 		 * virtchnl msg to report to the stack.
2030 		 */
2031 		queue_work(system_unbound_wq, txq->tstamp_task);
2032 		break;
2033 	}
2034 
2035 	spin_unlock_bh(&tx_tstamp_caps->status_lock);
2036 }
2037 
2038 #define idpf_tx_splitq_clean_bump_ntc(txq, ntc, desc, buf)	\
2039 do {								\
2040 	if (unlikely(++(ntc) == (txq)->desc_count)) {		\
2041 		ntc = 0;					\
2042 		buf = (txq)->tx_buf;				\
2043 		desc = &(txq)->flex_tx[0];			\
2044 	} else {						\
2045 		(buf)++;					\
2046 		(desc)++;					\
2047 	}							\
2048 } while (0)
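
/*
 * Illustrative sketch (not driver code): the macro above advances the ring
 * index together with the descriptor and buffer cursors, wrapping all three
 * back to the start of the ring.  A plain-C equivalent of the index part,
 * assuming a hypothetical ring of @count entries, is simply:
 *
 *	static inline u32 ring_bump(u32 idx, u32 count)
 *	{
 *		return ++idx == count ? 0 : idx;
 *	}
 *
 *	ntc = ring_bump(ntc, txq->desc_count);
 *	desc = &txq->flex_tx[ntc];
 *	buf = &txq->tx_buf[ntc];
 *
 * The macro open-codes this so that the common, non-wrapping case only
 * increments the two pointers instead of re-indexing the arrays.
 */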
2049 
2050 /**
2051  * idpf_tx_splitq_clean - Reclaim resources from buffer queue
2052  * @tx_q: Tx queue to clean
2053  * @end: queue index until which it should be cleaned
2054  * @napi_budget: Used to determine if we are in netpoll
2055  * @cleaned: pointer to stats struct to track cleaned packets/bytes
2056  * @descs_only: true if queue is using flow-based scheduling and should
2057  * not clean buffers at this time
2058  *
2059  * Cleans the queue descriptor ring. If the queue is using queue-based
2060  * scheduling, the buffers will be cleaned as well. If the queue is using
2061  * flow-based scheduling, only the descriptors are cleaned at this time.
2062  * Separate packet completion events will be reported on the completion queue,
2063  * and the buffers will be cleaned separately. The stats are not updated from
2064  * this function when using flow-based scheduling.
2065  */
2066 static void idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
2067 				 int napi_budget,
2068 				 struct libeth_sq_napi_stats *cleaned,
2069 				 bool descs_only)
2070 {
2071 	union idpf_tx_flex_desc *next_pending_desc = NULL;
2072 	union idpf_tx_flex_desc *tx_desc;
2073 	u32 ntc = tx_q->next_to_clean;
2074 	struct libeth_cq_pp cp = {
2075 		.dev	= tx_q->dev,
2076 		.ss	= cleaned,
2077 		.napi	= napi_budget,
2078 	};
2079 	struct idpf_tx_buf *tx_buf;
2080 
2081 	if (descs_only) {
2082 		/* Bump ring index to mark as cleaned. */
2083 		tx_q->next_to_clean = end;
2084 		return;
2085 	}
2086 
2087 	tx_desc = &tx_q->flex_tx[ntc];
2088 	next_pending_desc = &tx_q->flex_tx[end];
2089 	tx_buf = &tx_q->tx_buf[ntc];
2090 
2091 	while (tx_desc != next_pending_desc) {
2092 		u32 eop_idx;
2093 
2094 		/* If this entry in the ring was used as a context descriptor,
2095 		 * its corresponding entry in the buffer ring is reserved. We
2096 		 * can skip this descriptor since there is no buffer to clean.
2097 		 */
2098 		if (tx_buf->type <= LIBETH_SQE_CTX)
2099 			goto fetch_next_txq_desc;
2100 
2101 		if (unlikely(tx_buf->type != LIBETH_SQE_SKB))
2102 			break;
2103 
2104 		eop_idx = tx_buf->rs_idx;
2105 		libeth_tx_complete(tx_buf, &cp);
2106 
2107 		/* unmap remaining buffers */
2108 		while (ntc != eop_idx) {
2109 			idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
2110 						      tx_desc, tx_buf);
2111 
2112 			/* unmap any remaining paged data */
2113 			libeth_tx_complete(tx_buf, &cp);
2114 		}
2115 
2116 fetch_next_txq_desc:
2117 		idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, tx_desc, tx_buf);
2118 	}
2119 
2120 	tx_q->next_to_clean = ntc;
2121 }
2122 
2123 /**
2124  * idpf_tx_clean_bufs - clean flow scheduling TX queue buffers
2125  * @txq: queue to clean
2126  * @buf_id: packet's starting buffer ID, from completion descriptor
2127  * @cleaned: pointer to stats struct to track cleaned packets/bytes
2128  * @budget: Used to determine if we are in netpoll
2129  *
2130  * Clean all buffers associated with the packet starting at @buf_id. The
2131  * cleaned byte/segment counts are accumulated in @cleaned.
2132  */
2133 static void idpf_tx_clean_bufs(struct idpf_tx_queue *txq, u32 buf_id,
2134 			       struct libeth_sq_napi_stats *cleaned,
2135 			       int budget)
2136 {
2137 	struct idpf_tx_buf *tx_buf = NULL;
2138 	struct libeth_cq_pp cp = {
2139 		.dev	= txq->dev,
2140 		.ss	= cleaned,
2141 		.napi	= budget,
2142 	};
2143 
2144 	tx_buf = &txq->tx_buf[buf_id];
2145 	if (tx_buf->type == LIBETH_SQE_SKB) {
2146 		if (skb_shinfo(tx_buf->skb)->tx_flags & SKBTX_IN_PROGRESS)
2147 			idpf_tx_read_tstamp(txq, tx_buf->skb);
2148 
2149 		libeth_tx_complete(tx_buf, &cp);
2150 		idpf_post_buf_refill(txq->refillq, buf_id);
2151 	}
2152 
2153 	while (idpf_tx_buf_next(tx_buf) != IDPF_TXBUF_NULL) {
2154 		buf_id = idpf_tx_buf_next(tx_buf);
2155 
2156 		tx_buf = &txq->tx_buf[buf_id];
2157 		libeth_tx_complete(tx_buf, &cp);
2158 		idpf_post_buf_refill(txq->refillq, buf_id);
2159 	}
2160 }
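
/*
 * Illustrative sketch (not driver code): with flow scheduling, the buffers of
 * a packet form a singly linked chain inside the buffer array itself.
 * idpf_tx_buf_next() stores the next buffer ID in each entry's priv field and
 * IDPF_TXBUF_NULL terminates the chain, so walking a packet, assuming a
 * hypothetical visit() helper, reduces to:
 *
 *	for (u32 id = first_id; id != IDPF_TXBUF_NULL;
 *	     id = idpf_tx_buf_next(&txq->tx_buf[id]))
 *		visit(&txq->tx_buf[id]);
 *
 * idpf_tx_clean_bufs() above performs essentially this walk, completing each
 * buffer and returning its ID to the refill queue.
 */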
2161 
2162 /**
2163  * idpf_tx_handle_rs_completion - clean a single packet and all of its buffers
2164  * whether on the buffer ring or in the hash table
2165  * @txq: Tx ring to clean
2166  * @desc: pointer to completion queue descriptor to extract completion
2167  * information from
2168  * @cleaned: pointer to stats struct to track cleaned packets/bytes
2169  * @budget: Used to determine if we are in netpoll
2170  *
2171  * The cleaned bytes/packets are accumulated in @cleaned.
2172  */
2173 static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq,
2174 					 struct idpf_splitq_tx_compl_desc *desc,
2175 					 struct libeth_sq_napi_stats *cleaned,
2176 					 int budget)
2177 {
2178 	/* RS completion contains queue head for queue based scheduling or
2179 	 * completion tag for flow based scheduling.
2180 	 */
2181 	u16 rs_compl_val = le16_to_cpu(desc->common.q_head_compl_tag.q_head);
2182 
2183 	if (!idpf_queue_has(FLOW_SCH_EN, txq)) {
2184 		idpf_tx_splitq_clean(txq, rs_compl_val, budget, cleaned, false);
2185 		return;
2186 	}
2187 
2188 	idpf_tx_clean_bufs(txq, rs_compl_val, cleaned, budget);
2189 }
2190 
2191 /**
2192  * idpf_tx_clean_complq - Reclaim resources on completion queue
2193  * @complq: Tx ring to clean
2194  * @budget: Used to determine if we are in netpoll
2195  * @cleaned: returns number of packets cleaned
2196  *
2197  * Return: %true if there's any budget left (i.e. the clean is finished)
2198  */
2199 static bool idpf_tx_clean_complq(struct idpf_compl_queue *complq, int budget,
2200 				 int *cleaned)
2201 {
2202 	struct idpf_splitq_tx_compl_desc *tx_desc;
2203 	s16 ntc = complq->next_to_clean;
2204 	struct idpf_netdev_priv *np;
2205 	unsigned int complq_budget;
2206 	bool complq_ok = true;
2207 	int i;
2208 
2209 	complq_budget = complq->clean_budget;
2210 	tx_desc = &complq->comp[ntc];
2211 	ntc -= complq->desc_count;
2212 
2213 	do {
2214 		struct libeth_sq_napi_stats cleaned_stats = { };
2215 		struct idpf_tx_queue *tx_q;
2216 		__le16 hw_head;
2217 		int rel_tx_qid;
2218 		u8 ctype;	/* completion type */
2219 		u16 gen;
2220 
2221 		/* if the descriptor isn't done, no work yet to do */
2222 		gen = le16_get_bits(tx_desc->common.qid_comptype_gen,
2223 				    IDPF_TXD_COMPLQ_GEN_M);
2224 		if (idpf_queue_has(GEN_CHK, complq) != gen)
2225 			break;
2226 
2227 		/* Find necessary info of TX queue to clean buffers */
2228 		rel_tx_qid = le16_get_bits(tx_desc->common.qid_comptype_gen,
2229 					   IDPF_TXD_COMPLQ_QID_M);
2230 		if (rel_tx_qid >= complq->txq_grp->num_txq ||
2231 		    !complq->txq_grp->txqs[rel_tx_qid]) {
2232 			netdev_err(complq->netdev, "TxQ not found\n");
2233 			goto fetch_next_desc;
2234 		}
2235 		tx_q = complq->txq_grp->txqs[rel_tx_qid];
2236 
2237 		/* Determine completion type */
2238 		ctype = le16_get_bits(tx_desc->common.qid_comptype_gen,
2239 				      IDPF_TXD_COMPLQ_COMPL_TYPE_M);
2240 		switch (ctype) {
2241 		case IDPF_TXD_COMPLT_RE:
2242 			hw_head = tx_desc->common.q_head_compl_tag.q_head;
2243 
2244 			idpf_tx_splitq_clean(tx_q, le16_to_cpu(hw_head),
2245 					     budget, &cleaned_stats, true);
2246 			break;
2247 		case IDPF_TXD_COMPLT_RS:
2248 			idpf_tx_handle_rs_completion(tx_q, tx_desc,
2249 						     &cleaned_stats, budget);
2250 			break;
2251 		default:
2252 			netdev_err(tx_q->netdev,
2253 				   "Unknown TX completion type: %d\n", ctype);
2254 			goto fetch_next_desc;
2255 		}
2256 
2257 		u64_stats_update_begin(&tx_q->stats_sync);
2258 		u64_stats_add(&tx_q->q_stats.packets, cleaned_stats.packets);
2259 		u64_stats_add(&tx_q->q_stats.bytes, cleaned_stats.bytes);
2260 		tx_q->cleaned_pkts += cleaned_stats.packets;
2261 		tx_q->cleaned_bytes += cleaned_stats.bytes;
2262 		complq->num_completions++;
2263 		u64_stats_update_end(&tx_q->stats_sync);
2264 
2265 fetch_next_desc:
2266 		tx_desc++;
2267 		ntc++;
2268 		if (unlikely(!ntc)) {
2269 			ntc -= complq->desc_count;
2270 			tx_desc = &complq->comp[0];
2271 			idpf_queue_change(GEN_CHK, complq);
2272 		}
2273 
2274 		prefetch(tx_desc);
2275 
2276 		/* update budget accounting */
2277 		complq_budget--;
2278 	} while (likely(complq_budget));
2279 
2280 	/* Store the state of the complq to be used later in deciding if a
2281 	 * TXQ can be started again
2282 	 */
2283 	if (unlikely(IDPF_TX_COMPLQ_PENDING(complq->txq_grp) >
2284 		     IDPF_TX_COMPLQ_OVERFLOW_THRESH(complq)))
2285 		complq_ok = false;
2286 
2287 	np = netdev_priv(complq->netdev);
2288 	for (i = 0; i < complq->txq_grp->num_txq; ++i) {
2289 		struct idpf_tx_queue *tx_q = complq->txq_grp->txqs[i];
2290 		struct netdev_queue *nq;
2291 		bool dont_wake;
2292 
2293 		/* We didn't clean anything on this queue, move along */
2294 		if (!tx_q->cleaned_bytes)
2295 			continue;
2296 
2297 		*cleaned += tx_q->cleaned_pkts;
2298 
2299 		/* Update BQL */
2300 		nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
2301 
2302 		dont_wake = !complq_ok || !test_bit(IDPF_VPORT_UP, np->state) ||
2303 			    !netif_carrier_ok(tx_q->netdev);
2304 		/* Check if the TXQ needs to and can be restarted */
2305 		__netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes,
2306 					   IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
2307 					   dont_wake);
2308 
2309 		/* Reset cleaned stats for the next time this queue is
2310 		 * cleaned
2311 		 */
2312 		tx_q->cleaned_bytes = 0;
2313 		tx_q->cleaned_pkts = 0;
2314 	}
2315 
2316 	ntc += complq->desc_count;
2317 	complq->next_to_clean = ntc;
2318 
2319 	return !!complq_budget;
2320 }
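
/*
 * Illustrative sketch (not driver code): completion descriptors carry a
 * generation bit that flips on every pass through the ring, so SW can tell
 * a freshly written descriptor from a stale one without reading a head
 * register.  The ownership test above boils down to:
 *
 *	gen = le16_get_bits(tx_desc->common.qid_comptype_gen,
 *			    IDPF_TXD_COMPLQ_GEN_M);
 *	if (idpf_queue_has(GEN_CHK, complq) != gen)
 *		break;
 *
 * with SW flipping its own GEN_CHK expectation via idpf_queue_change() each
 * time next_to_clean wraps back to descriptor zero.
 */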
2321 
2322 /**
2323  * idpf_wait_for_sw_marker_completion - wait for SW marker of disabled Tx queue
2324  * @txq: disabled Tx queue
2325  *
2326  * When Tx queue is requested for disabling, the CP sends a special completion
2327  * descriptor called "SW marker", meaning the queue is ready to be destroyed.
2328  * If, for some reason, the marker is not received within 500 ms, stop
2329  * polling to avoid hanging the driver.
2330  */
2331 void idpf_wait_for_sw_marker_completion(const struct idpf_tx_queue *txq)
2332 {
2333 	struct idpf_compl_queue *complq;
2334 	unsigned long timeout;
2335 	bool flow, gen_flag;
2336 	u32 ntc;
2337 
2338 	if (!idpf_queue_has(SW_MARKER, txq))
2339 		return;
2340 
2341 	complq = idpf_queue_has(XDP, txq) ? txq->complq : txq->txq_grp->complq;
2342 	ntc = complq->next_to_clean;
2343 
2344 	flow = idpf_queue_has(FLOW_SCH_EN, complq);
2345 	gen_flag = idpf_queue_has(GEN_CHK, complq);
2346 
2347 	timeout = jiffies + msecs_to_jiffies(IDPF_WAIT_FOR_MARKER_TIMEO);
2348 
2349 	do {
2350 		struct idpf_splitq_4b_tx_compl_desc *tx_desc;
2351 		struct idpf_tx_queue *target;
2352 		u32 ctype_gen, id;
2353 
2354 		tx_desc = flow ? &complq->comp[ntc].common :
2355 			  &complq->comp_4b[ntc];
2356 		ctype_gen = le16_to_cpu(tx_desc->qid_comptype_gen);
2357 
2358 		if (!!(ctype_gen & IDPF_TXD_COMPLQ_GEN_M) != gen_flag) {
2359 			usleep_range(500, 1000);
2360 			continue;
2361 		}
2362 
2363 		if (FIELD_GET(IDPF_TXD_COMPLQ_COMPL_TYPE_M, ctype_gen) !=
2364 		    IDPF_TXD_COMPLT_SW_MARKER)
2365 			goto next;
2366 
2367 		id = FIELD_GET(IDPF_TXD_COMPLQ_QID_M, ctype_gen);
2368 		target = complq->txq_grp->txqs[id];
2369 
2370 		idpf_queue_clear(SW_MARKER, target);
2371 		if (target == txq)
2372 			break;
2373 
2374 next:
2375 		if (unlikely(++ntc == complq->desc_count)) {
2376 			ntc = 0;
2377 			gen_flag = !gen_flag;
2378 		}
2379 	} while (time_before(jiffies, timeout));
2380 
2381 	idpf_queue_assign(GEN_CHK, complq, gen_flag);
2382 	complq->next_to_clean = ntc;
2383 }
2384 
2385 /**
2386  * idpf_tx_splitq_build_ctb - populate command tag and size for queue
2387  * based scheduling descriptors
2388  * @desc: descriptor to populate
2389  * @params: pointer to tx params struct
2390  * @td_cmd: command to be filled in desc
2391  * @size: size of buffer
2392  */
2393 void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc,
2394 			      struct idpf_tx_splitq_params *params,
2395 			      u16 td_cmd, u16 size)
2396 {
2397 	desc->q.qw1.cmd_dtype =
2398 		le16_encode_bits(params->dtype, IDPF_FLEX_TXD_QW1_DTYPE_M);
2399 	desc->q.qw1.cmd_dtype |=
2400 		le16_encode_bits(td_cmd, IDPF_FLEX_TXD_QW1_CMD_M);
2401 	desc->q.qw1.buf_size = cpu_to_le16(size);
2402 	desc->q.qw1.l2tags.l2tag1 = cpu_to_le16(params->td_tag);
2403 }
2404 
2405 /**
2406  * idpf_tx_splitq_build_flow_desc - populate command tag and size for flow
2407  * scheduling descriptors
2408  * @desc: descriptor to populate
2409  * @params: pointer to tx params struct
2410  * @td_cmd: command to be filled in desc
2411  * @size: size of buffer
2412  */
2413 void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
2414 				    struct idpf_tx_splitq_params *params,
2415 				    u16 td_cmd, u16 size)
2416 {
2417 	*(u32 *)&desc->flow.qw1.cmd_dtype = (u8)(params->dtype | td_cmd);
2418 	desc->flow.qw1.rxr_bufsize = cpu_to_le16((u16)size);
2419 	desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag);
2420 }
2421 
2422 /**
2423  * idpf_txq_has_room - check if enough Tx splitq resources are available
2424  * @tx_q: the queue to be checked
2425  * @descs_needed: number of descriptors required for this packet
2426  * @bufs_needed: number of Tx buffers required for this packet
2427  *
2428  * Return: 0 if no room available, 1 otherwise
2429  */
2430 static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 descs_needed,
2431 			     u32 bufs_needed)
2432 {
2433 	if (IDPF_DESC_UNUSED(tx_q) < descs_needed ||
2434 	    IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
2435 		IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq) ||
2436 	    idpf_tx_splitq_get_free_bufs(tx_q->refillq) < bufs_needed)
2437 		return 0;
2438 	return 1;
2439 }
2440 
2441 /**
2442  * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
2443  * @tx_q: the queue to be checked
2444  * @descs_needed: number of descriptors required for this packet
2445  * @bufs_needed: number of buffers needed for this packet
2446  *
2447  * Return: 0 if stop is not needed
2448  */
2449 static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
2450 				     u32 descs_needed,
2451 				     u32 bufs_needed)
2452 {
2453 	/* Since we have multiple resources to check for splitq, our
2454 	 * start,stop_thrs becomes a boolean check instead of a count
2455 	 * start_thrs/stop_thrs become a boolean check instead of a count
2456 	 */
2457 	if (netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
2458 				      idpf_txq_has_room(tx_q, descs_needed,
2459 							bufs_needed),
2460 				      1, 1))
2461 		return 0;
2462 
2463 	u64_stats_update_begin(&tx_q->stats_sync);
2464 	u64_stats_inc(&tx_q->q_stats.q_busy);
2465 	u64_stats_update_end(&tx_q->stats_sync);
2466 
2467 	return -EBUSY;
2468 }
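
/*
 * Illustrative sketch (not driver code): because splitq transmit consumes
 * three independent resources (descriptors, completion-queue slots and
 * refillq buffer IDs), the availability check is collapsed into a single
 * 0/1 value and fed to netif_subqueue_maybe_stop() with start/stop
 * thresholds of 1.  A caller therefore only needs:
 *
 *	if (idpf_tx_maybe_stop_splitq(tx_q, descs_needed, bufs_needed)) {
 *		idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
 *		return NETDEV_TX_BUSY;
 *	}
 *
 * which mirrors the real call site in idpf_tx_splitq_frame() below.
 */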
2469 
2470 /**
2471  * idpf_tx_buf_hw_update - Store the new tail value
2472  * @tx_q: queue to bump
2473  * @val: new tail index
2474  * @xmit_more: more skbs pending
2475  *
2476  * The naming here is special in that 'hw' signals that this function is about
2477  * to do a register write to update our queue status. We know this can only
2478  * mean tail here as HW should be owning head for TX.
2479  */
2480 void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
2481 			   bool xmit_more)
2482 {
2483 	struct netdev_queue *nq;
2484 
2485 	nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
2486 	tx_q->next_to_use = val;
2487 
2488 	/* Force memory writes to complete before letting h/w
2489 	 * know there are new descriptors to fetch.  (Only
2490 	 * applicable for weak-ordered memory model archs,
2491 	 * such as IA-64).
2492 	 */
2493 	wmb();
2494 
2495 	/* notify HW of packet */
2496 	if (netif_xmit_stopped(nq) || !xmit_more)
2497 		writel(val, tx_q->tail);
2498 }
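
/*
 * Illustrative note (not driver code): the tail write is deliberately
 * skipped while the stack signals that more frames are coming, so a burst
 * of skbs ends in a single doorbell:
 *
 *	idpf_tx_buf_hw_update(tx_q, next_to_use, netdev_xmit_more());
 *
 * Error-unwind paths instead pass xmit_more = false to force the write and
 * keep HW in sync with whatever was already posted, see
 * idpf_tx_splitq_pkt_err_unmap() below.
 */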
2499 
2500 /**
2501  * idpf_tx_res_count_required - get number of Tx resources needed for this pkt
2502  * @txq: queue to send buffer on
2503  * @skb: send buffer
2504  * @bufs_needed: (output) number of buffers needed for this skb.
2505  *
2506  * Return: number of data descriptors needed for this skb.
2507  */
2508 unsigned int idpf_tx_res_count_required(struct idpf_tx_queue *txq,
2509 					struct sk_buff *skb,
2510 					u32 *bufs_needed)
2511 {
2512 	const struct skb_shared_info *shinfo;
2513 	unsigned int count = 0, i;
2514 
2515 	count += !!skb_headlen(skb);
2516 
2517 	if (!skb_is_nonlinear(skb))
2518 		return count;
2519 
2520 	shinfo = skb_shinfo(skb);
2521 	*bufs_needed += shinfo->nr_frags;
2522 	for (i = 0; i < shinfo->nr_frags; i++) {
2523 		unsigned int size;
2524 
2525 		size = skb_frag_size(&shinfo->frags[i]);
2526 
2527 		/* We only need to use the idpf_size_to_txd_count check if the
2528 		 * fragment is going to span multiple descriptors,
2529 		 * i.e. size >= 16K.
2530 		 */
2531 		if (size >= SZ_16K)
2532 			count += idpf_size_to_txd_count(size);
2533 		else
2534 			count++;
2535 	}
2536 
2537 	if (idpf_chk_linearize(skb, txq->tx_max_bufs, count)) {
2538 		if (__skb_linearize(skb))
2539 			return 0;
2540 
2541 		count = idpf_size_to_txd_count(skb->len);
2542 		u64_stats_update_begin(&txq->stats_sync);
2543 		u64_stats_inc(&txq->q_stats.linearize);
2544 		u64_stats_update_end(&txq->stats_sync);
2545 	}
2546 
2547 	return count;
2548 }
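
/*
 * Worked example (illustrative only): an skb with a linear head and three
 * page frags of 4K, 8K and 32K needs
 *
 *	1 (head) + 1 (4K frag) + 1 (8K frag) + idpf_size_to_txd_count(32K)
 *
 * data descriptors, since only frags of 16K or more may span multiple
 * descriptors, while *bufs_needed is bumped by three, one per frag,
 * regardless of frag size.
 */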
2549 
2550 /**
2551  * idpf_tx_splitq_bump_ntu - adjust NTU and generation
2552  * @txq: the tx ring to wrap
2553  * @ntu: ring index to bump
2554  *
2555  * Return: the next ring index, wrapping to 0 at the end of the ring
2556  */
2557 static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_tx_queue *txq, u16 ntu)
2558 {
2559 	ntu++;
2560 
2561 	if (ntu == txq->desc_count)
2562 		ntu = 0;
2563 
2564 	return ntu;
2565 }
2566 
2567 /**
2568  * idpf_tx_get_free_buf_id - get a free buffer ID from the refill queue
2569  * @refillq: refill queue to get buffer ID from
2570  * @buf_id: return buffer ID
2571  *
2572  * Return: true if a buffer ID was found, false if not
2573  */
2574 static bool idpf_tx_get_free_buf_id(struct idpf_sw_queue *refillq,
2575 				    u32 *buf_id)
2576 {
2577 	u32 ntc = refillq->next_to_clean;
2578 	u32 refill_desc;
2579 
2580 	refill_desc = refillq->ring[ntc];
2581 
2582 	if (unlikely(idpf_queue_has(RFL_GEN_CHK, refillq) !=
2583 		     !!(refill_desc & IDPF_RFL_BI_GEN_M)))
2584 		return false;
2585 
2586 	*buf_id = FIELD_GET(IDPF_RFL_BI_BUFID_M, refill_desc);
2587 
2588 	if (unlikely(++ntc == refillq->desc_count)) {
2589 		idpf_queue_change(RFL_GEN_CHK, refillq);
2590 		ntc = 0;
2591 	}
2592 
2593 	refillq->next_to_clean = ntc;
2594 
2595 	return true;
2596 }
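
/*
 * Illustrative sketch (not driver code): each refillq entry packs a buffer
 * ID together with a generation bit, so the consumer can detect an empty
 * ring without a shared producer/consumer count.  The encoding roughly
 * corresponds to:
 *
 *	refill_desc = FIELD_PREP(IDPF_RFL_BI_BUFID_M, buf_id) |
 *		      FIELD_PREP(IDPF_RFL_BI_GEN_M, gen);
 *
 * and an entry is only considered valid when its GEN bit matches the
 * consumer's RFL_GEN_CHK expectation, which flips on every ring wrap as
 * seen above.
 */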
2597 
2598 /**
2599  * idpf_tx_splitq_pkt_err_unmap - Unmap buffers and bump tail in case of error
2600  * @txq: Tx queue to unwind
2601  * @params: pointer to splitq params struct
2602  * @first: starting buffer for packet to unmap
2603  */
2604 static void idpf_tx_splitq_pkt_err_unmap(struct idpf_tx_queue *txq,
2605 					 struct idpf_tx_splitq_params *params,
2606 					 struct idpf_tx_buf *first)
2607 {
2608 	struct idpf_sw_queue *refillq = txq->refillq;
2609 	struct libeth_sq_napi_stats ss = { };
2610 	struct idpf_tx_buf *tx_buf = first;
2611 	struct libeth_cq_pp cp = {
2612 		.dev    = txq->dev,
2613 		.ss     = &ss,
2614 	};
2615 
2616 	u64_stats_update_begin(&txq->stats_sync);
2617 	u64_stats_inc(&txq->q_stats.dma_map_errs);
2618 	u64_stats_update_end(&txq->stats_sync);
2619 
2620 	libeth_tx_complete(tx_buf, &cp);
2621 	while (idpf_tx_buf_next(tx_buf) != IDPF_TXBUF_NULL) {
2622 		tx_buf = &txq->tx_buf[idpf_tx_buf_next(tx_buf)];
2623 		libeth_tx_complete(tx_buf, &cp);
2624 	}
2625 
2626 	/* Update tail in case netdev_xmit_more was previously true. */
2627 	idpf_tx_buf_hw_update(txq, params->prev_ntu, false);
2628 
2629 	if (!refillq)
2630 		return;
2631 
2632 	/* Restore refillq state to avoid leaking tags. */
2633 	if (params->prev_refill_gen != idpf_queue_has(RFL_GEN_CHK, refillq))
2634 		idpf_queue_change(RFL_GEN_CHK, refillq);
2635 	refillq->next_to_clean = params->prev_refill_ntc;
2636 }
2637 
2638 /**
2639  * idpf_tx_splitq_map - Build the Tx flex descriptor
2640  * @tx_q: queue to send buffer on
2641  * @params: pointer to splitq params struct
2642  * @first: first buffer info buffer to use
2643  *
2644  * This function loops over the skb data pointed to by *first
2645  * and gets a physical address for each memory location and programs
2646  * it and the length into the transmit flex descriptor.
2647  */
2648 static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
2649 			       struct idpf_tx_splitq_params *params,
2650 			       struct idpf_tx_buf *first)
2651 {
2652 	union idpf_tx_flex_desc *tx_desc;
2653 	unsigned int data_len, size;
2654 	struct idpf_tx_buf *tx_buf;
2655 	u16 i = tx_q->next_to_use;
2656 	struct netdev_queue *nq;
2657 	struct sk_buff *skb;
2658 	skb_frag_t *frag;
2659 	u32 next_buf_id;
2660 	u16 td_cmd = 0;
2661 	dma_addr_t dma;
2662 
2663 	skb = first->skb;
2664 
2665 	td_cmd = params->offload.td_cmd;
2666 
2667 	data_len = skb->data_len;
2668 	size = skb_headlen(skb);
2669 
2670 	tx_desc = &tx_q->flex_tx[i];
2671 
2672 	dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
2673 
2674 	tx_buf = first;
2675 	first->nr_frags = 0;
2676 
2677 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
2678 		unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
2679 
2680 		if (unlikely(dma_mapping_error(tx_q->dev, dma))) {
2681 			idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL;
2682 			return idpf_tx_splitq_pkt_err_unmap(tx_q, params,
2683 							    first);
2684 		}
2685 
2686 		first->nr_frags++;
2687 		tx_buf->type = LIBETH_SQE_FRAG;
2688 
2689 		/* record length, and DMA address */
2690 		dma_unmap_len_set(tx_buf, len, size);
2691 		dma_unmap_addr_set(tx_buf, dma, dma);
2692 
2693 		/* buf_addr is in same location for both desc types */
2694 		tx_desc->q.buf_addr = cpu_to_le64(dma);
2695 
2696 		/* The stack can send us fragments that are too large for a
2697 		 * single descriptor i.e. frag size > 16K-1. We will need to
2698 		 * split the fragment across multiple descriptors in this case.
2699 		 * To adhere to HW alignment restrictions, the fragment needs
2700 		 * to be split such that the first chunk ends on a 4K boundary
2701 		 * and all subsequent chunks start on a 4K boundary. We still
2702 		 * want to send as much data as possible though, so our
2703 		 * intermediate descriptor chunk size will be 12K.
2704 		 *
2705 		 * For example, consider a 32K fragment mapped to DMA addr 2600.
2706 		 * ------------------------------------------------------------
2707 		 * |                    frag_size = 32K                       |
2708 		 * ------------------------------------------------------------
2709 		 * |2600		  |16384	    |28672
2710 		 *
2711 		 * 3 descriptors will be used for this fragment. The HW expects
2712 		 * the descriptors to contain the following:
2713 		 * ------------------------------------------------------------
2714 		 * | size = 13784         | size = 12K      | size = 6696     |
2715 		 * | dma = 2600           | dma = 16384     | dma = 28672     |
2716 		 * ------------------------------------------------------------
2717 		 *
2718 		 * We need to first adjust the max_data for the first chunk so
2719 		 * that it ends on a 4K boundary. By negating the value of the
2720 		 * DMA address and taking only the low order bits, we're
2721 		 * effectively calculating
2722 		 *	4K - (DMA addr lower order bits) =
2723 		 *				bytes to next boundary.
2724 		 *
2725 		 * Add that to our base aligned max_data (12K) and we have
2726 		 * our first chunk size. In the example above,
2727 		 *	13784 = 12K + (4096-2600)
2728 		 *
2729 		 * After guaranteeing the first chunk ends on a 4K boundary, we
2730 		 * will give the intermediate descriptors 12K chunks and
2731 		 * whatever is left to the final descriptor. This ensures that
2732 		 * all descriptors used for the remaining chunks of the
2733 		 * fragment start on a 4K boundary and we use as few
2734 		 * descriptors as possible.
2735 		 */
2736 		max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1);
2737 		while (unlikely(size > IDPF_TX_MAX_DESC_DATA)) {
2738 			idpf_tx_splitq_build_desc(tx_desc, params, td_cmd,
2739 						  max_data);
2740 
2741 			if (unlikely(++i == tx_q->desc_count)) {
2742 				tx_desc = &tx_q->flex_tx[0];
2743 				i = 0;
2744 			} else {
2745 				tx_desc++;
2746 			}
2747 
2748 			/* Adjust the DMA offset and the remaining size of the
2749 			 * fragment.  On the first iteration of this loop,
2750 			 * max_data will be >= 12K and <= 16K-1.  On any
2751 			 * subsequent iteration of this loop, max_data will
2752 			 * always be 12K.
2753 			 */
2754 			dma += max_data;
2755 			size -= max_data;
2756 
2757 			/* Reset max_data since remaining chunks will be 12K
2758 			 * at most
2759 			 */
2760 			max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
2761 
2762 			/* buf_addr is in same location for both desc types */
2763 			tx_desc->q.buf_addr = cpu_to_le64(dma);
2764 		}
2765 
2766 		if (!data_len)
2767 			break;
2768 
2769 		idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
2770 
2771 		if (unlikely(++i == tx_q->desc_count)) {
2772 			tx_desc = &tx_q->flex_tx[0];
2773 			i = 0;
2774 		} else {
2775 			tx_desc++;
2776 		}
2777 
2778 		if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
2779 			if (unlikely(!idpf_tx_get_free_buf_id(tx_q->refillq,
2780 							      &next_buf_id))) {
2781 				idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL;
2782 				return idpf_tx_splitq_pkt_err_unmap(tx_q, params,
2783 								    first);
2784 			}
2785 		} else {
2786 			next_buf_id = i;
2787 		}
2788 		idpf_tx_buf_next(tx_buf) = next_buf_id;
2789 		tx_buf = &tx_q->tx_buf[next_buf_id];
2790 
2791 		size = skb_frag_size(frag);
2792 		data_len -= size;
2793 
2794 		dma = skb_frag_dma_map(tx_q->dev, frag, 0, size,
2795 				       DMA_TO_DEVICE);
2796 	}
2797 
2798 	/* record SW timestamp if HW timestamp is not available */
2799 	skb_tx_timestamp(skb);
2800 
2801 	first->type = LIBETH_SQE_SKB;
2802 
2803 	/* write last descriptor with RS and EOP bits */
2804 	first->rs_idx = i;
2805 	idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL;
2806 	td_cmd |= params->eop_cmd;
2807 	idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
2808 	i = idpf_tx_splitq_bump_ntu(tx_q, i);
2809 
2810 	tx_q->txq_grp->num_completions_pending++;
2811 
2812 	/* record bytecount for BQL */
2813 	nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
2814 	netdev_tx_sent_queue(nq, first->bytes);
2815 
2816 	idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
2817 }
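
/*
 * Illustrative sketch (not driver code): the oversized-fragment split above
 * is plain arithmetic.  For a fragment of @size bytes at DMA address @dma,
 * the first chunk is 12K plus the distance from @dma to the next 4K
 * boundary, every following chunk is exactly 12K, and the remainder goes
 * into the final descriptor:
 *
 *	u32 max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
 *
 *	max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1);
 *	while (size > IDPF_TX_MAX_DESC_DATA) {
 *		emit_chunk(dma, max_data);
 *		dma += max_data;
 *		size -= max_data;
 *		max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
 *	}
 *	emit_chunk(dma, size);
 *
 * where emit_chunk() stands in for building one data descriptor.  Plugging
 * in the 32K-at-2600 example from the comment above yields chunks of 13784,
 * 12288 and 6696 bytes.
 */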
2818 
2819 /**
2820  * idpf_tso - computes mss and TSO length to prepare for TSO
2821  * @skb: pointer to skb
2822  * @off: pointer to struct that holds offload parameters
2823  *
2824  * Return: error (negative) if TSO was requested but cannot be applied to the
2825  * given skb, 0 if TSO does not apply to the given skb, or 1 otherwise.
2826  */
2827 int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off)
2828 {
2829 	const struct skb_shared_info *shinfo;
2830 	union {
2831 		struct iphdr *v4;
2832 		struct ipv6hdr *v6;
2833 		unsigned char *hdr;
2834 	} ip;
2835 	union {
2836 		struct tcphdr *tcp;
2837 		struct udphdr *udp;
2838 		unsigned char *hdr;
2839 	} l4;
2840 	u32 paylen, l4_start;
2841 	int err;
2842 
2843 	if (!skb_is_gso(skb))
2844 		return 0;
2845 
2846 	err = skb_cow_head(skb, 0);
2847 	if (err < 0)
2848 		return err;
2849 
2850 	shinfo = skb_shinfo(skb);
2851 
2852 	ip.hdr = skb_network_header(skb);
2853 	l4.hdr = skb_transport_header(skb);
2854 
2855 	/* initialize outer IP header fields */
2856 	if (ip.v4->version == 4) {
2857 		ip.v4->tot_len = 0;
2858 		ip.v4->check = 0;
2859 	} else if (ip.v6->version == 6) {
2860 		ip.v6->payload_len = 0;
2861 	}
2862 
2863 	l4_start = skb_transport_offset(skb);
2864 
2865 	/* remove payload length from checksum */
2866 	paylen = skb->len - l4_start;
2867 
2868 	switch (shinfo->gso_type & ~SKB_GSO_DODGY) {
2869 	case SKB_GSO_TCPV4:
2870 	case SKB_GSO_TCPV6:
2871 		csum_replace_by_diff(&l4.tcp->check,
2872 				     (__force __wsum)htonl(paylen));
2873 		off->tso_hdr_len = __tcp_hdrlen(l4.tcp) + l4_start;
2874 		break;
2875 	case SKB_GSO_UDP_L4:
2876 		csum_replace_by_diff(&l4.udp->check,
2877 				     (__force __wsum)htonl(paylen));
2878 		/* compute length of segmentation header */
2879 		off->tso_hdr_len = sizeof(struct udphdr) + l4_start;
2880 		l4.udp->len = htons(shinfo->gso_size + sizeof(struct udphdr));
2881 		break;
2882 	default:
2883 		return -EINVAL;
2884 	}
2885 
2886 	off->tso_len = skb->len - off->tso_hdr_len;
2887 	off->mss = shinfo->gso_size;
2888 	off->tso_segs = shinfo->gso_segs;
2889 
2890 	off->tx_flags |= IDPF_TX_FLAGS_TSO;
2891 
2892 	return 1;
2893 }
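
/*
 * Worked example (illustrative only): for a 7306 byte TCPv4 GSO skb with a
 * 14 byte Ethernet header, 20 byte IP header, 20 byte TCP header and
 * gso_size 1460, l4_start is 34, so paylen = 7306 - 34 = 7272 is removed
 * from the pseudo-header checksum, tso_hdr_len becomes 20 + 34 = 54 and
 * tso_len = 7306 - 54 = 7252, which HW then slices into gso_size'd segments.
 */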
2894 
2896 /**
2897  * idpf_tx_splitq_get_ctx_desc - grab next desc and update buffer ring
2898  * @txq: queue to put context descriptor on
2899  *
2900  * Since the Tx buffer ring mimics the descriptor ring, update the Tx buffer
2901  * ring entry to reflect that this index holds a context descriptor.
2902  *
2903  * Return: pointer to the next descriptor
2904  */
2905 static union idpf_flex_tx_ctx_desc *
2906 idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq)
2907 {
2908 	union idpf_flex_tx_ctx_desc *desc;
2909 	int i = txq->next_to_use;
2910 
2911 	/* grab the next descriptor */
2912 	desc = &txq->flex_ctx[i];
2913 	txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i);
2914 
2915 	return desc;
2916 }
2917 
2918 /**
2919  * idpf_tx_drop_skb - free the SKB and bump tail if necessary
2920  * @tx_q: queue to send buffer on
2921  * @skb: pointer to skb
2922  *
2923  * Return: always NETDEV_TX_OK
2924  */
2925 netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb)
2926 {
2927 	u64_stats_update_begin(&tx_q->stats_sync);
2928 	u64_stats_inc(&tx_q->q_stats.skb_drops);
2929 	u64_stats_update_end(&tx_q->stats_sync);
2930 
2931 	idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
2932 
2933 	dev_kfree_skb(skb);
2934 
2935 	return NETDEV_TX_OK;
2936 }
2937 
2938 #if (IS_ENABLED(CONFIG_PTP_1588_CLOCK))
2939 /**
2940  * idpf_tx_tstamp - set up context descriptor for hardware timestamp
2941  * @tx_q: queue to send buffer on
2942  * @skb: pointer to the SKB we're sending
2943  * @off: pointer to the offload struct
2944  *
2945  * Return: Positive index number on success, negative otherwise.
2946  */
2947 static int idpf_tx_tstamp(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
2948 			  struct idpf_tx_offload_params *off)
2949 {
2950 	int err, idx;
2951 
2952 	/* only timestamp the outbound packet if the user has requested it */
2953 	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2954 		return -1;
2955 
2956 	if (!idpf_ptp_get_txq_tstamp_capability(tx_q))
2957 		return -1;
2958 
2959 	/* Tx timestamps cannot be sampled when doing TSO */
2960 	if (off->tx_flags & IDPF_TX_FLAGS_TSO)
2961 		return -1;
2962 
2963 	/* Grab an open timestamp slot */
2964 	err = idpf_ptp_request_ts(tx_q, skb, &idx);
2965 	if (err) {
2966 		u64_stats_update_begin(&tx_q->stats_sync);
2967 		u64_stats_inc(&tx_q->q_stats.tstamp_skipped);
2968 		u64_stats_update_end(&tx_q->stats_sync);
2969 
2970 		return -1;
2971 	}
2972 
2973 	off->tx_flags |= IDPF_TX_FLAGS_TSYN;
2974 
2975 	return idx;
2976 }
2977 
2978 /**
2979  * idpf_tx_set_tstamp_desc - Set the Tx descriptor fields needed to generate
2980  *			     PHY Tx timestamp
2981  * @ctx_desc: Context descriptor
2982  * @idx: Index of the Tx timestamp latch
2983  */
2984 static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc,
2985 				    u32 idx)
2986 {
2987 	ctx_desc->tsyn.qw1 = le64_encode_bits(IDPF_TX_DESC_DTYPE_CTX,
2988 					      IDPF_TX_CTX_DTYPE_M) |
2989 			     le64_encode_bits(IDPF_TX_CTX_DESC_TSYN,
2990 					      IDPF_TX_CTX_CMD_M) |
2991 			     le64_encode_bits(idx, IDPF_TX_CTX_TSYN_REG_M);
2992 }
2993 #else /* CONFIG_PTP_1588_CLOCK */
2994 static int idpf_tx_tstamp(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
2995 			  struct idpf_tx_offload_params *off)
2996 {
2997 	return -1;
2998 }
2999 
3000 static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc,
3001 				    u32 idx)
3002 { }
3003 #endif /* CONFIG_PTP_1588_CLOCK */
3004 
3005 /**
3006  * idpf_tx_splitq_need_re - check whether RE bit needs to be set
3007  * @tx_q: pointer to Tx queue
3008  *
3009  * Return: true if RE bit needs to be set, false otherwise
3010  */
3011 static bool idpf_tx_splitq_need_re(struct idpf_tx_queue *tx_q)
3012 {
3013 	int gap = tx_q->next_to_use - tx_q->last_re;
3014 
3015 	gap += (gap < 0) ? tx_q->desc_count : 0;
3016 
3017 	return gap >= IDPF_TX_SPLITQ_RE_MIN_GAP;
3018 }
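
/*
 * Worked example (illustrative only): with a 512 entry ring, last_re = 480
 * and next_to_use = 32 after a wrap, the raw gap is 32 - 480 = -448, which
 * the correction above turns into -448 + 512 = 64 descriptors since the
 * last RE request; the RE bit is requested again once this distance reaches
 * IDPF_TX_SPLITQ_RE_MIN_GAP.
 */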
3019 
3020 /**
3021  * idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors
3022  * @skb: send buffer
3023  * @tx_q: queue to send buffer on
3024  *
3025  * Return: NETDEV_TX_OK if sent, else an error code
3026  */
3027 static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
3028 					struct idpf_tx_queue *tx_q)
3029 {
3030 	struct idpf_tx_splitq_params tx_params = {
3031 		.prev_ntu = tx_q->next_to_use,
3032 	};
3033 	union idpf_flex_tx_ctx_desc *ctx_desc;
3034 	struct idpf_tx_buf *first;
3035 	u32 count, buf_count = 1;
3036 	int tso, idx;
3037 	u32 buf_id;
3038 
3039 	count = idpf_tx_res_count_required(tx_q, skb, &buf_count);
3040 	if (unlikely(!count))
3041 		return idpf_tx_drop_skb(tx_q, skb);
3042 
3043 	tso = idpf_tso(skb, &tx_params.offload);
3044 	if (unlikely(tso < 0))
3045 		return idpf_tx_drop_skb(tx_q, skb);
3046 
3047 	/* Check for splitq specific TX resources */
3048 	count += (IDPF_TX_DESCS_PER_CACHE_LINE + tso);
3049 	if (idpf_tx_maybe_stop_splitq(tx_q, count, buf_count)) {
3050 		idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
3051 
3052 		return NETDEV_TX_BUSY;
3053 	}
3054 
3055 	if (tso) {
3056 		/* If tso is needed, set up context desc */
3057 		ctx_desc = idpf_tx_splitq_get_ctx_desc(tx_q);
3058 
3059 		ctx_desc->tso.qw1.cmd_dtype =
3060 				cpu_to_le16(IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX |
3061 					    IDPF_TX_FLEX_CTX_DESC_CMD_TSO);
3062 		ctx_desc->tso.qw0.flex_tlen =
3063 				cpu_to_le32(tx_params.offload.tso_len &
3064 					    IDPF_TXD_FLEX_CTX_TLEN_M);
3065 		ctx_desc->tso.qw0.mss_rt =
3066 				cpu_to_le16(tx_params.offload.mss &
3067 					    IDPF_TXD_FLEX_CTX_MSS_RT_M);
3068 		ctx_desc->tso.qw0.hdr_len = tx_params.offload.tso_hdr_len;
3069 
3070 		u64_stats_update_begin(&tx_q->stats_sync);
3071 		u64_stats_inc(&tx_q->q_stats.lso_pkts);
3072 		u64_stats_update_end(&tx_q->stats_sync);
3073 	}
3074 
3075 	idx = idpf_tx_tstamp(tx_q, skb, &tx_params.offload);
3076 	if (idx != -1) {
3077 		ctx_desc = idpf_tx_splitq_get_ctx_desc(tx_q);
3078 		idpf_tx_set_tstamp_desc(ctx_desc, idx);
3079 	}
3080 
3081 	if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
3082 		struct idpf_sw_queue *refillq = tx_q->refillq;
3083 
3084 		/* Save refillq state in case of a packet rollback.  Otherwise,
3085 		 * the tags will be leaked since they will be popped from the
3086 		 * refillq but never reposted during cleaning.
3087 		 */
3088 		tx_params.prev_refill_gen =
3089 			idpf_queue_has(RFL_GEN_CHK, refillq);
3090 		tx_params.prev_refill_ntc = refillq->next_to_clean;
3091 
3092 		if (unlikely(!idpf_tx_get_free_buf_id(tx_q->refillq,
3093 						      &buf_id))) {
3094 			if (tx_params.prev_refill_gen !=
3095 			    idpf_queue_has(RFL_GEN_CHK, refillq))
3096 				idpf_queue_change(RFL_GEN_CHK, refillq);
3097 			refillq->next_to_clean = tx_params.prev_refill_ntc;
3098 
3099 			tx_q->next_to_use = tx_params.prev_ntu;
3100 			return idpf_tx_drop_skb(tx_q, skb);
3101 		}
3102 		tx_params.compl_tag = buf_id;
3103 
3104 		tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE;
3105 		tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP;
3106 		/* Set the RE bit to periodically "clean" the descriptor ring.
3107 		 * MIN_GAP is set to MIN_RING size to ensure it will be set at
3108 		 * least once each time around the ring.
3109 		 */
3110 		if (idpf_tx_splitq_need_re(tx_q)) {
3111 			tx_params.eop_cmd |= IDPF_TXD_FLEX_FLOW_CMD_RE;
3112 			tx_q->txq_grp->num_completions_pending++;
3113 			tx_q->last_re = tx_q->next_to_use;
3114 		}
3115 
3116 		if (skb->ip_summed == CHECKSUM_PARTIAL)
3117 			tx_params.offload.td_cmd |= IDPF_TXD_FLEX_FLOW_CMD_CS_EN;
3118 
3119 	} else {
3120 		buf_id = tx_q->next_to_use;
3121 
3122 		tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2;
3123 		tx_params.eop_cmd = IDPF_TXD_LAST_DESC_CMD;
3124 
3125 		if (skb->ip_summed == CHECKSUM_PARTIAL)
3126 			tx_params.offload.td_cmd |= IDPF_TX_FLEX_DESC_CMD_CS_EN;
3127 	}
3128 
3129 	first = &tx_q->tx_buf[buf_id];
3130 	first->skb = skb;
3131 
3132 	if (tso) {
3133 		first->packets = tx_params.offload.tso_segs;
3134 		first->bytes = skb->len +
3135 			((first->packets - 1) * tx_params.offload.tso_hdr_len);
3136 	} else {
3137 		first->packets = 1;
3138 		first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
3139 	}
3140 
3141 	idpf_tx_splitq_map(tx_q, &tx_params, first);
3142 
3143 	return NETDEV_TX_OK;
3144 }
3145 
3146 /**
3147  * idpf_tx_start - Selects the right Tx queue to send buffer
3148  * @skb: send buffer
3149  * @netdev: network interface device structure
3150  *
3151  * Return: NETDEV_TX_OK if sent, else an error code
3152  */
3153 netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev)
3154 {
3155 	const struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
3156 	struct idpf_tx_queue *tx_q;
3157 
3158 	if (unlikely(skb_get_queue_mapping(skb) >=
3159 		     vport->num_txq - vport->num_xdp_txq)) {
3160 		dev_kfree_skb_any(skb);
3161 
3162 		return NETDEV_TX_OK;
3163 	}
3164 
3165 	tx_q = vport->txqs[skb_get_queue_mapping(skb)];
3166 
3167 	/* hardware can't handle really short frames; hardware padding only
3168 	 * works beyond this point
3169 	 */
3170 	if (skb_put_padto(skb, tx_q->tx_min_pkt_len)) {
3171 		idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
3172 
3173 		return NETDEV_TX_OK;
3174 	}
3175 
3176 	if (idpf_is_queue_model_split(vport->dflt_qv_rsrc.txq_model))
3177 		return idpf_tx_splitq_frame(skb, tx_q);
3178 	else
3179 		return idpf_tx_singleq_frame(skb, tx_q);
3180 }
3181 
3182 /**
3183  * idpf_rx_hash - set the hash value in the skb
3184  * @rxq: Rx descriptor ring packet is being transacted on
3185  * @skb: pointer to current skb being populated
3186  * @rx_desc: Receive descriptor
3187  * @decoded: Decoded Rx packet type related fields
3188  */
3189 static void
3190 idpf_rx_hash(const struct idpf_rx_queue *rxq, struct sk_buff *skb,
3191 	     const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
3192 	     struct libeth_rx_pt decoded)
3193 {
3194 	u32 hash;
3195 
3196 	if (!libeth_rx_pt_has_hash(rxq->xdp_rxq.dev, decoded))
3197 		return;
3198 
3199 	hash = le16_to_cpu(rx_desc->hash1) |
3200 	       (rx_desc->ff2_mirrid_hash2.hash2 << 16) |
3201 	       (rx_desc->hash3 << 24);
3202 
3203 	libeth_rx_pt_set_hash(skb, hash, decoded);
3204 }
3205 
3206 /**
3207  * idpf_rx_csum - Indicate in skb if checksum is good
3208  * @rxq: Rx descriptor ring packet is being transacted on
3209  * @skb: pointer to current skb being populated
3210  * @csum_bits: checksum fields extracted from the descriptor
3211  * @decoded: Decoded Rx packet type related fields
3212  *
3213  * skb->protocol must be set before this function is called
3214  */
3215 static void idpf_rx_csum(struct idpf_rx_queue *rxq, struct sk_buff *skb,
3216 			 struct libeth_rx_csum csum_bits,
3217 			 struct libeth_rx_pt decoded)
3218 {
3219 	bool ipv4, ipv6;
3220 
3221 	/* check if Rx checksum is enabled */
3222 	if (!libeth_rx_pt_has_checksum(rxq->xdp_rxq.dev, decoded))
3223 		return;
3224 
3225 	/* check if HW has decoded the packet and checksum */
3226 	if (unlikely(!csum_bits.l3l4p))
3227 		return;
3228 
3229 	ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
3230 	ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
3231 
3232 	if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe)))
3233 		goto checksum_fail;
3234 
3235 	if (unlikely(ipv6 && csum_bits.ipv6exadd))
3236 		return;
3237 
3238 	/* check for L4 errors and handle packets that were not able to be
3239 	 * checksummed
3240 	 */
3241 	if (unlikely(csum_bits.l4e))
3242 		goto checksum_fail;
3243 
3244 	if (!csum_bits.raw_csum_valid ||
3245 	    decoded.inner_prot == LIBETH_RX_PT_INNER_SCTP) {
3246 		skb->ip_summed = CHECKSUM_UNNECESSARY;
3247 		return;
3248 	}
3249 
3250 	skb->csum = csum_unfold((__force __sum16)~swab16(csum_bits.raw_csum));
3251 	skb->ip_summed = CHECKSUM_COMPLETE;
3252 
3253 	return;
3254 
3255 checksum_fail:
3256 	u64_stats_update_begin(&rxq->stats_sync);
3257 	u64_stats_inc(&rxq->q_stats.hw_csum_err);
3258 	u64_stats_update_end(&rxq->stats_sync);
3259 }
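
/*
 * Illustrative summary (not driver code): checksum errors flagged by HW end
 * up in the hw_csum_err counter with skb->ip_summed left untouched; SCTP
 * packets and packets without a valid raw checksum are marked
 * CHECKSUM_UNNECESSARY; everything else reports the full raw checksum:
 *
 *	skb->csum = csum_unfold((__force __sum16)~swab16(csum_bits.raw_csum));
 *	skb->ip_summed = CHECKSUM_COMPLETE;
 *
 * letting the stack verify any inner protocol without re-summing the
 * payload.
 */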
3260 
3261 /**
3262  * idpf_rx_splitq_extract_csum_bits - Extract checksum bits from descriptor
3263  * @rx_desc: receive descriptor
3264  *
3265  * Return: parsed checksum status.
3266  **/
3267 static struct libeth_rx_csum
3268 idpf_rx_splitq_extract_csum_bits(const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
3269 {
3270 	struct libeth_rx_csum csum = { };
3271 	u8 qword0, qword1;
3272 
3273 	qword0 = rx_desc->status_err0_qw0;
3274 	qword1 = rx_desc->status_err0_qw1;
3275 
3276 	csum.ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M,
3277 			     qword1);
3278 	csum.eipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M,
3279 			      qword1);
3280 	csum.l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M,
3281 			     qword1);
3282 	csum.l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M,
3283 			       qword1);
3284 	csum.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M,
3285 				   qword0);
3286 	csum.raw_csum_valid =
3287 		!le16_get_bits(rx_desc->ptype_err_fflags0,
3288 			       VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M);
3289 	csum.raw_csum = le16_to_cpu(rx_desc->misc.raw_cs);
3290 
3291 	return csum;
3292 }
3293 
3294 /**
3295  * idpf_rx_rsc - Set the RSC fields in the skb
3296  * @rxq : Rx descriptor ring packet is being transacted on
3297  * @skb : pointer to current skb being populated
3298  * @rx_desc: Receive descriptor
3299  * @decoded: Decoded Rx packet type related fields
3300  *
3301  * Populate the skb fields with the total number of RSC segments, RSC payload
3302  * length and packet type.
3303  *
3304  * Return: 0 on success and error code on failure
3305  */
3306 static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
3307 		       const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
3308 		       struct libeth_rx_pt decoded)
3309 {
3310 	u16 rsc_segments, rsc_seg_len;
3311 	bool ipv4, ipv6;
3312 	int len;
3313 
3314 	if (unlikely(libeth_rx_pt_get_ip_ver(decoded) ==
3315 		     LIBETH_RX_PT_OUTER_L2))
3316 		return -EINVAL;
3317 
3318 	rsc_seg_len = le16_to_cpu(rx_desc->misc.rscseglen);
3319 	if (unlikely(!rsc_seg_len))
3320 		return -EINVAL;
3321 
3322 	ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
3323 	ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
3324 
3325 	if (unlikely(!(ipv4 ^ ipv6)))
3326 		return -EINVAL;
3327 
3328 	rsc_segments = DIV_ROUND_UP(skb->data_len, rsc_seg_len);
3329 
3330 	NAPI_GRO_CB(skb)->count = rsc_segments;
3331 	skb_shinfo(skb)->gso_size = rsc_seg_len;
3332 
3333 	skb_reset_network_header(skb);
3334 
3335 	if (ipv4) {
3336 		struct iphdr *ipv4h = ip_hdr(skb);
3337 
3338 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
3339 
3340 		/* Reset and set transport header offset in skb */
3341 		skb_set_transport_header(skb, sizeof(struct iphdr));
3342 		len = skb->len - skb_transport_offset(skb);
3343 
3344 		/* Compute the TCP pseudo header checksum */
3345 		tcp_hdr(skb)->check =
3346 			~tcp_v4_check(len, ipv4h->saddr, ipv4h->daddr, 0);
3347 	} else {
3348 		struct ipv6hdr *ipv6h = ipv6_hdr(skb);
3349 
3350 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
3351 		skb_set_transport_header(skb, sizeof(struct ipv6hdr));
3352 		len = skb->len - skb_transport_offset(skb);
3353 		tcp_hdr(skb)->check =
3354 			~tcp_v6_check(len, &ipv6h->saddr, &ipv6h->daddr, 0);
3355 	}
3356 
3357 	tcp_gro_complete(skb);
3358 
3359 	u64_stats_update_begin(&rxq->stats_sync);
3360 	u64_stats_inc(&rxq->q_stats.rsc_pkts);
3361 	u64_stats_update_end(&rxq->stats_sync);
3362 
3363 	return 0;
3364 }
3365 
3366 /**
3367  * idpf_rx_hwtstamp - check for an RX timestamp and pass up the stack
3368  * @rxq: pointer to the rx queue that receives the timestamp
3369  * @rx_desc: pointer to rx descriptor containing timestamp
3370  * @skb: skb to put timestamp in
3371  */
3372 static void
3373 idpf_rx_hwtstamp(const struct idpf_rx_queue *rxq,
3374 		 const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
3375 		 struct sk_buff *skb)
3376 {
3377 	u64 cached_time, ts_ns;
3378 	u32 ts_high;
3379 
3380 	if (!(rx_desc->ts_low & VIRTCHNL2_RX_FLEX_TSTAMP_VALID))
3381 		return;
3382 
3383 	cached_time = READ_ONCE(rxq->cached_phc_time);
3384 
3385 	ts_high = le32_to_cpu(rx_desc->ts_high);
3386 	ts_ns = idpf_ptp_tstamp_extend_32b_to_64b(cached_time, ts_high);
3387 
3388 	*skb_hwtstamps(skb) = (struct skb_shared_hwtstamps) {
3389 		.hwtstamp = ns_to_ktime(ts_ns),
3390 	};
3391 }
3392 
3393 /**
3394  * __idpf_rx_process_skb_fields - Populate skb header fields from Rx descriptor
3395  * @rxq: Rx descriptor ring packet is being transacted on
3396  * @skb: pointer to current skb being populated
3397  * @rx_desc: Receive descriptor
3398  *
3399  * This function checks the ring, descriptor, and packet information in
3400  * order to populate the hash, checksum, protocol, and
3401  * other fields within the skb.
3402  *
3403  * Return: 0 on success and error code on failure
3404  */
3405 static int
3406 __idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
3407 			     const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
3408 {
3409 	struct libeth_rx_csum csum_bits;
3410 	struct libeth_rx_pt decoded;
3411 	u16 rx_ptype;
3412 
3413 	rx_ptype = le16_get_bits(rx_desc->ptype_err_fflags0,
3414 				 VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M);
3415 	decoded = rxq->rx_ptype_lkup[rx_ptype];
3416 
3417 	/* process RSS/hash */
3418 	idpf_rx_hash(rxq, skb, rx_desc, decoded);
3419 
3420 	if (idpf_queue_has(PTP, rxq))
3421 		idpf_rx_hwtstamp(rxq, rx_desc, skb);
3422 
3423 	if (le16_get_bits(rx_desc->hdrlen_flags,
3424 			  VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M))
3425 		return idpf_rx_rsc(rxq, skb, rx_desc, decoded);
3426 
3427 	csum_bits = idpf_rx_splitq_extract_csum_bits(rx_desc);
3428 	idpf_rx_csum(rxq, skb, csum_bits, decoded);
3429 
3430 	return 0;
3431 }
3432 
3433 bool idpf_rx_process_skb_fields(struct sk_buff *skb,
3434 				const struct libeth_xdp_buff *xdp,
3435 				struct libeth_rq_napi_stats *rs)
3436 {
3437 	struct idpf_rx_queue *rxq;
3438 
3439 	rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);
3440 
3441 	return !__idpf_rx_process_skb_fields(rxq, skb, xdp->desc);
3442 }
3443 
3444 LIBETH_XDP_DEFINE_START();
3445 LIBETH_XDP_DEFINE_RUN(static idpf_xdp_run_pass, idpf_xdp_run_prog,
3446 		      idpf_xdp_tx_flush_bulk, idpf_rx_process_skb_fields);
3447 LIBETH_XDP_DEFINE_FINALIZE(static idpf_xdp_finalize_rx, idpf_xdp_tx_flush_bulk,
3448 			   idpf_xdp_tx_finalize);
3449 LIBETH_XDP_DEFINE_END();
3450 
3451 /**
3452  * idpf_rx_hsplit_wa - handle header buffer overflows and split errors
3453  * @hdr: Rx buffer for the headers
3454  * @buf: Rx buffer for the payload
3455  * @data_len: number of bytes received to the payload buffer
3456  *
3457  * When a header buffer overflow occurs or the HW was unable to parse the
3458  * packet type to perform header split, the whole frame gets placed to the
3459  * payload buffer. We can't build a valid skb around a payload buffer when
3460  * the header split is active since it doesn't reserve any head- or tailroom.
3461  * In that case, copy either the whole frame when it's short or just the
3462  * Ethernet header to the header buffer to be able to build an skb and adjust
3463  * the data offset in the payload buffer, IOW emulate the header split.
3464  *
3465  * Return: number of bytes copied to the header buffer.
3466  */
3467 static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr,
3468 			     struct libeth_fqe *buf, u32 data_len)
3469 {
3470 	u32 copy = data_len <= L1_CACHE_BYTES ? data_len : ETH_HLEN;
3471 	struct page *hdr_page, *buf_page;
3472 	const void *src;
3473 	void *dst;
3474 
3475 	if (unlikely(netmem_is_net_iov(buf->netmem)) ||
3476 	    !libeth_rx_sync_for_cpu(buf, copy))
3477 		return 0;
3478 
3479 	hdr_page = __netmem_to_page(hdr->netmem);
3480 	buf_page = __netmem_to_page(buf->netmem);
3481 	dst = page_address(hdr_page) + hdr->offset +
3482 		pp_page_to_nmdesc(hdr_page)->pp->p.offset;
3483 	src = page_address(buf_page) + buf->offset +
3484 		pp_page_to_nmdesc(buf_page)->pp->p.offset;
3485 
3486 	memcpy(dst, src, LARGEST_ALIGN(copy));
3487 	buf->offset += copy;
3488 
3489 	return copy;
3490 }
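
/*
 * Worked example (illustrative only): assuming a 64 byte L1_CACHE_BYTES, a
 * 60 byte frame that missed the header split is copied into the header
 * buffer in full (copy = data_len) and the payload part ends up empty,
 * while a 1400 byte frame only has its 14 byte Ethernet header copied
 * (copy = ETH_HLEN) and the payload buffer's offset is advanced past it.
 */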
3491 
3492 /**
3493  * idpf_rx_splitq_test_staterr - tests bits in Rx descriptor
3494  * status and error fields
3495  * @stat_err_field: field from descriptor to test bits in
3496  * @stat_err_bits: value to mask
3497  *
3498  * Return: %true if any of given @stat_err_bits are set, %false otherwise.
3499  */
3500 static bool idpf_rx_splitq_test_staterr(const u8 stat_err_field,
3501 					const u8 stat_err_bits)
3502 {
3503 	return !!(stat_err_field & stat_err_bits);
3504 }
3505 
3506 /**
3507  * idpf_rx_splitq_is_eop - process handling of EOP buffers
3508  * @rx_desc: Rx descriptor for current buffer
3509  *
3510  * Return: %true if the buffer is an EOP buffer, %false otherwise.
3512  */
3513 static bool idpf_rx_splitq_is_eop(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
3514 {
3515 	/* if we are the last buffer then there is nothing else to do */
3516 	return likely(idpf_rx_splitq_test_staterr(rx_desc->status_err0_qw1,
3517 						  IDPF_RXD_EOF_SPLITQ));
3518 }
3519 
3520 /**
3521  * idpf_rx_splitq_clean - Clean completed descriptors from Rx queue
3522  * @rxq: Rx descriptor queue to retrieve receive buffer queue
3523  * @budget: Total limit on number of packets to process
3524  *
3525  * This function provides a "bounce buffer" approach to Rx interrupt
3526  * processing. The advantage to this is that on systems that have
3527  * expensive overhead for IOMMU access this provides a means of avoiding
3528  * it by maintaining the mapping of the page to the system.
3529  *
3530  * Return: amount of work completed
3531  */
3532 static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
3533 {
3534 	struct idpf_buf_queue *rx_bufq = NULL;
3535 	struct libeth_rq_napi_stats rs = { };
3536 	u16 ntc = rxq->next_to_clean;
3537 	LIBETH_XDP_ONSTACK_BUFF(xdp);
3538 	LIBETH_XDP_ONSTACK_BULK(bq);
3539 
3540 	libeth_xdp_tx_init_bulk(&bq, rxq->xdp_prog, rxq->xdp_rxq.dev,
3541 				rxq->xdpsqs, rxq->num_xdp_txq);
3542 	libeth_xdp_init_buff(xdp, &rxq->xdp, &rxq->xdp_rxq);
3543 
3544 	/* Process Rx packets bounded by budget */
3545 	while (likely(rs.packets < budget)) {
3546 		struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
3547 		struct libeth_fqe *hdr, *rx_buf = NULL;
3548 		struct idpf_sw_queue *refillq = NULL;
3549 		struct idpf_rxq_set *rxq_set = NULL;
3550 		unsigned int pkt_len = 0;
3551 		unsigned int hdr_len = 0;
3552 		u16 gen_id, buf_id = 0;
3553 		int bufq_id;
3554 		u8 rxdid;
3555 
3556 		/* get the Rx desc from Rx queue based on 'next_to_clean' */
3557 		rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb;
3558 
3559 		/* if the descriptor isn't done, no work yet to do */
3560 		gen_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
3561 				       VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M);
3562 		if (idpf_queue_has(GEN_CHK, rxq) != gen_id)
3563 			break;
3564 
3565 		dma_rmb();
3566 
3567 		rxdid = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M,
3568 				  rx_desc->rxdid_ucast);
3569 		if (rxdid != VIRTCHNL2_RXDID_2_FLEX_SPLITQ) {
3570 			IDPF_RX_BUMP_NTC(rxq, ntc);
3571 			u64_stats_update_begin(&rxq->stats_sync);
3572 			u64_stats_inc(&rxq->q_stats.bad_descs);
3573 			u64_stats_update_end(&rxq->stats_sync);
3574 			continue;
3575 		}
3576 
3577 		pkt_len = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
3578 					VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M);
3579 
3580 		bufq_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
3581 					VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M);
3582 
3583 		rxq_set = container_of(rxq, struct idpf_rxq_set, rxq);
3584 		refillq = rxq_set->refillq[bufq_id];
3585 
3586 		/* retrieve buffer from the rxq */
3587 		rx_bufq = &rxq->bufq_sets[bufq_id].bufq;
3588 
3589 		buf_id = le16_to_cpu(rx_desc->buf_id);
3590 
3591 		rx_buf = &rx_bufq->buf[buf_id];
3592 
3593 		if (!rx_bufq->hdr_pp)
3594 			goto payload;
3595 
3596 #define __HBO_BIT	VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M
3597 #define __HDR_LEN_MASK	VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M
3598 		if (likely(!(rx_desc->status_err0_qw1 & __HBO_BIT)))
3599 			/* If a header buffer overflow occurs, i.e. the header is
3600 			 * too large to fit in the header split buffer, HW will
3601 			 * put the entire packet, including headers, in the
3602 			 * data/payload buffer.
3603 			 */
3604 			hdr_len = le16_get_bits(rx_desc->hdrlen_flags,
3605 						__HDR_LEN_MASK);
3606 #undef __HDR_LEN_MASK
3607 #undef __HBO_BIT
3608 
3609 		hdr = &rx_bufq->hdr_buf[buf_id];
3610 
3611 		if (unlikely(!hdr_len && !xdp->data)) {
3612 			hdr_len = idpf_rx_hsplit_wa(hdr, rx_buf, pkt_len);
3613 			/* If failed, drop both buffers by setting len to 0 */
3614 			pkt_len -= hdr_len ? : pkt_len;
3615 
3616 			u64_stats_update_begin(&rxq->stats_sync);
3617 			u64_stats_inc(&rxq->q_stats.hsplit_buf_ovf);
3618 			u64_stats_update_end(&rxq->stats_sync);
3619 		}
3620 
3621 		if (libeth_xdp_process_buff(xdp, hdr, hdr_len))
3622 			rs.hsplit++;
3623 
3624 		hdr->netmem = 0;
3625 
3626 payload:
3627 		libeth_xdp_process_buff(xdp, rx_buf, pkt_len);
3628 		rx_buf->netmem = 0;
3629 
3630 		idpf_post_buf_refill(refillq, buf_id);
3631 		IDPF_RX_BUMP_NTC(rxq, ntc);
3632 
3633 		/* skip until the EOP descriptor, or if the frame was dropped */
3634 		if (!idpf_rx_splitq_is_eop(rx_desc) || unlikely(!xdp->data))
3635 			continue;
3636 
3637 		idpf_xdp_run_pass(xdp, &bq, rxq->napi, &rs, rx_desc);
3638 	}
3639 
3640 	idpf_xdp_finalize_rx(&bq);
3641 
3642 	rxq->next_to_clean = ntc;
3643 	libeth_xdp_save_buff(&rxq->xdp, xdp);
3644 
3645 	u64_stats_update_begin(&rxq->stats_sync);
3646 	u64_stats_add(&rxq->q_stats.packets, rs.packets);
3647 	u64_stats_add(&rxq->q_stats.bytes, rs.bytes);
3648 	u64_stats_add(&rxq->q_stats.hsplit_pkts, rs.hsplit);
3649 	u64_stats_update_end(&rxq->stats_sync);
3650 
3651 	return rs.packets;
3652 }
3653 
3654 /**
3655  * idpf_rx_update_bufq_desc - Update buffer queue descriptor
3656  * @bufq: Pointer to the buffer queue
3657  * @buf_id: buffer ID
3658  * @buf_desc: Buffer queue descriptor
3659  *
3660  * Return: 0 on success and negative on failure.
3661  */
3662 static int idpf_rx_update_bufq_desc(struct idpf_buf_queue *bufq, u32 buf_id,
3663 				    struct virtchnl2_splitq_rx_buf_desc *buf_desc)
3664 {
3665 	struct libeth_fq_fp fq = {
3666 		.pp		= bufq->pp,
3667 		.fqes		= bufq->buf,
3668 		.truesize	= bufq->truesize,
3669 		.count		= bufq->desc_count,
3670 	};
3671 	dma_addr_t addr;
3672 
3673 	addr = libeth_rx_alloc(&fq, buf_id);
3674 	if (addr == DMA_MAPPING_ERROR)
3675 		return -ENOMEM;
3676 
3677 	buf_desc->pkt_addr = cpu_to_le64(addr);
3678 	buf_desc->qword0.buf_id = cpu_to_le16(buf_id);
3679 
3680 	if (!idpf_queue_has(HSPLIT_EN, bufq))
3681 		return 0;
3682 
3683 	fq.pp = bufq->hdr_pp;
3684 	fq.fqes = bufq->hdr_buf;
3685 	fq.truesize = bufq->hdr_truesize;
3686 
3687 	addr = libeth_rx_alloc(&fq, buf_id);
3688 	if (addr == DMA_MAPPING_ERROR)
3689 		return -ENOMEM;
3690 
3691 	buf_desc->hdr_addr = cpu_to_le64(addr);
3692 
3693 	return 0;
3694 }
3695 
3696 /**
3697  * idpf_rx_clean_refillq - Clean refill queue buffers
3698  * @bufq: buffer queue to post buffers back to
3699  * @refillq: refill queue to clean
3700  *
3701  * Pull buffer IDs off the refill queue and repost the buffers to hardware.
3702  */
3703 static void idpf_rx_clean_refillq(struct idpf_buf_queue *bufq,
3704 				  struct idpf_sw_queue *refillq)
3705 {
3706 	struct virtchnl2_splitq_rx_buf_desc *buf_desc;
3707 	u16 bufq_nta = bufq->next_to_alloc;
3708 	u16 ntc = refillq->next_to_clean;
3709 	int cleaned = 0;
3710 
3711 	buf_desc = &bufq->split_buf[bufq_nta];
3712 
3713 	/* make sure we stop at ring wrap in the unlikely case the ring is full */
3714 	while (likely(cleaned < refillq->desc_count)) {
3715 		u32 buf_id, refill_desc = refillq->ring[ntc];
3716 		bool failure;
3717 
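		/* The refill ring uses the same generation-bit scheme as the
		 * descriptor rings: a GEN mismatch means the Rx clean path
		 * has not published this entry yet.
		 */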
3718 		if (idpf_queue_has(RFL_GEN_CHK, refillq) !=
3719 		    !!(refill_desc & IDPF_RFL_BI_GEN_M))
3720 			break;
3721 
3722 		buf_id = FIELD_GET(IDPF_RFL_BI_BUFID_M, refill_desc);
3723 		failure = idpf_rx_update_bufq_desc(bufq, buf_id, buf_desc);
3724 		if (failure)
3725 			break;
3726 
3727 		if (unlikely(++ntc == refillq->desc_count)) {
3728 			idpf_queue_change(RFL_GEN_CHK, refillq);
3729 			ntc = 0;
3730 		}
3731 
3732 		if (unlikely(++bufq_nta == bufq->desc_count)) {
3733 			buf_desc = &bufq->split_buf[0];
3734 			bufq_nta = 0;
3735 		} else {
3736 			buf_desc++;
3737 		}
3738 
3739 		cleaned++;
3740 	}
3741 
3742 	if (!cleaned)
3743 		return;
3744 
3745 	/* We want to limit how many transactions on the bus we trigger with
3746 	 * tail writes so we only do it in strides. It's also important we
3747 	 * align the write to a multiple of 8 as required by HW.
3748 	 */
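	/* The expression below is the ring distance from next_to_use to
	 * bufq_nta, i.e. how many buffers have been prepared since the last
	 * tail bump, accounting for wrap.
	 */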
3749 	if (((bufq->next_to_use <= bufq_nta ? 0 : bufq->desc_count) +
3750 	    bufq_nta - bufq->next_to_use) >= IDPF_RX_BUF_POST_STRIDE)
3751 		idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq_nta,
3752 						       IDPF_RX_BUF_POST_STRIDE));
3753 
3754 	/* update next_to_clean and next_to_alloc now that buffers are posted */
3755 	refillq->next_to_clean = ntc;
3756 	bufq->next_to_alloc = bufq_nta;
3757 }
3758 
3759 /**
3760  * idpf_rx_clean_refillq_all - Clean all refill queues
3761  * @bufq: buffer queue with refill queues
3762  * @nid: ID of the closest NUMA node with memory
3763  *
3764  * Iterates through all refill queues assigned to the buffer queue for this
3765  * vector and posts the returned buffers back to hardware.
3767  */
3768 static void idpf_rx_clean_refillq_all(struct idpf_buf_queue *bufq, int nid)
3769 {
3770 	struct idpf_bufq_set *bufq_set;
3771 	int i;
3772 
3773 	page_pool_nid_changed(bufq->pp, nid);
3774 	if (bufq->hdr_pp)
3775 		page_pool_nid_changed(bufq->hdr_pp, nid);
3776 
3777 	bufq_set = container_of(bufq, struct idpf_bufq_set, bufq);
3778 	for (i = 0; i < bufq_set->num_refillqs; i++)
3779 		idpf_rx_clean_refillq(bufq, &bufq_set->refillqs[i]);
3780 }
3781 
3782 /**
3783  * idpf_vport_intr_clean_queues - MSI-X mode interrupt handler
3784  * @irq: interrupt number
3785  * @data: pointer to a q_vector
3786  *
3787  * Return: always IRQ_HANDLED
3788  */
3789 static irqreturn_t idpf_vport_intr_clean_queues(int __always_unused irq,
3790 						void *data)
3791 {
3792 	struct idpf_q_vector *q_vector = (struct idpf_q_vector *)data;
3793 
3794 	q_vector->total_events++;
3795 	napi_schedule_irqoff(&q_vector->napi);
3796 
3797 	return IRQ_HANDLED;
3798 }
3799 
3800 /**
3801  * idpf_vport_intr_napi_del_all - Unregister napi for all q_vectors in vport
3802  * @rsrc: pointer to queue and vector resources
3803  */
3804 static void idpf_vport_intr_napi_del_all(struct idpf_q_vec_rsrc *rsrc)
3805 {
3806 	for (u16 v_idx = 0; v_idx < rsrc->num_q_vectors; v_idx++)
3807 		netif_napi_del(&rsrc->q_vectors[v_idx].napi);
3808 }
3809 
3810 /**
3811  * idpf_vport_intr_napi_dis_all - Disable NAPI for all q_vectors in the vport
3812  * @rsrc: pointer to queue and vector resources
3813  */
3814 static void idpf_vport_intr_napi_dis_all(struct idpf_q_vec_rsrc *rsrc)
3815 {
3816 	for (u16 v_idx = 0; v_idx < rsrc->num_q_vectors; v_idx++)
3817 		napi_disable(&rsrc->q_vectors[v_idx].napi);
3818 }
3819 
3820 /**
3821  * idpf_vport_intr_rel - Free memory allocated for interrupt vectors
3822  * @rsrc: pointer to queue and vector resources
3823  *
3824  * Free the memory allocated for interrupt vectors associated with a vport
3825  */
3826 void idpf_vport_intr_rel(struct idpf_q_vec_rsrc *rsrc)
3827 {
3828 	for (u16 v_idx = 0; v_idx < rsrc->num_q_vectors; v_idx++) {
3829 		struct idpf_q_vector *q_vector = &rsrc->q_vectors[v_idx];
3830 
3831 		kfree(q_vector->xsksq);
3832 		q_vector->xsksq = NULL;
3833 		kfree(q_vector->complq);
3834 		q_vector->complq = NULL;
3835 		kfree(q_vector->bufq);
3836 		q_vector->bufq = NULL;
3837 		kfree(q_vector->tx);
3838 		q_vector->tx = NULL;
3839 		kfree(q_vector->rx);
3840 		q_vector->rx = NULL;
3841 	}
3842 
3843 	kfree(rsrc->q_vectors);
3844 	rsrc->q_vectors = NULL;
3845 }
3846 
3847 static void idpf_q_vector_set_napi(struct idpf_q_vector *q_vector, bool link)
3848 {
3849 	struct napi_struct *napi = link ? &q_vector->napi : NULL;
3850 	struct net_device *dev = q_vector->vport->netdev;
3851 
3852 	for (u32 i = 0; i < q_vector->num_rxq; i++)
3853 		netif_queue_set_napi(dev, q_vector->rx[i]->idx,
3854 				     NETDEV_QUEUE_TYPE_RX, napi);
3855 
3856 	for (u32 i = 0; i < q_vector->num_txq; i++)
3857 		netif_queue_set_napi(dev, q_vector->tx[i]->idx,
3858 				     NETDEV_QUEUE_TYPE_TX, napi);
3859 }
3860 
3861 /**
3862  * idpf_vport_intr_rel_irq - Free the IRQ association with the OS
3863  * @vport: main vport structure
3864  * @rsrc: pointer to queue and vector resources
3865  */
3866 static void idpf_vport_intr_rel_irq(struct idpf_vport *vport,
3867 				    struct idpf_q_vec_rsrc *rsrc)
3868 {
3869 	struct idpf_adapter *adapter = vport->adapter;
3870 
3871 	for (u16 vector = 0; vector < rsrc->num_q_vectors; vector++) {
3872 		struct idpf_q_vector *q_vector = &rsrc->q_vectors[vector];
3873 		int irq_num, vidx;
3874 
3875 		/* free only the irqs that were actually requested */
3876 		if (!q_vector)
3877 			continue;
3878 
3879 		vidx = rsrc->q_vector_idxs[vector];
3880 		irq_num = adapter->msix_entries[vidx].vector;
3881 
3882 		idpf_q_vector_set_napi(q_vector, false);
3883 		kfree(free_irq(irq_num, q_vector));
3884 	}
3885 }
3886 
3887 /**
3888  * idpf_vport_intr_dis_irq_all - Disable all interrupts
3889  * @rsrc: pointer to queue and vector resources
3890  */
3891 static void idpf_vport_intr_dis_irq_all(struct idpf_q_vec_rsrc *rsrc)
3892 {
3893 	struct idpf_q_vector *q_vector = rsrc->q_vectors;
3894 
3895 	writel(0, rsrc->noirq_dyn_ctl);
3896 
3897 	for (u16 q_idx = 0; q_idx < rsrc->num_q_vectors; q_idx++)
3898 		writel(0, q_vector[q_idx].intr_reg.dyn_ctl);
3899 }
3900 
3901 /**
3902  * idpf_vport_intr_buildreg_itr - Build dyn_ctl value to enable interrupt generation
3903  * @q_vector: pointer to q_vector
3904  *
3905  * Return: value to be written back to HW to enable interrupt generation
3906  */
3907 static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector)
3908 {
3909 	u32 itr_val = q_vector->intr_reg.dyn_ctl_intena_m;
3910 	int type = IDPF_NO_ITR_UPDATE_IDX;
3911 	u16 itr = 0;
3912 
3913 	if (q_vector->wb_on_itr) {
3914 		/*
3915 		 * Trigger a software interrupt when exiting wb_on_itr, to make
3916 		 * sure we catch any pending write backs that might have been
3917 		 * missed due to interrupt state transition.
3918 		 */
3919 		itr_val |= q_vector->intr_reg.dyn_ctl_swint_trig_m |
3920 			   q_vector->intr_reg.dyn_ctl_sw_itridx_ena_m;
3921 		type = IDPF_SW_ITR_UPDATE_IDX;
3922 		itr = IDPF_ITR_20K;
3923 	}
3924 
3925 	itr &= IDPF_ITR_MASK;
3926 	/* Don't clear PBA because that can cause lost interrupts that
3927 	 * came in while we were cleaning/polling
3928 	 */
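	/* The interval field is in 2 usec units, so shifting by
	 * (dyn_ctl_intrvl_s - 1) both positions the value in the field and
	 * converts from usecs (assumption based on the usual Intel dyn_ctl
	 * layout).
	 */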
3929 	itr_val |= (type << q_vector->intr_reg.dyn_ctl_itridx_s) |
3930 		   (itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1));
3931 
3932 	return itr_val;
3933 }
3934 
3935 /**
3936  * idpf_update_dim_sample - Update dim sample with packets and bytes
3937  * @q_vector: the vector associated with the interrupt
3938  * @dim_sample: dim sample to update
3939  * @dim: dim instance structure
3940  * @packets: total packets
3941  * @bytes: total bytes
3942  *
3943  * Update the dim sample with the packets and bytes which are passed to this
3944  * function. Set the dim state appropriately if the dim settings get stale.
3945  */
3946 static void idpf_update_dim_sample(struct idpf_q_vector *q_vector,
3947 				   struct dim_sample *dim_sample,
3948 				   struct dim *dim, u64 packets, u64 bytes)
3949 {
3950 	dim_update_sample(q_vector->total_events, packets, bytes, dim_sample);
3951 	dim_sample->comp_ctr = 0;
3952 
3953 	/* if dim settings get stale, like when not updated for 1 second or
3954 	 * longer, force it to start again. This addresses the frequent case
3955 	 * of an idle queue being switched to by the scheduler.
3956 	 */
3957 	if (ktime_ms_delta(dim_sample->time, dim->start_sample.time) >= HZ)
3958 		dim->state = DIM_START_MEASURE;
3959 }
3960 
3961 /**
3962  * idpf_net_dim - Update net DIM algorithm
3963  * @q_vector: the vector associated with the interrupt
3964  *
3965  * Create a DIM sample and notify net_dim() so that it can possibly decide
3966  * a new ITR value based on incoming packets, bytes, and interrupts.
3967  *
3968  * This function is a no-op if the queue is not configured for dynamic ITR.
3969  */
3970 static void idpf_net_dim(struct idpf_q_vector *q_vector)
3971 {
3972 	struct dim_sample dim_sample = { };
3973 	u64 packets, bytes, pkts, bts;
3974 	u32 i;
3975 
3976 	if (!IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode))
3977 		goto check_rx_itr;
3978 
3979 	for (i = 0, packets = 0, bytes = 0; i < q_vector->num_txq; i++) {
3980 		struct idpf_tx_queue *txq = q_vector->tx[i];
3981 		unsigned int start;
3982 
3983 		do {
3984 			start = u64_stats_fetch_begin(&txq->stats_sync);
3985 			pkts = u64_stats_read(&txq->q_stats.packets);
3986 			bts = u64_stats_read(&txq->q_stats.bytes);
3987 		} while (u64_stats_fetch_retry(&txq->stats_sync, start));
3988 
3989 		packets += pkts;
3990 		bytes += bts;
3991 	}
3992 
3993 	idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->tx_dim,
3994 			       packets, bytes);
3995 	net_dim(&q_vector->tx_dim, &dim_sample);
3996 
3997 check_rx_itr:
3998 	if (!IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode))
3999 		return;
4000 
4001 	for (i = 0, packets = 0, bytes = 0; i < q_vector->num_rxq; i++) {
4002 		struct idpf_rx_queue *rxq = q_vector->rx[i];
4003 		unsigned int start;
4004 
4005 		do {
4006 			start = u64_stats_fetch_begin(&rxq->stats_sync);
4007 			pkts = u64_stats_read(&rxq->q_stats.packets);
4008 			bts = u64_stats_read(&rxq->q_stats.bytes);
4009 		} while (u64_stats_fetch_retry(&rxq->stats_sync, start));
4010 
4011 		packets += pkts;
4012 		bytes += bts;
4013 	}
4014 
4015 	idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->rx_dim,
4016 			       packets, bytes);
4017 	net_dim(&q_vector->rx_dim, &dim_sample);
4018 }
4019 
4020 /**
4021  * idpf_vport_intr_update_itr_ena_irq - Update itr and re-enable MSIX interrupt
4022  * @q_vector: q_vector for which itr is being updated and interrupt enabled
4023  *
4024  * Update the net_dim() algorithm and re-enable the interrupt associated with
4025  * this vector.
4026  */
4027 void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector)
4028 {
4029 	u32 intval;
4030 
4031 	/* net_dim() updates ITR out-of-band using a work item */
4032 	idpf_net_dim(q_vector);
4033 
4034 	intval = idpf_vport_intr_buildreg_itr(q_vector);
4035 	q_vector->wb_on_itr = false;
4036 
4037 	writel(intval, q_vector->intr_reg.dyn_ctl);
4038 }
4039 
4040 /**
4041  * idpf_vport_intr_req_irq - Request IRQs from the OS for the vport's MSI-X vectors
4042  * @vport: main vport structure
4043  * @rsrc: pointer to queue and vector resources
4044  *
4045  * Return: 0 on success, negative on failure
4046  */
4047 static int idpf_vport_intr_req_irq(struct idpf_vport *vport,
4048 				   struct idpf_q_vec_rsrc *rsrc)
4049 {
4050 	struct idpf_adapter *adapter = vport->adapter;
4051 	const char *drv_name, *if_name, *vec_name;
4052 	int vector, err, irq_num, vidx;
4053 
4054 	drv_name = dev_driver_string(&adapter->pdev->dev);
4055 	if_name = netdev_name(vport->netdev);
4056 
4057 	for (vector = 0; vector < rsrc->num_q_vectors; vector++) {
4058 		struct idpf_q_vector *q_vector = &rsrc->q_vectors[vector];
4059 		char *name;
4060 
4061 		vidx = rsrc->q_vector_idxs[vector];
4062 		irq_num = adapter->msix_entries[vidx].vector;
4063 
4064 		if (q_vector->num_rxq && q_vector->num_txq)
4065 			vec_name = "TxRx";
4066 		else if (q_vector->num_rxq)
4067 			vec_name = "Rx";
4068 		else if (q_vector->num_txq)
4069 			vec_name = "Tx";
4070 		else
4071 			continue;
4072 
4073 		name = kasprintf(GFP_KERNEL, "%s-%s-%s-%d", drv_name, if_name,
4074 				 vec_name, vidx);
4075 
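		/* Ownership of @name passes to the IRQ core here; it is
		 * recovered and freed later via kfree(free_irq()).
		 */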
4076 		err = request_irq(irq_num, idpf_vport_intr_clean_queues, 0,
4077 				  name, q_vector);
4078 		if (err) {
4079 			netdev_err(vport->netdev,
4080 				   "Request_irq failed, error: %d\n", err);
4081 			goto free_q_irqs;
4082 		}
4083 
4084 		idpf_q_vector_set_napi(q_vector, true);
4085 	}
4086 
4087 	return 0;
4088 
4089 free_q_irqs:
4090 	while (--vector >= 0) {
4091 		vidx = rsrc->q_vector_idxs[vector];
4092 		irq_num = adapter->msix_entries[vidx].vector;
4093 		kfree(free_irq(irq_num, &rsrc->q_vectors[vector]));
4094 	}
4095 
4096 	return err;
4097 }
4098 
4099 /**
4100  * idpf_vport_intr_write_itr - Write ITR value to the ITR register
4101  * @q_vector: q_vector structure
4102  * @itr: Interrupt throttling rate
4103  * @tx: Tx or Rx ITR
4104  */
4105 void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector, u16 itr, bool tx)
4106 {
4107 	struct idpf_intr_reg *intr_reg;
4108 
4109 	if (tx && !q_vector->tx)
4110 		return;
4111 	if (!tx && !q_vector->rx)
4112 		return;
4113 
4114 	intr_reg = &q_vector->intr_reg;
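	/* Convert the microsecond ITR value into register units (hardware
	 * granularity) before writing it to the per-queue ITR register.
	 */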
4115 	writel(ITR_REG_ALIGN(itr) >> IDPF_ITR_GRAN_S,
4116 	       tx ? intr_reg->tx_itr : intr_reg->rx_itr);
4117 }
4118 
4119 /**
4120  * idpf_vport_intr_ena_irq_all - Enable IRQ for the given vport
4121  * @vport: main vport structure
4122  * @rsrc: pointer to queue and vector resources
4123  */
4124 static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport,
4125 					struct idpf_q_vec_rsrc *rsrc)
4126 {
4127 	bool dynamic;
4128 	u16 itr;
4129 
4130 	for (u16 q_idx = 0; q_idx < rsrc->num_q_vectors; q_idx++) {
4131 		struct idpf_q_vector *qv = &rsrc->q_vectors[q_idx];
4132 
4133 		/* Set the initial ITR values */
4134 		if (qv->num_txq) {
4135 			dynamic = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode);
4136 			itr = vport->tx_itr_profile[qv->tx_dim.profile_ix];
4137 			idpf_vport_intr_write_itr(qv, dynamic ?
4138 						  itr : qv->tx_itr_value,
4139 						  true);
4140 		}
4141 
4142 		if (qv->num_rxq) {
4143 			dynamic = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode);
4144 			itr = vport->rx_itr_profile[qv->rx_dim.profile_ix];
4145 			idpf_vport_intr_write_itr(qv, dynamic ?
4146 						  itr : qv->rx_itr_value,
4147 						  false);
4148 		}
4149 
4150 		if (qv->num_txq || qv->num_rxq)
4151 			idpf_vport_intr_update_itr_ena_irq(qv);
4152 	}
4153 
4154 	writel(rsrc->noirq_dyn_ctl_ena, rsrc->noirq_dyn_ctl);
4155 }
4156 
4157 /**
4158  * idpf_vport_intr_deinit - Release all vector associations for the vport
4159  * @vport: main vport structure
4160  * @rsrc: pointer to queue and vector resources
4161  */
4162 void idpf_vport_intr_deinit(struct idpf_vport *vport,
4163 			    struct idpf_q_vec_rsrc *rsrc)
4164 {
4165 	idpf_vport_intr_dis_irq_all(rsrc);
4166 	idpf_vport_intr_napi_dis_all(rsrc);
4167 	idpf_vport_intr_napi_del_all(rsrc);
4168 	idpf_vport_intr_rel_irq(vport, rsrc);
4169 }
4170 
4171 /**
4172  * idpf_tx_dim_work - Callback from the dim library to apply a new Tx ITR
4173  * @work: work queue structure
4174  */
4175 static void idpf_tx_dim_work(struct work_struct *work)
4176 {
4177 	struct idpf_q_vector *q_vector;
4178 	struct idpf_vport *vport;
4179 	struct dim *dim;
4180 	u16 itr;
4181 
4182 	dim = container_of(work, struct dim, work);
4183 	q_vector = container_of(dim, struct idpf_q_vector, tx_dim);
4184 	vport = q_vector->vport;
4185 
4186 	if (dim->profile_ix >= ARRAY_SIZE(vport->tx_itr_profile))
4187 		dim->profile_ix = ARRAY_SIZE(vport->tx_itr_profile) - 1;
4188 
4189 	/* look up the values in our local table */
4190 	itr = vport->tx_itr_profile[dim->profile_ix];
4191 
4192 	idpf_vport_intr_write_itr(q_vector, itr, true);
4193 
4194 	dim->state = DIM_START_MEASURE;
4195 }
4196 
4197 /**
4198  * idpf_rx_dim_work - Callback from the dim library to apply a new Rx ITR
4199  * @work: work queue structure
4200  */
4201 static void idpf_rx_dim_work(struct work_struct *work)
4202 {
4203 	struct idpf_q_vector *q_vector;
4204 	struct idpf_vport *vport;
4205 	struct dim *dim;
4206 	u16 itr;
4207 
4208 	dim = container_of(work, struct dim, work);
4209 	q_vector = container_of(dim, struct idpf_q_vector, rx_dim);
4210 	vport = q_vector->vport;
4211 
4212 	if (dim->profile_ix >= ARRAY_SIZE(vport->rx_itr_profile))
4213 		dim->profile_ix = ARRAY_SIZE(vport->rx_itr_profile) - 1;
4214 
4215 	/* look up the values in our local table */
4216 	itr = vport->rx_itr_profile[dim->profile_ix];
4217 
4218 	idpf_vport_intr_write_itr(q_vector, itr, false);
4219 
4220 	dim->state = DIM_START_MEASURE;
4221 }
4222 
4223 /**
4224  * idpf_init_dim - Set up dynamic interrupt moderation
4225  * @qv: q_vector structure
4226  */
4227 static void idpf_init_dim(struct idpf_q_vector *qv)
4228 {
4229 	INIT_WORK(&qv->tx_dim.work, idpf_tx_dim_work);
4230 	qv->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4231 	qv->tx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX;
4232 
4233 	INIT_WORK(&qv->rx_dim.work, idpf_rx_dim_work);
4234 	qv->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4235 	qv->rx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX;
4236 }
4237 
4238 /**
4239  * idpf_vport_intr_napi_ena_all - Enable NAPI for all q_vectors in the vport
4240  * @rsrc: pointer to queue and vector resources
4241  */
4242 static void idpf_vport_intr_napi_ena_all(struct idpf_q_vec_rsrc *rsrc)
4243 {
4244 	for (u16 q_idx = 0; q_idx < rsrc->num_q_vectors; q_idx++) {
4245 		struct idpf_q_vector *q_vector = &rsrc->q_vectors[q_idx];
4246 
4247 		idpf_init_dim(q_vector);
4248 		napi_enable(&q_vector->napi);
4249 	}
4250 }
4251 
4252 /**
4253  * idpf_tx_splitq_clean_all - Clean completion queues
4254  * @q_vec: queue vector
4255  * @budget: Used to determine if we are in netpoll
4256  * @cleaned: returns number of packets cleaned
4257  *
4258  * Return: %true if the clean is complete, %false otherwise
4259  */
4260 static bool idpf_tx_splitq_clean_all(struct idpf_q_vector *q_vec,
4261 				     int budget, int *cleaned)
4262 {
4263 	u16 num_complq = q_vec->num_complq;
4264 	bool clean_complete = true;
4265 	int i, budget_per_q;
4266 
4267 	if (unlikely(!num_complq))
4268 		return true;
4269 
4270 	budget_per_q = DIV_ROUND_UP(budget, num_complq);
4271 
4272 	for (i = 0; i < num_complq; i++)
4273 		clean_complete &= idpf_tx_clean_complq(q_vec->complq[i],
4274 						       budget_per_q, cleaned);
4275 
4276 	return clean_complete;
4277 }
4278 
4279 /**
4280  * idpf_rx_splitq_clean_all - Clean all Rx queues
4281  * @q_vec: queue vector
4282  * @budget: Used to determine if we are in netpoll
4283  * @cleaned: returns number of packets cleaned
4284  *
4285  * Return: %true if the clean is complete, %false otherwise
4286  */
4287 static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
4288 				     int *cleaned)
4289 {
4290 	u16 num_rxq = q_vec->num_rxq;
4291 	bool clean_complete = true;
4292 	int pkts_cleaned = 0;
4293 	int i, budget_per_q;
4294 	int nid;
4295 
4296 	/* We attempt to distribute budget to each Rx queue fairly, but don't
4297 	 * allow the budget to go below 1 because that would exit polling early.
4298 	 */
4299 	budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0;
4300 	for (i = 0; i < num_rxq; i++) {
4301 		struct idpf_rx_queue *rxq = q_vec->rx[i];
4302 		int pkts_cleaned_per_q;
4303 
4304 		pkts_cleaned_per_q = idpf_queue_has(XSK, rxq) ?
4305 				     idpf_xskrq_poll(rxq, budget_per_q) :
4306 				     idpf_rx_splitq_clean(rxq, budget_per_q);
4307 		/* if we clean as many as budgeted, we must not be done */
4308 		if (pkts_cleaned_per_q >= budget_per_q)
4309 			clean_complete = false;
4310 		pkts_cleaned += pkts_cleaned_per_q;
4311 	}
4312 	*cleaned = pkts_cleaned;
4313 
4314 	nid = numa_mem_id();
4315 
4316 	for (i = 0; i < q_vec->num_bufq; i++) {
4317 		if (!idpf_queue_has(XSK, q_vec->bufq[i]))
4318 			idpf_rx_clean_refillq_all(q_vec->bufq[i], nid);
4319 	}
4320 
4321 	return clean_complete;
4322 }
4323 
4324 /**
4325  * idpf_vport_splitq_napi_poll - NAPI handler
4326  * @napi: struct from which you get q_vector
4327  * @budget: budget provided by stack
4328  *
4329  * Return: how many packets were cleaned
4330  */
4331 static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
4332 {
4333 	struct idpf_q_vector *q_vector =
4334 				container_of(napi, struct idpf_q_vector, napi);
4335 	bool clean_complete = true;
4336 	int work_done = 0;
4337 
4338 	/* Handle case where we are called by netpoll with a budget of 0 */
4339 	if (unlikely(!budget)) {
4340 		idpf_tx_splitq_clean_all(q_vector, budget, &work_done);
4341 
4342 		return 0;
4343 	}
4344 
4345 	for (u32 i = 0; i < q_vector->num_xsksq; i++)
4346 		clean_complete &= idpf_xsk_xmit(q_vector->xsksq[i]);
4347 
4348 	clean_complete &= idpf_tx_splitq_clean_all(q_vector, budget,
4349 						   &work_done);
4350 	clean_complete &= idpf_rx_splitq_clean_all(q_vector, budget,
4351 						   &work_done);
4352 
4353 	/* If work not completed, return budget and polling will return */
4354 	if (!clean_complete) {
4355 		idpf_vport_intr_set_wb_on_itr(q_vector);
4356 		return budget;
4357 	}
4358 
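	/* Returning a count equal to the full budget would tell NAPI we are
	 * not done, so cap work_done at budget - 1 before completing.
	 */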
4359 	work_done = min_t(int, work_done, budget - 1);
4360 
4361 	/* Exit the polling mode, but don't re-enable interrupts if stack might
4362 	 * poll us due to busy-polling
4363 	 */
4364 	if (napi_complete_done(napi, work_done))
4365 		idpf_vport_intr_update_itr_ena_irq(q_vector);
4366 	else
4367 		idpf_vport_intr_set_wb_on_itr(q_vector);
4368 
4369 	return work_done;
4370 }
4371 
4372 /**
4373  * idpf_vport_intr_map_vector_to_qs - Map vectors to queues
4374  * @vport: virtual port
4375  * @rsrc: pointer to queue and vector resources
4376  *
4377  * Map interrupt vectors to the Rx, buffer, Tx, and completion queues
4378  */
4379 static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport,
4380 					     struct idpf_q_vec_rsrc *rsrc)
4381 {
4382 	u16 num_txq_grp = rsrc->num_txq_grp - vport->num_xdp_txq;
4383 	bool split = idpf_is_queue_model_split(rsrc->rxq_model);
4384 	struct idpf_rxq_group *rx_qgrp;
4385 	struct idpf_txq_group *tx_qgrp;
4386 	u32 q_index;
4387 
4388 	for (unsigned int i = 0, qv_idx = 0; i < rsrc->num_rxq_grp; i++) {
4389 		u16 num_rxq;
4390 
4391 		if (qv_idx >= rsrc->num_q_vectors)
4392 			qv_idx = 0;
4393 
4394 		rx_qgrp = &rsrc->rxq_grps[i];
4395 		if (split)
4396 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
4397 		else
4398 			num_rxq = rx_qgrp->singleq.num_rxq;
4399 
4400 		for (u32 j = 0; j < num_rxq; j++) {
4401 			struct idpf_rx_queue *q;
4402 
4403 			if (split)
4404 				q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
4405 			else
4406 				q = rx_qgrp->singleq.rxqs[j];
4407 			q->q_vector = &rsrc->q_vectors[qv_idx];
4408 			q_index = q->q_vector->num_rxq;
4409 			q->q_vector->rx[q_index] = q;
4410 			q->q_vector->num_rxq++;
4411 
4412 			if (split)
4413 				q->napi = &q->q_vector->napi;
4414 		}
4415 
4416 		if (split) {
4417 			for (u32 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
4418 				struct idpf_buf_queue *bufq;
4419 
4420 				bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
4421 				bufq->q_vector = &rsrc->q_vectors[qv_idx];
4422 				q_index = bufq->q_vector->num_bufq;
4423 				bufq->q_vector->bufq[q_index] = bufq;
4424 				bufq->q_vector->num_bufq++;
4425 			}
4426 		}
4427 
4428 		qv_idx++;
4429 	}
4430 
4431 	split = idpf_is_queue_model_split(rsrc->txq_model);
4432 
4433 	for (unsigned int i = 0, qv_idx = 0; i < num_txq_grp; i++) {
4434 		u16 num_txq;
4435 
4436 		if (qv_idx >= rsrc->num_q_vectors)
4437 			qv_idx = 0;
4438 
4439 		tx_qgrp = &rsrc->txq_grps[i];
4440 		num_txq = tx_qgrp->num_txq;
4441 
4442 		for (u32 j = 0; j < num_txq; j++) {
4443 			struct idpf_tx_queue *q;
4444 
4445 			q = tx_qgrp->txqs[j];
4446 			q->q_vector = &rsrc->q_vectors[qv_idx];
4447 			q->q_vector->tx[q->q_vector->num_txq++] = q;
4448 		}
4449 
4450 		if (split) {
4451 			struct idpf_compl_queue *q = tx_qgrp->complq;
4452 
4453 			q->q_vector = &rsrc->q_vectors[qv_idx];
4454 			q->q_vector->complq[q->q_vector->num_complq++] = q;
4455 		}
4456 
4457 		qv_idx++;
4458 	}
4459 
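	/* XDP Tx queues were excluded from the regular Tx walk above. XSK
	 * queues are additionally attached to the vector of the Rx queue at
	 * the same index so their work runs in that queue's NAPI context.
	 */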
4460 	for (unsigned int i = 0; i < vport->num_xdp_txq; i++) {
4461 		struct idpf_tx_queue *xdpsq;
4462 		struct idpf_q_vector *qv;
4463 
4464 		xdpsq = vport->txqs[rsrc->xdp_txq_offset + i];
4465 		if (!idpf_queue_has(XSK, xdpsq))
4466 			continue;
4467 
4468 		qv = idpf_find_rxq_vec(vport, i);
4469 		idpf_xsk_init_wakeup(qv);
4470 
4471 		xdpsq->q_vector = qv;
4472 		qv->xsksq[qv->num_xsksq++] = xdpsq;
4473 	}
4474 }
4475 
4476 /**
4477  * idpf_vport_intr_init_vec_idx - Initialize the vector indexes
4478  * @vport: virtual port
4479  * @rsrc: pointer to queue and vector resources
4480  *
4481  * Initialize vector indexes with values returned over mailbox.
4482  *
4483  * Return: 0 on success, negative on failure
4484  */
4485 static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport,
4486 					struct idpf_q_vec_rsrc *rsrc)
4487 {
4488 	struct idpf_adapter *adapter = vport->adapter;
4489 	struct virtchnl2_alloc_vectors *ac;
4490 	u16 *vecids, total_vecs;
4491 	int i;
4492 
4493 	ac = adapter->req_vec_chunks;
4494 	if (!ac) {
4495 		for (i = 0; i < rsrc->num_q_vectors; i++)
4496 			rsrc->q_vectors[i].v_idx = rsrc->q_vector_idxs[i];
4497 
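		/* The loop leaves i == num_q_vectors; the entry after the
		 * per-queue vectors is reserved for the "no IRQ" vector.
		 */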
4498 		rsrc->noirq_v_idx = rsrc->q_vector_idxs[i];
4499 
4500 		return 0;
4501 	}
4502 
4503 	total_vecs = idpf_get_reserved_vecs(adapter);
4504 	vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL);
4505 	if (!vecids)
4506 		return -ENOMEM;
4507 
4508 	idpf_get_vec_ids(adapter, vecids, total_vecs, &ac->vchunks);
4509 
4510 	for (i = 0; i < rsrc->num_q_vectors; i++)
4511 		rsrc->q_vectors[i].v_idx = vecids[rsrc->q_vector_idxs[i]];
4512 
4513 	rsrc->noirq_v_idx = vecids[rsrc->q_vector_idxs[i]];
4514 
4515 	kfree(vecids);
4516 
4517 	return 0;
4518 }
4519 
4520 /**
4521  * idpf_vport_intr_napi_add_all - Register NAPI handler for all q_vectors
4522  * @vport: virtual port structure
4523  * @rsrc: pointer to queue and vector resources
4524  */
4525 static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport,
4526 					 struct idpf_q_vec_rsrc *rsrc)
4527 {
4528 	int (*napi_poll)(struct napi_struct *napi, int budget);
4529 	int irq_num;
4530 	u16 qv_idx;
4531 
4532 	if (idpf_is_queue_model_split(rsrc->txq_model))
4533 		napi_poll = idpf_vport_splitq_napi_poll;
4534 	else
4535 		napi_poll = idpf_vport_singleq_napi_poll;
4536 
4537 	for (u16 v_idx = 0; v_idx < rsrc->num_q_vectors; v_idx++) {
4538 		struct idpf_q_vector *q_vector = &rsrc->q_vectors[v_idx];
4539 
4540 		qv_idx = rsrc->q_vector_idxs[v_idx];
4541 		irq_num = vport->adapter->msix_entries[qv_idx].vector;
4542 
4543 		netif_napi_add_config(vport->netdev, &q_vector->napi,
4544 				      napi_poll, v_idx);
4545 		netif_napi_set_irq(&q_vector->napi, irq_num);
4546 	}
4547 }
4548 
4549 /**
4550  * idpf_vport_intr_alloc - Allocate memory for interrupt vectors
4551  * @vport: virtual port
4552  * @rsrc: pointer to queue and vector resources
4553  *
4554  * Allocate one q_vector per queue interrupt.
4555  *
4556  * Return: 0 on success, -ENOMEM on allocation failure.
4557  */
4558 int idpf_vport_intr_alloc(struct idpf_vport *vport,
4559 			  struct idpf_q_vec_rsrc *rsrc)
4560 {
4561 	u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector;
4562 	struct idpf_vport_user_config_data *user_config;
4563 	struct idpf_q_vector *q_vector;
4564 	struct idpf_q_coalesce *q_coal;
4565 	u32 complqs_per_vector;
4566 	u16 idx = vport->idx;
4567 
4568 	user_config = &vport->adapter->vport_config[idx]->user_config;
4569 
4570 	rsrc->q_vectors = kcalloc(rsrc->num_q_vectors,
4571 				  sizeof(struct idpf_q_vector), GFP_KERNEL);
4572 	if (!rsrc->q_vectors)
4573 		return -ENOMEM;
4574 
4575 	txqs_per_vector = DIV_ROUND_UP(rsrc->num_txq_grp,
4576 				       rsrc->num_q_vectors);
4577 	rxqs_per_vector = DIV_ROUND_UP(rsrc->num_rxq_grp,
4578 				       rsrc->num_q_vectors);
4579 	bufqs_per_vector = rsrc->num_bufqs_per_qgrp *
4580 			   DIV_ROUND_UP(rsrc->num_rxq_grp,
4581 					rsrc->num_q_vectors);
4582 	complqs_per_vector = DIV_ROUND_UP(rsrc->num_txq_grp,
4583 					  rsrc->num_q_vectors);
4584 
4585 	for (u16 v_idx = 0; v_idx < rsrc->num_q_vectors; v_idx++) {
4586 		q_vector = &rsrc->q_vectors[v_idx];
4587 		q_coal = &user_config->q_coalesce[v_idx];
4588 		q_vector->vport = vport;
4589 
4590 		q_vector->tx_itr_value = q_coal->tx_coalesce_usecs;
4591 		q_vector->tx_intr_mode = q_coal->tx_intr_mode;
4592 		q_vector->tx_itr_idx = VIRTCHNL2_ITR_IDX_1;
4593 
4594 		q_vector->rx_itr_value = q_coal->rx_coalesce_usecs;
4595 		q_vector->rx_intr_mode = q_coal->rx_intr_mode;
4596 		q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0;
4597 
4598 		q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx),
4599 				       GFP_KERNEL);
4600 		if (!q_vector->tx)
4601 			goto error;
4602 
4603 		q_vector->rx = kcalloc(rxqs_per_vector, sizeof(*q_vector->rx),
4604 				       GFP_KERNEL);
4605 		if (!q_vector->rx)
4606 			goto error;
4607 
4608 		if (!idpf_is_queue_model_split(rsrc->rxq_model))
4609 			continue;
4610 
4611 		q_vector->bufq = kcalloc(bufqs_per_vector,
4612 					 sizeof(*q_vector->bufq),
4613 					 GFP_KERNEL);
4614 		if (!q_vector->bufq)
4615 			goto error;
4616 
4617 		q_vector->complq = kcalloc(complqs_per_vector,
4618 					   sizeof(*q_vector->complq),
4619 					   GFP_KERNEL);
4620 		if (!q_vector->complq)
4621 			goto error;
4622 
4623 		if (!rsrc->xdp_txq_offset)
4624 			continue;
4625 
4626 		q_vector->xsksq = kcalloc(rxqs_per_vector,
4627 					  sizeof(*q_vector->xsksq),
4628 					  GFP_KERNEL);
4629 		if (!q_vector->xsksq)
4630 			goto error;
4631 	}
4632 
4633 	return 0;
4634 
4635 error:
4636 	idpf_vport_intr_rel(rsrc);
4637 
4638 	return -ENOMEM;
4639 }
4640 
4641 /**
4642  * idpf_vport_intr_init - Setup all vectors for the given vport
4643  * @vport: virtual port
4644  * @rsrc: pointer to queue and vector resources
4645  *
4646  * Return: 0 on success or negative on failure
4647  */
4648 int idpf_vport_intr_init(struct idpf_vport *vport, struct idpf_q_vec_rsrc *rsrc)
4649 {
4650 	int err;
4651 
4652 	err = idpf_vport_intr_init_vec_idx(vport, rsrc);
4653 	if (err)
4654 		return err;
4655 
4656 	idpf_vport_intr_map_vector_to_qs(vport, rsrc);
4657 	idpf_vport_intr_napi_add_all(vport, rsrc);
4658 
4659 	err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport, rsrc);
4660 	if (err)
4661 		goto unroll_vectors_alloc;
4662 
4663 	err = idpf_vport_intr_req_irq(vport, rsrc);
4664 	if (err)
4665 		goto unroll_vectors_alloc;
4666 
4667 	return 0;
4668 
4669 unroll_vectors_alloc:
4670 	idpf_vport_intr_napi_del_all(rsrc);
4671 
4672 	return err;
4673 }
4674 
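/**
 * idpf_vport_intr_ena - Enable NAPI and interrupts for all vectors in the vport
 * @vport: main vport structure
 * @rsrc: pointer to queue and vector resources
 */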
4675 void idpf_vport_intr_ena(struct idpf_vport *vport, struct idpf_q_vec_rsrc *rsrc)
4676 {
4677 	idpf_vport_intr_napi_ena_all(rsrc);
4678 	idpf_vport_intr_ena_irq_all(vport, rsrc);
4679 }
4680 
4681 /**
4682  * idpf_config_rss - Send virtchnl messages to configure RSS
4683  * @vport: virtual port
4684  * @rss_data: pointer to RSS key and lut info
4685  *
4686  * Return: 0 on success, negative on failure
4687  */
4688 int idpf_config_rss(struct idpf_vport *vport, struct idpf_rss_data *rss_data)
4689 {
4690 	struct idpf_adapter *adapter = vport->adapter;
4691 	u32 vport_id = vport->vport_id;
4692 	int err;
4693 
4694 	err = idpf_send_get_set_rss_key_msg(adapter, rss_data, vport_id, false);
4695 	if (err)
4696 		return err;
4697 
4698 	return idpf_send_get_set_rss_lut_msg(adapter, rss_data, vport_id, false);
4699 }
4700 
4701 /**
4702  * idpf_fill_dflt_rss_lut - Fill the indirection table with the default values
4703  * @vport: virtual port structure
4704  * @rss_data: pointer to RSS key and lut info
4705  */
4706 void idpf_fill_dflt_rss_lut(struct idpf_vport *vport,
4707 			    struct idpf_rss_data *rss_data)
4708 {
4709 	u16 num_active_rxq = vport->dflt_qv_rsrc.num_rxq;
4710 	int i;
4711 
4712 	for (i = 0; i < rss_data->rss_lut_size; i++)
4713 		rss_data->rss_lut[i] = i % num_active_rxq;
4714 }
4715 
4716 /**
4717  * idpf_init_rss_lut - Allocate and initialize RSS LUT
4718  * @vport: virtual port
4719  * @rss_data: pointer to RSS key and lut info
4720  *
4721  * Return: 0 on success, negative on failure
4722  */
4723 int idpf_init_rss_lut(struct idpf_vport *vport, struct idpf_rss_data *rss_data)
4724 {
4725 	if (!rss_data->rss_lut) {
4726 		u32 lut_size;
4727 
4728 		lut_size = rss_data->rss_lut_size * sizeof(u32);
4729 		rss_data->rss_lut = kzalloc(lut_size, GFP_KERNEL);
4730 		if (!rss_data->rss_lut)
4731 			return -ENOMEM;
4732 	}
4733 
4734 	/* Fill the default RSS lut values */
4735 	idpf_fill_dflt_rss_lut(vport, rss_data);
4736 
4737 	return 0;
4738 }
4739 
4740 /**
4741  * idpf_deinit_rss_lut - Release RSS LUT
4742  * @rss_data: pointer to RSS key and lut info
4743  */
4744 void idpf_deinit_rss_lut(struct idpf_rss_data *rss_data)
4745 {
4746 	kfree(rss_data->rss_lut);
4747 	rss_data->rss_lut = NULL;
4748 }
4749