
1 // SPDX-License-Identifier: GPL-2.0-only
10 #define idpf_tx_buf_next(buf) (*(u32 *)&(buf)->priv)
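For orientation, a minimal stand-alone sketch of the pattern this macro depends on: the buffer's pointer-sized priv field is reused as raw storage for a 32-bit "next buffer" index. The struct and names below are invented for illustration and are not the driver's types.

#include <stdint.h>
#include <stdio.h>

struct mock_tx_buf {
        void *priv;     /* stand-in for the generic per-buffer private field */
};

/* reinterpret the priv slot as a u32 "next" index, as the macro above does */
#define mock_tx_buf_next(buf)   (*(uint32_t *)&(buf)->priv)

int main(void)
{
        struct mock_tx_buf buf = { 0 };

        mock_tx_buf_next(&buf) = 42;    /* chain this buffer to buffer id 42 */
        printf("next = %u\n", (unsigned int)mock_tx_buf_next(&buf));
        return 0;
}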
14 * idpf_chk_linearize - Check if skb exceeds max descriptors per packet
37 * idpf_tx_timeout - Respond to a Tx Hang
45 adapter->tx_timeout_count++; in idpf_tx_timeout()
48 adapter->tx_timeout_count, txqueue); in idpf_tx_timeout()
50 set_bit(IDPF_HR_FUNC_RESET, adapter->flags); in idpf_tx_timeout()
51 queue_delayed_work(adapter->vc_event_wq, in idpf_tx_timeout()
52 &adapter->vc_event_task, in idpf_tx_timeout()
62 .dev = txq->dev, in idpf_tx_buf_clean()
70 for (u32 i = 0; i < txq->buf_pool_size; i++) in idpf_tx_buf_clean()
71 libeth_tx_complete_any(&txq->tx_buf[i], &cp); in idpf_tx_buf_clean()
77 * idpf_tx_buf_rel_all - Free any empty Tx buffers
83 if (!txq->tx_buf) in idpf_tx_buf_rel_all()
91 kfree(txq->tx_buf); in idpf_tx_buf_rel_all()
92 txq->tx_buf = NULL; in idpf_tx_buf_rel_all()
96 * idpf_tx_desc_rel - Free Tx resources per queue
106 libeth_xdpsq_deinit_timer(txq->timer); in idpf_tx_desc_rel()
111 netdev_tx_reset_subqueue(txq->netdev, txq->idx); in idpf_tx_desc_rel()
115 if (!txq->desc_ring) in idpf_tx_desc_rel()
118 if (!xdp && txq->refillq) in idpf_tx_desc_rel()
119 kfree(txq->refillq->ring); in idpf_tx_desc_rel()
121 dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma); in idpf_tx_desc_rel()
122 txq->desc_ring = NULL; in idpf_tx_desc_rel()
123 txq->next_to_use = 0; in idpf_tx_desc_rel()
124 txq->next_to_clean = 0; in idpf_tx_desc_rel()
128 * idpf_compl_desc_rel - Free completion resources per queue
137 if (!complq->comp) in idpf_compl_desc_rel()
140 dma_free_coherent(complq->netdev->dev.parent, complq->size, in idpf_compl_desc_rel()
141 complq->desc_ring, complq->dma); in idpf_compl_desc_rel()
142 complq->desc_ring = NULL; in idpf_compl_desc_rel()
143 complq->next_to_use = 0; in idpf_compl_desc_rel()
144 complq->next_to_clean = 0; in idpf_compl_desc_rel()
148 * idpf_tx_desc_rel_all - Free Tx Resources for All Queues
157 if (!vport->txq_grps) in idpf_tx_desc_rel_all()
160 for (i = 0; i < vport->num_txq_grp; i++) { in idpf_tx_desc_rel_all()
161 struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; in idpf_tx_desc_rel_all()
163 for (j = 0; j < txq_grp->num_txq; j++) in idpf_tx_desc_rel_all()
164 idpf_tx_desc_rel(txq_grp->txqs[j]); in idpf_tx_desc_rel_all()
166 if (idpf_is_queue_model_split(vport->txq_model)) in idpf_tx_desc_rel_all()
167 idpf_compl_desc_rel(txq_grp->complq); in idpf_tx_desc_rel_all()
172 * idpf_tx_buf_alloc_all - Allocate memory for all buffer resources
183 tx_q->buf_pool_size = U16_MAX; in idpf_tx_buf_alloc_all()
185 tx_q->buf_pool_size = tx_q->desc_count; in idpf_tx_buf_alloc_all()
186 tx_q->tx_buf = kcalloc(tx_q->buf_pool_size, sizeof(*tx_q->tx_buf), in idpf_tx_buf_alloc_all()
188 if (!tx_q->tx_buf) in idpf_tx_buf_alloc_all()
189 return -ENOMEM; in idpf_tx_buf_alloc_all()
195 * idpf_tx_desc_alloc - Allocate the Tx descriptors
204 struct device *dev = tx_q->dev; in idpf_tx_desc_alloc()
212 tx_q->size = tx_q->desc_count * sizeof(*tx_q->base_tx); in idpf_tx_desc_alloc()
215 tx_q->size = ALIGN(tx_q->size, 4096); in idpf_tx_desc_alloc()
216 tx_q->desc_ring = dmam_alloc_coherent(dev, tx_q->size, &tx_q->dma, in idpf_tx_desc_alloc()
218 if (!tx_q->desc_ring) { in idpf_tx_desc_alloc()
220 tx_q->size); in idpf_tx_desc_alloc()
221 err = -ENOMEM; in idpf_tx_desc_alloc()
225 tx_q->next_to_use = 0; in idpf_tx_desc_alloc()
226 tx_q->next_to_clean = 0; in idpf_tx_desc_alloc()
234 refillq = tx_q->refillq; in idpf_tx_desc_alloc()
235 refillq->desc_count = tx_q->buf_pool_size; in idpf_tx_desc_alloc()
236 refillq->ring = kcalloc(refillq->desc_count, sizeof(u32), in idpf_tx_desc_alloc()
238 if (!refillq->ring) { in idpf_tx_desc_alloc()
239 err = -ENOMEM; in idpf_tx_desc_alloc()
243 for (unsigned int i = 0; i < refillq->desc_count; i++) in idpf_tx_desc_alloc()
244 refillq->ring[i] = in idpf_tx_desc_alloc()
254 tx_q->last_re = tx_q->desc_count - IDPF_TX_SPLITQ_RE_MIN_GAP; in idpf_tx_desc_alloc()
265 * idpf_compl_desc_alloc - allocate completion descriptors
269 * Return: 0 on success, -errno on failure.
277 sizeof(*complq->comp) : sizeof(*complq->comp_4b); in idpf_compl_desc_alloc()
278 complq->size = array_size(complq->desc_count, desc_size); in idpf_compl_desc_alloc()
280 complq->desc_ring = dma_alloc_coherent(complq->netdev->dev.parent, in idpf_compl_desc_alloc()
281 complq->size, &complq->dma, in idpf_compl_desc_alloc()
283 if (!complq->desc_ring) in idpf_compl_desc_alloc()
284 return -ENOMEM; in idpf_compl_desc_alloc()
286 complq->next_to_use = 0; in idpf_compl_desc_alloc()
287 complq->next_to_clean = 0; in idpf_compl_desc_alloc()
297 * idpf_tx_desc_alloc_all - allocate all queues Tx resources
310 for (i = 0; i < vport->num_txq_grp; i++) { in idpf_tx_desc_alloc_all()
311 for (j = 0; j < vport->txq_grps[i].num_txq; j++) { in idpf_tx_desc_alloc_all()
312 struct idpf_tx_queue *txq = vport->txq_grps[i].txqs[j]; in idpf_tx_desc_alloc_all()
316 pci_err(vport->adapter->pdev, in idpf_tx_desc_alloc_all()
323 if (!idpf_is_queue_model_split(vport->txq_model)) in idpf_tx_desc_alloc_all()
327 err = idpf_compl_desc_alloc(vport, vport->txq_grps[i].complq); in idpf_tx_desc_alloc_all()
329 pci_err(vport->adapter->pdev, in idpf_tx_desc_alloc_all()
344 * idpf_rx_page_rel - Release an rx buffer page
349 if (unlikely(!rx_buf->netmem)) in idpf_rx_page_rel()
352 libeth_rx_recycle_slow(rx_buf->netmem); in idpf_rx_page_rel()
354 rx_buf->netmem = 0; in idpf_rx_page_rel()
355 rx_buf->offset = 0; in idpf_rx_page_rel()
359 * idpf_rx_hdr_buf_rel_all - Release header buffer memory
365 .fqes = bufq->hdr_buf, in idpf_rx_hdr_buf_rel_all()
366 .pp = bufq->hdr_pp, in idpf_rx_hdr_buf_rel_all()
369 for (u32 i = 0; i < bufq->desc_count; i++) in idpf_rx_hdr_buf_rel_all()
370 idpf_rx_page_rel(&bufq->hdr_buf[i]); in idpf_rx_hdr_buf_rel_all()
373 bufq->hdr_buf = NULL; in idpf_rx_hdr_buf_rel_all()
374 bufq->hdr_pp = NULL; in idpf_rx_hdr_buf_rel_all()
378 * idpf_rx_buf_rel_bufq - Free all Rx buffer resources for a buffer queue
384 .fqes = bufq->buf, in idpf_rx_buf_rel_bufq()
385 .pp = bufq->pp, in idpf_rx_buf_rel_bufq()
389 if (!bufq->buf) in idpf_rx_buf_rel_bufq()
398 for (u32 i = 0; i < bufq->desc_count; i++) in idpf_rx_buf_rel_bufq()
399 idpf_rx_page_rel(&bufq->buf[i]); in idpf_rx_buf_rel_bufq()
405 bufq->buf = NULL; in idpf_rx_buf_rel_bufq()
406 bufq->pp = NULL; in idpf_rx_buf_rel_bufq()
410 * idpf_rx_buf_rel_all - Free all Rx buffer resources for a receive queue
416 .fqes = rxq->rx_buf, in idpf_rx_buf_rel_all()
417 .pp = rxq->pp, in idpf_rx_buf_rel_all()
420 if (!rxq->rx_buf) in idpf_rx_buf_rel_all()
423 for (u32 i = 0; i < rxq->desc_count; i++) in idpf_rx_buf_rel_all()
424 idpf_rx_page_rel(&rxq->rx_buf[i]); in idpf_rx_buf_rel_all()
427 rxq->rx_buf = NULL; in idpf_rx_buf_rel_all()
428 rxq->pp = NULL; in idpf_rx_buf_rel_all()
432 * idpf_rx_desc_rel - Free a specific Rx queue's resources
446 libeth_xdp_return_stash(&rxq->xdp); in idpf_rx_desc_rel()
453 rxq->next_to_alloc = 0; in idpf_rx_desc_rel()
454 rxq->next_to_clean = 0; in idpf_rx_desc_rel()
455 rxq->next_to_use = 0; in idpf_rx_desc_rel()
456 if (!rxq->desc_ring) in idpf_rx_desc_rel()
459 dmam_free_coherent(dev, rxq->size, rxq->desc_ring, rxq->dma); in idpf_rx_desc_rel()
460 rxq->desc_ring = NULL; in idpf_rx_desc_rel()
464 * idpf_rx_desc_rel_bufq - free buffer queue resources
477 bufq->next_to_alloc = 0; in idpf_rx_desc_rel_bufq()
478 bufq->next_to_clean = 0; in idpf_rx_desc_rel_bufq()
479 bufq->next_to_use = 0; in idpf_rx_desc_rel_bufq()
481 if (!bufq->split_buf) in idpf_rx_desc_rel_bufq()
484 dma_free_coherent(dev, bufq->size, bufq->split_buf, bufq->dma); in idpf_rx_desc_rel_bufq()
485 bufq->split_buf = NULL; in idpf_rx_desc_rel_bufq()
489 * idpf_rx_desc_rel_all - Free Rx Resources for All Queues
496 struct device *dev = &vport->adapter->pdev->dev; in idpf_rx_desc_rel_all()
501 if (!vport->rxq_grps) in idpf_rx_desc_rel_all()
504 for (i = 0; i < vport->num_rxq_grp; i++) { in idpf_rx_desc_rel_all()
505 rx_qgrp = &vport->rxq_grps[i]; in idpf_rx_desc_rel_all()
507 if (!idpf_is_queue_model_split(vport->rxq_model)) { in idpf_rx_desc_rel_all()
508 for (j = 0; j < rx_qgrp->singleq.num_rxq; j++) in idpf_rx_desc_rel_all()
509 idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j], dev, in idpf_rx_desc_rel_all()
514 num_rxq = rx_qgrp->splitq.num_rxq_sets; in idpf_rx_desc_rel_all()
516 idpf_rx_desc_rel(&rx_qgrp->splitq.rxq_sets[j]->rxq, in idpf_rx_desc_rel_all()
519 if (!rx_qgrp->splitq.bufq_sets) in idpf_rx_desc_rel_all()
522 for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { in idpf_rx_desc_rel_all()
524 &rx_qgrp->splitq.bufq_sets[j]; in idpf_rx_desc_rel_all()
526 idpf_rx_desc_rel_bufq(&bufq_set->bufq, dev); in idpf_rx_desc_rel_all()
532 * idpf_rx_buf_hw_update - Store the new tail value
538 bufq->next_to_use = val; in idpf_rx_buf_hw_update()
540 if (unlikely(!bufq->tail)) in idpf_rx_buf_hw_update()
544 writel(val, bufq->tail); in idpf_rx_buf_hw_update()
548 * idpf_rx_hdr_buf_alloc_all - Allocate memory for header buffers
556 .count = bufq->desc_count, in idpf_rx_hdr_buf_alloc_all()
558 .xdp = idpf_xdp_enabled(bufq->q_vector->vport), in idpf_rx_hdr_buf_alloc_all()
559 .nid = idpf_q_vector_to_mem(bufq->q_vector), in idpf_rx_hdr_buf_alloc_all()
563 ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi); in idpf_rx_hdr_buf_alloc_all()
567 bufq->hdr_pp = fq.pp; in idpf_rx_hdr_buf_alloc_all()
568 bufq->hdr_buf = fq.fqes; in idpf_rx_hdr_buf_alloc_all()
569 bufq->hdr_truesize = fq.truesize; in idpf_rx_hdr_buf_alloc_all()
570 bufq->rx_hbuf_size = fq.buf_len; in idpf_rx_hdr_buf_alloc_all()
576 * idpf_post_buf_refill - Post buffer id to refill queue
582 u32 nta = refillq->next_to_use; in idpf_post_buf_refill()
585 refillq->ring[nta] = in idpf_post_buf_refill()
590 if (unlikely(++nta == refillq->desc_count)) { in idpf_post_buf_refill()
595 refillq->next_to_use = nta; in idpf_post_buf_refill()
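The refill queue shown above pairs each buffer id with a generation bit, so the consumer can tell freshly posted entries from stale ones without a shared count; the GEN_CHK/RFL_GEN_CHK flags set later in the listing seed that handshake. Below is a self-contained sketch of the idea with invented names and an invented bit layout (the real encoding lives in the driver's own masks), assuming both generation flags start out true:

#include <stdbool.h>
#include <stdint.h>

#define GEN_BIT         0x80000000u     /* invented: generation flag in each entry */
#define BUFID_MASK      0x0000ffffu     /* invented: buffer id field */

struct mock_refillq {
        uint32_t *ring;
        uint32_t desc_count;
        uint32_t next_to_use;   /* producer index */
        uint32_t next_to_clean; /* consumer index */
        bool gen;               /* producer's current generation */
        bool gen_chk;           /* generation the consumer expects */
};

/* producer: publish a free buffer id, flipping the generation on wrap */
static void refillq_post(struct mock_refillq *q, uint32_t buf_id)
{
        q->ring[q->next_to_use] = (buf_id & BUFID_MASK) | (q->gen ? GEN_BIT : 0);

        if (++q->next_to_use == q->desc_count) {
                q->next_to_use = 0;
                q->gen = !q->gen;
        }
}

/* consumer: take a buffer id, or return false if no fresh entry is there */
static bool refillq_get(struct mock_refillq *q, uint32_t *buf_id)
{
        uint32_t entry = q->ring[q->next_to_clean];

        if (!!(entry & GEN_BIT) != q->gen_chk)
                return false;   /* stale generation: ring is empty */

        *buf_id = entry & BUFID_MASK;

        if (++q->next_to_clean == q->desc_count) {
                q->next_to_clean = 0;
                q->gen_chk = !q->gen_chk;
        }
        return true;
}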
599 * idpf_rx_post_buf_desc - Post buffer to bufq descriptor ring
609 .count = bufq->desc_count, in idpf_rx_post_buf_desc()
611 u16 nta = bufq->next_to_alloc; in idpf_rx_post_buf_desc()
614 splitq_rx_desc = &bufq->split_buf[nta]; in idpf_rx_post_buf_desc()
617 fq.pp = bufq->hdr_pp; in idpf_rx_post_buf_desc()
618 fq.fqes = bufq->hdr_buf; in idpf_rx_post_buf_desc()
619 fq.truesize = bufq->hdr_truesize; in idpf_rx_post_buf_desc()
625 splitq_rx_desc->hdr_addr = cpu_to_le64(addr); in idpf_rx_post_buf_desc()
628 fq.pp = bufq->pp; in idpf_rx_post_buf_desc()
629 fq.fqes = bufq->buf; in idpf_rx_post_buf_desc()
630 fq.truesize = bufq->truesize; in idpf_rx_post_buf_desc()
636 splitq_rx_desc->pkt_addr = cpu_to_le64(addr); in idpf_rx_post_buf_desc()
637 splitq_rx_desc->qword0.buf_id = cpu_to_le16(buf_id); in idpf_rx_post_buf_desc()
640 if (unlikely(nta == bufq->desc_count)) in idpf_rx_post_buf_desc()
642 bufq->next_to_alloc = nta; in idpf_rx_post_buf_desc()
648 * idpf_rx_post_init_bufs - Post initial buffers to bufq
664 idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq->next_to_alloc, in idpf_rx_post_init_bufs()
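The ALIGN_DOWN above presumably exists because the buffer-queue tail may only be advanced in whole strides; a tiny illustration of the arithmetic, with an invented stride of 8:

#define ALIGN_DOWN_POW2(x, a)   ((x) & ~((a) - 1))      /* power-of-two stride */

/* next_to_alloc = 509, stride = 8: ALIGN_DOWN_POW2(509, 8) = 504, so the
 * 5 newest buffers are not advertised to HW until the next tail bump. */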
671 * idpf_rx_buf_alloc_singleq - Allocate memory for all buffer resources
674 * Return: 0 on success, -ENOMEM on failure.
678 if (idpf_rx_singleq_buf_hw_alloc_all(rxq, rxq->desc_count - 1)) in idpf_rx_buf_alloc_singleq()
686 return -ENOMEM; in idpf_rx_buf_alloc_singleq()
690 * idpf_rx_bufs_init_singleq - Initialize page pool and allocate Rx bufs
693 * Return: 0 on success, -errno on failure.
698 .count = rxq->desc_count, in idpf_rx_bufs_init_singleq()
700 .nid = idpf_q_vector_to_mem(rxq->q_vector), in idpf_rx_bufs_init_singleq()
704 ret = libeth_rx_fq_create(&fq, &rxq->q_vector->napi); in idpf_rx_bufs_init_singleq()
708 rxq->pp = fq.pp; in idpf_rx_bufs_init_singleq()
709 rxq->rx_buf = fq.fqes; in idpf_rx_bufs_init_singleq()
710 rxq->truesize = fq.truesize; in idpf_rx_bufs_init_singleq()
711 rxq->rx_buf_size = fq.buf_len; in idpf_rx_bufs_init_singleq()
717 * idpf_rx_buf_alloc_all - Allocate memory for all buffer resources
734 err = -ENOMEM; in idpf_rx_buf_alloc_all()
744 * idpf_rx_bufs_init - Initialize page pool, allocate rx bufs, and post to HW
754 .truesize = bufq->truesize, in idpf_rx_bufs_init()
755 .count = bufq->desc_count, in idpf_rx_bufs_init()
758 .xdp = idpf_xdp_enabled(bufq->q_vector->vport), in idpf_rx_bufs_init()
759 .nid = idpf_q_vector_to_mem(bufq->q_vector), in idpf_rx_bufs_init()
766 ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi); in idpf_rx_bufs_init()
770 bufq->pp = fq.pp; in idpf_rx_bufs_init()
771 bufq->buf = fq.fqes; in idpf_rx_bufs_init()
772 bufq->truesize = fq.truesize; in idpf_rx_bufs_init()
773 bufq->rx_buf_size = fq.buf_len; in idpf_rx_bufs_init()
779 * idpf_rx_bufs_init_all - Initialize all RX bufs
786 bool split = idpf_is_queue_model_split(vport->rxq_model); in idpf_rx_bufs_init_all()
789 idpf_xdp_copy_prog_to_rqs(vport, vport->xdp_prog); in idpf_rx_bufs_init_all()
791 for (i = 0; i < vport->num_rxq_grp; i++) { in idpf_rx_bufs_init_all()
792 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in idpf_rx_bufs_init_all()
797 int num_rxq = rx_qgrp->singleq.num_rxq; in idpf_rx_bufs_init_all()
802 q = rx_qgrp->singleq.rxqs[j]; in idpf_rx_bufs_init_all()
812 for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { in idpf_rx_bufs_init_all()
816 q = &rx_qgrp->splitq.bufq_sets[j].bufq; in idpf_rx_bufs_init_all()
817 q->truesize = truesize; in idpf_rx_bufs_init_all()
825 truesize = q->truesize >> 1; in idpf_rx_bufs_init_all()
833 * idpf_rx_desc_alloc - Allocate queue Rx resources
842 struct device *dev = &vport->adapter->pdev->dev; in idpf_rx_desc_alloc()
844 rxq->size = rxq->desc_count * sizeof(union virtchnl2_rx_desc); in idpf_rx_desc_alloc()
847 rxq->size = ALIGN(rxq->size, 4096); in idpf_rx_desc_alloc()
848 rxq->desc_ring = dmam_alloc_coherent(dev, rxq->size, in idpf_rx_desc_alloc()
849 &rxq->dma, GFP_KERNEL); in idpf_rx_desc_alloc()
850 if (!rxq->desc_ring) { in idpf_rx_desc_alloc()
852 rxq->size); in idpf_rx_desc_alloc()
853 return -ENOMEM; in idpf_rx_desc_alloc()
856 rxq->next_to_alloc = 0; in idpf_rx_desc_alloc()
857 rxq->next_to_clean = 0; in idpf_rx_desc_alloc()
858 rxq->next_to_use = 0; in idpf_rx_desc_alloc()
867 * idpf_bufq_desc_alloc - Allocate buffer queue descriptor ring
871 * Return: 0 on success, -ENOMEM on failure.
876 struct device *dev = &vport->adapter->pdev->dev; in idpf_bufq_desc_alloc()
878 bufq->size = array_size(bufq->desc_count, sizeof(*bufq->split_buf)); in idpf_bufq_desc_alloc()
880 bufq->split_buf = dma_alloc_coherent(dev, bufq->size, &bufq->dma, in idpf_bufq_desc_alloc()
882 if (!bufq->split_buf) in idpf_bufq_desc_alloc()
883 return -ENOMEM; in idpf_bufq_desc_alloc()
885 bufq->next_to_alloc = 0; in idpf_bufq_desc_alloc()
886 bufq->next_to_clean = 0; in idpf_bufq_desc_alloc()
887 bufq->next_to_use = 0; in idpf_bufq_desc_alloc()
896 * idpf_rx_desc_alloc_all - allocate all RX queues resources
907 for (i = 0; i < vport->num_rxq_grp; i++) { in idpf_rx_desc_alloc_all()
908 rx_qgrp = &vport->rxq_grps[i]; in idpf_rx_desc_alloc_all()
909 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_rx_desc_alloc_all()
910 num_rxq = rx_qgrp->splitq.num_rxq_sets; in idpf_rx_desc_alloc_all()
912 num_rxq = rx_qgrp->singleq.num_rxq; in idpf_rx_desc_alloc_all()
917 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_rx_desc_alloc_all()
918 q = &rx_qgrp->splitq.rxq_sets[j]->rxq; in idpf_rx_desc_alloc_all()
920 q = rx_qgrp->singleq.rxqs[j]; in idpf_rx_desc_alloc_all()
924 pci_err(vport->adapter->pdev, in idpf_rx_desc_alloc_all()
931 if (!idpf_is_queue_model_split(vport->rxq_model)) in idpf_rx_desc_alloc_all()
934 for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { in idpf_rx_desc_alloc_all()
937 q = &rx_qgrp->splitq.bufq_sets[j].bufq; in idpf_rx_desc_alloc_all()
941 pci_err(vport->adapter->pdev, in idpf_rx_desc_alloc_all()
959 const struct idpf_vport *vport = qs->vport; in idpf_init_queue_set()
963 splitq = idpf_is_queue_model_split(vport->rxq_model); in idpf_init_queue_set()
965 for (u32 i = 0; i < qs->num; i++) { in idpf_init_queue_set()
966 const struct idpf_queue_ptr *q = &qs->qs[i]; in idpf_init_queue_set()
969 switch (q->type) { in idpf_init_queue_set()
971 err = idpf_rx_desc_alloc(vport, q->rxq); in idpf_init_queue_set()
975 err = idpf_xdp_rxq_info_init(q->rxq); in idpf_init_queue_set()
980 err = idpf_rx_bufs_init_singleq(q->rxq); in idpf_init_queue_set()
984 bufq = q->bufq; in idpf_init_queue_set()
990 for (u32 j = 0; j < bufq->q_vector->num_bufq; j++) { in idpf_init_queue_set()
995 bufqs = bufq->q_vector->bufq; in idpf_init_queue_set()
1001 ts = bufqs[j - 1]->truesize >> 1; in idpf_init_queue_set()
1007 bufq->truesize = ts; in idpf_init_queue_set()
1015 err = idpf_tx_desc_alloc(vport, q->txq); in idpf_init_queue_set()
1018 err = idpf_compl_desc_alloc(vport, q->complq); in idpf_init_queue_set()
1033 const struct idpf_vport *vport = qs->vport; in idpf_clean_queue_set()
1034 struct device *dev = vport->netdev->dev.parent; in idpf_clean_queue_set()
1036 for (u32 i = 0; i < qs->num; i++) { in idpf_clean_queue_set()
1037 const struct idpf_queue_ptr *q = &qs->qs[i]; in idpf_clean_queue_set()
1039 switch (q->type) { in idpf_clean_queue_set()
1041 idpf_xdp_rxq_info_deinit(q->rxq, vport->rxq_model); in idpf_clean_queue_set()
1042 idpf_rx_desc_rel(q->rxq, dev, vport->rxq_model); in idpf_clean_queue_set()
1045 idpf_rx_desc_rel_bufq(q->bufq, dev); in idpf_clean_queue_set()
1048 idpf_tx_desc_rel(q->txq); in idpf_clean_queue_set()
1050 if (idpf_queue_has(XDP, q->txq)) { in idpf_clean_queue_set()
1051 q->txq->pending = 0; in idpf_clean_queue_set()
1052 q->txq->xdp_tx = 0; in idpf_clean_queue_set()
1054 q->txq->txq_grp->num_completions_pending = 0; in idpf_clean_queue_set()
1057 writel(q->txq->next_to_use, q->txq->tail); in idpf_clean_queue_set()
1060 idpf_compl_desc_rel(q->complq); in idpf_clean_queue_set()
1061 q->complq->num_completions = 0; in idpf_clean_queue_set()
1071 if (qv->num_txq) { in idpf_qvec_ena_irq()
1074 if (IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode)) in idpf_qvec_ena_irq()
1075 itr = qv->vport->tx_itr_profile[qv->tx_dim.profile_ix]; in idpf_qvec_ena_irq()
1077 itr = qv->tx_itr_value; in idpf_qvec_ena_irq()
1082 if (qv->num_rxq) { in idpf_qvec_ena_irq()
1085 if (IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode)) in idpf_qvec_ena_irq()
1086 itr = qv->vport->rx_itr_profile[qv->rx_dim.profile_ix]; in idpf_qvec_ena_irq()
1088 itr = qv->rx_itr_value; in idpf_qvec_ena_irq()
1093 if (qv->num_txq || qv->num_rxq) in idpf_qvec_ena_irq()
1098 * idpf_vector_to_queue_set - create a queue set associated with the given
1112 bool xdp = qv->vport->xdp_txq_offset && !qv->num_xsksq; in idpf_vector_to_queue_set()
1113 struct idpf_vport *vport = qv->vport; in idpf_vector_to_queue_set()
1117 num = qv->num_rxq + qv->num_bufq + qv->num_txq + qv->num_complq; in idpf_vector_to_queue_set()
1118 num += xdp ? qv->num_rxq * 2 : qv->num_xsksq * 2; in idpf_vector_to_queue_set()
1128 for (u32 i = 0; i < qv->num_bufq; i++) { in idpf_vector_to_queue_set()
1129 qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER; in idpf_vector_to_queue_set()
1130 qs->qs[num++].bufq = qv->bufq[i]; in idpf_vector_to_queue_set()
1133 for (u32 i = 0; i < qv->num_rxq; i++) { in idpf_vector_to_queue_set()
1134 qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_RX; in idpf_vector_to_queue_set()
1135 qs->qs[num++].rxq = qv->rx[i]; in idpf_vector_to_queue_set()
1138 for (u32 i = 0; i < qv->num_txq; i++) { in idpf_vector_to_queue_set()
1139 qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX; in idpf_vector_to_queue_set()
1140 qs->qs[num++].txq = qv->tx[i]; in idpf_vector_to_queue_set()
1143 for (u32 i = 0; i < qv->num_complq; i++) { in idpf_vector_to_queue_set()
1144 qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION; in idpf_vector_to_queue_set()
1145 qs->qs[num++].complq = qv->complq[i]; in idpf_vector_to_queue_set()
1148 if (!vport->xdp_txq_offset) in idpf_vector_to_queue_set()
1152 for (u32 i = 0; i < qv->num_rxq; i++) { in idpf_vector_to_queue_set()
1153 u32 idx = vport->xdp_txq_offset + qv->rx[i]->idx; in idpf_vector_to_queue_set()
1155 qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX; in idpf_vector_to_queue_set()
1156 qs->qs[num++].txq = vport->txqs[idx]; in idpf_vector_to_queue_set()
1158 qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION; in idpf_vector_to_queue_set()
1159 qs->qs[num++].complq = vport->txqs[idx]->complq; in idpf_vector_to_queue_set()
1162 for (u32 i = 0; i < qv->num_xsksq; i++) { in idpf_vector_to_queue_set()
1163 qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX; in idpf_vector_to_queue_set()
1164 qs->qs[num++].txq = qv->xsksq[i]; in idpf_vector_to_queue_set()
1166 qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION; in idpf_vector_to_queue_set()
1167 qs->qs[num++].complq = qv->xsksq[i]->complq; in idpf_vector_to_queue_set()
1172 if (num != qs->num) { in idpf_vector_to_queue_set()
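As a worked instance of the sizing a few lines above (num = rxq + bufq + txq + complq, plus two extra slots per Rx queue when the XDP Tx path is reserved, otherwise two per XSK Tx queue), with invented counts:

/* num_rxq = 2, num_bufq = 1, num_txq = 2, num_complq = 1, xdp = true:
 *      num = 2 + 1 + 2 + 1 + 2 * 2 = 10
 * and the final check above verifies this matches the slots actually filled. */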
1182 struct idpf_vport *vport = qs->vport; in idpf_qp_enable()
1190 netdev_err(vport->netdev, "Could not initialize queues in pair %u: %pe\n", in idpf_qp_enable()
1195 if (!vport->xdp_txq_offset) in idpf_qp_enable()
1198 q_vector->xsksq = kcalloc(DIV_ROUND_UP(vport->num_rxq_grp, in idpf_qp_enable()
1199 vport->num_q_vectors), in idpf_qp_enable()
1200 sizeof(*q_vector->xsksq), GFP_KERNEL); in idpf_qp_enable()
1201 if (!q_vector->xsksq) in idpf_qp_enable()
1202 return -ENOMEM; in idpf_qp_enable()
1204 for (u32 i = 0; i < qs->num; i++) { in idpf_qp_enable()
1205 const struct idpf_queue_ptr *q = &qs->qs[i]; in idpf_qp_enable()
1207 if (q->type != VIRTCHNL2_QUEUE_TYPE_TX) in idpf_qp_enable()
1210 if (!idpf_queue_has(XSK, q->txq)) in idpf_qp_enable()
1215 q->txq->q_vector = q_vector; in idpf_qp_enable()
1216 q_vector->xsksq[q_vector->num_xsksq++] = q->txq; in idpf_qp_enable()
1222 netdev_err(vport->netdev, "Could not configure queues in pair %u: %pe\n", in idpf_qp_enable()
1229 netdev_err(vport->netdev, "Could not enable queues in pair %u: %pe\n", in idpf_qp_enable()
1234 napi_enable(&q_vector->napi); in idpf_qp_enable()
1237 netif_start_subqueue(vport->netdev, qid); in idpf_qp_enable()
1244 struct idpf_vport *vport = qs->vport; in idpf_qp_disable()
1249 netif_stop_subqueue(vport->netdev, qid); in idpf_qp_disable()
1251 writel(0, q_vector->intr_reg.dyn_ctl); in idpf_qp_disable()
1252 napi_disable(&q_vector->napi); in idpf_qp_disable()
1256 netdev_err(vport->netdev, "Could not disable queues in pair %u: %pe\n", in idpf_qp_disable()
1263 kfree(q_vector->xsksq); in idpf_qp_disable()
1264 q_vector->num_xsksq = 0; in idpf_qp_disable()
1270 * idpf_qp_switch - enable or disable queues associated with queue pair
1275 * Return: 0 on success, -errno on failure.
1283 return -EINVAL; in idpf_qp_switch()
1287 return -ENOMEM; in idpf_qp_switch()
1293 * idpf_txq_group_rel - Release all resources for txq groups
1301 if (!vport->txq_grps) in idpf_txq_group_rel()
1304 split = idpf_is_queue_model_split(vport->txq_model); in idpf_txq_group_rel()
1305 flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, in idpf_txq_group_rel()
1308 for (i = 0; i < vport->num_txq_grp; i++) { in idpf_txq_group_rel()
1309 struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; in idpf_txq_group_rel()
1311 for (j = 0; j < txq_grp->num_txq; j++) { in idpf_txq_group_rel()
1313 kfree(txq_grp->txqs[j]->refillq); in idpf_txq_group_rel()
1314 txq_grp->txqs[j]->refillq = NULL; in idpf_txq_group_rel()
1317 kfree(txq_grp->txqs[j]); in idpf_txq_group_rel()
1318 txq_grp->txqs[j] = NULL; in idpf_txq_group_rel()
1324 kfree(txq_grp->complq); in idpf_txq_group_rel()
1325 txq_grp->complq = NULL; in idpf_txq_group_rel()
1327 kfree(vport->txq_grps); in idpf_txq_group_rel()
1328 vport->txq_grps = NULL; in idpf_txq_group_rel()
1332 * idpf_rxq_sw_queue_rel - Release software queue resources
1339 for (i = 0; i < rx_qgrp->vport->num_bufqs_per_qgrp; i++) { in idpf_rxq_sw_queue_rel()
1340 struct idpf_bufq_set *bufq_set = &rx_qgrp->splitq.bufq_sets[i]; in idpf_rxq_sw_queue_rel()
1342 for (j = 0; j < bufq_set->num_refillqs; j++) { in idpf_rxq_sw_queue_rel()
1343 kfree(bufq_set->refillqs[j].ring); in idpf_rxq_sw_queue_rel()
1344 bufq_set->refillqs[j].ring = NULL; in idpf_rxq_sw_queue_rel()
1346 kfree(bufq_set->refillqs); in idpf_rxq_sw_queue_rel()
1347 bufq_set->refillqs = NULL; in idpf_rxq_sw_queue_rel()
1352 * idpf_rxq_group_rel - Release all resources for rxq groups
1359 if (!vport->rxq_grps) in idpf_rxq_group_rel()
1362 for (i = 0; i < vport->num_rxq_grp; i++) { in idpf_rxq_group_rel()
1363 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in idpf_rxq_group_rel()
1367 if (idpf_is_queue_model_split(vport->rxq_model)) { in idpf_rxq_group_rel()
1368 num_rxq = rx_qgrp->splitq.num_rxq_sets; in idpf_rxq_group_rel()
1370 kfree(rx_qgrp->splitq.rxq_sets[j]); in idpf_rxq_group_rel()
1371 rx_qgrp->splitq.rxq_sets[j] = NULL; in idpf_rxq_group_rel()
1375 kfree(rx_qgrp->splitq.bufq_sets); in idpf_rxq_group_rel()
1376 rx_qgrp->splitq.bufq_sets = NULL; in idpf_rxq_group_rel()
1378 num_rxq = rx_qgrp->singleq.num_rxq; in idpf_rxq_group_rel()
1380 kfree(rx_qgrp->singleq.rxqs[j]); in idpf_rxq_group_rel()
1381 rx_qgrp->singleq.rxqs[j] = NULL; in idpf_rxq_group_rel()
1385 kfree(vport->rxq_grps); in idpf_rxq_group_rel()
1386 vport->rxq_grps = NULL; in idpf_rxq_group_rel()
1390 * idpf_vport_queue_grp_rel_all - Release all queue groups
1400 * idpf_vport_queues_rel - Free memory for all queues
1415 kfree(vport->txqs); in idpf_vport_queues_rel()
1416 vport->txqs = NULL; in idpf_vport_queues_rel()
1420 * idpf_vport_init_fast_path_txqs - Initialize fast path txq array
1423 * We get a queue index from skb->queue_mapping and we need a fast way to
1431 struct idpf_ptp_vport_tx_tstamp_caps *caps = vport->tx_tstamp_caps; in idpf_vport_init_fast_path_txqs()
1432 struct work_struct *tstamp_task = &vport->tstamp_task; in idpf_vport_init_fast_path_txqs()
1435 vport->txqs = kcalloc(vport->num_txq, sizeof(*vport->txqs), in idpf_vport_init_fast_path_txqs()
1438 if (!vport->txqs) in idpf_vport_init_fast_path_txqs()
1439 return -ENOMEM; in idpf_vport_init_fast_path_txqs()
1441 for (i = 0; i < vport->num_txq_grp; i++) { in idpf_vport_init_fast_path_txqs()
1442 struct idpf_txq_group *tx_grp = &vport->txq_grps[i]; in idpf_vport_init_fast_path_txqs()
1444 for (j = 0; j < tx_grp->num_txq; j++, k++) { in idpf_vport_init_fast_path_txqs()
1445 vport->txqs[k] = tx_grp->txqs[j]; in idpf_vport_init_fast_path_txqs()
1446 vport->txqs[k]->idx = k; in idpf_vport_init_fast_path_txqs()
1451 vport->txqs[k]->cached_tstamp_caps = caps; in idpf_vport_init_fast_path_txqs()
1452 vport->txqs[k]->tstamp_task = tstamp_task; in idpf_vport_init_fast_path_txqs()
1460 * idpf_vport_init_num_qs - Initialize number of queues
1468 u16 idx = vport->idx; in idpf_vport_init_num_qs()
1470 config_data = &vport->adapter->vport_config[idx]->user_config; in idpf_vport_init_num_qs()
1471 vport->num_txq = le16_to_cpu(vport_msg->num_tx_q); in idpf_vport_init_num_qs()
1472 vport->num_rxq = le16_to_cpu(vport_msg->num_rx_q); in idpf_vport_init_num_qs()
1476 if (!config_data->num_req_tx_qs && !config_data->num_req_rx_qs) { in idpf_vport_init_num_qs()
1477 config_data->num_req_tx_qs = le16_to_cpu(vport_msg->num_tx_q); in idpf_vport_init_num_qs()
1478 config_data->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q); in idpf_vport_init_num_qs()
1481 if (idpf_is_queue_model_split(vport->txq_model)) in idpf_vport_init_num_qs()
1482 vport->num_complq = le16_to_cpu(vport_msg->num_tx_complq); in idpf_vport_init_num_qs()
1483 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_vport_init_num_qs()
1484 vport->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq); in idpf_vport_init_num_qs()
1486 vport->xdp_prog = config_data->xdp_prog; in idpf_vport_init_num_qs()
1488 vport->xdp_txq_offset = config_data->num_req_tx_qs; in idpf_vport_init_num_qs()
1489 vport->num_xdp_txq = le16_to_cpu(vport_msg->num_tx_q) - in idpf_vport_init_num_qs()
1490 vport->xdp_txq_offset; in idpf_vport_init_num_qs()
1491 vport->xdpsq_share = libeth_xdpsq_shared(vport->num_xdp_txq); in idpf_vport_init_num_qs()
1493 vport->xdp_txq_offset = 0; in idpf_vport_init_num_qs()
1494 vport->num_xdp_txq = 0; in idpf_vport_init_num_qs()
1495 vport->xdpsq_share = false; in idpf_vport_init_num_qs()
1499 if (!idpf_is_queue_model_split(vport->rxq_model)) { in idpf_vport_init_num_qs()
1500 vport->num_bufqs_per_qgrp = 0; in idpf_vport_init_num_qs()
1505 vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP; in idpf_vport_init_num_qs()
1509 * idpf_vport_calc_num_q_desc - Calculate number of descriptors per queue
1515 int num_bufqs = vport->num_bufqs_per_qgrp; in idpf_vport_calc_num_q_desc()
1517 u16 idx = vport->idx; in idpf_vport_calc_num_q_desc()
1520 config_data = &vport->adapter->vport_config[idx]->user_config; in idpf_vport_calc_num_q_desc()
1521 num_req_txq_desc = config_data->num_req_txq_desc; in idpf_vport_calc_num_q_desc()
1522 num_req_rxq_desc = config_data->num_req_rxq_desc; in idpf_vport_calc_num_q_desc()
1524 vport->complq_desc_count = 0; in idpf_vport_calc_num_q_desc()
1526 vport->txq_desc_count = num_req_txq_desc; in idpf_vport_calc_num_q_desc()
1527 if (idpf_is_queue_model_split(vport->txq_model)) { in idpf_vport_calc_num_q_desc()
1528 vport->complq_desc_count = num_req_txq_desc; in idpf_vport_calc_num_q_desc()
1529 if (vport->complq_desc_count < IDPF_MIN_TXQ_COMPLQ_DESC) in idpf_vport_calc_num_q_desc()
1530 vport->complq_desc_count = in idpf_vport_calc_num_q_desc()
1534 vport->txq_desc_count = IDPF_DFLT_TX_Q_DESC_COUNT; in idpf_vport_calc_num_q_desc()
1535 if (idpf_is_queue_model_split(vport->txq_model)) in idpf_vport_calc_num_q_desc()
1536 vport->complq_desc_count = in idpf_vport_calc_num_q_desc()
1541 vport->rxq_desc_count = num_req_rxq_desc; in idpf_vport_calc_num_q_desc()
1543 vport->rxq_desc_count = IDPF_DFLT_RX_Q_DESC_COUNT; in idpf_vport_calc_num_q_desc()
1546 if (!vport->bufq_desc_count[i]) in idpf_vport_calc_num_q_desc()
1547 vport->bufq_desc_count[i] = in idpf_vport_calc_num_q_desc()
1548 IDPF_RX_BUFQ_DESC_COUNT(vport->rxq_desc_count, in idpf_vport_calc_num_q_desc()
1554 * idpf_vport_calc_total_qs - Calculate total number of queues
1574 vport_config = adapter->vport_config[vport_idx]; in idpf_vport_calc_total_qs()
1576 num_req_tx_qs = vport_config->user_config.num_req_tx_qs; in idpf_vport_calc_total_qs()
1577 num_req_rx_qs = vport_config->user_config.num_req_rx_qs; in idpf_vport_calc_total_qs()
1581 dflt_splitq_txq_grps = min_t(int, max_q->max_txq, num_cpus); in idpf_vport_calc_total_qs()
1582 dflt_singleq_txqs = min_t(int, max_q->max_txq, num_cpus); in idpf_vport_calc_total_qs()
1583 dflt_splitq_rxq_grps = min_t(int, max_q->max_rxq, num_cpus); in idpf_vport_calc_total_qs()
1584 dflt_singleq_rxqs = min_t(int, max_q->max_rxq, num_cpus); in idpf_vport_calc_total_qs()
1587 if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->txq_model))) { in idpf_vport_calc_total_qs()
1589 vport_msg->num_tx_complq = cpu_to_le16(num_txq_grps * in idpf_vport_calc_total_qs()
1591 vport_msg->num_tx_q = cpu_to_le16(num_txq_grps * in idpf_vport_calc_total_qs()
1597 vport_msg->num_tx_q = cpu_to_le16(num_qs); in idpf_vport_calc_total_qs()
1598 vport_msg->num_tx_complq = 0; in idpf_vport_calc_total_qs()
1600 if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->rxq_model))) { in idpf_vport_calc_total_qs()
1602 vport_msg->num_rx_bufq = cpu_to_le16(num_rxq_grps * in idpf_vport_calc_total_qs()
1604 vport_msg->num_rx_q = cpu_to_le16(num_rxq_grps * in idpf_vport_calc_total_qs()
1610 vport_msg->num_rx_q = cpu_to_le16(num_qs); in idpf_vport_calc_total_qs()
1611 vport_msg->num_rx_bufq = 0; in idpf_vport_calc_total_qs()
1617 user = &vport_config->user_config; in idpf_vport_calc_total_qs()
1618 user->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q); in idpf_vport_calc_total_qs()
1619 user->num_req_tx_qs = le16_to_cpu(vport_msg->num_tx_q); in idpf_vport_calc_total_qs()
1621 if (vport_config->user_config.xdp_prog) in idpf_vport_calc_total_qs()
1622 num_xdpsq = libeth_xdpsq_num(user->num_req_rx_qs, in idpf_vport_calc_total_qs()
1623 user->num_req_tx_qs, in idpf_vport_calc_total_qs()
1624 vport_config->max_q.max_txq); in idpf_vport_calc_total_qs()
1628 vport_msg->num_tx_q = cpu_to_le16(user->num_req_tx_qs + num_xdpsq); in idpf_vport_calc_total_qs()
1629 if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->txq_model))) in idpf_vport_calc_total_qs()
1630 vport_msg->num_tx_complq = vport_msg->num_tx_q; in idpf_vport_calc_total_qs()
1636 * idpf_vport_calc_num_q_groups - Calculate number of queue groups
1641 if (idpf_is_queue_model_split(vport->txq_model)) in idpf_vport_calc_num_q_groups()
1642 vport->num_txq_grp = vport->num_txq; in idpf_vport_calc_num_q_groups()
1644 vport->num_txq_grp = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS; in idpf_vport_calc_num_q_groups()
1646 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_vport_calc_num_q_groups()
1647 vport->num_rxq_grp = vport->num_rxq; in idpf_vport_calc_num_q_groups()
1649 vport->num_rxq_grp = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS; in idpf_vport_calc_num_q_groups()
1653 * idpf_vport_calc_numq_per_grp - Calculate number of queues per group
1661 if (idpf_is_queue_model_split(vport->txq_model)) in idpf_vport_calc_numq_per_grp()
1664 *num_txq = vport->num_txq; in idpf_vport_calc_numq_per_grp()
1666 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_vport_calc_numq_per_grp()
1669 *num_rxq = vport->num_rxq; in idpf_vport_calc_numq_per_grp()
1673 * idpf_rxq_set_descids - set the descids supported by this queue
1681 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_rxq_set_descids()
1684 if (vport->base_rxd) in idpf_rxq_set_descids()
1685 q->rxdids = VIRTCHNL2_RXDID_1_32B_BASE_M; in idpf_rxq_set_descids()
1687 q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M; in idpf_rxq_set_descids()
1691 * idpf_txq_group_alloc - Allocate all txq group resources
1702 vport->txq_grps = kcalloc(vport->num_txq_grp, in idpf_txq_group_alloc()
1703 sizeof(*vport->txq_grps), GFP_KERNEL); in idpf_txq_group_alloc()
1704 if (!vport->txq_grps) in idpf_txq_group_alloc()
1705 return -ENOMEM; in idpf_txq_group_alloc()
1707 split = idpf_is_queue_model_split(vport->txq_model); in idpf_txq_group_alloc()
1708 flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, in idpf_txq_group_alloc()
1711 for (i = 0; i < vport->num_txq_grp; i++) { in idpf_txq_group_alloc()
1712 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; in idpf_txq_group_alloc()
1713 struct idpf_adapter *adapter = vport->adapter; in idpf_txq_group_alloc()
1716 tx_qgrp->vport = vport; in idpf_txq_group_alloc()
1717 tx_qgrp->num_txq = num_txq; in idpf_txq_group_alloc()
1719 for (j = 0; j < tx_qgrp->num_txq; j++) { in idpf_txq_group_alloc()
1720 tx_qgrp->txqs[j] = kzalloc(sizeof(*tx_qgrp->txqs[j]), in idpf_txq_group_alloc()
1722 if (!tx_qgrp->txqs[j]) in idpf_txq_group_alloc()
1726 for (j = 0; j < tx_qgrp->num_txq; j++) { in idpf_txq_group_alloc()
1727 struct idpf_tx_queue *q = tx_qgrp->txqs[j]; in idpf_txq_group_alloc()
1729 q->dev = &adapter->pdev->dev; in idpf_txq_group_alloc()
1730 q->desc_count = vport->txq_desc_count; in idpf_txq_group_alloc()
1731 q->tx_max_bufs = idpf_get_max_tx_bufs(adapter); in idpf_txq_group_alloc()
1732 q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter); in idpf_txq_group_alloc()
1733 q->netdev = vport->netdev; in idpf_txq_group_alloc()
1734 q->txq_grp = tx_qgrp; in idpf_txq_group_alloc()
1735 q->rel_q_id = j; in idpf_txq_group_alloc()
1738 q->clean_budget = vport->compln_clean_budget; in idpf_txq_group_alloc()
1740 vport->crc_enable); in idpf_txq_group_alloc()
1748 q->refillq = kzalloc(sizeof(*q->refillq), GFP_KERNEL); in idpf_txq_group_alloc()
1749 if (!q->refillq) in idpf_txq_group_alloc()
1752 idpf_queue_set(GEN_CHK, q->refillq); in idpf_txq_group_alloc()
1753 idpf_queue_set(RFL_GEN_CHK, q->refillq); in idpf_txq_group_alloc()
1759 tx_qgrp->complq = kcalloc(IDPF_COMPLQ_PER_GROUP, in idpf_txq_group_alloc()
1760 sizeof(*tx_qgrp->complq), in idpf_txq_group_alloc()
1762 if (!tx_qgrp->complq) in idpf_txq_group_alloc()
1765 tx_qgrp->complq->desc_count = vport->complq_desc_count; in idpf_txq_group_alloc()
1766 tx_qgrp->complq->txq_grp = tx_qgrp; in idpf_txq_group_alloc()
1767 tx_qgrp->complq->netdev = vport->netdev; in idpf_txq_group_alloc()
1768 tx_qgrp->complq->clean_budget = vport->compln_clean_budget; in idpf_txq_group_alloc()
1771 idpf_queue_set(FLOW_SCH_EN, tx_qgrp->complq); in idpf_txq_group_alloc()
1779 return -ENOMEM; in idpf_txq_group_alloc()
1783 * idpf_rxq_group_alloc - Allocate all rxq group resources
1794 vport->rxq_grps = kcalloc(vport->num_rxq_grp, in idpf_rxq_group_alloc()
1796 if (!vport->rxq_grps) in idpf_rxq_group_alloc()
1797 return -ENOMEM; in idpf_rxq_group_alloc()
1801 for (i = 0; i < vport->num_rxq_grp; i++) { in idpf_rxq_group_alloc()
1802 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in idpf_rxq_group_alloc()
1805 rx_qgrp->vport = vport; in idpf_rxq_group_alloc()
1806 if (!idpf_is_queue_model_split(vport->rxq_model)) { in idpf_rxq_group_alloc()
1807 rx_qgrp->singleq.num_rxq = num_rxq; in idpf_rxq_group_alloc()
1809 rx_qgrp->singleq.rxqs[j] = in idpf_rxq_group_alloc()
1810 kzalloc(sizeof(*rx_qgrp->singleq.rxqs[j]), in idpf_rxq_group_alloc()
1812 if (!rx_qgrp->singleq.rxqs[j]) { in idpf_rxq_group_alloc()
1813 err = -ENOMEM; in idpf_rxq_group_alloc()
1819 rx_qgrp->splitq.num_rxq_sets = num_rxq; in idpf_rxq_group_alloc()
1822 rx_qgrp->splitq.rxq_sets[j] = in idpf_rxq_group_alloc()
1825 if (!rx_qgrp->splitq.rxq_sets[j]) { in idpf_rxq_group_alloc()
1826 err = -ENOMEM; in idpf_rxq_group_alloc()
1831 rx_qgrp->splitq.bufq_sets = kcalloc(vport->num_bufqs_per_qgrp, in idpf_rxq_group_alloc()
1834 if (!rx_qgrp->splitq.bufq_sets) { in idpf_rxq_group_alloc()
1835 err = -ENOMEM; in idpf_rxq_group_alloc()
1839 for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { in idpf_rxq_group_alloc()
1841 &rx_qgrp->splitq.bufq_sets[j]; in idpf_rxq_group_alloc()
1845 q = &rx_qgrp->splitq.bufq_sets[j].bufq; in idpf_rxq_group_alloc()
1846 q->desc_count = vport->bufq_desc_count[j]; in idpf_rxq_group_alloc()
1847 q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK; in idpf_rxq_group_alloc()
1851 bufq_set->num_refillqs = num_rxq; in idpf_rxq_group_alloc()
1852 bufq_set->refillqs = kcalloc(num_rxq, swq_size, in idpf_rxq_group_alloc()
1854 if (!bufq_set->refillqs) { in idpf_rxq_group_alloc()
1855 err = -ENOMEM; in idpf_rxq_group_alloc()
1858 for (k = 0; k < bufq_set->num_refillqs; k++) { in idpf_rxq_group_alloc()
1860 &bufq_set->refillqs[k]; in idpf_rxq_group_alloc()
1862 refillq->desc_count = in idpf_rxq_group_alloc()
1863 vport->bufq_desc_count[j]; in idpf_rxq_group_alloc()
1866 refillq->ring = kcalloc(refillq->desc_count, in idpf_rxq_group_alloc()
1867 sizeof(*refillq->ring), in idpf_rxq_group_alloc()
1869 if (!refillq->ring) { in idpf_rxq_group_alloc()
1870 err = -ENOMEM; in idpf_rxq_group_alloc()
1880 if (!idpf_is_queue_model_split(vport->rxq_model)) { in idpf_rxq_group_alloc()
1881 q = rx_qgrp->singleq.rxqs[j]; in idpf_rxq_group_alloc()
1884 q = &rx_qgrp->splitq.rxq_sets[j]->rxq; in idpf_rxq_group_alloc()
1885 rx_qgrp->splitq.rxq_sets[j]->refillq[0] = in idpf_rxq_group_alloc()
1886 &rx_qgrp->splitq.bufq_sets[0].refillqs[j]; in idpf_rxq_group_alloc()
1887 if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) in idpf_rxq_group_alloc()
1888 rx_qgrp->splitq.rxq_sets[j]->refillq[1] = in idpf_rxq_group_alloc()
1889 &rx_qgrp->splitq.bufq_sets[1].refillqs[j]; in idpf_rxq_group_alloc()
1894 q->desc_count = vport->rxq_desc_count; in idpf_rxq_group_alloc()
1895 q->rx_ptype_lkup = vport->rx_ptype_lkup; in idpf_rxq_group_alloc()
1896 q->bufq_sets = rx_qgrp->splitq.bufq_sets; in idpf_rxq_group_alloc()
1897 q->idx = (i * num_rxq) + j; in idpf_rxq_group_alloc()
1898 q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK; in idpf_rxq_group_alloc()
1899 q->rx_max_pkt_size = vport->netdev->mtu + in idpf_rxq_group_alloc()
1913 * idpf_vport_queue_grp_alloc_all - Allocate all queue groups/resources
1942 * idpf_vport_queues_alloc - Allocate memory for all queues
1981 * idpf_tx_read_tstamp - schedule a work to read Tx timestamp value
1993 tx_tstamp_caps = txq->cached_tstamp_caps; in idpf_tx_read_tstamp()
1994 spin_lock_bh(&tx_tstamp_caps->status_lock); in idpf_tx_read_tstamp()
1996 for (u32 i = 0; i < tx_tstamp_caps->num_entries; i++) { in idpf_tx_read_tstamp()
1997 tx_tstamp_status = &tx_tstamp_caps->tx_tstamp_status[i]; in idpf_tx_read_tstamp()
1998 if (tx_tstamp_status->state != IDPF_PTP_FREE) in idpf_tx_read_tstamp()
2001 tx_tstamp_status->skb = skb; in idpf_tx_read_tstamp()
2002 tx_tstamp_status->state = IDPF_PTP_REQUEST; in idpf_tx_read_tstamp()
2007 queue_work(system_unbound_wq, txq->tstamp_task); in idpf_tx_read_tstamp()
2011 spin_unlock_bh(&tx_tstamp_caps->status_lock); in idpf_tx_read_tstamp()
2016 if (unlikely(++(ntc) == (txq)->desc_count)) { \
2018 buf = (txq)->tx_buf; \
2019 desc = &(txq)->flex_tx[0]; \
2027 * idpf_tx_splitq_clean - Reclaim resources from buffer queue
2032 * @descs_only: true if queue is using flow-based scheduling and should
2035 * Cleans the queue descriptor ring. If the queue is using queue-based
2037 * flow-based scheduling, only the descriptors are cleaned at this time.
2040 * this function when using flow-based scheduling.
2049 u32 ntc = tx_q->next_to_clean; in idpf_tx_splitq_clean()
2051 .dev = tx_q->dev, in idpf_tx_splitq_clean()
2059 tx_q->next_to_clean = end; in idpf_tx_splitq_clean()
2063 tx_desc = &tx_q->flex_tx[ntc]; in idpf_tx_splitq_clean()
2064 next_pending_desc = &tx_q->flex_tx[end]; in idpf_tx_splitq_clean()
2065 tx_buf = &tx_q->tx_buf[ntc]; in idpf_tx_splitq_clean()
2074 if (tx_buf->type <= LIBETH_SQE_CTX) in idpf_tx_splitq_clean()
2077 if (unlikely(tx_buf->type != LIBETH_SQE_SKB)) in idpf_tx_splitq_clean()
2080 eop_idx = tx_buf->rs_idx; in idpf_tx_splitq_clean()
2096 tx_q->next_to_clean = ntc; in idpf_tx_splitq_clean()
2100 * idpf_tx_clean_bufs - clean flow scheduling TX queue buffers
2115 .dev = txq->dev, in idpf_tx_clean_bufs()
2120 tx_buf = &txq->tx_buf[buf_id]; in idpf_tx_clean_bufs()
2121 if (tx_buf->type == LIBETH_SQE_SKB) { in idpf_tx_clean_bufs()
2122 if (skb_shinfo(tx_buf->skb)->tx_flags & SKBTX_IN_PROGRESS) in idpf_tx_clean_bufs()
2123 idpf_tx_read_tstamp(txq, tx_buf->skb); in idpf_tx_clean_bufs()
2126 idpf_post_buf_refill(txq->refillq, buf_id); in idpf_tx_clean_bufs()
2132 tx_buf = &txq->tx_buf[buf_id]; in idpf_tx_clean_bufs()
2134 idpf_post_buf_refill(txq->refillq, buf_id); in idpf_tx_clean_bufs()
2139 * idpf_tx_handle_rs_completion - clean a single packet and all of its buffers
2157 u16 rs_compl_val = le16_to_cpu(desc->common.q_head_compl_tag.q_head); in idpf_tx_handle_rs_completion()
2168 * idpf_tx_clean_complq - Reclaim resources on completion queue
2179 s16 ntc = complq->next_to_clean; in idpf_tx_clean_complq()
2185 complq_budget = complq->clean_budget; in idpf_tx_clean_complq()
2186 tx_desc = &complq->comp[ntc]; in idpf_tx_clean_complq()
2187 ntc -= complq->desc_count; in idpf_tx_clean_complq()
2198 gen = le16_get_bits(tx_desc->common.qid_comptype_gen, in idpf_tx_clean_complq()
2204 rel_tx_qid = le16_get_bits(tx_desc->common.qid_comptype_gen, in idpf_tx_clean_complq()
2206 if (rel_tx_qid >= complq->txq_grp->num_txq || in idpf_tx_clean_complq()
2207 !complq->txq_grp->txqs[rel_tx_qid]) { in idpf_tx_clean_complq()
2208 netdev_err(complq->netdev, "TxQ not found\n"); in idpf_tx_clean_complq()
2211 tx_q = complq->txq_grp->txqs[rel_tx_qid]; in idpf_tx_clean_complq()
2214 ctype = le16_get_bits(tx_desc->common.qid_comptype_gen, in idpf_tx_clean_complq()
2218 hw_head = tx_desc->common.q_head_compl_tag.q_head; in idpf_tx_clean_complq()
2228 netdev_err(tx_q->netdev, in idpf_tx_clean_complq()
2233 u64_stats_update_begin(&tx_q->stats_sync); in idpf_tx_clean_complq()
2234 u64_stats_add(&tx_q->q_stats.packets, cleaned_stats.packets); in idpf_tx_clean_complq()
2235 u64_stats_add(&tx_q->q_stats.bytes, cleaned_stats.bytes); in idpf_tx_clean_complq()
2236 tx_q->cleaned_pkts += cleaned_stats.packets; in idpf_tx_clean_complq()
2237 tx_q->cleaned_bytes += cleaned_stats.bytes; in idpf_tx_clean_complq()
2238 complq->num_completions++; in idpf_tx_clean_complq()
2239 u64_stats_update_end(&tx_q->stats_sync); in idpf_tx_clean_complq()
2245 ntc -= complq->desc_count; in idpf_tx_clean_complq()
2246 tx_desc = &complq->comp[0]; in idpf_tx_clean_complq()
2253 complq_budget--; in idpf_tx_clean_complq()
2259 if (unlikely(IDPF_TX_COMPLQ_PENDING(complq->txq_grp) > in idpf_tx_clean_complq()
2263 np = netdev_priv(complq->netdev); in idpf_tx_clean_complq()
2264 for (i = 0; i < complq->txq_grp->num_txq; ++i) { in idpf_tx_clean_complq()
2265 struct idpf_tx_queue *tx_q = complq->txq_grp->txqs[i]; in idpf_tx_clean_complq()
2270 if (!tx_q->cleaned_bytes) in idpf_tx_clean_complq()
2273 *cleaned += tx_q->cleaned_pkts; in idpf_tx_clean_complq()
2276 nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); in idpf_tx_clean_complq()
2278 dont_wake = !complq_ok || np->state != __IDPF_VPORT_UP || in idpf_tx_clean_complq()
2279 !netif_carrier_ok(tx_q->netdev); in idpf_tx_clean_complq()
2281 __netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes, in idpf_tx_clean_complq()
2288 tx_q->cleaned_bytes = 0; in idpf_tx_clean_complq()
2289 tx_q->cleaned_pkts = 0; in idpf_tx_clean_complq()
2292 ntc += complq->desc_count; in idpf_tx_clean_complq()
2293 complq->next_to_clean = ntc; in idpf_tx_clean_complq()
2299 * idpf_wait_for_sw_marker_completion - wait for SW marker of disabled Tx queue
2317 complq = idpf_queue_has(XDP, txq) ? txq->complq : txq->txq_grp->complq; in idpf_wait_for_sw_marker_completion()
2318 ntc = complq->next_to_clean; in idpf_wait_for_sw_marker_completion()
2330 tx_desc = flow ? &complq->comp[ntc].common : in idpf_wait_for_sw_marker_completion()
2331 &complq->comp_4b[ntc]; in idpf_wait_for_sw_marker_completion()
2332 ctype_gen = le16_to_cpu(tx_desc->qid_comptype_gen); in idpf_wait_for_sw_marker_completion()
2344 target = complq->txq_grp->txqs[id]; in idpf_wait_for_sw_marker_completion()
2351 if (unlikely(++ntc == complq->desc_count)) { in idpf_wait_for_sw_marker_completion()
2358 complq->next_to_clean = ntc; in idpf_wait_for_sw_marker_completion()
2362 * idpf_tx_splitq_build_ctb - populate command tag and size for queue
2373 desc->q.qw1.cmd_dtype = in idpf_tx_splitq_build_ctb()
2374 le16_encode_bits(params->dtype, IDPF_FLEX_TXD_QW1_DTYPE_M); in idpf_tx_splitq_build_ctb()
2375 desc->q.qw1.cmd_dtype |= in idpf_tx_splitq_build_ctb()
2377 desc->q.qw1.buf_size = cpu_to_le16(size); in idpf_tx_splitq_build_ctb()
2378 desc->q.qw1.l2tags.l2tag1 = cpu_to_le16(params->td_tag); in idpf_tx_splitq_build_ctb()
2382 * idpf_tx_splitq_build_flow_desc - populate command tag and size for flow
2393 *(u32 *)&desc->flow.qw1.cmd_dtype = (u8)(params->dtype | td_cmd); in idpf_tx_splitq_build_flow_desc()
2394 desc->flow.qw1.rxr_bufsize = cpu_to_le16((u16)size); in idpf_tx_splitq_build_flow_desc()
2395 desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag); in idpf_tx_splitq_build_flow_desc()
2399 * idpf_txq_has_room - check if enough Tx splitq resources are available
2410 IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) > in idpf_txq_has_room()
2411 IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq) || in idpf_txq_has_room()
2412 idpf_tx_splitq_get_free_bufs(tx_q->refillq) < bufs_needed) in idpf_txq_has_room()
2418 * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
2433 if (netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx, in idpf_tx_maybe_stop_splitq()
2439 u64_stats_update_begin(&tx_q->stats_sync); in idpf_tx_maybe_stop_splitq()
2440 u64_stats_inc(&tx_q->q_stats.q_busy); in idpf_tx_maybe_stop_splitq()
2441 u64_stats_update_end(&tx_q->stats_sync); in idpf_tx_maybe_stop_splitq()
2443 return -EBUSY; in idpf_tx_maybe_stop_splitq()
2447 * idpf_tx_buf_hw_update - Store the new tail value
2461 nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); in idpf_tx_buf_hw_update()
2462 tx_q->next_to_use = val; in idpf_tx_buf_hw_update()
2466 * applicable for weak-ordered memory model archs, in idpf_tx_buf_hw_update()
2467 * such as IA-64). in idpf_tx_buf_hw_update()
2473 writel(val, tx_q->tail); in idpf_tx_buf_hw_update()
2477 * idpf_tx_res_count_required - get number of Tx resources needed for this pkt
2497 *bufs_needed += shinfo->nr_frags; in idpf_tx_res_count_required()
2498 for (i = 0; i < shinfo->nr_frags; i++) { in idpf_tx_res_count_required()
2501 size = skb_frag_size(&shinfo->frags[i]); in idpf_tx_res_count_required()
2513 if (idpf_chk_linearize(skb, txq->tx_max_bufs, count)) { in idpf_tx_res_count_required()
2517 count = idpf_size_to_txd_count(skb->len); in idpf_tx_res_count_required()
2518 u64_stats_update_begin(&txq->stats_sync); in idpf_tx_res_count_required()
2519 u64_stats_inc(&txq->q_stats.linearize); in idpf_tx_res_count_required()
2520 u64_stats_update_end(&txq->stats_sync); in idpf_tx_res_count_required()
2527 * idpf_tx_splitq_bump_ntu - adjust NTU and generation
2535 if (ntu == txq->desc_count) in idpf_tx_splitq_bump_ntu()
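Only the wrap check of the bump helper is visible here; stand-alone, the whole pattern amounts to the sketch below (names invented):

static unsigned int ring_idx_bump(unsigned int ntu, unsigned int desc_count)
{
        ntu++;
        if (ntu == desc_count)
                ntu = 0;        /* wrap back to the start of the ring */

        return ntu;
}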
2542 * idpf_tx_get_free_buf_id - get a free buffer ID from the refill queue
2551 u32 ntc = refillq->next_to_clean; in idpf_tx_get_free_buf_id()
2554 refill_desc = refillq->ring[ntc]; in idpf_tx_get_free_buf_id()
2562 if (unlikely(++ntc == refillq->desc_count)) { in idpf_tx_get_free_buf_id()
2567 refillq->next_to_clean = ntc; in idpf_tx_get_free_buf_id()
2573 * idpf_tx_splitq_pkt_err_unmap - Unmap buffers and bump tail in case of error
2582 struct idpf_sw_queue *refillq = txq->refillq; in idpf_tx_splitq_pkt_err_unmap()
2586 .dev = txq->dev, in idpf_tx_splitq_pkt_err_unmap()
2590 u64_stats_update_begin(&txq->stats_sync); in idpf_tx_splitq_pkt_err_unmap()
2591 u64_stats_inc(&txq->q_stats.dma_map_errs); in idpf_tx_splitq_pkt_err_unmap()
2592 u64_stats_update_end(&txq->stats_sync); in idpf_tx_splitq_pkt_err_unmap()
2596 tx_buf = &txq->tx_buf[idpf_tx_buf_next(tx_buf)]; in idpf_tx_splitq_pkt_err_unmap()
2601 idpf_tx_buf_hw_update(txq, params->prev_ntu, false); in idpf_tx_splitq_pkt_err_unmap()
2607 if (params->prev_refill_gen != idpf_queue_has(RFL_GEN_CHK, refillq)) in idpf_tx_splitq_pkt_err_unmap()
2609 refillq->next_to_clean = params->prev_refill_ntc; in idpf_tx_splitq_pkt_err_unmap()
2613 * idpf_tx_splitq_map - Build the Tx flex descriptor
2629 u16 i = tx_q->next_to_use; in idpf_tx_splitq_map()
2637 skb = first->skb; in idpf_tx_splitq_map()
2639 td_cmd = params->offload.td_cmd; in idpf_tx_splitq_map()
2641 data_len = skb->data_len; in idpf_tx_splitq_map()
2644 tx_desc = &tx_q->flex_tx[i]; in idpf_tx_splitq_map()
2646 dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE); in idpf_tx_splitq_map()
2649 first->nr_frags = 0; in idpf_tx_splitq_map()
2651 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { in idpf_tx_splitq_map()
2654 if (unlikely(dma_mapping_error(tx_q->dev, dma))) { in idpf_tx_splitq_map()
2660 first->nr_frags++; in idpf_tx_splitq_map()
2661 tx_buf->type = LIBETH_SQE_FRAG; in idpf_tx_splitq_map()
2668 tx_desc->q.buf_addr = cpu_to_le64(dma); in idpf_tx_splitq_map()
2671 * single descriptor i.e. frag size > 16K-1. We will need to in idpf_tx_splitq_map()
2680 * ------------------------------------------------------------ in idpf_tx_splitq_map()
2682 * ------------------------------------------------------------ in idpf_tx_splitq_map()
2687 * ------------------------------------------------------------ in idpf_tx_splitq_map()
2690 * ------------------------------------------------------------ in idpf_tx_splitq_map()
2696 * 4K - (DMA addr lower order bits) = in idpf_tx_splitq_map()
2701 * 13784 = 12K + (4096-2600) in idpf_tx_splitq_map()
2710 max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1); in idpf_tx_splitq_map()
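A runnable reproduction of the worked numbers in the comment above: "-dma & (size - 1)" is the distance from the DMA address up to the next size-aligned boundary, so with the comment's example of 2600 low-order bits the first chunk grows from 12K to 13784 bytes and every later chunk starts 4K-aligned. The address and the 4096-byte read request size below are assumed for the example.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t dma = 0x10000a28;              /* low 12 bits = 2600, as in the comment */
        uint32_t align = 4096;                  /* assumed max read request size */
        uint32_t max_data = 12 * 1024;          /* 12K base chunk */

        /* bytes up to the next 4K boundary: 4096 - 2600 = 1496 */
        max_data += (uint32_t)(-dma & (align - 1));

        printf("first chunk = %u\n", max_data); /* prints 13784 */
        return 0;
}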
2715 if (unlikely(++i == tx_q->desc_count)) { in idpf_tx_splitq_map()
2716 tx_desc = &tx_q->flex_tx[0]; in idpf_tx_splitq_map()
2724 * max_data will be >= 12K and <= 16K-1. On any in idpf_tx_splitq_map()
2729 size -= max_data; in idpf_tx_splitq_map()
2737 tx_desc->q.buf_addr = cpu_to_le64(dma); in idpf_tx_splitq_map()
2745 if (unlikely(++i == tx_q->desc_count)) { in idpf_tx_splitq_map()
2746 tx_desc = &tx_q->flex_tx[0]; in idpf_tx_splitq_map()
2753 if (unlikely(!idpf_tx_get_free_buf_id(tx_q->refillq, in idpf_tx_splitq_map()
2763 tx_buf = &tx_q->tx_buf[next_buf_id]; in idpf_tx_splitq_map()
2766 data_len -= size; in idpf_tx_splitq_map()
2768 dma = skb_frag_dma_map(tx_q->dev, frag, 0, size, in idpf_tx_splitq_map()
2775 first->type = LIBETH_SQE_SKB; in idpf_tx_splitq_map()
2778 first->rs_idx = i; in idpf_tx_splitq_map()
2780 td_cmd |= params->eop_cmd; in idpf_tx_splitq_map()
2784 tx_q->txq_grp->num_completions_pending++; in idpf_tx_splitq_map()
2787 nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); in idpf_tx_splitq_map()
2788 netdev_tx_sent_queue(nq, first->bytes); in idpf_tx_splitq_map()
2794 * idpf_tso - computes mss and TSO length to prepare for TSO
2830 if (ip.v4->version == 4) { in idpf_tso()
2831 ip.v4->tot_len = 0; in idpf_tso()
2832 ip.v4->check = 0; in idpf_tso()
2833 } else if (ip.v6->version == 6) { in idpf_tso()
2834 ip.v6->payload_len = 0; in idpf_tso()
2840 paylen = skb->len - l4_start; in idpf_tso()
2842 switch (shinfo->gso_type & ~SKB_GSO_DODGY) { in idpf_tso()
2845 csum_replace_by_diff(&l4.tcp->check, in idpf_tso()
2847 off->tso_hdr_len = __tcp_hdrlen(l4.tcp) + l4_start; in idpf_tso()
2850 csum_replace_by_diff(&l4.udp->check, in idpf_tso()
2853 off->tso_hdr_len = sizeof(struct udphdr) + l4_start; in idpf_tso()
2854 l4.udp->len = htons(shinfo->gso_size + sizeof(struct udphdr)); in idpf_tso()
2857 return -EINVAL; in idpf_tso()
2860 off->tso_len = skb->len - off->tso_hdr_len; in idpf_tso()
2861 off->mss = shinfo->gso_size; in idpf_tso()
2862 off->tso_segs = shinfo->gso_segs; in idpf_tso()
2864 off->tx_flags |= IDPF_TX_FLAGS_TSO; in idpf_tso()
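To make the TSO bookkeeping above concrete, a small worked example with invented numbers; the real code takes gso_segs straight from the skb's shared info rather than recomputing it, the division below only shows where that count comes from:

#include <stdio.h>

int main(void)
{
        unsigned int skb_len = 7040;    /* total frame length (invented) */
        unsigned int hdr_len = 54;      /* Ethernet + IPv4 + TCP headers */
        unsigned int mss = 1448;        /* gso_size */
        unsigned int tso_len = skb_len - hdr_len;
        unsigned int segs = (tso_len + mss - 1) / mss;  /* DIV_ROUND_UP */

        printf("tso_len=%u segs=%u\n", tso_len, segs);  /* 6986, 5 */
        return 0;
}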
2871 * idpf_tx_splitq_get_ctx_desc - grab next desc and update buffer ring
2881 int i = txq->next_to_use; in idpf_tx_splitq_get_ctx_desc()
2884 desc = &txq->flex_ctx[i]; in idpf_tx_splitq_get_ctx_desc()
2885 txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i); in idpf_tx_splitq_get_ctx_desc()
2891 * idpf_tx_drop_skb - free the SKB and bump tail if necessary
2897 u64_stats_update_begin(&tx_q->stats_sync); in idpf_tx_drop_skb()
2898 u64_stats_inc(&tx_q->q_stats.skb_drops); in idpf_tx_drop_skb()
2899 u64_stats_update_end(&tx_q->stats_sync); in idpf_tx_drop_skb()
2901 idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); in idpf_tx_drop_skb()
2910 * idpf_tx_tstamp - set up context descriptor for hardware timestamp
2923 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) in idpf_tx_tstamp()
2924 return -1; in idpf_tx_tstamp()
2927 return -1; in idpf_tx_tstamp()
2930 if (off->tx_flags & IDPF_TX_FLAGS_TSO) in idpf_tx_tstamp()
2931 return -1; in idpf_tx_tstamp()
2936 u64_stats_update_begin(&tx_q->stats_sync); in idpf_tx_tstamp()
2937 u64_stats_inc(&tx_q->q_stats.tstamp_skipped); in idpf_tx_tstamp()
2938 u64_stats_update_end(&tx_q->stats_sync); in idpf_tx_tstamp()
2940 return -1; in idpf_tx_tstamp()
2943 off->tx_flags |= IDPF_TX_FLAGS_TSYN; in idpf_tx_tstamp()
2949 * idpf_tx_set_tstamp_desc - Set the Tx descriptor fields needed to generate
2952 * @idx: Index of the Tx timestamp latch
2957 ctx_desc->tsyn.qw1 = le64_encode_bits(IDPF_TX_DESC_DTYPE_CTX, in idpf_tx_set_tstamp_desc()
2967 return -1; in idpf_tx_tstamp()
2976 * idpf_tx_splitq_need_re - check whether RE bit needs to be set
2983 int gap = tx_q->next_to_use - tx_q->last_re; in idpf_tx_splitq_need_re()
2985 gap += (gap < 0) ? tx_q->desc_count : 0; in idpf_tx_splitq_need_re()
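The RE (report event) decision above only needs the wrap-aware distance between next_to_use and the last descriptor that carried RE; as a stand-alone sketch (names invented):

static int re_gap(int next_to_use, int last_re, int desc_count)
{
        int gap = next_to_use - last_re;

        if (gap < 0)
                gap += desc_count;      /* next_to_use has already wrapped */

        return gap;     /* e.g. ntu = 10, last_re = 250, count = 256 -> 16 */
}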
2991 * idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors
3001 .prev_ntu = tx_q->next_to_use, in idpf_tx_splitq_frame()
3020 idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); in idpf_tx_splitq_frame()
3029 ctx_desc->tso.qw1.cmd_dtype = in idpf_tx_splitq_frame()
3032 ctx_desc->tso.qw0.flex_tlen = in idpf_tx_splitq_frame()
3035 ctx_desc->tso.qw0.mss_rt = in idpf_tx_splitq_frame()
3038 ctx_desc->tso.qw0.hdr_len = tx_params.offload.tso_hdr_len; in idpf_tx_splitq_frame()
3040 u64_stats_update_begin(&tx_q->stats_sync); in idpf_tx_splitq_frame()
3041 u64_stats_inc(&tx_q->q_stats.lso_pkts); in idpf_tx_splitq_frame()
3042 u64_stats_update_end(&tx_q->stats_sync); in idpf_tx_splitq_frame()
3046 if (idx != -1) { in idpf_tx_splitq_frame()
3052 struct idpf_sw_queue *refillq = tx_q->refillq; in idpf_tx_splitq_frame()
3060 tx_params.prev_refill_ntc = refillq->next_to_clean; in idpf_tx_splitq_frame()
3062 if (unlikely(!idpf_tx_get_free_buf_id(tx_q->refillq, in idpf_tx_splitq_frame()
3067 refillq->next_to_clean = tx_params.prev_refill_ntc; in idpf_tx_splitq_frame()
3069 tx_q->next_to_use = tx_params.prev_ntu; in idpf_tx_splitq_frame()
3082 tx_q->txq_grp->num_completions_pending++; in idpf_tx_splitq_frame()
3083 tx_q->last_re = tx_q->next_to_use; in idpf_tx_splitq_frame()
3086 if (skb->ip_summed == CHECKSUM_PARTIAL) in idpf_tx_splitq_frame()
3090 buf_id = tx_q->next_to_use; in idpf_tx_splitq_frame()
3095 if (skb->ip_summed == CHECKSUM_PARTIAL) in idpf_tx_splitq_frame()
3099 first = &tx_q->tx_buf[buf_id]; in idpf_tx_splitq_frame()
3100 first->skb = skb; in idpf_tx_splitq_frame()
3103 first->packets = tx_params.offload.tso_segs; in idpf_tx_splitq_frame()
3104 first->bytes = skb->len + in idpf_tx_splitq_frame()
3105 ((first->packets - 1) * tx_params.offload.tso_hdr_len); in idpf_tx_splitq_frame()
3107 first->packets = 1; in idpf_tx_splitq_frame()
3108 first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN); in idpf_tx_splitq_frame()
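The first buffer's packets/bytes accounting above credits a TSO frame with one packet per segment and counts the headers once per segment on the wire, i.e. bytes = skb->len + (segments - 1) * header_len; a non-TSO frame counts as one packet of at least ETH_ZLEN (60) bytes. A quick sketch of that arithmetic:

#include <stdint.h>
#include <stdio.h>

#define SKETCH_ETH_ZLEN	60u	/* minimum Ethernet frame length (no FCS) */

static void sketch_first_buf_stats(uint32_t skb_len, uint32_t tso_segs,
				   uint32_t tso_hdr_len, int is_tso,
				   uint32_t *packets, uint32_t *bytes)
{
	if (is_tso) {
		*packets = tso_segs;
		/* headers are re-sent on every segment after the first */
		*bytes = skb_len + (tso_segs - 1) * tso_hdr_len;
	} else {
		*packets = 1;
		*bytes = skb_len > SKETCH_ETH_ZLEN ? skb_len : SKETCH_ETH_ZLEN;
	}
}

int main(void)
{
	uint32_t packets, bytes;

	sketch_first_buf_stats(64326, 45, 66, 1, &packets, &bytes);
	printf("packets=%u wire_bytes=%u\n", packets, bytes);	/* 45, 67230 */
	return 0;
}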
3117  * idpf_tx_start - Select the right Tx queue to send the buffer on

3129 vport->num_txq - vport->num_xdp_txq)) { in idpf_tx_start()
3135 tx_q = vport->txqs[skb_get_queue_mapping(skb)]; in idpf_tx_start()
3140 if (skb_put_padto(skb, tx_q->tx_min_pkt_len)) { in idpf_tx_start()
3141 idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); in idpf_tx_start()
3146 if (idpf_is_queue_model_split(vport->txq_model)) in idpf_tx_start()
3153 * idpf_rx_hash - set the hash value in the skb
3166 if (!libeth_rx_pt_has_hash(rxq->xdp_rxq.dev, decoded)) in idpf_rx_hash()
3169 hash = le16_to_cpu(rx_desc->hash1) | in idpf_rx_hash()
3170 (rx_desc->ff2_mirrid_hash2.hash2 << 16) | in idpf_rx_hash()
3171 (rx_desc->hash3 << 24); in idpf_rx_hash()
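idpf_rx_hash() stitches the 32-bit RSS hash back together from three descriptor fields: hash1 supplies bits 0-15, hash2 bits 16-23 and hash3 bits 24-31 (field widths inferred from the shifts in the fragment). A standalone sketch of that reassembly:

#include <stdint.h>
#include <stdio.h>

/* hash1: low 16 bits, hash2: next 8 bits, hash3: top 8 bits */
static uint32_t sketch_rx_hash(uint16_t hash1, uint8_t hash2, uint8_t hash3)
{
	return (uint32_t)hash1 |
	       ((uint32_t)hash2 << 16) |
	       ((uint32_t)hash3 << 24);
}

int main(void)
{
	printf("0x%08x\n", sketch_rx_hash(0xbeef, 0xad, 0xde)); /* 0xdeadbeef */
	return 0;
}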
3177 * idpf_rx_csum - Indicate in skb if checksum is good
3183 * skb->protocol must be set before this function is called
3192 if (!libeth_rx_pt_has_checksum(rxq->xdp_rxq.dev, decoded)) in idpf_rx_csum()
3216 skb->ip_summed = CHECKSUM_UNNECESSARY; in idpf_rx_csum()
3220 skb->csum = csum_unfold((__force __sum16)~swab16(csum_bits.raw_csum)); in idpf_rx_csum()
3221 skb->ip_summed = CHECKSUM_COMPLETE; in idpf_rx_csum()
3226 u64_stats_update_begin(&rxq->stats_sync); in idpf_rx_csum()
3227 u64_stats_inc(&rxq->q_stats.hw_csum_err); in idpf_rx_csum()
3228 u64_stats_update_end(&rxq->stats_sync); in idpf_rx_csum()
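For CHECKSUM_COMPLETE, the fragment above takes the raw 16-bit checksum reported by the hardware, byte-swaps it, complements it and stores the zero-extended result in skb->csum (csum_unfold() on a __sum16 is effectively just that zero extension). A userspace sketch of the same transform, as I read it from the fragment:

#include <stdint.h>
#include <stdio.h>

static uint16_t sketch_swab16(uint16_t x)
{
	return (uint16_t)((x << 8) | (x >> 8));
}

/* Turn the HW's raw checksum into the ones'-complement sum the stack wants. */
static uint32_t sketch_raw_csum_to_wsum(uint16_t raw_csum)
{
	return (uint32_t)(uint16_t)~sketch_swab16(raw_csum);
}

int main(void)
{
	printf("0x%08x\n", sketch_raw_csum_to_wsum(0x1234));	/* 0x0000cbed */
	return 0;
}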
3232 * idpf_rx_splitq_extract_csum_bits - Extract checksum bits from descriptor
3243 qword0 = rx_desc->status_err0_qw0; in idpf_rx_splitq_extract_csum_bits()
3244 qword1 = rx_desc->status_err0_qw1; in idpf_rx_splitq_extract_csum_bits()
3257 !le16_get_bits(rx_desc->ptype_err_fflags0, in idpf_rx_splitq_extract_csum_bits()
3259 csum.raw_csum = le16_to_cpu(rx_desc->misc.raw_cs); in idpf_rx_splitq_extract_csum_bits()
3265 * idpf_rx_rsc - Set the RSC fields in the skb
3286 return -EINVAL; in idpf_rx_rsc()
3288 rsc_seg_len = le16_to_cpu(rx_desc->misc.rscseglen); in idpf_rx_rsc()
3290 return -EINVAL; in idpf_rx_rsc()
3296 return -EINVAL; in idpf_rx_rsc()
3298 rsc_segments = DIV_ROUND_UP(skb->data_len, rsc_seg_len); in idpf_rx_rsc()
3300 NAPI_GRO_CB(skb)->count = rsc_segments; in idpf_rx_rsc()
3301 skb_shinfo(skb)->gso_size = rsc_seg_len; in idpf_rx_rsc()
3308 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; in idpf_rx_rsc()
3312 len = skb->len - skb_transport_offset(skb); in idpf_rx_rsc()
3315 tcp_hdr(skb)->check = in idpf_rx_rsc()
3316 ~tcp_v4_check(len, ipv4h->saddr, ipv4h->daddr, 0); in idpf_rx_rsc()
3320 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; in idpf_rx_rsc()
3322 len = skb->len - skb_transport_offset(skb); in idpf_rx_rsc()
3323 tcp_hdr(skb)->check = in idpf_rx_rsc()
3324 ~tcp_v6_check(len, &ipv6h->saddr, &ipv6h->daddr, 0); in idpf_rx_rsc()
3329 u64_stats_update_begin(&rxq->stats_sync); in idpf_rx_rsc()
3330 u64_stats_inc(&rxq->q_stats.rsc_pkts); in idpf_rx_rsc()
3331 u64_stats_update_end(&rxq->stats_sync); in idpf_rx_rsc()
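For an RSC (hardware-coalesced TCP) buffer, the fragments above recover the GRO metadata the stack needs: the coalesced segment count is how many MSS-sized pieces fit in the non-linear data, and the TCP checksum is re-seeded with the pseudo-header checksum so the frame can be resegmented later. A sketch of just the segment-count arithmetic:

#include <stdint.h>
#include <stdio.h>

#define SKETCH_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* How many original segments does an RSC super-packet represent? */
static uint32_t sketch_rsc_segments(uint32_t data_len, uint16_t rsc_seg_len)
{
	return SKETCH_DIV_ROUND_UP(data_len, (uint32_t)rsc_seg_len);
}

int main(void)
{
	/* 20 KiB of paged payload coalesced out of 1448-byte segments */
	printf("%u segments\n", sketch_rsc_segments(20480, 1448));	/* 15 */
	return 0;
}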
3337 * idpf_rx_hwtstamp - check for an RX timestamp and pass up the stack
3350 if (!(rx_desc->ts_low & VIRTCHNL2_RX_FLEX_TSTAMP_VALID)) in idpf_rx_hwtstamp()
3353 cached_time = READ_ONCE(rxq->cached_phc_time); in idpf_rx_hwtstamp()
3355 ts_high = le32_to_cpu(rx_desc->ts_high); in idpf_rx_hwtstamp()
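The Rx descriptor carries only a 32-bit timestamp word, so the driver reconstructs a full 64-bit nanosecond value against the PHC time cached above. The usual way to do that (a generic sketch, not necessarily the driver's exact helper) is to apply the 32-bit delta between the hardware value and the low word of the cached time, picking the direction that keeps the delta small:

#include <stdint.h>
#include <stdio.h>

/*
 * Extend a 32-bit HW timestamp to 64 bits using a recently cached 64-bit
 * PHC time. Works as long as the HW value is within roughly +/- 2.1 s
 * (half the 32-bit range in ns) of the cached time.
 */
static uint64_t sketch_extend_32b_ts(uint64_t cached_phc_ns, uint32_t hw_ts)
{
	uint32_t lo = (uint32_t)cached_phc_ns;
	uint32_t delta = hw_ts - lo;

	if (delta > UINT32_MAX / 2)
		return cached_phc_ns - (uint32_t)(lo - hw_ts);

	return cached_phc_ns + delta;
}

int main(void)
{
	uint64_t cached = 0x00000005fffffff0ULL;

	/* HW timestamp taken just past a 32-bit rollover of the cached time */
	printf("%llx\n",
	       (unsigned long long)sketch_extend_32b_ts(cached, 0x10));
	return 0;
}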
3364 * __idpf_rx_process_skb_fields - Populate skb header fields from Rx descriptor
3381 rx_ptype = le16_get_bits(rx_desc->ptype_err_fflags0, in __idpf_rx_process_skb_fields()
3383 decoded = rxq->rx_ptype_lkup[rx_ptype]; in __idpf_rx_process_skb_fields()
3391 if (le16_get_bits(rx_desc->hdrlen_flags, in __idpf_rx_process_skb_fields()
3409 return !__idpf_rx_process_skb_fields(rxq, skb, xdp->desc); in idpf_rx_process_skb_fields()
3420 * idpf_rx_hsplit_wa - handle header buffer overflows and split errors
3428 * the header split is active since it doesn't reserve any head- or tailroom.
3443 if (unlikely(netmem_is_net_iov(buf->netmem)) || in idpf_rx_hsplit_wa()
3447 hdr_page = __netmem_to_page(hdr->netmem); in idpf_rx_hsplit_wa()
3448 buf_page = __netmem_to_page(buf->netmem); in idpf_rx_hsplit_wa()
3449 dst = page_address(hdr_page) + hdr->offset + in idpf_rx_hsplit_wa()
3450 pp_page_to_nmdesc(hdr_page)->pp->p.offset; in idpf_rx_hsplit_wa()
3451 src = page_address(buf_page) + buf->offset + in idpf_rx_hsplit_wa()
3452 pp_page_to_nmdesc(buf_page)->pp->p.offset; in idpf_rx_hsplit_wa()
3455 buf->offset += copy; in idpf_rx_hsplit_wa()
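The header-split workaround above deals with the case where the hardware put everything into the payload buffer: the driver copies the first bytes into the (empty) header buffer, which does have head/tailroom, and advances the payload offset past them. A simplified sketch of that pull; note the amount copied here is just clamped to the header buffer size, whereas the driver uses its own heuristic for how much to pull:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

static size_t sketch_hsplit_pull(unsigned char *hdr_buf, size_t hdr_size,
				 unsigned char *payload, size_t *payload_off,
				 size_t pkt_len)
{
	size_t copy = pkt_len < hdr_size ? pkt_len : hdr_size;

	memcpy(hdr_buf, payload + *payload_off, copy);
	*payload_off += copy;	/* remaining payload starts after the pulled bytes */

	return copy;		/* becomes the new "header length" */
}

int main(void)
{
	unsigned char hdr[256], payload[2048] = "frame data ...";
	size_t off = 0;

	printf("pulled %zu bytes\n",
	       sketch_hsplit_pull(hdr, sizeof(hdr), payload, &off, 1500));
	return 0;
}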
3461 * idpf_rx_splitq_test_staterr - tests bits in Rx descriptor
3474 * idpf_rx_splitq_is_eop - process handling of EOP buffers
3478 * otherwise return false indicating that this is in fact a non-EOP buffer.
3483 return likely(idpf_rx_splitq_test_staterr(rx_desc->status_err0_qw1, in idpf_rx_splitq_is_eop()
3488 * idpf_rx_splitq_clean - Clean completed descriptors from Rx queue
3503 u16 ntc = rxq->next_to_clean; in idpf_rx_splitq_clean()
3507 libeth_xdp_tx_init_bulk(&bq, rxq->xdp_prog, rxq->xdp_rxq.dev, in idpf_rx_splitq_clean()
3508 rxq->xdpsqs, rxq->num_xdp_txq); in idpf_rx_splitq_clean()
3509 libeth_xdp_init_buff(xdp, &rxq->xdp, &rxq->xdp_rxq); in idpf_rx_splitq_clean()
3524 rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb; in idpf_rx_splitq_clean()
3527 gen_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id, in idpf_rx_splitq_clean()
3535 rx_desc->rxdid_ucast); in idpf_rx_splitq_clean()
3538 u64_stats_update_begin(&rxq->stats_sync); in idpf_rx_splitq_clean()
3539 u64_stats_inc(&rxq->q_stats.bad_descs); in idpf_rx_splitq_clean()
3540 u64_stats_update_end(&rxq->stats_sync); in idpf_rx_splitq_clean()
3544 pkt_len = le16_get_bits(rx_desc->pktlen_gen_bufq_id, in idpf_rx_splitq_clean()
3547 bufq_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id, in idpf_rx_splitq_clean()
3551 refillq = rxq_set->refillq[bufq_id]; in idpf_rx_splitq_clean()
3554 rx_bufq = &rxq->bufq_sets[bufq_id].bufq; in idpf_rx_splitq_clean()
3556 buf_id = le16_to_cpu(rx_desc->buf_id); in idpf_rx_splitq_clean()
3558 rx_buf = &rx_bufq->buf[buf_id]; in idpf_rx_splitq_clean()
3560 if (!rx_bufq->hdr_pp) in idpf_rx_splitq_clean()
3565 if (likely(!(rx_desc->status_err0_qw1 & __HBO_BIT))) in idpf_rx_splitq_clean()
3571 hdr_len = le16_get_bits(rx_desc->hdrlen_flags, in idpf_rx_splitq_clean()
3576 hdr = &rx_bufq->hdr_buf[buf_id]; in idpf_rx_splitq_clean()
3578 if (unlikely(!hdr_len && !xdp->data)) { in idpf_rx_splitq_clean()
3581 pkt_len -= hdr_len ? : pkt_len; in idpf_rx_splitq_clean()
3583 u64_stats_update_begin(&rxq->stats_sync); in idpf_rx_splitq_clean()
3584 u64_stats_inc(&rxq->q_stats.hsplit_buf_ovf); in idpf_rx_splitq_clean()
3585 u64_stats_update_end(&rxq->stats_sync); in idpf_rx_splitq_clean()
3591 hdr->netmem = 0; in idpf_rx_splitq_clean()
3595 rx_buf->netmem = 0; in idpf_rx_splitq_clean()
3601 if (!idpf_rx_splitq_is_eop(rx_desc) || unlikely(!xdp->data)) in idpf_rx_splitq_clean()
3604 idpf_xdp_run_pass(xdp, &bq, rxq->napi, &rs, rx_desc); in idpf_rx_splitq_clean()
3609 rxq->next_to_clean = ntc; in idpf_rx_splitq_clean()
3610 libeth_xdp_save_buff(&rxq->xdp, xdp); in idpf_rx_splitq_clean()
3612 u64_stats_update_begin(&rxq->stats_sync); in idpf_rx_splitq_clean()
3613 u64_stats_add(&rxq->q_stats.packets, rs.packets); in idpf_rx_splitq_clean()
3614 u64_stats_add(&rxq->q_stats.bytes, rs.bytes); in idpf_rx_splitq_clean()
3615 u64_stats_add(&rxq->q_stats.hsplit_pkts, rs.hsplit); in idpf_rx_splitq_clean()
3616 u64_stats_update_end(&rxq->stats_sync); in idpf_rx_splitq_clean()
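The clean loop above relies on the splitq generation (GEN) bit rather than a classic DD bit: software keeps an expected generation flag that flips every time next_to_clean wraps, and a descriptor belongs to software only while the GEN bit read from the descriptor matches that flag. A small sketch of the idea:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool sketch_desc_is_ours(uint16_t desc_gen_bit, bool sw_gen_flag)
{
	return desc_gen_bit == (uint16_t)sw_gen_flag;
}

static void sketch_advance_ntc(uint16_t *ntc, uint16_t desc_count,
			       bool *sw_gen_flag)
{
	if (++(*ntc) == desc_count) {
		*ntc = 0;
		*sw_gen_flag = !*sw_gen_flag;	/* wrap flips the expected GEN */
	}
}

int main(void)
{
	uint16_t ntc = 1023;
	bool gen = true;

	printf("ours=%d\n", sketch_desc_is_ours(1, gen));	/* 1 */
	sketch_advance_ntc(&ntc, 1024, &gen);
	printf("ntc=%u expected_gen=%d\n", ntc, gen);		/* 0, 0 */
	return 0;
}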
3622 * idpf_rx_update_bufq_desc - Update buffer queue descriptor
3633 .pp = bufq->pp, in idpf_rx_update_bufq_desc()
3634 .fqes = bufq->buf, in idpf_rx_update_bufq_desc()
3635 .truesize = bufq->truesize, in idpf_rx_update_bufq_desc()
3636 .count = bufq->desc_count, in idpf_rx_update_bufq_desc()
3642 return -ENOMEM; in idpf_rx_update_bufq_desc()
3644 buf_desc->pkt_addr = cpu_to_le64(addr); in idpf_rx_update_bufq_desc()
3645 buf_desc->qword0.buf_id = cpu_to_le16(buf_id); in idpf_rx_update_bufq_desc()
3650 fq.pp = bufq->hdr_pp; in idpf_rx_update_bufq_desc()
3651 fq.fqes = bufq->hdr_buf; in idpf_rx_update_bufq_desc()
3652 fq.truesize = bufq->hdr_truesize; in idpf_rx_update_bufq_desc()
3656 return -ENOMEM; in idpf_rx_update_bufq_desc()
3658 buf_desc->hdr_addr = cpu_to_le64(addr); in idpf_rx_update_bufq_desc()
3664 * idpf_rx_clean_refillq - Clean refill queue buffers
3674 u16 bufq_nta = bufq->next_to_alloc; in idpf_rx_clean_refillq()
3675 u16 ntc = refillq->next_to_clean; in idpf_rx_clean_refillq()
3678 buf_desc = &bufq->split_buf[bufq_nta]; in idpf_rx_clean_refillq()
3681 while (likely(cleaned < refillq->desc_count)) { in idpf_rx_clean_refillq()
3682 u32 buf_id, refill_desc = refillq->ring[ntc]; in idpf_rx_clean_refillq()
3694 if (unlikely(++ntc == refillq->desc_count)) { in idpf_rx_clean_refillq()
3699 if (unlikely(++bufq_nta == bufq->desc_count)) { in idpf_rx_clean_refillq()
3700 buf_desc = &bufq->split_buf[0]; in idpf_rx_clean_refillq()
3716 if (((bufq->next_to_use <= bufq_nta ? 0 : bufq->desc_count) + in idpf_rx_clean_refillq()
3717 bufq_nta - bufq->next_to_use) >= IDPF_RX_BUF_POST_STRIDE) in idpf_rx_clean_refillq()
3722 refillq->next_to_clean = ntc; in idpf_rx_clean_refillq()
3723 bufq->next_to_alloc = bufq_nta; in idpf_rx_clean_refillq()
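Each refill-queue ring entry read above is a plain 32-bit word carrying a buffer ID plus its own generation bit, so the Rx-clean producer and the buffer-queue consumer can run lock-free on the same ring. A sketch of the encode/decode; the bit layout here is illustrative, not the driver's exact field positions:

#include <stdint.h>
#include <stdio.h>

#define SKETCH_RFL_BUFID_MASK	0x7fffu
#define SKETCH_RFL_GEN_BIT	0x8000u

static uint32_t sketch_rfl_encode(uint16_t buf_id, int gen)
{
	return (buf_id & SKETCH_RFL_BUFID_MASK) | (gen ? SKETCH_RFL_GEN_BIT : 0);
}

static int sketch_rfl_decode(uint32_t entry, int expected_gen, uint16_t *buf_id)
{
	if (!!(entry & SKETCH_RFL_GEN_BIT) != expected_gen)
		return -1;	/* producer hasn't written this slot yet */

	*buf_id = entry & SKETCH_RFL_BUFID_MASK;
	return 0;
}

int main(void)
{
	uint32_t entry = sketch_rfl_encode(1234, 1);
	uint16_t buf_id;

	if (!sketch_rfl_decode(entry, 1, &buf_id))
		printf("refill buf_id=%u\n", buf_id);
	return 0;
}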
3727 * idpf_rx_clean_refillq_all - Clean all refill queues
3740 page_pool_nid_changed(bufq->pp, nid); in idpf_rx_clean_refillq_all()
3741 if (bufq->hdr_pp) in idpf_rx_clean_refillq_all()
3742 page_pool_nid_changed(bufq->hdr_pp, nid); in idpf_rx_clean_refillq_all()
3745 for (i = 0; i < bufq_set->num_refillqs; i++) in idpf_rx_clean_refillq_all()
3746 idpf_rx_clean_refillq(bufq, &bufq_set->refillqs[i]); in idpf_rx_clean_refillq_all()
3750 * idpf_vport_intr_clean_queues - MSIX mode Interrupt Handler
3760 q_vector->total_events++; in idpf_vport_intr_clean_queues()
3761 napi_schedule_irqoff(&q_vector->napi); in idpf_vport_intr_clean_queues()
3767 * idpf_vport_intr_napi_del_all - Unregister napi for all q_vectors in vport
3775 for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) in idpf_vport_intr_napi_del_all()
3776 netif_napi_del(&vport->q_vectors[v_idx].napi); in idpf_vport_intr_napi_del_all()
3780 * idpf_vport_intr_napi_dis_all - Disable NAPI for all q_vectors in the vport
3787 for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) in idpf_vport_intr_napi_dis_all()
3788 napi_disable(&vport->q_vectors[v_idx].napi); in idpf_vport_intr_napi_dis_all()
3792 * idpf_vport_intr_rel - Free memory allocated for interrupt vectors
3799 for (u32 v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { in idpf_vport_intr_rel()
3800 struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx]; in idpf_vport_intr_rel()
3802 kfree(q_vector->xsksq); in idpf_vport_intr_rel()
3803 q_vector->xsksq = NULL; in idpf_vport_intr_rel()
3804 kfree(q_vector->complq); in idpf_vport_intr_rel()
3805 q_vector->complq = NULL; in idpf_vport_intr_rel()
3806 kfree(q_vector->bufq); in idpf_vport_intr_rel()
3807 q_vector->bufq = NULL; in idpf_vport_intr_rel()
3808 kfree(q_vector->tx); in idpf_vport_intr_rel()
3809 q_vector->tx = NULL; in idpf_vport_intr_rel()
3810 kfree(q_vector->rx); in idpf_vport_intr_rel()
3811 q_vector->rx = NULL; in idpf_vport_intr_rel()
3814 kfree(vport->q_vectors); in idpf_vport_intr_rel()
3815 vport->q_vectors = NULL; in idpf_vport_intr_rel()
3820 struct napi_struct *napi = link ? &q_vector->napi : NULL; in idpf_q_vector_set_napi()
3821 struct net_device *dev = q_vector->vport->netdev; in idpf_q_vector_set_napi()
3823 for (u32 i = 0; i < q_vector->num_rxq; i++) in idpf_q_vector_set_napi()
3824 netif_queue_set_napi(dev, q_vector->rx[i]->idx, in idpf_q_vector_set_napi()
3827 for (u32 i = 0; i < q_vector->num_txq; i++) in idpf_q_vector_set_napi()
3828 netif_queue_set_napi(dev, q_vector->tx[i]->idx, in idpf_q_vector_set_napi()
3833 * idpf_vport_intr_rel_irq - Free the IRQ association with the OS
3838 struct idpf_adapter *adapter = vport->adapter; in idpf_vport_intr_rel_irq()
3841 for (vector = 0; vector < vport->num_q_vectors; vector++) { in idpf_vport_intr_rel_irq()
3842 struct idpf_q_vector *q_vector = &vport->q_vectors[vector]; in idpf_vport_intr_rel_irq()
3849 vidx = vport->q_vector_idxs[vector]; in idpf_vport_intr_rel_irq()
3850 irq_num = adapter->msix_entries[vidx].vector; in idpf_vport_intr_rel_irq()
3858  * idpf_vport_intr_dis_irq_all - Disable all interrupts

3863 struct idpf_q_vector *q_vector = vport->q_vectors; in idpf_vport_intr_dis_irq_all()
3866 writel(0, vport->noirq_dyn_ctl); in idpf_vport_intr_dis_irq_all()
3868 for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) in idpf_vport_intr_dis_irq_all()
3873 * idpf_vport_intr_buildreg_itr - Enable default interrupt generation settings
3878 u32 itr_val = q_vector->intr_reg.dyn_ctl_intena_m; in idpf_vport_intr_buildreg_itr()
3882 if (q_vector->wb_on_itr) { in idpf_vport_intr_buildreg_itr()
3888 itr_val |= q_vector->intr_reg.dyn_ctl_swint_trig_m | in idpf_vport_intr_buildreg_itr()
3889 q_vector->intr_reg.dyn_ctl_sw_itridx_ena_m; in idpf_vport_intr_buildreg_itr()
3898 itr_val |= (type << q_vector->intr_reg.dyn_ctl_itridx_s) | in idpf_vport_intr_buildreg_itr()
3899 (itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1)); in idpf_vport_intr_buildreg_itr()
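The register value built above is a bitfield composition: the interrupt-enable bit, an ITR index selecting which ITR register to update, and the interval itself. A generic sketch of that composition; the shift values are made up (the driver reads its real ones from intr_reg at runtime), and the "- 1" in the fragment presumably writes itr/2 into the field, i.e. converts microseconds to a 2 usec register granularity:

#include <stdint.h>
#include <stdio.h>

#define SKETCH_DYN_CTL_INTENA		0x1u
#define SKETCH_DYN_CTL_ITRIDX_S		3
#define SKETCH_DYN_CTL_INTRVL_S		5

static uint32_t sketch_buildreg_itr(uint32_t itr_idx, uint32_t itr_usecs)
{
	return SKETCH_DYN_CTL_INTENA |
	       (itr_idx << SKETCH_DYN_CTL_ITRIDX_S) |
	       /* shift by one less: the interval field counts 2 usec ticks */
	       (itr_usecs << (SKETCH_DYN_CTL_INTRVL_S - 1));
}

int main(void)
{
	printf("0x%x\n", sketch_buildreg_itr(3, 50));	/* 0x339 */
	return 0;
}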
3905 * idpf_update_dim_sample - Update dim sample with packets and bytes
3919 dim_update_sample(q_vector->total_events, packets, bytes, dim_sample); in idpf_update_dim_sample()
3920 dim_sample->comp_ctr = 0; in idpf_update_dim_sample()
3926 if (ktime_ms_delta(dim_sample->time, dim->start_sample.time) >= HZ) in idpf_update_dim_sample()
3927 dim->state = DIM_START_MEASURE; in idpf_update_dim_sample()
3931 * idpf_net_dim - Update net DIM algorithm
3937 * This function is a no-op if the queue is not configured to dynamic ITR.
3945 if (!IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode)) in idpf_net_dim()
3948 for (i = 0, packets = 0, bytes = 0; i < q_vector->num_txq; i++) { in idpf_net_dim()
3949 struct idpf_tx_queue *txq = q_vector->tx[i]; in idpf_net_dim()
3953 start = u64_stats_fetch_begin(&txq->stats_sync); in idpf_net_dim()
3954 packets += u64_stats_read(&txq->q_stats.packets); in idpf_net_dim()
3955 bytes += u64_stats_read(&txq->q_stats.bytes); in idpf_net_dim()
3956 } while (u64_stats_fetch_retry(&txq->stats_sync, start)); in idpf_net_dim()
3959 idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->tx_dim, in idpf_net_dim()
3961 net_dim(&q_vector->tx_dim, &dim_sample); in idpf_net_dim()
3964 if (!IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode)) in idpf_net_dim()
3967 for (i = 0, packets = 0, bytes = 0; i < q_vector->num_rxq; i++) { in idpf_net_dim()
3968 struct idpf_rx_queue *rxq = q_vector->rx[i]; in idpf_net_dim()
3972 start = u64_stats_fetch_begin(&rxq->stats_sync); in idpf_net_dim()
3973 packets += u64_stats_read(&rxq->q_stats.packets); in idpf_net_dim()
3974 bytes += u64_stats_read(&rxq->q_stats.bytes); in idpf_net_dim()
3975 } while (u64_stats_fetch_retry(&rxq->stats_sync, start)); in idpf_net_dim()
3978 idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->rx_dim, in idpf_net_dim()
3980 net_dim(&q_vector->rx_dim, &dim_sample); in idpf_net_dim()
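The per-queue packet/byte totals fed to net_dim() are read with the u64_stats begin/retry pattern so a 32-bit reader never sees a torn 64-bit counter: re-read the pair whenever the writer bumped the sequence count in between. A userspace-flavoured sketch of that aggregation loop; the seqcount helpers are stand-ins for the kernel's u64_stats API, and reading into locals before accumulating avoids double-counting on a retry:

#include <stdint.h>
#include <stdio.h>

struct sketch_sync { unsigned int seq; };
struct sketch_qstats { uint64_t packets, bytes; };
struct sketch_queue { struct sketch_sync sync; struct sketch_qstats s; };

static unsigned int sketch_fetch_begin(const struct sketch_sync *s)
{
	return s->seq;
}

static int sketch_fetch_retry(const struct sketch_sync *s, unsigned int start)
{
	return s->seq != start;	/* writer ran in between: re-read */
}

static void sketch_sum_queues(const struct sketch_queue *q, int nq,
			      uint64_t *packets, uint64_t *bytes)
{
	*packets = 0;
	*bytes = 0;

	for (int i = 0; i < nq; i++) {
		unsigned int start;
		uint64_t p, b;

		do {
			start = sketch_fetch_begin(&q[i].sync);
			p = q[i].s.packets;
			b = q[i].s.bytes;
		} while (sketch_fetch_retry(&q[i].sync, start));

		*packets += p;
		*bytes += b;
	}
}

int main(void)
{
	struct sketch_queue q[2] = {
		{ .s = { .packets = 10, .bytes = 15000 } },
		{ .s = { .packets =  4, .bytes =  6000 } },
	};
	uint64_t packets, bytes;

	sketch_sum_queues(q, 2, &packets, &bytes);
	printf("packets=%llu bytes=%llu\n",
	       (unsigned long long)packets, (unsigned long long)bytes);
	return 0;
}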
3984 * idpf_vport_intr_update_itr_ena_irq - Update itr and re-enable MSIX interrupt
3987 * Update the net_dim() algorithm and re-enable the interrupt associated with
3994 /* net_dim() updates ITR out-of-band using a work item */ in idpf_vport_intr_update_itr_ena_irq()
3998 q_vector->wb_on_itr = false; in idpf_vport_intr_update_itr_ena_irq()
4000 writel(intval, q_vector->intr_reg.dyn_ctl); in idpf_vport_intr_update_itr_ena_irq()
4004 * idpf_vport_intr_req_irq - get MSI-X vectors from the OS for the vport
4009 struct idpf_adapter *adapter = vport->adapter; in idpf_vport_intr_req_irq()
4013 drv_name = dev_driver_string(&adapter->pdev->dev); in idpf_vport_intr_req_irq()
4014 if_name = netdev_name(vport->netdev); in idpf_vport_intr_req_irq()
4016 for (vector = 0; vector < vport->num_q_vectors; vector++) { in idpf_vport_intr_req_irq()
4017 struct idpf_q_vector *q_vector = &vport->q_vectors[vector]; in idpf_vport_intr_req_irq()
4020 vidx = vport->q_vector_idxs[vector]; in idpf_vport_intr_req_irq()
4021 irq_num = adapter->msix_entries[vidx].vector; in idpf_vport_intr_req_irq()
4023 if (q_vector->num_rxq && q_vector->num_txq) in idpf_vport_intr_req_irq()
4025 else if (q_vector->num_rxq) in idpf_vport_intr_req_irq()
4027 else if (q_vector->num_txq) in idpf_vport_intr_req_irq()
4032 name = kasprintf(GFP_KERNEL, "%s-%s-%s-%d", drv_name, if_name, in idpf_vport_intr_req_irq()
4038 netdev_err(vport->netdev, in idpf_vport_intr_req_irq()
4049 while (--vector >= 0) { in idpf_vport_intr_req_irq()
4050 vidx = vport->q_vector_idxs[vector]; in idpf_vport_intr_req_irq()
4051 irq_num = adapter->msix_entries[vidx].vector; in idpf_vport_intr_req_irq()
4052 kfree(free_irq(irq_num, &vport->q_vectors[vector])); in idpf_vport_intr_req_irq()
4059 * idpf_vport_intr_write_itr - Write ITR value to the ITR register
4068 if (tx && !q_vector->tx) in idpf_vport_intr_write_itr()
4070 else if (!tx && !q_vector->rx) in idpf_vport_intr_write_itr()
4073 intr_reg = &q_vector->intr_reg; in idpf_vport_intr_write_itr()
4075 tx ? intr_reg->tx_itr : intr_reg->rx_itr); in idpf_vport_intr_write_itr()
4079 * idpf_vport_intr_ena_irq_all - Enable IRQ for the given vport
4088 for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) { in idpf_vport_intr_ena_irq_all()
4089 struct idpf_q_vector *qv = &vport->q_vectors[q_idx]; in idpf_vport_intr_ena_irq_all()
4092 if (qv->num_txq) { in idpf_vport_intr_ena_irq_all()
4093 dynamic = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode); in idpf_vport_intr_ena_irq_all()
4094 itr = vport->tx_itr_profile[qv->tx_dim.profile_ix]; in idpf_vport_intr_ena_irq_all()
4096 itr : qv->tx_itr_value, in idpf_vport_intr_ena_irq_all()
4100 if (qv->num_rxq) { in idpf_vport_intr_ena_irq_all()
4101 dynamic = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode); in idpf_vport_intr_ena_irq_all()
4102 itr = vport->rx_itr_profile[qv->rx_dim.profile_ix]; in idpf_vport_intr_ena_irq_all()
4104 itr : qv->rx_itr_value, in idpf_vport_intr_ena_irq_all()
4108 if (qv->num_txq || qv->num_rxq) in idpf_vport_intr_ena_irq_all()
4112 writel(vport->noirq_dyn_ctl_ena, vport->noirq_dyn_ctl); in idpf_vport_intr_ena_irq_all()
4116 * idpf_vport_intr_deinit - Release all vector associations for the vport
4128 * idpf_tx_dim_work - Call back from the stack
4140 vport = q_vector->vport; in idpf_tx_dim_work()
4142 if (dim->profile_ix >= ARRAY_SIZE(vport->tx_itr_profile)) in idpf_tx_dim_work()
4143 dim->profile_ix = ARRAY_SIZE(vport->tx_itr_profile) - 1; in idpf_tx_dim_work()
4146 itr = vport->tx_itr_profile[dim->profile_ix]; in idpf_tx_dim_work()
4150 dim->state = DIM_START_MEASURE; in idpf_tx_dim_work()
4154 * idpf_rx_dim_work - Call back from the stack
4166 vport = q_vector->vport; in idpf_rx_dim_work()
4168 if (dim->profile_ix >= ARRAY_SIZE(vport->rx_itr_profile)) in idpf_rx_dim_work()
4169 dim->profile_ix = ARRAY_SIZE(vport->rx_itr_profile) - 1; in idpf_rx_dim_work()
4172 itr = vport->rx_itr_profile[dim->profile_ix]; in idpf_rx_dim_work()
4176 dim->state = DIM_START_MEASURE; in idpf_rx_dim_work()
4180 * idpf_init_dim - Set up dynamic interrupt moderation
4185 INIT_WORK(&qv->tx_dim.work, idpf_tx_dim_work); in idpf_init_dim()
4186 qv->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in idpf_init_dim()
4187 qv->tx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX; in idpf_init_dim()
4189 INIT_WORK(&qv->rx_dim.work, idpf_rx_dim_work); in idpf_init_dim()
4190 qv->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in idpf_init_dim()
4191 qv->rx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX; in idpf_init_dim()
4195 * idpf_vport_intr_napi_ena_all - Enable NAPI for all q_vectors in the vport
4202 for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) { in idpf_vport_intr_napi_ena_all()
4203 struct idpf_q_vector *q_vector = &vport->q_vectors[q_idx]; in idpf_vport_intr_napi_ena_all()
4206 napi_enable(&q_vector->napi); in idpf_vport_intr_napi_ena_all()
4211  * idpf_tx_splitq_clean_all - Clean completion queues

4221 u16 num_complq = q_vec->num_complq; in idpf_tx_splitq_clean_all()
4231 clean_complete &= idpf_tx_clean_complq(q_vec->complq[i], in idpf_tx_splitq_clean_all()
4238  * idpf_rx_splitq_clean_all - Clean Rx queues
4248 u16 num_rxq = q_vec->num_rxq; in idpf_rx_splitq_clean_all()
4259 struct idpf_rx_queue *rxq = q_vec->rx[i]; in idpf_rx_splitq_clean_all()
4274 for (i = 0; i < q_vec->num_bufq; i++) { in idpf_rx_splitq_clean_all()
4275 if (!idpf_queue_has(XSK, q_vec->bufq[i])) in idpf_rx_splitq_clean_all()
4276 idpf_rx_clean_refillq_all(q_vec->bufq[i], nid); in idpf_rx_splitq_clean_all()
4283 * idpf_vport_splitq_napi_poll - NAPI handler
4301 for (u32 i = 0; i < q_vector->num_xsksq; i++) in idpf_vport_splitq_napi_poll()
4302 clean_complete &= idpf_xsk_xmit(q_vector->xsksq[i]); in idpf_vport_splitq_napi_poll()
4315 work_done = min_t(int, work_done, budget - 1); in idpf_vport_splitq_napi_poll()
4317 /* Exit the polling mode, but don't re-enable interrupts if stack might in idpf_vport_splitq_napi_poll()
4318 * poll us due to busy-polling in idpf_vport_splitq_napi_poll()
4329 * idpf_vport_intr_map_vector_to_qs - Map vectors to queues
4336 u16 num_txq_grp = vport->num_txq_grp - vport->num_xdp_txq; in idpf_vport_intr_map_vector_to_qs()
4337 bool split = idpf_is_queue_model_split(vport->rxq_model); in idpf_vport_intr_map_vector_to_qs()
4342 for (i = 0, qv_idx = 0; i < vport->num_rxq_grp; i++) { in idpf_vport_intr_map_vector_to_qs()
4345 if (qv_idx >= vport->num_q_vectors) in idpf_vport_intr_map_vector_to_qs()
4348 rx_qgrp = &vport->rxq_grps[i]; in idpf_vport_intr_map_vector_to_qs()
4350 num_rxq = rx_qgrp->splitq.num_rxq_sets; in idpf_vport_intr_map_vector_to_qs()
4352 num_rxq = rx_qgrp->singleq.num_rxq; in idpf_vport_intr_map_vector_to_qs()
4358 q = &rx_qgrp->splitq.rxq_sets[j]->rxq; in idpf_vport_intr_map_vector_to_qs()
4360 q = rx_qgrp->singleq.rxqs[j]; in idpf_vport_intr_map_vector_to_qs()
4361 q->q_vector = &vport->q_vectors[qv_idx]; in idpf_vport_intr_map_vector_to_qs()
4362 q_index = q->q_vector->num_rxq; in idpf_vport_intr_map_vector_to_qs()
4363 q->q_vector->rx[q_index] = q; in idpf_vport_intr_map_vector_to_qs()
4364 q->q_vector->num_rxq++; in idpf_vport_intr_map_vector_to_qs()
4367 q->napi = &q->q_vector->napi; in idpf_vport_intr_map_vector_to_qs()
4371 for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) { in idpf_vport_intr_map_vector_to_qs()
4374 bufq = &rx_qgrp->splitq.bufq_sets[j].bufq; in idpf_vport_intr_map_vector_to_qs()
4375 bufq->q_vector = &vport->q_vectors[qv_idx]; in idpf_vport_intr_map_vector_to_qs()
4376 q_index = bufq->q_vector->num_bufq; in idpf_vport_intr_map_vector_to_qs()
4377 bufq->q_vector->bufq[q_index] = bufq; in idpf_vport_intr_map_vector_to_qs()
4378 bufq->q_vector->num_bufq++; in idpf_vport_intr_map_vector_to_qs()
4385 split = idpf_is_queue_model_split(vport->txq_model); in idpf_vport_intr_map_vector_to_qs()
4390 if (qv_idx >= vport->num_q_vectors) in idpf_vport_intr_map_vector_to_qs()
4393 tx_qgrp = &vport->txq_grps[i]; in idpf_vport_intr_map_vector_to_qs()
4394 num_txq = tx_qgrp->num_txq; in idpf_vport_intr_map_vector_to_qs()
4399 q = tx_qgrp->txqs[j]; in idpf_vport_intr_map_vector_to_qs()
4400 q->q_vector = &vport->q_vectors[qv_idx]; in idpf_vport_intr_map_vector_to_qs()
4401 q->q_vector->tx[q->q_vector->num_txq++] = q; in idpf_vport_intr_map_vector_to_qs()
4405 struct idpf_compl_queue *q = tx_qgrp->complq; in idpf_vport_intr_map_vector_to_qs()
4407 q->q_vector = &vport->q_vectors[qv_idx]; in idpf_vport_intr_map_vector_to_qs()
4408 q->q_vector->complq[q->q_vector->num_complq++] = q; in idpf_vport_intr_map_vector_to_qs()
4414 for (i = 0; i < vport->num_xdp_txq; i++) { in idpf_vport_intr_map_vector_to_qs()
4418 xdpsq = vport->txqs[vport->xdp_txq_offset + i]; in idpf_vport_intr_map_vector_to_qs()
4425 xdpsq->q_vector = qv; in idpf_vport_intr_map_vector_to_qs()
4426 qv->xsksq[qv->num_xsksq++] = xdpsq; in idpf_vport_intr_map_vector_to_qs()
4431 * idpf_vport_intr_init_vec_idx - Initialize the vector indexes
4438 struct idpf_adapter *adapter = vport->adapter; in idpf_vport_intr_init_vec_idx()
4443 ac = adapter->req_vec_chunks; in idpf_vport_intr_init_vec_idx()
4445 for (i = 0; i < vport->num_q_vectors; i++) in idpf_vport_intr_init_vec_idx()
4446 vport->q_vectors[i].v_idx = vport->q_vector_idxs[i]; in idpf_vport_intr_init_vec_idx()
4448 vport->noirq_v_idx = vport->q_vector_idxs[i]; in idpf_vport_intr_init_vec_idx()
4456 return -ENOMEM; in idpf_vport_intr_init_vec_idx()
4458 idpf_get_vec_ids(adapter, vecids, total_vecs, &ac->vchunks); in idpf_vport_intr_init_vec_idx()
4460 for (i = 0; i < vport->num_q_vectors; i++) in idpf_vport_intr_init_vec_idx()
4461 vport->q_vectors[i].v_idx = vecids[vport->q_vector_idxs[i]]; in idpf_vport_intr_init_vec_idx()
4463 vport->noirq_v_idx = vecids[vport->q_vector_idxs[i]]; in idpf_vport_intr_init_vec_idx()
4471  * idpf_vport_intr_napi_add_all - Register NAPI handler for all q_vectors
4480 if (idpf_is_queue_model_split(vport->txq_model)) in idpf_vport_intr_napi_add_all()
4485 for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { in idpf_vport_intr_napi_add_all()
4486 struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx]; in idpf_vport_intr_napi_add_all()
4487 qv_idx = vport->q_vector_idxs[v_idx]; in idpf_vport_intr_napi_add_all()
4488 irq_num = vport->adapter->msix_entries[qv_idx].vector; in idpf_vport_intr_napi_add_all()
4490 netif_napi_add_config(vport->netdev, &q_vector->napi, in idpf_vport_intr_napi_add_all()
4492 netif_napi_set_irq(&q_vector->napi, irq_num); in idpf_vport_intr_napi_add_all()
4497 * idpf_vport_intr_alloc - Allocate memory for interrupt vectors
4501 * return -ENOMEM.
4510 u16 idx = vport->idx; in idpf_vport_intr_alloc()
4512 user_config = &vport->adapter->vport_config[idx]->user_config; in idpf_vport_intr_alloc()
4513 vport->q_vectors = kcalloc(vport->num_q_vectors, in idpf_vport_intr_alloc()
4515 if (!vport->q_vectors) in idpf_vport_intr_alloc()
4516 return -ENOMEM; in idpf_vport_intr_alloc()
4518 txqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp, in idpf_vport_intr_alloc()
4519 vport->num_q_vectors); in idpf_vport_intr_alloc()
4520 rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq_grp, in idpf_vport_intr_alloc()
4521 vport->num_q_vectors); in idpf_vport_intr_alloc()
4522 bufqs_per_vector = vport->num_bufqs_per_qgrp * in idpf_vport_intr_alloc()
4523 DIV_ROUND_UP(vport->num_rxq_grp, in idpf_vport_intr_alloc()
4524 vport->num_q_vectors); in idpf_vport_intr_alloc()
4525 complqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp, in idpf_vport_intr_alloc()
4526 vport->num_q_vectors); in idpf_vport_intr_alloc()
4528 for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { in idpf_vport_intr_alloc()
4529 q_vector = &vport->q_vectors[v_idx]; in idpf_vport_intr_alloc()
4530 q_coal = &user_config->q_coalesce[v_idx]; in idpf_vport_intr_alloc()
4531 q_vector->vport = vport; in idpf_vport_intr_alloc()
4533 q_vector->tx_itr_value = q_coal->tx_coalesce_usecs; in idpf_vport_intr_alloc()
4534 q_vector->tx_intr_mode = q_coal->tx_intr_mode; in idpf_vport_intr_alloc()
4535 q_vector->tx_itr_idx = VIRTCHNL2_ITR_IDX_1; in idpf_vport_intr_alloc()
4537 q_vector->rx_itr_value = q_coal->rx_coalesce_usecs; in idpf_vport_intr_alloc()
4538 q_vector->rx_intr_mode = q_coal->rx_intr_mode; in idpf_vport_intr_alloc()
4539 q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0; in idpf_vport_intr_alloc()
4541 q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx), in idpf_vport_intr_alloc()
4543 if (!q_vector->tx) in idpf_vport_intr_alloc()
4546 q_vector->rx = kcalloc(rxqs_per_vector, sizeof(*q_vector->rx), in idpf_vport_intr_alloc()
4548 if (!q_vector->rx) in idpf_vport_intr_alloc()
4551 if (!idpf_is_queue_model_split(vport->rxq_model)) in idpf_vport_intr_alloc()
4554 q_vector->bufq = kcalloc(bufqs_per_vector, in idpf_vport_intr_alloc()
4555 sizeof(*q_vector->bufq), in idpf_vport_intr_alloc()
4557 if (!q_vector->bufq) in idpf_vport_intr_alloc()
4560 q_vector->complq = kcalloc(complqs_per_vector, in idpf_vport_intr_alloc()
4561 sizeof(*q_vector->complq), in idpf_vport_intr_alloc()
4563 if (!q_vector->complq) in idpf_vport_intr_alloc()
4566 if (!vport->xdp_txq_offset) in idpf_vport_intr_alloc()
4569 q_vector->xsksq = kcalloc(rxqs_per_vector, in idpf_vport_intr_alloc()
4570 sizeof(*q_vector->xsksq), in idpf_vport_intr_alloc()
4572 if (!q_vector->xsksq) in idpf_vport_intr_alloc()
4581 return -ENOMEM; in idpf_vport_intr_alloc()
4585 * idpf_vport_intr_init - Setup all vectors for the given vport
4601 err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport); in idpf_vport_intr_init()
4624 * idpf_config_rss - Send virtchnl messages to configure RSS
4641 * idpf_fill_dflt_rss_lut - Fill the indirection table with the default values
4646 struct idpf_adapter *adapter = vport->adapter; in idpf_fill_dflt_rss_lut()
4647 u16 num_active_rxq = vport->num_rxq; in idpf_fill_dflt_rss_lut()
4651 rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data; in idpf_fill_dflt_rss_lut()
4653 for (i = 0; i < rss_data->rss_lut_size; i++) { in idpf_fill_dflt_rss_lut()
4654 rss_data->rss_lut[i] = i % num_active_rxq; in idpf_fill_dflt_rss_lut()
4655 rss_data->cached_lut[i] = rss_data->rss_lut[i]; in idpf_fill_dflt_rss_lut()
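The default indirection table is a round-robin spread of the active Rx queues across the LUT entries (lut[i] = i % num_active_rxq), mirrored into the cached copy kept for later restores. A small sketch:

#include <stdint.h>
#include <stdio.h>

/* Fill an RSS indirection table round-robin over the active Rx queues. */
static void sketch_fill_dflt_rss_lut(uint32_t *lut, uint32_t *cached_lut,
				     unsigned int lut_size,
				     unsigned int num_active_rxq)
{
	for (unsigned int i = 0; i < lut_size; i++) {
		lut[i] = i % num_active_rxq;
		cached_lut[i] = lut[i];
	}
}

int main(void)
{
	uint32_t lut[8], cached[8];

	sketch_fill_dflt_rss_lut(lut, cached, 8, 3);
	for (int i = 0; i < 8; i++)
		printf("%u ", lut[i]);	/* 0 1 2 0 1 2 0 1 */
	printf("\n");
	return 0;
}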
4660 * idpf_init_rss - Allocate and initialize RSS resources
4667 struct idpf_adapter *adapter = vport->adapter; in idpf_init_rss()
4671 rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data; in idpf_init_rss()
4673 lut_size = rss_data->rss_lut_size * sizeof(u32); in idpf_init_rss()
4674 rss_data->rss_lut = kzalloc(lut_size, GFP_KERNEL); in idpf_init_rss()
4675 if (!rss_data->rss_lut) in idpf_init_rss()
4676 return -ENOMEM; in idpf_init_rss()
4678 rss_data->cached_lut = kzalloc(lut_size, GFP_KERNEL); in idpf_init_rss()
4679 if (!rss_data->cached_lut) { in idpf_init_rss()
4680 kfree(rss_data->rss_lut); in idpf_init_rss()
4681 rss_data->rss_lut = NULL; in idpf_init_rss()
4683 return -ENOMEM; in idpf_init_rss()
4693 * idpf_deinit_rss - Release RSS resources
4698 struct idpf_adapter *adapter = vport->adapter; in idpf_deinit_rss()
4701 rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data; in idpf_deinit_rss()
4702 kfree(rss_data->cached_lut); in idpf_deinit_rss()
4703 rss_data->cached_lut = NULL; in idpf_deinit_rss()
4704 kfree(rss_data->rss_lut); in idpf_deinit_rss()
4705 rss_data->rss_lut = NULL; in idpf_deinit_rss()