
/* SPDX-License-Identifier: BSD-3-Clause */

/**
 * @file iavf_txrx_iflib.c
 * @brief Tx/Rx hotpath implementation for the iflib driver
 *
 * Contains functions used to implement the Tx and Rx hotpaths of the iflib
 * driver implementation.
 */

/**
 * @brief iflib Tx/Rx operations for head write back
 */

/**
 * @brief iflib Tx/Rx operations for descriptor write back
 */

/**
 * iavf_is_tx_desc_done - Check if a Tx descriptor is ready
 */
	return (((txr->tx_base[idx].cmd_type_offset_bsz >> IAVF_TXD_QW1_DTYPE_SHIFT)
	    & IAVF_TXD_QW1_DTYPE_MASK) == IAVF_TX_DESC_DTYPE_DESC_DONE);
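
/*
 * The test works because, in descriptor write back mode, the hardware
 * rewrites a descriptor's DTYPE field once it is done with it. A minimal
 * user-space model of the same check; the constants below are illustrative
 * stand-ins for the values in iavf_type.h.
 */
#include <stdbool.h>
#include <stdint.h>

#define DTYPE_SHIFT	0
#define DTYPE_MASK	0xFULL
#define DTYPE_DESC_DONE	0xFULL

/* True once hardware has rewritten the DTYPE field of this QW1 value. */
static bool
tx_desc_done(uint64_t cmd_type_offset_bsz)
{
	return (((cmd_type_offset_bsz >> DTYPE_SHIFT) & DTYPE_MASK) ==
	    DTYPE_DESC_DONE);
}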

/**
 * iavf_tso_detect_sparse - detect TSO packets with too many segments
 */
	if (nsegs <= IAVF_MAX_TX_SEGS - 2)
		return (0);
	segsz = pi->ipi_tso_segsz;
	...
	hlen = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen;
	tsolen = pi->ipi_len - hlen;
	...
		if (count > IAVF_MAX_TX_SEGS - 2)
			return (1);
	...
		curseg -= seglen;
		hlen -= seglen;
	...
		segsz = pi->ipi_tso_segsz;
	...
			if (count > IAVF_MAX_TX_SEGS - 2) {
	...
			segsz -= seglen;
			curseg -= seglen;
			tsolen -= seglen;
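
/*
 * The intent of the loop above: the hardware limits how many data
 * descriptors any one MSS-sized chunk of a TSO payload may span, so the
 * driver walks the DMA segment list and rejects packets where some chunk
 * would exceed that limit (iflib then defragments the mbuf chain). A
 * self-contained model of the same test, written as a window scan rather
 * than the driver's incremental form; all names here are hypothetical and
 * 'limit' stands in for IAVF_MAX_TX_SEGS - 2.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
tso_is_sparse(const uint32_t *seg_len, int nsegs, uint32_t hlen,
    uint32_t mss, uint32_t pkt_len, int limit)
{
	uint64_t seg_start = 0;	/* byte offset where segment 's' begins */
	int s = 0;

	/* Scan each MSS-sized window of payload past the headers. */
	for (uint64_t win = hlen; win < pkt_len; win += mss) {
		uint64_t win_end = win + mss;
		uint64_t pos;
		int count = 0;

		if (win_end > pkt_len)
			win_end = pkt_len;

		/* Advance to the first segment containing 'win'. */
		while (s < nsegs && seg_start + seg_len[s] <= win)
			seg_start += seg_len[s++];

		/* Count the segments overlapping [win, win_end). */
		pos = seg_start;
		for (int t = s; t < nsegs && pos < win_end; t++) {
			count++;
			pos += seg_len[t];
		}
		if (count > limit)
			return (true);
	}
	return (false);
}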

/**
 * iavf_tx_setup_offload - Setup Tx offload parameters
 */
	switch (pi->ipi_etype) {
	...
		if (pi->ipi_csum_flags & IAVF_CSUM_IPV4)
	...
	*off |= (pi->ipi_ehdrlen >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
	*off |= (pi->ipi_ip_hlen >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;

	switch (pi->ipi_ipproto) {
	...
		if (pi->ipi_csum_flags & IAVF_CSUM_TCP) {
	...
			*off |= (pi->ipi_tcp_hlen >> 2) <<
			    IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
	...
			MPASS(pi->ipi_tcp_hlen != 0);
	...
		if (pi->ipi_csum_flags & IAVF_CSUM_UDP) {
	...
		if (pi->ipi_csum_flags & IAVF_CSUM_SCTP) {
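
/*
 * The shifts above are unit conversions: the hardware wants the MAC header
 * length in 2-byte words and the IP/L4 header lengths in 4-byte dwords,
 * hence the >> 1 and >> 2 before each field is positioned. A sketch of the
 * packing with illustrative field positions (the real shift values live in
 * iavf_type.h):
 */
#include <stdint.h>

#define LEN_MACLEN_SHIFT	0	/* MAC header, 2-byte words */
#define LEN_IPLEN_SHIFT		7	/* IP header, 4-byte dwords */
#define LEN_L4LEN_SHIFT		14	/* L4 header, 4-byte dwords */

static uint32_t
pack_hdr_lens(uint32_t ehdrlen, uint32_t ip_hlen, uint32_t l4_hlen)
{
	uint32_t off = 0;

	off |= (ehdrlen >> 1) << LEN_MACLEN_SHIFT;
	off |= (ip_hlen >> 2) << LEN_IPLEN_SHIFT;
	off |= (l4_hlen >> 2) << LEN_L4LEN_SHIFT;
	return (off);
}

/*
 * For a plain TCP/IPv4 frame (14-byte Ethernet, 20-byte IP, 20-byte TCP
 * headers) the three fields come out as 7, 5 and 5.
 */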

/**
 * iavf_tso_setup - Setup TSO context descriptor
 */
	idx = pi->ipi_pidx;
	TXD = (struct iavf_tx_context_desc *) &txr->tx_base[idx];
	total_hdr_len = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen;
	tsolen = pi->ipi_len - total_hdr_len;
	scctx = txr->que->vsi->shared;
	...
	if (pi->ipi_tso_segsz < IAVF_MIN_TSO_MSS) {
		txr->mss_too_small++;
		pi->ipi_tso_segsz = IAVF_MIN_TSO_MSS;
	}
	mss = pi->ipi_tso_segsz;
	...
	MPASS(pi->ipi_ehdrlen != 0);
	MPASS(pi->ipi_ip_hlen != 0);
	...
	TXD->type_cmd_tso_mss = htole64(type_cmd_tso_mss);

	TXD->tunneling_params = htole32(0);
	txr->que->tso++;

	return ((idx + 1) & (scctx->isc_ntxd[0] - 1));
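
/*
 * The return statement relies on iflib sizing these rings to a power of
 * two, so masking with ntxd - 1 is a branch-free modulo. The same idiom as
 * a standalone helper:
 */
#include <stdint.h>

/* Advance a ring index; assumes ntxd is a power of two. */
static inline uint16_t
ring_next(uint16_t idx, uint16_t ntxd)
{
	return ((idx + 1) & (ntxd - 1));
}

/* With ntxd = 1024, index 1023 wraps to 0 and 5 advances to 6. */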

/**
 * iavf_isc_txd_encap - Encapsulate a Tx packet into descriptors
 */
	if_softc_ctx_t scctx = vsi->shared;
	struct iavf_tx_queue *que = &vsi->tx_queues[pi->ipi_qsidx];
	struct tx_ring *txr = &que->txr;
	int nsegs = pi->ipi_nsegs;
	bus_dma_segment_t *segs = pi->ipi_segs;
	...
	if (__predict_false(pi->ipi_len < IAVF_MIN_FRAME)) {
		que->pkt_too_small++;
		return (EINVAL);
	}
	...
	i = pi->ipi_pidx;
	...
	tx_intr = (pi->ipi_flags & IPI_TX_INTR);
	...
	if (pi->ipi_csum_flags & CSUM_OFFLOAD) {
		...
		if (pi->ipi_csum_flags & CSUM_TSO) {
	...
	if (pi->ipi_mflags & M_VLANTAG)
	...
	mask = scctx->isc_ntxd[0] - 1;
	...
	MPASS(pi->ipi_len >= IAVF_MIN_FRAME);
	...
	if (!(pi->ipi_csum_flags & CSUM_TSO))
		MPASS(pi->ipi_len <= IAVF_MAX_FRAME);
	...
		txd = &txr->tx_base[i];
	...
		txd->buffer_addr = htole64(segs[j].ds_addr);
		txd->cmd_type_offset_bsz =
		    htole64(IAVF_TX_DESC_DTYPE_DATA
		    | ((u64)cmd << IAVF_TXD_QW1_CMD_SHIFT)
		    | ((u64)off << IAVF_TXD_QW1_OFFSET_SHIFT)
		    | ((u64)seglen << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT)
		    | ((u64)htole16(pi->ipi_vtag) << IAVF_TXD_QW1_L2TAG1_SHIFT));
	...
		txr->tx_bytes += seglen;
	...
	txd->cmd_type_offset_bsz |=
	    htole64(((u64)cmd << IAVF_TXD_QW1_CMD_SHIFT));
	...
	if (!vsi->enable_head_writeback && tx_intr) {
		txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
		txr->tx_rs_pidx = (txr->tx_rs_pidx + 1) & mask;
		MPASS(txr->tx_rs_pidx != txr->tx_rs_cidx);
	}
	pi->ipi_new_pidx = i;

	++txr->tx_packets;
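
/*
 * Each DMA segment becomes one 16-byte data descriptor: a buffer address
 * plus one quadword packing the descriptor type, command flags, the offset
 * field built by iavf_tx_setup_offload(), the buffer length and the VLAN
 * tag. A user-space model of that packing; the shift values are
 * illustrative, not quoted from iavf_type.h.
 */
#include <stdint.h>

#define QW1_DTYPE_DATA		0x0ULL
#define QW1_CMD_SHIFT		4
#define QW1_OFFSET_SHIFT	16
#define QW1_TX_BUF_SZ_SHIFT	34
#define QW1_L2TAG1_SHIFT	48

struct txd_model {
	uint64_t buffer_addr;
	uint64_t cmd_type_offset_bsz;
};

static void
fill_data_desc(struct txd_model *txd, uint64_t paddr, uint64_t seglen,
    uint64_t cmd, uint64_t off, uint16_t vtag)
{
	txd->buffer_addr = paddr;	/* htole64(segs[j].ds_addr) in the driver */
	txd->cmd_type_offset_bsz = QW1_DTYPE_DATA
	    | (cmd << QW1_CMD_SHIFT)
	    | (off << QW1_OFFSET_SHIFT)
	    | (seglen << QW1_TX_BUF_SZ_SHIFT)
	    | ((uint64_t)vtag << QW1_L2TAG1_SHIFT);
}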

/**
 * iavf_isc_txd_flush - Flush Tx ring
 */
	struct tx_ring *txr = &vsi->tx_queues[txqid].txr;
	...
	MPASS(pidx < vsi->shared->isc_ntxd[0]);
	wr32(vsi->hw, txr->tail, pidx);

/**
 * iavf_init_tx_ring - Initialize queue Tx ring
 */
	struct tx_ring *txr = &que->txr;
	...
	bzero((void *)txr->tx_base,
	    (sizeof(struct iavf_tx_desc)) *
	    (vsi->shared->isc_ntxd[0] + (vsi->enable_head_writeback ? 1 : 0)));
	...
	wr32(vsi->hw, txr->tail, 0);

/**
 * iavf_get_tx_head - Get the index of the head of a ring
 */
	if_softc_ctx_t scctx = que->vsi->shared;
	struct tx_ring *txr = &que->txr;
	void *head = &txr->tx_base[scctx->isc_ntxd[0]];
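
/*
 * With head write back enabled, the extra descriptor slot zeroed by
 * iavf_init_tx_ring() (the "+ 1" above) is where the device DMAs its
 * current head index, so tx_base[isc_ntxd[0]] is one slot past the ring
 * rather than a real descriptor. Reading it is a 32-bit little-endian
 * load, sketched here for FreeBSD userland:
 */
#include <sys/endian.h>
#include <stdint.h>

static uint32_t
read_hwb_head(volatile void *head_wb)
{
	/* The device wrote a little-endian 32-bit head index here. */
	return (le32toh(*(volatile uint32_t *)head_wb));
}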

/**
 * iavf_isc_txd_credits_update_hwb - Update Tx ring credits
 */
	if_softc_ctx_t scctx = vsi->shared;
	struct iavf_tx_queue *que = &vsi->tx_queues[qid];
	struct tx_ring *txr = &que->txr;
	...
	credits = head - txr->tx_cidx_processed;
	if (credits < 0)
		credits += scctx->isc_ntxd[0];
	if (clear)
		txr->tx_cidx_processed = head;
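
/*
 * The fix-up above is a wraparound-safe ring distance. Standalone, with a
 * worked example: on a 512-entry ring, head = 5 and tx_cidx_processed = 500
 * give 5 - 500 = -495, and adding 512 yields the 17 descriptors actually
 * completed.
 */
static int
hwb_credits(int head, int cidx_processed, int ntxd)
{
	int credits = head - cidx_processed;

	if (credits < 0)
		credits += ntxd;	/* head wrapped past the ring end */
	return (credits);
}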

/**
 * iavf_isc_txd_credits_update_dwb - Update Tx ring credits
 */
	struct iavf_tx_queue *tx_que = &vsi->tx_queues[txqid];
	if_softc_ctx_t scctx = vsi->shared;
	struct tx_ring *txr = &tx_que->txr;
	...
	rs_cidx = txr->tx_rs_cidx;
	if (rs_cidx == txr->tx_rs_pidx)
		return (0);
	cur = txr->tx_rsq[rs_cidx];
	...
	prev = txr->tx_cidx_processed;
	ntxd = scctx->isc_ntxd[0];
	...
		delta = (int32_t)cur - (int32_t)prev;
		if (delta < 0)
			delta += ntxd;
		...
		rs_cidx = (rs_cidx + 1) & (ntxd - 1);
		if (rs_cidx == txr->tx_rs_pidx)
			break;
		cur = txr->tx_rsq[rs_cidx];
	...
	txr->tx_rs_cidx = rs_cidx;
	txr->tx_cidx_processed = prev;
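
/*
 * Without head write back, completion is inferred from the descriptors
 * where encap set an RS bit (their positions are queued in tx_rsq[]): once
 * such a descriptor reports DESC_DONE, everything up to and including it
 * is complete, and the wraparound-safe delta above accumulates the credit
 * count. The delta step in isolation (again assuming a power-of-two ring):
 */
#include <stdint.h>

static int32_t
ring_delta(uint16_t prev, uint16_t cur, uint16_t ntxd)
{
	int32_t delta = (int32_t)cur - (int32_t)prev;

	if (delta < 0)
		delta += ntxd;	/* the RS marker wrapped */
	return (delta);
}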

/**
 * iavf_isc_rxd_refill - Prepare descriptors for re-use
 * @iru: the Rx descriptor update structure
 *
 * Update Rx descriptors for a given queue so that they can be re-used by
 * the hardware.
 */
	if_softc_ctx_t scctx = vsi->shared;
	struct rx_ring *rxr = &((vsi->rx_queues[iru->iru_qsidx]).rxr);
	...
	paddrs = iru->iru_paddrs;
	pidx = iru->iru_pidx;
	count = iru->iru_count;

	for (i = 0, next_pidx = pidx; i < count; i++) {
		rxr->rx_base[next_pidx].read.pkt_addr = htole64(paddrs[i]);
		if (++next_pidx == scctx->isc_nrxd[0])
			next_pidx = 0;
	}

/**
 * iavf_isc_rxd_flush - Notify hardware of new Rx descriptors
 * @rxqid: Rx queue to update
 *
 * Updates the tail pointer of the Rx ring, notifying hardware of new
 * descriptors available for receive.
 */
	struct rx_ring *rxr = &vsi->rx_queues[rxqid].rxr;
	...
	wr32(vsi->hw, rxr->tail, pidx);

/**
 * iavf_isc_rxd_available - Calculate number of available Rx descriptors
 * @rxqid: Rx queue to check
 * @budget: maximum Rx budget
 *
 * Determines how many packets are ready to be processed in the Rx queue, up
 * to the specified budget.
 */
	struct rx_ring *rxr = &vsi->rx_queues[rxqid].rxr;
	...
	nrxd = vsi->shared->isc_nrxd[0];

	for (cnt = 0, i = idx; cnt < nrxd - 1 && cnt <= budget;) {
		rxd = &rxr->rx_base[i];
		qword = le64toh(rxd->wb.qword1.status_error_len);
		...
	}
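
/*
 * A model of the scan the loop above performs: starting at the current
 * index, stop at the first descriptor the hardware has not marked done
 * (DD) and count completed packets (descriptors carrying EOP), bounded by
 * the budget. Bit positions are illustrative; 'status' models the decoded
 * status field of each descriptor.
 */
#include <stdint.h>

#define RXD_STATUS_DD	(1u << 0)
#define RXD_STATUS_EOP	(1u << 1)

static int
rx_available(const uint32_t *status, int nrxd, int idx, int budget)
{
	int cnt, i;

	for (cnt = 0, i = idx; cnt < nrxd - 1 && cnt <= budget;) {
		uint32_t st = status[i];

		if ((st & RXD_STATUS_DD) == 0)
			break;		/* hardware has not finished it */
		if (++i == nrxd)
			i = 0;		/* wrap at ring end */
		if (st & RXD_STATUS_EOP)
			cnt++;		/* count whole packets, not frags */
	}
	return (cnt);
}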

/**
 * iavf_isc_rxd_pkt_get - Decapsulate packet from Rx descriptors
 *
 * Read packet data from the Rx ring descriptors and fill in the packet info
 * structure.
 */
	if_softc_ctx_t scctx = vsi->shared;
	struct iavf_rx_queue *que = &vsi->rx_queues[ri->iri_qsidx];
	struct rx_ring *rxr = &que->rxr;
	...
	cidx = ri->iri_cidx;
	...
		cur = &rxr->rx_base[cidx];
		qword = le64toh(cur->wb.qword1.status_error_len);
		...
		ri->iri_len += plen;
		rxr->rx_bytes += plen;

		cur->wb.qword1.status_error_len = 0;
		...
			rxr->desc_errs++;
		...
		ri->iri_frags[i].irf_flid = 0;
		ri->iri_frags[i].irf_idx = cidx;
		ri->iri_frags[i].irf_len = plen;
		if (++cidx == vsi->shared->isc_nrxd[0])
			cidx = 0;
	...
	rxr->packets++;
	rxr->rx_packets++;

	if ((scctx->isc_capenable & IFCAP_RXCSUM) != 0)
		iavf_rx_checksum(ri, status, error, ptype);
	ri->iri_flowid = le32toh(cur->wb.qword0.hi_dword.rss);
	ri->iri_rsstype = iavf_ptype_to_hash(ptype);
	...
		ri->iri_vtag = le16toh(cur->wb.qword0.lo_dword.l2tag1);
		ri->iri_flags |= M_VLANTAG;
	...
	ri->iri_nfrags = i;
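
/*
 * A packet may span several descriptors; the loop above records one
 * iri_frags[] entry per descriptor until it reaches the one carrying EOP,
 * wrapping cidx at the ring end. The gather loop in outline (types
 * simplified, names hypothetical; 'plen' and 'eop' stand in for fields
 * decoded from each descriptor's QW1):
 */
#include <stdbool.h>
#include <stdint.h>

struct frag { int idx; uint32_t len; };

static int
gather_frags(struct frag *frags, const uint32_t *plen, const bool *eop,
    int cidx, int nrxd)
{
	int i = 0;

	do {
		frags[i].idx = cidx;
		frags[i].len = plen[cidx];
		if (++cidx == nrxd)
			cidx = 0;	/* wrap at ring end */
		i++;
	} while (!eop[frags[i - 1].idx]);

	return (i);	/* becomes ri->iri_nfrags */
}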

/**
 * iavf_rx_checksum - Handle Rx hardware checksum indication
 * @ri: Rx packet info structure
 * @status: status from Rx descriptor
 * @error: error from Rx descriptor
 */
	ri->iri_csum_flags = 0;
	...
		ri->iri_csum_flags = 0;
	...
	ri->iri_csum_flags |= CSUM_L3_CALC;
	...
	ri->iri_csum_flags |= CSUM_L3_VALID;
	ri->iri_csum_flags |= CSUM_L4_CALC;
	...
	ri->iri_csum_flags |= CSUM_L4_VALID;
	ri->iri_csum_data |= htons(0xffff);
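
/*
 * The flag progression above encodes a three-way outcome: a bad IP
 * checksum leaves only "calculated"; a bad L4 checksum leaves a valid L3
 * plus an L4 "calculated"; a clean packet gets everything marked valid
 * with the checksum datum pre-set to 0xffff so the stack skips its own
 * verification. The same mapping in a compact model (bit and flag values
 * are illustrative):
 */
#include <stdint.h>

#define ERR_IPE		(1u << 0)	/* IP checksum error */
#define ERR_L4E		(1u << 1)	/* L4 checksum error */

#define F_L3_CALC	(1u << 0)
#define F_L3_VALID	(1u << 1)
#define F_L4_CALC	(1u << 2)
#define F_L4_VALID	(1u << 3)

static uint32_t
rx_csum_flags(uint32_t error)
{
	uint32_t flags = F_L3_CALC;

	if (error & ERR_IPE)
		return (flags);		/* IP checksum bad: calc only */
	flags |= F_L3_VALID | F_L4_CALC;
	if (error & ERR_L4E)
		return (flags);		/* L4 checksum bad */
	return (flags | F_L4_VALID);	/* all good; datum set to 0xffff */
}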