xref: /freebsd/sys/dev/ice/ice_iflib_txrx.c (revision 8d5feede40b489a6c0b2f2ee5258f8851ae6af88)
171d10453SEric Joyner /* SPDX-License-Identifier: BSD-3-Clause */
28923de59SPiotr Kubaj /*  Copyright (c) 2022, Intel Corporation
371d10453SEric Joyner  *  All rights reserved.
471d10453SEric Joyner  *
571d10453SEric Joyner  *  Redistribution and use in source and binary forms, with or without
671d10453SEric Joyner  *  modification, are permitted provided that the following conditions are met:
771d10453SEric Joyner  *
871d10453SEric Joyner  *   1. Redistributions of source code must retain the above copyright notice,
971d10453SEric Joyner  *      this list of conditions and the following disclaimer.
1071d10453SEric Joyner  *
1171d10453SEric Joyner  *   2. Redistributions in binary form must reproduce the above copyright
1271d10453SEric Joyner  *      notice, this list of conditions and the following disclaimer in the
1371d10453SEric Joyner  *      documentation and/or other materials provided with the distribution.
1471d10453SEric Joyner  *
1571d10453SEric Joyner  *   3. Neither the name of the Intel Corporation nor the names of its
1671d10453SEric Joyner  *      contributors may be used to endorse or promote products derived from
1771d10453SEric Joyner  *      this software without specific prior written permission.
1871d10453SEric Joyner  *
1971d10453SEric Joyner  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
2071d10453SEric Joyner  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2171d10453SEric Joyner  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2271d10453SEric Joyner  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
2371d10453SEric Joyner  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
2471d10453SEric Joyner  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
2571d10453SEric Joyner  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
2671d10453SEric Joyner  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
2771d10453SEric Joyner  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
2871d10453SEric Joyner  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
2971d10453SEric Joyner  *  POSSIBILITY OF SUCH DAMAGE.
3071d10453SEric Joyner  */
3171d10453SEric Joyner /*$FreeBSD$*/
3271d10453SEric Joyner 
3371d10453SEric Joyner /**
3471d10453SEric Joyner  * @file ice_iflib_txrx.c
3571d10453SEric Joyner  * @brief iflib Tx/Rx hotpath
3671d10453SEric Joyner  *
3771d10453SEric Joyner  * Main location for the iflib Tx/Rx hotpath implementation.
3871d10453SEric Joyner  *
3971d10453SEric Joyner  * Contains the implementation for the iflib function callbacks and the
4071d10453SEric Joyner  * if_txrx ops structure.
4171d10453SEric Joyner  */
4271d10453SEric Joyner 
4371d10453SEric Joyner #include "ice_iflib.h"
4471d10453SEric Joyner 
4571d10453SEric Joyner /* Tx/Rx hotpath utility functions */
4671d10453SEric Joyner #include "ice_common_txrx.h"
4771d10453SEric Joyner 
4871d10453SEric Joyner /*
4971d10453SEric Joyner  * iflib txrx method declarations
5071d10453SEric Joyner  */
5171d10453SEric Joyner static int ice_ift_txd_encap(void *arg, if_pkt_info_t pi);
5271d10453SEric Joyner static int ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri);
5371d10453SEric Joyner static void ice_ift_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
5471d10453SEric Joyner static int ice_ift_txd_credits_update(void *arg, uint16_t txqid, bool clear);
5571d10453SEric Joyner static int ice_ift_rxd_available(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget);
5671d10453SEric Joyner static void ice_ift_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx, qidx_t pidx);
5771d10453SEric Joyner static void ice_ift_rxd_refill(void *arg, if_rxd_update_t iru);
588923de59SPiotr Kubaj static qidx_t ice_ift_queue_select(void *arg, struct mbuf *m, if_pkt_info_t pi);
5971d10453SEric Joyner 
6071d10453SEric Joyner /* Macro to help extract the NIC mode flexible Rx descriptor fields from the
6171d10453SEric Joyner  * advanced 32byte Rx descriptors.
6271d10453SEric Joyner  */
6371d10453SEric Joyner #define RX_FLEX_NIC(desc, field) \
6471d10453SEric Joyner 	(((struct ice_32b_rx_flex_desc_nic *)desc)->field)
6571d10453SEric Joyner 
6671d10453SEric Joyner /**
6771d10453SEric Joyner  * @var ice_txrx
6871d10453SEric Joyner  * @brief Tx/Rx operations for the iflib stack
6971d10453SEric Joyner  *
7071d10453SEric Joyner  * Structure defining the Tx and Rx related operations that iflib can request
7171d10453SEric Joyner  * the driver to perform. These are the main entry points for the hot path of
7271d10453SEric Joyner  * the transmit and receive paths in the iflib driver.
7371d10453SEric Joyner  */
struct if_txrx ice_txrx = {
	.ift_txd_encap = ice_ift_txd_encap,		/* write Tx descriptors for a packet */
	.ift_txd_flush = ice_ift_txd_flush,		/* advance the Tx tail register */
	.ift_txd_credits_update = ice_ift_txd_credits_update, /* reclaim completed Tx descriptors */
	.ift_rxd_available = ice_ift_rxd_available,	/* count completed Rx packets */
	.ift_rxd_pkt_get = ice_ift_rxd_pkt_get,		/* hand received data up to iflib */
	.ift_rxd_refill = ice_ift_rxd_refill,		/* post new Rx buffer addresses */
	.ift_rxd_flush = ice_ift_rxd_flush,		/* advance the Rx tail register */
	.ift_txq_select_v2 = ice_ift_queue_select,	/* pick a Tx queue (TC/DSCP aware) */
};
8471d10453SEric Joyner 
8571d10453SEric Joyner /**
8671d10453SEric Joyner  * ice_ift_txd_encap - prepare Tx descriptors for a packet
8771d10453SEric Joyner  * @arg: the iflib softc structure pointer
8871d10453SEric Joyner  * @pi: packet info
8971d10453SEric Joyner  *
 * Prepares and encapsulates the given packet into Tx descriptors, in
9171d10453SEric Joyner  * preparation for sending to the transmit engine. Sets the necessary context
9271d10453SEric Joyner  * descriptors for TSO and other offloads, and prepares the last descriptor
9371d10453SEric Joyner  * for the writeback status.
9471d10453SEric Joyner  *
9571d10453SEric Joyner  * Return 0 on success, non-zero error code on failure.
9671d10453SEric Joyner  */
static int
ice_ift_txd_encap(void *arg, if_pkt_info_t pi)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	struct ice_tx_queue *txq = &sc->pf_vsi.tx_queues[pi->ipi_qsidx];
	int nsegs = pi->ipi_nsegs;
	bus_dma_segment_t *segs = pi->ipi_segs;
	struct ice_tx_desc *txd = NULL;
	int i, j, mask, pidx_last;
	u32 cmd, off;

	cmd = off = 0;
	i = pi->ipi_pidx;

	/* Set up the TSO/CSUM offload */
	if (pi->ipi_csum_flags & ICE_CSUM_OFFLOAD) {
		/* Set up the TSO context descriptor if required */
		if (pi->ipi_csum_flags & CSUM_TSO) {
			/* Reject chains the hardware cannot segment so
			 * iflib can defragment the mbuf chain and retry.
			 */
			if (ice_tso_detect_sparse(pi))
				return (EFBIG);
			/* ice_tso_setup writes a context descriptor and
			 * returns the next free ring index to start the
			 * data descriptors at.
			 */
			i = ice_tso_setup(txq, pi);
		}
		ice_tx_setup_offload(txq, pi, &cmd, &off);
	}
	if (pi->ipi_mflags & M_VLANTAG)
		cmd |= ICE_TX_DESC_CMD_IL2TAG1;

	/* desc_count is a power of two, so masking wraps the ring index */
	mask = txq->desc_count - 1;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;

		txd = &txq->tx_base[i];
		seglen = segs[j].ds_len;

		txd->buf_addr = htole64(segs[j].ds_addr);
		txd->cmd_type_offset_bsz =
		    htole64(ICE_TX_DESC_DTYPE_DATA
		    | ((u64)cmd  << ICE_TXD_QW1_CMD_S)
		    | ((u64)off << ICE_TXD_QW1_OFFSET_S)
		    | ((u64)seglen  << ICE_TXD_QW1_TX_BUF_SZ_S)
		    | ((u64)htole16(pi->ipi_vtag) << ICE_TXD_QW1_L2TAG1_S));

		txq->stats.tx_bytes += seglen;
		pidx_last = i;
		i = (i+1) & mask;
	}

	/* Set the last descriptor for report.
	 * NOTE(review): assumes nsegs >= 1 so txd/pidx_last were set by the
	 * loop above -- iflib is presumed to guarantee a non-empty segment
	 * list here.
	 */
#define ICE_TXD_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)
	txd->cmd_type_offset_bsz |=
	    htole64(((u64)ICE_TXD_CMD << ICE_TXD_QW1_CMD_S));

	/* Add to report status array so credits_update can find the EOP
	 * descriptor of this packet later.
	 */
	txq->tx_rsq[txq->tx_rs_pidx] = pidx_last;
	txq->tx_rs_pidx = (txq->tx_rs_pidx+1) & mask;
	MPASS(txq->tx_rs_pidx != txq->tx_rs_cidx);

	/* Tell iflib where the next free descriptor slot is */
	pi->ipi_new_pidx = i;

	++txq->stats.tx_packets;
	return (0);
}
15971d10453SEric Joyner 
16071d10453SEric Joyner /**
16171d10453SEric Joyner  * ice_ift_txd_flush - Flush Tx descriptors to hardware
16271d10453SEric Joyner  * @arg: device specific softc pointer
16371d10453SEric Joyner  * @txqid: the Tx queue to flush
16471d10453SEric Joyner  * @pidx: descriptor index to advance tail to
16571d10453SEric Joyner  *
16671d10453SEric Joyner  * Advance the Transmit Descriptor Tail (TDT). This indicates to hardware that
16771d10453SEric Joyner  * frames are available for transmit.
16871d10453SEric Joyner  */
16971d10453SEric Joyner static void
17071d10453SEric Joyner ice_ift_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
17171d10453SEric Joyner {
17271d10453SEric Joyner 	struct ice_softc *sc = (struct ice_softc *)arg;
17371d10453SEric Joyner 	struct ice_tx_queue *txq = &sc->pf_vsi.tx_queues[txqid];
17471d10453SEric Joyner 	struct ice_hw *hw = &sc->hw;
17571d10453SEric Joyner 
17671d10453SEric Joyner 	wr32(hw, txq->tail, pidx);
17771d10453SEric Joyner }
17871d10453SEric Joyner 
17971d10453SEric Joyner /**
18071d10453SEric Joyner  * ice_ift_txd_credits_update - cleanup Tx descriptors
18171d10453SEric Joyner  * @arg: device private softc
18271d10453SEric Joyner  * @txqid: the Tx queue to update
18371d10453SEric Joyner  * @clear: if false, only report, do not actually clean
18471d10453SEric Joyner  *
18571d10453SEric Joyner  * If clear is false, iflib is asking if we *could* clean up any Tx
18671d10453SEric Joyner  * descriptors.
18771d10453SEric Joyner  *
18871d10453SEric Joyner  * If clear is true, iflib is requesting to cleanup and reclaim used Tx
18971d10453SEric Joyner  * descriptors.
19071d10453SEric Joyner  */
static int
ice_ift_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	struct ice_tx_queue *txq = &sc->pf_vsi.tx_queues[txqid];

	qidx_t processed = 0;
	qidx_t cur, prev, ntxd, rs_cidx;
	int32_t delta;
	bool is_done;

	/* Empty report-status queue: nothing was sent, nothing to clean */
	rs_cidx = txq->tx_rs_cidx;
	if (rs_cidx == txq->tx_rs_pidx)
		return (0);
	cur = txq->tx_rsq[rs_cidx];
	MPASS(cur != QIDX_INVALID);
	is_done = ice_is_tx_desc_done(&txq->tx_base[cur]);

	if (!is_done)
		return (0);
	else if (clear == false)
		/* Report-only mode: at least one packet is cleanable */
		return (1);

	/* Walk the report-status queue, accumulating the number of ring
	 * slots freed between consecutive EOP descriptors, until we hit
	 * the first descriptor hardware has not finished with.
	 */
	prev = txq->tx_cidx_processed;
	ntxd = txq->desc_count;
	do {
		MPASS(prev != cur);
		/* delta is the slot distance prev -> cur; negative values
		 * mean cur wrapped past the end of the ring.
		 */
		delta = (int32_t)cur - (int32_t)prev;
		if (delta < 0)
			delta += ntxd;
		MPASS(delta > 0);
		processed += delta;
		prev = cur;
		rs_cidx = (rs_cidx + 1) & (ntxd-1);
		if (rs_cidx == txq->tx_rs_pidx)
			break;
		cur = txq->tx_rsq[rs_cidx];
		MPASS(cur != QIDX_INVALID);
		is_done = ice_is_tx_desc_done(&txq->tx_base[cur]);
	} while (is_done);

	/* Publish how far cleaning progressed for the next invocation */
	txq->tx_rs_cidx = rs_cidx;
	txq->tx_cidx_processed = prev;

	return (processed);
}
23771d10453SEric Joyner 
23871d10453SEric Joyner /**
23971d10453SEric Joyner  * ice_ift_rxd_available - Return number of available Rx packets
24071d10453SEric Joyner  * @arg: device private softc
24171d10453SEric Joyner  * @rxqid: the Rx queue id
24271d10453SEric Joyner  * @pidx: descriptor start point
24371d10453SEric Joyner  * @budget: maximum Rx budget
24471d10453SEric Joyner  *
24571d10453SEric Joyner  * Determines how many Rx packets are available on the queue, up to a maximum
24671d10453SEric Joyner  * of the given budget.
24771d10453SEric Joyner  */
24871d10453SEric Joyner static int
24971d10453SEric Joyner ice_ift_rxd_available(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget)
25071d10453SEric Joyner {
25171d10453SEric Joyner 	struct ice_softc *sc = (struct ice_softc *)arg;
25271d10453SEric Joyner 	struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[rxqid];
25371d10453SEric Joyner 	union ice_32b_rx_flex_desc *rxd;
25471d10453SEric Joyner 	uint16_t status0;
25571d10453SEric Joyner 	int cnt, i, nrxd;
25671d10453SEric Joyner 
25771d10453SEric Joyner 	nrxd = rxq->desc_count;
25871d10453SEric Joyner 
25971d10453SEric Joyner 	for (cnt = 0, i = pidx; cnt < nrxd - 1 && cnt < budget;) {
26071d10453SEric Joyner 		rxd = &rxq->rx_base[i];
26171d10453SEric Joyner 		status0 = le16toh(rxd->wb.status_error0);
26271d10453SEric Joyner 
26371d10453SEric Joyner 		if ((status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) == 0)
26471d10453SEric Joyner 			break;
26571d10453SEric Joyner 		if (++i == nrxd)
26671d10453SEric Joyner 			i = 0;
26771d10453SEric Joyner 		if (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S))
26871d10453SEric Joyner 			cnt++;
26971d10453SEric Joyner 	}
27071d10453SEric Joyner 
27171d10453SEric Joyner 	return (cnt);
27271d10453SEric Joyner }
27371d10453SEric Joyner 
27471d10453SEric Joyner /**
27571d10453SEric Joyner  * ice_ift_rxd_pkt_get - Called by iflib to send data to upper layer
27671d10453SEric Joyner  * @arg: device specific softc
27771d10453SEric Joyner  * @ri: receive packet info
27871d10453SEric Joyner  *
27971d10453SEric Joyner  * This function is called by iflib, and executes in ithread context. It is
28071d10453SEric Joyner  * called by iflib to obtain data which has been DMA'ed into host memory.
28156429daeSEric Joyner  * Returns zero on success, and EBADMSG on failure.
28271d10453SEric Joyner  */
28371d10453SEric Joyner static int
28471d10453SEric Joyner ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri)
28571d10453SEric Joyner {
28671d10453SEric Joyner 	struct ice_softc *sc = (struct ice_softc *)arg;
28771d10453SEric Joyner 	struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[ri->iri_qsidx];
28871d10453SEric Joyner 	union ice_32b_rx_flex_desc *cur;
289f7926a6dSVincenzo Maffione 	u16 status0, plen, ptype;
29071d10453SEric Joyner 	bool eop;
29171d10453SEric Joyner 	size_t cidx;
29271d10453SEric Joyner 	int i;
29371d10453SEric Joyner 
29471d10453SEric Joyner 	cidx = ri->iri_cidx;
29571d10453SEric Joyner 	i = 0;
29671d10453SEric Joyner 	do {
29771d10453SEric Joyner 		/* 5 descriptor receive limit */
29871d10453SEric Joyner 		MPASS(i < ICE_MAX_RX_SEGS);
29971d10453SEric Joyner 
30071d10453SEric Joyner 		cur = &rxq->rx_base[cidx];
30171d10453SEric Joyner 		status0 = le16toh(cur->wb.status_error0);
30271d10453SEric Joyner 		plen = le16toh(cur->wb.pkt_len) &
30371d10453SEric Joyner 			ICE_RX_FLX_DESC_PKT_LEN_M;
30471d10453SEric Joyner 
30571d10453SEric Joyner 		/* we should never be called without a valid descriptor */
30671d10453SEric Joyner 		MPASS((status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) != 0);
30771d10453SEric Joyner 
30871d10453SEric Joyner 		ri->iri_len += plen;
30971d10453SEric Joyner 
31071d10453SEric Joyner 		cur->wb.status_error0 = 0;
31171d10453SEric Joyner 		eop = (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S));
31271d10453SEric Joyner 
31371d10453SEric Joyner 		ri->iri_frags[i].irf_flid = 0;
31471d10453SEric Joyner 		ri->iri_frags[i].irf_idx = cidx;
31571d10453SEric Joyner 		ri->iri_frags[i].irf_len = plen;
31671d10453SEric Joyner 		if (++cidx == rxq->desc_count)
31771d10453SEric Joyner 			cidx = 0;
31871d10453SEric Joyner 		i++;
31971d10453SEric Joyner 	} while (!eop);
32071d10453SEric Joyner 
32156429daeSEric Joyner 	/* End of Packet reached; cur is eop/last descriptor */
32271d10453SEric Joyner 
32356429daeSEric Joyner 	/* Make sure packets with bad L2 values are discarded.
32456429daeSEric Joyner 	 * This bit is only valid in the last descriptor.
32556429daeSEric Joyner 	 */
32656429daeSEric Joyner 	if (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S)) {
32756429daeSEric Joyner 		rxq->stats.desc_errs++;
32856429daeSEric Joyner 		return (EBADMSG);
32956429daeSEric Joyner 	}
33056429daeSEric Joyner 
33156429daeSEric Joyner 	/* Get VLAN tag information if one is in descriptor */
332f7926a6dSVincenzo Maffione 	if (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
333f7926a6dSVincenzo Maffione 		ri->iri_vtag = le16toh(cur->wb.l2tag1);
33471d10453SEric Joyner 		ri->iri_flags |= M_VLANTAG;
335f7926a6dSVincenzo Maffione 	}
33656429daeSEric Joyner 
33756429daeSEric Joyner 	/* Capture soft statistics for this Rx queue */
33856429daeSEric Joyner 	rxq->stats.rx_packets++;
33956429daeSEric Joyner 	rxq->stats.rx_bytes += ri->iri_len;
34056429daeSEric Joyner 
34156429daeSEric Joyner 	/* Get packet type and set checksum flags */
34256429daeSEric Joyner 	ptype = le16toh(cur->wb.ptype_flex_flags0) &
34356429daeSEric Joyner 		ICE_RX_FLEX_DESC_PTYPE_M;
3448923de59SPiotr Kubaj 	if ((iflib_get_ifp(sc->ctx)->if_capenable & IFCAP_RXCSUM) != 0)
34556429daeSEric Joyner 		ice_rx_checksum(rxq, &ri->iri_csum_flags,
34656429daeSEric Joyner 				&ri->iri_csum_data, status0, ptype);
34756429daeSEric Joyner 
34856429daeSEric Joyner 	/* Set remaining iflib RX descriptor info fields */
34956429daeSEric Joyner 	ri->iri_flowid = le32toh(RX_FLEX_NIC(&cur->wb, rss_hash));
35056429daeSEric Joyner 	ri->iri_rsstype = ice_ptype_to_hash(ptype);
351f7926a6dSVincenzo Maffione 	ri->iri_nfrags = i;
35271d10453SEric Joyner 	return (0);
35371d10453SEric Joyner }
35471d10453SEric Joyner 
35571d10453SEric Joyner /**
35671d10453SEric Joyner  * ice_ift_rxd_refill - Prepare Rx descriptors for re-use by hardware
35771d10453SEric Joyner  * @arg: device specific softc structure
35871d10453SEric Joyner  * @iru: the Rx descriptor update structure
35971d10453SEric Joyner  *
36071d10453SEric Joyner  * Update the Rx descriptor indices for a given queue, assigning new physical
36171d10453SEric Joyner  * addresses to the descriptors, preparing them for re-use by the hardware.
36271d10453SEric Joyner  */
36371d10453SEric Joyner static void
36471d10453SEric Joyner ice_ift_rxd_refill(void *arg, if_rxd_update_t iru)
36571d10453SEric Joyner {
36671d10453SEric Joyner 	struct ice_softc *sc = (struct ice_softc *)arg;
36771d10453SEric Joyner 	struct ice_rx_queue *rxq;
36871d10453SEric Joyner 	uint32_t next_pidx;
36971d10453SEric Joyner 	int i;
37071d10453SEric Joyner 	uint64_t *paddrs;
37171d10453SEric Joyner 	uint32_t pidx;
37271d10453SEric Joyner 	uint16_t qsidx, count;
37371d10453SEric Joyner 
37471d10453SEric Joyner 	paddrs = iru->iru_paddrs;
37571d10453SEric Joyner 	pidx = iru->iru_pidx;
37671d10453SEric Joyner 	qsidx = iru->iru_qsidx;
37771d10453SEric Joyner 	count = iru->iru_count;
37871d10453SEric Joyner 
37971d10453SEric Joyner 	rxq = &(sc->pf_vsi.rx_queues[qsidx]);
38071d10453SEric Joyner 
38171d10453SEric Joyner 	for (i = 0, next_pidx = pidx; i < count; i++) {
38271d10453SEric Joyner 		rxq->rx_base[next_pidx].read.pkt_addr = htole64(paddrs[i]);
38371d10453SEric Joyner 		if (++next_pidx == (uint32_t)rxq->desc_count)
38471d10453SEric Joyner 			next_pidx = 0;
38571d10453SEric Joyner 	}
38671d10453SEric Joyner }
38771d10453SEric Joyner 
38871d10453SEric Joyner /**
38971d10453SEric Joyner  * ice_ift_rxd_flush - Flush Rx descriptors to hardware
39071d10453SEric Joyner  * @arg: device specific softc pointer
39171d10453SEric Joyner  * @rxqid: the Rx queue to flush
39271d10453SEric Joyner  * @flidx: unused parameter
39371d10453SEric Joyner  * @pidx: descriptor index to advance tail to
39471d10453SEric Joyner  *
39571d10453SEric Joyner  * Advance the Receive Descriptor Tail (RDT). This indicates to hardware that
39671d10453SEric Joyner  * software is done with the descriptor and it can be recycled.
39771d10453SEric Joyner  */
39871d10453SEric Joyner static void
39971d10453SEric Joyner ice_ift_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx __unused,
40071d10453SEric Joyner 		  qidx_t pidx)
40171d10453SEric Joyner {
40271d10453SEric Joyner 	struct ice_softc *sc = (struct ice_softc *)arg;
40371d10453SEric Joyner 	struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[rxqid];
40471d10453SEric Joyner 	struct ice_hw *hw = &sc->hw;
40571d10453SEric Joyner 
40671d10453SEric Joyner 	wr32(hw, rxq->tail, pidx);
40771d10453SEric Joyner }
40856429daeSEric Joyner 
/**
 * ice_ift_queue_select - select a Tx queue for a packet
 * @arg: device specific softc
 * @m: the mbuf to transmit
 * @pi: packet info prepared by iflib
 *
 * Selects which Tx queue the packet should be sent on. When multiple
 * traffic classes are not configured, falls back to flowid-based selection
 * across all queues. Otherwise maps the packet to a TC -- via the DSCP map
 * in DSCP PFC mode, or the VLAN priority table otherwise -- and picks a
 * queue within that TC's queue range.
 */
static qidx_t
ice_ift_queue_select(void *arg, struct mbuf *m, if_pkt_info_t pi)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	struct ice_dcbx_cfg *local_dcbx_cfg;
	struct ice_vsi *vsi = &sc->pf_vsi;
	u16 tc_base_queue, tc_qcount;
	u8 up, tc;

#ifdef ALTQ
	/* Included to match default iflib behavior */
	/* Only go out on default queue if ALTQ is enabled */
	struct ifnet *ifp = (struct ifnet *)iflib_get_ifp(sc->ctx);
	if (if_altq_is_enabled(ifp))
		return (0);
#endif

	if (!ice_test_state(&sc->state, ICE_STATE_MULTIPLE_TCS)) {
		if (M_HASHTYPE_GET(m)) {
			/* Default iflib queue selection method */
			return (m->m_pkthdr.flowid % sc->pf_vsi.num_tx_queues);
		} else
			return (0);
	}

	/* Use default TC unless overridden later */
	tc = 0; /* XXX: Get default TC for traffic if >1 TC? */

	local_dcbx_cfg = &sc->hw.port_info->qos_cfg.local_dcbx_cfg;

#if defined(INET) || defined(INET6)
	/* In DSCP mode, map the IP TOS field's DSCP bits to a TC */
	if ((local_dcbx_cfg->pfc_mode == ICE_QOS_MODE_DSCP) &&
	    (pi->ipi_flags & (IPI_TX_IPV4 | IPI_TX_IPV6))) {
		u8 dscp_val = pi->ipi_ip_tos >> 2;
		tc = local_dcbx_cfg->dscp_map[dscp_val];
	} else
#endif /* defined(INET) || defined(INET6) */
	/* Otherwise map the VLAN user priority to a TC, if tagged */
	if (m->m_flags & M_VLANTAG) { /* ICE_QOS_MODE_VLAN */
		up = EVL_PRIOFTAG(m->m_pkthdr.ether_vtag);
		tc = local_dcbx_cfg->etscfg.prio_table[up];
	}

	/* Spread flows across the selected TC's queue range */
	tc_base_queue = vsi->tc_info[tc].qoffset;
	tc_qcount = vsi->tc_info[tc].qcount_tx;

	if (M_HASHTYPE_GET(m))
		return ((m->m_pkthdr.flowid % tc_qcount) + tc_base_queue);
	else
		return (tc_base_queue);
}
459