/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2024, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  3. Neither the name of the Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file ice_iflib_txrx.c
 * @brief iflib Tx/Rx hotpath
 *
 * Main location for the iflib Tx/Rx hotpath implementation.
 *
 * Contains the implementation for the iflib function callbacks and the
 * if_txrx ops structure.
 */

#include "ice_iflib.h"

/* Tx/Rx hotpath utility functions */
#include "ice_common_txrx.h"

/*
 * Driver private implementations
 *
 * These helpers carry out the real work; the iflib-facing callbacks below
 * are thin wrappers that resolve the queue from the softc/subinterface.
 */
static int _ice_ift_txd_encap(struct ice_tx_queue *txq, if_pkt_info_t pi);
static int _ice_ift_txd_credits_update(struct ice_softc *sc, struct ice_tx_queue *txq, bool clear);
static int _ice_ift_rxd_available(struct ice_rx_queue *rxq, qidx_t pidx, qidx_t budget);
static int _ice_ift_rxd_pkt_get(struct ice_rx_queue *rxq, if_rxd_info_t ri);
static void _ice_ift_rxd_refill(struct ice_rx_queue *rxq, uint32_t pidx,
    uint64_t *paddrs, uint16_t count);
static void _ice_ift_rxd_flush(struct ice_softc *sc, struct ice_rx_queue *rxq,
    uint32_t pidx);

/*
 * iflib txrx method declarations
 */
static int ice_ift_txd_encap(void *arg, if_pkt_info_t pi);
static int ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri);
static void ice_ift_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
static int ice_ift_txd_credits_update(void *arg, uint16_t txqid, bool clear);
static int ice_ift_rxd_available(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget);
static void ice_ift_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx, qidx_t pidx);
static void ice_ift_rxd_refill(void *arg, if_rxd_update_t iru);
static qidx_t ice_ift_queue_select(void *arg, struct mbuf *m,
    if_pkt_info_t pi);
static int ice_ift_txd_credits_update_subif(void *arg, uint16_t txqid, bool clear);
static int ice_ift_txd_encap_subif(void *arg, if_pkt_info_t pi);
static void ice_ift_txd_flush_subif(void *arg, uint16_t txqid, qidx_t pidx);
static int ice_ift_rxd_available_subif(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget);
static int ice_ift_rxd_pkt_get_subif(void *arg, if_rxd_info_t ri);
static void ice_ift_rxd_refill_subif(void *arg, if_rxd_update_t iru);
static void ice_ift_rxd_flush_subif(void *arg, uint16_t rxqid, uint8_t flidx, qidx_t pidx);

/* Macro to help extract the NIC mode flexible Rx descriptor fields from the
 * advanced 32byte Rx descriptors.
 */
#define RX_FLEX_NIC(desc, field) \
	(((struct ice_32b_rx_flex_desc_nic *)desc)->field)

/**
 * @var ice_txrx
 * @brief Tx/Rx operations for the iflib stack
 *
 * Structure defining the Tx and Rx related operations that iflib can request
 * the driver to perform. These are the main entry points for the hot path of
 * the transmit and receive paths in the iflib driver.
 */
struct if_txrx ice_txrx = {
	.ift_txd_encap = ice_ift_txd_encap,
	.ift_txd_flush = ice_ift_txd_flush,
	.ift_txd_credits_update = ice_ift_txd_credits_update,
	.ift_rxd_available = ice_ift_rxd_available,
	.ift_rxd_pkt_get = ice_ift_rxd_pkt_get,
	.ift_rxd_refill = ice_ift_rxd_refill,
	.ift_rxd_flush = ice_ift_rxd_flush,
	.ift_txq_select_v2 = ice_ift_queue_select,
};

/**
 * @var ice_subif_txrx
 * @brief Tx/Rx operations for the iflib stack, for subinterfaces
 *
 * Structure defining the Tx and Rx related operations that iflib can request
 * the subinterface driver to perform. These are the main entry points for the
 * hot path of the transmit and receive paths in the iflib driver.
 */
struct if_txrx ice_subif_txrx = {
	.ift_txd_credits_update = ice_ift_txd_credits_update_subif,
	.ift_txd_encap = ice_ift_txd_encap_subif,
	.ift_txd_flush = ice_ift_txd_flush_subif,
	.ift_rxd_available = ice_ift_rxd_available_subif,
	.ift_rxd_pkt_get = ice_ift_rxd_pkt_get_subif,
	.ift_rxd_refill = ice_ift_rxd_refill_subif,
	.ift_rxd_flush = ice_ift_rxd_flush_subif,
	/* NULL: subinterfaces fall back to iflib's default queue selection */
	.ift_txq_select_v2 = NULL,
};

/**
 * _ice_ift_txd_encap - prepare Tx descriptors for a packet
 * @txq: driver's TX queue context
 * @pi: packet info
 *
 * Prepares and encapsulates the given packet into Tx descriptors, in
 * preparation for sending to the transmit engine. Sets the necessary context
 * descriptors for TSO and other offloads, and prepares the last descriptor
 * for the writeback status.
 *
 * Return 0 on success, non-zero error code on failure.
 */
static int
_ice_ift_txd_encap(struct ice_tx_queue *txq, if_pkt_info_t pi)
{
	int nsegs = pi->ipi_nsegs;
	bus_dma_segment_t *segs = pi->ipi_segs;
	struct ice_tx_desc *txd = NULL;
	int i, j, mask, pidx_last;
	u32 cmd, off;

	cmd = off = 0;
	i = pi->ipi_pidx;

	/* Set up the TSO/CSUM offload */
	if (pi->ipi_csum_flags & ICE_CSUM_OFFLOAD) {
		/* Set up the TSO context descriptor if required */
		if (pi->ipi_csum_flags & CSUM_TSO) {
			/* iflib may hand us an mbuf chain the hardware cannot
			 * segment; reject it so iflib defragments and retries.
			 */
			if (ice_tso_detect_sparse(pi))
				return (EFBIG);
			/* ice_tso_setup() consumes a context descriptor and
			 * returns the next free ring index.
			 */
			i = ice_tso_setup(txq, pi);
		}
		ice_tx_setup_offload(txq, pi, &cmd, &off);
	}
	/* Insert the VLAN tag from the mbuf via L2TAG1 in the descriptor */
	if (pi->ipi_mflags & M_VLANTAG)
		cmd |= ICE_TX_DESC_CMD_IL2TAG1;

	/* NOTE(review): masking implements ring wraparound, which is only
	 * correct if desc_count is a power of two — assumed, confirm at
	 * queue-setup time.
	 */
	mask = txq->desc_count - 1;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;

		txd = &txq->tx_base[i];
		seglen = segs[j].ds_len;

		/* One data descriptor per DMA segment */
		txd->buf_addr = htole64(segs[j].ds_addr);
		txd->cmd_type_offset_bsz =
		    htole64(ICE_TX_DESC_DTYPE_DATA
		    | ((u64)cmd << ICE_TXD_QW1_CMD_S)
		    | ((u64)off << ICE_TXD_QW1_OFFSET_S)
		    | ((u64)seglen << ICE_TXD_QW1_TX_BUF_SZ_S)
		    | ((u64)htole16(pi->ipi_vtag) << ICE_TXD_QW1_L2TAG1_S));

		txq->stats.tx_bytes += seglen;
		pidx_last = i;
		i = (i+1) & mask;
	}

	/* Set the last descriptor for report: EOP marks end of packet, RS
	 * requests a writeback so the cleanup path can detect completion.
	 */
#define ICE_TXD_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)
	txd->cmd_type_offset_bsz |=
	    htole64(((u64)ICE_TXD_CMD << ICE_TXD_QW1_CMD_S));

	/* Add to report status array so credits_update knows which
	 * descriptor to poll for this packet's completion.
	 */
	txq->tx_rsq[txq->tx_rs_pidx] = pidx_last;
	txq->tx_rs_pidx = (txq->tx_rs_pidx+1) & mask;
	MPASS(txq->tx_rs_pidx != txq->tx_rs_cidx);

	/* Tell iflib where the ring producer index now stands */
	pi->ipi_new_pidx = i;

	++txq->stats.tx_packets;
	return (0);
}

/**
 * ice_ift_txd_encap - prepare Tx descriptors for a packet
 * @arg: the iflib softc structure pointer
 * @pi: packet info
 *
 * Prepares and encapsulates the given packet into Tx descriptors, in
 * preparation for sending to the transmit engine. Sets the necessary context
 * descriptors for TSO and other offloads, and prepares the last descriptor
 * for the writeback status.
 *
 * Return 0 on success, non-zero error code on failure.
 */
static int
ice_ift_txd_encap(void *arg, if_pkt_info_t pi)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	/* PF VSI queue index comes from iflib's packet info */
	struct ice_tx_queue *txq = &sc->pf_vsi.tx_queues[pi->ipi_qsidx];

	return _ice_ift_txd_encap(txq, pi);
}

/**
 * ice_ift_txd_flush - Flush Tx descriptors to hardware
 * @arg: device specific softc pointer
 * @txqid: the Tx queue to flush
 * @pidx: descriptor index to advance tail to
 *
 * Advance the Transmit Descriptor Tail (TDT). This indicates to hardware that
 * frames are available for transmit.
 */
static void
ice_ift_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	struct ice_tx_queue *txq = &sc->pf_vsi.tx_queues[txqid];
	struct ice_hw *hw = &sc->hw;

	wr32(hw, txq->tail, pidx);
}

/**
 * _ice_ift_txd_credits_update - cleanup Tx descriptors
 * @sc: device private softc
 * @txq: the Tx queue to update
 * @clear: if false, only report, do not actually clean
 *
 * If clear is false, iflib is asking if we *could* clean up any Tx
 * descriptors.
 *
 * If clear is true, iflib is requesting to cleanup and reclaim used Tx
 * descriptors.
 *
 * Called by other txd_credits_update functions passed to iflib.
 *
 * Returns the number of descriptors reclaimed (or reclaimable, when @clear
 * is false only 0/1 is reported).
 */
static int
_ice_ift_txd_credits_update(struct ice_softc *sc __unused, struct ice_tx_queue *txq, bool clear)
{
	qidx_t processed = 0;
	qidx_t cur, prev, ntxd, rs_cidx;
	int32_t delta;
	bool is_done;

	/* Empty report-status queue: nothing was queued for writeback */
	rs_cidx = txq->tx_rs_cidx;
	if (rs_cidx == txq->tx_rs_pidx)
		return (0);
	cur = txq->tx_rsq[rs_cidx];
	MPASS(cur != QIDX_INVALID);
	is_done = ice_is_tx_desc_done(&txq->tx_base[cur]);

	if (!is_done)
		return (0);
	else if (clear == false)
		return (1);

	/* Walk the report-status ring, accumulating how many descriptors
	 * each completed packet covered (distance between successive
	 * writeback indices, with wraparound).
	 */
	prev = txq->tx_cidx_processed;
	ntxd = txq->desc_count;
	do {
		MPASS(prev != cur);
		delta = (int32_t)cur - (int32_t)prev;
		if (delta < 0)
			delta += ntxd;
		MPASS(delta > 0);
		processed += delta;
		prev = cur;
		rs_cidx = (rs_cidx + 1) & (ntxd-1);
		if (rs_cidx == txq->tx_rs_pidx)
			break;
		cur = txq->tx_rsq[rs_cidx];
		MPASS(cur != QIDX_INVALID);
		is_done = ice_is_tx_desc_done(&txq->tx_base[cur]);
	} while (is_done);

	txq->tx_rs_cidx = rs_cidx;
	txq->tx_cidx_processed = prev;

	return (processed);
}

/**
 * ice_ift_txd_credits_update - cleanup PF VSI Tx descriptors
 * @arg: device private softc
 * @txqid: the Tx queue to update
 * @clear: if false, only report, do not actually clean
 *
 * Wrapper for _ice_ift_txd_credits_update() meant for TX queues that
 * belong to the PF VSI.
 *
 * @see _ice_ift_txd_credits_update()
 */
static int
ice_ift_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	struct ice_tx_queue *txq = &sc->pf_vsi.tx_queues[txqid];

	return _ice_ift_txd_credits_update(sc, txq, clear);
}

/**
 * _ice_ift_rxd_available - Return number of available Rx packets
 * @rxq: RX queue driver structure
 * @pidx: descriptor start point
 * @budget: maximum Rx budget
 *
 * Determines how many Rx packets are available on the queue, up to a maximum
 * of the given budget.
32171d10453SEric Joyner */ 32271d10453SEric Joyner static int 323*9e54973fSEric Joyner _ice_ift_rxd_available(struct ice_rx_queue *rxq, qidx_t pidx, qidx_t budget) 32471d10453SEric Joyner { 32571d10453SEric Joyner union ice_32b_rx_flex_desc *rxd; 32671d10453SEric Joyner uint16_t status0; 32771d10453SEric Joyner int cnt, i, nrxd; 32871d10453SEric Joyner 32971d10453SEric Joyner nrxd = rxq->desc_count; 33071d10453SEric Joyner 33171d10453SEric Joyner for (cnt = 0, i = pidx; cnt < nrxd - 1 && cnt < budget;) { 33271d10453SEric Joyner rxd = &rxq->rx_base[i]; 33371d10453SEric Joyner status0 = le16toh(rxd->wb.status_error0); 33471d10453SEric Joyner 33571d10453SEric Joyner if ((status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) == 0) 33671d10453SEric Joyner break; 33771d10453SEric Joyner if (++i == nrxd) 33871d10453SEric Joyner i = 0; 33971d10453SEric Joyner if (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)) 34071d10453SEric Joyner cnt++; 34171d10453SEric Joyner } 34271d10453SEric Joyner 34371d10453SEric Joyner return (cnt); 34471d10453SEric Joyner } 34571d10453SEric Joyner 34671d10453SEric Joyner /** 347*9e54973fSEric Joyner * ice_ift_rxd_available - Return number of available Rx packets 348*9e54973fSEric Joyner * @arg: device private softc 349*9e54973fSEric Joyner * @rxqid: the Rx queue id 350*9e54973fSEric Joyner * @pidx: descriptor start point 351*9e54973fSEric Joyner * @budget: maximum Rx budget 352*9e54973fSEric Joyner * 353*9e54973fSEric Joyner * Wrapper for _ice_ift_rxd_available() that provides a function pointer 354*9e54973fSEric Joyner * that iflib requires for RX processing. 
355*9e54973fSEric Joyner */ 356*9e54973fSEric Joyner static int 357*9e54973fSEric Joyner ice_ift_rxd_available(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget) 358*9e54973fSEric Joyner { 359*9e54973fSEric Joyner struct ice_softc *sc = (struct ice_softc *)arg; 360*9e54973fSEric Joyner struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[rxqid]; 361*9e54973fSEric Joyner 362*9e54973fSEric Joyner return _ice_ift_rxd_available(rxq, pidx, budget); 363*9e54973fSEric Joyner } 364*9e54973fSEric Joyner 365*9e54973fSEric Joyner /** 36671d10453SEric Joyner * ice_ift_rxd_pkt_get - Called by iflib to send data to upper layer 36771d10453SEric Joyner * @arg: device specific softc 36871d10453SEric Joyner * @ri: receive packet info 36971d10453SEric Joyner * 370*9e54973fSEric Joyner * Wrapper function for _ice_ift_rxd_pkt_get() that provides a function pointer 371*9e54973fSEric Joyner * used by iflib for RX packet processing. 372*9e54973fSEric Joyner */ 373*9e54973fSEric Joyner static int 374*9e54973fSEric Joyner ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri) 375*9e54973fSEric Joyner { 376*9e54973fSEric Joyner struct ice_softc *sc = (struct ice_softc *)arg; 377*9e54973fSEric Joyner struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[ri->iri_qsidx]; 378*9e54973fSEric Joyner 379*9e54973fSEric Joyner return _ice_ift_rxd_pkt_get(rxq, ri); 380*9e54973fSEric Joyner } 381*9e54973fSEric Joyner 382*9e54973fSEric Joyner /** 383*9e54973fSEric Joyner * _ice_ift_rxd_pkt_get - Called by iflib to send data to upper layer 384*9e54973fSEric Joyner * @rxq: RX queue driver structure 385*9e54973fSEric Joyner * @ri: receive packet info 386*9e54973fSEric Joyner * 38771d10453SEric Joyner * This function is called by iflib, and executes in ithread context. It is 38871d10453SEric Joyner * called by iflib to obtain data which has been DMA'ed into host memory. 38956429daeSEric Joyner * Returns zero on success, and EBADMSG on failure. 
 */
static int
_ice_ift_rxd_pkt_get(struct ice_rx_queue *rxq, if_rxd_info_t ri)
{
	union ice_32b_rx_flex_desc *cur;
	u16 status0, plen, ptype;
	bool eop;
	size_t cidx;
	int i;

	cidx = ri->iri_cidx;
	i = 0;
	/* Gather every descriptor fragment of one packet, starting at
	 * iflib's consumer index, until the EOF (end-of-frame) bit is seen.
	 */
	do {
		/* 5 descriptor receive limit */
		MPASS(i < ICE_MAX_RX_SEGS);

		cur = &rxq->rx_base[cidx];
		status0 = le16toh(cur->wb.status_error0);
		plen = le16toh(cur->wb.pkt_len) &
		    ICE_RX_FLX_DESC_PKT_LEN_M;

		/* we should never be called without a valid descriptor */
		MPASS((status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) != 0);

		ri->iri_len += plen;

		/* Clear the writeback status so the descriptor is not
		 * mistaken for a new completion after the ring wraps.
		 */
		cur->wb.status_error0 = 0;
		eop = (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S));

		/* Record this fragment for iflib (free list 0 only) */
		ri->iri_frags[i].irf_flid = 0;
		ri->iri_frags[i].irf_idx = cidx;
		ri->iri_frags[i].irf_len = plen;
		if (++cidx == rxq->desc_count)
			cidx = 0;
		i++;
	} while (!eop);

	/* End of Packet reached; cur is eop/last descriptor */

	/* Make sure packets with bad L2 values are discarded.
	 * This bit is only valid in the last descriptor.
	 */
	if (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S)) {
		rxq->stats.desc_errs++;
		return (EBADMSG);
	}

	/* Get VLAN tag information if one is in descriptor */
	if (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
		ri->iri_vtag = le16toh(cur->wb.l2tag1);
		ri->iri_flags |= M_VLANTAG;
	}

	/* Capture soft statistics for this Rx queue */
	rxq->stats.rx_packets++;
	rxq->stats.rx_bytes += ri->iri_len;

	/* Get packet type and set checksum flags, but only when the
	 * interface has RX checksum offload enabled.
	 */
	ptype = le16toh(cur->wb.ptype_flex_flags0) &
	    ICE_RX_FLEX_DESC_PTYPE_M;
	if ((if_getcapenable(ri->iri_ifp) & IFCAP_RXCSUM) != 0)
		ice_rx_checksum(rxq, &ri->iri_csum_flags,
		    &ri->iri_csum_data, status0, ptype);

	/* Set remaining iflib RX descriptor info fields */
	ri->iri_flowid = le32toh(RX_FLEX_NIC(&cur->wb, rss_hash));
	ri->iri_rsstype = ice_ptype_to_hash(ptype);
	ri->iri_nfrags = i;
	return (0);
}

/**
 * ice_ift_rxd_refill - Prepare Rx descriptors for re-use by hardware
 * @arg: device specific softc structure
 * @iru: the Rx descriptor update structure
 *
 * Wrapper function for _ice_ift_rxd_refill() that provides a function pointer
 * used by iflib for RX packet processing.
 */
static void
ice_ift_rxd_refill(void *arg, if_rxd_update_t iru)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	struct ice_rx_queue *rxq;
	uint64_t *paddrs;
	uint32_t pidx;
	uint16_t qsidx, count;

	/* Unpack the iflib refill request */
	paddrs = iru->iru_paddrs;
	pidx = iru->iru_pidx;
	qsidx = iru->iru_qsidx;
	count = iru->iru_count;

	rxq = &(sc->pf_vsi.rx_queues[qsidx]);

	_ice_ift_rxd_refill(rxq, pidx, paddrs, count);
}

/**
 * _ice_ift_rxd_refill - Prepare Rx descriptors for re-use by hardware
 * @rxq: RX queue driver structure
 * @pidx: first index to refill
 * @paddrs: physical addresses to use
 * @count: number of descriptors to refill
 *
 * Update the Rx descriptor indices for a given queue, assigning new physical
 * addresses to the descriptors, preparing them for re-use by the hardware.
497*9e54973fSEric Joyner */ 498*9e54973fSEric Joyner static void 499*9e54973fSEric Joyner _ice_ift_rxd_refill(struct ice_rx_queue *rxq, uint32_t pidx, 500*9e54973fSEric Joyner uint64_t *paddrs, uint16_t count) 501*9e54973fSEric Joyner { 502*9e54973fSEric Joyner uint32_t next_pidx; 503*9e54973fSEric Joyner int i; 504*9e54973fSEric Joyner 50571d10453SEric Joyner for (i = 0, next_pidx = pidx; i < count; i++) { 50671d10453SEric Joyner rxq->rx_base[next_pidx].read.pkt_addr = htole64(paddrs[i]); 50771d10453SEric Joyner if (++next_pidx == (uint32_t)rxq->desc_count) 50871d10453SEric Joyner next_pidx = 0; 50971d10453SEric Joyner } 51071d10453SEric Joyner } 51171d10453SEric Joyner 51271d10453SEric Joyner /** 51371d10453SEric Joyner * ice_ift_rxd_flush - Flush Rx descriptors to hardware 51471d10453SEric Joyner * @arg: device specific softc pointer 51571d10453SEric Joyner * @rxqid: the Rx queue to flush 51671d10453SEric Joyner * @flidx: unused parameter 51771d10453SEric Joyner * @pidx: descriptor index to advance tail to 51871d10453SEric Joyner * 519*9e54973fSEric Joyner * Wrapper function for _ice_ift_rxd_flush() that provides a function pointer 520*9e54973fSEric Joyner * used by iflib for RX packet processing. 
 */
static void
ice_ift_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx __unused,
		  qidx_t pidx)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[rxqid];

	_ice_ift_rxd_flush(sc, rxq, (uint32_t)pidx);
}

/**
 * _ice_ift_rxd_flush - Flush Rx descriptors to hardware
 * @sc: device specific softc pointer
 * @rxq: RX queue driver structure
 * @pidx: descriptor index to advance tail to
 *
 * Advance the Receive Descriptor Tail (RDT). This indicates to hardware that
 * software is done with the descriptor and it can be recycled.
 */
static void
_ice_ift_rxd_flush(struct ice_softc *sc, struct ice_rx_queue *rxq, uint32_t pidx)
{
	wr32(&sc->hw, rxq->tail, pidx);
}

/**
 * ice_ift_queue_select - Select queue index to transmit packet on
 * @arg: device specific softc
 * @m: transmit packet data
 * @pi: transmit packet metadata
 *
 * Called by iflib to determine which queue index to transmit the packet
 * pointed to by @m on. In particular, ensures packets go out on the right
 * queue index for the right transmit class when multiple traffic classes are
 * enabled in the driver.
 */
static qidx_t
ice_ift_queue_select(void *arg, struct mbuf *m, if_pkt_info_t pi)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	struct ice_dcbx_cfg *local_dcbx_cfg;
	struct ice_vsi *vsi = &sc->pf_vsi;
	u16 tc_base_queue, tc_qcount;
	u8 up, tc;

#ifdef ALTQ
	/* Included to match default iflib behavior */
	/* Only go out on default queue if ALTQ is enabled */
	struct ifnet *ifp = (struct ifnet *)iflib_get_ifp(sc->ctx);
	if (if_altq_is_enabled(ifp))
		return (0);
#endif

	/* Single traffic class: plain RSS-hash spread across all queues */
	if (!ice_test_state(&sc->state, ICE_STATE_MULTIPLE_TCS)) {
		if (M_HASHTYPE_GET(m)) {
			/* Default iflib queue selection method */
			return (m->m_pkthdr.flowid % sc->pf_vsi.num_tx_queues);
		} else
			return (0);
	}

	/* Use default TC unless overridden later */
	tc = 0; /* XXX: Get default TC for traffic if >1 TC? */

	local_dcbx_cfg = &sc->hw.port_info->qos_cfg.local_dcbx_cfg;

#if defined(INET) || defined(INET6)
	/* DSCP mode: map the IP TOS field's DSCP value (upper 6 bits) to a
	 * traffic class via the negotiated DCBX DSCP table.
	 */
	if ((local_dcbx_cfg->pfc_mode == ICE_QOS_MODE_DSCP) &&
	    (pi->ipi_flags & (IPI_TX_IPV4 | IPI_TX_IPV6))) {
		u8 dscp_val = pi->ipi_ip_tos >> 2;
		tc = local_dcbx_cfg->dscp_map[dscp_val];
	} else
#endif /* defined(INET) || defined(INET6) */
	/* VLAN mode: map the 802.1p user priority to a traffic class */
	if (m->m_flags & M_VLANTAG) { /* ICE_QOS_MODE_VLAN */
		up = EVL_PRIOFTAG(m->m_pkthdr.ether_vtag);
		tc = local_dcbx_cfg->etscfg.prio_table[up];
	}

	/* Spread within the chosen TC's contiguous queue range */
	tc_base_queue = vsi->tc_info[tc].qoffset;
	tc_qcount = vsi->tc_info[tc].qcount_tx;

	if (M_HASHTYPE_GET(m))
		return ((m->m_pkthdr.flowid % tc_qcount) + tc_base_queue);
	else
		return (tc_base_queue);
}

/**
 * ice_ift_txd_credits_update_subif - cleanup subinterface VSI Tx descriptors
 * @arg: subinterface private structure (struct ice_mirr_if)
 * @txqid: the Tx queue to update
 * @clear: if false, only report, do not actually clean
 *
 * Wrapper for _ice_ift_txd_credits_update() meant for TX queues that
 * do not belong to the PF VSI.
 *
 * See _ice_ift_txd_credits_update().
 */
static int
ice_ift_txd_credits_update_subif(void *arg, uint16_t txqid, bool clear)
{
	struct ice_mirr_if *mif = (struct ice_mirr_if *)arg;
	struct ice_softc *sc = mif->back;
	struct ice_tx_queue *txq = &mif->vsi->tx_queues[txqid];

	return _ice_ift_txd_credits_update(sc, txq, clear);
}

/**
 * ice_ift_txd_encap_subif - prepare Tx descriptors for a packet
 * @arg: subinterface private structure (struct ice_mirr_if)
 * @pi: packet info
 *
 * Wrapper for _ice_ift_txd_encap() meant for TX queues that
 * do not belong to the PF VSI.
 *
 * See _ice_ift_txd_encap().
 */
static int
ice_ift_txd_encap_subif(void *arg, if_pkt_info_t pi)
{
	struct ice_mirr_if *mif = (struct ice_mirr_if *)arg;
	struct ice_tx_queue *txq = &mif->vsi->tx_queues[pi->ipi_qsidx];

	return _ice_ift_txd_encap(txq, pi);
}

/**
 * ice_ift_txd_flush_subif - Flush Tx descriptors to hardware
 * @arg: subinterface private structure (struct ice_mirr_if)
 * @txqid: the Tx queue to flush
 * @pidx: descriptor index to advance tail to
 *
 * Advance the Transmit Descriptor Tail (TDT).
Functionally identical to
 * the ice_ift_txd_flush() meant for the main PF VSI, but provides a function
 * pointer to iflib for use with non-main-PF VSI TX queues.
 */
static void
ice_ift_txd_flush_subif(void *arg, uint16_t txqid, qidx_t pidx)
{
	struct ice_mirr_if *mif = (struct ice_mirr_if *)arg;
	struct ice_tx_queue *txq = &mif->vsi->tx_queues[txqid];
	struct ice_hw *hw = &mif->back->hw;

	/* Bump the queue's tail register so HW sees the new descriptors */
	wr32(hw, txq->tail, pidx);
}

/**
 * ice_ift_rxd_available_subif - Return number of available Rx packets
 * @arg: subinterface private structure (struct ice_mirr_if)
 * @rxqid: the Rx queue id
 * @pidx: descriptor start point
 * @budget: maximum Rx budget
 *
 * Determines how many Rx packets are available on the queue, up to a maximum
 * of the given budget.
 *
 * See _ice_ift_rxd_available().
680*9e54973fSEric Joyner */ 681*9e54973fSEric Joyner static int 682*9e54973fSEric Joyner ice_ift_rxd_available_subif(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget) 683*9e54973fSEric Joyner { 684*9e54973fSEric Joyner struct ice_mirr_if *mif = (struct ice_mirr_if *)arg; 685*9e54973fSEric Joyner struct ice_rx_queue *rxq = &mif->vsi->rx_queues[rxqid]; 686*9e54973fSEric Joyner 687*9e54973fSEric Joyner return _ice_ift_rxd_available(rxq, pidx, budget); 688*9e54973fSEric Joyner } 689*9e54973fSEric Joyner 690*9e54973fSEric Joyner /** 691*9e54973fSEric Joyner * ice_ift_rxd_pkt_get_subif - Called by iflib to send data to upper layer 692*9e54973fSEric Joyner * @arg: subinterface private structure (struct ice_mirr_if) 693*9e54973fSEric Joyner * @ri: receive packet info 694*9e54973fSEric Joyner * 695*9e54973fSEric Joyner * Wrapper function for _ice_ift_rxd_pkt_get() that provides a function pointer 696*9e54973fSEric Joyner * used by iflib for RX packet processing, for iflib subinterfaces. 697*9e54973fSEric Joyner */ 698*9e54973fSEric Joyner static int 699*9e54973fSEric Joyner ice_ift_rxd_pkt_get_subif(void *arg, if_rxd_info_t ri) 700*9e54973fSEric Joyner { 701*9e54973fSEric Joyner struct ice_mirr_if *mif = (struct ice_mirr_if *)arg; 702*9e54973fSEric Joyner struct ice_rx_queue *rxq = &mif->vsi->rx_queues[ri->iri_qsidx]; 703*9e54973fSEric Joyner 704*9e54973fSEric Joyner return _ice_ift_rxd_pkt_get(rxq, ri); 705*9e54973fSEric Joyner } 706*9e54973fSEric Joyner 707*9e54973fSEric Joyner /** 708*9e54973fSEric Joyner * ice_ift_rxd_refill_subif - Prepare Rx descriptors for re-use by hardware 709*9e54973fSEric Joyner * @arg: subinterface private structure (struct ice_mirr_if) 710*9e54973fSEric Joyner * @iru: the Rx descriptor update structure 711*9e54973fSEric Joyner * 712*9e54973fSEric Joyner * Wrapper function for _ice_ift_rxd_refill() that provides a function pointer 713*9e54973fSEric Joyner * used by iflib for RX packet processing, for iflib subinterfaces. 
714*9e54973fSEric Joyner */ 715*9e54973fSEric Joyner static void 716*9e54973fSEric Joyner ice_ift_rxd_refill_subif(void *arg, if_rxd_update_t iru) 717*9e54973fSEric Joyner { 718*9e54973fSEric Joyner struct ice_mirr_if *mif = (struct ice_mirr_if *)arg; 719*9e54973fSEric Joyner struct ice_rx_queue *rxq = &mif->vsi->rx_queues[iru->iru_qsidx]; 720*9e54973fSEric Joyner 721*9e54973fSEric Joyner uint64_t *paddrs; 722*9e54973fSEric Joyner uint32_t pidx; 723*9e54973fSEric Joyner uint16_t count; 724*9e54973fSEric Joyner 725*9e54973fSEric Joyner paddrs = iru->iru_paddrs; 726*9e54973fSEric Joyner pidx = iru->iru_pidx; 727*9e54973fSEric Joyner count = iru->iru_count; 728*9e54973fSEric Joyner 729*9e54973fSEric Joyner _ice_ift_rxd_refill(rxq, pidx, paddrs, count); 730*9e54973fSEric Joyner } 731*9e54973fSEric Joyner 732*9e54973fSEric Joyner /** 733*9e54973fSEric Joyner * ice_ift_rxd_flush_subif - Flush Rx descriptors to hardware 734*9e54973fSEric Joyner * @arg: subinterface private structure (struct ice_mirr_if) 735*9e54973fSEric Joyner * @rxqid: the Rx queue to flush 736*9e54973fSEric Joyner * @flidx: unused parameter 737*9e54973fSEric Joyner * @pidx: descriptor index to advance tail to 738*9e54973fSEric Joyner * 739*9e54973fSEric Joyner * Wrapper function for _ice_ift_rxd_flush() that provides a function pointer 740*9e54973fSEric Joyner * used by iflib for RX packet processing. 741*9e54973fSEric Joyner */ 742*9e54973fSEric Joyner static void 743*9e54973fSEric Joyner ice_ift_rxd_flush_subif(void *arg, uint16_t rxqid, uint8_t flidx __unused, 744*9e54973fSEric Joyner qidx_t pidx) 745*9e54973fSEric Joyner { 746*9e54973fSEric Joyner struct ice_mirr_if *mif = (struct ice_mirr_if *)arg; 747*9e54973fSEric Joyner struct ice_rx_queue *rxq = &mif->vsi->rx_queues[rxqid]; 748*9e54973fSEric Joyner 749*9e54973fSEric Joyner _ice_ift_rxd_flush(mif->back, rxq, pidx); 750*9e54973fSEric Joyner } 751