/* SPDX-License-Identifier: BSD-3-Clause */
/*  Copyright (c) 2024, Intel Corporation
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice,
 *      this list of conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 *   3. Neither the name of the Intel Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived from
 *      this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file ice_iflib_txrx.c
 * @brief iflib Tx/Rx hotpath
 *
 * Main location for the iflib Tx/Rx hotpath implementation.
 *
 * Contains the implementation for the iflib function callbacks and the
 * if_txrx ops structure.
 */

#include "ice_iflib.h"

/* Tx/Rx hotpath utility functions */
#include "ice_common_txrx.h"

/*
 * Driver private implementations
 */
static int _ice_ift_txd_encap(struct ice_tx_queue *txq, if_pkt_info_t pi);
static int _ice_ift_txd_credits_update(struct ice_softc *sc, struct ice_tx_queue *txq, bool clear);
static int _ice_ift_rxd_available(struct ice_rx_queue *rxq, qidx_t pidx, qidx_t budget);
static int _ice_ift_rxd_pkt_get(struct ice_rx_queue *rxq, if_rxd_info_t ri);
static void _ice_ift_rxd_refill(struct ice_rx_queue *rxq, uint32_t pidx,
				uint64_t *paddrs, uint16_t count);
static void _ice_ift_rxd_flush(struct ice_softc *sc, struct ice_rx_queue *rxq,
			       uint32_t pidx);

/*
 * iflib txrx method declarations
 */
static int ice_ift_txd_encap(void *arg, if_pkt_info_t pi);
static int ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri);
static void ice_ift_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
static int ice_ift_txd_credits_update(void *arg, uint16_t txqid, bool clear);
static int ice_ift_rxd_available(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget);
static void ice_ift_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx, qidx_t pidx);
static void ice_ift_rxd_refill(void *arg, if_rxd_update_t iru);
static qidx_t ice_ift_queue_select(void *arg, struct mbuf *m, if_pkt_info_t pi);
static int ice_ift_txd_credits_update_subif(void *arg, uint16_t txqid, bool clear);
static int ice_ift_txd_encap_subif(void *arg, if_pkt_info_t pi);
static void ice_ift_txd_flush_subif(void *arg, uint16_t txqid, qidx_t pidx);
static int ice_ift_rxd_available_subif(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget);
static int ice_ift_rxd_pkt_get_subif(void *arg, if_rxd_info_t ri);
static void ice_ift_rxd_refill_subif(void *arg, if_rxd_update_t iru);
static void ice_ift_rxd_flush_subif(void *arg, uint16_t rxqid, uint8_t flidx, qidx_t pidx);

/* Macro to help extract the NIC mode flexible Rx descriptor fields from the
 * advanced 32-byte Rx descriptors.
 */
#define RX_FLEX_NIC(desc, field) \
	(((struct ice_32b_rx_flex_desc_nic *)desc)->field)

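/*
 * Usage sketch for RX_FLEX_NIC(): given a pointer "desc" to a completed
 * NIC-mode descriptor, the hardware-computed RSS hash can be read as, e.g.:
 *
 *	uint32_t hash = le32toh(RX_FLEX_NIC(&desc->wb, rss_hash));
 *
 * This mirrors the flowid extraction in _ice_ift_rxd_pkt_get() below.
 */
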
/**
 * @var ice_txrx
 * @brief Tx/Rx operations for the iflib stack
 *
 * Structure defining the Tx and Rx related operations that iflib can request
 * the driver to perform. These are the main entry points for the hot path of
 * the transmit and receive paths in the iflib driver.
 */
struct if_txrx ice_txrx = {
	.ift_txd_encap = ice_ift_txd_encap,
	.ift_txd_flush = ice_ift_txd_flush,
	.ift_txd_credits_update = ice_ift_txd_credits_update,
	.ift_rxd_available = ice_ift_rxd_available,
	.ift_rxd_pkt_get = ice_ift_rxd_pkt_get,
	.ift_rxd_refill = ice_ift_rxd_refill,
	.ift_rxd_flush = ice_ift_rxd_flush,
	.ift_txq_select_v2 = ice_ift_queue_select,
};
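
/*
 * Illustrative sketch (not part of this file): an iflib driver typically
 * publishes these ops by pointing the shared softc context at the structure
 * during attach, along the lines of:
 *
 *	if_softc_ctx_t scctx = iflib_get_softc_ctx(ctx);
 *	scctx->isc_txrx = &ice_txrx;
 *
 * The exact hookup for this driver lives in its attach path.
 */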

/**
 * @var ice_subif_txrx
 * @brief Tx/Rx operations for the iflib stack, for subinterfaces
 *
 * Structure defining the Tx and Rx related operations that iflib can request
 * the subinterface driver to perform. These are the main entry points for the
 * hot path of the transmit and receive paths in the iflib driver.
 */
struct if_txrx ice_subif_txrx = {
	.ift_txd_credits_update = ice_ift_txd_credits_update_subif,
	.ift_txd_encap = ice_ift_txd_encap_subif,
	.ift_txd_flush = ice_ift_txd_flush_subif,
	.ift_rxd_available = ice_ift_rxd_available_subif,
	.ift_rxd_pkt_get = ice_ift_rxd_pkt_get_subif,
	.ift_rxd_refill = ice_ift_rxd_refill_subif,
	.ift_rxd_flush = ice_ift_rxd_flush_subif,
	.ift_txq_select_v2 = NULL,
};

/**
 * _ice_ift_txd_encap - prepare Tx descriptors for a packet
 * @txq: driver's TX queue context
 * @pi: packet info
 *
 * Prepares and encapsulates the given packet into Tx descriptors, in
 * preparation for sending to the transmit engine. Sets the necessary context
 * descriptors for TSO and other offloads, and prepares the last descriptor
 * for the writeback status.
 *
 * Return 0 on success, non-zero error code on failure.
 */
static int
_ice_ift_txd_encap(struct ice_tx_queue *txq, if_pkt_info_t pi)
{
	int nsegs = pi->ipi_nsegs;
	bus_dma_segment_t *segs = pi->ipi_segs;
	struct ice_tx_desc *txd = NULL;
	int i, j, mask, pidx_last;
	u32 cmd, off;

	cmd = off = 0;
	i = pi->ipi_pidx;

	/* Set up the TSO/CSUM offload */
	if (pi->ipi_csum_flags & ICE_CSUM_OFFLOAD) {
		/* Set up the TSO context descriptor if required */
		if (pi->ipi_csum_flags & CSUM_TSO) {
			if (ice_tso_detect_sparse(pi))
				return (EFBIG);
			i = ice_tso_setup(txq, pi);
		}
		ice_tx_setup_offload(txq, pi, &cmd, &off);
	}
	if (pi->ipi_mflags & M_VLANTAG)
		cmd |= ICE_TX_DESC_CMD_IL2TAG1;

	mask = txq->desc_count - 1;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;

		txd = &txq->tx_base[i];
		seglen = segs[j].ds_len;

		txd->buf_addr = htole64(segs[j].ds_addr);
		txd->cmd_type_offset_bsz =
		    htole64(ICE_TX_DESC_DTYPE_DATA
		    | ((u64)cmd << ICE_TXD_QW1_CMD_S)
		    | ((u64)off << ICE_TXD_QW1_OFFSET_S)
		    | ((u64)seglen << ICE_TXD_QW1_TX_BUF_SZ_S)
		    | ((u64)htole16(pi->ipi_vtag) << ICE_TXD_QW1_L2TAG1_S));

		txq->stats.tx_bytes += seglen;
		pidx_last = i;
		i = (i + 1) & mask;
	}

	/* Set the last descriptor for report */
#define ICE_TXD_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)
	txd->cmd_type_offset_bsz |=
	    htole64(((u64)ICE_TXD_CMD << ICE_TXD_QW1_CMD_S));

	/* Add to report status array */
	txq->tx_rsq[txq->tx_rs_pidx] = pidx_last;
	txq->tx_rs_pidx = (txq->tx_rs_pidx + 1) & mask;
	MPASS(txq->tx_rs_pidx != txq->tx_rs_cidx);

	pi->ipi_new_pidx = i;

	++txq->stats.tx_packets;
	return (0);
}

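/*
 * Worked example of the ring-wrap arithmetic above (the mask assumes the
 * descriptor count is a power of two): with desc_count == 512, mask == 511,
 * so advancing from i == 511 gives (511 + 1) & 511 == 0, wrapping cleanly
 * to the start of the ring.
 */
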
/**
 * ice_ift_txd_encap - prepare Tx descriptors for a packet
 * @arg: the iflib softc structure pointer
 * @pi: packet info
 *
 * Prepares and encapsulates the given packet into Tx descriptors, in
 * preparation for sending to the transmit engine. Sets the necessary context
 * descriptors for TSO and other offloads, and prepares the last descriptor
 * for the writeback status.
 *
 * Return 0 on success, non-zero error code on failure.
 */
static int
ice_ift_txd_encap(void *arg, if_pkt_info_t pi)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	struct ice_tx_queue *txq = &sc->pf_vsi.tx_queues[pi->ipi_qsidx];

	return _ice_ift_txd_encap(txq, pi);
}

/**
 * ice_ift_txd_flush - Flush Tx descriptors to hardware
 * @arg: device specific softc pointer
 * @txqid: the Tx queue to flush
 * @pidx: descriptor index to advance tail to
 *
 * Advance the Transmit Descriptor Tail (TDT). This indicates to hardware that
 * frames are available for transmit.
 */
static void
ice_ift_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	struct ice_tx_queue *txq = &sc->pf_vsi.tx_queues[txqid];
	struct ice_hw *hw = &sc->hw;

	wr32(hw, txq->tail, pidx);
}

/**
 * _ice_ift_txd_credits_update - cleanup Tx descriptors
 * @sc: device private softc
 * @txq: the Tx queue to update
 * @clear: if false, only report, do not actually clean
 *
 * If clear is false, iflib is asking if we *could* clean up any Tx
 * descriptors.
 *
 * If clear is true, iflib is requesting to cleanup and reclaim used Tx
 * descriptors.
 *
 * Called by other txd_credits_update functions passed to iflib.
 */
static int
_ice_ift_txd_credits_update(struct ice_softc *sc __unused, struct ice_tx_queue *txq, bool clear)
{
	qidx_t processed = 0;
	qidx_t cur, prev, ntxd, rs_cidx;
	int32_t delta;
	bool is_done;

	rs_cidx = txq->tx_rs_cidx;
	if (rs_cidx == txq->tx_rs_pidx)
		return (0);
	cur = txq->tx_rsq[rs_cidx];
	MPASS(cur != QIDX_INVALID);
	is_done = ice_is_tx_desc_done(&txq->tx_base[cur]);

	if (!is_done)
		return (0);
	else if (clear == false)
		return (1);

	prev = txq->tx_cidx_processed;
	ntxd = txq->desc_count;
	do {
		MPASS(prev != cur);
		delta = (int32_t)cur - (int32_t)prev;
		if (delta < 0)
			delta += ntxd;
		MPASS(delta > 0);
		processed += delta;
		prev = cur;
		rs_cidx = (rs_cidx + 1) & (ntxd - 1);
		if (rs_cidx == txq->tx_rs_pidx)
			break;
		cur = txq->tx_rsq[rs_cidx];
		MPASS(cur != QIDX_INVALID);
		is_done = ice_is_tx_desc_done(&txq->tx_base[cur]);
	} while (is_done);

	txq->tx_rs_cidx = rs_cidx;
	txq->tx_cidx_processed = prev;

	return (processed);
}

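/*
 * Worked example of the wrap-aware delta above: with ntxd == 1024,
 * prev == 1020 and cur == 4, delta = 4 - 1020 = -1016; adding ntxd gives
 * -1016 + 1024 = 8, i.e. eight descriptors were completed across the
 * end-of-ring boundary.
 */
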
/**
 * ice_ift_txd_credits_update - cleanup PF VSI Tx descriptors
 * @arg: device private softc
 * @txqid: the Tx queue to update
 * @clear: if false, only report, do not actually clean
 *
 * Wrapper for _ice_ift_txd_credits_update() meant for TX queues that
 * belong to the PF VSI.
 *
 * @see _ice_ift_txd_credits_update()
 */
static int
ice_ift_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	struct ice_tx_queue *txq = &sc->pf_vsi.tx_queues[txqid];

	return _ice_ift_txd_credits_update(sc, txq, clear);
}

/**
 * _ice_ift_rxd_available - Return number of available Rx packets
 * @rxq: RX queue driver structure
 * @pidx: descriptor start point
 * @budget: maximum Rx budget
 *
 * Determines how many Rx packets are available on the queue, up to a maximum
 * of the given budget.
 */
static int
_ice_ift_rxd_available(struct ice_rx_queue *rxq, qidx_t pidx, qidx_t budget)
{
	union ice_32b_rx_flex_desc *rxd;
	uint16_t status0;
	int cnt, i, nrxd;

	nrxd = rxq->desc_count;

	for (cnt = 0, i = pidx; cnt < nrxd - 1 && cnt < budget;) {
		rxd = &rxq->rx_base[i];
		status0 = le16toh(rxd->wb.status_error0);

		if ((status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) == 0)
			break;
		if (++i == nrxd)
			i = 0;
		if (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S))
			cnt++;
	}

	return (cnt);
}

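/*
 * Counting sketch for the loop above: cnt counts packets, not descriptors.
 * If three DD-marked descriptors form one jumbo frame (EOF set only on the
 * third), the loop walks all three but returns cnt == 1.
 */
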
/**
 * ice_ift_rxd_available - Return number of available Rx packets
 * @arg: device private softc
 * @rxqid: the Rx queue id
 * @pidx: descriptor start point
 * @budget: maximum Rx budget
 *
 * Wrapper for _ice_ift_rxd_available() that provides a function pointer
 * that iflib requires for RX processing.
 */
static int
ice_ift_rxd_available(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[rxqid];

	return _ice_ift_rxd_available(rxq, pidx, budget);
}

/**
 * ice_ift_rxd_pkt_get - Called by iflib to send data to upper layer
 * @arg: device specific softc
 * @ri: receive packet info
 *
 * Wrapper function for _ice_ift_rxd_pkt_get() that provides a function pointer
 * used by iflib for RX packet processing.
 */
static int
ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[ri->iri_qsidx];

	return _ice_ift_rxd_pkt_get(rxq, ri);
}

/**
 * _ice_ift_rxd_pkt_get - Called by iflib to send data to upper layer
 * @rxq: RX queue driver structure
 * @ri: receive packet info
 *
 * This function executes in ithread context. iflib calls it to obtain data
 * which has been DMA'ed into host memory. Returns zero on success, and
 * EBADMSG on failure.
 */
static int
_ice_ift_rxd_pkt_get(struct ice_rx_queue *rxq, if_rxd_info_t ri)
{
	union ice_32b_rx_flex_desc *cur;
	u16 status0, plen, ptype;
	bool eop;
	size_t cidx;
	int i;

	cidx = ri->iri_cidx;
	i = 0;
	do {
		/* 5 descriptor receive limit */
		MPASS(i < ICE_MAX_RX_SEGS);

		cur = &rxq->rx_base[cidx];
		status0 = le16toh(cur->wb.status_error0);
		plen = le16toh(cur->wb.pkt_len) &
			ICE_RX_FLX_DESC_PKT_LEN_M;

		/* we should never be called without a valid descriptor */
		MPASS((status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) != 0);

		ri->iri_len += plen;

		cur->wb.status_error0 = 0;
		eop = (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S));

		ri->iri_frags[i].irf_flid = 0;
		ri->iri_frags[i].irf_idx = cidx;
		ri->iri_frags[i].irf_len = plen;
		if (++cidx == rxq->desc_count)
			cidx = 0;
		i++;
	} while (!eop);

	/* End of Packet reached; cur is eop/last descriptor */

	/* Make sure packets with bad L2 values are discarded.
	 * This bit is only valid in the last descriptor.
	 */
	if (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S)) {
		rxq->stats.desc_errs++;
		return (EBADMSG);
	}

	/* Get VLAN tag information if one is in descriptor */
	if (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
		ri->iri_vtag = le16toh(cur->wb.l2tag1);
		ri->iri_flags |= M_VLANTAG;
	}

	/* Capture soft statistics for this Rx queue */
	rxq->stats.rx_packets++;
	rxq->stats.rx_bytes += ri->iri_len;

	/* Get packet type and set checksum flags */
	ptype = le16toh(cur->wb.ptype_flex_flags0) &
		ICE_RX_FLEX_DESC_PTYPE_M;
	if ((if_getcapenable(ri->iri_ifp) & IFCAP_RXCSUM) != 0)
		ice_rx_checksum(rxq, &ri->iri_csum_flags,
				&ri->iri_csum_data, status0, ptype);

	/* Set remaining iflib RX descriptor info fields */
	ri->iri_flowid = le32toh(RX_FLEX_NIC(&cur->wb, rss_hash));
	ri->iri_rsstype = ice_ptype_to_hash(ptype);
	ri->iri_nfrags = i;
	return (0);
}

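/*
 * Fragment-count sketch for the loop above: assuming 2048-byte receive
 * buffers (a common configuration, not something this file dictates), a
 * 9018-byte jumbo frame spans ceil(9018 / 2048) == 5 descriptors, which is
 * why the loop asserts fewer than ICE_MAX_RX_SEGS fragments per packet.
 */
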
/**
 * ice_ift_rxd_refill - Prepare Rx descriptors for re-use by hardware
 * @arg: device specific softc structure
 * @iru: the Rx descriptor update structure
 *
 * Wrapper function for _ice_ift_rxd_refill() that provides a function pointer
 * used by iflib for RX packet processing.
 */
static void
ice_ift_rxd_refill(void *arg, if_rxd_update_t iru)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	struct ice_rx_queue *rxq;
	uint64_t *paddrs;
	uint32_t pidx;
	uint16_t qsidx, count;

	paddrs = iru->iru_paddrs;
	pidx = iru->iru_pidx;
	qsidx = iru->iru_qsidx;
	count = iru->iru_count;

	rxq = &(sc->pf_vsi.rx_queues[qsidx]);

	_ice_ift_rxd_refill(rxq, pidx, paddrs, count);
}

/**
 * _ice_ift_rxd_refill - Prepare Rx descriptors for re-use by hardware
 * @rxq: RX queue driver structure
 * @pidx: first index to refill
 * @paddrs: physical addresses to use
 * @count: number of descriptors to refill
 *
 * Update the Rx descriptor indices for a given queue, assigning new physical
 * addresses to the descriptors, preparing them for re-use by the hardware.
 */
static void
_ice_ift_rxd_refill(struct ice_rx_queue *rxq, uint32_t pidx,
		    uint64_t *paddrs, uint16_t count)
{
	uint32_t next_pidx;
	int i;

	for (i = 0, next_pidx = pidx; i < count; i++) {
		rxq->rx_base[next_pidx].read.pkt_addr = htole64(paddrs[i]);
		if (++next_pidx == (uint32_t)rxq->desc_count)
			next_pidx = 0;
	}
}

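/*
 * Refill sketch: iflib batches refills, so a single call may wrap the ring.
 * E.g. with desc_count == 1024, pidx == 1022 and count == 4, the loop above
 * writes descriptors 1022, 1023, 0 and 1.
 */
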
/**
 * ice_ift_rxd_flush - Flush Rx descriptors to hardware
 * @arg: device specific softc pointer
 * @rxqid: the Rx queue to flush
 * @flidx: unused parameter
 * @pidx: descriptor index to advance tail to
 *
 * Wrapper function for _ice_ift_rxd_flush() that provides a function pointer
 * used by iflib for RX packet processing.
 */
static void
ice_ift_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx __unused,
		  qidx_t pidx)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[rxqid];

	_ice_ift_rxd_flush(sc, rxq, (uint32_t)pidx);
}

/**
 * _ice_ift_rxd_flush - Flush Rx descriptors to hardware
 * @sc: device specific softc pointer
 * @rxq: RX queue driver structure
 * @pidx: descriptor index to advance tail to
 *
 * Advance the Receive Descriptor Tail (RDT). This indicates to hardware that
 * software is done with the descriptor and it can be recycled.
 */
static void
_ice_ift_rxd_flush(struct ice_softc *sc, struct ice_rx_queue *rxq, uint32_t pidx)
{
	wr32(&sc->hw, rxq->tail, pidx);
}

/**
 * ice_ift_queue_select - Select queue index to transmit packet on
 * @arg: device specific softc
 * @m: transmit packet data
 * @pi: transmit packet metadata
 *
 * Called by iflib to determine the queue index on which to transmit the
 * packet pointed to by @m. In particular, this ensures packets go out on the
 * right queue index for the right traffic class when multiple traffic
 * classes are enabled in the driver.
 */
static qidx_t
ice_ift_queue_select(void *arg, struct mbuf *m, if_pkt_info_t pi)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	struct ice_dcbx_cfg *local_dcbx_cfg;
	struct ice_vsi *vsi = &sc->pf_vsi;
	u16 tc_base_queue, tc_qcount;
	u8 up, tc;

#ifdef ALTQ
	/* Included to match default iflib behavior */
	/* Only go out on default queue if ALTQ is enabled */
	struct ifnet *ifp = (struct ifnet *)iflib_get_ifp(sc->ctx);
	if (if_altq_is_enabled(ifp))
		return (0);
#endif

	if (!ice_test_state(&sc->state, ICE_STATE_MULTIPLE_TCS)) {
		if (M_HASHTYPE_GET(m)) {
			/* Default iflib queue selection method */
			return (m->m_pkthdr.flowid % sc->pf_vsi.num_tx_queues);
		} else
			return (0);
	}

	/* Use default TC unless overridden later */
	tc = 0; /* XXX: Get default TC for traffic if >1 TC? */

	local_dcbx_cfg = &sc->hw.port_info->qos_cfg.local_dcbx_cfg;

#if defined(INET) || defined(INET6)
	if ((local_dcbx_cfg->pfc_mode == ICE_QOS_MODE_DSCP) &&
	    (pi->ipi_flags & (IPI_TX_IPV4 | IPI_TX_IPV6))) {
		u8 dscp_val = pi->ipi_ip_tos >> 2;
		tc = local_dcbx_cfg->dscp_map[dscp_val];
	} else
#endif /* defined(INET) || defined(INET6) */
	if (m->m_flags & M_VLANTAG) { /* ICE_QOS_MODE_VLAN */
		up = EVL_PRIOFTAG(m->m_pkthdr.ether_vtag);
		tc = local_dcbx_cfg->etscfg.prio_table[up];
	}

	tc_base_queue = vsi->tc_info[tc].qoffset;
	tc_qcount = vsi->tc_info[tc].qcount_tx;

	if (M_HASHTYPE_GET(m))
		return ((m->m_pkthdr.flowid % tc_qcount) + tc_base_queue);
	else
		return (tc_base_queue);
}

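/*
 * Worked example for the DSCP branch above: a packet sent with IP TOS 0xB8
 * (DSCP 46, Expedited Forwarding) yields dscp_val = 0xB8 >> 2 = 46, so the
 * packet is steered to whichever TC local_dcbx_cfg->dscp_map[46] selects,
 * and then hashed across that TC's queue range.
 */
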
/**
 * ice_ift_txd_credits_update_subif - cleanup subinterface VSI Tx descriptors
 * @arg: subinterface private structure (struct ice_mirr_if)
 * @txqid: the Tx queue to update
 * @clear: if false, only report, do not actually clean
 *
 * Wrapper for _ice_ift_txd_credits_update() meant for TX queues that
 * do not belong to the PF VSI.
 *
 * @see _ice_ift_txd_credits_update()
 */
static int
ice_ift_txd_credits_update_subif(void *arg, uint16_t txqid, bool clear)
{
	struct ice_mirr_if *mif = (struct ice_mirr_if *)arg;
	struct ice_softc *sc = mif->back;
	struct ice_tx_queue *txq = &mif->vsi->tx_queues[txqid];

	return _ice_ift_txd_credits_update(sc, txq, clear);
}

/**
 * ice_ift_txd_encap_subif - prepare Tx descriptors for a packet
 * @arg: subinterface private structure (struct ice_mirr_if)
 * @pi: packet info
 *
 * Wrapper for _ice_ift_txd_encap() meant for TX queues that
 * do not belong to the PF VSI.
 *
 * @see _ice_ift_txd_encap()
 */
static int
ice_ift_txd_encap_subif(void *arg, if_pkt_info_t pi)
{
	struct ice_mirr_if *mif = (struct ice_mirr_if *)arg;
	struct ice_tx_queue *txq = &mif->vsi->tx_queues[pi->ipi_qsidx];

	return _ice_ift_txd_encap(txq, pi);
}

/**
 * ice_ift_txd_flush_subif - Flush Tx descriptors to hardware
 * @arg: subinterface private structure (struct ice_mirr_if)
 * @txqid: the Tx queue to flush
 * @pidx: descriptor index to advance tail to
 *
 * Advance the Transmit Descriptor Tail (TDT). Functionally identical to
 * ice_ift_txd_flush(), which is meant for the main PF VSI, but provides a
 * function pointer to iflib for use with non-main-PF VSI TX queues.
 */
static void
ice_ift_txd_flush_subif(void *arg, uint16_t txqid, qidx_t pidx)
{
	struct ice_mirr_if *mif = (struct ice_mirr_if *)arg;
	struct ice_tx_queue *txq = &mif->vsi->tx_queues[txqid];
	struct ice_hw *hw = &mif->back->hw;

	wr32(hw, txq->tail, pidx);
}

/**
 * ice_ift_rxd_available_subif - Return number of available Rx packets
 * @arg: subinterface private structure (struct ice_mirr_if)
 * @rxqid: the Rx queue id
 * @pidx: descriptor start point
 * @budget: maximum Rx budget
 *
 * Determines how many Rx packets are available on the queue, up to a maximum
 * of the given budget.
 *
 * @see _ice_ift_rxd_available()
 */
static int
ice_ift_rxd_available_subif(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget)
{
	struct ice_mirr_if *mif = (struct ice_mirr_if *)arg;
	struct ice_rx_queue *rxq = &mif->vsi->rx_queues[rxqid];

	return _ice_ift_rxd_available(rxq, pidx, budget);
}

/**
 * ice_ift_rxd_pkt_get_subif - Called by iflib to send data to upper layer
 * @arg: subinterface private structure (struct ice_mirr_if)
 * @ri: receive packet info
 *
 * Wrapper function for _ice_ift_rxd_pkt_get() that provides a function pointer
 * used by iflib for RX packet processing, for iflib subinterfaces.
 */
static int
ice_ift_rxd_pkt_get_subif(void *arg, if_rxd_info_t ri)
{
	struct ice_mirr_if *mif = (struct ice_mirr_if *)arg;
	struct ice_rx_queue *rxq = &mif->vsi->rx_queues[ri->iri_qsidx];

	return _ice_ift_rxd_pkt_get(rxq, ri);
}

/**
 * ice_ift_rxd_refill_subif - Prepare Rx descriptors for re-use by hardware
 * @arg: subinterface private structure (struct ice_mirr_if)
 * @iru: the Rx descriptor update structure
 *
 * Wrapper function for _ice_ift_rxd_refill() that provides a function pointer
 * used by iflib for RX packet processing, for iflib subinterfaces.
 */
static void
ice_ift_rxd_refill_subif(void *arg, if_rxd_update_t iru)
{
	struct ice_mirr_if *mif = (struct ice_mirr_if *)arg;
	struct ice_rx_queue *rxq = &mif->vsi->rx_queues[iru->iru_qsidx];
	uint64_t *paddrs;
	uint32_t pidx;
	uint16_t count;

	paddrs = iru->iru_paddrs;
	pidx = iru->iru_pidx;
	count = iru->iru_count;

	_ice_ift_rxd_refill(rxq, pidx, paddrs, count);
}

/**
 * ice_ift_rxd_flush_subif - Flush Rx descriptors to hardware
 * @arg: subinterface private structure (struct ice_mirr_if)
 * @rxqid: the Rx queue to flush
 * @flidx: unused parameter
 * @pidx: descriptor index to advance tail to
 *
 * Wrapper function for _ice_ift_rxd_flush() that provides a function pointer
 * used by iflib for RX packet processing, for iflib subinterfaces.
 */
static void
ice_ift_rxd_flush_subif(void *arg, uint16_t rxqid, uint8_t flidx __unused,
			qidx_t pidx)
{
	struct ice_mirr_if *mif = (struct ice_mirr_if *)arg;
	struct ice_rx_queue *rxq = &mif->vsi->rx_queues[rxqid];

	_ice_ift_rxd_flush(mif->back, rxq, pidx);
}
751