/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#ifndef IXGBE_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#endif

#include "ixgbe.h"

/************************************************************************
 * Local Function prototypes
 ************************************************************************/
static int ixgbe_isc_txd_encap(void *, if_pkt_info_t);
static void ixgbe_isc_txd_flush(void *, uint16_t, qidx_t);
static int ixgbe_isc_txd_credits_update(void *, uint16_t, bool);

static void ixgbe_isc_rxd_refill(void *, if_rxd_update_t);
static void ixgbe_isc_rxd_flush(void *, uint16_t, uint8_t, qidx_t);
static int ixgbe_isc_rxd_available(void *, uint16_t, qidx_t, qidx_t);
static int ixgbe_isc_rxd_pkt_get(void *, if_rxd_info_t);

static void ixgbe_rx_checksum(uint32_t, if_rxd_info_t, uint32_t);
static int ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *,
    if_pkt_info_t);

extern void ixgbe_if_enable_intr(if_ctx_t ctx);
static int ixgbe_determine_rsstype(uint16_t pkt_info);

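/*
 * Table of descriptor-level TX/RX operations exported to iflib.  iflib
 * invokes these callbacks to encapsulate and flush transmit descriptors,
 * reclaim completed ones, and to refill, flush, and harvest receive
 * descriptors.
 */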
struct if_txrx ixgbe_txrx = {
	.ift_txd_encap = ixgbe_isc_txd_encap,
	.ift_txd_flush = ixgbe_isc_txd_flush,
	.ift_txd_credits_update = ixgbe_isc_txd_credits_update,
	.ift_rxd_available = ixgbe_isc_rxd_available,
	.ift_rxd_pkt_get = ixgbe_isc_rxd_pkt_get,
	.ift_rxd_refill = ixgbe_isc_rxd_refill,
	.ift_rxd_flush = ixgbe_isc_rxd_flush,
	.ift_legacy_intr = NULL
};

/************************************************************************
 * ixgbe_tx_ctx_setup
 *
 *   Advanced Context Descriptor setup for VLAN, CSUM or TSO
 *
 ************************************************************************/
static int
ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *TXD, if_pkt_info_t pi)
{
	uint32_t vlan_macip_lens, type_tucmd_mlhl;
	uint32_t olinfo_status, mss_l4len_idx, pktlen, offload;
	u8  ehdrlen;

	offload = true;
	olinfo_status = mss_l4len_idx = vlan_macip_lens = type_tucmd_mlhl = 0;
	/* VLAN MACLEN IPLEN */
	vlan_macip_lens |= (htole16(pi->ipi_vtag) << IXGBE_ADVTXD_VLAN_SHIFT);

	/*
	 * Some of our VF devices need a context descriptor for every
	 * packet.  That means the ehdrlen needs to be non-zero in order
	 * for the host driver not to flag a malicious event.  The stack
	 * will normally have filled this in already for any other reason
	 * this function is called.
	 */
	if (pi->ipi_ehdrlen == 0) {
		ehdrlen = ETHER_HDR_LEN;
		ehdrlen += (pi->ipi_vtag != 0) ? ETHER_VLAN_ENCAP_LEN : 0;
	} else
		ehdrlen = pi->ipi_ehdrlen;
	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;

	pktlen = pi->ipi_len;
	/* First check if TSO is to be used */
	if (pi->ipi_csum_flags & CSUM_TSO) {
		/* This is used in the transmit desc in encap */
		pktlen = pi->ipi_len - ehdrlen - pi->ipi_ip_hlen - pi->ipi_tcp_hlen;
		mss_l4len_idx |= (pi->ipi_tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
		mss_l4len_idx |= (pi->ipi_tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
	}

	olinfo_status |= pktlen << IXGBE_ADVTXD_PAYLEN_SHIFT;

	if (pi->ipi_flags & IPI_TX_IPV4) {
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		/* Tell transmit desc to also do IPv4 checksum. */
		if (pi->ipi_csum_flags & (CSUM_IP|CSUM_TSO))
			olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
	} else if (pi->ipi_flags & IPI_TX_IPV6)
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
	else
		offload = false;

	vlan_macip_lens |= pi->ipi_ip_hlen;

	switch (pi->ipi_ipproto) {
	case IPPROTO_TCP:
		if (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP | CSUM_TSO))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		else
			offload = false;
		break;
	case IPPROTO_UDP:
		if (pi->ipi_csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
		else
			offload = false;
		break;
	case IPPROTO_SCTP:
		if (pi->ipi_csum_flags & (CSUM_IP_SCTP | CSUM_IP6_SCTP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
		else
			offload = false;
		break;
	default:
		offload = false;
		break;
	}
	/* Insert L4 checksum into data descriptors */
	if (offload)
		olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;

	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

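	/*
	 * Hand the accumulated olinfo_status back to the caller; the
	 * encap path ORs it into every data descriptor of the packet.
	 */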
	return (olinfo_status);
} /* ixgbe_tx_ctx_setup */

/************************************************************************
 * ixgbe_isc_txd_encap
 ************************************************************************/
static int
ixgbe_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
	struct ixgbe_softc               *sc = arg;
	if_softc_ctx_t                   scctx = sc->shared;
	struct ix_tx_queue               *que = &sc->tx_queues[pi->ipi_qsidx];
	struct tx_ring                   *txr = &que->txr;
	int                              nsegs = pi->ipi_nsegs;
	bus_dma_segment_t                *segs = pi->ipi_segs;
	union ixgbe_adv_tx_desc          *txd = NULL;
	struct ixgbe_adv_tx_context_desc *TXD;
	int                              i, j, first, pidx_last;
	uint32_t                         olinfo_status, cmd, flags;
	qidx_t                           ntxd;

	cmd =  (IXGBE_ADVTXD_DTYP_DATA |
		IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

	if (pi->ipi_mflags & M_VLANTAG)
		cmd |= IXGBE_ADVTXD_DCMD_VLE;

	i = first = pi->ipi_pidx;
	flags = (pi->ipi_flags & IPI_TX_INTR) ? IXGBE_TXD_CMD_RS : 0;
	ntxd = scctx->isc_ntxd[0];

	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[first];
	if ((pi->ipi_csum_flags & CSUM_OFFLOAD) ||
	    (sc->feat_en & IXGBE_FEATURE_NEEDS_CTXD) ||
	    pi->ipi_vtag) {
		/*********************************************
		 * Set up the appropriate offload context
		 * this will consume the first descriptor
		 *********************************************/
		olinfo_status = ixgbe_tx_ctx_setup(TXD, pi);
		if (pi->ipi_csum_flags & CSUM_TSO) {
			cmd |= IXGBE_ADVTXD_DCMD_TSE;
			++txr->tso_tx;
		}

		if (++i == scctx->isc_ntxd[0])
			i = 0;
	} else {
		/* Indicate the whole packet as payload when not doing TSO */
		olinfo_status = pi->ipi_len << IXGBE_ADVTXD_PAYLEN_SHIFT;
	}

	olinfo_status |= IXGBE_ADVTXD_CC;
	pidx_last = 0;
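	/* Emit one advanced data descriptor per DMA segment of the packet. */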
	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;

		txd = &txr->tx_base[i];
		seglen = segs[j].ds_len;

		txd->read.buffer_addr = htole64(segs[j].ds_addr);
		txd->read.cmd_type_len = htole32(cmd | seglen);
		txd->read.olinfo_status = htole32(olinfo_status);

		pidx_last = i;
		if (++i == scctx->isc_ntxd[0]) {
			i = 0;
		}
	}

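	/*
	 * If a report-status completion was requested for this packet,
	 * record the index of its last descriptor so that
	 * ixgbe_isc_txd_credits_update() can later test that slot's DD bit.
	 */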
	if (flags) {
		txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
		txr->tx_rs_pidx = (txr->tx_rs_pidx + 1) & (ntxd - 1);
	}
	txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | flags);

	txr->bytes += pi->ipi_len;
	pi->ipi_new_pidx = i;

	++txr->total_packets;

	return (0);
} /* ixgbe_isc_txd_encap */

/************************************************************************
 * ixgbe_isc_txd_flush
 ************************************************************************/
static void
ixgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
	struct ixgbe_softc     *sc = arg;
	struct ix_tx_queue *que = &sc->tx_queues[txqid];
	struct tx_ring     *txr = &que->txr;

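	/*
	 * Write the TX tail register; hardware now owns all descriptors
	 * up to, but not including, pidx.
	 */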
	IXGBE_WRITE_REG(&sc->hw, txr->tail, pidx);
} /* ixgbe_isc_txd_flush */

/************************************************************************
 * ixgbe_isc_txd_credits_update
 ************************************************************************/
static int
ixgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
	struct ixgbe_softc *sc = arg;
	if_softc_ctx_t     scctx = sc->shared;
	struct ix_tx_queue *que = &sc->tx_queues[txqid];
	struct tx_ring     *txr = &que->txr;
	qidx_t             processed = 0;
	int                updated;
	qidx_t             cur, prev, ntxd, rs_cidx;
	int32_t            delta;
	uint8_t            status;

	rs_cidx = txr->tx_rs_cidx;
	if (rs_cidx == txr->tx_rs_pidx)
		return (0);

	cur = txr->tx_rsq[rs_cidx];
	status = txr->tx_base[cur].wb.status;
	updated = !!(status & IXGBE_TXD_STAT_DD);

	if (!updated)
		return (0);

	/*
	 * If clear is false, just let the caller know that there
	 * are descriptors to reclaim.
	 */
	if (!clear)
		return (1);

	prev = txr->tx_cidx_processed;
	ntxd = scctx->isc_ntxd[0];
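	/*
	 * Walk the queue of recorded report-status indices, accumulating
	 * the number of descriptors retired between consecutive
	 * completions.  The delta is taken modulo the ring size to handle
	 * wrap-around.
	 */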
	do {
		MPASS(prev != cur);
		delta = (int32_t)cur - (int32_t)prev;
		if (delta < 0)
			delta += ntxd;
		MPASS(delta > 0);

		processed += delta;
		prev = cur;
		rs_cidx = (rs_cidx + 1) & (ntxd - 1);
		if (rs_cidx == txr->tx_rs_pidx)
			break;

		cur = txr->tx_rsq[rs_cidx];
		status = txr->tx_base[cur].wb.status;
	} while ((status & IXGBE_TXD_STAT_DD));

	txr->tx_rs_cidx = rs_cidx;
	txr->tx_cidx_processed = prev;

	return (processed);
} /* ixgbe_isc_txd_credits_update */

/************************************************************************
 * ixgbe_isc_rxd_refill
 ************************************************************************/
static void
ixgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
	struct ixgbe_softc *sc   = arg;
	struct ix_rx_queue *que  = &sc->rx_queues[iru->iru_qsidx];
	struct rx_ring *rxr      = &que->rxr;
	uint64_t *paddrs;
	int i;
	uint32_t next_pidx, pidx;
	uint16_t count;

	paddrs = iru->iru_paddrs;
	pidx = iru->iru_pidx;
	count = iru->iru_count;

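	/*
	 * Store the supplied cluster physical addresses into the receive
	 * ring, wrapping the index at the end of the ring.
	 */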
	for (i = 0, next_pidx = pidx; i < count; i++) {
		rxr->rx_base[next_pidx].read.pkt_addr = htole64(paddrs[i]);
		if (++next_pidx == sc->shared->isc_nrxd[0])
			next_pidx = 0;
	}
} /* ixgbe_isc_rxd_refill */

/************************************************************************
 * ixgbe_isc_rxd_flush
 ************************************************************************/
static void
ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx __unused, qidx_t pidx)
{
	struct ixgbe_softc *sc  = arg;
	struct ix_rx_queue *que = &sc->rx_queues[qsidx];
	struct rx_ring     *rxr = &que->rxr;

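	/*
	 * Advance the RX tail register so hardware can start using the
	 * newly refilled buffers.
	 */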
	IXGBE_WRITE_REG(&sc->hw, rxr->tail, pidx);
} /* ixgbe_isc_rxd_flush */

/************************************************************************
 * ixgbe_isc_rxd_available
 ************************************************************************/
static int
ixgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t pidx, qidx_t budget)
{
	struct ixgbe_softc      *sc = arg;
	struct ix_rx_queue      *que = &sc->rx_queues[qsidx];
	struct rx_ring          *rxr = &que->rxr;
	union ixgbe_adv_rx_desc *rxd;
	uint32_t                 staterr;
	int                      cnt, i, nrxd;

	nrxd = sc->shared->isc_nrxd[0];
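	/*
	 * Count complete packets only: walk descriptors from pidx while
	 * their DD bit is set, bumping the count whenever an EOP
	 * descriptor is seen, and stop once the budget has been met.
	 */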
	for (cnt = 0, i = pidx; cnt < nrxd && cnt <= budget;) {
		rxd = &rxr->rx_base[i];
		staterr = le32toh(rxd->wb.upper.status_error);

		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
			break;
		if (++i == nrxd)
			i = 0;
		if (staterr & IXGBE_RXD_STAT_EOP)
			cnt++;
	}
	return (cnt);
} /* ixgbe_isc_rxd_available */

/************************************************************************
 * ixgbe_isc_rxd_pkt_get
 *
 *   Fill in the if_rxd_info structure (ri) describing a packet that
 *   the hardware has DMA'd into host memory, for delivery to the stack.
 *
 *   Returns 0 upon success, errno on failure
 ************************************************************************/

static int
ixgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
	struct ixgbe_softc       *sc = arg;
	struct ix_rx_queue       *que = &sc->rx_queues[ri->iri_qsidx];
	struct rx_ring           *rxr = &que->rxr;
	struct ifnet             *ifp = iflib_get_ifp(sc->ctx);
	union ixgbe_adv_rx_desc  *rxd;

	uint16_t                  pkt_info, len, cidx, i;
	uint16_t                  vtag = 0;
	uint32_t                  ptype;
	uint32_t                  staterr = 0;
	bool                      eop;

	i = 0;
	cidx = ri->iri_cidx;
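	/*
	 * Gather every descriptor that belongs to this packet.  Each
	 * iteration records one fragment in ri->iri_frags[] and the loop
	 * ends once the EOP descriptor has been processed.
	 */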
	do {
		rxd = &rxr->rx_base[cidx];
		staterr = le32toh(rxd->wb.upper.status_error);
		pkt_info = le16toh(rxd->wb.lower.lo_dword.hs_rss.pkt_info);

		/* The descriptor must have been written back by hardware. */
		MPASS((staterr & IXGBE_RXD_STAT_DD) != 0);

		len = le16toh(rxd->wb.upper.length);
		ptype = le32toh(rxd->wb.lower.lo_dword.data) &
			IXGBE_RXDADV_PKTTYPE_MASK;

		ri->iri_len += len;
		rxr->bytes += len;

		rxd->wb.upper.status_error = 0;
		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);

		if ((rxr->vtag_strip) && (staterr & IXGBE_RXD_STAT_VP)) {
			vtag = le16toh(rxd->wb.upper.vlan);
		} else {
			vtag = 0;
		}

		/* Make sure bad packets are discarded */
		if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
			if (sc->feat_en & IXGBE_FEATURE_VF)
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);

			rxr->rx_discarded++;
			return (EBADMSG);
		}
		ri->iri_frags[i].irf_flid = 0;
		ri->iri_frags[i].irf_idx = cidx;
		ri->iri_frags[i].irf_len = len;
		if (++cidx == sc->shared->isc_nrxd[0])
			cidx = 0;
		i++;
		/* even a 16K packet shouldn't consume more than 8 clusters */
		MPASS(i < 9);
	} while (!eop);

	rxr->rx_packets++;
	rxr->packets++;
	rxr->rx_bytes += ri->iri_len;

	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
		ixgbe_rx_checksum(staterr, ri, ptype);

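	/*
	 * Report the RSS hash and its type.  When the RSS feature is not
	 * enabled, a recognized hash type is downgraded to an opaque hash
	 * and packets without a usable hash report no hash type at all.
	 */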
	ri->iri_flowid = le32toh(rxd->wb.lower.hi_dword.rss);
	ri->iri_rsstype = ixgbe_determine_rsstype(pkt_info);
	if ((sc->feat_en & IXGBE_FEATURE_RSS) == 0) {
		if (ri->iri_rsstype == M_HASHTYPE_OPAQUE)
			ri->iri_rsstype = M_HASHTYPE_NONE;
		else
			ri->iri_rsstype = M_HASHTYPE_OPAQUE_HASH;
	}
	ri->iri_vtag = vtag;
	ri->iri_nfrags = i;
	if (vtag)
		ri->iri_flags |= M_VLANTAG;
	return (0);
} /* ixgbe_isc_rxd_pkt_get */

/************************************************************************
 * ixgbe_rx_checksum
 *
 *   Verify that the hardware indicated that the checksum is valid.
 *   Inform the stack about the status of the checksum so that it
 *   does not spend time verifying it again.
 ************************************************************************/
static void
ixgbe_rx_checksum(uint32_t staterr, if_rxd_info_t ri, uint32_t ptype)
{
	uint16_t status = (uint16_t)staterr;
	uint8_t errors = (uint8_t)(staterr >> 24);

	/* If there is a layer 3 or 4 error we are done */
	if (__predict_false(errors & (IXGBE_RXD_ERR_IPE | IXGBE_RXD_ERR_TCPE)))
		return;

	/* IP Checksum Good */
	if (status & IXGBE_RXD_STAT_IPCS)
		ri->iri_csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID);

	/* Valid L4E checksum */
	if (__predict_true(status & IXGBE_RXD_STAT_L4CS)) {
		/* SCTP header present. */
		if (__predict_false((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
		    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)) {
			ri->iri_csum_flags |= CSUM_SCTP_VALID;
		} else {
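			/*
			 * Report the L4 checksum as already verified; 0xffff
			 * is the conventional csum_data value for a valid
			 * pseudo-header checksum.
			 */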
			ri->iri_csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			ri->iri_csum_data = htons(0xffff);
		}
	}
} /* ixgbe_rx_checksum */

/************************************************************************
 * ixgbe_determine_rsstype
 *
 *   Parse the packet type to determine the appropriate hash
 ************************************************************************/
static int
ixgbe_determine_rsstype(uint16_t pkt_info)
{
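	/* Map the hardware RSS type field onto the corresponding mbuf hash type. */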
	switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
	case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
		return M_HASHTYPE_RSS_TCP_IPV4;
	case IXGBE_RXDADV_RSSTYPE_IPV4:
		return M_HASHTYPE_RSS_IPV4;
	case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
		return M_HASHTYPE_RSS_TCP_IPV6;
	case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
		return M_HASHTYPE_RSS_IPV6_EX;
	case IXGBE_RXDADV_RSSTYPE_IPV6:
		return M_HASHTYPE_RSS_IPV6;
	case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
		return M_HASHTYPE_RSS_TCP_IPV6_EX;
	case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
		return M_HASHTYPE_RSS_UDP_IPV4;
	case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
		return M_HASHTYPE_RSS_UDP_IPV6;
	case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
		return M_HASHTYPE_RSS_UDP_IPV6_EX;
	default:
		return M_HASHTYPE_OPAQUE;
	}
} /* ixgbe_determine_rsstype */