xref: /freebsd/sys/dev/ixgbe/ix_txrx.c (revision 77013d11e6483b970af25e13c9b892075742f7e5)
/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/


#ifndef IXGBE_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#endif

#include "ixgbe.h"


/************************************************************************
 * Local Function prototypes
 ************************************************************************/
static int ixgbe_isc_txd_encap(void *arg, if_pkt_info_t pi);
static void ixgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
static int ixgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear);

static void ixgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru);
static void ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx,
    uint8_t flidx __unused, qidx_t pidx);
static int ixgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t pidx,
    qidx_t budget);
static int ixgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);

static void ixgbe_rx_checksum(uint32_t staterr, if_rxd_info_t ri,
    uint32_t ptype);
static int ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *,
    if_pkt_info_t);

extern void ixgbe_if_enable_intr(if_ctx_t ctx);
static int ixgbe_determine_rsstype(uint16_t pkt_info);

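/*
 * Dispatch table handed to iflib.  These callbacks do the descriptor-level
 * TX/RX work for the ixgbe family; iflib itself owns the rings, DMA maps,
 * and mbuf management.
 */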
struct if_txrx ixgbe_txrx  = {
	.ift_txd_encap = ixgbe_isc_txd_encap,
	.ift_txd_flush = ixgbe_isc_txd_flush,
	.ift_txd_credits_update = ixgbe_isc_txd_credits_update,
	.ift_rxd_available = ixgbe_isc_rxd_available,
	.ift_rxd_pkt_get = ixgbe_isc_rxd_pkt_get,
	.ift_rxd_refill = ixgbe_isc_rxd_refill,
	.ift_rxd_flush = ixgbe_isc_rxd_flush,
	.ift_legacy_intr = NULL
};

/************************************************************************
 * ixgbe_tx_ctx_setup
 *
 *   Advanced Context Descriptor setup for VLAN, CSUM or TSO
 *
 ************************************************************************/
static int
ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *TXD, if_pkt_info_t pi)
{
	uint32_t vlan_macip_lens, type_tucmd_mlhl;
	uint32_t olinfo_status, mss_l4len_idx, pktlen, offload;
	u8  ehdrlen;

	offload = TRUE;
	olinfo_status = mss_l4len_idx = vlan_macip_lens = type_tucmd_mlhl = 0;
	/* VLAN MACLEN IPLEN */
	vlan_macip_lens |= (htole16(pi->ipi_vtag) << IXGBE_ADVTXD_VLAN_SHIFT);

	/*
	 * Some of our VF devices need a context descriptor for every
	 * packet, and that descriptor must carry a non-zero ehdrlen or
	 * the host (PF) driver will flag a malicious event.  The stack
	 * normally fills in ipi_ehdrlen for every other case that brings
	 * us here, so this fallback is rarely needed.
	 */
	if (pi->ipi_ehdrlen == 0) {
		ehdrlen = ETHER_HDR_LEN;
		ehdrlen += (pi->ipi_vtag != 0) ? ETHER_VLAN_ENCAP_LEN : 0;
	} else
		ehdrlen = pi->ipi_ehdrlen;
	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;

	pktlen = pi->ipi_len;
	/* First check if TSO is to be used */
	if (pi->ipi_csum_flags & CSUM_TSO) {
		/* For TSO, PAYLEN covers only the L4 payload; the caller reuses this in the data descriptors */
		pktlen = pi->ipi_len - ehdrlen - pi->ipi_ip_hlen - pi->ipi_tcp_hlen;
		mss_l4len_idx |= (pi->ipi_tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
		mss_l4len_idx |= (pi->ipi_tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
	}

	olinfo_status |= pktlen << IXGBE_ADVTXD_PAYLEN_SHIFT;

	if (pi->ipi_flags & IPI_TX_IPV4) {
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		/* Tell transmit desc to also do IPv4 checksum. */
		if (pi->ipi_csum_flags & (CSUM_IP|CSUM_TSO))
			olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
	} else if (pi->ipi_flags & IPI_TX_IPV6)
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
	else
		offload = FALSE;

	vlan_macip_lens |= pi->ipi_ip_hlen;

	switch (pi->ipi_ipproto) {
	case IPPROTO_TCP:
		if (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP | CSUM_TSO))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		else
			offload = FALSE;
		break;
	case IPPROTO_UDP:
		if (pi->ipi_csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
		else
			offload = FALSE;
		break;
	case IPPROTO_SCTP:
		if (pi->ipi_csum_flags & (CSUM_IP_SCTP | CSUM_IP6_SCTP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
		else
			offload = FALSE;
		break;
	default:
		offload = FALSE;
		break;
	}
	/* Insert L4 checksum into data descriptors */
	if (offload)
		olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;

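	/* Identify this as an advanced context descriptor (DEXT + CTXT type). */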
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

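	/*
	 * The assembled olinfo_status is returned to the caller
	 * (ixgbe_isc_txd_encap), which copies it into each of the
	 * packet's data descriptors.
	 */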
	return (olinfo_status);
} /* ixgbe_tx_ctx_setup */

/************************************************************************
 * ixgbe_isc_txd_encap
 ************************************************************************/
static int
ixgbe_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
	struct adapter                   *sc = arg;
	if_softc_ctx_t                   scctx = sc->shared;
	struct ix_tx_queue               *que = &sc->tx_queues[pi->ipi_qsidx];
	struct tx_ring                   *txr = &que->txr;
	int                              nsegs = pi->ipi_nsegs;
	bus_dma_segment_t                *segs = pi->ipi_segs;
	union ixgbe_adv_tx_desc          *txd = NULL;
	struct ixgbe_adv_tx_context_desc *TXD;
	int                              i, j, first, pidx_last;
	uint32_t                         olinfo_status, cmd, flags;
	qidx_t                           ntxd;

	cmd =  (IXGBE_ADVTXD_DTYP_DATA |
		IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

	if (pi->ipi_mflags & M_VLANTAG)
		cmd |= IXGBE_ADVTXD_DCMD_VLE;

	i = first = pi->ipi_pidx;
	flags = (pi->ipi_flags & IPI_TX_INTR) ? IXGBE_TXD_CMD_RS : 0;
	ntxd = scctx->isc_ntxd[0];

	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[first];
	if ((pi->ipi_csum_flags & CSUM_OFFLOAD) ||
	    (sc->feat_en & IXGBE_FEATURE_NEEDS_CTXD) ||
	    pi->ipi_vtag) {
		/*********************************************
		 * Set up the appropriate offload context;
		 * this will consume the first descriptor.
		 *********************************************/
		olinfo_status = ixgbe_tx_ctx_setup(TXD, pi);
		if (pi->ipi_csum_flags & CSUM_TSO) {
			cmd |= IXGBE_ADVTXD_DCMD_TSE;
			++txr->tso_tx;
		}

		if (++i == scctx->isc_ntxd[0])
			i = 0;
	} else {
		/* Indicate the whole packet as payload when not doing TSO */
		olinfo_status = pi->ipi_len << IXGBE_ADVTXD_PAYLEN_SHIFT;
	}

	olinfo_status |= IXGBE_ADVTXD_CC;
	pidx_last = 0;
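	/*
	 * Queue one data descriptor per DMA segment.  Every descriptor in
	 * the packet carries the same cmd and olinfo_status bits;
	 * pidx_last remembers the final descriptor so EOP/RS can be set
	 * on it below.
	 */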
	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;

		txd = &txr->tx_base[i];
		seglen = segs[j].ds_len;

		txd->read.buffer_addr = htole64(segs[j].ds_addr);
		txd->read.cmd_type_len = htole32(cmd | seglen);
		txd->read.olinfo_status = htole32(olinfo_status);

		pidx_last = i;
		if (++i == scctx->isc_ntxd[0]) {
			i = 0;
		}
	}

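	/*
	 * If iflib asked for a completion report (IPI_TX_INTR), record the
	 * packet's last descriptor index so that
	 * ixgbe_isc_txd_credits_update() can poll its DD bit later.
	 */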
	if (flags) {
		txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
		txr->tx_rs_pidx = (txr->tx_rs_pidx + 1) & (ntxd - 1);
	}
	txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | flags);

	txr->bytes += pi->ipi_len;
	pi->ipi_new_pidx = i;

	++txr->total_packets;

	return (0);
} /* ixgbe_isc_txd_encap */

/************************************************************************
 * ixgbe_isc_txd_flush
 ************************************************************************/
static void
ixgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
	struct adapter     *sc = arg;
	struct ix_tx_queue *que = &sc->tx_queues[txqid];
	struct tx_ring     *txr = &que->txr;

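	/* Writing the ring's tail register hands descriptors up to pidx to hardware. */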
	IXGBE_WRITE_REG(&sc->hw, txr->tail, pidx);
} /* ixgbe_isc_txd_flush */

/************************************************************************
 * ixgbe_isc_txd_credits_update
 ************************************************************************/
static int
ixgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
	struct adapter     *sc = arg;
	if_softc_ctx_t     scctx = sc->shared;
	struct ix_tx_queue *que = &sc->tx_queues[txqid];
	struct tx_ring     *txr = &que->txr;
	qidx_t             processed = 0;
	int                updated;
	qidx_t             cur, prev, ntxd, rs_cidx;
	int32_t            delta;
	uint8_t            status;

	rs_cidx = txr->tx_rs_cidx;
	if (rs_cidx == txr->tx_rs_pidx)
		return (0);

	cur = txr->tx_rsq[rs_cidx];
	status = txr->tx_base[cur].wb.status;
	updated = !!(status & IXGBE_TXD_STAT_DD);

	if (!updated)
		return (0);

	/*
	 * If clear is false, just tell the caller that there are
	 * descriptors to reclaim.
	 */
	if (!clear)
		return (1);

	prev = txr->tx_cidx_processed;
	ntxd = scctx->isc_ntxd[0];
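	/*
	 * Walk the ring of recorded report-status indices.  Each entry with
	 * its DD bit set means every descriptor up to and including that
	 * index has completed; the delta arithmetic below handles ring
	 * wrap-around.
	 */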
	do {
		MPASS(prev != cur);
		delta = (int32_t)cur - (int32_t)prev;
		if (delta < 0)
			delta += ntxd;
		MPASS(delta > 0);

		processed += delta;
		prev = cur;
		rs_cidx = (rs_cidx + 1) & (ntxd - 1);
		if (rs_cidx == txr->tx_rs_pidx)
			break;

		cur = txr->tx_rsq[rs_cidx];
		status = txr->tx_base[cur].wb.status;
	} while ((status & IXGBE_TXD_STAT_DD));

	txr->tx_rs_cidx = rs_cidx;
	txr->tx_cidx_processed = prev;

	return (processed);
} /* ixgbe_isc_txd_credits_update */

/************************************************************************
 * ixgbe_isc_rxd_refill
 ************************************************************************/
static void
ixgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
	struct adapter *sc       = arg;
	struct ix_rx_queue *que  = &sc->rx_queues[iru->iru_qsidx];
	struct rx_ring *rxr      = &que->rxr;
	uint64_t *paddrs;
	int i;
	uint32_t next_pidx, pidx;
	uint16_t count;

	paddrs = iru->iru_paddrs;
	pidx = iru->iru_pidx;
	count = iru->iru_count;

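	/*
	 * Write the physical addresses of the freshly allocated buffers
	 * into the RX descriptors starting at pidx, wrapping at the end of
	 * the ring.  The tail register is only bumped later, in
	 * ixgbe_isc_rxd_flush().
	 */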
	for (i = 0, next_pidx = pidx; i < count; i++) {
		rxr->rx_base[next_pidx].read.pkt_addr = htole64(paddrs[i]);
		if (++next_pidx == sc->shared->isc_nrxd[0])
			next_pidx = 0;
	}
} /* ixgbe_isc_rxd_refill */

/************************************************************************
 * ixgbe_isc_rxd_flush
 ************************************************************************/
static void
ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx __unused, qidx_t pidx)
{
	struct adapter     *sc  = arg;
	struct ix_rx_queue *que = &sc->rx_queues[qsidx];
	struct rx_ring     *rxr = &que->rxr;

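	/* Advance the ring's tail register so hardware can use the refilled buffers. */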
	IXGBE_WRITE_REG(&sc->hw, rxr->tail, pidx);
} /* ixgbe_isc_rxd_flush */

/************************************************************************
 * ixgbe_isc_rxd_available
 ************************************************************************/
static int
ixgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t pidx, qidx_t budget)
{
	struct adapter          *sc = arg;
	struct ix_rx_queue      *que = &sc->rx_queues[qsidx];
	struct rx_ring          *rxr = &que->rxr;
	union ixgbe_adv_rx_desc *rxd;
	uint32_t                 staterr;
	int                      cnt, i, nrxd;

	nrxd = sc->shared->isc_nrxd[0];
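	/*
	 * Count complete packets: walk forward from pidx, stopping at the
	 * first descriptor that is not done (DD clear), and count one
	 * packet per EOP descriptor, up to the caller's budget.
	 */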
	for (cnt = 0, i = pidx; cnt < nrxd && cnt <= budget;) {
		rxd = &rxr->rx_base[i];
		staterr = le32toh(rxd->wb.upper.status_error);

		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
			break;
		if (++i == nrxd)
			i = 0;
		if (staterr & IXGBE_RXD_STAT_EOP)
			cnt++;
	}
	return (cnt);
} /* ixgbe_isc_rxd_available */

/************************************************************************
 * ixgbe_isc_rxd_pkt_get
 *
 *   Pass a received packet, already DMA'd into host memory, up to the
 *   stack by filling in the if_rxd_info structure.
 *
 *   Returns 0 upon success, errno on failure
 ************************************************************************/

static int
ixgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
	struct adapter           *adapter = arg;
	struct ix_rx_queue       *que = &adapter->rx_queues[ri->iri_qsidx];
	struct rx_ring           *rxr = &que->rxr;
	struct ifnet             *ifp = iflib_get_ifp(adapter->ctx);
	union ixgbe_adv_rx_desc  *rxd;

	uint16_t                  pkt_info, len, cidx, i;
	uint16_t                  vtag = 0;
	uint32_t                  ptype;
	uint32_t                  staterr = 0;
	bool                      eop;

	i = 0;
	cidx = ri->iri_cidx;
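	/*
	 * Gather every descriptor belonging to this packet (until EOP),
	 * filling one iri_frags[] entry per descriptor; iflib assembles
	 * the fragments into an mbuf chain.
	 */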
	do {
		rxd = &rxr->rx_base[cidx];
		staterr = le32toh(rxd->wb.upper.status_error);
		pkt_info = le16toh(rxd->wb.lower.lo_dword.hs_rss.pkt_info);

		/* The descriptor must be done (DD set) before we get here */
		MPASS((staterr & IXGBE_RXD_STAT_DD) != 0);

		len = le16toh(rxd->wb.upper.length);
		ptype = le32toh(rxd->wb.lower.lo_dword.data) &
			IXGBE_RXDADV_PKTTYPE_MASK;

		ri->iri_len += len;
		rxr->bytes += len;

		rxd->wb.upper.status_error = 0;
		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);

		if (rxr->vtag_strip && (staterr & IXGBE_RXD_STAT_VP)) {
			vtag = le16toh(rxd->wb.upper.vlan);
		} else {
			vtag = 0;
		}

		/* Make sure bad packets are discarded */
		if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
			if (adapter->feat_en & IXGBE_FEATURE_VF)
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);

			rxr->rx_discarded++;
			return (EBADMSG);
		}
		ri->iri_frags[i].irf_flid = 0;
		ri->iri_frags[i].irf_idx = cidx;
		ri->iri_frags[i].irf_len = len;
		if (++cidx == adapter->shared->isc_nrxd[0])
			cidx = 0;
		i++;
		/* even a 16K packet shouldn't consume more than 8 clusters */
		MPASS(i < 9);
	} while (!eop);

	rxr->rx_packets++;
	rxr->packets++;
	rxr->rx_bytes += ri->iri_len;

	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
		ixgbe_rx_checksum(staterr, ri, ptype);

	ri->iri_flowid = le32toh(rxd->wb.lower.hi_dword.rss);
	ri->iri_rsstype = ixgbe_determine_rsstype(pkt_info);
	if ((adapter->feat_en & IXGBE_FEATURE_RSS) == 0) {
		if (ri->iri_rsstype == M_HASHTYPE_OPAQUE)
			ri->iri_rsstype = M_HASHTYPE_NONE;
		else
			ri->iri_rsstype = M_HASHTYPE_OPAQUE_HASH;
	}
	ri->iri_vtag = vtag;
	ri->iri_nfrags = i;
	if (vtag)
		ri->iri_flags |= M_VLANTAG;
	return (0);
} /* ixgbe_isc_rxd_pkt_get */

/************************************************************************
 * ixgbe_rx_checksum
 *
 *   Verify that the hardware indicated that the checksum is valid.
 *   Inform the stack about the status of the checksum so that it
 *   doesn't spend time verifying it.
 ************************************************************************/
static void
ixgbe_rx_checksum(uint32_t staterr, if_rxd_info_t ri, uint32_t ptype)
{
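	/* The low word of staterr holds the status bits, the top byte the error bits. */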
	uint16_t status = (uint16_t)staterr;
	uint8_t errors = (uint8_t)(staterr >> 24);

	/* If there is a layer 3 or 4 error we are done */
	if (__predict_false(errors & (IXGBE_RXD_ERR_IPE | IXGBE_RXD_ERR_TCPE)))
		return;

	/* IP Checksum Good */
	if (status & IXGBE_RXD_STAT_IPCS)
		ri->iri_csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID);

	/* Valid L4 checksum */
	if (__predict_true(status & IXGBE_RXD_STAT_L4CS)) {
		/* SCTP header present. */
		if (__predict_false((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
		    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)) {
			ri->iri_csum_flags |= CSUM_SCTP_VALID;
		} else {
			ri->iri_csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			ri->iri_csum_data = htons(0xffff);
		}
	}
} /* ixgbe_rx_checksum */

/************************************************************************
 * ixgbe_determine_rsstype
 *
 *   Parse the packet type to determine the appropriate hash
 ************************************************************************/
static int
ixgbe_determine_rsstype(uint16_t pkt_info)
{
	switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
	case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
		return M_HASHTYPE_RSS_TCP_IPV4;
	case IXGBE_RXDADV_RSSTYPE_IPV4:
		return M_HASHTYPE_RSS_IPV4;
	case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
		return M_HASHTYPE_RSS_TCP_IPV6;
	case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
		return M_HASHTYPE_RSS_IPV6_EX;
	case IXGBE_RXDADV_RSSTYPE_IPV6:
		return M_HASHTYPE_RSS_IPV6;
	case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
		return M_HASHTYPE_RSS_TCP_IPV6_EX;
	case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
		return M_HASHTYPE_RSS_UDP_IPV4;
	case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
		return M_HASHTYPE_RSS_UDP_IPV6;
	case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
		return M_HASHTYPE_RSS_UDP_IPV6_EX;
	default:
		return M_HASHTYPE_OPAQUE;
	}
} /* ixgbe_determine_rsstype */
543