/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/


#ifndef IXGBE_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#endif

#include "ixgbe.h"

/************************************************************************
 * Local Function prototypes
 ************************************************************************/
static int ixgbe_isc_txd_encap(void *arg, if_pkt_info_t pi);
static void ixgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
static int ixgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear);

static void ixgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru);
static void ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx __unused, qidx_t pidx);
static int ixgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t pidx,
				   qidx_t budget);
static int ixgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);

static void ixgbe_rx_checksum(u32 staterr, if_rxd_info_t ri, u32 ptype);
static int ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *, if_pkt_info_t);

extern void ixgbe_if_enable_intr(if_ctx_t ctx);
static int ixgbe_determine_rsstype(u16 pkt_info);

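/*
 * TX/RX operations table handed to iflib; these routines implement the
 * driver's descriptor-level fast path (TX encap/flush/credit accounting
 * and RX refill/availability/packet harvesting).
 */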
struct if_txrx ixgbe_txrx  = {
	.ift_txd_encap = ixgbe_isc_txd_encap,
	.ift_txd_flush = ixgbe_isc_txd_flush,
	.ift_txd_credits_update = ixgbe_isc_txd_credits_update,
	.ift_rxd_available = ixgbe_isc_rxd_available,
	.ift_rxd_pkt_get = ixgbe_isc_rxd_pkt_get,
	.ift_rxd_refill = ixgbe_isc_rxd_refill,
	.ift_rxd_flush = ixgbe_isc_rxd_flush,
	.ift_legacy_intr = NULL
};

/************************************************************************
 * ixgbe_tx_ctx_setup
 *
 *   Advanced Context Descriptor setup for VLAN, CSUM or TSO
 *
 ************************************************************************/
static int
ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *TXD, if_pkt_info_t pi)
{
	u32 vlan_macip_lens, type_tucmd_mlhl;
	u32 olinfo_status, mss_l4len_idx, pktlen, offload;
	u8  ehdrlen;

	offload = TRUE;
	olinfo_status = mss_l4len_idx = vlan_macip_lens = type_tucmd_mlhl = 0;
	/* VLAN MACLEN IPLEN */
	vlan_macip_lens |= (htole16(pi->ipi_vtag) << IXGBE_ADVTXD_VLAN_SHIFT);

	/*
	 * Some of our VF devices need a context descriptor for every
	 * packet, which means ehdrlen must be non-zero or the host driver
	 * will flag a malicious event. In the other cases that bring us
	 * into this function the stack will already have populated it.
	 */
	if (pi->ipi_ehdrlen == 0) {
		ehdrlen = ETHER_HDR_LEN;
		ehdrlen += (pi->ipi_vtag != 0) ? ETHER_VLAN_ENCAP_LEN : 0;
	} else
		ehdrlen = pi->ipi_ehdrlen;
	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;

	pktlen = pi->ipi_len;
	/* First check if TSO is to be used */
	if (pi->ipi_csum_flags & CSUM_TSO) {
		/* This is used in the transmit desc in encap */
		pktlen = pi->ipi_len - ehdrlen - pi->ipi_ip_hlen - pi->ipi_tcp_hlen;
		mss_l4len_idx |= (pi->ipi_tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
		mss_l4len_idx |= (pi->ipi_tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
	}

	olinfo_status |= pktlen << IXGBE_ADVTXD_PAYLEN_SHIFT;

	if (pi->ipi_flags & IPI_TX_IPV4) {
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		/* Tell transmit desc to also do IPv4 checksum. */
		if (pi->ipi_csum_flags & (CSUM_IP|CSUM_TSO))
			olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
	} else if (pi->ipi_flags & IPI_TX_IPV6)
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
	else
		offload = FALSE;

	vlan_macip_lens |= pi->ipi_ip_hlen;

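	/*
	 * Select the L4 checksum type; an unsupported protocol or a missing
	 * checksum request leaves L4 offload disabled for this packet.
	 */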
	switch (pi->ipi_ipproto) {
	case IPPROTO_TCP:
		if (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP | CSUM_TSO))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		else
			offload = FALSE;
		break;
	case IPPROTO_UDP:
		if (pi->ipi_csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
		else
			offload = FALSE;
		break;
	case IPPROTO_SCTP:
		if (pi->ipi_csum_flags & (CSUM_IP_SCTP | CSUM_IP6_SCTP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
		else
			offload = FALSE;
		break;
	default:
		offload = FALSE;
		break;
	}
	/* Insert L4 checksum into data descriptors */
	if (offload)
		olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;

	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	return (olinfo_status);
} /* ixgbe_tx_ctx_setup */

/************************************************************************
 * ixgbe_isc_txd_encap
 ************************************************************************/
static int
ixgbe_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
	struct adapter                   *sc = arg;
	if_softc_ctx_t                   scctx = sc->shared;
	struct ix_tx_queue               *que = &sc->tx_queues[pi->ipi_qsidx];
	struct tx_ring                   *txr = &que->txr;
	int                              nsegs = pi->ipi_nsegs;
	bus_dma_segment_t                *segs = pi->ipi_segs;
	union ixgbe_adv_tx_desc          *txd = NULL;
	struct ixgbe_adv_tx_context_desc *TXD;
	int                              i, j, first, pidx_last;
	u32                              olinfo_status, cmd, flags;
	qidx_t                           ntxd;

	cmd =  (IXGBE_ADVTXD_DTYP_DATA |
		IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

	if (pi->ipi_mflags & M_VLANTAG)
		cmd |= IXGBE_ADVTXD_DCMD_VLE;

	i = first = pi->ipi_pidx;
	flags = (pi->ipi_flags & IPI_TX_INTR) ? IXGBE_TXD_CMD_RS : 0;
	ntxd = scctx->isc_ntxd[0];

	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[first];
	if ((pi->ipi_csum_flags & CSUM_OFFLOAD) ||
	    (sc->feat_en & IXGBE_FEATURE_NEEDS_CTXD) ||
	    pi->ipi_vtag) {
		/*********************************************
		 * Set up the appropriate offload context
		 * this will consume the first descriptor
		 *********************************************/
		olinfo_status = ixgbe_tx_ctx_setup(TXD, pi);
		if (pi->ipi_csum_flags & CSUM_TSO) {
			cmd |= IXGBE_ADVTXD_DCMD_TSE;
			++txr->tso_tx;
		}

		if (++i == scctx->isc_ntxd[0])
			i = 0;
	} else {
		/* Indicate the whole packet as payload when not doing TSO */
		olinfo_status = pi->ipi_len << IXGBE_ADVTXD_PAYLEN_SHIFT;
	}

	olinfo_status |= IXGBE_ADVTXD_CC;
	pidx_last = 0;
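	/* Emit one advanced data descriptor per DMA segment. */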
	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;

		txd = &txr->tx_base[i];
		seglen = segs[j].ds_len;

		txd->read.buffer_addr = htole64(segs[j].ds_addr);
		txd->read.cmd_type_len = htole32(cmd | seglen);
		txd->read.olinfo_status = htole32(olinfo_status);

		pidx_last = i;
		if (++i == scctx->isc_ntxd[0]) {
			i = 0;
		}
	}

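	/*
	 * If the caller asked for a completion interrupt, RS is set on the
	 * final descriptor below; remember its index so
	 * ixgbe_isc_txd_credits_update() can later poll it for DD.
	 */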
	if (flags) {
		txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
		txr->tx_rs_pidx = (txr->tx_rs_pidx + 1) & (ntxd - 1);
	}
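	/* Close out the packet: mark the last descriptor EOP (plus RS if requested). */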
	txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | flags);

	txr->bytes += pi->ipi_len;
	pi->ipi_new_pidx = i;

	++txr->total_packets;

	return (0);
} /* ixgbe_isc_txd_encap */

/************************************************************************
 * ixgbe_isc_txd_flush
 ************************************************************************/
static void
ixgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
	struct adapter     *sc = arg;
	struct ix_tx_queue *que = &sc->tx_queues[txqid];
	struct tx_ring     *txr = &que->txr;

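	/* Advance the TX ring tail; hardware will process the newly queued descriptors. */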
	IXGBE_WRITE_REG(&sc->hw, txr->tail, pidx);
} /* ixgbe_isc_txd_flush */

/************************************************************************
 * ixgbe_isc_txd_credits_update
 ************************************************************************/
static int
ixgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
	struct adapter     *sc = arg;
	if_softc_ctx_t     scctx = sc->shared;
	struct ix_tx_queue *que = &sc->tx_queues[txqid];
	struct tx_ring     *txr = &que->txr;
	qidx_t             processed = 0;
	int                updated;
	qidx_t             cur, prev, ntxd, rs_cidx;
	int32_t            delta;
	uint8_t            status;

	rs_cidx = txr->tx_rs_cidx;
	if (rs_cidx == txr->tx_rs_pidx)
		return (0);

	cur = txr->tx_rsq[rs_cidx];
	status = txr->tx_base[cur].wb.status;
	updated = !!(status & IXGBE_TXD_STAT_DD);

	if (!updated)
		return (0);

	/*
	 * If clear is false just let caller know that there
	 * are descriptors to reclaim
	 */
	if (!clear)
		return (1);

	prev = txr->tx_cidx_processed;
	ntxd = scctx->isc_ntxd[0];
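	/*
	 * Walk the queue of RS-marked descriptors, counting how many ring
	 * slots have completed (DD set) since the last harvest.
	 */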
	do {
		MPASS(prev != cur);
		delta = (int32_t)cur - (int32_t)prev;
		if (delta < 0)
			delta += ntxd;
		MPASS(delta > 0);

		processed += delta;
		prev = cur;
		rs_cidx = (rs_cidx + 1) & (ntxd - 1);
		if (rs_cidx == txr->tx_rs_pidx)
			break;

		cur = txr->tx_rsq[rs_cidx];
		status = txr->tx_base[cur].wb.status;
	} while ((status & IXGBE_TXD_STAT_DD));

	txr->tx_rs_cidx = rs_cidx;
	txr->tx_cidx_processed = prev;

	return (processed);
} /* ixgbe_isc_txd_credits_update */

/************************************************************************
 * ixgbe_isc_rxd_refill
 ************************************************************************/
static void
ixgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
	struct adapter *sc       = arg;
	struct ix_rx_queue *que  = &sc->rx_queues[iru->iru_qsidx];
	struct rx_ring *rxr      = &que->rxr;
	uint64_t *paddrs;
	int i;
	uint32_t next_pidx, pidx;
	uint16_t count;

	paddrs = iru->iru_paddrs;
	pidx = iru->iru_pidx;
	count = iru->iru_count;

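	/*
	 * Store the freshly allocated buffer addresses into the RX
	 * descriptors, wrapping around the end of the ring as needed.
	 */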
	for (i = 0, next_pidx = pidx; i < count; i++) {
		rxr->rx_base[next_pidx].read.pkt_addr = htole64(paddrs[i]);
		if (++next_pidx == sc->shared->isc_nrxd[0])
			next_pidx = 0;
	}
} /* ixgbe_isc_rxd_refill */

/************************************************************************
 * ixgbe_isc_rxd_flush
 ************************************************************************/
static void
ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx __unused, qidx_t pidx)
{
	struct adapter     *sc  = arg;
	struct ix_rx_queue *que = &sc->rx_queues[qsidx];
	struct rx_ring     *rxr = &que->rxr;

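	/* Hand the refilled descriptors back to hardware by advancing the RX tail. */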
	IXGBE_WRITE_REG(&sc->hw, rxr->tail, pidx);
} /* ixgbe_isc_rxd_flush */

/************************************************************************
 * ixgbe_isc_rxd_available
 ************************************************************************/
static int
ixgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t pidx, qidx_t budget)
{
	struct adapter          *sc = arg;
	struct ix_rx_queue      *que = &sc->rx_queues[qsidx];
	struct rx_ring          *rxr = &que->rxr;
	union ixgbe_adv_rx_desc *rxd;
	u32                      staterr;
	int                      cnt, i, nrxd;

	nrxd = sc->shared->isc_nrxd[0];
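	/* Count completed packets: each EOP descriptor with DD set is one packet. */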
	for (cnt = 0, i = pidx; cnt < nrxd && cnt <= budget;) {
		rxd = &rxr->rx_base[i];
		staterr = le32toh(rxd->wb.upper.status_error);

		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
			break;
		if (++i == nrxd)
			i = 0;
		if (staterr & IXGBE_RXD_STAT_EOP)
			cnt++;
	}
	return (cnt);
} /* ixgbe_isc_rxd_available */

/************************************************************************
 * ixgbe_isc_rxd_pkt_get
 *
 *   Passes packet data that has been DMA'ed into host memory up to the
 *   stack and initializes the ri (receive info) structure.
 *
 *   Returns 0 upon success, errno on failure
 ************************************************************************/

static int
ixgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
	struct adapter           *adapter = arg;
	struct ix_rx_queue       *que = &adapter->rx_queues[ri->iri_qsidx];
	struct rx_ring           *rxr = &que->rxr;
	struct ifnet             *ifp = iflib_get_ifp(adapter->ctx);
	union ixgbe_adv_rx_desc  *rxd;

	u16                      pkt_info, len, cidx, i;
	u16                      vtag = 0;
	u32                      ptype;
	u32                      staterr = 0;
	bool                     eop;

	i = 0;
	cidx = ri->iri_cidx;
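	/*
	 * Gather the descriptors that make up this packet, one fragment
	 * per descriptor, until the EOP (end of packet) bit is found.
	 */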
	do {
		rxd = &rxr->rx_base[cidx];
		staterr = le32toh(rxd->wb.upper.status_error);
		pkt_info = le16toh(rxd->wb.lower.lo_dword.hs_rss.pkt_info);

		/* Error checking: this descriptor must be complete (DD set) */
		MPASS((staterr & IXGBE_RXD_STAT_DD) != 0);

		len = le16toh(rxd->wb.upper.length);
		ptype = le32toh(rxd->wb.lower.lo_dword.data) &
			IXGBE_RXDADV_PKTTYPE_MASK;

		ri->iri_len += len;
		rxr->bytes += len;

		rxd->wb.upper.status_error = 0;
		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);

		if ((rxr->vtag_strip) && (staterr & IXGBE_RXD_STAT_VP)) {
			vtag = le16toh(rxd->wb.upper.vlan);
		} else {
			vtag = 0;
		}

		/* Make sure bad packets are discarded */
		if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
#if __FreeBSD_version >= 1100036
			if (adapter->feat_en & IXGBE_FEATURE_VF)
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
#endif
			rxr->rx_discarded++;
			return (EBADMSG);
		}
		ri->iri_frags[i].irf_flid = 0;
		ri->iri_frags[i].irf_idx = cidx;
		ri->iri_frags[i].irf_len = len;
		if (++cidx == adapter->shared->isc_nrxd[0])
			cidx = 0;
		i++;
		/* even a 16K packet shouldn't consume more than 8 clusters */
		MPASS(i < 9);
	} while (!eop);

	rxr->rx_packets++;
	rxr->packets++;
	rxr->rx_bytes += ri->iri_len;

	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
		ixgbe_rx_checksum(staterr, ri, ptype);

	ri->iri_flowid = le32toh(rxd->wb.lower.hi_dword.rss);
	ri->iri_rsstype = ixgbe_determine_rsstype(pkt_info);
	if ((adapter->feat_en & IXGBE_FEATURE_RSS) == 0) {
		if (ri->iri_rsstype == M_HASHTYPE_OPAQUE)
			ri->iri_rsstype = M_HASHTYPE_NONE;
		else
			ri->iri_rsstype = M_HASHTYPE_OPAQUE_HASH;
	}
	ri->iri_vtag = vtag;
	ri->iri_nfrags = i;
	if (vtag)
		ri->iri_flags |= M_VLANTAG;
	return (0);
} /* ixgbe_isc_rxd_pkt_get */

/************************************************************************
 * ixgbe_rx_checksum
 *
 *   Verify that the hardware indicated that the checksum is valid.
 *   Inform the stack about the status of the checksum so that it
 *   doesn't spend time verifying it.
 ************************************************************************/
static void
ixgbe_rx_checksum(u32 staterr, if_rxd_info_t ri, u32 ptype)
{
	u16  status = (u16)staterr;
	u8   errors = (u8)(staterr >> 24);
	bool sctp = false;

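	/*
	 * When the ETQF bit is set the packet type field identifies an
	 * EtherType filter match rather than a protocol, so only trust
	 * the SCTP bit when ETQF is clear.
	 */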
	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
		sctp = TRUE;

	/* IPv4 checksum */
	if (status & IXGBE_RXD_STAT_IPCS) {
		if (!(errors & IXGBE_RXD_ERR_IPE)) {
			/* IP Checksum Good */
			ri->iri_csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID;
		} else
			ri->iri_csum_flags = 0;
	}
	/* TCP/UDP/SCTP checksum */
	if (status & IXGBE_RXD_STAT_L4CS) {
		u64 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
#if __FreeBSD_version >= 800000
		if (sctp)
			type = CSUM_SCTP_VALID;
#endif
		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
			ri->iri_csum_flags |= type;
			if (!sctp)
				ri->iri_csum_data = htons(0xffff);
		}
	}
} /* ixgbe_rx_checksum */

/************************************************************************
 * ixgbe_determine_rsstype
 *
 *   Parse the packet type to determine the appropriate hash
 ************************************************************************/
static int
ixgbe_determine_rsstype(u16 pkt_info)
{
	switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
	case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
		return M_HASHTYPE_RSS_TCP_IPV4;
	case IXGBE_RXDADV_RSSTYPE_IPV4:
		return M_HASHTYPE_RSS_IPV4;
	case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
		return M_HASHTYPE_RSS_TCP_IPV6;
	case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
		return M_HASHTYPE_RSS_IPV6_EX;
	case IXGBE_RXDADV_RSSTYPE_IPV6:
		return M_HASHTYPE_RSS_IPV6;
	case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
		return M_HASHTYPE_RSS_TCP_IPV6_EX;
	case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
		return M_HASHTYPE_RSS_UDP_IPV4;
	case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
		return M_HASHTYPE_RSS_UDP_IPV6;
	case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
		return M_HASHTYPE_RSS_UDP_IPV6_EX;
	default:
		return M_HASHTYPE_OPAQUE;
	}
} /* ixgbe_determine_rsstype */