xref: /freebsd/sys/dev/ixgbe/ix_txrx.c (revision 3cbb4cc200f8a0ad7ed08233425ea54524a21f1c)
/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/


#ifndef IXGBE_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#endif

#include "ixgbe.h"

/************************************************************************
 * Local Function prototypes
 ************************************************************************/
static int ixgbe_isc_txd_encap(void *arg, if_pkt_info_t pi);
static void ixgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
static int ixgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear);

static void ixgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru);
static void ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx __unused, qidx_t pidx);
static int ixgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t pidx,
				   qidx_t budget);
static int ixgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);

static void ixgbe_rx_checksum(u32 staterr, if_rxd_info_t ri, u32 ptype);
static int ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *, if_pkt_info_t);

extern void ixgbe_if_enable_intr(if_ctx_t ctx);
static int ixgbe_determine_rsstype(u16 pkt_info);

struct if_txrx ixgbe_txrx = {
	.ift_txd_encap = ixgbe_isc_txd_encap,
	.ift_txd_flush = ixgbe_isc_txd_flush,
	.ift_txd_credits_update = ixgbe_isc_txd_credits_update,
	.ift_rxd_available = ixgbe_isc_rxd_available,
	.ift_rxd_pkt_get = ixgbe_isc_rxd_pkt_get,
	.ift_rxd_refill = ixgbe_isc_rxd_refill,
	.ift_rxd_flush = ixgbe_isc_rxd_flush,
	.ift_legacy_intr = NULL
};

extern if_shared_ctx_t ixgbe_sctx;

/************************************************************************
 * ixgbe_tx_ctx_setup
 *
 *   Advanced Context Descriptor setup for VLAN, CSUM or TSO
 ************************************************************************/
static int
ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *TXD, if_pkt_info_t pi)
{
	u32 vlan_macip_lens, type_tucmd_mlhl;
	u32 olinfo_status, mss_l4len_idx, pktlen, offload;
	u8  ehdrlen;

	offload = TRUE;
	olinfo_status = mss_l4len_idx = vlan_macip_lens = type_tucmd_mlhl = 0;
	/* VLAN MACLEN IPLEN */
	vlan_macip_lens |= (htole16(pi->ipi_vtag) << IXGBE_ADVTXD_VLAN_SHIFT);

	/*
	 * Some of our VF devices need a context descriptor for every
	 * packet, so ehdrlen must be non-zero here or the host driver
	 * will flag a malicious event.  The stack will already have
	 * populated it for any of the other reasons this function is
	 * called.
	 */
	if (pi->ipi_ehdrlen == 0) {
		ehdrlen = ETHER_HDR_LEN;
		ehdrlen += (pi->ipi_vtag != 0) ? ETHER_VLAN_ENCAP_LEN : 0;
	} else
		ehdrlen = pi->ipi_ehdrlen;
	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;

	pktlen = pi->ipi_len;
	/* First check if TSO is to be used */
	if (pi->ipi_csum_flags & CSUM_TSO) {
		/*
		 * For TSO the reported payload length must exclude the
		 * headers; it reaches the data descriptors via the
		 * olinfo_status returned to encap.
		 */
		pktlen = pi->ipi_len - ehdrlen - pi->ipi_ip_hlen - pi->ipi_tcp_hlen;
		mss_l4len_idx |= (pi->ipi_tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
		mss_l4len_idx |= (pi->ipi_tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
	}

	olinfo_status |= pktlen << IXGBE_ADVTXD_PAYLEN_SHIFT;

	if (pi->ipi_flags & IPI_TX_IPV4) {
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		/* Tell transmit desc to also do IPv4 checksum. */
		if (pi->ipi_csum_flags & (CSUM_IP | CSUM_TSO))
			olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
	} else if (pi->ipi_flags & IPI_TX_IPV6)
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
	else
		offload = FALSE;

	vlan_macip_lens |= pi->ipi_ip_hlen;

	switch (pi->ipi_ipproto) {
	case IPPROTO_TCP:
		if (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP | CSUM_TSO))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		else
			offload = FALSE;
		break;
	case IPPROTO_UDP:
		if (pi->ipi_csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
		else
			offload = FALSE;
		break;
	case IPPROTO_SCTP:
		if (pi->ipi_csum_flags & (CSUM_IP_SCTP | CSUM_IP6_SCTP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
		else
			offload = FALSE;
		break;
	default:
		offload = FALSE;
		break;
	}
	/* Insert L4 checksum into data descriptors */
	if (offload)
		olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;

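	/* Mark this slot as an advanced context descriptor (extended format) */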
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

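	/* The caller folds the returned olinfo_status into each data descriptor. */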
	return (olinfo_status);
} /* ixgbe_tx_ctx_setup */

/************************************************************************
 * ixgbe_isc_txd_encap
 ************************************************************************/
static int
ixgbe_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
	struct adapter                   *sc = arg;
	if_softc_ctx_t                   scctx = sc->shared;
	struct ix_tx_queue               *que = &sc->tx_queues[pi->ipi_qsidx];
	struct tx_ring                   *txr = &que->txr;
	int                              nsegs = pi->ipi_nsegs;
	bus_dma_segment_t                *segs = pi->ipi_segs;
	union ixgbe_adv_tx_desc          *txd = NULL;
	struct ixgbe_adv_tx_context_desc *TXD;
	int                              i, j, first, pidx_last;
	u32                              olinfo_status, cmd, flags;
	qidx_t                           ntxd;

	cmd = (IXGBE_ADVTXD_DTYP_DATA |
	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

	if (pi->ipi_mflags & M_VLANTAG)
		cmd |= IXGBE_ADVTXD_DCMD_VLE;

	i = first = pi->ipi_pidx;
	flags = (pi->ipi_flags & IPI_TX_INTR) ? IXGBE_TXD_CMD_RS : 0;
	ntxd = scctx->isc_ntxd[0];

	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[first];
	if ((pi->ipi_csum_flags & CSUM_OFFLOAD) ||
	    (sc->feat_en & IXGBE_FEATURE_NEEDS_CTXD) ||
	    pi->ipi_vtag) {
		/*********************************************
		 * Set up the appropriate offload context;
		 * this consumes the first descriptor.
		 *********************************************/
		olinfo_status = ixgbe_tx_ctx_setup(TXD, pi);
		if (pi->ipi_csum_flags & CSUM_TSO) {
			cmd |= IXGBE_ADVTXD_DCMD_TSE;
			++txr->tso_tx;
		}

		if (++i == scctx->isc_ntxd[0])
			i = 0;
	} else {
		/* Indicate the whole packet as payload when not doing TSO */
		olinfo_status = pi->ipi_len << IXGBE_ADVTXD_PAYLEN_SHIFT;
	}

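	/*
	 * Check Context bit: tells the hardware to use the offload context
	 * set up above; some VF devices require it on every packet to
	 * avoid tripping malicious-driver detection.
	 */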
	olinfo_status |= IXGBE_ADVTXD_CC;
	pidx_last = 0;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;

		txd = &txr->tx_base[i];
		seglen = segs[j].ds_len;

		txd->read.buffer_addr = htole64(segs[j].ds_addr);
		txd->read.cmd_type_len = htole32(cmd | seglen);
		txd->read.olinfo_status = htole32(olinfo_status);

		pidx_last = i;
		if (++i == scctx->isc_ntxd[0]) {
			i = 0;
		}
	}

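	/*
	 * If iflib asked for a completion interrupt, request a report-status
	 * writeback on the last descriptor and remember its index so that
	 * ixgbe_isc_txd_credits_update() can poll it for the DD bit.
	 */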
	if (flags) {
		txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
		txr->tx_rs_pidx = (txr->tx_rs_pidx + 1) & (ntxd - 1);
	}
	txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | flags);

	txr->bytes += pi->ipi_len;
	pi->ipi_new_pidx = i;

	++txr->total_packets;

	return (0);
} /* ixgbe_isc_txd_encap */

/************************************************************************
 * ixgbe_isc_txd_flush
 ************************************************************************/
static void
ixgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
	struct adapter     *sc = arg;
	struct ix_tx_queue *que = &sc->tx_queues[txqid];
	struct tx_ring     *txr = &que->txr;

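	/* Bump the tail register: descriptors before pidx are now hardware-owned. */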
	IXGBE_WRITE_REG(&sc->hw, txr->tail, pidx);
} /* ixgbe_isc_txd_flush */

/************************************************************************
 * ixgbe_isc_txd_credits_update
 ************************************************************************/
static int
ixgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
	struct adapter     *sc = arg;
	if_softc_ctx_t     scctx = sc->shared;
	struct ix_tx_queue *que = &sc->tx_queues[txqid];
	struct tx_ring     *txr = &que->txr;
	qidx_t             processed = 0;
	int                updated;
	qidx_t             cur, prev, ntxd, rs_cidx;
	int32_t            delta;
	uint8_t            status;

	rs_cidx = txr->tx_rs_cidx;
	if (rs_cidx == txr->tx_rs_pidx)
		return (0);

	cur = txr->tx_rsq[rs_cidx];
	status = txr->tx_base[cur].wb.status;
	updated = !!(status & IXGBE_TXD_STAT_DD);

	if (!updated)
		return (0);

	/*
	 * If clear is false, just let the caller know that there
	 * are descriptors to reclaim.
	 */
	if (!clear)
		return (1);

	prev = txr->tx_cidx_processed;
	ntxd = scctx->isc_ntxd[0];
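	/*
	 * Walk the queue of report-status descriptors, crediting the
	 * ring distance from the previously processed index to each
	 * completed one, until a descriptor whose DD bit is still clear
	 * is found.
	 */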
	do {
		MPASS(prev != cur);
		delta = (int32_t)cur - (int32_t)prev;
		if (delta < 0)
			delta += ntxd;
		MPASS(delta > 0);

		processed += delta;
		prev = cur;
		rs_cidx = (rs_cidx + 1) & (ntxd - 1);
		if (rs_cidx == txr->tx_rs_pidx)
			break;

		cur = txr->tx_rsq[rs_cidx];
		status = txr->tx_base[cur].wb.status;
	} while ((status & IXGBE_TXD_STAT_DD));

	txr->tx_rs_cidx = rs_cidx;
	txr->tx_cidx_processed = prev;

	return (processed);
} /* ixgbe_isc_txd_credits_update */

/************************************************************************
 * ixgbe_isc_rxd_refill
 ************************************************************************/
static void
ixgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
	struct adapter *sc       = arg;
	struct ix_rx_queue *que  = &sc->rx_queues[iru->iru_qsidx];
	struct rx_ring *rxr      = &que->rxr;
	uint64_t *paddrs;
	int i;
	uint32_t next_pidx, pidx;
	uint16_t count;

	paddrs = iru->iru_paddrs;
	pidx = iru->iru_pidx;
	count = iru->iru_count;

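	/*
	 * Only the packet buffer address needs refreshing: the descriptor's
	 * writeback status (including DD) was zeroed in
	 * ixgbe_isc_rxd_pkt_get().
	 */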
	for (i = 0, next_pidx = pidx; i < count; i++) {
		rxr->rx_base[next_pidx].read.pkt_addr = htole64(paddrs[i]);
		if (++next_pidx == sc->shared->isc_nrxd[0])
			next_pidx = 0;
	}
} /* ixgbe_isc_rxd_refill */

/************************************************************************
 * ixgbe_isc_rxd_flush
 ************************************************************************/
static void
ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx __unused, qidx_t pidx)
{
	struct adapter     *sc  = arg;
	struct ix_rx_queue *que = &sc->rx_queues[qsidx];
	struct rx_ring     *rxr = &que->rxr;

	IXGBE_WRITE_REG(&sc->hw, rxr->tail, pidx);
} /* ixgbe_isc_rxd_flush */

/************************************************************************
 * ixgbe_isc_rxd_available
 ************************************************************************/
static int
ixgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t pidx, qidx_t budget)
{
	struct adapter          *sc = arg;
	struct ix_rx_queue      *que = &sc->rx_queues[qsidx];
	struct rx_ring          *rxr = &que->rxr;
	union ixgbe_adv_rx_desc *rxd;
	u32                      staterr;
	int                      cnt, i, nrxd;

	nrxd = sc->shared->isc_nrxd[0];
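	/*
	 * Scan until a descriptor is found that the hardware has not yet
	 * written back; count only EOP descriptors so the return value is
	 * the number of complete packets available, capped by budget.
	 */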
	for (cnt = 0, i = pidx; cnt < nrxd && cnt <= budget;) {
		rxd = &rxr->rx_base[i];
		staterr = le32toh(rxd->wb.upper.status_error);

		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
			break;
		if (++i == nrxd)
			i = 0;
		if (staterr & IXGBE_RXD_STAT_EOP)
			cnt++;
	}
	return (cnt);
} /* ixgbe_isc_rxd_available */

/************************************************************************
 * ixgbe_isc_rxd_pkt_get
 *
 *   Initialize the if_rxd_info structure for a packet that has been
 *   DMA'd into host memory so it can be handed up to the stack.
 *
 *   Returns 0 upon success, errno on failure
 ************************************************************************/
static int
ixgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
	struct adapter           *adapter = arg;
	struct ix_rx_queue       *que = &adapter->rx_queues[ri->iri_qsidx];
	struct rx_ring           *rxr = &que->rxr;
	struct ifnet             *ifp = iflib_get_ifp(adapter->ctx);
	union ixgbe_adv_rx_desc  *rxd;

	u16                      pkt_info, len, cidx, i;
	u16                      vtag = 0;
	u32                      ptype;
	u32                      staterr = 0;
	bool                     eop;

	i = 0;
	cidx = ri->iri_cidx;
	do {
		rxd = &rxr->rx_base[cidx];
		staterr = le32toh(rxd->wb.upper.status_error);
		pkt_info = le16toh(rxd->wb.lower.lo_dword.hs_rss.pkt_info);

		/* Error checking: the hardware must have completed this descriptor */
		MPASS((staterr & IXGBE_RXD_STAT_DD) != 0);

		len = le16toh(rxd->wb.upper.length);
		ptype = le32toh(rxd->wb.lower.lo_dword.data) &
			IXGBE_RXDADV_PKTTYPE_MASK;

		ri->iri_len += len;
		rxr->bytes += len;

		rxd->wb.upper.status_error = 0;
		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);

		if (rxr->vtag_strip && (staterr & IXGBE_RXD_STAT_VP))
			vtag = le16toh(rxd->wb.upper.vlan);
		else
			vtag = 0;

		/* Make sure bad packets are discarded */
		if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
#if __FreeBSD_version >= 1100036
			if (adapter->feat_en & IXGBE_FEATURE_VF)
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
#endif

			rxr->rx_discarded++;
			return (EBADMSG);
		}
		ri->iri_frags[i].irf_flid = 0;
		ri->iri_frags[i].irf_idx = cidx;
		ri->iri_frags[i].irf_len = len;
		if (++cidx == adapter->shared->isc_nrxd[0])
			cidx = 0;
		i++;
		/* Even a 16K packet shouldn't consume more than 8 clusters */
		MPASS(i < 9);
	} while (!eop);

	rxr->rx_packets++;
	rxr->packets++;
	rxr->rx_bytes += ri->iri_len;

	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
		ixgbe_rx_checksum(staterr, ri, ptype);

	ri->iri_flowid = le32toh(rxd->wb.lower.hi_dword.rss);
	ri->iri_rsstype = ixgbe_determine_rsstype(pkt_info);
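	/*
	 * When the RSS feature is not enabled the reported hash cannot be
	 * trusted as a standard RSS hash: report no hash type at all if the
	 * hardware gave none, otherwise an opaque hash value.
	 */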
	if ((adapter->feat_en & IXGBE_FEATURE_RSS) == 0) {
		if (ri->iri_rsstype == M_HASHTYPE_OPAQUE)
			ri->iri_rsstype = M_HASHTYPE_NONE;
		else
			ri->iri_rsstype = M_HASHTYPE_OPAQUE_HASH;
	}
	ri->iri_vtag = vtag;
	ri->iri_nfrags = i;
	if (vtag)
		ri->iri_flags |= M_VLANTAG;
	return (0);
} /* ixgbe_isc_rxd_pkt_get */

/************************************************************************
 * ixgbe_rx_checksum
 *
 *   Verify that the hardware indicated that the checksum is valid and
 *   inform the stack of the result so it does not spend time verifying
 *   the checksum again.
 ************************************************************************/
static void
ixgbe_rx_checksum(u32 staterr, if_rxd_info_t ri, u32 ptype)
{
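	/*
	 * staterr packs the descriptor status bits in its low 16 bits and
	 * the error byte in bits 31:24; split it so the IXGBE_RXD_STAT_*
	 * and IXGBE_RXD_ERR_* masks can be applied directly.
	 */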
	u16  status = (u16)staterr;
	u8   errors = (u8)(staterr >> 24);
	bool sctp = false;

	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
		sctp = true;

	/* IPv4 checksum */
	if (status & IXGBE_RXD_STAT_IPCS) {
		if (!(errors & IXGBE_RXD_ERR_IPE)) {
			/* IP checksum good */
			ri->iri_csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID;
		} else
			ri->iri_csum_flags = 0;
	}
	/* TCP/UDP/SCTP checksum */
	if (status & IXGBE_RXD_STAT_L4CS) {
		u64 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
#if __FreeBSD_version >= 800000
		if (sctp)
			type = CSUM_SCTP_VALID;
#endif
		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
			ri->iri_csum_flags |= type;
			if (!sctp)
				ri->iri_csum_data = htons(0xffff);
		}
	}
} /* ixgbe_rx_checksum */

/************************************************************************
 * ixgbe_determine_rsstype
 *
 *   Parse the packet type to determine the appropriate hash
 ************************************************************************/
static int
ixgbe_determine_rsstype(u16 pkt_info)
{
	switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
	case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
		return M_HASHTYPE_RSS_TCP_IPV4;
	case IXGBE_RXDADV_RSSTYPE_IPV4:
		return M_HASHTYPE_RSS_IPV4;
	case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
		return M_HASHTYPE_RSS_TCP_IPV6;
	case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
		return M_HASHTYPE_RSS_IPV6_EX;
	case IXGBE_RXDADV_RSSTYPE_IPV6:
		return M_HASHTYPE_RSS_IPV6;
	case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
		return M_HASHTYPE_RSS_TCP_IPV6_EX;
	case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
		return M_HASHTYPE_RSS_UDP_IPV4;
	case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
		return M_HASHTYPE_RSS_UDP_IPV6;
	case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
		return M_HASHTYPE_RSS_UDP_IPV6_EX;
	default:
		return M_HASHTYPE_OPAQUE;
	}
} /* ixgbe_determine_rsstype */
552