/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/


#ifndef IXGBE_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#endif

#include "ixgbe.h"


/************************************************************************
 * Local Function prototypes
 ************************************************************************/
static int ixgbe_isc_txd_encap(void *arg, if_pkt_info_t pi);
static void ixgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
static int ixgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear);

static void ixgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru);
static void ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx __unused, qidx_t pidx);
static int ixgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t pidx,
				   qidx_t budget);
static int ixgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);

static void ixgbe_rx_checksum(u32 staterr, if_rxd_info_t ri, u32 ptype);
static int ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *, if_pkt_info_t);

extern void ixgbe_if_enable_intr(if_ctx_t ctx);
static int ixgbe_determine_rsstype(u16 pkt_info);

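/*
 * Descriptor-level methods exported to iflib.  iflib owns the mbufs,
 * DMA maps and queue locks; these callbacks only inspect and fill the
 * hardware descriptor rings on its behalf.
 */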
struct if_txrx ixgbe_txrx = {
	.ift_txd_encap = ixgbe_isc_txd_encap,
	.ift_txd_flush = ixgbe_isc_txd_flush,
	.ift_txd_credits_update = ixgbe_isc_txd_credits_update,
	.ift_rxd_available = ixgbe_isc_rxd_available,
	.ift_rxd_pkt_get = ixgbe_isc_rxd_pkt_get,
	.ift_rxd_refill = ixgbe_isc_rxd_refill,
	.ift_rxd_flush = ixgbe_isc_rxd_flush,
	.ift_legacy_intr = NULL
};

extern if_shared_ctx_t ixgbe_sctx;

/************************************************************************
 * ixgbe_tx_ctx_setup
 *
 *   Advanced Context Descriptor setup for VLAN, CSUM or TSO
 *
 *   Returns the olinfo_status bits to be merged into the packet's
 *   data descriptors.
 ************************************************************************/
static int
ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *TXD, if_pkt_info_t pi)
{
	u32 vlan_macip_lens, type_tucmd_mlhl;
	u32 olinfo_status, mss_l4len_idx, pktlen, offload;
	u8  ehdrlen;

	offload = TRUE;
	olinfo_status = mss_l4len_idx = vlan_macip_lens = type_tucmd_mlhl = 0;
	/* VLAN MACLEN IPLEN */
	vlan_macip_lens |= (htole16(pi->ipi_vtag) << IXGBE_ADVTXD_VLAN_SHIFT);

	/*
	 * Some of our VF devices need a context descriptor for every
	 * packet, which means ehdrlen must be non-zero or the host
	 * driver will flag a malicious event.  The stack will already
	 * have populated it for every other reason this function is
	 * called.
	 */
	if (pi->ipi_ehdrlen == 0) {
		ehdrlen = ETHER_HDR_LEN;
		ehdrlen += (pi->ipi_vtag != 0) ? ETHER_VLAN_ENCAP_LEN : 0;
	} else
		ehdrlen = pi->ipi_ehdrlen;
	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;

	pktlen = pi->ipi_len;
	/* First check if TSO is to be used */
	if (pi->ipi_csum_flags & CSUM_TSO) {
		/*
		 * For TSO, PAYLEN must cover only the TCP payload: the
		 * total length minus the L2, L3 and L4 headers.  Encap
		 * carries this value into the data descriptors.
		 */
		pktlen = pi->ipi_len - ehdrlen - pi->ipi_ip_hlen - pi->ipi_tcp_hlen;
		mss_l4len_idx |= (pi->ipi_tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
		mss_l4len_idx |= (pi->ipi_tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
	}
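	/*
	 * Illustrative arithmetic (hypothetical values): ipi_len 28054
	 * with ehdrlen 14, ip_hlen 20 and tcp_hlen 20 gives pktlen
	 * 28000; with ipi_tso_segsz 1400 the hardware would emit 20
	 * segments, regenerating the headers from this context.
	 */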

	olinfo_status |= pktlen << IXGBE_ADVTXD_PAYLEN_SHIFT;

	if (pi->ipi_flags & IPI_TX_IPV4) {
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		/* Tell transmit desc to also do IPv4 checksum. */
		if (pi->ipi_csum_flags & (CSUM_IP|CSUM_TSO))
			olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
	} else if (pi->ipi_flags & IPI_TX_IPV6)
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
	else
		offload = FALSE;

	vlan_macip_lens |= pi->ipi_ip_hlen;

	switch (pi->ipi_ipproto) {
	case IPPROTO_TCP:
		if (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		else
			offload = FALSE;
		break;
	case IPPROTO_UDP:
		if (pi->ipi_csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
		else
			offload = FALSE;
		break;
	case IPPROTO_SCTP:
		if (pi->ipi_csum_flags & (CSUM_IP_SCTP | CSUM_IP6_SCTP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
		else
			offload = FALSE;
		break;
	default:
		offload = FALSE;
		break;
	}

	/* Insert L4 checksum into data descriptors */
	if (offload)
		olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;

	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	return (olinfo_status);
} /* ixgbe_tx_ctx_setup */

/************************************************************************
 * ixgbe_isc_txd_encap
 ************************************************************************/
static int
ixgbe_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
	struct adapter                   *sc = arg;
	if_softc_ctx_t                   scctx = sc->shared;
	struct ix_tx_queue               *que = &sc->tx_queues[pi->ipi_qsidx];
	struct tx_ring                   *txr = &que->txr;
	int                              nsegs = pi->ipi_nsegs;
	bus_dma_segment_t                *segs = pi->ipi_segs;
	union ixgbe_adv_tx_desc          *txd = NULL;
	struct ixgbe_adv_tx_context_desc *TXD;
	int                              i, j, first, pidx_last;
	u32                              olinfo_status, cmd, flags;
	qidx_t                           ntxd;

	cmd = (IXGBE_ADVTXD_DTYP_DATA |
		IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

	if (pi->ipi_mflags & M_VLANTAG)
		cmd |= IXGBE_ADVTXD_DCMD_VLE;

	i = first = pi->ipi_pidx;
	flags = (pi->ipi_flags & IPI_TX_INTR) ? IXGBE_TXD_CMD_RS : 0;
	ntxd = scctx->isc_ntxd[0];

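	/*
	 * The first ring slot doubles as an advanced context descriptor;
	 * it is only written (and consumed) when an offload context is
	 * actually needed below.
	 */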
	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[first];
	if ((pi->ipi_csum_flags & CSUM_OFFLOAD) ||
	    (sc->feat_en & IXGBE_FEATURE_NEEDS_CTXD) ||
	    pi->ipi_vtag) {
		/*********************************************
		 * Set up the appropriate offload context;
		 * this will consume the first descriptor.
		 *********************************************/
		olinfo_status = ixgbe_tx_ctx_setup(TXD, pi);
		if (pi->ipi_csum_flags & CSUM_TSO) {
			cmd |= IXGBE_ADVTXD_DCMD_TSE;
			++txr->tso_tx;
		}

		if (++i == scctx->isc_ntxd[0])
			i = 0;
	} else {
		/* Indicate the whole packet as payload when not doing TSO */
		olinfo_status = pi->ipi_len << IXGBE_ADVTXD_PAYLEN_SHIFT;
	}

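	/*
	 * ADVTXD_CC (check context) ties each data descriptor to the
	 * context slot set up above; some VF devices require it on
	 * every packet (see the note in ixgbe_tx_ctx_setup).  One data
	 * descriptor is queued per DMA segment.
	 */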
	olinfo_status |= IXGBE_ADVTXD_CC;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;

		txd = &txr->tx_base[i];
		seglen = segs[j].ds_len;

		txd->read.buffer_addr = htole64(segs[j].ds_addr);
		txd->read.cmd_type_len = htole32(cmd | seglen);
		txd->read.olinfo_status = htole32(olinfo_status);

		pidx_last = i;
		if (++i == scctx->isc_ntxd[0]) {
			i = 0;
		}
	}

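	/*
	 * If the stack requested a completion interrupt, record the
	 * index of the packet's last descriptor so credits_update can
	 * poll its DD bit; the mask arithmetic assumes a power-of-two
	 * ring size.
	 */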
	if (flags) {
		txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
		txr->tx_rs_pidx = (txr->tx_rs_pidx + 1) & (ntxd - 1);
	}
	txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | flags);

	txr->bytes += pi->ipi_len;
	pi->ipi_new_pidx = i;

	++txr->total_packets;

	return (0);
} /* ixgbe_isc_txd_encap */

/************************************************************************
 * ixgbe_isc_txd_flush
 ************************************************************************/
static void
ixgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
	struct adapter     *sc = arg;
	struct ix_tx_queue *que = &sc->tx_queues[txqid];
	struct tx_ring     *txr = &que->txr;

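	/*
	 * Writing the queue's tail register is the doorbell: it passes
	 * ownership of every descriptor up to pidx to the hardware.
	 */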
	IXGBE_WRITE_REG(&sc->hw, txr->tail, pidx);
} /* ixgbe_isc_txd_flush */

/************************************************************************
 * ixgbe_isc_txd_credits_update
 ************************************************************************/
static int
ixgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
	struct adapter     *sc = arg;
	if_softc_ctx_t     scctx = sc->shared;
	struct ix_tx_queue *que = &sc->tx_queues[txqid];
	struct tx_ring     *txr = &que->txr;
	qidx_t             processed = 0;
	int                updated;
	qidx_t             cur, prev, ntxd, rs_cidx;
	int32_t            delta;
	uint8_t            status;

	rs_cidx = txr->tx_rs_cidx;
	if (rs_cidx == txr->tx_rs_pidx)
		return (0);

	cur = txr->tx_rsq[rs_cidx];
	status = txr->tx_base[cur].wb.status;
	updated = !!(status & IXGBE_TXD_STAT_DD);

	if (clear == false || updated == 0)
		return (updated);

	prev = txr->tx_cidx_processed;
	ntxd = scctx->isc_ntxd[0];
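	/*
	 * Walk the recorded RS indices, accumulating the number of
	 * descriptors the hardware has completed (DD set) since the
	 * last call; delta can wrap past the end of the ring, hence
	 * the ntxd correction when it goes negative.
	 */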
	do {
		delta = (int32_t)cur - (int32_t)prev;
		if (delta < 0)
			delta += ntxd;

		processed += delta;
		prev = cur;
		rs_cidx = (rs_cidx + 1) & (ntxd - 1);
		if (rs_cidx == txr->tx_rs_pidx)
			break;

		cur = txr->tx_rsq[rs_cidx];
		status = txr->tx_base[cur].wb.status;
	} while ((status & IXGBE_TXD_STAT_DD));

	txr->tx_rs_cidx = rs_cidx;
	txr->tx_cidx_processed = prev;

	return (processed);
} /* ixgbe_isc_txd_credits_update */

/************************************************************************
 * ixgbe_isc_rxd_refill
 ************************************************************************/
static void
ixgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
	struct adapter *sc       = arg;
	struct ix_rx_queue *que  = &sc->rx_queues[iru->iru_qsidx];
	struct rx_ring *rxr      = &que->rxr;
	uint64_t *paddrs;
	int i;
	uint32_t next_pidx, pidx;
	uint16_t count;

	paddrs = iru->iru_paddrs;
	pidx = iru->iru_pidx;
	count = iru->iru_count;

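	/*
	 * iflib supplies pre-mapped physical addresses for the new
	 * clusters; write each into its descriptor's pkt_addr field,
	 * wrapping at the end of the ring.
	 */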
	for (i = 0, next_pidx = pidx; i < count; i++) {
		rxr->rx_base[next_pidx].read.pkt_addr = htole64(paddrs[i]);
		if (++next_pidx == sc->shared->isc_nrxd[0])
			next_pidx = 0;
	}
} /* ixgbe_isc_rxd_refill */

/************************************************************************
 * ixgbe_isc_rxd_flush
 ************************************************************************/
static void
ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx __unused, qidx_t pidx)
{
	struct adapter     *sc  = arg;
	struct ix_rx_queue *que = &sc->rx_queues[qsidx];
	struct rx_ring     *rxr = &que->rxr;

	IXGBE_WRITE_REG(&sc->hw, rxr->tail, pidx);
} /* ixgbe_isc_rxd_flush */

/************************************************************************
 * ixgbe_isc_rxd_available
 ************************************************************************/
static int
ixgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t pidx, qidx_t budget)
{
	struct adapter          *sc = arg;
	struct ix_rx_queue      *que = &sc->rx_queues[qsidx];
	struct rx_ring          *rxr = &que->rxr;
	union ixgbe_adv_rx_desc *rxd;
	u32                      staterr;
	int                      cnt, i, nrxd;

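	/*
	 * Fast path: a budget of one only asks whether the next
	 * descriptor has completed, not how many packets are ready.
	 */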
	if (budget == 1) {
		rxd = &rxr->rx_base[pidx];
		staterr = le32toh(rxd->wb.upper.status_error);

		return (staterr & IXGBE_RXD_STAT_DD);
	}

	nrxd = sc->shared->isc_nrxd[0];
	/*
	 * Walk the ring while descriptors are complete (DD set),
	 * counting packets (EOP descriptors) until the budget is
	 * exhausted; the count is conservatively capped at nrxd - 1.
	 */
	for (cnt = 0, i = pidx; cnt < nrxd - 1 && cnt <= budget;) {
		rxd = &rxr->rx_base[i];
		staterr = le32toh(rxd->wb.upper.status_error);

		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
			break;
		if (++i == nrxd)
			i = 0;
		if (staterr & IXGBE_RXD_STAT_EOP)
			cnt++;
	}

	return (cnt);
} /* ixgbe_isc_rxd_available */

/************************************************************************
 * ixgbe_isc_rxd_pkt_get
 *
 *   Hands a received packet, already DMA'd into host memory, to the
 *   upper layer by initializing the ri structure with its fragments
 *   and metadata.
 *
 *   Returns 0 upon success, errno on failure
 ************************************************************************/

static int
ixgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
	struct adapter           *adapter = arg;
	struct ix_rx_queue       *que = &adapter->rx_queues[ri->iri_qsidx];
	struct rx_ring           *rxr = &que->rxr;
	struct ifnet             *ifp = iflib_get_ifp(adapter->ctx);
	union ixgbe_adv_rx_desc  *rxd;

	u16                      pkt_info, len, cidx, i;
	u16                      vtag = 0;
	u32                      ptype;
	u32                      staterr = 0;
	bool                     eop;

	i = 0;
	cidx = ri->iri_cidx;
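	/*
	 * Gather every descriptor of the (possibly multi-fragment)
	 * packet into iri_frags until the EOP descriptor is seen.
	 */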
	do {
		rxd = &rxr->rx_base[cidx];
		staterr = le32toh(rxd->wb.upper.status_error);
		pkt_info = le16toh(rxd->wb.lower.lo_dword.hs_rss.pkt_info);

		/* The hardware must have finished this descriptor */
		MPASS((staterr & IXGBE_RXD_STAT_DD) != 0);

		len = le16toh(rxd->wb.upper.length);
		ptype = le32toh(rxd->wb.lower.lo_dword.data) &
			IXGBE_RXDADV_PKTTYPE_MASK;

		ri->iri_len += len;
		rxr->bytes += len;

		rxd->wb.upper.status_error = 0;
		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
		if (staterr & IXGBE_RXD_STAT_VP) {
			vtag = le16toh(rxd->wb.upper.vlan);
		} else {
			vtag = 0;
		}

		/* Make sure bad packets are discarded */
		if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
#if __FreeBSD_version >= 1100036
			if (adapter->feat_en & IXGBE_FEATURE_VF)
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
#endif
			rxr->rx_discarded++;
			return (EBADMSG);
		}
		ri->iri_frags[i].irf_flid = 0;
		ri->iri_frags[i].irf_idx = cidx;
		ri->iri_frags[i].irf_len = len;
		if (++cidx == adapter->shared->isc_nrxd[0])
			cidx = 0;
		i++;
		/* Even a 16K packet in 2K clusters needs at most 8 frags */
		MPASS(i < 9);
	} while (!eop);

	rxr->rx_packets++;
	rxr->packets++;
	rxr->rx_bytes += ri->iri_len;

	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
		ixgbe_rx_checksum(staterr, ri, ptype);

	ri->iri_flowid = le32toh(rxd->wb.lower.hi_dword.rss);
	ri->iri_rsstype = ixgbe_determine_rsstype(pkt_info);
	ri->iri_vtag = vtag;
	ri->iri_nfrags = i;
	if (vtag)
		ri->iri_flags |= M_VLANTAG;
	return (0);
} /* ixgbe_isc_rxd_pkt_get */

/************************************************************************
 * ixgbe_rx_checksum
 *
 *   Verify that the hardware indicated that the checksum is valid.
 *   Inform the stack of the checksum's status so that it does not
 *   spend time verifying it again.
 ************************************************************************/
static void
ixgbe_rx_checksum(u32 staterr, if_rxd_info_t ri, u32 ptype)
{
	u16  status = (u16)staterr;
	u8   errors = (u8)(staterr >> 24);
	bool sctp = false;

	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
		sctp = TRUE;

	/* IPv4 checksum */
	if (status & IXGBE_RXD_STAT_IPCS) {
		if (!(errors & IXGBE_RXD_ERR_IPE)) {
			/* IP Checksum Good */
			ri->iri_csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID;
		} else
			ri->iri_csum_flags = 0;
	}
	/* TCP/UDP/SCTP checksum */
	if (status & IXGBE_RXD_STAT_L4CS) {
		u64 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
#if __FreeBSD_version >= 800000
		if (sctp)
			type = CSUM_SCTP_VALID;
#endif
		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
			ri->iri_csum_flags |= type;
			if (!sctp)
				ri->iri_csum_data = htons(0xffff);
		}
	}
} /* ixgbe_rx_checksum */

/************************************************************************
 * ixgbe_determine_rsstype
 *
 *   Parse the packet type to determine the appropriate hash
 ************************************************************************/
static int
ixgbe_determine_rsstype(u16 pkt_info)
{
	switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
	case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
		return M_HASHTYPE_RSS_TCP_IPV4;
	case IXGBE_RXDADV_RSSTYPE_IPV4:
		return M_HASHTYPE_RSS_IPV4;
	case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
		return M_HASHTYPE_RSS_TCP_IPV6;
	case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
		return M_HASHTYPE_RSS_IPV6_EX;
	case IXGBE_RXDADV_RSSTYPE_IPV6:
		return M_HASHTYPE_RSS_IPV6;
	case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
		return M_HASHTYPE_RSS_TCP_IPV6_EX;
	case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
		return M_HASHTYPE_RSS_UDP_IPV4;
	case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
		return M_HASHTYPE_RSS_UDP_IPV6;
	case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
		return M_HASHTYPE_RSS_UDP_IPV6_EX;
	default:
		return M_HASHTYPE_OPAQUE;
	}
} /* ixgbe_determine_rsstype */