/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2016 Matthew Macy <mmacy@mattmacy.io>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "if_em.h"

#ifdef RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#endif

#ifdef VERBOSE_DEBUG
#define DPRINTF device_printf
#else
#define DPRINTF(...)
#endif

/*********************************************************************
 *  Local Function prototypes
 *********************************************************************/
static int igb_isc_txd_encap(void *, if_pkt_info_t);
static void igb_isc_txd_flush(void *, uint16_t, qidx_t);
static int igb_isc_txd_credits_update(void *, uint16_t, bool);

static void igb_isc_rxd_refill(void *, if_rxd_update_t);

static void igb_isc_rxd_flush(void *, uint16_t, uint8_t, qidx_t);
static int igb_isc_rxd_available(void *, uint16_t, qidx_t, qidx_t);

static int igb_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);

static int igb_tx_ctx_setup(struct tx_ring *, if_pkt_info_t, uint32_t *,
    uint32_t *);
static int igb_tso_setup(struct tx_ring *, if_pkt_info_t, uint32_t *,
    uint32_t *);

static void igb_rx_checksum(uint32_t, if_rxd_info_t, uint32_t);
static int igb_determine_rsstype(uint16_t);

extern void igb_if_enable_intr(if_ctx_t);
extern int em_intr(void *);

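/*
 * TX/RX operations handed to iflib; it invokes these callbacks to
 * encapsulate and flush transmit descriptors, reclaim completed ones,
 * and to refill, flush and harvest receive descriptors.
 */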
struct if_txrx igb_txrx = {
	.ift_txd_encap = igb_isc_txd_encap,
	.ift_txd_flush = igb_isc_txd_flush,
	.ift_txd_credits_update = igb_isc_txd_credits_update,
	.ift_rxd_available = igb_isc_rxd_available,
	.ift_rxd_pkt_get = igb_isc_rxd_pkt_get,
	.ift_rxd_refill = igb_isc_rxd_refill,
	.ift_rxd_flush = igb_isc_rxd_flush,
	.ift_legacy_intr = em_intr
};

/**********************************************************************
 *
 *  Setup work for hardware segmentation offload (TSO) on
 *  adapters using advanced tx descriptors
 *
 **********************************************************************/
static int
igb_tso_setup(struct tx_ring *txr, if_pkt_info_t pi, uint32_t *cmd_type_len,
    uint32_t *olinfo_status)
{
	struct e1000_adv_tx_context_desc *TXD;
	struct e1000_softc *sc = txr->sc;
	uint32_t type_tucmd_mlhl = 0, vlan_macip_lens = 0;
	uint32_t mss_l4len_idx = 0;
	uint32_t paylen;

	switch (pi->ipi_etype) {
	case ETHERTYPE_IPV6:
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
		break;
	case ETHERTYPE_IP:
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
		/* Tell transmit desc to also do IPv4 checksum. */
		*olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
		break;
	default:
		panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
		    __func__, ntohs(pi->ipi_etype));
		break;
	}

	TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[pi->ipi_pidx];

	/*
	 * TSO payload length (frame length minus L2, L3 and L4 headers);
	 * encap places it in the data descriptor.
	 */
	paylen = pi->ipi_len - pi->ipi_ehdrlen - pi->ipi_ip_hlen -
	    pi->ipi_tcp_hlen;

	/* VLAN MACLEN IPLEN */
	if (pi->ipi_mflags & M_VLANTAG) {
		vlan_macip_lens |= (pi->ipi_vtag << E1000_ADVTXD_VLAN_SHIFT);
	}

	vlan_macip_lens |= pi->ipi_ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= pi->ipi_ip_hlen;
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);

	/* ADV DTYPE TUCMD */
	type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
	type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);

	/* MSS L4LEN IDX */
	mss_l4len_idx |= (pi->ipi_tso_segsz << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (pi->ipi_tcp_hlen << E1000_ADVTXD_L4LEN_SHIFT);
	/* 82575 needs the queue index added */
	if (sc->hw.mac.type == e1000_82575)
		mss_l4len_idx |= txr->me << 4;
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	TXD->u.seqnum_seed = htole32(0);
	*cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
	*olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	*olinfo_status |= paylen << E1000_ADVTXD_PAYLEN_SHIFT;

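	/*
	 * The context descriptor consumed one ring slot; the caller
	 * advances its producer index by the value returned here.
	 */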
	return (1);
}

/*********************************************************************
 *
 *  Advanced Context Descriptor setup for VLAN, CSUM or TSO
 *
 **********************************************************************/
static int
igb_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi,
    uint32_t *cmd_type_len, uint32_t *olinfo_status)
{
	struct e1000_adv_tx_context_desc *TXD;
	struct e1000_softc *sc = txr->sc;
	uint32_t vlan_macip_lens, type_tucmd_mlhl;
	uint32_t mss_l4len_idx;

	mss_l4len_idx = vlan_macip_lens = type_tucmd_mlhl = 0;

	/* First check if TSO is to be used */
	if (pi->ipi_csum_flags & CSUM_TSO)
		return (igb_tso_setup(txr, pi, cmd_type_len, olinfo_status));

	/* Indicate the whole packet as payload when not doing TSO */
	*olinfo_status |= pi->ipi_len << E1000_ADVTXD_PAYLEN_SHIFT;

	/* Now ready a context descriptor */
	TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[pi->ipi_pidx];

	/*
	 * In advanced descriptors the vlan tag must
	 * be placed into the context descriptor. Hence
	 * we need to make one even if not doing offloads.
	 */
	if (pi->ipi_mflags & M_VLANTAG) {
		vlan_macip_lens |= (pi->ipi_vtag << E1000_ADVTXD_VLAN_SHIFT);
	} else if ((pi->ipi_csum_flags & IGB_CSUM_OFFLOAD) == 0) {
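		/* No VLAN tag and no offload: skip the context descriptor. */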
		return (0);
	}

	/* Set the ether header length */
	vlan_macip_lens |= pi->ipi_ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;

	switch (pi->ipi_etype) {
	case ETHERTYPE_IP:
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
		break;
	case ETHERTYPE_IPV6:
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
		break;
	default:
		break;
	}

	vlan_macip_lens |= pi->ipi_ip_hlen;
	type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;

	switch (pi->ipi_ipproto) {
	case IPPROTO_TCP:
		if (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP)) {
			type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
			*olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
		}
		break;
	case IPPROTO_UDP:
		if (pi->ipi_csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP)) {
			type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP;
			*olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
		}
		break;
	case IPPROTO_SCTP:
		if (pi->ipi_csum_flags & (CSUM_IP_SCTP | CSUM_IP6_SCTP)) {
			type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP;
			*olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
		}
		break;
	default:
		break;
	}

	/* 82575 needs the queue index added */
	if (sc->hw.mac.type == e1000_82575)
		mss_l4len_idx = txr->me << 4;

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->u.seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	return (1);
}

static int
igb_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
	struct e1000_softc *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	struct em_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx];
	struct tx_ring *txr = &que->txr;
	int nsegs = pi->ipi_nsegs;
	bus_dma_segment_t *segs = pi->ipi_segs;
	union e1000_adv_tx_desc *txd = NULL;
	int i, j, pidx_last;
	uint32_t olinfo_status, cmd_type_len, txd_flags;
	qidx_t ntxd;

	pidx_last = olinfo_status = 0;
	/* Basic descriptor defines */
	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
	    E1000_ADVTXD_DCMD_DEXT);

	if (pi->ipi_mflags & M_VLANTAG)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	i = pi->ipi_pidx;
	ntxd = scctx->isc_ntxd[0];
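	/* Request a Report Status writeback only when iflib wants a TX
	 * completion for this packet. */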
	txd_flags = pi->ipi_flags & IPI_TX_INTR ? E1000_ADVTXD_DCMD_RS : 0;
	/* Consume the first descriptor */
	i += igb_tx_ctx_setup(txr, pi, &cmd_type_len, &olinfo_status);
	if (i == scctx->isc_ntxd[0])
		i = 0;

	/* 82575 needs the queue index added */
	if (sc->hw.mac.type == e1000_82575)
		olinfo_status |= txr->me << 4;

	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;
		bus_addr_t segaddr;

		txd = (union e1000_adv_tx_desc *)&txr->tx_base[i];
		seglen = segs[j].ds_len;
		segaddr = htole64(segs[j].ds_addr);

		txd->read.buffer_addr = segaddr;
		txd->read.cmd_type_len = htole32(E1000_TXD_CMD_IFCS |
		    cmd_type_len | seglen);
		txd->read.olinfo_status = htole32(olinfo_status);
		pidx_last = i;
		if (++i == scctx->isc_ntxd[0]) {
			i = 0;
		}
	}
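	/*
	 * Remember which slot carries the RS bit so that
	 * igb_isc_txd_credits_update() knows where to look for the DD
	 * writeback.
	 */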
	if (txd_flags) {
		txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
		txr->tx_rs_pidx = (txr->tx_rs_pidx + 1) & (ntxd - 1);
		MPASS(txr->tx_rs_pidx != txr->tx_rs_cidx);
	}

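	/* EOP (and RS, if requested) belong on the last data descriptor. */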
	txd->read.cmd_type_len |= htole32(E1000_TXD_CMD_EOP | txd_flags);
	pi->ipi_new_pidx = i;

	/* Sent data accounting for AIM (adaptive interrupt moderation) */
	txr->tx_bytes += pi->ipi_len;
	++txr->tx_packets;

	return (0);
}

static void
igb_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
	struct e1000_softc *sc = arg;
	struct em_tx_queue *que = &sc->tx_queues[txqid];
	struct tx_ring *txr = &que->txr;

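	/*
	 * Publish the new producer index; the NIC fetches descriptors
	 * up to, but not including, TDT.
	 */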
	E1000_WRITE_REG(&sc->hw, E1000_TDT(txr->me), pidx);
}

static int
igb_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
	struct e1000_softc *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	struct em_tx_queue *que = &sc->tx_queues[txqid];
	struct tx_ring *txr = &que->txr;

	qidx_t processed = 0;
	int updated;
	qidx_t cur, prev, ntxd, rs_cidx;
	int32_t delta;
	uint8_t status;

	rs_cidx = txr->tx_rs_cidx;
	if (rs_cidx == txr->tx_rs_pidx)
		return (0);
	cur = txr->tx_rsq[rs_cidx];
	status = ((union e1000_adv_tx_desc *)&txr->tx_base[cur])->wb.status;
	updated = !!(status & E1000_TXD_STAT_DD);

	if (!updated)
		return (0);

	/*
	 * If clear is false, just let the caller know that there are
	 * descriptors to reclaim.
	 */
	if (!clear)
		return (1);

	prev = txr->tx_cidx_processed;
	ntxd = scctx->isc_ntxd[0];
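	/*
	 * Each tx_rsq entry names the last descriptor of one packet.
	 * A DD'd entry retires every descriptor since the previous one;
	 * the distance is computed modulo the ring size.
	 */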
	do {
		MPASS(prev != cur);
		delta = (int32_t)cur - (int32_t)prev;
		if (delta < 0)
			delta += ntxd;
		MPASS(delta > 0);

		processed += delta;
		prev = cur;
		rs_cidx = (rs_cidx + 1) & (ntxd - 1);
		if (rs_cidx == txr->tx_rs_pidx)
			break;
		cur = txr->tx_rsq[rs_cidx];
		status = ((union e1000_adv_tx_desc *)
		    &txr->tx_base[cur])->wb.status;
	} while ((status & E1000_TXD_STAT_DD));

	txr->tx_rs_cidx = rs_cidx;
	txr->tx_cidx_processed = prev;
	return (processed);
}

static void
igb_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
	struct e1000_softc *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	uint16_t rxqid = iru->iru_qsidx;
	struct em_rx_queue *que = &sc->rx_queues[rxqid];
	union e1000_adv_rx_desc *rxd;
	struct rx_ring *rxr = &que->rxr;
	uint64_t *paddrs;
	uint32_t next_pidx, pidx;
	uint16_t count;
	int i;

	paddrs = iru->iru_paddrs;
	pidx = iru->iru_pidx;
	count = iru->iru_count;

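	/*
	 * Only the buffer address needs rewriting: the descriptor status
	 * was zeroed when the previous completion was harvested in
	 * igb_isc_rxd_pkt_get().
	 */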
	for (i = 0, next_pidx = pidx; i < count; i++) {
		rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[next_pidx];

		rxd->read.pkt_addr = htole64(paddrs[i]);
		if (++next_pidx == scctx->isc_nrxd[0])
			next_pidx = 0;
	}
}

static void
igb_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused,
    qidx_t pidx)
{
	struct e1000_softc *sc = arg;
	struct em_rx_queue *que = &sc->rx_queues[rxqid];
	struct rx_ring *rxr = &que->rxr;

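	/* Hand the refilled descriptors to the NIC by advancing RDT. */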
	E1000_WRITE_REG(&sc->hw, E1000_RDT(rxr->me), pidx);
}

static int
igb_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
	struct e1000_softc *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	struct em_rx_queue *que = &sc->rx_queues[rxqid];
	struct rx_ring *rxr = &que->rxr;
	union e1000_adv_rx_desc *rxd;
	uint32_t staterr = 0;
	int cnt, i;

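	/*
	 * Walk the ring from idx, stopping at the first descriptor the
	 * hardware has not yet written back (DD clear).  Only EOP
	 * descriptors count against the budget, so a multi-fragment
	 * packet is reported as one unit of work.
	 */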
	for (cnt = 0, i = idx; cnt < scctx->isc_nrxd[0] && cnt <= budget;) {
		rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[i];
		staterr = le32toh(rxd->wb.upper.status_error);

		if ((staterr & E1000_RXD_STAT_DD) == 0)
			break;
		if (++i == scctx->isc_nrxd[0])
			i = 0;
		if (staterr & E1000_RXD_STAT_EOP)
			cnt++;
	}
	return (cnt);
}

/****************************************************************
 * Hand a packet that has been DMA'd into host memory up to the
 * stack, initializing the if_rxd_info structure as we go.
 *
 * Returns 0 upon success, errno on failure
 ***************************************************************/

static int
igb_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
	struct e1000_softc *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	struct em_rx_queue *que = &sc->rx_queues[ri->iri_qsidx];
	struct rx_ring *rxr = &que->rxr;
	union e1000_adv_rx_desc *rxd;

	uint16_t pkt_info, len;
	uint32_t ptype, staterr;
	int i, cidx;
	bool eop;

	staterr = i = 0;
	cidx = ri->iri_cidx;

	do {
		rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[cidx];
		staterr = le32toh(rxd->wb.upper.status_error);
		pkt_info = le16toh(rxd->wb.lower.lo_dword.hs_rss.pkt_info);

		MPASS((staterr & E1000_RXD_STAT_DD) != 0);

		len = le16toh(rxd->wb.upper.length);
		ptype =
		    le32toh(rxd->wb.lower.lo_dword.data) & IGB_PKTTYPE_MASK;

		ri->iri_len += len;
		rxr->rx_bytes += ri->iri_len;

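		/*
		 * Clear the status so a stale DD bit is not mistaken for
		 * a new completion once the ring wraps.
		 */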
		rxd->wb.upper.status_error = 0;
		eop = ((staterr & E1000_RXD_STAT_EOP) == E1000_RXD_STAT_EOP);

		/* Make sure bad packets are discarded */
		if (eop &&
		    ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) != 0)) {
			sc->dropped_pkts++;
			++rxr->rx_discarded;
			return (EBADMSG);
		}
		ri->iri_frags[i].irf_flid = 0;
		ri->iri_frags[i].irf_idx = cidx;
		ri->iri_frags[i].irf_len = len;

		if (++cidx == scctx->isc_nrxd[0])
			cidx = 0;
#ifdef notyet
		if (rxr->hdr_split == true) {
			ri->iri_frags[i].irf_flid = 1;
			ri->iri_frags[i].irf_idx = cidx;
			if (++cidx == scctx->isc_nrxd[0])
				cidx = 0;
		}
#endif
		i++;
	} while (!eop);

	rxr->rx_packets++;

	if ((scctx->isc_capenable & IFCAP_RXCSUM) != 0)
		igb_rx_checksum(staterr, ri, ptype);

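	/*
	 * Recover the hardware-stripped VLAN tag; on i350/i354 the tag
	 * of a loopback packet is reported in network byte order rather
	 * than little-endian.
	 */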
	if (staterr & E1000_RXD_STAT_VP) {
		if (((sc->hw.mac.type == e1000_i350) ||
		    (sc->hw.mac.type == e1000_i354)) &&
		    (staterr & E1000_RXDEXT_STATERR_LB))
			ri->iri_vtag = be16toh(rxd->wb.upper.vlan);
		else
			ri->iri_vtag = le16toh(rxd->wb.upper.vlan);
		ri->iri_flags |= M_VLANTAG;
	}

	ri->iri_flowid = le32toh(rxd->wb.lower.hi_dword.rss);
	ri->iri_rsstype = igb_determine_rsstype(pkt_info);
	ri->iri_nfrags = i;

	return (0);
}

/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of the checksum so that it
 *  doesn't spend time verifying it again.
 *
 *********************************************************************/
static void
igb_rx_checksum(uint32_t staterr, if_rxd_info_t ri, uint32_t ptype)
{
	uint16_t status = (uint16_t)staterr;
	uint8_t errors = (uint8_t)(staterr >> 24);

	if (__predict_false(status & E1000_RXD_STAT_IXSM))
		return;

	/* If there is a layer 3 or 4 error we are done */
	if (__predict_false(errors &
	    (E1000_RXD_ERR_IPE | E1000_RXD_ERR_TCPE)))
		return;

	/* IP Checksum Good */
	if (status & E1000_RXD_STAT_IPCS)
		ri->iri_csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID);

	/* Valid L4 checksum */
	if (__predict_true(status &
	    (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) {
		/* SCTP header present */
		if (__predict_false(
		    (ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 &&
		    (ptype & E1000_RXDADV_PKTTYPE_SCTP) != 0)) {
			ri->iri_csum_flags |= CSUM_SCTP_VALID;
		} else {
			ri->iri_csum_flags |=
			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
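			/*
			 * With CSUM_PSEUDO_HDR set, a csum_data of 0xffff
			 * tells the stack the L4 checksum already passed.
			 */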
			ri->iri_csum_data = htons(0xffff);
		}
	}
}

/********************************************************************
 *
 *  Parse the packet type to determine the appropriate hash
 *
 ******************************************************************/
static int
igb_determine_rsstype(uint16_t pkt_info)
{
	switch (pkt_info & E1000_RXDADV_RSSTYPE_MASK) {
	case E1000_RXDADV_RSSTYPE_IPV4_TCP:
		return (M_HASHTYPE_RSS_TCP_IPV4);
	case E1000_RXDADV_RSSTYPE_IPV4:
		return (M_HASHTYPE_RSS_IPV4);
	case E1000_RXDADV_RSSTYPE_IPV6_TCP:
		return (M_HASHTYPE_RSS_TCP_IPV6);
	case E1000_RXDADV_RSSTYPE_IPV6_EX:
		return (M_HASHTYPE_RSS_IPV6_EX);
	case E1000_RXDADV_RSSTYPE_IPV6:
		return (M_HASHTYPE_RSS_IPV6);
	case E1000_RXDADV_RSSTYPE_IPV6_TCP_EX:
		return (M_HASHTYPE_RSS_TCP_IPV6_EX);
	default:
		return (M_HASHTYPE_OPAQUE);
	}
}