/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2016 Matthew Macy <mmacy@mattmacy.io>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "if_em.h"

#include <net/rss_config.h>
#include <netinet/in_rss.h>

#ifdef VERBOSE_DEBUG
#define DPRINTF device_printf
#else
#define DPRINTF(...)
#endif

/*********************************************************************
 *  Local Function prototypes
 *********************************************************************/
static int igb_isc_txd_encap(void *, if_pkt_info_t);
static void igb_isc_txd_flush(void *, uint16_t, qidx_t);
static int igb_isc_txd_credits_update(void *, uint16_t, bool);

static void igb_isc_rxd_refill(void *, if_rxd_update_t);

static void igb_isc_rxd_flush(void *, uint16_t, uint8_t, qidx_t);
static int igb_isc_rxd_available(void *, uint16_t, qidx_t, qidx_t);

static int igb_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);

static int igb_tx_ctx_setup(struct tx_ring *, if_pkt_info_t, uint32_t *,
    uint32_t *);
static int igb_tso_setup(struct tx_ring *, if_pkt_info_t, uint32_t *,
    uint32_t *);

static void igb_rx_checksum(uint32_t, if_rxd_info_t, uint32_t);
static int igb_determine_rsstype(uint16_t);

extern void igb_if_enable_intr(if_ctx_t);
extern int em_intr(void *);

struct if_txrx igb_txrx = {
	.ift_txd_encap = igb_isc_txd_encap,
	.ift_txd_flush = igb_isc_txd_flush,
	.ift_txd_credits_update = igb_isc_txd_credits_update,
	.ift_rxd_available = igb_isc_rxd_available,
	.ift_rxd_pkt_get = igb_isc_rxd_pkt_get,
	.ift_rxd_refill = igb_isc_rxd_refill,
	.ift_rxd_flush = igb_isc_rxd_flush,
	.ift_legacy_intr = em_intr
};

/**********************************************************************
 *
 *  Setup work for hardware segmentation offload (TSO) on
 *  adapters using advanced tx descriptors
 *
 **********************************************************************/
static int
igb_tso_setup(struct tx_ring *txr, if_pkt_info_t pi, uint32_t *cmd_type_len,
    uint32_t *olinfo_status)
{
	struct e1000_adv_tx_context_desc *TXD;
	struct e1000_softc *sc = txr->sc;
	uint32_t type_tucmd_mlhl = 0, vlan_macip_lens = 0;
	uint32_t mss_l4len_idx = 0;
	uint32_t paylen;

	switch(pi->ipi_etype) {
	case ETHERTYPE_IPV6:
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
		break;
	case ETHERTYPE_IP:
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
		/* Tell transmit desc to also do IPv4 checksum. */
		*olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
		break;
	default:
		panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
		    __func__, ntohs(pi->ipi_etype));
		break;
	}

	TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[pi->ipi_pidx];

	/* This is used in the transmit desc in encap */
	paylen = pi->ipi_len - pi->ipi_ehdrlen - pi->ipi_ip_hlen -
	    pi->ipi_tcp_hlen;

	/* VLAN MACLEN IPLEN */
	if (pi->ipi_mflags & M_VLANTAG) {
		vlan_macip_lens |= (pi->ipi_vtag << E1000_ADVTXD_VLAN_SHIFT);
	}

	vlan_macip_lens |= pi->ipi_ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= pi->ipi_ip_hlen;
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);

	/* ADV DTYPE TUCMD */
	type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
	type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);

	/* MSS L4LEN IDX */
	mss_l4len_idx |= (pi->ipi_tso_segsz << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (pi->ipi_tcp_hlen << E1000_ADVTXD_L4LEN_SHIFT);
	/* 82575 needs the queue index added */
	if (sc->hw.mac.type == e1000_82575)
		mss_l4len_idx |= txr->me << 4;
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	TXD->u.seqnum_seed = htole32(0);
	*cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
	*olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	*olinfo_status |= paylen << E1000_ADVTXD_PAYLEN_SHIFT;

	return (1);
}

/*********************************************************************
 *
 *  Advanced Context Descriptor setup for VLAN, CSUM or TSO
 *
 **********************************************************************/
static int
igb_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi,
    uint32_t *cmd_type_len, uint32_t *olinfo_status)
{
	struct e1000_adv_tx_context_desc *TXD;
	struct e1000_softc *sc = txr->sc;
	uint32_t vlan_macip_lens, type_tucmd_mlhl;
	uint32_t mss_l4len_idx;
	mss_l4len_idx = vlan_macip_lens = type_tucmd_mlhl = 0;

	/* First check if TSO is to be used */
	if (pi->ipi_csum_flags & CSUM_TSO)
		return (igb_tso_setup(txr, pi, cmd_type_len, olinfo_status));

	/* Indicate the whole packet as payload when not doing TSO */
	*olinfo_status |= pi->ipi_len << E1000_ADVTXD_PAYLEN_SHIFT;

	/* Now ready a context descriptor */
	TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[pi->ipi_pidx];

	/*
	** In advanced descriptors the vlan tag must
	** be placed into the context descriptor. Hence
	** we need to make one even if not doing offloads.
	*/
	if (pi->ipi_mflags & M_VLANTAG) {
		vlan_macip_lens |= (pi->ipi_vtag << E1000_ADVTXD_VLAN_SHIFT);
	} else if ((pi->ipi_csum_flags & IGB_CSUM_OFFLOAD) == 0) {
		return (0);
	}

	/* Set the ether header length */
	vlan_macip_lens |= pi->ipi_ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;

	switch(pi->ipi_etype) {
	case ETHERTYPE_IP:
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
		break;
	case ETHERTYPE_IPV6:
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
		break;
	default:
		break;
	}

	vlan_macip_lens |= pi->ipi_ip_hlen;
	type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;

	switch (pi->ipi_ipproto) {
	case IPPROTO_TCP:
		if (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP)) {
			type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
			*olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
		}
		break;
	case IPPROTO_UDP:
		if (pi->ipi_csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP)) {
			type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP;
			*olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
		}
		break;
	case IPPROTO_SCTP:
		if (pi->ipi_csum_flags & (CSUM_IP_SCTP | CSUM_IP6_SCTP)) {
			type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP;
			*olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
		}
		break;
	default:
		break;
	}

	/* 82575 needs the queue index added */
	if (sc->hw.mac.type == e1000_82575)
		mss_l4len_idx = txr->me << 4;

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->u.seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	return (1);
}

static int
igb_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
	struct e1000_softc *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	struct em_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx];
	struct tx_ring *txr = &que->txr;
	int nsegs = pi->ipi_nsegs;
	bus_dma_segment_t *segs = pi->ipi_segs;
	union e1000_adv_tx_desc *txd = NULL;
	int i, j, pidx_last;
	uint32_t olinfo_status, cmd_type_len, txd_flags;
	qidx_t ntxd;

	pidx_last = olinfo_status = 0;
	/* Basic descriptor defines */
	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
	    E1000_ADVTXD_DCMD_DEXT);

	if (pi->ipi_mflags & M_VLANTAG)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	i = pi->ipi_pidx;
	ntxd = scctx->isc_ntxd[0];
	txd_flags = pi->ipi_flags & IPI_TX_INTR ? E1000_ADVTXD_DCMD_RS : 0;
	/* Consume the first descriptor */
	i += igb_tx_ctx_setup(txr, pi, &cmd_type_len, &olinfo_status);
	if (i == scctx->isc_ntxd[0])
		i = 0;

	/* 82575 needs the queue index added */
	if (sc->hw.mac.type == e1000_82575)
		olinfo_status |= txr->me << 4;

	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;
		bus_addr_t segaddr;

		txd = (union e1000_adv_tx_desc *)&txr->tx_base[i];
		seglen = segs[j].ds_len;
		segaddr = htole64(segs[j].ds_addr);

		txd->read.buffer_addr = segaddr;
		txd->read.cmd_type_len = htole32(E1000_TXD_CMD_IFCS |
		    cmd_type_len | seglen);
		txd->read.olinfo_status = htole32(olinfo_status);
		pidx_last = i;
		if (++i == scctx->isc_ntxd[0]) {
			i = 0;
		}
	}
	if (txd_flags) {
		txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
		txr->tx_rs_pidx = (txr->tx_rs_pidx+1) & (ntxd-1);
		MPASS(txr->tx_rs_pidx != txr->tx_rs_cidx);
	}

	txd->read.cmd_type_len |= htole32(E1000_TXD_CMD_EOP | txd_flags);
	pi->ipi_new_pidx = i;

	/* Sent data accounting for AIM */
	txr->tx_bytes += pi->ipi_len;
	++txr->tx_packets;

	return (0);
}

static void
igb_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
	struct e1000_softc *sc = arg;
	struct em_tx_queue *que = &sc->tx_queues[txqid];
	struct tx_ring *txr = &que->txr;

	E1000_WRITE_REG(&sc->hw, E1000_TDT(txr->me), pidx);
}

static int
igb_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
	struct e1000_softc *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	struct em_tx_queue *que = &sc->tx_queues[txqid];
	struct tx_ring *txr = &que->txr;

	qidx_t processed = 0;
	int updated;
	qidx_t cur, prev, ntxd, rs_cidx;
	int32_t delta;
	uint8_t status;

	rs_cidx = txr->tx_rs_cidx;
	if (rs_cidx == txr->tx_rs_pidx)
		return (0);
	cur = txr->tx_rsq[rs_cidx];
	status = ((union e1000_adv_tx_desc *)&txr->tx_base[cur])->wb.status;
	updated = !!(status & E1000_TXD_STAT_DD);

	if (!updated)
		return (0);

	/* If clear is false just let caller know that there
	 * are descriptors to reclaim */
	if (!clear)
		return (1);

	prev = txr->tx_cidx_processed;
	ntxd = scctx->isc_ntxd[0];
	do {
		MPASS(prev != cur);
		delta = (int32_t)cur - (int32_t)prev;
		if (delta < 0)
			delta += ntxd;
		MPASS(delta > 0);

		processed += delta;
		prev = cur;
		rs_cidx = (rs_cidx + 1) & (ntxd-1);
		if (rs_cidx == txr->tx_rs_pidx)
			break;
		cur = txr->tx_rsq[rs_cidx];
		status = ((union e1000_adv_tx_desc *)
		    &txr->tx_base[cur])->wb.status;
	} while ((status & E1000_TXD_STAT_DD));

	txr->tx_rs_cidx = rs_cidx;
	txr->tx_cidx_processed = prev;
	return (processed);
}

static void
igb_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
	struct e1000_softc *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	uint16_t rxqid = iru->iru_qsidx;
	struct em_rx_queue *que = &sc->rx_queues[rxqid];
	union e1000_adv_rx_desc *rxd;
	struct rx_ring *rxr = &que->rxr;
	uint64_t *paddrs;
	uint32_t next_pidx, pidx;
	uint16_t count;
	int i;

	paddrs = iru->iru_paddrs;
	pidx = iru->iru_pidx;
	count = iru->iru_count;

	for (i = 0, next_pidx = pidx; i < count; i++) {
		rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[next_pidx];

		rxd->read.pkt_addr = htole64(paddrs[i]);
		if (++next_pidx == scctx->isc_nrxd[0])
			next_pidx = 0;
	}
}
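
/*
 * Notify the hardware of newly refilled receive descriptors by moving
 * the ring's Receive Descriptor Tail (RDT) register to pidx.
 */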
static void
igb_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused,
    qidx_t pidx)
{
	struct e1000_softc *sc = arg;
	struct em_rx_queue *que = &sc->rx_queues[rxqid];
	struct rx_ring *rxr = &que->rxr;

	E1000_WRITE_REG(&sc->hw, E1000_RDT(rxr->me), pidx);
}

static int
igb_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
	struct e1000_softc *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	struct em_rx_queue *que = &sc->rx_queues[rxqid];
	struct rx_ring *rxr = &que->rxr;
	union e1000_adv_rx_desc *rxd;
	uint32_t staterr = 0;
	int cnt, i;

	for (cnt = 0, i = idx; cnt < scctx->isc_nrxd[0] && cnt <= budget;) {
		rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[i];
		staterr = le32toh(rxd->wb.upper.status_error);

		if ((staterr & E1000_RXD_STAT_DD) == 0)
			break;
		if (++i == scctx->isc_nrxd[0])
			i = 0;
		if (staterr & E1000_RXD_STAT_EOP)
			cnt++;
	}
	return (cnt);
}

/****************************************************************
 * Routine sends data which has been dma'ed into host memory
 * to upper layer. Initialize ri structure.
 *
 * Returns 0 upon success, errno on failure
 ***************************************************************/

static int
igb_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
	struct e1000_softc *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	struct em_rx_queue *que = &sc->rx_queues[ri->iri_qsidx];
	struct rx_ring *rxr = &que->rxr;
	union e1000_adv_rx_desc *rxd;

	uint16_t pkt_info, len;
	uint32_t ptype, staterr;
	int i, cidx;
	bool eop;

	staterr = i = 0;
	cidx = ri->iri_cidx;

	do {
		rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[cidx];
		staterr = le32toh(rxd->wb.upper.status_error);
		pkt_info = le16toh(rxd->wb.lower.lo_dword.hs_rss.pkt_info);

		MPASS ((staterr & E1000_RXD_STAT_DD) != 0);

		len = le16toh(rxd->wb.upper.length);
		ptype =
		    le32toh(rxd->wb.lower.lo_dword.data) & IGB_PKTTYPE_MASK;

		ri->iri_len += len;
		rxr->rx_bytes += ri->iri_len;

		rxd->wb.upper.status_error = 0;
		eop = ((staterr & E1000_RXD_STAT_EOP) == E1000_RXD_STAT_EOP);

		/* Make sure bad packets are discarded */
		if (eop &&
		    ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) != 0)) {
			sc->dropped_pkts++;
			++rxr->rx_discarded;
			return (EBADMSG);
		}
		ri->iri_frags[i].irf_flid = 0;
		ri->iri_frags[i].irf_idx = cidx;
		ri->iri_frags[i].irf_len = len;

		if (++cidx == scctx->isc_nrxd[0])
			cidx = 0;
#ifdef notyet
		if (rxr->hdr_split == true) {
			ri->iri_frags[i].irf_flid = 1;
			ri->iri_frags[i].irf_idx = cidx;
			if (++cidx == scctx->isc_nrxd[0])
				cidx = 0;
		}
#endif
		i++;
	} while (!eop);

	rxr->rx_packets++;

	if ((scctx->isc_capenable & IFCAP_RXCSUM) != 0)
		igb_rx_checksum(staterr, ri, ptype);

	if (staterr & E1000_RXD_STAT_VP) {
		if (((sc->hw.mac.type == e1000_i350) ||
		    (sc->hw.mac.type == e1000_i354)) &&
		    (staterr & E1000_RXDEXT_STATERR_LB))
			ri->iri_vtag = be16toh(rxd->wb.upper.vlan);
		else
			ri->iri_vtag = le16toh(rxd->wb.upper.vlan);
		ri->iri_flags |= M_VLANTAG;
	}

	ri->iri_flowid =
	    le32toh(rxd->wb.lower.hi_dword.rss);
	ri->iri_rsstype = igb_determine_rsstype(pkt_info);
	ri->iri_nfrags = i;

	return (0);
}

/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of checksum so that stack
 *  doesn't spend time verifying the checksum.
 *
 *********************************************************************/
static void
igb_rx_checksum(uint32_t staterr, if_rxd_info_t ri, uint32_t ptype)
{
	uint16_t status = (uint16_t)staterr;
	uint8_t errors = (uint8_t)(staterr >> 24);

	if (__predict_false(status & E1000_RXD_STAT_IXSM))
		return;

	/* If there is a layer 3 or 4 error we are done */
	if (__predict_false(errors &
	    (E1000_RXD_ERR_IPE | E1000_RXD_ERR_TCPE)))
		return;

	/* IP Checksum Good */
	if (status & E1000_RXD_STAT_IPCS)
		ri->iri_csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID);

	/* Valid L4E checksum */
	if (__predict_true(status &
	    (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) {
		/* SCTP header present */
		if (__predict_false(
		    (ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 &&
		    (ptype & E1000_RXDADV_PKTTYPE_SCTP) != 0)) {
			ri->iri_csum_flags |= CSUM_SCTP_VALID;
		} else {
			ri->iri_csum_flags |=
			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			ri->iri_csum_data = htons(0xffff);
		}
	}
}

/********************************************************************
 *
 *  Parse the packet type to determine the appropriate hash
 *
 ******************************************************************/
static int
igb_determine_rsstype(uint16_t pkt_info)
{
	switch (pkt_info & E1000_RXDADV_RSSTYPE_MASK) {
	case E1000_RXDADV_RSSTYPE_IPV4_TCP:
		return M_HASHTYPE_RSS_TCP_IPV4;
	case E1000_RXDADV_RSSTYPE_IPV4:
		return M_HASHTYPE_RSS_IPV4;
	case E1000_RXDADV_RSSTYPE_IPV6_TCP:
		return M_HASHTYPE_RSS_TCP_IPV6;
	case E1000_RXDADV_RSSTYPE_IPV6_EX:
		return M_HASHTYPE_RSS_IPV6_EX;
	case E1000_RXDADV_RSSTYPE_IPV6:
		return M_HASHTYPE_RSS_IPV6;
	case E1000_RXDADV_RSSTYPE_IPV6_TCP_EX:
		return M_HASHTYPE_RSS_TCP_IPV6_EX;
	default:
		return M_HASHTYPE_OPAQUE;
	}
}