/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2016 Matthew Macy <mmacy@mattmacy.io>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "if_em.h"

#ifdef RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#endif

#ifdef VERBOSE_DEBUG
#define DPRINTF device_printf
#else
#define DPRINTF(...)
#endif

/*********************************************************************
 *  Local Function prototypes
 *********************************************************************/
static int igb_isc_txd_encap(void *arg, if_pkt_info_t pi);
static void igb_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
static int igb_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear);

static void igb_isc_rxd_refill(void *arg, if_rxd_update_t iru);

static void igb_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused,
    qidx_t pidx);
static int igb_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx,
    qidx_t budget);

static int igb_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);

static int igb_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi,
    uint32_t *cmd_type_len, uint32_t *olinfo_status);
static int igb_tso_setup(struct tx_ring *txr, if_pkt_info_t pi,
    uint32_t *cmd_type_len, uint32_t *olinfo_status);

static void igb_rx_checksum(uint32_t staterr, if_rxd_info_t ri, uint32_t ptype);
static int igb_determine_rsstype(uint16_t pkt_info);

extern void igb_if_enable_intr(if_ctx_t ctx);
extern int em_intr(void *arg);

struct if_txrx igb_txrx = {
	.ift_txd_encap = igb_isc_txd_encap,
	.ift_txd_flush = igb_isc_txd_flush,
	.ift_txd_credits_update = igb_isc_txd_credits_update,
	.ift_rxd_available = igb_isc_rxd_available,
	.ift_rxd_pkt_get = igb_isc_rxd_pkt_get,
	.ift_rxd_refill = igb_isc_rxd_refill,
	.ift_rxd_flush = igb_isc_rxd_flush,
	.ift_legacy_intr = em_intr
};

/**********************************************************************
 *
 *  Setup work for hardware segmentation offload (TSO) on
 *  adapters using advanced tx descriptors
 *
 **********************************************************************/
static int
igb_tso_setup(struct tx_ring *txr, if_pkt_info_t pi, uint32_t *cmd_type_len,
    uint32_t *olinfo_status)
{
	struct e1000_adv_tx_context_desc *TXD;
	struct e1000_softc *sc = txr->sc;
	uint32_t type_tucmd_mlhl = 0, vlan_macip_lens = 0;
	uint32_t mss_l4len_idx = 0;
	uint32_t paylen;

	switch(pi->ipi_etype) {
	case ETHERTYPE_IPV6:
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
		break;
	case ETHERTYPE_IP:
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
		/* Tell transmit desc to also do IPv4 checksum. */
		*olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
		break;
	default:
		panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
		    __func__, ntohs(pi->ipi_etype));
		break;
	}

	TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[pi->ipi_pidx];

	/* This is used in the transmit desc in encap */
	paylen = pi->ipi_len - pi->ipi_ehdrlen - pi->ipi_ip_hlen - pi->ipi_tcp_hlen;

	/* VLAN MACLEN IPLEN */
	if (pi->ipi_mflags & M_VLANTAG) {
		vlan_macip_lens |= (pi->ipi_vtag << E1000_ADVTXD_VLAN_SHIFT);
	}

	vlan_macip_lens |= pi->ipi_ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= pi->ipi_ip_hlen;
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);

	/* ADV DTYPE TUCMD */
	type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
	type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);

	/* MSS L4LEN IDX */
	mss_l4len_idx |= (pi->ipi_tso_segsz << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (pi->ipi_tcp_hlen << E1000_ADVTXD_L4LEN_SHIFT);
	/* 82575 needs the queue index added */
	if (sc->hw.mac.type == e1000_82575)
		mss_l4len_idx |= txr->me << 4;
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	TXD->u.seqnum_seed = htole32(0);
	*cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
	*olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	*olinfo_status |= paylen << E1000_ADVTXD_PAYLEN_SHIFT;

	return (1);
}

/*********************************************************************
 *
 *  Advanced Context Descriptor setup for VLAN, CSUM or TSO
 *
 **********************************************************************/
static int
igb_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi, uint32_t *cmd_type_len,
    uint32_t *olinfo_status)
{
	struct e1000_adv_tx_context_desc *TXD;
	struct e1000_softc *sc = txr->sc;
	uint32_t vlan_macip_lens, type_tucmd_mlhl;
	uint32_t mss_l4len_idx;
	mss_l4len_idx = vlan_macip_lens = type_tucmd_mlhl = 0;

	/* First check if TSO is to be used */
	if (pi->ipi_csum_flags & CSUM_TSO)
		return (igb_tso_setup(txr, pi, cmd_type_len, olinfo_status));

	/* Indicate the whole packet as payload when not doing TSO */
	*olinfo_status |= pi->ipi_len << E1000_ADVTXD_PAYLEN_SHIFT;

	/* Now ready a context descriptor */
	TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[pi->ipi_pidx];

	/*
	** In advanced descriptors the vlan tag must
	** be placed into the context descriptor. Hence
	** we need to make one even if not doing offloads.
	*/
	if (pi->ipi_mflags & M_VLANTAG) {
		vlan_macip_lens |= (pi->ipi_vtag << E1000_ADVTXD_VLAN_SHIFT);
	} else if ((pi->ipi_csum_flags & IGB_CSUM_OFFLOAD) == 0) {
		return (0);
	}

	/* Set the ether header length */
	vlan_macip_lens |= pi->ipi_ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;

	switch(pi->ipi_etype) {
	case ETHERTYPE_IP:
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
		break;
	case ETHERTYPE_IPV6:
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
		break;
	default:
		break;
	}

	vlan_macip_lens |= pi->ipi_ip_hlen;
	type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;

	switch (pi->ipi_ipproto) {
	case IPPROTO_TCP:
		if (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP)) {
			type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
			*olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
		}
		break;
	case IPPROTO_UDP:
		if (pi->ipi_csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP)) {
			type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP;
			*olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
		}
		break;
	case IPPROTO_SCTP:
		if (pi->ipi_csum_flags & (CSUM_IP_SCTP | CSUM_IP6_SCTP)) {
			type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP;
			*olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
		}
		break;
	default:
		break;
	}

	/* 82575 needs the queue index added */
	if (sc->hw.mac.type == e1000_82575)
		mss_l4len_idx = txr->me << 4;

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->u.seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	return (1);
}

static int
igb_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
	struct e1000_softc *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	struct em_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx];
	struct tx_ring *txr = &que->txr;
	int nsegs = pi->ipi_nsegs;
	bus_dma_segment_t *segs = pi->ipi_segs;
	union e1000_adv_tx_desc *txd = NULL;
	int i, j, pidx_last;
	uint32_t olinfo_status, cmd_type_len, txd_flags;
	qidx_t ntxd;

	pidx_last = olinfo_status = 0;
	/* Basic descriptor defines */
	cmd_type_len = (E1000_ADVTXD_DTYP_DATA |
	    E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT);

	if (pi->ipi_mflags & M_VLANTAG)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	i = pi->ipi_pidx;
	ntxd = scctx->isc_ntxd[0];
	txd_flags = pi->ipi_flags & IPI_TX_INTR ?
	    E1000_ADVTXD_DCMD_RS : 0;
	/* Consume the first descriptor */
	i += igb_tx_ctx_setup(txr, pi, &cmd_type_len, &olinfo_status);
	if (i == scctx->isc_ntxd[0])
		i = 0;

	/* 82575 needs the queue index added */
	if (sc->hw.mac.type == e1000_82575)
		olinfo_status |= txr->me << 4;

	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;
		bus_addr_t segaddr;

		txd = (union e1000_adv_tx_desc *)&txr->tx_base[i];
		seglen = segs[j].ds_len;
		segaddr = htole64(segs[j].ds_addr);

		txd->read.buffer_addr = segaddr;
		txd->read.cmd_type_len = htole32(E1000_TXD_CMD_IFCS |
		    cmd_type_len | seglen);
		txd->read.olinfo_status = htole32(olinfo_status);
		pidx_last = i;
		if (++i == scctx->isc_ntxd[0]) {
			i = 0;
		}
	}
	if (txd_flags) {
		txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
		txr->tx_rs_pidx = (txr->tx_rs_pidx+1) & (ntxd-1);
		MPASS(txr->tx_rs_pidx != txr->tx_rs_cidx);
	}

	txd->read.cmd_type_len |= htole32(E1000_TXD_CMD_EOP | txd_flags);
	pi->ipi_new_pidx = i;

	return (0);
}

static void
igb_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
	struct e1000_softc *sc = arg;
	struct em_tx_queue *que = &sc->tx_queues[txqid];
	struct tx_ring *txr = &que->txr;

	E1000_WRITE_REG(&sc->hw, E1000_TDT(txr->me), pidx);
}

static int
igb_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
	struct e1000_softc *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	struct em_tx_queue *que = &sc->tx_queues[txqid];
	struct tx_ring *txr = &que->txr;

	qidx_t processed = 0;
	int updated;
	qidx_t cur, prev, ntxd, rs_cidx;
	int32_t delta;
	uint8_t status;

	rs_cidx = txr->tx_rs_cidx;
	if (rs_cidx == txr->tx_rs_pidx)
		return (0);
	cur = txr->tx_rsq[rs_cidx];
	status = ((union e1000_adv_tx_desc *)&txr->tx_base[cur])->wb.status;
	updated = !!(status & E1000_TXD_STAT_DD);

	if (!updated)
		return (0);

	/* If clear is false just let caller know that there
	 * are descriptors to reclaim */
	if (!clear)
		return (1);

	prev = txr->tx_cidx_processed;
	ntxd = scctx->isc_ntxd[0];
	do {
		MPASS(prev != cur);
		delta = (int32_t)cur - (int32_t)prev;
		if (delta < 0)
			delta += ntxd;
		MPASS(delta > 0);

		processed += delta;
		prev = cur;
		rs_cidx = (rs_cidx + 1) & (ntxd-1);
		if (rs_cidx == txr->tx_rs_pidx)
			break;
		cur = txr->tx_rsq[rs_cidx];
		status = ((union e1000_adv_tx_desc *)&txr->tx_base[cur])->wb.status;
	} while ((status & E1000_TXD_STAT_DD));

	txr->tx_rs_cidx = rs_cidx;
	txr->tx_cidx_processed = prev;
	return (processed);
}

static void
igb_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
	struct e1000_softc *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	uint16_t rxqid = iru->iru_qsidx;
	struct em_rx_queue *que = &sc->rx_queues[rxqid];
	union e1000_adv_rx_desc *rxd;
	struct rx_ring *rxr = &que->rxr;
	uint64_t *paddrs;
	uint32_t next_pidx, pidx;
	uint16_t count;
	int i;

	paddrs = iru->iru_paddrs;
	pidx = iru->iru_pidx;
	count = iru->iru_count;

	for (i = 0, next_pidx = pidx; i < count; i++) {
		rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[next_pidx];

		rxd->read.pkt_addr = htole64(paddrs[i]);
		if (++next_pidx == scctx->isc_nrxd[0])
			next_pidx = 0;
	}
}

static void
igb_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx)
{
	struct e1000_softc *sc = arg;
	struct em_rx_queue *que = &sc->rx_queues[rxqid];
	struct rx_ring *rxr = &que->rxr;

	E1000_WRITE_REG(&sc->hw, E1000_RDT(rxr->me), pidx);
}

static int
igb_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
	struct e1000_softc *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	struct em_rx_queue *que = &sc->rx_queues[rxqid];
	struct rx_ring *rxr = &que->rxr;
	union e1000_adv_rx_desc *rxd;
	uint32_t staterr = 0;
	int cnt, i;

	for (cnt = 0, i = idx; cnt < scctx->isc_nrxd[0] && cnt <= budget;) {
		rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[i];
		staterr = le32toh(rxd->wb.upper.status_error);

		if ((staterr & E1000_RXD_STAT_DD) == 0)
			break;
		if (++i == scctx->isc_nrxd[0])
			i = 0;
		if (staterr & E1000_RXD_STAT_EOP)
			cnt++;
	}
	return (cnt);
}

/****************************************************************
 * Routine sends data which has been dma'ed into host memory
 * to upper layer. Initialize ri structure.
 *
 * Returns 0 upon success, errno on failure
 ***************************************************************/

static int
igb_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
	struct e1000_softc *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	struct em_rx_queue *que = &sc->rx_queues[ri->iri_qsidx];
	struct rx_ring *rxr = &que->rxr;
	union e1000_adv_rx_desc *rxd;

	uint16_t pkt_info, len;
	uint32_t ptype, staterr;
	int i, cidx;
	bool eop;

	staterr = i = 0;
	cidx = ri->iri_cidx;

	do {
		rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[cidx];
		staterr = le32toh(rxd->wb.upper.status_error);
		pkt_info = le16toh(rxd->wb.lower.lo_dword.hs_rss.pkt_info);

		MPASS ((staterr & E1000_RXD_STAT_DD) != 0);

		len = le16toh(rxd->wb.upper.length);
		ptype = le32toh(rxd->wb.lower.lo_dword.data) & IGB_PKTTYPE_MASK;

		ri->iri_len += len;
		rxr->rx_bytes += ri->iri_len;

		rxd->wb.upper.status_error = 0;
		eop = ((staterr & E1000_RXD_STAT_EOP) == E1000_RXD_STAT_EOP);

		/* Make sure bad packets are discarded */
		if (eop && ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) != 0)) {
			sc->dropped_pkts++;
			++rxr->rx_discarded;
			return (EBADMSG);
		}
		ri->iri_frags[i].irf_flid = 0;
		ri->iri_frags[i].irf_idx = cidx;
		ri->iri_frags[i].irf_len = len;

		if (++cidx == scctx->isc_nrxd[0])
			cidx = 0;
#ifdef notyet
		if (rxr->hdr_split == true) {
			ri->iri_frags[i].irf_flid = 1;
			ri->iri_frags[i].irf_idx = cidx;
			if (++cidx == scctx->isc_nrxd[0])
				cidx = 0;
		}
#endif
		i++;
	} while (!eop);

	rxr->rx_packets++;

	if ((scctx->isc_capenable & IFCAP_RXCSUM) != 0)
		igb_rx_checksum(staterr, ri, ptype);

	if (staterr & E1000_RXD_STAT_VP) {
		if (((sc->hw.mac.type == e1000_i350) ||
		    (sc->hw.mac.type == e1000_i354)) &&
		    (staterr & E1000_RXDEXT_STATERR_LB))
			ri->iri_vtag = be16toh(rxd->wb.upper.vlan);
		else
			ri->iri_vtag = le16toh(rxd->wb.upper.vlan);
		ri->iri_flags |= M_VLANTAG;
	}

	ri->iri_flowid =
	    le32toh(rxd->wb.lower.hi_dword.rss);
	ri->iri_rsstype = igb_determine_rsstype(pkt_info);
	ri->iri_nfrags = i;

	return (0);
}

/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of the checksum so that the
 *  stack doesn't spend time verifying the checksum.
 *
 *********************************************************************/
static void
igb_rx_checksum(uint32_t staterr, if_rxd_info_t ri, uint32_t ptype)
{
	uint16_t status = (uint16_t)staterr;
	uint8_t errors = (uint8_t)(staterr >> 24);

	if (__predict_false(status & E1000_RXD_STAT_IXSM))
		return;

	/* If there is a layer 3 or 4 error we are done */
	if (__predict_false(errors & (E1000_RXD_ERR_IPE | E1000_RXD_ERR_TCPE)))
		return;

	/* IP Checksum Good */
	if (status & E1000_RXD_STAT_IPCS)
		ri->iri_csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID);

	/* Valid L4E checksum */
	if (__predict_true(status &
	    (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) {
		/* SCTP header present */
		if (__predict_false((ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 &&
		    (ptype & E1000_RXDADV_PKTTYPE_SCTP) != 0)) {
			ri->iri_csum_flags |= CSUM_SCTP_VALID;
		} else {
			ri->iri_csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			ri->iri_csum_data = htons(0xffff);
		}
	}
}

/********************************************************************
 *
 *  Parse the packet type to determine the appropriate hash
 *
 ******************************************************************/
static int
igb_determine_rsstype(uint16_t pkt_info)
{
	switch (pkt_info & E1000_RXDADV_RSSTYPE_MASK) {
	case E1000_RXDADV_RSSTYPE_IPV4_TCP:
		return M_HASHTYPE_RSS_TCP_IPV4;
	case E1000_RXDADV_RSSTYPE_IPV4:
		return M_HASHTYPE_RSS_IPV4;
	case E1000_RXDADV_RSSTYPE_IPV6_TCP:
		return M_HASHTYPE_RSS_TCP_IPV6;
	case E1000_RXDADV_RSSTYPE_IPV6_EX:
		return M_HASHTYPE_RSS_IPV6_EX;
	case E1000_RXDADV_RSSTYPE_IPV6:
		return M_HASHTYPE_RSS_IPV6;
	case E1000_RXDADV_RSSTYPE_IPV6_TCP_EX:
		return M_HASHTYPE_RSS_TCP_IPV6_EX;
	default:
		return M_HASHTYPE_OPAQUE;
	}
}