/*-
 * Copyright (c) 2016 Matthew Macy <mmacy@mattmacy.io>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* $FreeBSD$ */
#include "if_em.h"

#ifdef RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#endif

#ifdef VERBOSE_DEBUG
#define DPRINTF device_printf
#else
#define DPRINTF(...)
#endif

/*********************************************************************
 *  Local Function prototypes
 *********************************************************************/
static int igb_isc_txd_encap(void *arg, if_pkt_info_t pi);
static void igb_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
static int igb_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear);

static void igb_isc_rxd_refill(void *arg, if_rxd_update_t iru);

static void igb_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx);
static int igb_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget);

static int igb_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);

static int igb_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi, u32 *cmd_type_len, u32 *olinfo_status);
static int igb_tso_setup(struct tx_ring *txr, if_pkt_info_t pi, u32 *cmd_type_len, u32 *olinfo_status);

static void igb_rx_checksum(u32 staterr, if_rxd_info_t ri, u32 ptype);
static int igb_determine_rsstype(u16 pkt_info);

extern void igb_if_enable_intr(if_ctx_t ctx);
extern int em_intr(void *arg);

struct if_txrx igb_txrx = {
	.ift_txd_encap = igb_isc_txd_encap,
	.ift_txd_flush = igb_isc_txd_flush,
	.ift_txd_credits_update = igb_isc_txd_credits_update,
	.ift_rxd_available = igb_isc_rxd_available,
	.ift_rxd_pkt_get = igb_isc_rxd_pkt_get,
	.ift_rxd_refill = igb_isc_rxd_refill,
	.ift_rxd_flush = igb_isc_rxd_flush,
	.ift_legacy_intr = em_intr
};
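/*
 * iflib drives the entire igb-class (82575 and newer) datapath through
 * the ops table above; the legacy interrupt handler is shared with the
 * em datapath via em_intr().  As a rough sketch of the assumed wiring
 * (the exact code lives in if_em.c's attach path):
 *
 *	if (adapter->hw.mac.type >= igb_mac_min)
 *		scctx->isc_txrx = &igb_txrx;
 */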
/**********************************************************************
 *
 *  Setup work for hardware segmentation offload (TSO) on
 *  adapters using advanced tx descriptors
 *
 **********************************************************************/
static int
igb_tso_setup(struct tx_ring *txr, if_pkt_info_t pi, u32 *cmd_type_len, u32 *olinfo_status)
{
	struct e1000_adv_tx_context_desc *TXD;
	struct adapter *adapter = txr->adapter;
	u32 type_tucmd_mlhl = 0, vlan_macip_lens = 0;
	u32 mss_l4len_idx = 0;
	u32 paylen;

	switch (pi->ipi_etype) {
	case ETHERTYPE_IPV6:
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
		break;
	case ETHERTYPE_IP:
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
		/* Tell transmit desc to also do IPv4 checksum. */
		*olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
		break;
	default:
		panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
		    __func__, ntohs(pi->ipi_etype));
		break;
	}

	TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[pi->ipi_pidx];

	/* This is used in the transmit desc in encap */
	paylen = pi->ipi_len - pi->ipi_ehdrlen - pi->ipi_ip_hlen - pi->ipi_tcp_hlen;

	/* VLAN MACLEN IPLEN */
	if (pi->ipi_mflags & M_VLANTAG) {
		vlan_macip_lens |= (pi->ipi_vtag << E1000_ADVTXD_VLAN_SHIFT);
	}

	vlan_macip_lens |= pi->ipi_ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= pi->ipi_ip_hlen;
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);

	/* ADV DTYPE TUCMD */
	type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
	type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);

	/* MSS L4LEN IDX */
	mss_l4len_idx |= (pi->ipi_tso_segsz << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (pi->ipi_tcp_hlen << E1000_ADVTXD_L4LEN_SHIFT);
	/* 82575 needs the queue index added */
	if (adapter->hw.mac.type == e1000_82575)
		mss_l4len_idx |= txr->me << 4;
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	TXD->seqnum_seed = htole32(0);
	*cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
	*olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	*olinfo_status |= paylen << E1000_ADVTXD_PAYLEN_SHIFT;

	return (1);
}
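/*
 * Note on the return value above: igb_tso_setup() consumes one slot in
 * the descriptor ring for the context descriptor, and the caller
 * (igb_isc_txd_encap) advances its producer index by that amount.
 * Also note that for TSO the PAYLEN reported to the hardware is the
 * TCP payload only (total length minus L2/L3/L4 headers), whereas the
 * non-TSO path below reports the whole packet length.
 */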
/*********************************************************************
 *
 *  Advanced Context Descriptor setup for VLAN, CSUM or TSO
 *
 **********************************************************************/
static int
igb_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi, u32 *cmd_type_len, u32 *olinfo_status)
{
	struct e1000_adv_tx_context_desc *TXD;
	struct adapter *adapter = txr->adapter;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
	u32 mss_l4len_idx = 0;

	/* First check if TSO is to be used */
	if (pi->ipi_csum_flags & CSUM_TSO)
		return (igb_tso_setup(txr, pi, cmd_type_len, olinfo_status));

	/* Indicate the whole packet as payload when not doing TSO */
	*olinfo_status |= pi->ipi_len << E1000_ADVTXD_PAYLEN_SHIFT;

	/* Now ready a context descriptor */
	TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[pi->ipi_pidx];

	/*
	 * In advanced descriptors the vlan tag must
	 * be placed into the context descriptor. Hence
	 * we need to make one even if not doing offloads.
	 */
	if (pi->ipi_mflags & M_VLANTAG) {
		vlan_macip_lens |= (pi->ipi_vtag << E1000_ADVTXD_VLAN_SHIFT);
	} else if ((pi->ipi_csum_flags & IGB_CSUM_OFFLOAD) == 0) {
		return (0);
	}

	/* Set the ether header length */
	vlan_macip_lens |= pi->ipi_ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;

	switch (pi->ipi_etype) {
	case ETHERTYPE_IP:
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
		break;
	case ETHERTYPE_IPV6:
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
		break;
	default:
		break;
	}

	vlan_macip_lens |= pi->ipi_ip_hlen;
	type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;

	switch (pi->ipi_ipproto) {
	case IPPROTO_TCP:
		if (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP)) {
			type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
			*olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
		}
		break;
	case IPPROTO_UDP:
		if (pi->ipi_csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP)) {
			type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP;
			*olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
		}
		break;
	case IPPROTO_SCTP:
		if (pi->ipi_csum_flags & (CSUM_IP_SCTP | CSUM_IP6_SCTP)) {
			type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP;
			*olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
		}
		break;
	default:
		break;
	}

	/* 82575 needs the queue index added */
	if (adapter->hw.mac.type == e1000_82575)
		mss_l4len_idx = txr->me << 4;

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	return (1);
}
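/*
 * igb_isc_txd_encap() below writes one advanced data descriptor per
 * DMA segment, wrapping the producer index at isc_ntxd[0].  iflib is
 * assumed to size the ring as a power of two, which is what makes the
 * "& (ntxd - 1)" masking on the RS-slot ring valid.  When iflib asks
 * for a completion interrupt (IPI_TX_INTR), the index of the packet's
 * last descriptor is remembered in tx_rsq[] so that
 * igb_isc_txd_credits_update() only has to poll descriptors that
 * carry the RS (Report Status) bit.
 */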
static int
igb_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
	struct adapter *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	struct em_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx];
	struct tx_ring *txr = &que->txr;
	int nsegs = pi->ipi_nsegs;
	bus_dma_segment_t *segs = pi->ipi_segs;
	union e1000_adv_tx_desc *txd = NULL;
	int i, j, pidx_last;
	u32 olinfo_status, cmd_type_len, txd_flags;
	qidx_t ntxd;

	pidx_last = olinfo_status = 0;
	/* Basic descriptor defines */
	cmd_type_len = (E1000_ADVTXD_DTYP_DATA |
	    E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT);

	if (pi->ipi_mflags & M_VLANTAG)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	i = pi->ipi_pidx;
	ntxd = scctx->isc_ntxd[0];
	txd_flags = pi->ipi_flags & IPI_TX_INTR ? E1000_ADVTXD_DCMD_RS : 0;
	/* Consume the first descriptor */
	i += igb_tx_ctx_setup(txr, pi, &cmd_type_len, &olinfo_status);
	if (i == ntxd)
		i = 0;

	/* 82575 needs the queue index added */
	if (sc->hw.mac.type == e1000_82575)
		olinfo_status |= txr->me << 4;

	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;
		bus_addr_t segaddr;

		txd = (union e1000_adv_tx_desc *)&txr->tx_base[i];
		seglen = segs[j].ds_len;
		segaddr = htole64(segs[j].ds_addr);

		txd->read.buffer_addr = segaddr;
		txd->read.cmd_type_len = htole32(E1000_TXD_CMD_IFCS |
		    cmd_type_len | seglen);
		txd->read.olinfo_status = htole32(olinfo_status);
		pidx_last = i;
		if (++i == ntxd)
			i = 0;
	}
	if (txd_flags) {
		txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
		txr->tx_rs_pidx = (txr->tx_rs_pidx + 1) & (ntxd - 1);
		MPASS(txr->tx_rs_pidx != txr->tx_rs_cidx);
	}

	txd->read.cmd_type_len |= htole32(E1000_TXD_CMD_EOP | txd_flags);
	pi->ipi_new_pidx = i;

	return (0);
}

static void
igb_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
	struct adapter *adapter = arg;
	struct em_tx_queue *que = &adapter->tx_queues[txqid];
	struct tx_ring *txr = &que->txr;

	E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), pidx);
}

static int
igb_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
	struct adapter *adapter = arg;
	if_softc_ctx_t scctx = adapter->shared;
	struct em_tx_queue *que = &adapter->tx_queues[txqid];
	struct tx_ring *txr = &que->txr;

	qidx_t processed = 0;
	int updated;
	qidx_t cur, prev, ntxd, rs_cidx;
	int32_t delta;
	uint8_t status;

	rs_cidx = txr->tx_rs_cidx;
	if (rs_cidx == txr->tx_rs_pidx)
		return (0);
	cur = txr->tx_rsq[rs_cidx];
	status = ((union e1000_adv_tx_desc *)&txr->tx_base[cur])->wb.status;
	updated = !!(status & E1000_TXD_STAT_DD);

	if (!updated)
		return (0);

	/*
	 * If clear is false just let caller know that there
	 * are descriptors to reclaim
	 */
	if (!clear)
		return (1);

	prev = txr->tx_cidx_processed;
	ntxd = scctx->isc_ntxd[0];
	do {
		MPASS(prev != cur);
		delta = (int32_t)cur - (int32_t)prev;
		if (delta < 0)
			delta += ntxd;
		MPASS(delta > 0);

		processed += delta;
		prev = cur;
		rs_cidx = (rs_cidx + 1) & (ntxd - 1);
		if (rs_cidx == txr->tx_rs_pidx)
			break;
		cur = txr->tx_rsq[rs_cidx];
		status = ((union e1000_adv_tx_desc *)&txr->tx_base[cur])->wb.status;
	} while ((status & E1000_TXD_STAT_DD));

	txr->tx_rs_cidx = rs_cidx;
	txr->tx_cidx_processed = prev;
	return (processed);
}

static void
igb_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
	struct adapter *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	uint16_t rxqid = iru->iru_qsidx;
	struct em_rx_queue *que = &sc->rx_queues[rxqid];
	union e1000_adv_rx_desc *rxd;
	struct rx_ring *rxr = &que->rxr;
	uint64_t *paddrs;
	uint32_t next_pidx, pidx;
	uint16_t count;
	int i;

	paddrs = iru->iru_paddrs;
	pidx = iru->iru_pidx;
	count = iru->iru_count;

	for (i = 0, next_pidx = pidx; i < count; i++) {
		rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[next_pidx];

		rxd->read.pkt_addr = htole64(paddrs[i]);
		if (++next_pidx == scctx->isc_nrxd[0])
			next_pidx = 0;
	}
}
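/*
 * igb_isc_rxd_refill() above only stages buffer physical addresses in
 * the descriptor ring; the hardware does not own the new descriptors
 * until igb_isc_rxd_flush() below writes the tail register (RDT).
 * iflib batches refills and issues one flush per batch.
 */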
static void
igb_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx)
{
	struct adapter *sc = arg;
	struct em_rx_queue *que = &sc->rx_queues[rxqid];
	struct rx_ring *rxr = &que->rxr;

	E1000_WRITE_REG(&sc->hw, E1000_RDT(rxr->me), pidx);
}

static int
igb_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
	struct adapter *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	struct em_rx_queue *que = &sc->rx_queues[rxqid];
	struct rx_ring *rxr = &que->rxr;
	union e1000_adv_rx_desc *rxd;
	u32 staterr = 0;
	int cnt, i;

	for (cnt = 0, i = idx; cnt < scctx->isc_nrxd[0] && cnt <= budget;) {
		rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[i];
		staterr = le32toh(rxd->wb.upper.status_error);

		if ((staterr & E1000_RXD_STAT_DD) == 0)
			break;
		if (++i == scctx->isc_nrxd[0])
			i = 0;
		if (staterr & E1000_RXD_STAT_EOP)
			cnt++;
	}
	return (cnt);
}

/****************************************************************
 * Routine passes data that has been DMA'ed into host memory
 * up to the upper layer, and initializes the ri structure.
 *
 * Returns 0 upon success, errno on failure
 ***************************************************************/

static int
igb_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
	struct adapter *adapter = arg;
	if_softc_ctx_t scctx = adapter->shared;
	struct em_rx_queue *que = &adapter->rx_queues[ri->iri_qsidx];
	struct rx_ring *rxr = &que->rxr;
	struct ifnet *ifp = iflib_get_ifp(adapter->ctx);
	union e1000_adv_rx_desc *rxd;

	u16 pkt_info, len;
	u16 vtag = 0;
	u32 ptype;
	u32 staterr = 0;
	bool eop;
	int i = 0;
	int cidx = ri->iri_cidx;

	do {
		rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[cidx];
		staterr = le32toh(rxd->wb.upper.status_error);
		pkt_info = le16toh(rxd->wb.lower.lo_dword.hs_rss.pkt_info);

		MPASS((staterr & E1000_RXD_STAT_DD) != 0);

		len = le16toh(rxd->wb.upper.length);
		ptype = le32toh(rxd->wb.lower.lo_dword.data) & IGB_PKTTYPE_MASK;

		ri->iri_len += len;

		rxd->wb.upper.status_error = 0;
		eop = ((staterr & E1000_RXD_STAT_EOP) == E1000_RXD_STAT_EOP);

		if (((adapter->hw.mac.type == e1000_i350) ||
		    (adapter->hw.mac.type == e1000_i354)) &&
		    (staterr & E1000_RXDEXT_STATERR_LB))
			vtag = be16toh(rxd->wb.upper.vlan);
		else
			vtag = le16toh(rxd->wb.upper.vlan);

		/* Make sure bad packets are discarded */
		if (eop && ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) != 0)) {
			adapter->dropped_pkts++;
			++rxr->rx_discarded;
			return (EBADMSG);
		}
		ri->iri_frags[i].irf_flid = 0;
		ri->iri_frags[i].irf_idx = cidx;
		ri->iri_frags[i].irf_len = len;

		if (++cidx == scctx->isc_nrxd[0])
			cidx = 0;
#ifdef notyet
		if (rxr->hdr_split == TRUE) {
			ri->iri_frags[i].irf_flid = 1;
			ri->iri_frags[i].irf_idx = cidx;
			if (++cidx == scctx->isc_nrxd[0])
				cidx = 0;
		}
#endif
		i++;
	} while (!eop);

	rxr->rx_packets++;
	/* Count the packet's bytes once, after all fragments are gathered */
	rxr->rx_bytes += ri->iri_len;

	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
		igb_rx_checksum(staterr, ri, ptype);

	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
	    (staterr & E1000_RXD_STAT_VP) != 0) {
		ri->iri_vtag = vtag;
		ri->iri_flags |= M_VLANTAG;
	}
	ri->iri_flowid = le32toh(rxd->wb.lower.hi_dword.rss);
	ri->iri_rsstype = igb_determine_rsstype(pkt_info);
	ri->iri_nfrags = i;

	return (0);
}
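/*
 * In the advanced RX descriptor write-back format used above, the low
 * 16 bits of status_error carry status flags (DD, EOP, VP, IXSM, ...)
 * while bits 24-31 carry the error flags (IPE, TCPE, ...); that layout
 * is why igb_rx_checksum() below splits staterr into a 16-bit status
 * and the high error byte.
 */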
/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of the checksum so that it
 *  doesn't spend time verifying it again.
 *
 *********************************************************************/
static void
igb_rx_checksum(u32 staterr, if_rxd_info_t ri, u32 ptype)
{
	u16 status = (u16)staterr;
	u8 errors = (u8)(staterr >> 24);
	bool sctp = false;

	/* Ignore Checksum bit is set */
	if (status & E1000_RXD_STAT_IXSM) {
		ri->iri_csum_flags = 0;
		return;
	}

	if ((ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 &&
	    (ptype & E1000_RXDADV_PKTTYPE_SCTP) != 0)
		sctp = true;

	if (status & E1000_RXD_STAT_IPCS) {
		/* Did it pass? */
		if (!(errors & E1000_RXD_ERR_IPE)) {
			/* IP Checksum Good */
			ri->iri_csum_flags = CSUM_IP_CHECKED;
			ri->iri_csum_flags |= CSUM_IP_VALID;
		} else
			ri->iri_csum_flags = 0;
	}

	if (status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) {
		u64 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
		if (sctp) /* reassign */
			type = CSUM_SCTP_VALID;
		/* Did it pass? */
		if (!(errors & E1000_RXD_ERR_TCPE)) {
			ri->iri_csum_flags |= type;
			if (!sctp)
				ri->iri_csum_data = htons(0xffff);
		}
	}
}

/********************************************************************
 *
 *  Parse the packet type to determine the appropriate hash
 *
 ******************************************************************/
static int
igb_determine_rsstype(u16 pkt_info)
{
	switch (pkt_info & E1000_RXDADV_RSSTYPE_MASK) {
	case E1000_RXDADV_RSSTYPE_IPV4_TCP:
		return M_HASHTYPE_RSS_TCP_IPV4;
	case E1000_RXDADV_RSSTYPE_IPV4:
		return M_HASHTYPE_RSS_IPV4;
	case E1000_RXDADV_RSSTYPE_IPV6_TCP:
		return M_HASHTYPE_RSS_TCP_IPV6;
	case E1000_RXDADV_RSSTYPE_IPV6_EX:
		return M_HASHTYPE_RSS_IPV6_EX;
	case E1000_RXDADV_RSSTYPE_IPV6:
		return M_HASHTYPE_RSS_IPV6;
	case E1000_RXDADV_RSSTYPE_IPV6_TCP_EX:
		return M_HASHTYPE_RSS_TCP_IPV6_EX;
	default:
		return M_HASHTYPE_OPAQUE;
	}
}
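/*
 * RSS types that the switch above does not map (for example the UDP
 * RSS types) fall through to M_HASHTYPE_OPAQUE, which tells the stack
 * that a hash value is present but of an unspecified type.
 */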