/*-
 * Copyright (c) 2016 Matthew Macy <mmacy@mattmacy.io>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* $FreeBSD$ */
#include "if_em.h"

#ifdef RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#endif

#ifdef VERBOSE_DEBUG
#define DPRINTF device_printf
#else
#define DPRINTF(...)
#endif

/*********************************************************************
 *  Local Function prototypes
 *********************************************************************/
static int igb_isc_txd_encap(void *arg, if_pkt_info_t pi);
static void igb_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
static int igb_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear);

static void igb_isc_rxd_refill(void *arg, if_rxd_update_t iru);

static void igb_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx);
static int igb_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget);

static int igb_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);

static int igb_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi, u32 *cmd_type_len, u32 *olinfo_status);
static int igb_tso_setup(struct tx_ring *txr, if_pkt_info_t pi, u32 *cmd_type_len, u32 *olinfo_status);

static void igb_rx_checksum(u32 staterr, if_rxd_info_t ri, u32 ptype);
static int igb_determine_rsstype(u16 pkt_info);

extern void igb_if_enable_intr(if_ctx_t ctx);
extern int em_intr(void *arg);

struct if_txrx igb_txrx = {
        .ift_txd_encap = igb_isc_txd_encap,
        .ift_txd_flush = igb_isc_txd_flush,
        .ift_txd_credits_update = igb_isc_txd_credits_update,
        .ift_rxd_available = igb_isc_rxd_available,
        .ift_rxd_pkt_get = igb_isc_rxd_pkt_get,
        .ift_rxd_refill = igb_isc_rxd_refill,
        .ift_rxd_flush = igb_isc_rxd_flush,
        .ift_legacy_intr = em_intr
};

extern if_shared_ctx_t em_sctx;

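/*
 * How the table above is wired up (a sketch for reference; see the
 * attach path in if_em.c for the authoritative code): igb-class MACs
 * (82575 and later) select these routines roughly as
 *
 *	if (adapter->hw.mac.type >= igb_mac_min)
 *		scctx->isc_txrx = &igb_txrx;
 *
 * after which iflib drives all TX/RX work through the ift_* callbacks.
 */
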
/**********************************************************************
 *
 *  Setup work for hardware segmentation offload (TSO) on
 *  adapters using advanced tx descriptors
 *
 **********************************************************************/
static int
igb_tso_setup(struct tx_ring *txr, if_pkt_info_t pi,
    u32 *cmd_type_len, u32 *olinfo_status)
{
        struct e1000_adv_tx_context_desc *TXD;
        struct adapter *adapter = txr->adapter;
        u32 type_tucmd_mlhl = 0, vlan_macip_lens = 0;
        u32 mss_l4len_idx = 0;
        u32 paylen;

        switch(pi->ipi_etype) {
        case ETHERTYPE_IPV6:
                type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
                break;
        case ETHERTYPE_IP:
                type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
                /* Tell transmit desc to also do IPv4 checksum. */
                *olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
                break;
        default:
                panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
                    __func__, ntohs(pi->ipi_etype));
                break;
        }

        TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[pi->ipi_pidx];

        /* This is used in the transmit desc in encap */
        paylen = pi->ipi_len - pi->ipi_ehdrlen - pi->ipi_ip_hlen - pi->ipi_tcp_hlen;

        /* VLAN MACLEN IPLEN */
        if (pi->ipi_mflags & M_VLANTAG) {
                vlan_macip_lens |= (pi->ipi_vtag << E1000_ADVTXD_VLAN_SHIFT);
        }

        vlan_macip_lens |= pi->ipi_ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;
        vlan_macip_lens |= pi->ipi_ip_hlen;
        TXD->vlan_macip_lens = htole32(vlan_macip_lens);

        /* ADV DTYPE TUCMD */
        type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
        type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
        TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);

        /* MSS L4LEN IDX */
        mss_l4len_idx |= (pi->ipi_tso_segsz << E1000_ADVTXD_MSS_SHIFT);
        mss_l4len_idx |= (pi->ipi_tcp_hlen << E1000_ADVTXD_L4LEN_SHIFT);
        /* 82575 needs the queue index added */
        if (adapter->hw.mac.type == e1000_82575)
                mss_l4len_idx |= txr->me << 4;
        TXD->mss_l4len_idx = htole32(mss_l4len_idx);

        TXD->seqnum_seed = htole32(0);
        *cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
        *olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
        *olinfo_status |= paylen << E1000_ADVTXD_PAYLEN_SHIFT;

        return (1);
}

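/*
 * Note: the return value of igb_tso_setup() (and of igb_tx_ctx_setup()
 * below) is the number of context descriptors consumed; the caller,
 * igb_isc_txd_encap(), adds it to the producer index before writing
 * the data descriptors.
 */
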
/*********************************************************************
 *
 *  Advanced Context Descriptor setup for VLAN, CSUM or TSO
 *
 **********************************************************************/
static int
igb_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi, u32 *cmd_type_len, u32 *olinfo_status)
{
        struct e1000_adv_tx_context_desc *TXD;
        struct adapter *adapter = txr->adapter;
        u32 vlan_macip_lens, type_tucmd_mlhl;
        u32 mss_l4len_idx;
        int offload = TRUE;

        mss_l4len_idx = vlan_macip_lens = type_tucmd_mlhl = 0;

        /* First check if TSO is to be used */
        if (pi->ipi_csum_flags & CSUM_TSO)
                return (igb_tso_setup(txr, pi, cmd_type_len, olinfo_status));

        /* Indicate the whole packet as payload when not doing TSO */
        *olinfo_status |= pi->ipi_len << E1000_ADVTXD_PAYLEN_SHIFT;

        /* Now ready a context descriptor */
        TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[pi->ipi_pidx];

        /*
        ** In advanced descriptors the vlan tag must
        ** be placed into the context descriptor. Hence
        ** we need to make one even if not doing offloads.
        */
        if (pi->ipi_mflags & M_VLANTAG) {
                vlan_macip_lens |= (pi->ipi_vtag << E1000_ADVTXD_VLAN_SHIFT);
        } else if ((pi->ipi_csum_flags & IGB_CSUM_OFFLOAD) == 0) {
                return (0);
        }

        /* Set the ether header length */
        vlan_macip_lens |= pi->ipi_ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;

        switch(pi->ipi_etype) {
        case ETHERTYPE_IP:
                type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
                break;
        case ETHERTYPE_IPV6:
                type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
                break;
        default:
                offload = FALSE;
                break;
        }

        vlan_macip_lens |= pi->ipi_ip_hlen;
        type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;

        switch (pi->ipi_ipproto) {
        case IPPROTO_TCP:
                if (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP))
                        type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
                break;
        case IPPROTO_UDP:
                if (pi->ipi_csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP))
                        type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP;
                break;
        case IPPROTO_SCTP:
                if (pi->ipi_csum_flags & (CSUM_IP_SCTP | CSUM_IP6_SCTP))
                        type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP;
                break;
        default:
                offload = FALSE;
                break;
        }

        if (offload) /* For the TX descriptor setup */
                *olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

        /* 82575 needs the queue index added */
        if (adapter->hw.mac.type == e1000_82575)
                mss_l4len_idx = txr->me << 4;

        /* Now copy bits into descriptor */
        TXD->vlan_macip_lens = htole32(vlan_macip_lens);
        TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
        TXD->seqnum_seed = htole32(0);
        TXD->mss_l4len_idx = htole32(mss_l4len_idx);

        return (1);
}

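/*
 * Ring layout produced by igb_isc_txd_encap() below for one packet
 * with offloads enabled (a sketch):
 *
 *	[ctx desc][data desc 0] ... [data desc n-1]
 *
 * E1000_TXD_CMD_EOP is set on the last data descriptor, along with
 * E1000_ADVTXD_DCMD_RS when iflib requested a completion notification
 * (IPI_TX_INTR). The index of each RS-marked descriptor is recorded in
 * tx_rsq[] so that igb_isc_txd_credits_update() only needs to poll
 * those entries for the DD (descriptor done) status bit.
 */
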
static int
igb_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
        struct adapter *sc = arg;
        if_softc_ctx_t scctx = sc->shared;
        struct em_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx];
        struct tx_ring *txr = &que->txr;
        int nsegs = pi->ipi_nsegs;
        bus_dma_segment_t *segs = pi->ipi_segs;
        union e1000_adv_tx_desc *txd = NULL;
        int i, j, pidx_last;
        u32 olinfo_status, cmd_type_len, txd_flags;
        qidx_t ntxd;

        pidx_last = olinfo_status = 0;
        /* Basic descriptor defines */
        cmd_type_len = (E1000_ADVTXD_DTYP_DATA |
            E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT);

        if (pi->ipi_mflags & M_VLANTAG)
                cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

        i = pi->ipi_pidx;
        ntxd = scctx->isc_ntxd[0];
        txd_flags = pi->ipi_flags & IPI_TX_INTR ? E1000_ADVTXD_DCMD_RS : 0;
        /* Consume the first descriptor */
        i += igb_tx_ctx_setup(txr, pi, &cmd_type_len, &olinfo_status);
        if (i == scctx->isc_ntxd[0])
                i = 0;

        /* 82575 needs the queue index added */
        if (sc->hw.mac.type == e1000_82575)
                olinfo_status |= txr->me << 4;

        for (j = 0; j < nsegs; j++) {
                bus_size_t seglen;
                bus_addr_t segaddr;

                txd = (union e1000_adv_tx_desc *)&txr->tx_base[i];
                seglen = segs[j].ds_len;
                segaddr = htole64(segs[j].ds_addr);

                txd->read.buffer_addr = segaddr;
                txd->read.cmd_type_len = htole32(E1000_TXD_CMD_IFCS |
                    cmd_type_len | seglen);
                txd->read.olinfo_status = htole32(olinfo_status);
                pidx_last = i;
                if (++i == scctx->isc_ntxd[0]) {
                        i = 0;
                }
        }
        if (txd_flags) {
                txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
                txr->tx_rs_pidx = (txr->tx_rs_pidx+1) & (ntxd-1);
                MPASS(txr->tx_rs_pidx != txr->tx_rs_cidx);
        }

        txd->read.cmd_type_len |= htole32(E1000_TXD_CMD_EOP | txd_flags);
        pi->ipi_new_pidx = i;

        return (0);
}

static void
igb_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
        struct adapter *adapter = arg;
        struct em_tx_queue *que = &adapter->tx_queues[txqid];
        struct tx_ring *txr = &que->txr;

        E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), pidx);
}

static int
igb_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
        struct adapter *adapter = arg;
        if_softc_ctx_t scctx = adapter->shared;
        struct em_tx_queue *que = &adapter->tx_queues[txqid];
        struct tx_ring *txr = &que->txr;

        qidx_t processed = 0;
        int updated;
        qidx_t cur, prev, ntxd, rs_cidx;
        int32_t delta;
        uint8_t status;

        rs_cidx = txr->tx_rs_cidx;
        if (rs_cidx == txr->tx_rs_pidx)
                return (0);
        cur = txr->tx_rsq[rs_cidx];
        status = ((union e1000_adv_tx_desc *)&txr->tx_base[cur])->wb.status;
        updated = !!(status & E1000_TXD_STAT_DD);

        if (!clear || !updated)
                return (updated);

        prev = txr->tx_cidx_processed;
        ntxd = scctx->isc_ntxd[0];
        do {
                delta = (int32_t)cur - (int32_t)prev;
                MPASS(prev == 0 || delta != 0);
                if (delta < 0)
                        delta += ntxd;

                processed += delta;
                prev = cur;
                rs_cidx = (rs_cidx + 1) & (ntxd-1);
                if (rs_cidx == txr->tx_rs_pidx)
                        break;
                cur = txr->tx_rsq[rs_cidx];
                status = ((union e1000_adv_tx_desc *)&txr->tx_base[cur])->wb.status;
        } while ((status & E1000_TXD_STAT_DD));

        txr->tx_rs_cidx = rs_cidx;
        txr->tx_cidx_processed = prev;
        return (processed);
}

static void
igb_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
        struct adapter *sc = arg;
        if_softc_ctx_t scctx = sc->shared;
        uint16_t rxqid = iru->iru_qsidx;
        struct em_rx_queue *que = &sc->rx_queues[rxqid];
        union e1000_adv_rx_desc *rxd;
        struct rx_ring *rxr = &que->rxr;
        uint64_t *paddrs;
        uint32_t next_pidx, pidx;
        uint16_t count;
        int i;

        paddrs = iru->iru_paddrs;
        pidx = iru->iru_pidx;
        count = iru->iru_count;

        for (i = 0, next_pidx = pidx; i < count; i++) {
                rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[next_pidx];

                rxd->read.pkt_addr = htole64(paddrs[i]);
                if (++next_pidx == scctx->isc_nrxd[0])
                        next_pidx = 0;
        }
}

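/*
 * igb_isc_rxd_refill() above only writes the new buffer addresses into
 * the descriptor ring; the hardware does not see them until
 * igb_isc_rxd_flush() below advances the receive tail register (RDT).
 */
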
static void
igb_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx)
{
        struct adapter *sc = arg;
        struct em_rx_queue *que = &sc->rx_queues[rxqid];
        struct rx_ring *rxr = &que->rxr;

        E1000_WRITE_REG(&sc->hw, E1000_RDT(rxr->me), pidx);
}

static int
igb_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
        struct adapter *sc = arg;
        if_softc_ctx_t scctx = sc->shared;
        struct em_rx_queue *que = &sc->rx_queues[rxqid];
        struct rx_ring *rxr = &que->rxr;
        union e1000_adv_rx_desc *rxd;
        u32 staterr = 0;
        int cnt, i, iter;

        if (budget == 1) {
                rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[idx];
                staterr = le32toh(rxd->wb.upper.status_error);
                return (staterr & E1000_RXD_STAT_DD);
        }

        for (iter = cnt = 0, i = idx; iter < scctx->isc_nrxd[0] && iter <= budget;) {
                rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[i];
                staterr = le32toh(rxd->wb.upper.status_error);

                if ((staterr & E1000_RXD_STAT_DD) == 0)
                        break;

                if (++i == scctx->isc_nrxd[0]) {
                        i = 0;
                }

                if (staterr & E1000_RXD_STAT_EOP)
                        cnt++;
                iter++;
        }
        return (cnt);
}

/****************************************************************
 * This routine passes data that has been DMA'ed into host memory
 * up to the upper layer, and initializes the ri structure.
 *
 * Returns 0 upon success, errno on failure
 ***************************************************************/

static int
igb_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
        struct adapter *adapter = arg;
        if_softc_ctx_t scctx = adapter->shared;
        struct em_rx_queue *que = &adapter->rx_queues[ri->iri_qsidx];
        struct rx_ring *rxr = &que->rxr;
        struct ifnet *ifp = iflib_get_ifp(adapter->ctx);
        union e1000_adv_rx_desc *rxd;

        u16 pkt_info, len;
        u16 vtag = 0;
        u32 ptype;
        u32 staterr = 0;
        bool eop;
        int i = 0;
        int cidx = ri->iri_cidx;

        do {
                rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[cidx];
                staterr = le32toh(rxd->wb.upper.status_error);
                pkt_info = le16toh(rxd->wb.lower.lo_dword.hs_rss.pkt_info);

                MPASS((staterr & E1000_RXD_STAT_DD) != 0);

                len = le16toh(rxd->wb.upper.length);
                ptype = le32toh(rxd->wb.lower.lo_dword.data) & IGB_PKTTYPE_MASK;

                ri->iri_len += len;
                rxr->rx_bytes += ri->iri_len;

                rxd->wb.upper.status_error = 0;
                eop = ((staterr & E1000_RXD_STAT_EOP) == E1000_RXD_STAT_EOP);

                if (((adapter->hw.mac.type == e1000_i350) ||
                    (adapter->hw.mac.type == e1000_i354)) &&
                    (staterr & E1000_RXDEXT_STATERR_LB))
                        vtag = be16toh(rxd->wb.upper.vlan);
                else
                        vtag = le16toh(rxd->wb.upper.vlan);

                /* Make sure bad packets are discarded */
                if (eop && ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) != 0)) {
                        adapter->dropped_pkts++;
                        ++rxr->rx_discarded;
                        return (EBADMSG);
                }
                ri->iri_frags[i].irf_flid = 0;
                ri->iri_frags[i].irf_idx = cidx;
                ri->iri_frags[i].irf_len = len;

                if (++cidx == scctx->isc_nrxd[0])
                        cidx = 0;
#ifdef notyet
                if (rxr->hdr_split == TRUE) {
                        ri->iri_frags[i].irf_flid = 1;
                        ri->iri_frags[i].irf_idx = cidx;
                        if (++cidx == scctx->isc_nrxd[0])
                                cidx = 0;
                }
#endif
                i++;
        } while (!eop);

        rxr->rx_packets++;

        if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
                igb_rx_checksum(staterr, ri, ptype);

        if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
            (staterr & E1000_RXD_STAT_VP) != 0) {
                ri->iri_vtag = vtag;
                ri->iri_flags |= M_VLANTAG;
        }
        ri->iri_flowid = le32toh(rxd->wb.lower.hi_dword.rss);
        ri->iri_rsstype = igb_determine_rsstype(pkt_info);
        ri->iri_nfrags = i;

        return (0);
}

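/*
 * On return from igb_isc_rxd_pkt_get(), iflib assembles the
 * iri_frags[] entries filled in above (iri_nfrags of them) into a
 * single mbuf chain, so a packet spanning several RX buffers requires
 * no copying here.
 */
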
/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack of the checksum status so that it doesn't spend
 *  time verifying the checksum.
 *
 *********************************************************************/
static void
igb_rx_checksum(u32 staterr, if_rxd_info_t ri, u32 ptype)
{
        u16 status = (u16)staterr;
        u8 errors = (u8)(staterr >> 24);
        bool sctp = FALSE;

        /* Ignore Checksum bit is set */
        if (status & E1000_RXD_STAT_IXSM) {
                ri->iri_csum_flags = 0;
                return;
        }

        if ((ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 &&
            (ptype & E1000_RXDADV_PKTTYPE_SCTP) != 0)
                sctp = TRUE;
        else
                sctp = FALSE;

        if (status & E1000_RXD_STAT_IPCS) {
                /* Did it pass? */
                if (!(errors & E1000_RXD_ERR_IPE)) {
                        /* IP Checksum Good */
                        ri->iri_csum_flags = CSUM_IP_CHECKED;
                        ri->iri_csum_flags |= CSUM_IP_VALID;
                } else
                        ri->iri_csum_flags = 0;
        }

        if (status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) {
                u64 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
                if (sctp) /* reassign */
                        type = CSUM_SCTP_VALID;
                /* Did it pass? */
                if (!(errors & E1000_RXD_ERR_TCPE)) {
                        ri->iri_csum_flags |= type;
                        if (sctp == FALSE)
                                ri->iri_csum_data = htons(0xffff);
                }
        }
}

/********************************************************************
 *
 *  Parse the packet type to determine the appropriate hash
 *
 ******************************************************************/
static int
igb_determine_rsstype(u16 pkt_info)
{
        switch (pkt_info & E1000_RXDADV_RSSTYPE_MASK) {
        case E1000_RXDADV_RSSTYPE_IPV4_TCP:
                return M_HASHTYPE_RSS_TCP_IPV4;
        case E1000_RXDADV_RSSTYPE_IPV4:
                return M_HASHTYPE_RSS_IPV4;
        case E1000_RXDADV_RSSTYPE_IPV6_TCP:
                return M_HASHTYPE_RSS_TCP_IPV6;
        case E1000_RXDADV_RSSTYPE_IPV6_EX:
                return M_HASHTYPE_RSS_IPV6_EX;
        case E1000_RXDADV_RSSTYPE_IPV6:
                return M_HASHTYPE_RSS_IPV6;
        case E1000_RXDADV_RSSTYPE_IPV6_TCP_EX:
                return M_HASHTYPE_RSS_TCP_IPV6_EX;
        default:
                return M_HASHTYPE_OPAQUE;
        }
}
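
/*
 * M_HASHTYPE_OPAQUE indicates that iri_flowid carries a usable hash
 * value whose layout is unknown, so the stack may still use it for
 * flow affinity without assuming which header fields were hashed.
 */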