/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2016 Matthew Macy <mmacy@mattmacy.io>
 * All rights reserved.
 * Copyright (c) 2021 Rubicon Communications, LLC (Netgate)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "if_igc.h"

#include <net/rss_config.h>
#include <netinet/in_rss.h>

#ifdef VERBOSE_DEBUG
#define DPRINTF device_printf
#else
#define DPRINTF(...)
#endif

/*********************************************************************
 *  Local Function prototypes
 *********************************************************************/
static int	igc_isc_txd_encap(void *, if_pkt_info_t);
static void	igc_isc_txd_flush(void *, uint16_t, qidx_t);
static int	igc_isc_txd_credits_update(void *, uint16_t, bool);

static void	igc_isc_rxd_refill(void *, if_rxd_update_t);

static void	igc_isc_rxd_flush(void *, uint16_t, uint8_t, qidx_t);
static int	igc_isc_rxd_available(void *, uint16_t, qidx_t, qidx_t);

static int	igc_isc_rxd_pkt_get(void *, if_rxd_info_t);

static int	igc_tx_ctx_setup(struct tx_ring *, if_pkt_info_t, uint32_t *,
    uint32_t *);
static int	igc_tso_setup(struct tx_ring *, if_pkt_info_t, uint32_t *,
    uint32_t *);

static void	igc_rx_checksum(uint32_t, if_rxd_info_t, uint32_t);
static int	igc_determine_rsstype(uint16_t);

extern void	igc_if_enable_intr(if_ctx_t);
extern int	igc_intr(void *);

struct if_txrx igc_txrx = {
	.ift_txd_encap = igc_isc_txd_encap,
	.ift_txd_flush = igc_isc_txd_flush,
	.ift_txd_credits_update = igc_isc_txd_credits_update,
	.ift_rxd_available = igc_isc_rxd_available,
	.ift_rxd_pkt_get = igc_isc_rxd_pkt_get,
	.ift_rxd_refill = igc_isc_rxd_refill,
	.ift_rxd_flush = igc_isc_rxd_flush,
	.ift_legacy_intr = igc_intr
};

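/*********************************************************************
 *
 *  Debug helper: dump each transmit ring's report-status (RS)
 *  queue state and flag every descriptor whose descriptor-done
 *  (DD) bit is set.  Not part of the fast path.
 *
 *********************************************************************/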
void
igc_dump_rs(struct igc_softc *sc)
{
	if_softc_ctx_t scctx = sc->shared;
	struct igc_tx_queue *que;
	struct tx_ring *txr;
	qidx_t i, ntxd, qid, cur;
	int16_t rs_cidx;
	uint8_t status;

	printf("\n");
	ntxd = scctx->isc_ntxd[0];
	for (qid = 0; qid < sc->tx_num_queues; qid++) {
		que = &sc->tx_queues[qid];
		txr = &que->txr;
		rs_cidx = txr->tx_rs_cidx;
		if (rs_cidx != txr->tx_rs_pidx) {
			cur = txr->tx_rsq[rs_cidx];
			status = txr->tx_base[cur].upper.fields.status;
			if (!(status & IGC_TXD_STAT_DD))
				printf("qid[%d]->tx_rsq[%d]: %d clear ",
				    qid, rs_cidx, cur);
		} else {
			rs_cidx = (rs_cidx - 1) & (ntxd - 1);
			cur = txr->tx_rsq[rs_cidx];
			printf("qid[%d]->tx_rsq[rs_cidx-1=%d]: %d ",
			    qid, rs_cidx, cur);
		}
		printf("cidx_prev=%d rs_pidx=%d ", txr->tx_cidx_processed,
		    txr->tx_rs_pidx);
		for (i = 0; i < ntxd; i++) {
			if (txr->tx_base[i].upper.fields.status &
			    IGC_TXD_STAT_DD)
				printf("%d set ", i);
		}
		printf("\n");
	}
}

/**********************************************************************
 *
 *  Setup work for hardware segmentation offload (TSO) on
 *  adapters using advanced tx descriptors
 *
 **********************************************************************/
static int
igc_tso_setup(struct tx_ring *txr, if_pkt_info_t pi, uint32_t *cmd_type_len,
    uint32_t *olinfo_status)
{
	struct igc_adv_tx_context_desc *TXD;
	uint32_t type_tucmd_mlhl = 0, vlan_macip_lens = 0;
	uint32_t mss_l4len_idx = 0;
	uint32_t paylen;

	switch (pi->ipi_etype) {
	case ETHERTYPE_IPV6:
		type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_IPV6;
		break;
	case ETHERTYPE_IP:
		type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_IPV4;
		/* Tell transmit desc to also do IPv4 checksum. */
		*olinfo_status |= IGC_TXD_POPTS_IXSM << 8;
		break;
	default:
		panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
		    __func__, ntohs(pi->ipi_etype));
		break;
	}

	TXD = (struct igc_adv_tx_context_desc *)&txr->tx_base[pi->ipi_pidx];

	/* This is used in the transmit desc in encap */
	paylen = pi->ipi_len - pi->ipi_ehdrlen - pi->ipi_ip_hlen -
	    pi->ipi_tcp_hlen;

	/* VLAN MACLEN IPLEN */
	if (pi->ipi_mflags & M_VLANTAG) {
		vlan_macip_lens |= (pi->ipi_vtag << IGC_ADVTXD_VLAN_SHIFT);
	}

	vlan_macip_lens |= pi->ipi_ehdrlen << IGC_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= pi->ipi_ip_hlen;
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);

	/* ADV DTYPE TUCMD */
	type_tucmd_mlhl |= IGC_ADVTXD_DCMD_DEXT | IGC_ADVTXD_DTYP_CTXT;
	type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_TCP;
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);

	/* MSS L4LEN IDX */
	mss_l4len_idx |= (pi->ipi_tso_segsz << IGC_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (pi->ipi_tcp_hlen << IGC_ADVTXD_L4LEN_SHIFT);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	TXD->seqnum_seed = htole32(0);
	*cmd_type_len |= IGC_ADVTXD_DCMD_TSE;
	*olinfo_status |= IGC_TXD_POPTS_TXSM << 8;
	*olinfo_status |= paylen << IGC_ADVTXD_PAYLEN_SHIFT;

	return (1);
}

/*********************************************************************
 *
 *  Advanced Context Descriptor setup for VLAN, CSUM or TSO
 *
 **********************************************************************/
static int
igc_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi,
    uint32_t *cmd_type_len, uint32_t *olinfo_status)
{
	struct igc_adv_tx_context_desc *TXD;
	uint32_t vlan_macip_lens, type_tucmd_mlhl;
	uint32_t mss_l4len_idx;

	mss_l4len_idx = vlan_macip_lens = type_tucmd_mlhl = 0;

	/* First check if TSO is to be used */
	if (pi->ipi_csum_flags & CSUM_TSO)
		return (igc_tso_setup(txr, pi, cmd_type_len, olinfo_status));

	/* Indicate the whole packet as payload when not doing TSO */
	*olinfo_status |= pi->ipi_len << IGC_ADVTXD_PAYLEN_SHIFT;

	/* Now ready a context descriptor */
	TXD = (struct igc_adv_tx_context_desc *)&txr->tx_base[pi->ipi_pidx];

	/*
	 * In advanced descriptors the vlan tag must
	 * be placed into the context descriptor. Hence
	 * we need to make one even if not doing offloads.
	 */
	if (pi->ipi_mflags & M_VLANTAG) {
		vlan_macip_lens |= (pi->ipi_vtag << IGC_ADVTXD_VLAN_SHIFT);
	} else if ((pi->ipi_csum_flags & IGC_CSUM_OFFLOAD) == 0) {
		return (0);
	}

	/* Set the ether header length */
	vlan_macip_lens |= pi->ipi_ehdrlen << IGC_ADVTXD_MACLEN_SHIFT;

	switch (pi->ipi_etype) {
	case ETHERTYPE_IP:
		type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_IPV4;
		break;
	case ETHERTYPE_IPV6:
		type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_IPV6;
		break;
	default:
		break;
	}

	vlan_macip_lens |= pi->ipi_ip_hlen;
	type_tucmd_mlhl |= IGC_ADVTXD_DCMD_DEXT | IGC_ADVTXD_DTYP_CTXT;

	switch (pi->ipi_ipproto) {
	case IPPROTO_TCP:
		if (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP)) {
			type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_TCP;
			*olinfo_status |= IGC_TXD_POPTS_TXSM << 8;
		}
		break;
	case IPPROTO_UDP:
		if (pi->ipi_csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP)) {
			type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_UDP;
			*olinfo_status |= IGC_TXD_POPTS_TXSM << 8;
		}
		break;
	case IPPROTO_SCTP:
		if (pi->ipi_csum_flags & (CSUM_IP_SCTP | CSUM_IP6_SCTP)) {
			type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_SCTP;
			*olinfo_status |= IGC_TXD_POPTS_TXSM << 8;
		}
		break;
	default:
		break;
	}

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	return (1);
}

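/*********************************************************************
 *
 *  Map one outbound packet: consume an optional context descriptor
 *  (VLAN/CSUM/TSO) first, then fill one advanced data descriptor per
 *  DMA segment.  The final descriptor carries EOP, plus RS when
 *  iflib requested a completion interrupt, in which case its index
 *  is recorded in tx_rsq for igc_isc_txd_credits_update() to reclaim.
 *
 **********************************************************************/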
static int
igc_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
	struct igc_softc *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	struct igc_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx];
	struct tx_ring *txr = &que->txr;
	int nsegs = pi->ipi_nsegs;
	bus_dma_segment_t *segs = pi->ipi_segs;
	union igc_adv_tx_desc *txd = NULL;
	int i, j, pidx_last;
	uint32_t olinfo_status, cmd_type_len, txd_flags;
	qidx_t ntxd;

	pidx_last = olinfo_status = 0;
	/* Basic descriptor defines */
	cmd_type_len = (IGC_ADVTXD_DTYP_DATA |
	    IGC_ADVTXD_DCMD_IFCS | IGC_ADVTXD_DCMD_DEXT);

	if (pi->ipi_mflags & M_VLANTAG)
		cmd_type_len |= IGC_ADVTXD_DCMD_VLE;

	i = pi->ipi_pidx;
	ntxd = scctx->isc_ntxd[0];
	txd_flags = pi->ipi_flags & IPI_TX_INTR ? IGC_ADVTXD_DCMD_RS : 0;
	/* Consume the first descriptor */
	i += igc_tx_ctx_setup(txr, pi, &cmd_type_len, &olinfo_status);
	if (i == scctx->isc_ntxd[0])
		i = 0;

	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;
		bus_addr_t segaddr;

		txd = (union igc_adv_tx_desc *)&txr->tx_base[i];
		seglen = segs[j].ds_len;
		segaddr = htole64(segs[j].ds_addr);

		txd->read.buffer_addr = segaddr;
		txd->read.cmd_type_len = htole32(IGC_ADVTXD_DCMD_IFCS |
		    cmd_type_len | seglen);
		txd->read.olinfo_status = htole32(olinfo_status);
		pidx_last = i;
		if (++i == scctx->isc_ntxd[0]) {
			i = 0;
		}
	}
	if (txd_flags) {
		txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
		txr->tx_rs_pidx = (txr->tx_rs_pidx + 1) & (ntxd - 1);
		MPASS(txr->tx_rs_pidx != txr->tx_rs_cidx);
	}

	txd->read.cmd_type_len |= htole32(IGC_ADVTXD_DCMD_EOP | txd_flags);
	pi->ipi_new_pidx = i;

	/* Sent data accounting for AIM */
	txr->tx_bytes += pi->ipi_len;
	++txr->tx_packets;

	return (0);
}

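/*********************************************************************
 *
 *  Advance the transmit tail register (TDT) so the hardware sees
 *  the descriptors queued up to pidx.
 *
 **********************************************************************/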
static void
igc_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
	struct igc_softc *sc = arg;
	struct igc_tx_queue *que = &sc->tx_queues[txqid];
	struct tx_ring *txr = &que->txr;

	IGC_WRITE_REG(&sc->hw, IGC_TDT(txr->me), pidx);
}

/*********************************************************************
 *
 *  Count how many transmit descriptors the hardware has completed,
 *  walking the report-status queue until a descriptor whose DD bit
 *  is still clear is found.
 *
 **********************************************************************/
static int
igc_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
	struct igc_softc *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	struct igc_tx_queue *que = &sc->tx_queues[txqid];
	struct tx_ring *txr = &que->txr;

	qidx_t processed = 0;
	int updated;
	qidx_t cur, prev, ntxd, rs_cidx;
	int32_t delta;
	uint8_t status;

	rs_cidx = txr->tx_rs_cidx;
	if (rs_cidx == txr->tx_rs_pidx)
		return (0);
	cur = txr->tx_rsq[rs_cidx];
	status = ((union igc_adv_tx_desc *)&txr->tx_base[cur])->wb.status;
	updated = !!(status & IGC_TXD_STAT_DD);

	if (!updated)
		return (0);

	/*
	 * If clear is false just let the caller know that there
	 * are descriptors to reclaim
	 */
	if (!clear)
		return (1);

	prev = txr->tx_cidx_processed;
	ntxd = scctx->isc_ntxd[0];
	do {
		MPASS(prev != cur);
		delta = (int32_t)cur - (int32_t)prev;
		if (delta < 0)
			delta += ntxd;
		MPASS(delta > 0);

		processed += delta;
		prev = cur;
		rs_cidx = (rs_cidx + 1) & (ntxd - 1);
		if (rs_cidx == txr->tx_rs_pidx)
			break;
		cur = txr->tx_rsq[rs_cidx];
		status =
		    ((union igc_adv_tx_desc *)&txr->tx_base[cur])->wb.status;
	} while ((status & IGC_TXD_STAT_DD));

	txr->tx_rs_cidx = rs_cidx;
	txr->tx_cidx_processed = prev;
	return (processed);
}

/*********************************************************************
 *
 *  Re-arm a run of receive descriptors with the fresh buffer
 *  physical addresses supplied by iflib.
 *
 **********************************************************************/
static void
igc_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
	struct igc_softc *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	uint16_t rxqid = iru->iru_qsidx;
	struct igc_rx_queue *que = &sc->rx_queues[rxqid];
	union igc_adv_rx_desc *rxd;
	struct rx_ring *rxr = &que->rxr;
	uint64_t *paddrs;
	uint32_t next_pidx, pidx;
	uint16_t count;
	int i;

	paddrs = iru->iru_paddrs;
	pidx = iru->iru_pidx;
	count = iru->iru_count;

	for (i = 0, next_pidx = pidx; i < count; i++) {
		rxd = (union igc_adv_rx_desc *)&rxr->rx_base[next_pidx];

		rxd->read.pkt_addr = htole64(paddrs[i]);
		if (++next_pidx == scctx->isc_nrxd[0])
			next_pidx = 0;
	}
}

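/*********************************************************************
 *
 *  Publish refilled receive descriptors to the hardware by updating
 *  the receive tail register (RDT).
 *
 **********************************************************************/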
static void
igc_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused,
    qidx_t pidx)
{
	struct igc_softc *sc = arg;
	struct igc_rx_queue *que = &sc->rx_queues[rxqid];
	struct rx_ring *rxr = &que->rxr;

	IGC_WRITE_REG(&sc->hw, IGC_RDT(rxr->me), pidx);
}

/*********************************************************************
 *
 *  Count how many complete packets (descriptors with both DD and
 *  EOP set) are ready on the ring, up to the given budget.
 *
 **********************************************************************/
static int
igc_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
	struct igc_softc *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	struct igc_rx_queue *que = &sc->rx_queues[rxqid];
	struct rx_ring *rxr = &que->rxr;
	union igc_adv_rx_desc *rxd;
	uint32_t staterr = 0;
	int cnt, i;

	for (cnt = 0, i = idx; cnt < scctx->isc_nrxd[0] && cnt <= budget;) {
		rxd = (union igc_adv_rx_desc *)&rxr->rx_base[i];
		staterr = le32toh(rxd->wb.upper.status_error);

		if ((staterr & IGC_RXD_STAT_DD) == 0)
			break;
		if (++i == scctx->isc_nrxd[0])
			i = 0;
		if (staterr & IGC_RXD_STAT_EOP)
			cnt++;
	}
	return (cnt);
}

/****************************************************************
 *  Hand a packet that has been DMA'ed into host memory to the
 *  upper layer, initializing the ri structure as we go.
 *
 *  Returns 0 upon success, errno on failure
 ***************************************************************/

static int
igc_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
	struct igc_softc *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	struct igc_rx_queue *que = &sc->rx_queues[ri->iri_qsidx];
	struct rx_ring *rxr = &que->rxr;
	union igc_adv_rx_desc *rxd;

	uint16_t pkt_info, len;
	uint32_t ptype, staterr;
	int i, cidx;
	bool eop;

	staterr = i = 0;
	cidx = ri->iri_cidx;

	do {
		rxd = (union igc_adv_rx_desc *)&rxr->rx_base[cidx];
		staterr = le32toh(rxd->wb.upper.status_error);
		pkt_info = le16toh(rxd->wb.lower.lo_dword.hs_rss.pkt_info);

		MPASS((staterr & IGC_RXD_STAT_DD) != 0);

		len = le16toh(rxd->wb.upper.length);
		ptype =
		    le32toh(rxd->wb.lower.lo_dword.data) & IGC_PKTTYPE_MASK;

		ri->iri_len += len;

		rxd->wb.upper.status_error = 0;
		eop = ((staterr & IGC_RXD_STAT_EOP) == IGC_RXD_STAT_EOP);

		/* Make sure bad packets are discarded */
		if (eop && ((staterr & IGC_RXDEXT_STATERR_RXE) != 0)) {
			sc->dropped_pkts++;
			++rxr->rx_discarded;
			return (EBADMSG);
		}
		ri->iri_frags[i].irf_flid = 0;
		ri->iri_frags[i].irf_idx = cidx;
		ri->iri_frags[i].irf_len = len;

		if (++cidx == scctx->isc_nrxd[0])
			cidx = 0;
#ifdef notyet
		if (rxr->hdr_split == true) {
			ri->iri_frags[i].irf_flid = 1;
			ri->iri_frags[i].irf_idx = cidx;
			if (++cidx == scctx->isc_nrxd[0])
				cidx = 0;
		}
#endif
		i++;
	} while (!eop);

	rxr->rx_packets++;
	/* Count the packet's bytes once, not once per fragment */
	rxr->rx_bytes += ri->iri_len;

	if ((scctx->isc_capenable & IFCAP_RXCSUM) != 0)
		igc_rx_checksum(staterr, ri, ptype);

	if (staterr & IGC_RXD_STAT_VP) {
		ri->iri_vtag = le16toh(rxd->wb.upper.vlan);
		ri->iri_flags |= M_VLANTAG;
	}

	ri->iri_flowid = le32toh(rxd->wb.lower.hi_dword.rss);
	ri->iri_rsstype = igc_determine_rsstype(pkt_info);
	ri->iri_nfrags = i;

	return (0);
}

/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of the checksum so that the
 *  stack doesn't spend time verifying it.
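 *
 *  In the advanced write-back format, the low word of status_error
 *  carries the status bits and bits 24-31 carry the error bits;
 *  the casts below split staterr accordingly.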
 *
 *********************************************************************/
static void
igc_rx_checksum(uint32_t staterr, if_rxd_info_t ri, uint32_t ptype)
{
	uint16_t status = (uint16_t)staterr;
	uint8_t errors = (uint8_t)(staterr >> 24);

	if (__predict_false(status & IGC_RXD_STAT_IXSM))
		return;

	/* If there is a layer 3 or 4 error we are done */
	if (__predict_false(errors & (IGC_RXD_ERR_IPE | IGC_RXD_ERR_TCPE)))
		return;

	/* IP Checksum Good */
	if (status & IGC_RXD_STAT_IPCS)
		ri->iri_csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID);

	/* Valid L4E checksum */
	if (__predict_true(status &
	    (IGC_RXD_STAT_TCPCS | IGC_RXD_STAT_UDPCS))) {
		/* SCTP header present */
		if (__predict_false((ptype & IGC_RXDADV_PKTTYPE_ETQF) == 0 &&
		    (ptype & IGC_RXDADV_PKTTYPE_SCTP) != 0)) {
			ri->iri_csum_flags |= CSUM_SCTP_VALID;
		} else {
			ri->iri_csum_flags |=
			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			ri->iri_csum_data = htons(0xffff);
		}
	}
}

/********************************************************************
 *
 *  Parse the packet type to determine the appropriate hash
 *
 ********************************************************************/
static int
igc_determine_rsstype(uint16_t pkt_info)
{
	switch (pkt_info & IGC_RXDADV_RSSTYPE_MASK) {
	case IGC_RXDADV_RSSTYPE_IPV4_TCP:
		return M_HASHTYPE_RSS_TCP_IPV4;
	case IGC_RXDADV_RSSTYPE_IPV4:
		return M_HASHTYPE_RSS_IPV4;
	case IGC_RXDADV_RSSTYPE_IPV6_TCP:
		return M_HASHTYPE_RSS_TCP_IPV6;
	case IGC_RXDADV_RSSTYPE_IPV6_EX:
		return M_HASHTYPE_RSS_IPV6_EX;
	case IGC_RXDADV_RSSTYPE_IPV6:
		return M_HASHTYPE_RSS_IPV6;
	case IGC_RXDADV_RSSTYPE_IPV6_TCP_EX:
		return M_HASHTYPE_RSS_TCP_IPV6_EX;
	default:
		return M_HASHTYPE_OPAQUE;
	}
}