/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2016 Matthew Macy <mmacy@mattmacy.io>
 * All rights reserved.
 * Copyright (c) 2021 Rubicon Communications, LLC (Netgate)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "if_igc.h"

#ifdef RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#endif

#ifdef VERBOSE_DEBUG
#define DPRINTF device_printf
#else
#define DPRINTF(...)
#endif

/*********************************************************************
 *  Local Function prototypes
 *********************************************************************/
static int igc_isc_txd_encap(void *arg, if_pkt_info_t pi);
static void igc_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
static int igc_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear);

static void igc_isc_rxd_refill(void *arg, if_rxd_update_t iru);

static void igc_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused,
    qidx_t pidx);
static int igc_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx,
    qidx_t budget);

static int igc_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);

static int igc_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi,
    uint32_t *cmd_type_len, uint32_t *olinfo_status);
static int igc_tso_setup(struct tx_ring *txr, if_pkt_info_t pi,
    uint32_t *cmd_type_len, uint32_t *olinfo_status);

static void igc_rx_checksum(uint32_t staterr, if_rxd_info_t ri, uint32_t ptype);
static int igc_determine_rsstype(uint16_t pkt_info);

extern void igc_if_enable_intr(if_ctx_t ctx);
extern int igc_intr(void *arg);

struct if_txrx igc_txrx = {
        .ift_txd_encap = igc_isc_txd_encap,
        .ift_txd_flush = igc_isc_txd_flush,
        .ift_txd_credits_update = igc_isc_txd_credits_update,
        .ift_rxd_available = igc_isc_rxd_available,
        .ift_rxd_pkt_get = igc_isc_rxd_pkt_get,
        .ift_rxd_refill = igc_isc_rxd_refill,
        .ift_rxd_flush = igc_isc_rxd_flush,
        .ift_legacy_intr = igc_intr
};
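/*
 * Note: these callbacks are invoked from iflib's generic transmit and
 * receive paths; all mbuf handling stays inside iflib, so the routines
 * below only read and write the hardware descriptor rings.
 */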
void
igc_dump_rs(struct igc_adapter *adapter)
{
        if_softc_ctx_t scctx = adapter->shared;
        struct igc_tx_queue *que;
        struct tx_ring *txr;
        qidx_t i, ntxd, qid, cur;
        int16_t rs_cidx;
        uint8_t status;

        printf("\n");
        ntxd = scctx->isc_ntxd[0];
        for (qid = 0; qid < adapter->tx_num_queues; qid++) {
                que = &adapter->tx_queues[qid];
                txr = &que->txr;
                rs_cidx = txr->tx_rs_cidx;
                if (rs_cidx != txr->tx_rs_pidx) {
                        cur = txr->tx_rsq[rs_cidx];
                        status = txr->tx_base[cur].upper.fields.status;
                        if (!(status & IGC_TXD_STAT_DD))
                                printf("qid[%d]->tx_rsq[%d]: %d clear ",
                                    qid, rs_cidx, cur);
                } else {
                        rs_cidx = (rs_cidx - 1) & (ntxd - 1);
                        cur = txr->tx_rsq[rs_cidx];
                        printf("qid[%d]->tx_rsq[rs_cidx-1=%d]: %d ",
                            qid, rs_cidx, cur);
                }
                printf("cidx_prev=%d rs_pidx=%d ", txr->tx_cidx_processed,
                    txr->tx_rs_pidx);
                for (i = 0; i < ntxd; i++) {
                        if (txr->tx_base[i].upper.fields.status &
                            IGC_TXD_STAT_DD)
                                printf("%d set ", i);
                }
                printf("\n");
        }
}

/**********************************************************************
 *
 *  Setup work for hardware segmentation offload (TSO) on
 *  adapters using advanced tx descriptors
 *
 **********************************************************************/
static int
igc_tso_setup(struct tx_ring *txr, if_pkt_info_t pi, uint32_t *cmd_type_len,
    uint32_t *olinfo_status)
{
        struct igc_adv_tx_context_desc *TXD;
        uint32_t type_tucmd_mlhl = 0, vlan_macip_lens = 0;
        uint32_t mss_l4len_idx = 0;
        uint32_t paylen;

        switch (pi->ipi_etype) {
        case ETHERTYPE_IPV6:
                type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_IPV6;
                break;
        case ETHERTYPE_IP:
                type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_IPV4;
                /* Tell transmit desc to also do IPv4 checksum. */
                *olinfo_status |= IGC_TXD_POPTS_IXSM << 8;
                break;
        default:
                panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
                    __func__, ntohs(pi->ipi_etype));
                break;
        }

        TXD = (struct igc_adv_tx_context_desc *)&txr->tx_base[pi->ipi_pidx];

        /* This is used in the transmit desc in encap */
        paylen = pi->ipi_len - pi->ipi_ehdrlen - pi->ipi_ip_hlen -
            pi->ipi_tcp_hlen;

        /* VLAN MACLEN IPLEN */
        if (pi->ipi_mflags & M_VLANTAG) {
                vlan_macip_lens |= (pi->ipi_vtag << IGC_ADVTXD_VLAN_SHIFT);
        }

        vlan_macip_lens |= pi->ipi_ehdrlen << IGC_ADVTXD_MACLEN_SHIFT;
        vlan_macip_lens |= pi->ipi_ip_hlen;
        TXD->vlan_macip_lens = htole32(vlan_macip_lens);

        /* ADV DTYPE TUCMD */
        type_tucmd_mlhl |= IGC_ADVTXD_DCMD_DEXT | IGC_ADVTXD_DTYP_CTXT;
        type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_TCP;
        TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);

        /* MSS L4LEN IDX */
        mss_l4len_idx |= (pi->ipi_tso_segsz << IGC_ADVTXD_MSS_SHIFT);
        mss_l4len_idx |= (pi->ipi_tcp_hlen << IGC_ADVTXD_L4LEN_SHIFT);
        TXD->mss_l4len_idx = htole32(mss_l4len_idx);

        TXD->seqnum_seed = htole32(0);
        *cmd_type_len |= IGC_ADVTXD_DCMD_TSE;
        *olinfo_status |= IGC_TXD_POPTS_TXSM << 8;
        *olinfo_status |= paylen << IGC_ADVTXD_PAYLEN_SHIFT;

        return (1);
}
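/*
 * Note: igc_tso_setup() above always consumes one ring slot for the
 * context descriptor it writes and returns 1; igc_tx_ctx_setup() below
 * returns 0 or 1 on the same basis.  igc_isc_txd_encap() advances its
 * producer index by the returned count.
 */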
/*********************************************************************
 *
 *  Advanced Context Descriptor setup for VLAN, CSUM or TSO
 *
 **********************************************************************/
static int
igc_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi, uint32_t *cmd_type_len,
    uint32_t *olinfo_status)
{
        struct igc_adv_tx_context_desc *TXD;
        uint32_t vlan_macip_lens, type_tucmd_mlhl;
        uint32_t mss_l4len_idx;

        mss_l4len_idx = vlan_macip_lens = type_tucmd_mlhl = 0;

        /* First check if TSO is to be used */
        if (pi->ipi_csum_flags & CSUM_TSO)
                return (igc_tso_setup(txr, pi, cmd_type_len, olinfo_status));

        /* Indicate the whole packet as payload when not doing TSO */
        *olinfo_status |= pi->ipi_len << IGC_ADVTXD_PAYLEN_SHIFT;

        /* Now ready a context descriptor */
        TXD = (struct igc_adv_tx_context_desc *)&txr->tx_base[pi->ipi_pidx];

        /*
        ** In advanced descriptors the vlan tag must
        ** be placed into the context descriptor. Hence
        ** we need to make one even if not doing offloads.
        */
        if (pi->ipi_mflags & M_VLANTAG) {
                vlan_macip_lens |= (pi->ipi_vtag << IGC_ADVTXD_VLAN_SHIFT);
        } else if ((pi->ipi_csum_flags & IGC_CSUM_OFFLOAD) == 0) {
                return (0);
        }

        /* Set the ether header length */
        vlan_macip_lens |= pi->ipi_ehdrlen << IGC_ADVTXD_MACLEN_SHIFT;

        switch (pi->ipi_etype) {
        case ETHERTYPE_IP:
                type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_IPV4;
                break;
        case ETHERTYPE_IPV6:
                type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_IPV6;
                break;
        default:
                break;
        }

        vlan_macip_lens |= pi->ipi_ip_hlen;
        type_tucmd_mlhl |= IGC_ADVTXD_DCMD_DEXT | IGC_ADVTXD_DTYP_CTXT;

        switch (pi->ipi_ipproto) {
        case IPPROTO_TCP:
                if (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP)) {
                        type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_TCP;
                        *olinfo_status |= IGC_TXD_POPTS_TXSM << 8;
                }
                break;
        case IPPROTO_UDP:
                if (pi->ipi_csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP)) {
                        type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_UDP;
                        *olinfo_status |= IGC_TXD_POPTS_TXSM << 8;
                }
                break;
        case IPPROTO_SCTP:
                if (pi->ipi_csum_flags & (CSUM_IP_SCTP | CSUM_IP6_SCTP)) {
                        type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_SCTP;
                        *olinfo_status |= IGC_TXD_POPTS_TXSM << 8;
                }
                break;
        default:
                break;
        }

        /* Now copy bits into descriptor */
        TXD->vlan_macip_lens = htole32(vlan_macip_lens);
        TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
        TXD->seqnum_seed = htole32(0);
        TXD->mss_l4len_idx = htole32(mss_l4len_idx);

        return (1);
}
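/*
 * Note on completion reporting: when iflib requests a TX interrupt,
 * igc_isc_txd_encap() below sets the RS (Report Status) bit on the last
 * descriptor of the packet and records that descriptor's ring index in
 * tx_rsq.  igc_isc_txd_credits_update() then only has to poll those
 * slots for the DD (Descriptor Done) bit instead of scanning the whole
 * ring.
 */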
static int
igc_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
        struct igc_adapter *sc = arg;
        if_softc_ctx_t scctx = sc->shared;
        struct igc_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx];
        struct tx_ring *txr = &que->txr;
        int nsegs = pi->ipi_nsegs;
        bus_dma_segment_t *segs = pi->ipi_segs;
        union igc_adv_tx_desc *txd = NULL;
        int i, j, pidx_last;
        uint32_t olinfo_status, cmd_type_len, txd_flags;
        qidx_t ntxd;

        pidx_last = olinfo_status = 0;
        /* Basic descriptor defines */
        cmd_type_len = (IGC_ADVTXD_DTYP_DATA |
            IGC_ADVTXD_DCMD_IFCS | IGC_ADVTXD_DCMD_DEXT);

        if (pi->ipi_mflags & M_VLANTAG)
                cmd_type_len |= IGC_ADVTXD_DCMD_VLE;

        i = pi->ipi_pidx;
        ntxd = scctx->isc_ntxd[0];
        txd_flags = pi->ipi_flags & IPI_TX_INTR ? IGC_ADVTXD_DCMD_RS : 0;
        /* Consume the first descriptor */
        i += igc_tx_ctx_setup(txr, pi, &cmd_type_len, &olinfo_status);
        if (i == scctx->isc_ntxd[0])
                i = 0;

        for (j = 0; j < nsegs; j++) {
                bus_size_t seglen;
                bus_addr_t segaddr;

                txd = (union igc_adv_tx_desc *)&txr->tx_base[i];
                seglen = segs[j].ds_len;
                segaddr = htole64(segs[j].ds_addr);

                txd->read.buffer_addr = segaddr;
                txd->read.cmd_type_len = htole32(IGC_ADVTXD_DCMD_IFCS |
                    cmd_type_len | seglen);
                txd->read.olinfo_status = htole32(olinfo_status);
                pidx_last = i;
                if (++i == scctx->isc_ntxd[0]) {
                        i = 0;
                }
        }
        if (txd_flags) {
                txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
                txr->tx_rs_pidx = (txr->tx_rs_pidx + 1) & (ntxd - 1);
                MPASS(txr->tx_rs_pidx != txr->tx_rs_cidx);
        }

        txd->read.cmd_type_len |= htole32(IGC_ADVTXD_DCMD_EOP | txd_flags);
        pi->ipi_new_pidx = i;

        return (0);
}

static void
igc_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
        struct igc_adapter *adapter = arg;
        struct igc_tx_queue *que = &adapter->tx_queues[txqid];
        struct tx_ring *txr = &que->txr;

        IGC_WRITE_REG(&adapter->hw, IGC_TDT(txr->me), pidx);
}

static int
igc_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
        struct igc_adapter *adapter = arg;
        if_softc_ctx_t scctx = adapter->shared;
        struct igc_tx_queue *que = &adapter->tx_queues[txqid];
        struct tx_ring *txr = &que->txr;

        qidx_t processed = 0;
        int updated;
        qidx_t cur, prev, ntxd, rs_cidx;
        int32_t delta;
        uint8_t status;

        rs_cidx = txr->tx_rs_cidx;
        if (rs_cidx == txr->tx_rs_pidx)
                return (0);
        cur = txr->tx_rsq[rs_cidx];
        status = ((union igc_adv_tx_desc *)&txr->tx_base[cur])->wb.status;
        updated = !!(status & IGC_TXD_STAT_DD);

        if (!updated)
                return (0);

        /*
         * If clear is false, just let the caller know that there
         * are descriptors to reclaim.
         */
        if (!clear)
                return (1);

        prev = txr->tx_cidx_processed;
        ntxd = scctx->isc_ntxd[0];
        do {
                MPASS(prev != cur);
                delta = (int32_t)cur - (int32_t)prev;
                if (delta < 0)
                        delta += ntxd;
                MPASS(delta > 0);

                processed += delta;
                prev = cur;
                rs_cidx = (rs_cidx + 1) & (ntxd - 1);
                if (rs_cidx == txr->tx_rs_pidx)
                        break;
                cur = txr->tx_rsq[rs_cidx];
                status = ((union igc_adv_tx_desc *)
                    &txr->tx_base[cur])->wb.status;
        } while ((status & IGC_TXD_STAT_DD));

        txr->tx_rs_cidx = rs_cidx;
        txr->tx_cidx_processed = prev;
        return (processed);
}
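/*
 * The RX refill path is split in two: igc_isc_rxd_refill() stores fresh
 * buffer addresses in the descriptor ring, and igc_isc_rxd_flush() makes
 * them visible to the hardware with a single write to the RDT (Receive
 * Descriptor Tail) register.
 */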
static void
igc_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
        struct igc_adapter *sc = arg;
        if_softc_ctx_t scctx = sc->shared;
        uint16_t rxqid = iru->iru_qsidx;
        struct igc_rx_queue *que = &sc->rx_queues[rxqid];
        union igc_adv_rx_desc *rxd;
        struct rx_ring *rxr = &que->rxr;
        uint64_t *paddrs;
        uint32_t next_pidx, pidx;
        uint16_t count;
        int i;

        paddrs = iru->iru_paddrs;
        pidx = iru->iru_pidx;
        count = iru->iru_count;

        for (i = 0, next_pidx = pidx; i < count; i++) {
                rxd = (union igc_adv_rx_desc *)&rxr->rx_base[next_pidx];

                rxd->read.pkt_addr = htole64(paddrs[i]);
                if (++next_pidx == scctx->isc_nrxd[0])
                        next_pidx = 0;
        }
}

static void
igc_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx)
{
        struct igc_adapter *sc = arg;
        struct igc_rx_queue *que = &sc->rx_queues[rxqid];
        struct rx_ring *rxr = &que->rxr;

        IGC_WRITE_REG(&sc->hw, IGC_RDT(rxr->me), pidx);
}

static int
igc_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
        struct igc_adapter *sc = arg;
        if_softc_ctx_t scctx = sc->shared;
        struct igc_rx_queue *que = &sc->rx_queues[rxqid];
        struct rx_ring *rxr = &que->rxr;
        union igc_adv_rx_desc *rxd;
        uint32_t staterr = 0;
        int cnt, i;

        /* Count complete packets: cnt only advances on EOP descriptors. */
        for (cnt = 0, i = idx; cnt < scctx->isc_nrxd[0] && cnt <= budget;) {
                rxd = (union igc_adv_rx_desc *)&rxr->rx_base[i];
                staterr = le32toh(rxd->wb.upper.status_error);

                if ((staterr & IGC_RXD_STAT_DD) == 0)
                        break;
                if (++i == scctx->isc_nrxd[0])
                        i = 0;
                if (staterr & IGC_RXD_STAT_EOP)
                        cnt++;
        }
        return (cnt);
}

/****************************************************************
 * Send data that has been DMA'ed into host memory up to the
 * stack, initializing the ri structure with the packet's
 * fragments as we go.
 *
 * Returns 0 upon success, errno on failure
 ***************************************************************/
static int
igc_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
        struct igc_adapter *adapter = arg;
        if_softc_ctx_t scctx = adapter->shared;
        struct igc_rx_queue *que = &adapter->rx_queues[ri->iri_qsidx];
        struct rx_ring *rxr = &que->rxr;
        union igc_adv_rx_desc *rxd;

        uint16_t pkt_info, len, vtag;
        uint32_t ptype, staterr;
        int i, cidx;
        bool eop;

        staterr = i = vtag = 0;
        cidx = ri->iri_cidx;

        do {
                rxd = (union igc_adv_rx_desc *)&rxr->rx_base[cidx];
                staterr = le32toh(rxd->wb.upper.status_error);
                pkt_info = le16toh(rxd->wb.lower.lo_dword.hs_rss.pkt_info);

                MPASS((staterr & IGC_RXD_STAT_DD) != 0);

                len = le16toh(rxd->wb.upper.length);
                ptype = le32toh(rxd->wb.lower.lo_dword.data) &
                    IGC_PKTTYPE_MASK;

                ri->iri_len += len;
                rxr->rx_bytes += ri->iri_len;

                rxd->wb.upper.status_error = 0;
                eop = ((staterr & IGC_RXD_STAT_EOP) == IGC_RXD_STAT_EOP);

                vtag = le16toh(rxd->wb.upper.vlan);

                /* Make sure bad packets are discarded */
                if (eop && ((staterr & IGC_RXDEXT_STATERR_RXE) != 0)) {
                        adapter->dropped_pkts++;
                        ++rxr->rx_discarded;
                        return (EBADMSG);
                }
                ri->iri_frags[i].irf_flid = 0;
                ri->iri_frags[i].irf_idx = cidx;
                ri->iri_frags[i].irf_len = len;

                if (++cidx == scctx->isc_nrxd[0])
                        cidx = 0;
#ifdef notyet
                if (rxr->hdr_split == true) {
                        ri->iri_frags[i].irf_flid = 1;
                        ri->iri_frags[i].irf_idx = cidx;
                        if (++cidx == scctx->isc_nrxd[0])
                                cidx = 0;
                }
#endif
                i++;
        } while (!eop);

        rxr->rx_packets++;

        if ((scctx->isc_capenable & IFCAP_RXCSUM) != 0)
                igc_rx_checksum(staterr, ri, ptype);

        if ((scctx->isc_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
            (staterr & IGC_RXD_STAT_VP) != 0) {
                ri->iri_vtag = vtag;
                ri->iri_flags |= M_VLANTAG;
        }

        ri->iri_flowid = le32toh(rxd->wb.lower.hi_dword.rss);
        ri->iri_rsstype = igc_determine_rsstype(pkt_info);
        ri->iri_nfrags = i;

        return (0);
}
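/*
 * Note: in the advanced descriptor write-back format, the low bits of
 * status_error carry the status flags and bits 24-31 carry the error
 * flags, which is why igc_rx_checksum() below splits staterr with a
 * uint16_t cast and a 24-bit shift.
 */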
/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the checksum status so that the stack
 *  doesn't spend time verifying it again.
 *
 *********************************************************************/
static void
igc_rx_checksum(uint32_t staterr, if_rxd_info_t ri, uint32_t ptype)
{
        uint16_t status = (uint16_t)staterr;
        uint8_t errors = (uint8_t)(staterr >> 24);

        if (__predict_false(status & IGC_RXD_STAT_IXSM))
                return;

        /* If there is a layer 3 or 4 error we are done */
        if (__predict_false(errors & (IGC_RXD_ERR_IPE | IGC_RXD_ERR_TCPE)))
                return;

        /* IP Checksum Good */
        if (status & IGC_RXD_STAT_IPCS)
                ri->iri_csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID);

        /* Valid L4 checksum */
        if (__predict_true(status &
            (IGC_RXD_STAT_TCPCS | IGC_RXD_STAT_UDPCS))) {
                /* SCTP header present */
                if (__predict_false((ptype & IGC_RXDADV_PKTTYPE_ETQF) == 0 &&
                    (ptype & IGC_RXDADV_PKTTYPE_SCTP) != 0)) {
                        ri->iri_csum_flags |= CSUM_SCTP_VALID;
                } else {
                        ri->iri_csum_flags |= CSUM_DATA_VALID |
                            CSUM_PSEUDO_HDR;
                        ri->iri_csum_data = htons(0xffff);
                }
        }
}

/********************************************************************
 *
 *  Parse the packet type to determine the appropriate hash
 *
 ********************************************************************/
static int
igc_determine_rsstype(uint16_t pkt_info)
{
        switch (pkt_info & IGC_RXDADV_RSSTYPE_MASK) {
        case IGC_RXDADV_RSSTYPE_IPV4_TCP:
                return M_HASHTYPE_RSS_TCP_IPV4;
        case IGC_RXDADV_RSSTYPE_IPV4:
                return M_HASHTYPE_RSS_IPV4;
        case IGC_RXDADV_RSSTYPE_IPV6_TCP:
                return M_HASHTYPE_RSS_TCP_IPV6;
        case IGC_RXDADV_RSSTYPE_IPV6_EX:
                return M_HASHTYPE_RSS_IPV6_EX;
        case IGC_RXDADV_RSSTYPE_IPV6:
                return M_HASHTYPE_RSS_IPV6;
        case IGC_RXDADV_RSSTYPE_IPV6_TCP_EX:
                return M_HASHTYPE_RSS_TCP_IPV6_EX;
        default:
                return M_HASHTYPE_OPAQUE;
        }
}
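/*
 * Note: hash types that don't map to one of the cases above are reported
 * as M_HASHTYPE_OPAQUE, which still marks the flowid as valid so the
 * stack can use it for flow distribution without knowing how the hash
 * was computed.
 */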