/*-
 * Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016 Broadcom, All Rights Reserved.
 * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
27 */ 28 29 #include <sys/types.h> 30 #include <sys/socket.h> 31 #include <sys/endian.h> 32 #include <net/if.h> 33 #include <net/if_var.h> 34 #include <net/ethernet.h> 35 #include <net/iflib.h> 36 37 #include "opt_inet.h" 38 #include "opt_inet6.h" 39 #include "opt_rss.h" 40 41 #include "bnxt.h" 42 43 /* 44 * Function prototypes 45 */ 46 47 static int bnxt_isc_txd_encap(void *sc, if_pkt_info_t pi); 48 static void bnxt_isc_txd_flush(void *sc, uint16_t txqid, qidx_t pidx); 49 static int bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, bool clear); 50 51 static void bnxt_isc_rxd_refill(void *sc, if_rxd_update_t iru); 52 53 /* uint16_t rxqid, uint8_t flid, 54 uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs, uint16_t count, 55 uint16_t buf_size); 56 */ 57 static void bnxt_isc_rxd_flush(void *sc, uint16_t rxqid, uint8_t flid, 58 qidx_t pidx); 59 static int bnxt_isc_rxd_available(void *sc, uint16_t rxqid, qidx_t idx, 60 qidx_t budget); 61 static int bnxt_isc_rxd_pkt_get(void *sc, if_rxd_info_t ri); 62 63 static int bnxt_intr(void *sc); 64 65 struct if_txrx bnxt_txrx = { 66 .ift_txd_encap = bnxt_isc_txd_encap, 67 .ift_txd_flush = bnxt_isc_txd_flush, 68 .ift_txd_credits_update = bnxt_isc_txd_credits_update, 69 .ift_rxd_available = bnxt_isc_rxd_available, 70 .ift_rxd_pkt_get = bnxt_isc_rxd_pkt_get, 71 .ift_rxd_refill = bnxt_isc_rxd_refill, 72 .ift_rxd_flush = bnxt_isc_rxd_flush, 73 .ift_legacy_intr = bnxt_intr 74 }; 75 76 /* 77 * Device Dependent Packet Transmit and Receive Functions 78 */ 79 80 static const uint16_t bnxt_tx_lhint[] = { 81 TX_BD_SHORT_FLAGS_LHINT_LT512, 82 TX_BD_SHORT_FLAGS_LHINT_LT1K, 83 TX_BD_SHORT_FLAGS_LHINT_LT2K, 84 TX_BD_SHORT_FLAGS_LHINT_LT2K, 85 TX_BD_SHORT_FLAGS_LHINT_GTE2K, 86 }; 87 88 static int 89 bnxt_isc_txd_encap(void *sc, if_pkt_info_t pi) 90 { 91 struct bnxt_softc *softc = (struct bnxt_softc *)sc; 92 struct bnxt_ring *txr = &softc->tx_rings[pi->ipi_qsidx]; 93 struct tx_bd_long *tbd; 94 struct tx_bd_long_hi *tbdh; 95 bool need_hi = 
false; 96 uint16_t flags_type; 97 uint16_t lflags; 98 uint32_t cfa_meta; 99 int seg = 0; 100 101 /* If we have offloads enabled, we need to use two BDs. */ 102 if ((pi->ipi_csum_flags & (CSUM_OFFLOAD | CSUM_TSO | CSUM_IP)) || 103 pi->ipi_mflags & M_VLANTAG) 104 need_hi = true; 105 106 /* TODO: Devices before Cu+B1 need to not mix long and short BDs */ 107 need_hi = true; 108 109 pi->ipi_new_pidx = pi->ipi_pidx; 110 tbd = &((struct tx_bd_long *)txr->vaddr)[pi->ipi_new_pidx]; 111 pi->ipi_ndescs = 0; 112 /* No need to byte-swap the opaque value */ 113 tbd->opaque = ((pi->ipi_nsegs + need_hi) << 24) | pi->ipi_new_pidx; 114 tbd->len = htole16(pi->ipi_segs[seg].ds_len); 115 tbd->addr = htole64(pi->ipi_segs[seg++].ds_addr); 116 flags_type = ((pi->ipi_nsegs + need_hi) << 117 TX_BD_SHORT_FLAGS_BD_CNT_SFT) & TX_BD_SHORT_FLAGS_BD_CNT_MASK; 118 if (pi->ipi_len >= 2048) 119 flags_type |= TX_BD_SHORT_FLAGS_LHINT_GTE2K; 120 else 121 flags_type |= bnxt_tx_lhint[pi->ipi_len >> 9]; 122 123 if (need_hi) { 124 flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG; 125 126 pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx); 127 tbdh = &((struct tx_bd_long_hi *)txr->vaddr)[pi->ipi_new_pidx]; 128 tbdh->kid_or_ts_high_mss = htole16(pi->ipi_tso_segsz); 129 tbdh->kid_or_ts_low_hdr_size = htole16((pi->ipi_ehdrlen + pi->ipi_ip_hlen + 130 pi->ipi_tcp_hlen) >> 1); 131 tbdh->cfa_action = 0; 132 lflags = 0; 133 cfa_meta = 0; 134 if (pi->ipi_mflags & M_VLANTAG) { 135 /* TODO: Do we need to byte-swap the vtag here? 
*/ 136 cfa_meta = TX_BD_LONG_CFA_META_KEY_VLAN_TAG | 137 pi->ipi_vtag; 138 cfa_meta |= TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100; 139 } 140 tbdh->cfa_meta = htole32(cfa_meta); 141 if (pi->ipi_csum_flags & CSUM_TSO) { 142 lflags |= TX_BD_LONG_LFLAGS_LSO | 143 TX_BD_LONG_LFLAGS_T_IPID; 144 } 145 else if(pi->ipi_csum_flags & CSUM_OFFLOAD) { 146 lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM | 147 TX_BD_LONG_LFLAGS_IP_CHKSUM; 148 } 149 else if(pi->ipi_csum_flags & CSUM_IP) { 150 lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM; 151 } 152 tbdh->lflags = htole16(lflags); 153 } 154 else { 155 flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT; 156 } 157 158 for (; seg < pi->ipi_nsegs; seg++) { 159 tbd->flags_type = htole16(flags_type); 160 pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx); 161 tbd = &((struct tx_bd_long *)txr->vaddr)[pi->ipi_new_pidx]; 162 tbd->len = htole16(pi->ipi_segs[seg].ds_len); 163 tbd->addr = htole64(pi->ipi_segs[seg].ds_addr); 164 flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT; 165 } 166 flags_type |= TX_BD_SHORT_FLAGS_PACKET_END; 167 tbd->flags_type = htole16(flags_type); 168 pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx); 169 170 return 0; 171 } 172 173 static void 174 bnxt_isc_txd_flush(void *sc, uint16_t txqid, qidx_t pidx) 175 { 176 struct bnxt_softc *softc = (struct bnxt_softc *)sc; 177 struct bnxt_ring *tx_ring = &softc->tx_rings[txqid]; 178 179 /* pidx is what we last set ipi_new_pidx to */ 180 softc->db_ops.bnxt_db_tx(tx_ring, pidx); 181 return; 182 } 183 184 static int 185 bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, bool clear) 186 { 187 struct bnxt_softc *softc = (struct bnxt_softc *)sc; 188 struct bnxt_cp_ring *cpr = &softc->tx_cp_rings[txqid]; 189 struct tx_cmpl *cmpl = (struct tx_cmpl *)cpr->ring.vaddr; 190 int avail = 0; 191 uint32_t cons = cpr->cons; 192 bool v_bit = cpr->v_bit; 193 bool last_v_bit; 194 uint32_t last_cons; 195 uint16_t type; 196 uint16_t err; 197 198 for (;;) { 199 last_cons = cons; 200 last_v_bit = v_bit; 201 
NEXT_CP_CONS_V(&cpr->ring, cons, v_bit); 202 CMPL_PREFETCH_NEXT(cpr, cons); 203 204 if (!CMP_VALID(&cmpl[cons], v_bit)) 205 goto done; 206 207 type = cmpl[cons].flags_type & TX_CMPL_TYPE_MASK; 208 switch (type) { 209 case TX_CMPL_TYPE_TX_L2: 210 err = (le16toh(cmpl[cons].errors_v) & 211 TX_CMPL_ERRORS_BUFFER_ERROR_MASK) >> 212 TX_CMPL_ERRORS_BUFFER_ERROR_SFT; 213 if (err) 214 device_printf(softc->dev, 215 "TX completion error %u\n", err); 216 /* No need to byte-swap the opaque value */ 217 avail += cmpl[cons].opaque >> 24; 218 /* 219 * If we're not clearing, iflib only cares if there's 220 * at least one buffer. Don't scan the whole ring in 221 * this case. 222 */ 223 if (!clear) 224 goto done; 225 break; 226 default: 227 if (type & 1) { 228 NEXT_CP_CONS_V(&cpr->ring, cons, v_bit); 229 if (!CMP_VALID(&cmpl[cons], v_bit)) 230 goto done; 231 } 232 device_printf(softc->dev, 233 "Unhandled TX completion type %u\n", type); 234 break; 235 } 236 } 237 done: 238 239 if (clear && avail) { 240 cpr->cons = last_cons; 241 cpr->v_bit = last_v_bit; 242 softc->db_ops.bnxt_db_tx_cq(cpr, 0); 243 } 244 245 return avail; 246 } 247 248 static void 249 bnxt_isc_rxd_refill(void *sc, if_rxd_update_t iru) 250 { 251 struct bnxt_softc *softc = (struct bnxt_softc *)sc; 252 struct bnxt_ring *rx_ring; 253 struct rx_prod_pkt_bd *rxbd; 254 uint16_t type; 255 uint16_t i; 256 uint16_t rxqid; 257 uint16_t count; 258 uint32_t pidx; 259 uint8_t flid; 260 uint64_t *paddrs; 261 qidx_t *frag_idxs; 262 263 rxqid = iru->iru_qsidx; 264 count = iru->iru_count; 265 pidx = iru->iru_pidx; 266 flid = iru->iru_flidx; 267 paddrs = iru->iru_paddrs; 268 frag_idxs = iru->iru_idxs; 269 270 if (flid == 0) { 271 rx_ring = &softc->rx_rings[rxqid]; 272 type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT; 273 } 274 else { 275 rx_ring = &softc->ag_rings[rxqid]; 276 type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG; 277 } 278 rxbd = (void *)rx_ring->vaddr; 279 280 for (i=0; i<count; i++) { 281 rxbd[pidx].flags_type = htole16(type); 282 
rxbd[pidx].len = htole16(softc->rx_buf_size); 283 /* No need to byte-swap the opaque value */ 284 rxbd[pidx].opaque = (((rxqid & 0xff) << 24) | (flid << 16) 285 | (frag_idxs[i])); 286 rxbd[pidx].addr = htole64(paddrs[i]); 287 if (++pidx == rx_ring->ring_size) 288 pidx = 0; 289 } 290 return; 291 } 292 293 static void 294 bnxt_isc_rxd_flush(void *sc, uint16_t rxqid, uint8_t flid, 295 qidx_t pidx) 296 { 297 struct bnxt_softc *softc = (struct bnxt_softc *)sc; 298 struct bnxt_ring *rx_ring; 299 300 if (flid == 0) 301 rx_ring = &softc->rx_rings[rxqid]; 302 else 303 rx_ring = &softc->ag_rings[rxqid]; 304 305 /* 306 * We *must* update the completion ring before updating the RX ring 307 * or we will overrun the completion ring and the device will wedge for 308 * RX. 309 */ 310 softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[rxqid], 0); 311 softc->db_ops.bnxt_db_rx(rx_ring, pidx); 312 return; 313 } 314 315 static int 316 bnxt_isc_rxd_available(void *sc, uint16_t rxqid, qidx_t idx, qidx_t budget) 317 { 318 struct bnxt_softc *softc = (struct bnxt_softc *)sc; 319 struct bnxt_cp_ring *cpr = &softc->rx_cp_rings[rxqid]; 320 struct rx_pkt_cmpl *rcp; 321 struct rx_tpa_end_cmpl *rtpae; 322 struct cmpl_base *cmp = (struct cmpl_base *)cpr->ring.vaddr; 323 int avail = 0; 324 uint32_t cons = cpr->cons; 325 bool v_bit = cpr->v_bit; 326 uint8_t ags; 327 int i; 328 uint16_t type; 329 330 for (;;) { 331 NEXT_CP_CONS_V(&cpr->ring, cons, v_bit); 332 CMPL_PREFETCH_NEXT(cpr, cons); 333 334 if (!CMP_VALID(&cmp[cons], v_bit)) 335 goto cmpl_invalid; 336 337 type = le16toh(cmp[cons].type) & CMPL_BASE_TYPE_MASK; 338 switch (type) { 339 case CMPL_BASE_TYPE_RX_L2: 340 rcp = (void *)&cmp[cons]; 341 ags = (rcp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) >> 342 RX_PKT_CMPL_AGG_BUFS_SFT; 343 NEXT_CP_CONS_V(&cpr->ring, cons, v_bit); 344 CMPL_PREFETCH_NEXT(cpr, cons); 345 346 if (!CMP_VALID(&cmp[cons], v_bit)) 347 goto cmpl_invalid; 348 349 /* Now account for all the AG completions */ 350 for (i=0; i<ags; i++) 
{ 351 NEXT_CP_CONS_V(&cpr->ring, cons, v_bit); 352 CMPL_PREFETCH_NEXT(cpr, cons); 353 if (!CMP_VALID(&cmp[cons], v_bit)) 354 goto cmpl_invalid; 355 } 356 avail++; 357 break; 358 case CMPL_BASE_TYPE_RX_TPA_END: 359 rtpae = (void *)&cmp[cons]; 360 ags = (rtpae->agg_bufs_v1 & 361 RX_TPA_END_CMPL_AGG_BUFS_MASK) >> 362 RX_TPA_END_CMPL_AGG_BUFS_SFT; 363 NEXT_CP_CONS_V(&cpr->ring, cons, v_bit); 364 CMPL_PREFETCH_NEXT(cpr, cons); 365 366 if (!CMP_VALID(&cmp[cons], v_bit)) 367 goto cmpl_invalid; 368 /* Now account for all the AG completions */ 369 for (i=0; i<ags; i++) { 370 NEXT_CP_CONS_V(&cpr->ring, cons, v_bit); 371 CMPL_PREFETCH_NEXT(cpr, cons); 372 if (!CMP_VALID(&cmp[cons], v_bit)) 373 goto cmpl_invalid; 374 } 375 avail++; 376 break; 377 case CMPL_BASE_TYPE_RX_TPA_START: 378 NEXT_CP_CONS_V(&cpr->ring, cons, v_bit); 379 CMPL_PREFETCH_NEXT(cpr, cons); 380 381 if (!CMP_VALID(&cmp[cons], v_bit)) 382 goto cmpl_invalid; 383 break; 384 case CMPL_BASE_TYPE_RX_AGG: 385 break; 386 default: 387 device_printf(softc->dev, 388 "Unhandled completion type %d on RXQ %d\n", 389 type, rxqid); 390 391 /* Odd completion types use two completions */ 392 if (type & 1) { 393 NEXT_CP_CONS_V(&cpr->ring, cons, v_bit); 394 CMPL_PREFETCH_NEXT(cpr, cons); 395 396 if (!CMP_VALID(&cmp[cons], v_bit)) 397 goto cmpl_invalid; 398 } 399 break; 400 } 401 if (avail > budget) 402 break; 403 } 404 cmpl_invalid: 405 406 return avail; 407 } 408 409 static void 410 bnxt_set_rsstype(if_rxd_info_t ri, uint8_t rss_hash_type) 411 { 412 uint8_t rss_profile_id; 413 414 rss_profile_id = BNXT_GET_RSS_PROFILE_ID(rss_hash_type); 415 switch (rss_profile_id) { 416 case BNXT_RSS_HASH_TYPE_TCPV4: 417 ri->iri_rsstype = M_HASHTYPE_RSS_TCP_IPV4; 418 break; 419 case BNXT_RSS_HASH_TYPE_UDPV4: 420 ri->iri_rsstype = M_HASHTYPE_RSS_UDP_IPV4; 421 break; 422 case BNXT_RSS_HASH_TYPE_IPV4: 423 ri->iri_rsstype = M_HASHTYPE_RSS_IPV4; 424 break; 425 case BNXT_RSS_HASH_TYPE_TCPV6: 426 ri->iri_rsstype = M_HASHTYPE_RSS_TCP_IPV6; 427 break; 
428 case BNXT_RSS_HASH_TYPE_UDPV6: 429 ri->iri_rsstype = M_HASHTYPE_RSS_UDP_IPV6; 430 break; 431 case BNXT_RSS_HASH_TYPE_IPV6: 432 ri->iri_rsstype = M_HASHTYPE_RSS_IPV6; 433 break; 434 default: 435 ri->iri_rsstype = M_HASHTYPE_OPAQUE_HASH; 436 break; 437 } 438 } 439 440 static int 441 bnxt_pkt_get_l2(struct bnxt_softc *softc, if_rxd_info_t ri, 442 struct bnxt_cp_ring *cpr, uint16_t flags_type) 443 { 444 struct rx_pkt_cmpl *rcp; 445 struct rx_pkt_cmpl_hi *rcph; 446 struct rx_abuf_cmpl *acp; 447 uint32_t flags2; 448 uint32_t errors; 449 uint8_t ags; 450 int i; 451 452 rcp = &((struct rx_pkt_cmpl *)cpr->ring.vaddr)[cpr->cons]; 453 454 /* Extract from the first 16-byte BD */ 455 if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) { 456 ri->iri_flowid = le32toh(rcp->rss_hash); 457 bnxt_set_rsstype(ri, rcp->rss_hash_type); 458 } 459 else { 460 ri->iri_rsstype = M_HASHTYPE_NONE; 461 } 462 ags = (rcp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) >> 463 RX_PKT_CMPL_AGG_BUFS_SFT; 464 ri->iri_nfrags = ags + 1; 465 /* No need to byte-swap the opaque value */ 466 ri->iri_frags[0].irf_flid = (rcp->opaque >> 16) & 0xff; 467 ri->iri_frags[0].irf_idx = rcp->opaque & 0xffff; 468 ri->iri_frags[0].irf_len = le16toh(rcp->len); 469 ri->iri_len = le16toh(rcp->len); 470 471 /* Now the second 16-byte BD */ 472 NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit); 473 ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx); 474 rcph = &((struct rx_pkt_cmpl_hi *)cpr->ring.vaddr)[cpr->cons]; 475 476 flags2 = le32toh(rcph->flags2); 477 errors = le16toh(rcph->errors_v2); 478 if ((flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK) == 479 RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) { 480 ri->iri_flags |= M_VLANTAG; 481 /* TODO: Should this be the entire 16-bits? 
*/ 482 ri->iri_vtag = le32toh(rcph->metadata) & 483 (RX_PKT_CMPL_METADATA_VID_MASK | RX_PKT_CMPL_METADATA_DE | 484 RX_PKT_CMPL_METADATA_PRI_MASK); 485 } 486 if (flags2 & RX_PKT_CMPL_FLAGS2_IP_CS_CALC) { 487 ri->iri_csum_flags |= CSUM_IP_CHECKED; 488 if (!(errors & RX_PKT_CMPL_ERRORS_IP_CS_ERROR)) 489 ri->iri_csum_flags |= CSUM_IP_VALID; 490 } 491 if (flags2 & (RX_PKT_CMPL_FLAGS2_L4_CS_CALC | 492 RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)) { 493 ri->iri_csum_flags |= CSUM_L4_CALC; 494 if (!(errors & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR | 495 RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR))) { 496 ri->iri_csum_flags |= CSUM_L4_VALID; 497 ri->iri_csum_data = 0xffff; 498 } 499 } 500 501 /* And finally the ag ring stuff. */ 502 for (i=1; i < ri->iri_nfrags; i++) { 503 NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit); 504 ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx); 505 acp = &((struct rx_abuf_cmpl *)cpr->ring.vaddr)[cpr->cons]; 506 507 /* No need to byte-swap the opaque value */ 508 ri->iri_frags[i].irf_flid = (acp->opaque >> 16 & 0xff); 509 ri->iri_frags[i].irf_idx = acp->opaque & 0xffff; 510 ri->iri_frags[i].irf_len = le16toh(acp->len); 511 ri->iri_len += le16toh(acp->len); 512 } 513 514 return 0; 515 } 516 517 static int 518 bnxt_pkt_get_tpa(struct bnxt_softc *softc, if_rxd_info_t ri, 519 struct bnxt_cp_ring *cpr, uint16_t flags_type) 520 { 521 struct rx_tpa_end_cmpl *agend = 522 &((struct rx_tpa_end_cmpl *)cpr->ring.vaddr)[cpr->cons]; 523 struct rx_abuf_cmpl *acp; 524 struct bnxt_full_tpa_start *tpas; 525 uint32_t flags2; 526 uint8_t ags; 527 uint8_t agg_id; 528 int i; 529 530 /* Get the agg_id */ 531 agg_id = (agend->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK) >> 532 RX_TPA_END_CMPL_AGG_ID_SFT; 533 tpas = &(softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id]); 534 535 /* Extract from the first 16-byte BD */ 536 if (le16toh(tpas->low.flags_type) & RX_TPA_START_CMPL_FLAGS_RSS_VALID) { 537 ri->iri_flowid = le32toh(tpas->low.rss_hash); 538 bnxt_set_rsstype(ri, tpas->low.rss_hash_type); 539 } 540 
else { 541 ri->iri_rsstype = M_HASHTYPE_NONE; 542 } 543 ags = (agend->agg_bufs_v1 & RX_TPA_END_CMPL_AGG_BUFS_MASK) >> 544 RX_TPA_END_CMPL_AGG_BUFS_SFT; 545 ri->iri_nfrags = ags + 1; 546 /* No need to byte-swap the opaque value */ 547 ri->iri_frags[0].irf_flid = ((tpas->low.opaque >> 16) & 0xff); 548 ri->iri_frags[0].irf_idx = (tpas->low.opaque & 0xffff); 549 ri->iri_frags[0].irf_len = le16toh(tpas->low.len); 550 ri->iri_len = le16toh(tpas->low.len); 551 552 /* Now the second 16-byte BD */ 553 NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit); 554 ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx); 555 556 flags2 = le32toh(tpas->high.flags2); 557 if ((flags2 & RX_TPA_START_CMPL_FLAGS2_META_FORMAT_MASK) == 558 RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN) { 559 ri->iri_flags |= M_VLANTAG; 560 /* TODO: Should this be the entire 16-bits? */ 561 ri->iri_vtag = le32toh(tpas->high.metadata) & 562 (RX_TPA_START_CMPL_METADATA_VID_MASK | 563 RX_TPA_START_CMPL_METADATA_DE | 564 RX_TPA_START_CMPL_METADATA_PRI_MASK); 565 } 566 if (flags2 & RX_TPA_START_CMPL_FLAGS2_IP_CS_CALC) { 567 ri->iri_csum_flags |= CSUM_IP_CHECKED; 568 ri->iri_csum_flags |= CSUM_IP_VALID; 569 } 570 if (flags2 & RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC) { 571 ri->iri_csum_flags |= CSUM_L4_CALC; 572 ri->iri_csum_flags |= CSUM_L4_VALID; 573 ri->iri_csum_data = 0xffff; 574 } 575 576 /* Now the ag ring stuff. */ 577 for (i=1; i < ri->iri_nfrags; i++) { 578 NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit); 579 ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx); 580 acp = &((struct rx_abuf_cmpl *)cpr->ring.vaddr)[cpr->cons]; 581 582 /* No need to byte-swap the opaque value */ 583 ri->iri_frags[i].irf_flid = ((acp->opaque >> 16) & 0xff); 584 ri->iri_frags[i].irf_idx = (acp->opaque & 0xffff); 585 ri->iri_frags[i].irf_len = le16toh(acp->len); 586 ri->iri_len += le16toh(acp->len); 587 } 588 589 /* And finally, the empty BD at the end... 
*/ 590 ri->iri_nfrags++; 591 /* No need to byte-swap the opaque value */ 592 ri->iri_frags[i].irf_flid = ((agend->opaque >> 16) & 0xff); 593 ri->iri_frags[i].irf_idx = (agend->opaque & 0xffff); 594 ri->iri_frags[i].irf_len = le16toh(agend->len); 595 ri->iri_len += le16toh(agend->len); 596 597 return 0; 598 } 599 600 /* If we return anything but zero, iflib will assert... */ 601 static int 602 bnxt_isc_rxd_pkt_get(void *sc, if_rxd_info_t ri) 603 { 604 struct bnxt_softc *softc = (struct bnxt_softc *)sc; 605 struct bnxt_cp_ring *cpr = &softc->rx_cp_rings[ri->iri_qsidx]; 606 struct cmpl_base *cmp_q = (struct cmpl_base *)cpr->ring.vaddr; 607 struct cmpl_base *cmp; 608 struct rx_tpa_start_cmpl *rtpa; 609 uint16_t flags_type; 610 uint16_t type; 611 uint8_t agg_id; 612 613 for (;;) { 614 NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit); 615 ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx); 616 CMPL_PREFETCH_NEXT(cpr, cpr->cons); 617 cmp = &((struct cmpl_base *)cpr->ring.vaddr)[cpr->cons]; 618 619 flags_type = le16toh(cmp->type); 620 type = flags_type & CMPL_BASE_TYPE_MASK; 621 622 switch (type) { 623 case CMPL_BASE_TYPE_RX_L2: 624 return bnxt_pkt_get_l2(softc, ri, cpr, flags_type); 625 case CMPL_BASE_TYPE_RX_TPA_END: 626 return bnxt_pkt_get_tpa(softc, ri, cpr, flags_type); 627 case CMPL_BASE_TYPE_RX_TPA_START: 628 rtpa = (void *)&cmp_q[cpr->cons]; 629 agg_id = (rtpa->agg_id & 630 RX_TPA_START_CMPL_AGG_ID_MASK) >> 631 RX_TPA_START_CMPL_AGG_ID_SFT; 632 softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id].low = *rtpa; 633 634 NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit); 635 ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx); 636 CMPL_PREFETCH_NEXT(cpr, cpr->cons); 637 638 softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id].high = 639 ((struct rx_tpa_start_cmpl_hi *)cmp_q)[cpr->cons]; 640 break; 641 default: 642 device_printf(softc->dev, 643 "Unhandled completion type %d on RXQ %d get\n", 644 type, ri->iri_qsidx); 645 if (type & 1) { 646 NEXT_CP_CONS_V(&cpr->ring, cpr->cons, 
647 cpr->v_bit); 648 ri->iri_cidx = RING_NEXT(&cpr->ring, 649 ri->iri_cidx); 650 CMPL_PREFETCH_NEXT(cpr, cpr->cons); 651 } 652 break; 653 } 654 } 655 656 return 0; 657 } 658 659 static int 660 bnxt_intr(void *sc) 661 { 662 struct bnxt_softc *softc = (struct bnxt_softc *)sc; 663 664 device_printf(softc->dev, "STUB: %s @ %s:%d\n", __func__, __FILE__, __LINE__); 665 return ENOSYS; 666 } 667