Lines Matching +full:wr +full:- +full:hold
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
86 * 0-7 are valid values.
94 * -1: driver should figure out a good value.
98 int fl_pad = -1;
104 * -1: driver should figure out a good value.
107 static int spg_len = -1;
113 * -1: no congestion feedback (not recommended).
129 * -1: let the driver decide whether to enable buffer packing or not.
133 static int buffer_packing = -1;
139 * -1: driver should figure out a good value.
143 static int fl_pack = -1;
165 * for rewriting. -1 and 0-3 are all valid values.
166 * -1: hardware should leave the TCP timestamps alone.
172 static int tsclk = -1;
183 * 1 and 3-17 (both inclusive) are legal values.
223 "# of consecutive packets (1 - 255) that will trigger tx coalescing");
405 if (is_hpftid(iq->adapter, tid) || is_ftid(iq->adapter, tid)) { in set_tcb_rpl_handler()
407 * The return code for filter-write is put in the CPL cookie so in set_tcb_rpl_handler()
413 cookie = G_COOKIE(cpl->cookie); in set_tcb_rpl_handler()
439 u_int cookie = G_TID_COOKIE(G_AOPEN_ATID(be32toh(cpl->atid_status))); in act_open_rpl_handler()
451 struct adapter *sc = iq->adapter; in abort_rpl_rss_handler()
466 struct adapter *sc = iq->adapter; in fw4_ack_handler()
546 if (spg_len != -1) { in t4_sge_modload()
553 if (cong_drop < -1 || cong_drop > 2) { in t4_sge_modload()
559 if (ofld_cong_drop < -1 || ofld_cong_drop > 2) { in t4_sge_modload()
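The knobs quoted above (fl_pad, spg_len, buffer_packing, fl_pack, tsclk, cong_drop, ...) share one convention: -1 means "let the driver pick a good value," and t4_sge_modload() resets anything out of range back to -1, as the cong_drop/ofld_cong_drop checks just shown do. A minimal, hypothetical sketch of that pattern (example_knob and its range are illustrative, not part of the driver):

static int example_knob = -1;		/* hypothetical tunable; -1: driver chooses */

static void
example_modload_check(void)
{
	if (example_knob < -1 || example_knob > 2) {
		printf("invalid example_knob %d, using -1 instead\n",
		    example_knob);
		example_knob = -1;
	}
}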
633 return (refs - rels); in t4_sge_extfree_refs()
665 device_printf(sc->dev, "Invalid hw.cxgbe.fl_pad value" in setup_pad_and_pack_boundaries()
670 v = V_INGPADBOUNDARY(ilog2(pad) - pad_shift); in setup_pad_and_pack_boundaries()
674 if (fl_pack != -1 && fl_pack != pad) { in setup_pad_and_pack_boundaries()
676 device_printf(sc->dev, "hw.cxgbe.fl_pack (%d) ignored," in setup_pad_and_pack_boundaries()
685 if (sc->params.pci.mps > MAX_PACK_BOUNDARY) in setup_pad_and_pack_boundaries()
688 pack = max(sc->params.pci.mps, CACHE_LINE_SIZE); in setup_pad_and_pack_boundaries()
696 if (fl_pack != -1) { in setup_pad_and_pack_boundaries()
697 device_printf(sc->dev, "Invalid hw.cxgbe.fl_pack value" in setup_pad_and_pack_boundaries()
705 v = V_INGPACKBOUNDARY(ilog2(pack) - 5); in setup_pad_and_pack_boundaries()
712 * adap->params.vpd.cclk must be set up before this is called.
720 int timer_max = M_TIMERVALUE0 * 1000 / sc->params.vpd.cclk; in t4_tweak_chip_settings()
722 uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE); in t4_tweak_chip_settings()
732 KASSERT(sc->flags & MASTER_PF, in t4_tweak_chip_settings()
742 v = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) | in t4_tweak_chip_settings()
743 V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) | in t4_tweak_chip_settings()
744 V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) | in t4_tweak_chip_settings()
745 V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) | in t4_tweak_chip_settings()
746 V_HOSTPAGESIZEPF4(PAGE_SHIFT - 10) | in t4_tweak_chip_settings()
747 V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) | in t4_tweak_chip_settings()
748 V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) | in t4_tweak_chip_settings()
749 V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10); in t4_tweak_chip_settings()
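A note on the encoding above: each V_HOSTPAGESIZEPFn() field is built from PAGE_SHIFT - 10, i.e. the register holds log2(host page size) - 10 per PF, so with the common 4 KB page size (PAGE_SHIFT == 12) every field is programmed with 2.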
760 t4_write_reg(sc, reg, sw_buf_sizes[i] - CL_METADATA_SIZE); in t4_tweak_chip_settings()
772 KASSERT(intr_timer[i] >= intr_timer[i - 1], in t4_tweak_chip_settings()
777 if (i == nitems(intr_timer) - 1) { in t4_tweak_chip_settings()
781 intr_timer[i] += intr_timer[i - 1]; in t4_tweak_chip_settings()
801 v = V_TSCALE(tscale - 2); in t4_tweak_chip_settings()
804 if (sc->debug_flags & DF_DISABLE_TCB_CACHE) { in t4_tweak_chip_settings()
827 /* We use multiple DDP page sizes both in plain-TOE and ISCSI modes. */ in t4_tweak_chip_settings()
847 int mask = fl_pad ? sc->params.sge.pad_boundary - 1 : 16 - 1; in hwsz_ok()
859 struct sge *s = &sc->sge; in t4_init_rx_buf_info()
860 struct sge_params *sp = &sc->params.sge; in t4_init_rx_buf_info()
872 s->safe_zidx = -1; in t4_init_rx_buf_info()
873 rxb = &s->rx_buf_info[0]; in t4_init_rx_buf_info()
875 rxb->size1 = sw_buf_sizes[i]; in t4_init_rx_buf_info()
876 rxb->zone = m_getzone(rxb->size1); in t4_init_rx_buf_info()
877 rxb->type = m_gettype(rxb->size1); in t4_init_rx_buf_info()
878 rxb->size2 = 0; in t4_init_rx_buf_info()
879 rxb->hwidx1 = -1; in t4_init_rx_buf_info()
880 rxb->hwidx2 = -1; in t4_init_rx_buf_info()
882 int hwsize = sp->sge_fl_buffer_size[j]; in t4_init_rx_buf_info()
888 if (rxb->hwidx1 == -1 && rxb->size1 == hwsize) in t4_init_rx_buf_info()
889 rxb->hwidx1 = j; in t4_init_rx_buf_info()
892 if (rxb->size1 - CL_METADATA_SIZE < hwsize) in t4_init_rx_buf_info()
894 n = rxb->size1 - hwsize - CL_METADATA_SIZE; in t4_init_rx_buf_info()
896 rxb->hwidx2 = j; in t4_init_rx_buf_info()
897 rxb->size2 = hwsize; in t4_init_rx_buf_info()
900 if (rxb->hwidx2 != -1) { in t4_init_rx_buf_info()
901 if (n < sp->sge_fl_buffer_size[rxb->hwidx2] - in t4_init_rx_buf_info()
902 hwsize - CL_METADATA_SIZE) { in t4_init_rx_buf_info()
903 rxb->hwidx2 = j; in t4_init_rx_buf_info()
904 rxb->size2 = hwsize; in t4_init_rx_buf_info()
907 rxb->hwidx2 = j; in t4_init_rx_buf_info()
908 rxb->size2 = hwsize; in t4_init_rx_buf_info()
911 if (rxb->hwidx2 != -1) in t4_init_rx_buf_info()
912 sc->flags |= BUF_PACKING_OK; in t4_init_rx_buf_info()
913 if (s->safe_zidx == -1 && rxb->size1 == safest_rx_cluster) in t4_init_rx_buf_info()
914 s->safe_zidx = i; in t4_init_rx_buf_info()
925 struct sge_params *sp = &sc->params.sge; in t4_verify_chip_settings()
928 const uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE); in t4_verify_chip_settings()
932 r = sp->sge_control; in t4_verify_chip_settings()
934 device_printf(sc->dev, "invalid SGE_CONTROL(0x%x)\n", r); in t4_verify_chip_settings()
940 * needs to be carefully reviewed for PAGE_SHIFT vs sp->page_shift. in t4_verify_chip_settings()
942 if (sp->page_shift != PAGE_SHIFT) { in t4_verify_chip_settings()
943 device_printf(sc->dev, "invalid SGE_HOST_PAGE_SIZE(0x%x)\n", r); in t4_verify_chip_settings()
947 if (sc->flags & IS_VF) in t4_verify_chip_settings()
953 device_printf(sc->dev, "invalid ULP_RX_TDDP_PSZ(0x%x)\n", r); in t4_verify_chip_settings()
954 if (sc->vres.ddp.size != 0) in t4_verify_chip_settings()
961 device_printf(sc->dev, "invalid ULP_RX_CTL(0x%x)\n", r); in t4_verify_chip_settings()
962 if (sc->vres.ddp.size != 0) in t4_verify_chip_settings()
971 device_printf(sc->dev, "invalid TP_PARA_REG5(0x%x)\n", r); in t4_verify_chip_settings()
972 if (sc->vres.ddp.size != 0) in t4_verify_chip_settings()
984 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0, in t4_create_dma_tag()
987 NULL, &sc->dmat); in t4_create_dma_tag()
989 device_printf(sc->dev, in t4_create_dma_tag()
1000 struct sge_params *sp = &sc->params.sge; in t4_sge_sysctls()
1007 NULL, sp->fl_pktshift, "payload DMA offset in rx buffer (bytes)"); in t4_sge_sysctls()
1010 NULL, sp->pad_boundary, "payload pad boundary (bytes)"); in t4_sge_sysctls()
1013 NULL, sp->spg_len, "status page size (bytes)"); in t4_sge_sysctls()
1023 NULL, sp->pack_boundary, "payload pack boundary (bytes)"); in t4_sge_sysctls()
1029 if (sc->dmat) in t4_destroy_dma_tag()
1030 bus_dma_tag_destroy(sc->dmat); in t4_destroy_dma_tag()
1059 if (sc->flags & IS_VF) in t4_setup_adapter_queues()
1088 if (sc->sge.ctrlq != NULL) { in t4_teardown_adapter_queues()
1089 MPASS(!(sc->flags & IS_VF)); /* VFs don't allocate ctrlq. */ in t4_teardown_adapter_queues()
1105 maxp = sc->params.sge.fl_pktshift + ETHER_HDR_LEN + in max_rx_payload()
1107 if (ofld && sc->tt.tls && sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS && in max_rx_payload()
1108 maxp < sc->params.tp.max_rx_pdu) in max_rx_payload()
1109 maxp = sc->params.tp.max_rx_pdu; in max_rx_payload()
1130 struct adapter *sc = vi->adapter; in t4_setup_vi_queues()
1131 if_t ifp = vi->ifp; in t4_setup_vi_queues()
1135 intr_idx = vi->first_intr; in t4_setup_vi_queues()
1143 MPASS(vi->first_intr >= 0); in t4_setup_vi_queues()
1158 iqidx = vi->first_nm_rxq + (i % vi->nnmrxq); in t4_setup_vi_queues()
1183 intr_idx = saved_idx + max(vi->nrxq, vi->nnmrxq); in t4_setup_vi_queues()
1239 if (if_getcapabilities(vi->ifp) & IFCAP_NETMAP) { in t4_teardown_vi_queues()
1292 struct sge_iq *fwq = &sc->sge.fwq; in t4_intr_all()
1294 MPASS(sc->intr_count == 1); in t4_intr_all()
1296 if (sc->intr_type == INTR_INTX) in t4_intr_all()
1312 const bool verbose = (sc->debug_flags & DF_VERBOSE_SLOWINTR) != 0; in t4_intr_err()
1314 if (atomic_load_int(&sc->error_flags) & ADAP_FATAL_ERR) in t4_intr_err()
1319 sc->swintr++; in t4_intr_err()
1328 * Interrupt handler for iq-only queues. The firmware event queue is the only
1336 if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) { in t4_intr_evt()
1338 (void) atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE); in t4_intr_evt()
1350 if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) { in t4_intr()
1352 (void) atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE); in t4_intr()
1365 if (atomic_cmpset_int(&nm_rxq->nm_state, NM_ON, NM_BUSY)) { in t4_nm_intr()
1367 (void) atomic_cmpset_int(&nm_rxq->nm_state, NM_BUSY, NM_ON); in t4_nm_intr()
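t4_intr_evt(), t4_intr(), and t4_nm_intr() above all use the same compare-and-swap handoff so that only one context services a given queue at a time. A stripped-down sketch of the idiom (in the indirect-queue dispatch shown earlier, the state is flipped back only when the service routine returns 0, a detail this sketch omits):

static void
intr_service_sketch(volatile u_int *state)
{
	/* Only the winner of the IDLE -> BUSY transition does the work. */
	if (atomic_cmpset_int(state, IQS_IDLE, IQS_BUSY)) {
		/* ... service_iq()/service_iq_fl() runs here ... */
		(void)atomic_cmpset_int(state, IQS_BUSY, IQS_IDLE);
	}
}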
1379 MPASS(irq->nm_rxq != NULL); in t4_vi_intr()
1380 t4_nm_intr(irq->nm_rxq); in t4_vi_intr()
1382 MPASS(irq->rxq != NULL); in t4_vi_intr()
1383 t4_intr(irq->rxq); in t4_vi_intr()
1388 * Deals with interrupts on an iq-only (no freelist) queue.
1394 struct adapter *sc = iq->adapter; in service_iq()
1395 struct iq_desc *d = &iq->desc[iq->cidx]; in service_iq()
1401 KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq)); in service_iq()
1402 KASSERT((iq->flags & IQ_HAS_FL) == 0, in service_iq()
1403 ("%s: called for iq %p with fl (iq->flags 0x%x)", __func__, iq, in service_iq()
1404 iq->flags)); in service_iq()
1405 MPASS((iq->flags & IQ_ADJ_CREDIT) == 0); in service_iq()
1406 MPASS((iq->flags & IQ_LRO_ENABLED) == 0); in service_iq()
1408 limit = budget ? budget : iq->qsize / 16; in service_iq()
1415 while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) { in service_iq()
1419 rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen); in service_iq()
1420 lq = be32toh(d->rsp.pldbuflen_qid); in service_iq()
1430 KASSERT(d->rss.opcode < NUM_CPL_CMDS, in service_iq()
1432 d->rss.opcode)); in service_iq()
1433 t4_cpl_handler[d->rss.opcode](iq, &d->rss, NULL); in service_iq()
1438 * There are 1K interrupt-capable queues (qids 0 in service_iq()
1444 t4_an_handler(iq, &d->rsp); in service_iq()
1448 q = sc->sge.iqmap[lq - sc->sge.iq_start - in service_iq()
1449 sc->sge.iq_base]; in service_iq()
1450 if (atomic_cmpset_int(&q->state, IQS_IDLE, in service_iq()
1452 if (service_iq_fl(q, q->qsize / 16) == 0) { in service_iq()
1453 (void) atomic_cmpset_int(&q->state, in service_iq()
1468 device_get_nameunit(sc->dev), rsp_type, iq); in service_iq()
1473 if (__predict_false(++iq->cidx == iq->sidx)) { in service_iq()
1474 iq->cidx = 0; in service_iq()
1475 iq->gen ^= F_RSPD_GEN; in service_iq()
1476 d = &iq->desc[0]; in service_iq()
1479 t4_write_reg(sc, sc->sge_gts_reg, in service_iq()
1481 V_INGRESSQID(iq->cntxt_id) | in service_iq()
1500 if (service_iq_fl(q, q->qsize / 8) == 0) in service_iq()
1501 (void) atomic_cmpset_int(&q->state, IQS_BUSY, IQS_IDLE); in service_iq()
1506 t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) | in service_iq()
1507 V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params)); in service_iq()
1517 return (lro->lro_mbuf_max != 0); in sort_before_lro()
1534 cur = &sc->cal_info[sc->cal_current]; in t4_tstmp_to_ns()
1535 gen = seqc_read(&cur->gen); in t4_tstmp_to_ns()
1539 if (seqc_consistent(&cur->gen, gen)) in t4_tstmp_to_ns()
1546 * ( (cur_time - prev_time) ) in t4_tstmp_to_ns()
1547 * ((hw_tstmp - hw_prev) * ----------------------------- ) + prev_time in t4_tstmp_to_ns()
1548 * ( (hw_cur - hw_prev) ) in t4_tstmp_to_ns()
1553 hw_clocks = hw_tstmp - dcur.hw_prev; in t4_tstmp_to_ns()
1554 sbt_cur_to_prev = (dcur.sbt_cur - dcur.sbt_prev); in t4_tstmp_to_ns()
1555 hw_clk_div = dcur.hw_cur - dcur.hw_prev; in t4_tstmp_to_ns()
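The three lines above gather the terms of the interpolation spelled out in the comment: the elapsed hardware clocks are scaled by the ratio of elapsed system time to elapsed hardware time between the two calibration snapshots. A hedged sketch of the same computation with plain 64-bit integers (the driver itself works in sbintime_t and reads the calibration data under a seqc; intermediate overflow handling is ignored here):

static uint64_t
tstmp_to_ns_sketch(uint64_t hw_tstmp, uint64_t hw_prev, uint64_t hw_cur,
    uint64_t ns_prev, uint64_t ns_cur)
{
	uint64_t hw_clocks = hw_tstmp - hw_prev;
	uint64_t hw_clk_div = hw_cur - hw_prev;

	if (hw_clk_div == 0)
		return (0);	/* no calibration interval yet */
	/* ((hw_tstmp - hw_prev) * (cur - prev) / (hw_cur - hw_prev)) + prev */
	return (hw_clocks * (ns_cur - ns_prev) / hw_clk_div + ns_prev);
}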
1564 fl->rx_offset = 0; in move_to_next_rxbuf()
1565 if (__predict_false((++fl->cidx & 7) == 0)) { in move_to_next_rxbuf()
1566 uint16_t cidx = fl->cidx >> 3; in move_to_next_rxbuf()
1568 if (__predict_false(cidx == fl->sidx)) in move_to_next_rxbuf()
1569 fl->cidx = cidx = 0; in move_to_next_rxbuf()
1570 fl->hw_cidx = cidx; in move_to_next_rxbuf()
1582 struct adapter *sc = iq->adapter; in service_iq_fl()
1583 struct iq_desc *d = &iq->desc[iq->cidx]; in service_iq_fl()
1590 const struct timeval lro_timeout = {0, sc->lro_timeout}; in service_iq_fl()
1591 struct lro_ctrl *lro = &rxq->lro; in service_iq_fl()
1594 KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq)); in service_iq_fl()
1595 MPASS(iq->flags & IQ_HAS_FL); in service_iq_fl()
1599 if (iq->flags & IQ_ADJ_CREDIT) { in service_iq_fl()
1601 iq->flags &= ~IQ_ADJ_CREDIT; in service_iq_fl()
1602 if ((d->rsp.u.type_gen & F_RSPD_GEN) != iq->gen) { in service_iq_fl()
1604 t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(1) | in service_iq_fl()
1605 V_INGRESSQID((u32)iq->cntxt_id) | in service_iq_fl()
1606 V_SEINTARM(iq->intr_params)); in service_iq_fl()
1612 MPASS((iq->flags & IQ_ADJ_CREDIT) == 0); in service_iq_fl()
1615 limit = budget ? budget : iq->qsize / 16; in service_iq_fl()
1616 fl = &rxq->fl; in service_iq_fl()
1617 fl_hw_cidx = fl->hw_cidx; /* stable snapshot */ in service_iq_fl()
1618 while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) { in service_iq_fl()
1623 rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen); in service_iq_fl()
1624 lq = be32toh(d->rsp.pldbuflen_qid); in service_iq_fl()
1629 if (fl->rx_offset > 0) in service_iq_fl()
1633 if (IDXDIFF(fl->hw_cidx, fl_hw_cidx, fl->sidx) > 4) { in service_iq_fl()
1637 fl_hw_cidx = fl->hw_cidx; in service_iq_fl()
1640 if (d->rss.opcode == CPL_RX_PKT) { in service_iq_fl()
1652 KASSERT(d->rss.opcode < NUM_CPL_CMDS, in service_iq_fl()
1653 ("%s: bad opcode %02x.", __func__, d->rss.opcode)); in service_iq_fl()
1654 t4_cpl_handler[d->rss.opcode](iq, &d->rss, m0); in service_iq_fl()
1660 * There are 1K interrupt-capable queues (qids 0 in service_iq_fl()
1671 t4_an_handler(iq, &d->rsp); in service_iq_fl()
1678 device_get_nameunit(sc->dev), rsp_type, iq); in service_iq_fl()
1683 if (__predict_false(++iq->cidx == iq->sidx)) { in service_iq_fl()
1684 iq->cidx = 0; in service_iq_fl()
1685 iq->gen ^= F_RSPD_GEN; in service_iq_fl()
1686 d = &iq->desc[0]; in service_iq_fl()
1689 t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) | in service_iq_fl()
1690 V_INGRESSQID(iq->cntxt_id) | in service_iq_fl()
1694 if (iq->flags & IQ_LRO_ENABLED && in service_iq_fl()
1696 sc->lro_timeout != 0) { in service_iq_fl()
1707 if (iq->flags & IQ_LRO_ENABLED) { in service_iq_fl()
1708 if (ndescs > 0 && lro->lro_mbuf_count > 8) { in service_iq_fl()
1710 /* hold back one credit and don't flush LRO state */ in service_iq_fl()
1711 iq->flags |= IQ_ADJ_CREDIT; in service_iq_fl()
1712 ndescs--; in service_iq_fl()
1719 t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) | in service_iq_fl()
1720 V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params)); in service_iq_fl()
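A cross-reference on the credit held back just above (IQ_ADJ_CREDIT, ndescs--): it is returned at the top of the next service_iq_fl() pass, lines 1599-1606 earlier in this listing, where the flag is cleared and, if no new responses have arrived, a single CIDXINC(1) doorbell write hands the credit back and rearms the interrupt without flushing the LRO batch early.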
1735 return ((void *)(sd->cl + sd->moff)); in cl_metadata()
1741 struct cluster_metadata *clm = m->m_ext.ext_arg1; in rxb_free()
1743 uma_zfree(clm->zone, clm->cl); in rxb_free()
1759 struct fl_sdesc *sd = &fl->sdesc[fl->cidx]; in get_scatter_segment()
1760 struct rx_buf_info *rxb = &sc->sge.rx_buf_info[sd->zidx]; in get_scatter_segment()
1765 if (fl->flags & FL_BUF_PACKING) { in get_scatter_segment()
1768 blen = rxb->size2 - fl->rx_offset; /* max possible in this buf */ in get_scatter_segment()
1770 payload = sd->cl + fl->rx_offset; in get_scatter_segment()
1773 pad = roundup2(l, fl->buf_boundary) - l; in get_scatter_segment()
1774 if (fl->rx_offset + len + pad < rxb->size2) in get_scatter_segment()
1776 MPASS(fl->rx_offset + blen <= rxb->size2); in get_scatter_segment()
1778 MPASS(fl->rx_offset == 0); /* not packing */ in get_scatter_segment()
1779 blen = rxb->size1; in get_scatter_segment()
1781 payload = sd->cl; in get_scatter_segment()
1788 m->m_pkthdr.len = remaining; in get_scatter_segment()
1794 m->m_len = len; in get_scatter_segment()
1797 if (sc->sc_do_rxcopy && len < RX_COPY_THRESHOLD) { in get_scatter_segment()
1800 if (fl->flags & FL_BUF_PACKING) { in get_scatter_segment()
1801 fl->rx_offset += blen; in get_scatter_segment()
1802 MPASS(fl->rx_offset <= rxb->size2); in get_scatter_segment()
1803 if (fl->rx_offset < rxb->size2) in get_scatter_segment()
1806 } else if (fl->flags & FL_BUF_PACKING) { in get_scatter_segment()
1808 if (sd->nmbuf++ == 0) { in get_scatter_segment()
1809 clm->refcount = 1; in get_scatter_segment()
1810 clm->zone = rxb->zone; in get_scatter_segment()
1811 clm->cl = sd->cl; in get_scatter_segment()
1814 m_extaddref(m, payload, blen, &clm->refcount, rxb_free, clm, in get_scatter_segment()
1817 fl->rx_offset += blen; in get_scatter_segment()
1818 MPASS(fl->rx_offset <= rxb->size2); in get_scatter_segment()
1819 if (fl->rx_offset < rxb->size2) in get_scatter_segment()
1822 m_cljset(m, sd->cl, rxb->type); in get_scatter_segment()
1823 sd->cl = NULL; /* consumed, not a recycle candidate */ in get_scatter_segment()
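When buffer packing is in effect, the next payload within a cluster starts at the next fl->buf_boundary multiple; that is what the roundup2() expressions in get_scatter_segment() and skip_scatter_segment() compute. A tiny sketch of the padding arithmetic (roundup2() assumes a power-of-two boundary):

static inline int
packing_pad_sketch(int len, int boundary)
{
	/* e.g. len 90, boundary 64: rounds up to 128, so the pad is 38 */
	return (roundup2(len, boundary) - len);
}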
1837 if (__predict_false(fl->flags & FL_BUF_RESUME)) { in get_fl_payload()
1838 M_ASSERTPKTHDR(fl->m0); in get_fl_payload()
1839 MPASS(fl->m0->m_pkthdr.len == plen); in get_fl_payload()
1840 MPASS(fl->remaining < plen); in get_fl_payload()
1842 m0 = fl->m0; in get_fl_payload()
1843 pnext = fl->pnext; in get_fl_payload()
1844 remaining = fl->remaining; in get_fl_payload()
1845 fl->flags &= ~FL_BUF_RESUME; in get_fl_payload()
1857 remaining = plen - m0->m_len; in get_fl_payload()
1858 pnext = &m0->m_next; in get_fl_payload()
1861 MPASS(fl->rx_offset == 0); in get_fl_payload()
1862 m = get_scatter_segment(sc, fl, plen - remaining, remaining); in get_fl_payload()
1864 fl->m0 = m0; in get_fl_payload()
1865 fl->pnext = pnext; in get_fl_payload()
1866 fl->remaining = remaining; in get_fl_payload()
1867 fl->flags |= FL_BUF_RESUME; in get_fl_payload()
1871 pnext = &m->m_next; in get_fl_payload()
1872 remaining -= m->m_len; in get_fl_payload()
1884 struct fl_sdesc *sd = &fl->sdesc[fl->cidx]; in skip_scatter_segment()
1885 struct rx_buf_info *rxb = &sc->sge.rx_buf_info[sd->zidx]; in skip_scatter_segment()
1888 if (fl->flags & FL_BUF_PACKING) { in skip_scatter_segment()
1891 blen = rxb->size2 - fl->rx_offset; /* max possible in this buf */ in skip_scatter_segment()
1895 pad = roundup2(l, fl->buf_boundary) - l; in skip_scatter_segment()
1896 if (fl->rx_offset + len + pad < rxb->size2) in skip_scatter_segment()
1898 fl->rx_offset += blen; in skip_scatter_segment()
1899 MPASS(fl->rx_offset <= rxb->size2); in skip_scatter_segment()
1900 if (fl->rx_offset < rxb->size2) in skip_scatter_segment()
1903 MPASS(fl->rx_offset == 0); /* not packing */ in skip_scatter_segment()
1904 blen = rxb->size1; in skip_scatter_segment()
1921 remaining -= len; in skip_fl_payload()
1929 struct fl_sdesc *sd = &fl->sdesc[fl->cidx]; in get_segment_len()
1930 struct rx_buf_info *rxb = &sc->sge.rx_buf_info[sd->zidx]; in get_segment_len()
1932 if (fl->flags & FL_BUF_PACKING) in get_segment_len()
1933 len = rxb->size2 - fl->rx_offset; in get_segment_len()
1935 len = rxb->size1; in get_segment_len()
1945 if_t ifp = rxq->ifp; in eth_rx()
1946 struct sge_fl *fl = &rxq->fl; in eth_rx()
1950 struct lro_ctrl *lro = &rxq->lro; in eth_rx()
1988 MPASS(plen > sc->params.sge.fl_pktshift); in eth_rx()
1989 if (vi->pfil != NULL && PFIL_HOOKED_IN(vi->pfil) && in eth_rx()
1990 __predict_true((fl->flags & FL_BUF_RESUME) == 0)) { in eth_rx()
1991 struct fl_sdesc *sd = &fl->sdesc[fl->cidx]; in eth_rx()
1995 slen = get_segment_len(sc, fl, plen) - in eth_rx()
1996 sc->params.sge.fl_pktshift; in eth_rx()
1997 frame = sd->cl + fl->rx_offset + sc->params.sge.fl_pktshift; in eth_rx()
1999 rc = pfil_mem_in(vi->pfil, frame, slen, ifp, &m0); in eth_rx()
2015 m0->m_pkthdr.len -= sc->params.sge.fl_pktshift; in eth_rx()
2016 m0->m_len -= sc->params.sge.fl_pktshift; in eth_rx()
2017 m0->m_data += sc->params.sge.fl_pktshift; in eth_rx()
2020 m0->m_pkthdr.rcvif = ifp; in eth_rx()
2021 M_HASHTYPE_SET(m0, sw_hashtype[d->rss.hash_type][d->rss.ipv6]); in eth_rx()
2022 m0->m_pkthdr.flowid = be32toh(d->rss.hash_val); in eth_rx()
2024 cpl = (const void *)(&d->rss + 1); in eth_rx()
2025 if (sc->params.tp.rx_pkt_encap) { in eth_rx()
2026 const uint16_t ev = be16toh(cpl->err_vec); in eth_rx()
2032 err_vec = be16toh(cpl->err_vec); in eth_rx()
2036 if (cpl->csum_calc && err_vec == 0) { in eth_rx()
2037 int ipv6 = !!(cpl->l2info & htobe32(F_RXF_IP6)); in eth_rx()
2041 MPASS((cpl->l2info & htobe32(F_RXF_IP)) ^ in eth_rx()
2042 (cpl->l2info & htobe32(F_RXF_IP6))); in eth_rx()
2043 m0->m_pkthdr.csum_data = be16toh(cpl->csum); in eth_rx()
2046 m0->m_pkthdr.csum_flags = CSUM_L3_CALC | in eth_rx()
2050 m0->m_pkthdr.csum_flags = CSUM_L4_CALC | in eth_rx()
2053 rxq->rxcsum++; in eth_rx()
2058 if (__predict_false(cpl->ip_frag)) { in eth_rx()
2072 m0->m_pkthdr.csum_data = 0xffff; in eth_rx()
2074 m0->m_pkthdr.csum_flags = CSUM_L4_CALC | in eth_rx()
2077 m0->m_pkthdr.csum_flags = CSUM_L3_CALC | in eth_rx()
2084 MPASS(m0->m_pkthdr.csum_data == 0xffff); in eth_rx()
2089 m0->m_pkthdr.csum_flags = in eth_rx()
2092 rxq->vxlan_rxcsum++; in eth_rx()
2096 if (cpl->vlan_ex) { in eth_rx()
2097 if (sc->flags & IS_VF && sc->vlan_id) { in eth_rx()
2102 MPASS(be16toh(cpl->vlan) == sc->vlan_id); in eth_rx()
2104 m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan); in eth_rx()
2105 m0->m_flags |= M_VLANTAG; in eth_rx()
2106 rxq->vlan_extraction++; in eth_rx()
2110 if (rxq->iq.flags & IQ_RX_TIMESTAMP) { in eth_rx()
2113 * long as we get a non-zero back from t4_tstmp_to_ns(). in eth_rx()
2115 m0->m_pkthdr.rcv_tstmp = t4_tstmp_to_ns(sc, in eth_rx()
2116 be64toh(d->rsp.u.last_flit)); in eth_rx()
2117 if (m0->m_pkthdr.rcv_tstmp != 0) in eth_rx()
2118 m0->m_flags |= M_TSTMP; in eth_rx()
2122 m0->m_pkthdr.numa_domain = if_getnumadomain(ifp); in eth_rx()
2125 if (rxq->iq.flags & IQ_LRO_ENABLED && tnl_type == 0 && in eth_rx()
2148 struct sge_eq *eq = &wrq->eq; in wrq_tx_drain()
2151 if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) in wrq_tx_drain()
2152 drain_wrq_wr_list(wrq->adapter, wrq); in wrq_tx_drain()
2159 struct sge_eq *eq = &wrq->eq; in drain_wrq_wr_list()
2162 struct wrqe *wr; in drain_wrq_wr_list() local
2163 struct fw_eth_tx_pkt_wr *dst; /* any fw WR struct will do */ in drain_wrq_wr_list()
2166 MPASS(TAILQ_EMPTY(&wrq->incomplete_wrs)); in drain_wrq_wr_list()
2167 wr = STAILQ_FIRST(&wrq->wr_list); in drain_wrq_wr_list()
2168 MPASS(wr != NULL); /* Must be called with something useful to do */ in drain_wrq_wr_list()
2169 MPASS(eq->pidx == eq->dbidx); in drain_wrq_wr_list()
2173 eq->cidx = read_hw_cidx(eq); in drain_wrq_wr_list()
2174 if (eq->pidx == eq->cidx) in drain_wrq_wr_list()
2175 available = eq->sidx - 1; in drain_wrq_wr_list()
2177 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; in drain_wrq_wr_list()
2179 MPASS(wr->wrq == wrq); in drain_wrq_wr_list()
2180 n = howmany(wr->wr_len, EQ_ESIZE); in drain_wrq_wr_list()
2184 dst = (void *)&eq->desc[eq->pidx]; in drain_wrq_wr_list()
2185 if (__predict_true(eq->sidx - eq->pidx > n)) { in drain_wrq_wr_list()
2187 bcopy(&wr->wr[0], dst, wr->wr_len); in drain_wrq_wr_list()
2188 eq->pidx += n; in drain_wrq_wr_list()
2190 int first_portion = (eq->sidx - eq->pidx) * EQ_ESIZE; in drain_wrq_wr_list()
2192 bcopy(&wr->wr[0], dst, first_portion); in drain_wrq_wr_list()
2193 if (wr->wr_len > first_portion) { in drain_wrq_wr_list()
2194 bcopy(&wr->wr[first_portion], &eq->desc[0], in drain_wrq_wr_list()
2195 wr->wr_len - first_portion); in drain_wrq_wr_list()
2197 eq->pidx = n - (eq->sidx - eq->pidx); in drain_wrq_wr_list()
2199 wrq->tx_wrs_copied++; in drain_wrq_wr_list()
2201 if (available < eq->sidx / 4 && in drain_wrq_wr_list()
2202 atomic_cmpset_int(&eq->equiq, 0, 1)) { in drain_wrq_wr_list()
2208 dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ | in drain_wrq_wr_list()
2218 STAILQ_REMOVE_HEAD(&wrq->wr_list, link); in drain_wrq_wr_list()
2219 free_wrqe(wr); in drain_wrq_wr_list()
2220 MPASS(wrq->nwr_pending > 0); in drain_wrq_wr_list()
2221 wrq->nwr_pending--; in drain_wrq_wr_list()
2222 MPASS(wrq->ndesc_needed >= n); in drain_wrq_wr_list()
2223 wrq->ndesc_needed -= n; in drain_wrq_wr_list()
2224 } while ((wr = STAILQ_FIRST(&wrq->wr_list)) != NULL); in drain_wrq_wr_list()
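Two idioms recur in drain_wrq_wr_list() above and in the other transmit paths below: the free-descriptor count treats pidx == cidx as an empty ring and subtracts one so a completely full ring never looks empty, and a work request whose descriptors would run past the end of the ring is copied in two pieces. A self-contained sketch of both, under illustrative names:

/* Free descriptors in a ring of sidx entries; one slot stays in reserve. */
static u_int
ring_available_sketch(u_int cidx, u_int pidx, u_int sidx)
{
	if (pidx == cidx)
		return (sidx - 1);
	return ((cidx > pidx ? cidx - pidx : cidx + sidx - pidx) - 1);
}

/* Copy len bytes of a WR into the ring, splitting at the wrap point. */
static void
ring_copy_sketch(char *ring, u_int sidx, u_int esize, u_int pidx,
    const char *src, u_int len)
{
	u_int room = (sidx - pidx) * esize;

	if (len <= room)
		memcpy(&ring[pidx * esize], src, len);
	else {
		memcpy(&ring[pidx * esize], src, room);
		memcpy(ring, src + room, len - room);
	}
}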
2234 t4_wrq_tx_locked(struct adapter *sc, struct sge_wrq *wrq, struct wrqe *wr) in t4_wrq_tx_locked() argument
2237 struct sge_eq *eq = &wrq->eq; in t4_wrq_tx_locked()
2241 MPASS(wr != NULL); in t4_wrq_tx_locked()
2242 MPASS(wr->wr_len > 0 && wr->wr_len <= SGE_MAX_WR_LEN); in t4_wrq_tx_locked()
2243 MPASS((wr->wr_len & 0x7) == 0); in t4_wrq_tx_locked()
2245 STAILQ_INSERT_TAIL(&wrq->wr_list, wr, link); in t4_wrq_tx_locked()
2246 wrq->nwr_pending++; in t4_wrq_tx_locked()
2247 wrq->ndesc_needed += howmany(wr->wr_len, EQ_ESIZE); in t4_wrq_tx_locked()
2249 if (!TAILQ_EMPTY(&wrq->incomplete_wrs)) in t4_wrq_tx_locked()
2255 MPASS(eq->pidx == eq->dbidx); in t4_wrq_tx_locked()
2262 struct adapter *sc = vi->adapter; in t4_update_fl_bufsize()
2272 fl = &rxq->fl; in t4_update_fl_bufsize()
2275 fl->zidx = find_refill_source(sc, maxp, in t4_update_fl_bufsize()
2276 fl->flags & FL_BUF_PACKING); in t4_update_fl_bufsize()
2282 fl = &ofld_rxq->fl; in t4_update_fl_bufsize()
2285 fl->zidx = find_refill_source(sc, maxp, in t4_update_fl_bufsize()
2286 fl->flags & FL_BUF_PACKING); in t4_update_fl_bufsize()
2298 return (m->m_pkthdr.PH_loc.eight[1]); in mbuf_eo_nsegs()
2307 m->m_pkthdr.PH_loc.eight[1] = nsegs; in set_mbuf_eo_nsegs()
2317 n = m->m_pkthdr.PH_loc.eight[2]; in mbuf_eo_len16()
2329 m->m_pkthdr.PH_loc.eight[2] = len16; in set_mbuf_eo_len16()
2338 return (m->m_pkthdr.PH_loc.eight[3]); in mbuf_eo_tsclk_tsoff()
2347 m->m_pkthdr.PH_loc.eight[3] = tsclk_tsoff; in set_mbuf_eo_tsclk_tsoff()
2355 return (mst != NULL && mst->sw->type == IF_SND_TAG_TYPE_RATE_LIMIT); in needs_eo()
2377 m->m_pkthdr.len = len; in alloc_wr_mbuf()
2378 m->m_len = len; in alloc_wr_mbuf()
2395 return (m->m_pkthdr.csum_flags & csum_flags); in needs_hwcsum()
2406 return (m->m_pkthdr.csum_flags & csum_flags); in needs_tso()
2415 return (m->m_pkthdr.csum_flags & CSUM_ENCAP_VXLAN); in needs_vxlan_csum()
2426 return ((m->m_pkthdr.csum_flags & csum_flags) != 0 && in needs_vxlan_tso()
2427 (m->m_pkthdr.csum_flags & csum_flags) != CSUM_ENCAP_VXLAN); in needs_vxlan_tso()
2438 return (m->m_pkthdr.csum_flags & csum_flags); in needs_inner_tcp_csum()
2450 return (m->m_pkthdr.csum_flags & csum_flags); in needs_l3_csum()
2461 return (m->m_pkthdr.csum_flags & csum_flags); in needs_outer_tcp_csum()
2473 return (m->m_pkthdr.csum_flags & csum_flags); in needs_outer_l4_csum()
2483 return (m->m_pkthdr.csum_flags & csum_flags); in needs_outer_udp_csum()
2493 return (m->m_flags & M_VLANTAG); in needs_vlan_insertion()
2507 if (offset + len < m->m_len) { in m_advance()
2512 len -= m->m_len - offset; in m_advance()
2513 m = m->m_next; in m_advance()
2532 len = m->m_len; in count_mbuf_ext_pgs()
2534 len -= skip; in count_mbuf_ext_pgs()
2536 if (m->m_epg_hdrlen != 0) { in count_mbuf_ext_pgs()
2537 if (off >= m->m_epg_hdrlen) { in count_mbuf_ext_pgs()
2538 off -= m->m_epg_hdrlen; in count_mbuf_ext_pgs()
2540 seglen = m->m_epg_hdrlen - off; in count_mbuf_ext_pgs()
2544 len -= seglen; in count_mbuf_ext_pgs()
2546 (vm_offset_t)&m->m_epg_hdr[segoff]); in count_mbuf_ext_pgs()
2552 pgoff = m->m_epg_1st_off; in count_mbuf_ext_pgs()
2553 for (i = 0; i < m->m_epg_npgs && len > 0; i++) { in count_mbuf_ext_pgs()
2556 off -= pglen; in count_mbuf_ext_pgs()
2560 seglen = pglen - off; in count_mbuf_ext_pgs()
2564 len -= seglen; in count_mbuf_ext_pgs()
2565 paddr = m->m_epg_pa[i] + segoff; in count_mbuf_ext_pgs()
2572 seglen = min(len, m->m_epg_trllen - off); in count_mbuf_ext_pgs()
2573 len -= seglen; in count_mbuf_ext_pgs()
2574 paddr = pmap_kextract((vm_offset_t)&m->m_epg_trail[off]); in count_mbuf_ext_pgs()
2597 MPASS(m->m_pkthdr.len > 0); in count_mbuf_nsegs()
2598 MPASS(m->m_pkthdr.len >= skip); in count_mbuf_nsegs()
2602 for (; m; m = m->m_next) { in count_mbuf_nsegs()
2603 len = m->m_len; in count_mbuf_nsegs()
2607 skip -= len; in count_mbuf_nsegs()
2610 if ((m->m_flags & M_EXTPG) != 0) { in count_mbuf_nsegs()
2617 len -= skip; in count_mbuf_nsegs()
2622 nsegs--; in count_mbuf_nsegs()
2623 nextaddr = pmap_kextract(va + len - 1) + 1; in count_mbuf_nsegs()
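count_mbuf_nsegs() above charges one DMA segment per chunk of data unless it is physically contiguous with the end of the previous chunk, which is what the nextaddr bookkeeping and the nsegs-- are for. A simplified sketch of that idea, ignoring the M_EXTPG path and page crossings inside a single mbuf:

static int
count_nsegs_sketch(struct mbuf *m)
{
	vm_paddr_t nextaddr = 0;
	int nsegs = 0;

	for (; m != NULL; m = m->m_next) {
		vm_offset_t va = (vm_offset_t)m->m_data;

		if (m->m_len == 0)
			continue;
		if (pmap_kextract(va) != nextaddr)
			nsegs++;	/* not contiguous: new segment */
		nextaddr = pmap_kextract(va + m->m_len - 1) + 1;
	}
	return (nsegs);
}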
2630 * The maximum number of segments that can fit in a WR.
2681 if (__predict_false(m0->m_pkthdr.len < ETHER_HDR_LEN)) { in parse_pkt()
2694 MPASS(m0->m_pkthdr.len > 0); in parse_pkt()
2697 if (m0->m_pkthdr.csum_flags & CSUM_SND_TAG) in parse_pkt()
2698 mst = m0->m_pkthdr.snd_tag; in parse_pkt()
2703 if (mst != NULL && mst->sw->type == IF_SND_TAG_TYPE_TLS) { in parse_pkt()
2726 if (__predict_false(nsegs > 2 && m0->m_pkthdr.len <= MHLEN && in parse_pkt()
2729 m0 = m_pullup(m0, m0->m_pkthdr.len); in parse_pkt()
2749 m_snd_tag_rele(m0->m_pkthdr.snd_tag); in parse_pkt()
2750 m0->m_pkthdr.snd_tag = NULL; in parse_pkt()
2751 m0->m_pkthdr.csum_flags &= ~CSUM_SND_TAG; in parse_pkt()
2765 eh_type = ntohs(eh->ether_type); in parse_pkt()
2769 eh_type = ntohs(evh->evl_proto); in parse_pkt()
2770 m0->m_pkthdr.l2hlen = sizeof(*evh); in parse_pkt()
2772 m0->m_pkthdr.l2hlen = sizeof(*eh); in parse_pkt()
2777 l3hdr = m_advance(&m, &offset, m0->m_pkthdr.l2hlen); in parse_pkt()
2779 m_advance(&m, &offset, m0->m_pkthdr.l2hlen); in parse_pkt()
2786 m0->m_pkthdr.l3hlen = sizeof(struct ip6_hdr); in parse_pkt()
2796 ip->ip_sum = 0; in parse_pkt()
2798 const uint16_t ipl = ip->ip_len; in parse_pkt()
2800 ip->ip_len = 0; in parse_pkt()
2801 ip->ip_sum = ~in_cksum_hdr(ip); in parse_pkt()
2802 ip->ip_len = ipl; in parse_pkt()
2804 ip->ip_sum = in_cksum_hdr(ip); in parse_pkt()
2806 m0->m_pkthdr.l3hlen = ip->ip_hl << 2; in parse_pkt()
2823 m0->m_pkthdr.l4hlen = sizeof(struct udphdr); in parse_pkt()
2824 m0->m_pkthdr.l5hlen = sizeof(struct vxlan_header); in parse_pkt()
2827 eh = m_advance(&m, &offset, m0->m_pkthdr.l3hlen + in parse_pkt()
2829 eh_type = ntohs(eh->ether_type); in parse_pkt()
2833 eh_type = ntohs(evh->evl_proto); in parse_pkt()
2834 m0->m_pkthdr.inner_l2hlen = sizeof(*evh); in parse_pkt()
2836 m0->m_pkthdr.inner_l2hlen = sizeof(*eh); in parse_pkt()
2838 l3hdr = m_advance(&m, &offset, m0->m_pkthdr.inner_l2hlen); in parse_pkt()
2840 m_advance(&m, &offset, m0->m_pkthdr.inner_l2hlen); in parse_pkt()
2846 m0->m_pkthdr.inner_l3hlen = sizeof(struct ip6_hdr); in parse_pkt()
2854 m0->m_pkthdr.inner_l3hlen = ip->ip_hl << 2; in parse_pkt()
2870 tcp = m_advance(&m, &offset, m0->m_pkthdr.inner_l3hlen); in parse_pkt()
2871 m0->m_pkthdr.inner_l4hlen = tcp->th_off * 4; in parse_pkt()
2873 MPASS((m0->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0); in parse_pkt()
2874 m0->m_pkthdr.csum_flags &= CSUM_INNER_IP6_UDP | in parse_pkt()
2881 tcp = m_advance(&m, &offset, m0->m_pkthdr.l3hlen); in parse_pkt()
2882 m0->m_pkthdr.l4hlen = tcp->th_off * 4; in parse_pkt()
2891 m0->m_pkthdr.l4hlen = sizeof(struct udphdr); in parse_pkt()
2898 /* EO WRs have the headers in the WR and not the GL. */ in parse_pkt()
2899 immhdrs = m0->m_pkthdr.l2hlen + m0->m_pkthdr.l3hlen + in parse_pkt()
2900 m0->m_pkthdr.l4hlen; in parse_pkt()
2907 rc = ethofld_transmit(mst->ifp, m0); in parse_pkt()
2921 struct sge_eq *eq = &wrq->eq; in start_wrq_wr()
2922 struct adapter *sc = wrq->adapter; in start_wrq_wr()
2924 struct wrqe *wr; in start_wrq_wr() local
2932 if (__predict_false((eq->flags & EQ_HW_ALLOCATED) == 0)) { in start_wrq_wr()
2937 if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) in start_wrq_wr()
2940 if (!STAILQ_EMPTY(&wrq->wr_list)) { in start_wrq_wr()
2943 wr = alloc_wrqe(len16 * 16, wrq); in start_wrq_wr()
2944 if (__predict_false(wr == NULL)) in start_wrq_wr()
2946 cookie->pidx = -1; in start_wrq_wr()
2947 cookie->ndesc = ndesc; in start_wrq_wr()
2948 return (&wr->wr); in start_wrq_wr()
2951 eq->cidx = read_hw_cidx(eq); in start_wrq_wr()
2952 if (eq->pidx == eq->cidx) in start_wrq_wr()
2953 available = eq->sidx - 1; in start_wrq_wr()
2955 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; in start_wrq_wr()
2959 cookie->pidx = eq->pidx; in start_wrq_wr()
2960 cookie->ndesc = ndesc; in start_wrq_wr()
2961 TAILQ_INSERT_TAIL(&wrq->incomplete_wrs, cookie, link); in start_wrq_wr()
2963 w = &eq->desc[eq->pidx]; in start_wrq_wr()
2964 IDXINCR(eq->pidx, ndesc, eq->sidx); in start_wrq_wr()
2965 if (__predict_false(cookie->pidx + ndesc > eq->sidx)) { in start_wrq_wr()
2966 w = &wrq->ss[0]; in start_wrq_wr()
2967 wrq->ss_pidx = cookie->pidx; in start_wrq_wr()
2968 wrq->ss_len = len16 * 16; in start_wrq_wr()
2979 struct sge_eq *eq = &wrq->eq; in commit_wrq_wr()
2980 struct adapter *sc = wrq->adapter; in commit_wrq_wr()
2984 if (cookie->pidx == -1) { in commit_wrq_wr()
2985 struct wrqe *wr = __containerof(w, struct wrqe, wr); in commit_wrq_wr() local
2987 t4_wrq_tx(sc, wr); in commit_wrq_wr()
2991 if (__predict_false(w == &wrq->ss[0])) { in commit_wrq_wr()
2992 int n = (eq->sidx - wrq->ss_pidx) * EQ_ESIZE; in commit_wrq_wr()
2994 MPASS(wrq->ss_len > n); /* WR had better wrap around. */ in commit_wrq_wr()
2995 bcopy(&wrq->ss[0], &eq->desc[wrq->ss_pidx], n); in commit_wrq_wr()
2996 bcopy(&wrq->ss[n], &eq->desc[0], wrq->ss_len - n); in commit_wrq_wr()
2997 wrq->tx_wrs_ss++; in commit_wrq_wr()
2999 wrq->tx_wrs_direct++; in commit_wrq_wr()
3002 ndesc = cookie->ndesc; /* Can be more than SGE_MAX_WR_NDESC here. */ in commit_wrq_wr()
3003 pidx = cookie->pidx; in commit_wrq_wr()
3004 MPASS(pidx >= 0 && pidx < eq->sidx); in commit_wrq_wr()
3008 MPASS(pidx == eq->dbidx); in commit_wrq_wr()
3011 struct fw_eth_tx_pkt_wr *dst; /* any fw WR struct will do */ in commit_wrq_wr()
3014 * Note that the WR via which we'll request tx updates in commit_wrq_wr()
3015 * is at pidx and not eq->pidx, which has moved on in commit_wrq_wr()
3018 dst = (void *)&eq->desc[pidx]; in commit_wrq_wr()
3019 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; in commit_wrq_wr()
3020 if (available < eq->sidx / 4 && in commit_wrq_wr()
3021 atomic_cmpset_int(&eq->equiq, 0, 1)) { in commit_wrq_wr()
3027 dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ | in commit_wrq_wr()
3031 if (__predict_true(eq->flags & EQ_HW_ALLOCATED)) in commit_wrq_wr()
3032 ring_eq_db(wrq->adapter, eq, ndesc); in commit_wrq_wr()
3034 IDXINCR(eq->dbidx, ndesc, eq->sidx); in commit_wrq_wr()
3036 MPASS(IDXDIFF(next->pidx, pidx, eq->sidx) == ndesc); in commit_wrq_wr()
3037 next->pidx = pidx; in commit_wrq_wr()
3038 next->ndesc += ndesc; in commit_wrq_wr()
3041 MPASS(IDXDIFF(pidx, prev->pidx, eq->sidx) == prev->ndesc); in commit_wrq_wr()
3042 prev->ndesc += ndesc; in commit_wrq_wr()
3044 TAILQ_REMOVE(&wrq->incomplete_wrs, cookie, link); in commit_wrq_wr()
3046 if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) in commit_wrq_wr()
3050 if (TAILQ_EMPTY(&wrq->incomplete_wrs)) { in commit_wrq_wr()
3052 MPASS(wrq->eq.pidx == wrq->eq.dbidx); in commit_wrq_wr()
3061 struct sge_eq *eq = r->cookie; in can_resume_eth_tx()
3063 return (total_available_tx_desc(eq) > eq->sidx / 8); in can_resume_eth_tx()
3078 return ((eq->flags & (EQ_ENABLED | EQ_QFLUSH)) != EQ_ENABLED); in discard_tx()
3084 struct fw_eth_tx_pkts_wr *wr = p; in wr_can_update_eq() local
3086 switch (G_FW_WR_OP(be32toh(wr->op_pkd))) { in wr_can_update_eq()
3101 struct fw_eth_tx_pkt_wr *wr) in set_txupdate_flags() argument
3103 struct sge_eq *eq = &txq->eq; in set_txupdate_flags()
3104 struct txpkts *txp = &txq->txp; in set_txupdate_flags()
3106 if ((txp->npkt > 0 || avail < eq->sidx / 2) && in set_txupdate_flags()
3107 atomic_cmpset_int(&eq->equiq, 0, 1)) { in set_txupdate_flags()
3108 wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ | F_FW_WR_EQUIQ); in set_txupdate_flags()
3109 eq->equeqidx = eq->pidx; in set_txupdate_flags()
3110 } else if (IDXDIFF(eq->pidx, eq->equeqidx, eq->sidx) >= 32) { in set_txupdate_flags()
3111 wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ); in set_txupdate_flags()
3112 eq->equeqidx = eq->pidx; in set_txupdate_flags()
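On the flags set here: as the names suggest, F_FW_WR_EQUEQ appears to request an egress-queue cidx update from the firmware and F_FW_WR_EQUIQ an interrupt on top of it; the code above asks for both when the queue looks busy (coalesced packets pending, or less than half the ring free) and for just the update at least once every 32 descriptors otherwise, so descriptor reclamation keeps pace with transmission.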
3124 const uint64_t last_tx = txq->last_tx; in record_eth_tx_time()
3132 txq->last_tx = cycles; in record_eth_tx_time()
3133 return (cycles - last_tx < itg); in record_eth_tx_time()
3137 * r->items[cidx] to r->items[pidx], with a wraparound at r->size, are ready to
3143 struct sge_txq *txq = r->cookie; in eth_tx()
3144 if_t ifp = txq->ifp; in eth_tx()
3145 struct sge_eq *eq = &txq->eq; in eth_tx()
3146 struct txpkts *txp = &txq->txp; in eth_tx()
3148 struct adapter *sc = vi->adapter; in eth_tx()
3154 void *wr; /* start of the last WR written to the ring */ in eth_tx() local
3159 remaining = IDXDIFF(pidx, cidx, r->size); in eth_tx()
3161 for (i = 0; i < txp->npkt; i++) in eth_tx()
3162 m_freem(txp->mb[i]); in eth_tx()
3163 txp->npkt = 0; in eth_tx()
3165 m0 = r->items[cidx]; in eth_tx()
3167 if (++cidx == r->size) in eth_tx()
3170 reclaim_tx_descs(txq, eq->sidx); in eth_tx()
3176 if (eq->pidx == eq->cidx) in eth_tx()
3177 avail = eq->sidx - 1; in eth_tx()
3179 avail = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; in eth_tx()
3183 txp->score = 0; in eth_tx()
3184 txq->txpkts_flush++; in eth_tx()
3191 m0 = r->items[cidx]; in eth_tx()
3193 MPASS(m0->m_nextpkt == NULL); in eth_tx()
3198 if (t4_tx_coalesce == 0 && txp->npkt == 0) in eth_tx()
3201 txp->score = 0; in eth_tx()
3203 if (++txp->score == 0) in eth_tx()
3204 txp->score = UINT8_MAX; in eth_tx()
3206 txp->score = 1; in eth_tx()
3207 if (txp->npkt > 0 || remaining > 1 || in eth_tx()
3208 txp->score >= t4_tx_coalesce_pkts || in eth_tx()
3209 atomic_load_int(&txq->eq.equiq) != 0) { in eth_tx()
3210 if (vi->flags & TX_USES_VM_WR) in eth_tx()
3219 MPASS(txp->npkt > 0); in eth_tx()
3220 for (i = 0; i < txp->npkt; i++) in eth_tx()
3221 ETHER_BPF_MTAP(ifp, txp->mb[i]); in eth_tx()
3222 if (txp->npkt > 1) { in eth_tx()
3223 MPASS(avail >= tx_len16_to_desc(txp->len16)); in eth_tx()
3224 if (vi->flags & TX_USES_VM_WR) in eth_tx()
3230 tx_len16_to_desc(mbuf_len16(txp->mb[0]))); in eth_tx()
3231 if (vi->flags & TX_USES_VM_WR) in eth_tx()
3233 txp->mb[0]); in eth_tx()
3235 n = write_txpkt_wr(sc, txq, txp->mb[0], in eth_tx()
3239 avail -= n; in eth_tx()
3241 wr = &eq->desc[eq->pidx]; in eth_tx()
3242 IDXINCR(eq->pidx, n, eq->sidx); in eth_tx()
3243 txp->npkt = 0; /* emptied */ in eth_tx()
3246 /* m0 was coalesced into txq->txpkts. */ in eth_tx()
3252 * combined with the existing txq->txpkts, which has now in eth_tx()
3256 MPASS(txp->npkt == 0); in eth_tx()
3261 MPASS(txp->npkt == 0); in eth_tx()
3270 wr = &eq->desc[eq->pidx]; in eth_tx()
3272 n = write_raw_wr(txq, wr, m0, avail); in eth_tx()
3276 n = t6_ktls_write_wr(txq, wr, m0, avail); in eth_tx()
3280 if (vi->flags & TX_USES_VM_WR) in eth_tx()
3289 avail -= n; in eth_tx()
3291 IDXINCR(eq->pidx, n, eq->sidx); in eth_tx()
3294 if (wr_can_update_eq(wr)) in eth_tx()
3295 set_txupdate_flags(txq, avail, wr); in eth_tx()
3302 remaining--; in eth_tx()
3303 if (__predict_false(++cidx == r->size)) in eth_tx()
3307 if (wr_can_update_eq(wr)) in eth_tx()
3308 set_txupdate_flags(txq, avail, wr); in eth_tx()
3311 } else if (eq->pidx == eq->cidx && txp->npkt > 0 && in eth_tx()
3312 atomic_load_int(&txq->eq.equiq) == 0) { in eth_tx()
3319 MPASS(txp->npkt > 0); in eth_tx()
3320 for (i = 0; i < txp->npkt; i++) in eth_tx()
3321 ETHER_BPF_MTAP(ifp, txp->mb[i]); in eth_tx()
3322 if (txp->npkt > 1) { in eth_tx()
3323 MPASS(avail >= tx_len16_to_desc(txp->len16)); in eth_tx()
3324 if (vi->flags & TX_USES_VM_WR) in eth_tx()
3330 tx_len16_to_desc(mbuf_len16(txp->mb[0]))); in eth_tx()
3331 if (vi->flags & TX_USES_VM_WR) in eth_tx()
3332 n = write_txpkt_vm_wr(sc, txq, txp->mb[0]); in eth_tx()
3334 n = write_txpkt_wr(sc, txq, txp->mb[0], avail); in eth_tx()
3337 wr = &eq->desc[eq->pidx]; in eth_tx()
3338 IDXINCR(eq->pidx, n, eq->sidx); in eth_tx()
3339 txp->npkt = 0; /* emptied */ in eth_tx()
3341 MPASS(wr_can_update_eq(wr)); in eth_tx()
3342 set_txupdate_flags(txq, avail - n, wr); in eth_tx()
3346 *coalescing = txp->npkt > 0; in eth_tx()
3358 KASSERT(pktc_idx < SGE_NCOUNTERS, /* -ve is ok, means don't use */ in init_iq()
3360 KASSERT(intr_idx >= -1 && intr_idx < sc->intr_count, in init_iq()
3365 iq->flags = 0; in init_iq()
3366 iq->state = IQS_DISABLED; in init_iq()
3367 iq->adapter = sc; in init_iq()
3368 iq->qtype = qtype; in init_iq()
3369 iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx); in init_iq()
3370 iq->intr_pktc_idx = SGE_NCOUNTERS - 1; in init_iq()
3372 iq->intr_params |= F_QINTR_CNT_EN; in init_iq()
3373 iq->intr_pktc_idx = pktc_idx; in init_iq()
3375 iq->qsize = roundup2(qsize, 16); /* See FW_IQ_CMD/iqsize */ in init_iq()
3376 iq->sidx = iq->qsize - sc->params.sge.spg_len / IQ_ESIZE; in init_iq()
3377 iq->intr_idx = intr_idx; in init_iq()
3378 iq->cong_drop = cong; in init_iq()
3384 struct sge_params *sp = &sc->params.sge; in init_fl()
3386 fl->qsize = qsize; in init_fl()
3387 fl->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE; in init_fl()
3388 strlcpy(fl->lockname, name, sizeof(fl->lockname)); in init_fl()
3389 mtx_init(&fl->fl_lock, fl->lockname, NULL, MTX_DEF); in init_fl()
3390 if (sc->flags & BUF_PACKING_OK && in init_fl()
3393 fl->flags |= FL_BUF_PACKING; in init_fl()
3394 fl->zidx = find_refill_source(sc, maxp, fl->flags & FL_BUF_PACKING); in init_fl()
3395 fl->safe_zidx = sc->sge.safe_zidx; in init_fl()
3396 if (fl->flags & FL_BUF_PACKING) { in init_fl()
3397 fl->lowat = roundup2(sp->fl_starve_threshold2, 8); in init_fl()
3398 fl->buf_boundary = sp->pack_boundary; in init_fl()
3400 fl->lowat = roundup2(sp->fl_starve_threshold, 8); in init_fl()
3401 fl->buf_boundary = 16; in init_fl()
3403 if (fl_pad && fl->buf_boundary < sp->pad_boundary) in init_fl()
3404 fl->buf_boundary = sp->pad_boundary; in init_fl()
3414 eq->type = eqtype; in init_eq()
3415 eq->port_id = port_id; in init_eq()
3416 eq->tx_chan = sc->port[port_id]->tx_chan; in init_eq()
3417 eq->iq = iq; in init_eq()
3418 eq->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE; in init_eq()
3419 strlcpy(eq->lockname, name, sizeof(eq->lockname)); in init_eq()
3420 mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF); in init_eq()
3429 rc = bus_dma_tag_create(sc->dmat, 512, 0, BUS_SPACE_MAXADDR, in alloc_ring()
3481 struct adapter *sc = vi->adapter; in alloc_iq_fl()
3483 MPASS(!(iq->flags & IQ_SW_ALLOCATED)); in alloc_iq_fl()
3485 len = iq->qsize * IQ_ESIZE; in alloc_iq_fl()
3486 rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba, in alloc_iq_fl()
3487 (void **)&iq->desc); in alloc_iq_fl()
3492 len = fl->qsize * EQ_ESIZE; in alloc_iq_fl()
3493 rc = alloc_ring(sc, len, &fl->desc_tag, &fl->desc_map, in alloc_iq_fl()
3494 &fl->ba, (void **)&fl->desc); in alloc_iq_fl()
3496 free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, in alloc_iq_fl()
3497 iq->desc); in alloc_iq_fl()
3502 fl->sdesc = malloc(fl->sidx * 8 * sizeof(struct fl_sdesc), in alloc_iq_fl()
3506 iq->flags |= IQ_HAS_FL; in alloc_iq_fl()
3509 iq->flags |= IQ_SW_ALLOCATED; in alloc_iq_fl()
3521 MPASS(iq->flags & IQ_SW_ALLOCATED); in free_iq_fl()
3524 MPASS(iq->flags & IQ_HAS_FL); in free_iq_fl()
3525 free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba, fl->desc); in free_iq_fl()
3527 free(fl->sdesc, M_CXGBE); in free_iq_fl()
3528 mtx_destroy(&fl->fl_lock); in free_iq_fl()
3531 free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, iq->desc); in free_iq_fl()
3547 struct adapter *sc = vi->adapter; in alloc_iq_fl_hwq()
3548 struct port_info *pi = vi->pi; in alloc_iq_fl_hwq()
3551 MPASS (!(iq->flags & IQ_HW_ALLOCATED)); in alloc_iq_fl_hwq()
3555 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) | in alloc_iq_fl_hwq()
3562 if (iq == &sc->sge.fwq) in alloc_iq_fl_hwq()
3565 if (iq->intr_idx < 0) { in alloc_iq_fl_hwq()
3568 v |= V_FW_IQ_CMD_IQANDSTINDEX(sc->sge.fwq.cntxt_id); in alloc_iq_fl_hwq()
3570 KASSERT(iq->intr_idx < sc->intr_count, in alloc_iq_fl_hwq()
3571 ("%s: invalid direct intr_idx %d", __func__, iq->intr_idx)); in alloc_iq_fl_hwq()
3572 v |= V_FW_IQ_CMD_IQANDSTINDEX(iq->intr_idx); in alloc_iq_fl_hwq()
3575 bzero(iq->desc, iq->qsize * IQ_ESIZE); in alloc_iq_fl_hwq()
3578 V_FW_IQ_CMD_VIID(vi->viid) | in alloc_iq_fl_hwq()
3580 c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) | in alloc_iq_fl_hwq()
3582 V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) | in alloc_iq_fl_hwq()
3583 V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4)); in alloc_iq_fl_hwq()
3584 c.iqsize = htobe16(iq->qsize); in alloc_iq_fl_hwq()
3585 c.iqaddr = htobe64(iq->ba); in alloc_iq_fl_hwq()
3586 c.iqns_to_fl0congen = htobe32(V_FW_IQ_CMD_IQTYPE(iq->qtype)); in alloc_iq_fl_hwq()
3587 if (iq->cong_drop != -1) { in alloc_iq_fl_hwq()
3588 cong_map = iq->qtype == IQ_ETH ? pi->rx_e_chan_map : 0; in alloc_iq_fl_hwq()
3593 bzero(fl->desc, fl->sidx * EQ_ESIZE + sc->params.sge.spg_len); in alloc_iq_fl_hwq()
3598 (fl->flags & FL_BUF_PACKING ? F_FW_IQ_CMD_FL0PACKEN : in alloc_iq_fl_hwq()
3600 if (iq->cong_drop != -1) { in alloc_iq_fl_hwq()
3611 c.fl0size = htobe16(fl->qsize); in alloc_iq_fl_hwq()
3612 c.fl0addr = htobe64(fl->ba); in alloc_iq_fl_hwq()
3615 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); in alloc_iq_fl_hwq()
3621 iq->cidx = 0; in alloc_iq_fl_hwq()
3622 iq->gen = F_RSPD_GEN; in alloc_iq_fl_hwq()
3623 iq->cntxt_id = be16toh(c.iqid); in alloc_iq_fl_hwq()
3624 iq->abs_id = be16toh(c.physiqid); in alloc_iq_fl_hwq()
3626 cntxt_id = iq->cntxt_id - sc->sge.iq_start; in alloc_iq_fl_hwq()
3627 if (cntxt_id >= sc->sge.iqmap_sz) { in alloc_iq_fl_hwq()
3628 panic ("%s: iq->cntxt_id (%d) more than the max (%d)", __func__, in alloc_iq_fl_hwq()
3629 cntxt_id, sc->sge.iqmap_sz - 1); in alloc_iq_fl_hwq()
3631 sc->sge.iqmap[cntxt_id] = iq; in alloc_iq_fl_hwq()
3638 MPASS(!(fl->flags & FL_BUF_RESUME)); in alloc_iq_fl_hwq()
3639 for (i = 0; i < fl->sidx * 8; i++) in alloc_iq_fl_hwq()
3640 MPASS(fl->sdesc[i].cl == NULL); in alloc_iq_fl_hwq()
3642 fl->cntxt_id = be16toh(c.fl0id); in alloc_iq_fl_hwq()
3643 fl->pidx = fl->cidx = fl->hw_cidx = fl->dbidx = 0; in alloc_iq_fl_hwq()
3644 fl->rx_offset = 0; in alloc_iq_fl_hwq()
3645 fl->flags &= ~(FL_STARVING | FL_DOOMED); in alloc_iq_fl_hwq()
3647 cntxt_id = fl->cntxt_id - sc->sge.eq_start; in alloc_iq_fl_hwq()
3648 if (cntxt_id >= sc->sge.eqmap_sz) { in alloc_iq_fl_hwq()
3649 panic("%s: fl->cntxt_id (%d) more than the max (%d)", in alloc_iq_fl_hwq()
3650 __func__, cntxt_id, sc->sge.eqmap_sz - 1); in alloc_iq_fl_hwq()
3652 sc->sge.eqmap[cntxt_id] = (void *)fl; in alloc_iq_fl_hwq()
3654 qid = fl->cntxt_id; in alloc_iq_fl_hwq()
3655 if (isset(&sc->doorbells, DOORBELL_UDB)) { in alloc_iq_fl_hwq()
3656 uint32_t s_qpp = sc->params.sge.eq_s_qpp; in alloc_iq_fl_hwq()
3657 uint32_t mask = (1 << s_qpp) - 1; in alloc_iq_fl_hwq()
3660 udb = sc->udbs_base + UDBS_DB_OFFSET; in alloc_iq_fl_hwq()
3667 fl->udb = (volatile void *)udb; in alloc_iq_fl_hwq()
3669 fl->dbval = V_QID(qid) | sc->chip_params->sge_fl_db; in alloc_iq_fl_hwq()
3673 refill_fl(sc, fl, fl->lowat); in alloc_iq_fl_hwq()
3677 if (chip_id(sc) >= CHELSIO_T5 && !(sc->flags & IS_VF) && in alloc_iq_fl_hwq()
3678 iq->cong_drop != -1) { in alloc_iq_fl_hwq()
3679 t4_sge_set_conm_context(sc, iq->cntxt_id, iq->cong_drop, in alloc_iq_fl_hwq()
3684 atomic_store_rel_int(&iq->state, IQS_IDLE); in alloc_iq_fl_hwq()
3685 t4_write_reg(sc, sc->sge_gts_reg, V_SEINTARM(iq->intr_params) | in alloc_iq_fl_hwq()
3686 V_INGRESSQID(iq->cntxt_id)); in alloc_iq_fl_hwq()
3688 iq->flags |= IQ_HW_ALLOCATED; in alloc_iq_fl_hwq()
3698 MPASS(iq->flags & IQ_HW_ALLOCATED); in free_iq_fl_hwq()
3699 rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP, in free_iq_fl_hwq()
3700 iq->cntxt_id, fl ? fl->cntxt_id : 0xffff, 0xffff); in free_iq_fl_hwq()
3705 iq->flags &= ~IQ_HW_ALLOCATED; in free_iq_fl_hwq()
3720 SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, &iq->ba, in add_iq_sysctls()
3723 iq->qsize * IQ_ESIZE, "descriptor ring size in bytes"); in add_iq_sysctls()
3725 &iq->abs_id, 0, "absolute id of the queue"); in add_iq_sysctls()
3727 &iq->cntxt_id, 0, "SGE context id of the queue"); in add_iq_sysctls()
3728 SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &iq->cidx, in add_iq_sysctls()
3747 &fl->ba, "bus address of descriptor ring"); in add_fl_sysctls()
3749 fl->sidx * EQ_ESIZE + sc->params.sge.spg_len, in add_fl_sysctls()
3752 &fl->cntxt_id, 0, "SGE context id of the freelist"); in add_fl_sysctls()
3756 fl->flags & FL_BUF_PACKING ? 1 : 0, "packing enabled"); in add_fl_sysctls()
3757 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &fl->cidx, in add_fl_sysctls()
3759 if (fl->flags & FL_BUF_PACKING) { in add_fl_sysctls()
3761 CTLFLAG_RD, &fl->rx_offset, 0, "packing rx offset"); in add_fl_sysctls()
3763 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, &fl->pidx, in add_fl_sysctls()
3766 CTLFLAG_RD, &fl->cl_allocated, "# of clusters allocated"); in add_fl_sysctls()
3768 CTLFLAG_RD, &fl->cl_recycled, "# of clusters recycled"); in add_fl_sysctls()
3770 CTLFLAG_RD, &fl->cl_fast_recycled, "# of clusters recycled (fast)"); in add_fl_sysctls()
3780 struct sge_iq *fwq = &sc->sge.fwq; in alloc_fwq()
3781 struct vi_info *vi = &sc->port[0]->vi[0]; in alloc_fwq()
3783 if (!(fwq->flags & IQ_SW_ALLOCATED)) { in alloc_fwq()
3784 MPASS(!(fwq->flags & IQ_HW_ALLOCATED)); in alloc_fwq()
3786 if (sc->flags & IS_VF) in alloc_fwq()
3789 intr_idx = sc->intr_count > 1 ? 1 : 0; in alloc_fwq()
3790 init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE, intr_idx, -1, IQ_OTHER); in alloc_fwq()
3791 rc = alloc_iq_fl(vi, fwq, NULL, &sc->ctx, sc->fwq_oid); in alloc_fwq()
3796 MPASS(fwq->flags & IQ_SW_ALLOCATED); in alloc_fwq()
3799 if (!(fwq->flags & IQ_HW_ALLOCATED)) { in alloc_fwq()
3800 MPASS(fwq->flags & IQ_SW_ALLOCATED); in alloc_fwq()
3807 MPASS(fwq->flags & IQ_HW_ALLOCATED); in alloc_fwq()
3819 struct sge_iq *fwq = &sc->sge.fwq; in free_fwq()
3821 if (fwq->flags & IQ_HW_ALLOCATED) { in free_fwq()
3822 MPASS(fwq->flags & IQ_SW_ALLOCATED); in free_fwq()
3824 MPASS(!(fwq->flags & IQ_HW_ALLOCATED)); in free_fwq()
3827 if (fwq->flags & IQ_SW_ALLOCATED) { in free_fwq()
3828 MPASS(!(fwq->flags & IQ_HW_ALLOCATED)); in free_fwq()
3830 MPASS(!(fwq->flags & IQ_SW_ALLOCATED)); in free_fwq()
3843 struct sge_wrq *ctrlq = &sc->sge.ctrlq[idx]; in alloc_ctrlq()
3845 MPASS(idx < sc->params.nports); in alloc_ctrlq()
3847 if (!(ctrlq->eq.flags & EQ_SW_ALLOCATED)) { in alloc_ctrlq()
3848 MPASS(!(ctrlq->eq.flags & EQ_HW_ALLOCATED)); in alloc_ctrlq()
3851 oid = SYSCTL_ADD_NODE(&sc->ctx, SYSCTL_CHILDREN(sc->ctrlq_oid), in alloc_ctrlq()
3856 device_get_nameunit(sc->dev), idx); in alloc_ctrlq()
3857 init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, idx, in alloc_ctrlq()
3858 &sc->sge.fwq, name); in alloc_ctrlq()
3859 rc = alloc_wrq(sc, NULL, ctrlq, &sc->ctx, oid); in alloc_ctrlq()
3865 MPASS(ctrlq->eq.flags & EQ_SW_ALLOCATED); in alloc_ctrlq()
3868 if (!(ctrlq->eq.flags & EQ_HW_ALLOCATED)) { in alloc_ctrlq()
3869 MPASS(ctrlq->eq.flags & EQ_SW_ALLOCATED); in alloc_ctrlq()
3870 MPASS(ctrlq->nwr_pending == 0); in alloc_ctrlq()
3871 MPASS(ctrlq->ndesc_needed == 0); in alloc_ctrlq()
3873 rc = alloc_eq_hwq(sc, NULL, &ctrlq->eq); in alloc_ctrlq()
3878 MPASS(ctrlq->eq.flags & EQ_HW_ALLOCATED); in alloc_ctrlq()
3890 struct sge_wrq *ctrlq = &sc->sge.ctrlq[idx]; in free_ctrlq()
3892 if (ctrlq->eq.flags & EQ_HW_ALLOCATED) { in free_ctrlq()
3893 MPASS(ctrlq->eq.flags & EQ_SW_ALLOCATED); in free_ctrlq()
3894 free_eq_hwq(sc, NULL, &ctrlq->eq); in free_ctrlq()
3895 MPASS(!(ctrlq->eq.flags & EQ_HW_ALLOCATED)); in free_ctrlq()
3898 if (ctrlq->eq.flags & EQ_SW_ALLOCATED) { in free_ctrlq()
3899 MPASS(!(ctrlq->eq.flags & EQ_HW_ALLOCATED)); in free_ctrlq()
3901 MPASS(!(ctrlq->eq.flags & EQ_SW_ALLOCATED)); in free_ctrlq()
3909 const int cng_ch_bits_log = sc->chip_params->cng_ch_bits_log; in t4_sge_set_conm_context()
3919 case -1: in t4_sge_set_conm_context()
3950 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); in t4_sge_set_conm_context()
3967 struct adapter *sc = vi->adapter; in alloc_rxq()
3968 if_t ifp = vi->ifp; in alloc_rxq()
3972 if (!(rxq->iq.flags & IQ_SW_ALLOCATED)) { in alloc_rxq()
3973 MPASS(!(rxq->iq.flags & IQ_HW_ALLOCATED)); in alloc_rxq()
3975 rc = tcp_lro_init_args(&rxq->lro, ifp, lro_entries, lro_mbufs); in alloc_rxq()
3978 MPASS(rxq->lro.ifp == ifp); /* also indicates LRO init'ed */ in alloc_rxq()
3980 rxq->ifp = ifp; in alloc_rxq()
3983 oid = SYSCTL_ADD_NODE(&vi->ctx, SYSCTL_CHILDREN(vi->rxq_oid), in alloc_rxq()
3987 init_iq(&rxq->iq, sc, vi->tmr_idx, vi->pktc_idx, vi->qsize_rxq, in alloc_rxq()
3991 rxq->iq.flags |= IQ_LRO_ENABLED; in alloc_rxq()
3994 rxq->iq.flags |= IQ_RX_TIMESTAMP; in alloc_rxq()
3995 snprintf(name, sizeof(name), "%s rxq%d-fl", in alloc_rxq()
3996 device_get_nameunit(vi->dev), idx); in alloc_rxq()
3997 init_fl(sc, &rxq->fl, vi->qsize_rxq / 8, maxp, name); in alloc_rxq()
3998 rc = alloc_iq_fl(vi, &rxq->iq, &rxq->fl, &vi->ctx, oid); in alloc_rxq()
4003 tcp_lro_free(&rxq->lro); in alloc_rxq()
4004 rxq->lro.ifp = NULL; in alloc_rxq()
4008 MPASS(rxq->iq.flags & IQ_SW_ALLOCATED); in alloc_rxq()
4009 add_rxq_sysctls(&vi->ctx, oid, rxq); in alloc_rxq()
4012 if (!(rxq->iq.flags & IQ_HW_ALLOCATED)) { in alloc_rxq()
4013 MPASS(rxq->iq.flags & IQ_SW_ALLOCATED); in alloc_rxq()
4014 rc = alloc_iq_fl_hwq(vi, &rxq->iq, &rxq->fl); in alloc_rxq()
4019 MPASS(rxq->iq.flags & IQ_HW_ALLOCATED); in alloc_rxq()
4022 sc->sge.iq_base = rxq->iq.abs_id - rxq->iq.cntxt_id; in alloc_rxq()
4024 KASSERT(rxq->iq.cntxt_id + sc->sge.iq_base == rxq->iq.abs_id, in alloc_rxq()
4026 KASSERT(sc->sge.iq_base == 0 || sc->flags & IS_VF, in alloc_rxq()
4027 ("PF with non-zero iq_base")); in alloc_rxq()
4033 FL_LOCK(&rxq->fl); in alloc_rxq()
4034 refill_fl(sc, &rxq->fl, 128); in alloc_rxq()
4035 FL_UNLOCK(&rxq->fl); in alloc_rxq()
4047 if (rxq->iq.flags & IQ_HW_ALLOCATED) { in free_rxq()
4048 MPASS(rxq->iq.flags & IQ_SW_ALLOCATED); in free_rxq()
4049 free_iq_fl_hwq(vi->adapter, &rxq->iq, &rxq->fl); in free_rxq()
4050 MPASS(!(rxq->iq.flags & IQ_HW_ALLOCATED)); in free_rxq()
4053 if (rxq->iq.flags & IQ_SW_ALLOCATED) { in free_rxq()
4054 MPASS(!(rxq->iq.flags & IQ_HW_ALLOCATED)); in free_rxq()
4056 tcp_lro_free(&rxq->lro); in free_rxq()
4058 free_iq_fl(vi->adapter, &rxq->iq, &rxq->fl); in free_rxq()
4059 MPASS(!(rxq->iq.flags & IQ_SW_ALLOCATED)); in free_rxq()
4076 &rxq->lro.lro_queued, 0, NULL); in add_rxq_sysctls()
4078 &rxq->lro.lro_flushed, 0, NULL); in add_rxq_sysctls()
4081 &rxq->rxcsum, "# of times hardware assisted with checksum"); in add_rxq_sysctls()
4083 &rxq->vlan_extraction, "# of times hardware extracted 802.1Q tag"); in add_rxq_sysctls()
4085 &rxq->vxlan_rxcsum, in add_rxq_sysctls()
4098 struct adapter *sc = vi->adapter; in alloc_ofld_rxq()
4102 if (!(ofld_rxq->iq.flags & IQ_SW_ALLOCATED)) { in alloc_ofld_rxq()
4103 MPASS(!(ofld_rxq->iq.flags & IQ_HW_ALLOCATED)); in alloc_ofld_rxq()
4106 oid = SYSCTL_ADD_NODE(&vi->ctx, in alloc_ofld_rxq()
4107 SYSCTL_CHILDREN(vi->ofld_rxq_oid), OID_AUTO, name, in alloc_ofld_rxq()
4110 init_iq(&ofld_rxq->iq, sc, vi->ofld_tmr_idx, vi->ofld_pktc_idx, in alloc_ofld_rxq()
4111 vi->qsize_rxq, intr_idx, ofld_cong_drop, IQ_OFLD); in alloc_ofld_rxq()
4112 snprintf(name, sizeof(name), "%s ofld_rxq%d-fl", in alloc_ofld_rxq()
4113 device_get_nameunit(vi->dev), idx); in alloc_ofld_rxq()
4114 init_fl(sc, &ofld_rxq->fl, vi->qsize_rxq / 8, maxp, name); in alloc_ofld_rxq()
4115 rc = alloc_iq_fl(vi, &ofld_rxq->iq, &ofld_rxq->fl, &vi->ctx, in alloc_ofld_rxq()
4123 MPASS(ofld_rxq->iq.flags & IQ_SW_ALLOCATED); in alloc_ofld_rxq()
4124 ofld_rxq->rx_iscsi_ddp_setup_ok = counter_u64_alloc(M_WAITOK); in alloc_ofld_rxq()
4125 ofld_rxq->rx_iscsi_ddp_setup_error = in alloc_ofld_rxq()
4127 ofld_rxq->ddp_buffer_alloc = counter_u64_alloc(M_WAITOK); in alloc_ofld_rxq()
4128 ofld_rxq->ddp_buffer_reuse = counter_u64_alloc(M_WAITOK); in alloc_ofld_rxq()
4129 ofld_rxq->ddp_buffer_free = counter_u64_alloc(M_WAITOK); in alloc_ofld_rxq()
4130 add_ofld_rxq_sysctls(&vi->ctx, oid, ofld_rxq); in alloc_ofld_rxq()
4133 if (!(ofld_rxq->iq.flags & IQ_HW_ALLOCATED)) { in alloc_ofld_rxq()
4134 MPASS(ofld_rxq->iq.flags & IQ_SW_ALLOCATED); in alloc_ofld_rxq()
4135 rc = alloc_iq_fl_hwq(vi, &ofld_rxq->iq, &ofld_rxq->fl); in alloc_ofld_rxq()
4141 MPASS(ofld_rxq->iq.flags & IQ_HW_ALLOCATED); in alloc_ofld_rxq()
4152 if (ofld_rxq->iq.flags & IQ_HW_ALLOCATED) { in free_ofld_rxq()
4153 MPASS(ofld_rxq->iq.flags & IQ_SW_ALLOCATED); in free_ofld_rxq()
4154 free_iq_fl_hwq(vi->adapter, &ofld_rxq->iq, &ofld_rxq->fl); in free_ofld_rxq()
4155 MPASS(!(ofld_rxq->iq.flags & IQ_HW_ALLOCATED)); in free_ofld_rxq()
4158 if (ofld_rxq->iq.flags & IQ_SW_ALLOCATED) { in free_ofld_rxq()
4159 MPASS(!(ofld_rxq->iq.flags & IQ_HW_ALLOCATED)); in free_ofld_rxq()
4160 free_iq_fl(vi->adapter, &ofld_rxq->iq, &ofld_rxq->fl); in free_ofld_rxq()
4161 MPASS(!(ofld_rxq->iq.flags & IQ_SW_ALLOCATED)); in free_ofld_rxq()
4162 counter_u64_free(ofld_rxq->rx_iscsi_ddp_setup_ok); in free_ofld_rxq()
4163 counter_u64_free(ofld_rxq->rx_iscsi_ddp_setup_error); in free_ofld_rxq()
4164 counter_u64_free(ofld_rxq->ddp_buffer_alloc); in free_ofld_rxq()
4165 counter_u64_free(ofld_rxq->ddp_buffer_reuse); in free_ofld_rxq()
4166 counter_u64_free(ofld_rxq->ddp_buffer_free); in free_ofld_rxq()
4182 CTLFLAG_RD, &ofld_rxq->rx_aio_ddp_jobs, 0, in add_ofld_rxq_sysctls()
4185 CTLFLAG_RD, &ofld_rxq->rx_aio_ddp_octets, 0, in add_ofld_rxq_sysctls()
4188 "rx_toe_tls_records", CTLFLAG_RD, &ofld_rxq->rx_toe_tls_records, in add_ofld_rxq_sysctls()
4191 "rx_toe_tls_octets", CTLFLAG_RD, &ofld_rxq->rx_toe_tls_octets, in add_ofld_rxq_sysctls()
4194 "rx_toe_ddp_octets", CTLFLAG_RD, &ofld_rxq->rx_toe_ddp_octets, in add_ofld_rxq_sysctls()
4197 "ddp_buffer_alloc", CTLFLAG_RD, &ofld_rxq->ddp_buffer_alloc, in add_ofld_rxq_sysctls()
4200 "ddp_buffer_reuse", CTLFLAG_RD, &ofld_rxq->ddp_buffer_reuse, in add_ofld_rxq_sysctls()
4203 "ddp_buffer_free", CTLFLAG_RD, &ofld_rxq->ddp_buffer_free, in add_ofld_rxq_sysctls()
4211 CTLFLAG_RD, &ofld_rxq->rx_iscsi_ddp_setup_ok, in add_ofld_rxq_sysctls()
4214 CTLFLAG_RD, &ofld_rxq->rx_iscsi_ddp_setup_error, in add_ofld_rxq_sysctls()
4217 CTLFLAG_RD, &ofld_rxq->rx_iscsi_ddp_octets, 0, in add_ofld_rxq_sysctls()
4220 CTLFLAG_RD, &ofld_rxq->rx_iscsi_ddp_pdus, 0, in add_ofld_rxq_sysctls()
4223 CTLFLAG_RD, &ofld_rxq->rx_iscsi_fl_octets, 0, in add_ofld_rxq_sysctls()
4226 CTLFLAG_RD, &ofld_rxq->rx_iscsi_fl_pdus, 0, in add_ofld_rxq_sysctls()
4229 CTLFLAG_RD, &ofld_rxq->rx_iscsi_padding_errors, 0, in add_ofld_rxq_sysctls()
4232 CTLFLAG_RD, &ofld_rxq->rx_iscsi_header_digest_errors, 0, in add_ofld_rxq_sysctls()
4235 CTLFLAG_RD, &ofld_rxq->rx_iscsi_data_digest_errors, 0, in add_ofld_rxq_sysctls()
4260 int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; in ctrl_eq_alloc()
4265 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) | in ctrl_eq_alloc()
4269 c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid)); in ctrl_eq_alloc()
4273 V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) | in ctrl_eq_alloc()
4274 F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid)); in ctrl_eq_alloc()
4281 c.eqaddr = htobe64(eq->ba); in ctrl_eq_alloc()
4283 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); in ctrl_eq_alloc()
4286 eq->tx_chan, rc); in ctrl_eq_alloc()
4290 eq->cntxt_id = G_FW_EQ_CTRL_CMD_EQID(be32toh(c.cmpliqid_eqid)); in ctrl_eq_alloc()
4291 eq->abs_id = G_FW_EQ_CTRL_CMD_PHYSEQID(be32toh(c.physeqid_pkd)); in ctrl_eq_alloc()
4292 cntxt_id = eq->cntxt_id - sc->sge.eq_start; in ctrl_eq_alloc()
4293 if (cntxt_id >= sc->sge.eqmap_sz) in ctrl_eq_alloc()
4294 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, in ctrl_eq_alloc()
4295 cntxt_id, sc->sge.eqmap_sz - 1); in ctrl_eq_alloc()
4296 sc->sge.eqmap[cntxt_id] = eq; in ctrl_eq_alloc()
4306 int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; in eth_eq_alloc()
4311 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) | in eth_eq_alloc()
4316 F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid)); in eth_eq_alloc()
4319 V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO | in eth_eq_alloc()
4320 V_FW_EQ_ETH_CMD_IQID(eq->iqid)); in eth_eq_alloc()
4326 c.eqaddr = htobe64(eq->ba); in eth_eq_alloc()
4328 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); in eth_eq_alloc()
4330 device_printf(vi->dev, in eth_eq_alloc()
4335 eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd)); in eth_eq_alloc()
4336 eq->abs_id = G_FW_EQ_ETH_CMD_PHYSEQID(be32toh(c.physeqid_pkd)); in eth_eq_alloc()
4337 cntxt_id = eq->cntxt_id - sc->sge.eq_start; in eth_eq_alloc()
4338 if (cntxt_id >= sc->sge.eqmap_sz) in eth_eq_alloc()
4339 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, in eth_eq_alloc()
4340 cntxt_id, sc->sge.eqmap_sz - 1); in eth_eq_alloc()
4341 sc->sge.eqmap[cntxt_id] = eq; in eth_eq_alloc()
4352 int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; in ofld_eq_alloc()
4357 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(sc->pf) | in ofld_eq_alloc()
4363 V_FW_EQ_OFLD_CMD_PCIECHN(eq->tx_chan) | in ofld_eq_alloc()
4364 F_FW_EQ_OFLD_CMD_FETCHRO | V_FW_EQ_OFLD_CMD_IQID(eq->iqid)); in ofld_eq_alloc()
4371 c.eqaddr = htobe64(eq->ba); in ofld_eq_alloc()
4373 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); in ofld_eq_alloc()
4375 device_printf(vi->dev, in ofld_eq_alloc()
4380 eq->cntxt_id = G_FW_EQ_OFLD_CMD_EQID(be32toh(c.eqid_pkd)); in ofld_eq_alloc()
4381 eq->abs_id = G_FW_EQ_OFLD_CMD_PHYSEQID(be32toh(c.physeqid_pkd)); in ofld_eq_alloc()
4382 cntxt_id = eq->cntxt_id - sc->sge.eq_start; in ofld_eq_alloc()
4383 if (cntxt_id >= sc->sge.eqmap_sz) in ofld_eq_alloc()
4384 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, in ofld_eq_alloc()
4385 cntxt_id, sc->sge.eqmap_sz - 1); in ofld_eq_alloc()
4386 sc->sge.eqmap[cntxt_id] = eq; in ofld_eq_alloc()
4400 MPASS(!(eq->flags & EQ_SW_ALLOCATED)); in alloc_eq()
4402 qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; in alloc_eq()
4404 rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map, &eq->ba, in alloc_eq()
4405 (void **)&eq->desc); in alloc_eq()
4410 eq->flags |= EQ_SW_ALLOCATED; in alloc_eq()
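/*
 * Illustrative sketch (not driver code): how the ring length is derived in
 * alloc_eq() above.  qsize counts the usable descriptors (sidx) plus the
 * descriptors occupied by the status page (spg_len / EQ_ESIZE).  The values
 * used below (EQ_ESIZE = 64, spg_len = 64, sidx = 1024) are assumptions for
 * the example only.
 */
#include <stdio.h>

#define EX_EQ_ESIZE	64	/* bytes per EQ descriptor (assumed) */
#define EX_SPG_LEN	64	/* status page length in bytes (assumed) */

int
main(void)
{
	unsigned int sidx = 1024;	/* usable descriptors (assumed) */
	unsigned int qsize = sidx + EX_SPG_LEN / EX_EQ_ESIZE;
	unsigned int len = qsize * EX_EQ_ESIZE;	/* bytes handed to alloc_ring */

	printf("qsize = %u descriptors, ring length = %u bytes\n", qsize, len);
	return (0);
}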
4419 MPASS(eq->flags & EQ_SW_ALLOCATED); in free_eq()
4420 if (eq->type == EQ_ETH) in free_eq()
4421 MPASS(eq->pidx == eq->cidx); in free_eq()
4423 free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc); in free_eq()
4424 mtx_destroy(&eq->eq_lock); in free_eq()
4434 SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, &eq->ba, in add_eq_sysctls()
4437 eq->sidx * EQ_ESIZE + sc->params.sge.spg_len, in add_eq_sysctls()
4440 &eq->abs_id, 0, "absolute id of the queue"); in add_eq_sysctls()
4442 &eq->cntxt_id, 0, "SGE context id of the queue"); in add_eq_sysctls()
4443 SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &eq->cidx, in add_eq_sysctls()
4445 SYSCTL_ADD_U16(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, &eq->pidx, in add_eq_sysctls()
4448 eq->sidx, "status page index"); in add_eq_sysctls()
4456 MPASS(!(eq->flags & EQ_HW_ALLOCATED)); in alloc_eq_hwq()
4458 eq->iqid = eq->iq->cntxt_id; in alloc_eq_hwq()
4459 eq->pidx = eq->cidx = eq->dbidx = 0; in alloc_eq_hwq()
4461 eq->equeqidx = 0; in alloc_eq_hwq()
4462 eq->doorbells = sc->doorbells; in alloc_eq_hwq()
4463 bzero(eq->desc, eq->sidx * EQ_ESIZE + sc->params.sge.spg_len); in alloc_eq_hwq()
4465 switch (eq->type) { in alloc_eq_hwq()
4481 panic("%s: invalid eq type %d.", __func__, eq->type); in alloc_eq_hwq()
4485 eq->type, rc); in alloc_eq_hwq()
4489 if (isset(&eq->doorbells, DOORBELL_UDB) || in alloc_eq_hwq()
4490 isset(&eq->doorbells, DOORBELL_UDBWC) || in alloc_eq_hwq()
4491 isset(&eq->doorbells, DOORBELL_WCWR)) { in alloc_eq_hwq()
4492 uint32_t s_qpp = sc->params.sge.eq_s_qpp; in alloc_eq_hwq()
4493 uint32_t mask = (1 << s_qpp) - 1; in alloc_eq_hwq()
4496 udb = sc->udbs_base + UDBS_DB_OFFSET; in alloc_eq_hwq()
4497 udb += (eq->cntxt_id >> s_qpp) << PAGE_SHIFT; /* pg offset */ in alloc_eq_hwq()
4498 eq->udb_qid = eq->cntxt_id & mask; /* id in page */ in alloc_eq_hwq()
4499 if (eq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE) in alloc_eq_hwq()
4500 clrbit(&eq->doorbells, DOORBELL_WCWR); in alloc_eq_hwq()
4502 udb += eq->udb_qid << UDBS_SEG_SHIFT; /* seg offset */ in alloc_eq_hwq()
4503 eq->udb_qid = 0; in alloc_eq_hwq()
4505 eq->udb = (volatile void *)udb; in alloc_eq_hwq()
4508 eq->flags |= EQ_HW_ALLOCATED; in alloc_eq_hwq()
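/*
 * Illustrative sketch (not driver code): the user-doorbell address math in
 * alloc_eq_hwq() above.  A BAR2 doorbell page is shared by 2^s_qpp egress
 * queues; the page is picked by cntxt_id >> s_qpp and the queue's slot inside
 * the page by cntxt_id & mask, each slot being one UDBS segment.  The
 * constants below (4KB pages, 128-byte segments, s_qpp = 3) are assumptions
 * for the example only.
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	const unsigned int page_shift = 12;	/* PAGE_SHIFT (assumed) */
	const unsigned int seg_shift = 7;	/* UDBS_SEG_SHIFT (assumed) */
	const unsigned int s_qpp = 3;		/* 8 EQs per doorbell page (assumed) */
	const uint32_t mask = (1u << s_qpp) - 1;
	const uint32_t cntxt_id = 42;		/* example queue context id */
	uint64_t off;

	off = (uint64_t)(cntxt_id >> s_qpp) << page_shift;	/* page offset */
	printf("page offset 0x%llx, qid in page %u, segment offset 0x%x\n",
	    (unsigned long long)off, cntxt_id & mask,
	    (cntxt_id & mask) << seg_shift);
	return (0);
}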
4517 MPASS(eq->flags & EQ_HW_ALLOCATED); in free_eq_hwq()
4519 switch (eq->type) { in free_eq_hwq()
4521 rc = -t4_ctrl_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id); in free_eq_hwq()
4524 rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id); in free_eq_hwq()
4528 rc = -t4_ofld_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id); in free_eq_hwq()
4532 panic("%s: invalid eq type %d.", __func__, eq->type); in free_eq_hwq()
4535 CH_ERR(sc, "failed to free eq (type %d): %d\n", eq->type, rc); in free_eq_hwq()
4538 eq->flags &= ~EQ_HW_ALLOCATED; in free_eq_hwq()
4547 struct sge_eq *eq = &wrq->eq; in alloc_wrq()
4550 MPASS(!(eq->flags & EQ_SW_ALLOCATED)); in alloc_wrq()
4555 MPASS(eq->flags & EQ_SW_ALLOCATED); in alloc_wrq()
4558 wrq->adapter = sc; in alloc_wrq()
4559 TASK_INIT(&wrq->wrq_tx_task, 0, wrq_tx_drain, wrq); in alloc_wrq()
4560 TAILQ_INIT(&wrq->incomplete_wrs); in alloc_wrq()
4561 STAILQ_INIT(&wrq->wr_list); in alloc_wrq()
4562 wrq->nwr_pending = 0; in alloc_wrq()
4563 wrq->ndesc_needed = 0; in alloc_wrq()
4572 free_eq(sc, &wrq->eq); in free_wrq()
4573 MPASS(wrq->nwr_pending == 0); in free_wrq()
4574 MPASS(wrq->ndesc_needed == 0); in free_wrq()
4575 MPASS(TAILQ_EMPTY(&wrq->incomplete_wrs)); in free_wrq()
4576 MPASS(STAILQ_EMPTY(&wrq->wr_list)); in free_wrq()
4591 &wrq->tx_wrs_direct, "# of work requests (direct)"); in add_wrq_sysctls()
4593 &wrq->tx_wrs_copied, "# of work requests (copied)"); in add_wrq_sysctls()
4595 &wrq->tx_wrs_ss, "# of work requests (copied from scratch space)"); in add_wrq_sysctls()
4605 struct port_info *pi = vi->pi; in alloc_txq()
4606 struct adapter *sc = vi->adapter; in alloc_txq()
4607 struct sge_eq *eq = &txq->eq; in alloc_txq()
4612 if (!(eq->flags & EQ_SW_ALLOCATED)) { in alloc_txq()
4613 MPASS(!(eq->flags & EQ_HW_ALLOCATED)); in alloc_txq()
4616 oid = SYSCTL_ADD_NODE(&vi->ctx, SYSCTL_CHILDREN(vi->txq_oid), in alloc_txq()
4620 iqidx = vi->first_rxq + (idx % vi->nrxq); in alloc_txq()
4622 device_get_nameunit(vi->dev), idx); in alloc_txq()
4623 init_eq(sc, &txq->eq, EQ_ETH, vi->qsize_txq, pi->port_id, in alloc_txq()
4624 &sc->sge.rxq[iqidx].iq, name); in alloc_txq()
4626 rc = mp_ring_alloc(&txq->r, eq->sidx, txq, eth_tx, in alloc_txq()
4627 can_resume_eth_tx, M_CXGBE, &eq->eq_lock, M_WAITOK); in alloc_txq()
4636 rc = alloc_eq(sc, eq, &vi->ctx, oid); in alloc_txq()
4639 mp_ring_free(txq->r); in alloc_txq()
4642 MPASS(eq->flags & EQ_SW_ALLOCATED); in alloc_txq()
4645 TASK_INIT(&txq->tx_reclaim_task, 0, tx_reclaim, eq); in alloc_txq()
4646 txq->ifp = vi->ifp; in alloc_txq()
4647 txq->gl = sglist_alloc(TX_SGL_SEGS, M_WAITOK); in alloc_txq()
4648 txq->sdesc = malloc(eq->sidx * sizeof(struct tx_sdesc), M_CXGBE, in alloc_txq()
4651 add_txq_sysctls(vi, &vi->ctx, oid, txq); in alloc_txq()
4654 if (!(eq->flags & EQ_HW_ALLOCATED)) { in alloc_txq()
4655 MPASS(eq->flags & EQ_SW_ALLOCATED); in alloc_txq()
4661 MPASS(eq->flags & EQ_HW_ALLOCATED); in alloc_txq()
4665 sc->sge.eq_base = eq->abs_id - eq->cntxt_id; in alloc_txq()
4667 KASSERT(eq->cntxt_id + sc->sge.eq_base == eq->abs_id, in alloc_txq()
4669 KASSERT(sc->sge.eq_base == 0 || sc->flags & IS_VF, in alloc_txq()
4670 ("PF with non-zero eq_base")); in alloc_txq()
4672 txp = &txq->txp; in alloc_txq()
4673 MPASS(nitems(txp->mb) >= sc->params.max_pkts_per_eth_tx_pkts_wr); in alloc_txq()
4674 txq->txp.max_npkt = min(nitems(txp->mb), in alloc_txq()
4675 sc->params.max_pkts_per_eth_tx_pkts_wr); in alloc_txq()
4676 if (vi->flags & TX_USES_VM_WR && !(sc->flags & IS_VF)) in alloc_txq()
4677 txq->txp.max_npkt--; in alloc_txq()
4679 if (vi->flags & TX_USES_VM_WR) in alloc_txq()
4680 txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) | in alloc_txq()
4681 V_TXPKT_INTF(pi->tx_chan)); in alloc_txq()
4683 txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) | in alloc_txq()
4684 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) | in alloc_txq()
4685 V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld)); in alloc_txq()
4687 txq->tc_idx = -1; in alloc_txq()
4699 struct adapter *sc = vi->adapter; in free_txq()
4700 struct sge_eq *eq = &txq->eq; in free_txq()
4702 if (eq->flags & EQ_HW_ALLOCATED) { in free_txq()
4703 MPASS(eq->flags & EQ_SW_ALLOCATED); in free_txq()
4705 MPASS(!(eq->flags & EQ_HW_ALLOCATED)); in free_txq()
4708 if (eq->flags & EQ_SW_ALLOCATED) { in free_txq()
4709 MPASS(!(eq->flags & EQ_HW_ALLOCATED)); in free_txq()
4710 sglist_free(txq->gl); in free_txq()
4711 free(txq->sdesc, M_CXGBE); in free_txq()
4712 mp_ring_free(txq->r); in free_txq()
4714 MPASS(!(eq->flags & EQ_SW_ALLOCATED)); in free_txq()
4729 sc = vi->adapter; in add_txq_sysctls()
4732 mp_ring_sysctls(txq->r, ctx, children); in add_txq_sysctls()
4735 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, txq - sc->sge.txq, in add_txq_sysctls()
4736 sysctl_tc, "I", "traffic class (-1 means none)"); in add_txq_sysctls()
4739 &txq->txcsum, "# of times hardware assisted with checksum"); in add_txq_sysctls()
4741 &txq->vlan_insertion, "# of times hardware inserted 802.1Q tag"); in add_txq_sysctls()
4743 &txq->tso_wrs, "# of TSO work requests"); in add_txq_sysctls()
4745 &txq->imm_wrs, "# of work requests with immediate data"); in add_txq_sysctls()
4747 &txq->sgl_wrs, "# of work requests with direct SGL"); in add_txq_sysctls()
4749 &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)"); in add_txq_sysctls()
4751 &txq->txpkts0_wrs, "# of txpkts (type 0) work requests"); in add_txq_sysctls()
4753 &txq->txpkts1_wrs, "# of txpkts (type 1) work requests"); in add_txq_sysctls()
4755 &txq->txpkts0_pkts, in add_txq_sysctls()
4758 &txq->txpkts1_pkts, in add_txq_sysctls()
4761 &txq->txpkts_flush, in add_txq_sysctls()
4762 "# of times txpkts had to be flushed out by an egress-update"); in add_txq_sysctls()
4764 &txq->raw_wrs, "# of raw work requests (non-packets)"); in add_txq_sysctls()
4766 &txq->vxlan_tso_wrs, "# of VXLAN TSO work requests"); in add_txq_sysctls()
4768 &txq->vxlan_txcsum, in add_txq_sysctls()
4774 CTLFLAG_RD, &txq->kern_tls_records, in add_txq_sysctls()
4777 CTLFLAG_RD, &txq->kern_tls_short, in add_txq_sysctls()
4780 CTLFLAG_RD, &txq->kern_tls_partial, in add_txq_sysctls()
4783 CTLFLAG_RD, &txq->kern_tls_full, in add_txq_sysctls()
4786 CTLFLAG_RD, &txq->kern_tls_octets, in add_txq_sysctls()
4789 CTLFLAG_RD, &txq->kern_tls_waste, in add_txq_sysctls()
4792 CTLFLAG_RD, &txq->kern_tls_options, in add_txq_sysctls()
4793 "# of NIC TLS options-only packets transmitted"); in add_txq_sysctls()
4795 CTLFLAG_RD, &txq->kern_tls_header, in add_txq_sysctls()
4796 "# of NIC TLS header-only packets transmitted"); in add_txq_sysctls()
4798 CTLFLAG_RD, &txq->kern_tls_fin, in add_txq_sysctls()
4799 "# of NIC TLS FIN-only packets transmitted"); in add_txq_sysctls()
4801 CTLFLAG_RD, &txq->kern_tls_fin_short, in add_txq_sysctls()
4804 CTLFLAG_RD, &txq->kern_tls_cbc, in add_txq_sysctls()
4805 "# of NIC TLS sessions using AES-CBC"); in add_txq_sysctls()
4807 CTLFLAG_RD, &txq->kern_tls_gcm, in add_txq_sysctls()
4808 "# of NIC TLS sessions using AES-GCM"); in add_txq_sysctls()
4821 struct port_info *pi = vi->pi; in alloc_ofld_txq()
4822 struct adapter *sc = vi->adapter; in alloc_ofld_txq()
4823 struct sge_eq *eq = &ofld_txq->wrq.eq; in alloc_ofld_txq()
4828 MPASS(idx < vi->nofldtxq); in alloc_ofld_txq()
4830 if (!(eq->flags & EQ_SW_ALLOCATED)) { in alloc_ofld_txq()
4832 oid = SYSCTL_ADD_NODE(&vi->ctx, in alloc_ofld_txq()
4833 SYSCTL_CHILDREN(vi->ofld_txq_oid), OID_AUTO, name, in alloc_ofld_txq()
4837 device_get_nameunit(vi->dev), idx); in alloc_ofld_txq()
4838 if (vi->nofldrxq > 0) { in alloc_ofld_txq()
4839 iqidx = vi->first_ofld_rxq + (idx % vi->nofldrxq); in alloc_ofld_txq()
4840 init_eq(sc, eq, EQ_OFLD, vi->qsize_txq, pi->port_id, in alloc_ofld_txq()
4841 &sc->sge.ofld_rxq[iqidx].iq, name); in alloc_ofld_txq()
4843 iqidx = vi->first_rxq + (idx % vi->nrxq); in alloc_ofld_txq()
4844 init_eq(sc, eq, EQ_OFLD, vi->qsize_txq, pi->port_id, in alloc_ofld_txq()
4845 &sc->sge.rxq[iqidx].iq, name); in alloc_ofld_txq()
4848 rc = alloc_wrq(sc, vi, &ofld_txq->wrq, &vi->ctx, oid); in alloc_ofld_txq()
4855 MPASS(eq->flags & EQ_SW_ALLOCATED); in alloc_ofld_txq()
4858 ofld_txq->tx_iscsi_pdus = counter_u64_alloc(M_WAITOK); in alloc_ofld_txq()
4859 ofld_txq->tx_iscsi_octets = counter_u64_alloc(M_WAITOK); in alloc_ofld_txq()
4860 ofld_txq->tx_iscsi_iso_wrs = counter_u64_alloc(M_WAITOK); in alloc_ofld_txq()
4861 ofld_txq->tx_aio_jobs = counter_u64_alloc(M_WAITOK); in alloc_ofld_txq()
4862 ofld_txq->tx_aio_octets = counter_u64_alloc(M_WAITOK); in alloc_ofld_txq()
4863 ofld_txq->tx_toe_tls_records = counter_u64_alloc(M_WAITOK); in alloc_ofld_txq()
4864 ofld_txq->tx_toe_tls_octets = counter_u64_alloc(M_WAITOK); in alloc_ofld_txq()
4865 add_ofld_txq_sysctls(&vi->ctx, oid, ofld_txq); in alloc_ofld_txq()
4868 if (!(eq->flags & EQ_HW_ALLOCATED)) { in alloc_ofld_txq()
4869 MPASS(eq->flags & EQ_SW_ALLOCATED); in alloc_ofld_txq()
4870 MPASS(ofld_txq->wrq.nwr_pending == 0); in alloc_ofld_txq()
4871 MPASS(ofld_txq->wrq.ndesc_needed == 0); in alloc_ofld_txq()
4878 MPASS(eq->flags & EQ_HW_ALLOCATED); in alloc_ofld_txq()
4890 struct adapter *sc = vi->adapter; in free_ofld_txq()
4891 struct sge_eq *eq = &ofld_txq->wrq.eq; in free_ofld_txq()
4893 if (eq->flags & EQ_HW_ALLOCATED) { in free_ofld_txq()
4894 MPASS(eq->flags & EQ_SW_ALLOCATED); in free_ofld_txq()
4896 MPASS(!(eq->flags & EQ_HW_ALLOCATED)); in free_ofld_txq()
4899 if (eq->flags & EQ_SW_ALLOCATED) { in free_ofld_txq()
4900 MPASS(!(eq->flags & EQ_HW_ALLOCATED)); in free_ofld_txq()
4901 counter_u64_free(ofld_txq->tx_iscsi_pdus); in free_ofld_txq()
4902 counter_u64_free(ofld_txq->tx_iscsi_octets); in free_ofld_txq()
4903 counter_u64_free(ofld_txq->tx_iscsi_iso_wrs); in free_ofld_txq()
4904 counter_u64_free(ofld_txq->tx_aio_jobs); in free_ofld_txq()
4905 counter_u64_free(ofld_txq->tx_aio_octets); in free_ofld_txq()
4906 counter_u64_free(ofld_txq->tx_toe_tls_records); in free_ofld_txq()
4907 counter_u64_free(ofld_txq->tx_toe_tls_octets); in free_ofld_txq()
4908 free_wrq(sc, &ofld_txq->wrq); in free_ofld_txq()
4909 MPASS(!(eq->flags & EQ_SW_ALLOCATED)); in free_ofld_txq()
4925 CTLFLAG_RD, &ofld_txq->tx_iscsi_pdus, in add_ofld_txq_sysctls()
4928 CTLFLAG_RD, &ofld_txq->tx_iscsi_octets, in add_ofld_txq_sysctls()
4931 CTLFLAG_RD, &ofld_txq->tx_iscsi_iso_wrs, in add_ofld_txq_sysctls()
4934 CTLFLAG_RD, &ofld_txq->tx_aio_jobs, in add_ofld_txq_sysctls()
4935 "# of zero-copy aio_write(2) jobs transmitted"); in add_ofld_txq_sysctls()
4937 CTLFLAG_RD, &ofld_txq->tx_aio_octets, in add_ofld_txq_sysctls()
4938 "# of payload octets in transmitted zero-copy aio_write(2) jobs"); in add_ofld_txq_sysctls()
4940 CTLFLAG_RD, &ofld_txq->tx_toe_tls_records, in add_ofld_txq_sysctls()
4943 CTLFLAG_RD, &ofld_txq->tx_toe_tls_octets, in add_ofld_txq_sysctls()
4956 *ba = error ? 0 : segs->ds_addr; in oneseg_dma_callback()
4964 n = IDXDIFF(fl->pidx >> 3, fl->dbidx, fl->sidx); in ring_fl_db()
4968 v = fl->dbval | V_PIDX(n); in ring_fl_db()
4969 if (fl->udb) in ring_fl_db()
4970 *fl->udb = htole32(v); in ring_fl_db()
4972 t4_write_reg(sc, sc->sge_kdoorbell_reg, v); in ring_fl_db()
4973 IDXINCR(fl->dbidx, n, fl->sidx); in ring_fl_db()
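/*
 * Illustrative sketch (not driver code): ring_fl_db() above converts the
 * per-buffer producer index into the 8-buffer units the hardware counts
 * (fl->pidx >> 3), then advances the doorbell index circularly.  ex_idxdiff
 * and ex_idxincr below are stand-ins for the driver's IDXDIFF/IDXINCR macros,
 * written here only to show the modular arithmetic.
 */
#include <stdio.h>

static unsigned int
ex_idxdiff(unsigned int head, unsigned int tail, unsigned int size)
{
	/* distance from tail to head on a ring of 'size' entries */
	return (head >= tail ? head - tail : size - tail + head);
}

static void
ex_idxincr(unsigned int *idx, unsigned int incr, unsigned int size)
{
	*idx = (*idx + incr) % size;	/* wrap at the end of the ring */
}

int
main(void)
{
	unsigned int sidx = 512;	/* freelist descriptors (assumed) */
	unsigned int pidx = 40;		/* producer index, in buffers */
	unsigned int dbidx = 2;		/* last value told to hw, in groups of 8 */
	unsigned int n = ex_idxdiff(pidx >> 3, dbidx, sidx);

	printf("ring doorbell with a PIDX increment of %u groups of 8\n", n);
	ex_idxincr(&dbidx, n, sidx);
	printf("new dbidx = %u\n", dbidx);
	return (0);
}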
4980 * Returns non-zero to indicate that this freelist should be added to the list
4992 uint16_t max_pidx, zidx = fl->zidx; in refill_fl()
4993 uint16_t hw_cidx = fl->hw_cidx; /* stable snapshot */ in refill_fl()
5002 max_pidx = __predict_false(hw_cidx == 0) ? fl->sidx - 1 : hw_cidx - 1; in refill_fl()
5003 if (fl->pidx == max_pidx * 8) in refill_fl()
5006 d = &fl->desc[fl->pidx]; in refill_fl()
5007 sd = &fl->sdesc[fl->pidx]; in refill_fl()
5008 rxb = &sc->sge.rx_buf_info[zidx]; in refill_fl()
5012 if (sd->cl != NULL) { in refill_fl()
5014 if (sd->nmbuf == 0) { in refill_fl()
5022 fl->cl_fast_recycled++; in refill_fl()
5034 if (atomic_fetchadd_int(&clm->refcount, -1) == 1) { in refill_fl()
5035 fl->cl_recycled++; in refill_fl()
5039 sd->cl = NULL; /* gave up my reference */ in refill_fl()
5041 MPASS(sd->cl == NULL); in refill_fl()
5042 cl = uma_zalloc(rxb->zone, M_NOWAIT); in refill_fl()
5044 if (zidx != fl->safe_zidx) { in refill_fl()
5045 zidx = fl->safe_zidx; in refill_fl()
5046 rxb = &sc->sge.rx_buf_info[zidx]; in refill_fl()
5047 cl = uma_zalloc(rxb->zone, M_NOWAIT); in refill_fl()
5052 fl->cl_allocated++; in refill_fl()
5053 n--; in refill_fl()
5056 sd->cl = cl; in refill_fl()
5057 sd->zidx = zidx; in refill_fl()
5059 if (fl->flags & FL_BUF_PACKING) { in refill_fl()
5060 *d = htobe64(pa | rxb->hwidx2); in refill_fl()
5061 sd->moff = rxb->size2; in refill_fl()
5063 *d = htobe64(pa | rxb->hwidx1); in refill_fl()
5064 sd->moff = 0; in refill_fl()
5067 sd->nmbuf = 0; in refill_fl()
5070 if (__predict_false((++fl->pidx & 7) == 0)) { in refill_fl()
5071 uint16_t pidx = fl->pidx >> 3; in refill_fl()
5073 if (__predict_false(pidx == fl->sidx)) { in refill_fl()
5074 fl->pidx = 0; in refill_fl()
5076 sd = fl->sdesc; in refill_fl()
5077 d = fl->desc; in refill_fl()
5082 if (IDXDIFF(pidx, fl->dbidx, fl->sidx) >= 4) in refill_fl()
5087 if ((fl->pidx >> 3) != fl->dbidx) in refill_fl()
5090 return (FL_RUNNING_LOW(fl) && !(fl->flags & FL_STARVING)); in refill_fl()
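/*
 * Note on the refill_fl() fragments above: each slot tries, in order, to
 * fast-recycle its existing cluster when no mbufs still reference it
 * (sd->nmbuf == 0), to recycle it after dropping the last cluster-metadata
 * reference, and only then to allocate a fresh cluster with uma_zalloc(),
 * falling back to the safe zone (fl->safe_zidx) if the preferred zone has no
 * buffers.  The return value asks the caller to keep an eye on a freelist
 * that is running low but not yet marked starving.
 */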
5102 mtx_assert(&sc->sfl_lock, MA_OWNED); in refill_sfl()
5103 TAILQ_FOREACH_SAFE(fl, &sc->sfl, link, fl_temp) { in refill_sfl()
5106 if (FL_NOT_RUNNING_LOW(fl) || fl->flags & FL_DOOMED) { in refill_sfl()
5107 TAILQ_REMOVE(&sc->sfl, fl, link); in refill_sfl()
5108 fl->flags &= ~FL_STARVING; in refill_sfl()
5113 if (!TAILQ_EMPTY(&sc->sfl)) in refill_sfl()
5114 callout_schedule(&sc->sfl_callout, hz / 5); in refill_sfl()
5129 sd = fl->sdesc; in free_fl_buffers()
5130 for (i = 0; i < fl->sidx * 8; i++, sd++) { in free_fl_buffers()
5131 if (sd->cl == NULL) in free_fl_buffers()
5134 if (sd->nmbuf == 0) in free_fl_buffers()
5135 uma_zfree(sc->sge.rx_buf_info[sd->zidx].zone, sd->cl); in free_fl_buffers()
5136 else if (fl->flags & FL_BUF_PACKING) { in free_fl_buffers()
5138 if (atomic_fetchadd_int(&clm->refcount, -1) == 1) { in free_fl_buffers()
5139 uma_zfree(sc->sge.rx_buf_info[sd->zidx].zone, in free_fl_buffers()
5140 sd->cl); in free_fl_buffers()
5144 sd->cl = NULL; in free_fl_buffers()
5147 if (fl->flags & FL_BUF_RESUME) { in free_fl_buffers()
5148 m_freem(fl->m0); in free_fl_buffers()
5149 fl->flags &= ~FL_BUF_RESUME; in free_fl_buffers()
5167 KASSERT(gl->sg_nseg == mbuf_nsegs(m), in get_pkt_gl()
5169 mbuf_nsegs(m), gl->sg_nseg)); in get_pkt_gl()
5171 KASSERT(gl->sg_nseg > 0 && gl->sg_nseg <= max_nsegs_allowed(m, vm_wr), in get_pkt_gl()
5173 gl->sg_nseg, max_nsegs_allowed(m, vm_wr))); in get_pkt_gl()
5178 * len16 for a txpkt WR with a GL. Includes the firmware work request header.
5187 nsegs--; /* first segment is part of ulptx_sgl */ in txpkt_len16()
5196 * len16 for a txpkt_vm WR with a GL. Includes the firmware work
5206 nsegs--; /* first segment is part of ulptx_sgl */ in txpkt_vm_len16()
5238 * len16 for a txpkts type 0 WR with a GL. Does not include the firmware work
5248 nsegs--; /* first segment is part of ulptx_sgl */ in txpkts0_len16()
5257 * len16 for a txpkts type 1 WR with a GL. Does not include the firmware work
5275 n = ndesc * EQ_ESIZE - sizeof(struct fw_eth_tx_pkt_wr) - in imm_payload()
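/*
 * Illustrative sketch (not driver code): the len16/immediate-payload math in
 * the helpers above.  Work request sizes are expressed in 16-byte units
 * (len16) and a tx descriptor is EQ_ESIZE bytes.  The struct sizes below
 * (16 bytes each for the WR header, the CPL, and the ulptx_sgl head) and
 * EQ_ESIZE = 64 are assumptions for the example only.
 */
#include <stdio.h>

#define EX_EQ_ESIZE	64
#define EX_WR_HDR	16	/* sizeof(struct fw_eth_tx_pkt_wr), assumed */
#define EX_CPL		16	/* sizeof(struct cpl_tx_pkt_core), assumed */
#define EX_ULPTX_SGL	16	/* cmd + len0 + addr0 of the SGL, assumed */
#define ex_howmany(x, y)	(((x) + (y) - 1) / (y))

/* len16 for a txpkt WR whose payload is an SGL with 'nsegs' segments. */
static unsigned int
ex_txpkt_len16(unsigned int nsegs)
{
	unsigned int n;

	nsegs--;	/* the first segment lives in the ulptx_sgl itself */
	n = EX_WR_HDR + EX_CPL + EX_ULPTX_SGL +
	    8 * ((3 * nsegs) / 2 + (nsegs & 1));
	return (ex_howmany(n, 16));
}

/* Max immediate payload that fits in 'ndesc' descriptors after the headers. */
static unsigned int
ex_imm_payload(unsigned int ndesc)
{
	return (ndesc * EX_EQ_ESIZE - EX_WR_HDR - EX_CPL);
}

int
main(void)
{
	printf("txpkt with 3 SGL segments: %u len16 units\n", ex_txpkt_len16(3));
	printf("immediate payload in 2 descriptors: %u bytes\n",
	    ex_imm_payload(2));
	return (0);
}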
5298 MPASS(m->m_pkthdr.l2hlen >= ETHER_HDR_LEN); in csum_to_ctrl()
5299 MPASS(m->m_pkthdr.l3hlen >= sizeof(struct ip)); in csum_to_ctrl()
5302 MPASS(m->m_pkthdr.l4hlen > 0); in csum_to_ctrl()
5303 MPASS(m->m_pkthdr.l5hlen > 0); in csum_to_ctrl()
5304 MPASS(m->m_pkthdr.inner_l2hlen >= ETHER_HDR_LEN); in csum_to_ctrl()
5305 MPASS(m->m_pkthdr.inner_l3hlen >= sizeof(struct ip)); in csum_to_ctrl()
5307 l2hlen = m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen + in csum_to_ctrl()
5308 m->m_pkthdr.l4hlen + m->m_pkthdr.l5hlen + in csum_to_ctrl()
5309 m->m_pkthdr.inner_l2hlen - ETHER_HDR_LEN; in csum_to_ctrl()
5310 l3hlen = m->m_pkthdr.inner_l3hlen; in csum_to_ctrl()
5312 l2hlen = m->m_pkthdr.l2hlen - ETHER_HDR_LEN; in csum_to_ctrl()
5313 l3hlen = m->m_pkthdr.l3hlen; in csum_to_ctrl()
5320 if (m->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_INNER_IP_TCP | in csum_to_ctrl()
5323 else if (m->m_pkthdr.csum_flags & (CSUM_IP_UDP | CSUM_INNER_IP_UDP | in csum_to_ctrl()
5329 if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_IP_TCP | CSUM_IP_UDP | in csum_to_ctrl()
5333 MPASS(m->m_pkthdr.csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | in csum_to_ctrl()
5360 KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && in write_lso_cpl()
5361 m0->m_pkthdr.l4hlen > 0, in write_lso_cpl()
5367 V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen - ETHER_HDR_LEN) >> 2) | in write_lso_cpl()
5368 V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) | in write_lso_cpl()
5369 V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2); in write_lso_cpl()
5370 if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) in write_lso_cpl()
5374 lso->lso_ctrl = htobe32(ctrl); in write_lso_cpl()
5375 lso->ipid_ofst = htobe16(0); in write_lso_cpl()
5376 lso->mss = htobe16(m0->m_pkthdr.tso_segsz); in write_lso_cpl()
5377 lso->seqno_offset = htobe32(0); in write_lso_cpl()
5378 lso->len = htobe32(m0->m_pkthdr.len); in write_lso_cpl()
5389 KASSERT(m0->m_pkthdr.inner_l2hlen > 0 && in write_tnl_lso_cpl()
5390 m0->m_pkthdr.inner_l3hlen > 0 && m0->m_pkthdr.inner_l4hlen > 0 && in write_tnl_lso_cpl()
5391 m0->m_pkthdr.inner_l5hlen > 0, in write_tnl_lso_cpl()
5394 KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && in write_tnl_lso_cpl()
5395 m0->m_pkthdr.l4hlen > 0 && m0->m_pkthdr.l5hlen > 0, in write_tnl_lso_cpl()
5403 (m0->m_pkthdr.l2hlen - ETHER_HDR_LEN) >> 2) | in write_tnl_lso_cpl()
5404 V_CPL_TX_TNL_LSO_IPHDRLENOUT(m0->m_pkthdr.l3hlen >> 2) | in write_tnl_lso_cpl()
5406 if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) in write_tnl_lso_cpl()
5412 tnl_lso->op_to_IpIdSplitOut = htobe32(ctrl); in write_tnl_lso_cpl()
5413 tnl_lso->IpIdOffsetOut = 0; in write_tnl_lso_cpl()
5414 tnl_lso->UdpLenSetOut_to_TnlHdrLen = in write_tnl_lso_cpl()
5417 V_CPL_TX_TNL_LSO_TNLHDRLEN(m0->m_pkthdr.l2hlen + in write_tnl_lso_cpl()
5418 m0->m_pkthdr.l3hlen + m0->m_pkthdr.l4hlen + in write_tnl_lso_cpl()
5419 m0->m_pkthdr.l5hlen) | in write_tnl_lso_cpl()
5421 tnl_lso->r1 = 0; in write_tnl_lso_cpl()
5425 (m0->m_pkthdr.inner_l2hlen - ETHER_HDR_LEN) >> 2) | in write_tnl_lso_cpl()
5426 V_CPL_TX_TNL_LSO_IPHDRLEN(m0->m_pkthdr.inner_l3hlen >> 2) | in write_tnl_lso_cpl()
5427 V_CPL_TX_TNL_LSO_TCPHDRLEN(m0->m_pkthdr.inner_l4hlen >> 2); in write_tnl_lso_cpl()
5428 if (m0->m_pkthdr.inner_l3hlen == sizeof(struct ip6_hdr)) in write_tnl_lso_cpl()
5430 tnl_lso->Flow_to_TcpHdrLen = htobe32(ctrl); in write_tnl_lso_cpl()
5431 tnl_lso->IpIdOffset = 0; in write_tnl_lso_cpl()
5432 tnl_lso->IpIdSplit_to_Mss = in write_tnl_lso_cpl()
5433 htobe16(V_CPL_TX_TNL_LSO_MSS(m0->m_pkthdr.tso_segsz)); in write_tnl_lso_cpl()
5434 tnl_lso->TCPSeqOffset = 0; in write_tnl_lso_cpl()
5435 tnl_lso->EthLenOffset_Size = in write_tnl_lso_cpl()
5436 htobe32(V_CPL_TX_TNL_LSO_SIZE(m0->m_pkthdr.len)); in write_tnl_lso_cpl()
5444 * Write a VM txpkt WR for this packet to the hardware descriptors, update the
5454 struct fw_eth_tx_pkt_vm_wr *wr; in write_txpkt_vm_wr() local
5466 pktlen = m0->m_pkthdr.len; in write_txpkt_vm_wr()
5473 eq = &txq->eq; in write_txpkt_vm_wr()
5474 wr = (void *)&eq->desc[eq->pidx]; in write_txpkt_vm_wr()
5475 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_VM_WR) | in write_txpkt_vm_wr()
5479 wr->equiq_to_len16 = htobe32(ctrl); in write_txpkt_vm_wr()
5480 wr->r3[0] = 0; in write_txpkt_vm_wr()
5481 wr->r3[1] = 0; in write_txpkt_vm_wr()
5490 m_copydata(m0, 0, VM_TX_L2HDR_LEN, wr->ethmacdst); in write_txpkt_vm_wr()
5493 cpl = write_lso_cpl(wr + 1, m0); in write_txpkt_vm_wr()
5494 txq->tso_wrs++; in write_txpkt_vm_wr()
5496 cpl = (void *)(wr + 1); in write_txpkt_vm_wr()
5501 txq->txcsum++; /* some hardware assistance provided */ in write_txpkt_vm_wr()
5506 V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); in write_txpkt_vm_wr()
5507 txq->vlan_insertion++; in write_txpkt_vm_wr()
5508 } else if (sc->vlan_id) in write_txpkt_vm_wr()
5509 ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(sc->vlan_id); in write_txpkt_vm_wr()
5512 cpl->ctrl0 = txq->cpl_ctrl0; in write_txpkt_vm_wr()
5513 cpl->pack = 0; in write_txpkt_vm_wr()
5514 cpl->len = htobe16(pktlen); in write_txpkt_vm_wr()
5515 cpl->ctrl1 = htobe64(ctrl1); in write_txpkt_vm_wr()
5527 if (dst == (void *)&eq->desc[eq->sidx]) { in write_txpkt_vm_wr()
5528 dst = (void *)&eq->desc[0]; in write_txpkt_vm_wr()
5531 write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx); in write_txpkt_vm_wr()
5532 txq->sgl_wrs++; in write_txpkt_vm_wr()
5533 txq->txpkt_wrs++; in write_txpkt_vm_wr()
5535 txsd = &txq->sdesc[eq->pidx]; in write_txpkt_vm_wr()
5536 txsd->m = m0; in write_txpkt_vm_wr()
5537 txsd->desc_used = ndesc; in write_txpkt_vm_wr()
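/*
 * Note on the write_txpkt_vm_wr() fragments above: the WR is laid out at
 * eq->desc[eq->pidx] as the fw_eth_tx_pkt_vm_wr header, an optional LSO CPL
 * for TSO packets, the cpl_tx_pkt_core, and then the packet as an SGL written
 * by write_gl_to_txd().  The software descriptor at the same pidx records the
 * mbuf and the number of descriptors consumed so that reclaim_tx_descs() can
 * free the mbuf and advance cidx once the hardware is done with it.
 */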
5543 * Write a raw WR to the hardware descriptors, update the software
5550 write_raw_wr(struct sge_txq *txq, void *wr, struct mbuf *m0, u_int available) in write_raw_wr() argument
5552 struct sge_eq *eq = &txq->eq; in write_raw_wr()
5562 dst = wr; in write_raw_wr()
5563 for (m = m0; m != NULL; m = m->m_next) in write_raw_wr()
5564 copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len); in write_raw_wr()
5566 txq->raw_wrs++; in write_raw_wr()
5568 txsd = &txq->sdesc[eq->pidx]; in write_raw_wr()
5569 txsd->m = m0; in write_raw_wr()
5570 txsd->desc_used = ndesc; in write_raw_wr()
5576 * Write a txpkt WR for this packet to the hardware descriptors, update the
5587 struct fw_eth_tx_pkt_wr *wr; in write_txpkt_wr() local
5600 pktlen = m0->m_pkthdr.len; in write_txpkt_wr()
5619 eq = &txq->eq; in write_txpkt_wr()
5620 wr = (void *)&eq->desc[eq->pidx]; in write_txpkt_wr()
5621 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) | in write_txpkt_wr()
5625 wr->equiq_to_len16 = htobe32(ctrl); in write_txpkt_wr()
5626 wr->r3 = 0; in write_txpkt_wr()
5630 cpl = write_tnl_lso_cpl(wr + 1, m0); in write_txpkt_wr()
5631 txq->vxlan_tso_wrs++; in write_txpkt_wr()
5633 cpl = write_lso_cpl(wr + 1, m0); in write_txpkt_wr()
5634 txq->tso_wrs++; in write_txpkt_wr()
5637 cpl = (void *)(wr + 1); in write_txpkt_wr()
5644 txq->vxlan_txcsum++; in write_txpkt_wr()
5646 txq->txcsum++; in write_txpkt_wr()
5652 V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); in write_txpkt_wr()
5653 txq->vlan_insertion++; in write_txpkt_wr()
5657 cpl->ctrl0 = txq->cpl_ctrl0; in write_txpkt_wr()
5658 cpl->pack = 0; in write_txpkt_wr()
5659 cpl->len = htobe16(pktlen); in write_txpkt_wr()
5660 cpl->ctrl1 = htobe64(ctrl1); in write_txpkt_wr()
5664 if (__predict_false((uintptr_t)dst == (uintptr_t)&eq->desc[eq->sidx])) in write_txpkt_wr()
5665 dst = (caddr_t)&eq->desc[0]; in write_txpkt_wr()
5668 write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx); in write_txpkt_wr()
5669 txq->sgl_wrs++; in write_txpkt_wr()
5673 for (m = m0; m != NULL; m = m->m_next) { in write_txpkt_wr()
5674 copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len); in write_txpkt_wr()
5676 pktlen -= m->m_len; in write_txpkt_wr()
5682 txq->imm_wrs++; in write_txpkt_wr()
5685 txq->txpkt_wrs++; in write_txpkt_wr()
5687 txsd = &txq->sdesc[eq->pidx]; in write_txpkt_wr()
5688 txsd->m = m0; in write_txpkt_wr()
5689 txsd->desc_used = ndesc; in write_txpkt_wr()
5699 MPASS(txp->npkt > 0); in cmp_l2hdr()
5700 MPASS(m->m_len >= VM_TX_L2HDR_LEN); in cmp_l2hdr()
5702 if (txp->ethtype == be16toh(ETHERTYPE_VLAN)) in cmp_l2hdr()
5707 return (memcmp(m->m_data, &txp->ethmacdst[0], len) != 0); in cmp_l2hdr()
5713 MPASS(m->m_len >= VM_TX_L2HDR_LEN); in save_l2hdr()
5715 memcpy(&txp->ethmacdst[0], mtod(m, const void *), VM_TX_L2HDR_LEN); in save_l2hdr()
5722 struct txpkts *txp = &txq->txp; in add_to_txpkts_vf()
5727 *send = txp->npkt > 0; in add_to_txpkts_vf()
5736 if (txp->npkt > 0) { in add_to_txpkts_vf()
5737 MPASS(tx_len16_to_desc(txp->len16) <= avail); in add_to_txpkts_vf()
5738 MPASS(txp->npkt < txp->max_npkt); in add_to_txpkts_vf()
5739 MPASS(txp->wr_type == 1); /* VF supports type 1 only */ in add_to_txpkts_vf()
5741 if (tx_len16_to_desc(txp->len16 + txpkts1_len16()) > avail) { in add_to_txpkts_vf()
5746 if (m->m_pkthdr.len + txp->plen > 65535) in add_to_txpkts_vf()
5751 txp->len16 += txpkts1_len16(); in add_to_txpkts_vf()
5752 txp->plen += m->m_pkthdr.len; in add_to_txpkts_vf()
5753 txp->mb[txp->npkt++] = m; in add_to_txpkts_vf()
5754 if (txp->npkt == txp->max_npkt) in add_to_txpkts_vf()
5757 txp->len16 = howmany(sizeof(struct fw_eth_tx_pkts_vm_wr), 16) + in add_to_txpkts_vf()
5759 if (tx_len16_to_desc(txp->len16) > avail) in add_to_txpkts_vf()
5761 txp->npkt = 1; in add_to_txpkts_vf()
5762 txp->wr_type = 1; in add_to_txpkts_vf()
5763 txp->plen = m->m_pkthdr.len; in add_to_txpkts_vf()
5764 txp->mb[0] = m; in add_to_txpkts_vf()
5774 struct txpkts *txp = &txq->txp; in add_to_txpkts_pf()
5777 MPASS(!(sc->flags & IS_VF)); in add_to_txpkts_pf()
5782 *send = txp->npkt > 0; in add_to_txpkts_pf()
5788 if (txp->npkt == 0) { in add_to_txpkts_pf()
5789 if (m->m_pkthdr.len > 65535) in add_to_txpkts_pf()
5792 txp->wr_type = 0; in add_to_txpkts_pf()
5793 txp->len16 = in add_to_txpkts_pf()
5797 txp->wr_type = 1; in add_to_txpkts_pf()
5798 txp->len16 = in add_to_txpkts_pf()
5802 if (tx_len16_to_desc(txp->len16) > avail) in add_to_txpkts_pf()
5804 txp->npkt = 1; in add_to_txpkts_pf()
5805 txp->plen = m->m_pkthdr.len; in add_to_txpkts_pf()
5806 txp->mb[0] = m; in add_to_txpkts_pf()
5808 MPASS(tx_len16_to_desc(txp->len16) <= avail); in add_to_txpkts_pf()
5809 MPASS(txp->npkt < txp->max_npkt); in add_to_txpkts_pf()
5811 if (m->m_pkthdr.len + txp->plen > 65535) { in add_to_txpkts_pf()
5817 MPASS(txp->wr_type == 0 || txp->wr_type == 1); in add_to_txpkts_pf()
5818 if (txp->wr_type == 0) { in add_to_txpkts_pf()
5819 if (tx_len16_to_desc(txp->len16 + in add_to_txpkts_pf()
5822 txp->len16 += txpkts0_len16(nsegs); in add_to_txpkts_pf()
5826 if (tx_len16_to_desc(txp->len16 + txpkts1_len16()) > in add_to_txpkts_pf()
5829 txp->len16 += txpkts1_len16(); in add_to_txpkts_pf()
5832 txp->plen += m->m_pkthdr.len; in add_to_txpkts_pf()
5833 txp->mb[txp->npkt++] = m; in add_to_txpkts_pf()
5834 if (txp->npkt == txp->max_npkt) in add_to_txpkts_pf()
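/*
 * Illustrative sketch (not driver code): the coalescing rules visible in
 * add_to_txpkts_vf/pf() above.  A packet joins the pending txpkts WR only if
 * the combined payload stays within the 16-bit plen field (65535), the
 * enlarged WR still fits in the available descriptors, and the packet count
 * stays under max_npkt; otherwise the caller is told to flush the pending WR
 * first.  ex_len16_to_desc() and the per-packet len16 value are assumptions
 * for the example only.
 */
#include <stdbool.h>
#include <stdio.h>

#define EX_EQ_ESIZE	64

static unsigned int
ex_len16_to_desc(unsigned int len16)
{
	return ((len16 * 16 + EX_EQ_ESIZE - 1) / EX_EQ_ESIZE);
}

static bool
ex_can_coalesce(unsigned int cur_len16, unsigned int pkt_len16,
    unsigned int cur_plen, unsigned int pkt_len, unsigned int npkt,
    unsigned int max_npkt, unsigned int avail_desc)
{
	if (npkt >= max_npkt)
		return (false);
	if (cur_plen + pkt_len > 65535)
		return (false);
	return (ex_len16_to_desc(cur_len16 + pkt_len16) <= avail_desc);
}

int
main(void)
{
	printf("packet fits in pending txpkts: %d\n",
	    ex_can_coalesce(8, 2, 3000, 1500, 3, 7, 4));
	return (0);
}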
5841 * Write a txpkts WR for the packets in txp to the hardware descriptors, update
5850 const struct txpkts *txp = &txq->txp; in write_txpkts_wr()
5851 struct sge_eq *eq = &txq->eq; in write_txpkts_wr()
5852 struct fw_eth_tx_pkts_wr *wr; in write_txpkts_wr() local
5861 MPASS(txp->npkt > 0); in write_txpkts_wr()
5862 MPASS(txp->len16 <= howmany(SGE_MAX_WR_LEN, 16)); in write_txpkts_wr()
5864 wr = (void *)&eq->desc[eq->pidx]; in write_txpkts_wr()
5865 wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)); in write_txpkts_wr()
5866 wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(txp->len16)); in write_txpkts_wr()
5867 wr->plen = htobe16(txp->plen); in write_txpkts_wr()
5868 wr->npkt = txp->npkt; in write_txpkts_wr()
5869 wr->r3 = 0; in write_txpkts_wr()
5870 wr->type = txp->wr_type; in write_txpkts_wr()
5871 flitp = wr + 1; in write_txpkts_wr()
5875 * set then we know the WR is going to wrap around somewhere. We'll in write_txpkts_wr()
5878 ndesc = tx_len16_to_desc(txp->len16); in write_txpkts_wr()
5880 checkwrap = eq->sidx - ndesc < eq->pidx; in write_txpkts_wr()
5881 for (i = 0; i < txp->npkt; i++) { in write_txpkts_wr()
5882 m = txp->mb[i]; in write_txpkts_wr()
5883 if (txp->wr_type == 0) { in write_txpkts_wr()
5889 ulpmc->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) | in write_txpkts_wr()
5890 V_ULP_TXPKT_DEST(0) | V_ULP_TXPKT_FID(eq->iqid)); in write_txpkts_wr()
5891 ulpmc->len = htobe32(txpkts0_len16(mbuf_nsegs(m))); in write_txpkts_wr()
5895 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) | in write_txpkts_wr()
5897 ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core)); in write_txpkts_wr()
5901 (uintptr_t)cpl == (uintptr_t)&eq->desc[eq->sidx]) in write_txpkts_wr()
5902 cpl = (void *)&eq->desc[0]; in write_txpkts_wr()
5912 txq->vxlan_txcsum++; in write_txpkts_wr()
5914 txq->txcsum++; in write_txpkts_wr()
5920 V_TXPKT_VLAN(m->m_pkthdr.ether_vtag); in write_txpkts_wr()
5921 txq->vlan_insertion++; in write_txpkts_wr()
5925 cpl->ctrl0 = txq->cpl_ctrl0; in write_txpkts_wr()
5926 cpl->pack = 0; in write_txpkts_wr()
5927 cpl->len = htobe16(m->m_pkthdr.len); in write_txpkts_wr()
5928 cpl->ctrl1 = htobe64(ctrl1); in write_txpkts_wr()
5932 (uintptr_t)flitp == (uintptr_t)&eq->desc[eq->sidx]) in write_txpkts_wr()
5933 flitp = (void *)&eq->desc[0]; in write_txpkts_wr()
5938 last->m_nextpkt = m; in write_txpkts_wr()
5942 txq->sgl_wrs++; in write_txpkts_wr()
5943 if (txp->wr_type == 0) { in write_txpkts_wr()
5944 txq->txpkts0_pkts += txp->npkt; in write_txpkts_wr()
5945 txq->txpkts0_wrs++; in write_txpkts_wr()
5947 txq->txpkts1_pkts += txp->npkt; in write_txpkts_wr()
5948 txq->txpkts1_wrs++; in write_txpkts_wr()
5951 txsd = &txq->sdesc[eq->pidx]; in write_txpkts_wr()
5952 txsd->m = txp->mb[0]; in write_txpkts_wr()
5953 txsd->desc_used = ndesc; in write_txpkts_wr()
5961 const struct txpkts *txp = &txq->txp; in write_txpkts_vm_wr()
5962 struct sge_eq *eq = &txq->eq; in write_txpkts_vm_wr()
5963 struct fw_eth_tx_pkts_vm_wr *wr; in write_txpkts_vm_wr() local
5972 MPASS(txp->npkt > 0); in write_txpkts_vm_wr()
5973 MPASS(txp->wr_type == 1); /* VF supports type 1 only */ in write_txpkts_vm_wr()
5974 MPASS(txp->mb[0] != NULL); in write_txpkts_vm_wr()
5975 MPASS(txp->len16 <= howmany(SGE_MAX_WR_LEN, 16)); in write_txpkts_vm_wr()
5977 wr = (void *)&eq->desc[eq->pidx]; in write_txpkts_vm_wr()
5978 wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_VM_WR)); in write_txpkts_vm_wr()
5979 wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(txp->len16)); in write_txpkts_vm_wr()
5980 wr->r3 = 0; in write_txpkts_vm_wr()
5981 wr->plen = htobe16(txp->plen); in write_txpkts_vm_wr()
5982 wr->npkt = txp->npkt; in write_txpkts_vm_wr()
5983 wr->r4 = 0; in write_txpkts_vm_wr()
5984 memcpy(&wr->ethmacdst[0], &txp->ethmacdst[0], 16); in write_txpkts_vm_wr()
5985 flitp = wr + 1; in write_txpkts_vm_wr()
5989 * the WR will take 32B so we check for the end of the descriptor ring in write_txpkts_vm_wr()
5992 ndesc = tx_len16_to_desc(txp->len16); in write_txpkts_vm_wr()
5994 for (i = 0; i < txp->npkt; i++) { in write_txpkts_vm_wr()
5995 m = txp->mb[i]; in write_txpkts_vm_wr()
5996 if (i & 1 && (uintptr_t)flitp == (uintptr_t)&eq->desc[eq->sidx]) in write_txpkts_vm_wr()
5997 flitp = &eq->desc[0]; in write_txpkts_vm_wr()
6003 txq->txcsum++; /* some hardware assistance provided */ in write_txpkts_vm_wr()
6008 V_TXPKT_VLAN(m->m_pkthdr.ether_vtag); in write_txpkts_vm_wr()
6009 txq->vlan_insertion++; in write_txpkts_vm_wr()
6010 } else if (sc->vlan_id) in write_txpkts_vm_wr()
6011 ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(sc->vlan_id); in write_txpkts_vm_wr()
6014 cpl->ctrl0 = txq->cpl_ctrl0; in write_txpkts_vm_wr()
6015 cpl->pack = 0; in write_txpkts_vm_wr()
6016 cpl->len = htobe16(m->m_pkthdr.len); in write_txpkts_vm_wr()
6017 cpl->ctrl1 = htobe64(ctrl1); in write_txpkts_vm_wr()
6024 last->m_nextpkt = m; in write_txpkts_vm_wr()
6028 txq->sgl_wrs++; in write_txpkts_vm_wr()
6029 txq->txpkts1_pkts += txp->npkt; in write_txpkts_vm_wr()
6030 txq->txpkts1_wrs++; in write_txpkts_vm_wr()
6032 txsd = &txq->sdesc[eq->pidx]; in write_txpkts_vm_wr()
6033 txsd->m = txp->mb[0]; in write_txpkts_vm_wr()
6034 txsd->desc_used = ndesc; in write_txpkts_vm_wr()
6046 struct sge_eq *eq = &txq->eq; in write_gl_to_txd()
6047 struct sglist *gl = txq->gl; in write_gl_to_txd()
6055 MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]); in write_gl_to_txd()
6056 MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]); in write_gl_to_txd()
6059 nsegs = gl->sg_nseg; in write_gl_to_txd()
6062 nflits = (3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2; in write_gl_to_txd()
6064 wrap = (__be64 *)(&eq->desc[eq->sidx]); in write_gl_to_txd()
6065 seg = &gl->sg_segs[0]; in write_gl_to_txd()
6074 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | in write_gl_to_txd()
6076 usgl->len0 = htobe32(seg->ss_len); in write_gl_to_txd()
6077 usgl->addr0 = htobe64(seg->ss_paddr); in write_gl_to_txd()
6084 for (i = 0; i < nsegs - 1; i++, seg++) { in write_gl_to_txd()
6085 usgl->sge[i / 2].len[i & 1] = htobe32(seg->ss_len); in write_gl_to_txd()
6086 usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ss_paddr); in write_gl_to_txd()
6089 usgl->sge[i / 2].len[1] = htobe32(0); in write_gl_to_txd()
6097 for (i = 0; i < nflits - 2; i++) { in write_gl_to_txd()
6099 flitp = (void *)eq->desc; in write_gl_to_txd()
6100 *flitp++ = get_flit(seg, nsegs - 1, i); in write_gl_to_txd()
6111 *to = (void *)eq->desc; in write_gl_to_txd()
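/*
 * Illustrative sketch (not driver code): the flit count used by
 * write_gl_to_txd() above.  The ulptx_sgl head plus the first segment take
 * 2 flits (16 bytes); every further pair of segments takes 3 flits (two
 * 32-bit lengths plus two 64-bit addresses), and a leftover odd segment takes
 * 2 flits with the unused length slot zeroed, which is what
 * nflits = (3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2 computes.
 */
#include <stdio.h>

static unsigned int
ex_sgl_nflits(unsigned int nsegs)
{
	return ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2);
}

int
main(void)
{
	unsigned int nsegs;

	for (nsegs = 1; nsegs <= 5; nsegs++)
		printf("%u segments -> %u flits (%u bytes)\n", nsegs,
		    ex_sgl_nflits(nsegs), ex_sgl_nflits(nsegs) * 8);
	return (0);
}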
6120 MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]); in copy_to_txd()
6121 MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]); in copy_to_txd()
6124 (uintptr_t)&eq->desc[eq->sidx])) { in copy_to_txd()
6128 int portion = (uintptr_t)&eq->desc[eq->sidx] - (uintptr_t)(*to); in copy_to_txd()
6132 portion = len - portion; /* remaining */ in copy_to_txd()
6133 bcopy(from, (void *)eq->desc, portion); in copy_to_txd()
6134 (*to) = (caddr_t)eq->desc + portion; in copy_to_txd()
6145 db = eq->doorbells; in ring_eq_db()
6150 switch (ffs(db) - 1) { in ring_eq_db()
6152 *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n)); in ring_eq_db()
6164 KASSERT(eq->udb_qid == 0 && n == 1, in ring_eq_db()
6166 __func__, eq->doorbells, n, eq->dbidx, eq)); in ring_eq_db()
6168 dst = (volatile void *)((uintptr_t)eq->udb + UDBS_WR_OFFSET - in ring_eq_db()
6170 i = eq->dbidx; in ring_eq_db()
6171 src = (void *)&eq->desc[i]; in ring_eq_db()
6172 while (src != (void *)&eq->desc[i + 1]) in ring_eq_db()
6179 *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n)); in ring_eq_db()
6184 t4_write_reg(sc, sc->sge_kdoorbell_reg, in ring_eq_db()
6185 V_QID(eq->cntxt_id) | V_PIDX(n)); in ring_eq_db()
6189 IDXINCR(eq->dbidx, n, eq->sidx); in ring_eq_db()
6198 return (IDXDIFF(hw_cidx, eq->cidx, eq->sidx)); in reclaimable_tx_desc()
6207 pidx = eq->pidx; in total_available_tx_desc()
6210 return (eq->sidx - 1); in total_available_tx_desc()
6212 return (IDXDIFF(hw_cidx, pidx, eq->sidx) - 1); in total_available_tx_desc()
6218 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; in read_hw_cidx()
6219 uint16_t cidx = spg->cidx; /* stable snapshot */ in read_hw_cidx()
6231 struct sge_eq *eq = &txq->eq; in reclaim_tx_descs()
6243 txsd = &txq->sdesc[eq->cidx]; in reclaim_tx_descs()
6244 ndesc = txsd->desc_used; in reclaim_tx_descs()
6252 __func__, eq->cidx)); in reclaim_tx_descs()
6254 for (m = txsd->m; m != NULL; m = nextpkt) { in reclaim_tx_descs()
6255 nextpkt = m->m_nextpkt; in reclaim_tx_descs()
6256 m->m_nextpkt = NULL; in reclaim_tx_descs()
6260 can_reclaim -= ndesc; in reclaim_tx_descs()
6261 IDXINCR(eq->cidx, ndesc, eq->sidx); in reclaim_tx_descs()
6271 struct sge_eq *eq = &txq->eq; in tx_reclaim()
6277 if (eq->cidx == eq->pidx) in tx_reclaim()
6278 eq->equeqidx = eq->pidx; in tx_reclaim()
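/*
 * Illustrative sketch (not driver code): the ring-occupancy arithmetic in
 * reclaimable_tx_desc() and total_available_tx_desc() above.  The hardware
 * publishes its consumer index in the status page; descriptors between the
 * software cidx and that value can be reclaimed, and one slot is kept unused
 * so a full ring can be told apart from an empty one.  The index values
 * below are made up for the example.
 */
#include <stdio.h>

static unsigned int
ex_idxdiff(unsigned int head, unsigned int tail, unsigned int size)
{
	return (head >= tail ? head - tail : size - tail + head);
}

int
main(void)
{
	unsigned int sidx = 1024;	/* ring size (assumed) */
	unsigned int cidx = 1000, pidx = 20, hw_cidx = 10;
	unsigned int reclaimable = ex_idxdiff(hw_cidx, cidx, sidx);
	unsigned int available = (hw_cidx == pidx) ?
	    sidx - 1 : ex_idxdiff(hw_cidx, pidx, sidx) - 1;

	printf("reclaimable = %u, available = %u\n", reclaimable, available);
	return (0);
}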
6310 int i, zidx = -1; in find_refill_source()
6311 struct rx_buf_info *rxb = &sc->sge.rx_buf_info[0]; in find_refill_source()
6315 if (rxb->hwidx2 == -1) in find_refill_source()
6317 if (rxb->size1 < PAGE_SIZE && in find_refill_source()
6318 rxb->size1 < largest_rx_cluster) in find_refill_source()
6320 if (rxb->size1 > largest_rx_cluster) in find_refill_source()
6322 MPASS(rxb->size1 - rxb->size2 >= CL_METADATA_SIZE); in find_refill_source()
6323 if (rxb->size2 >= maxp) in find_refill_source()
6329 if (rxb->hwidx1 == -1) in find_refill_source()
6331 if (rxb->size1 > largest_rx_cluster) in find_refill_source()
6333 if (rxb->size1 >= maxp) in find_refill_source()
6345 mtx_lock(&sc->sfl_lock); in add_fl_to_sfl()
6347 if ((fl->flags & FL_DOOMED) == 0) { in add_fl_to_sfl()
6348 fl->flags |= FL_STARVING; in add_fl_to_sfl()
6349 TAILQ_INSERT_TAIL(&sc->sfl, fl, link); in add_fl_to_sfl()
6350 callout_reset(&sc->sfl_callout, hz / 5, refill_sfl, sc); in add_fl_to_sfl()
6353 mtx_unlock(&sc->sfl_lock); in add_fl_to_sfl()
6361 atomic_readandclear_int(&eq->equiq); in handle_wrq_egr_update()
6362 taskqueue_enqueue(sc->tq[eq->port_id], &wrq->wrq_tx_task); in handle_wrq_egr_update()
6370 MPASS(eq->type == EQ_ETH); in handle_eth_egr_update()
6372 atomic_readandclear_int(&eq->equiq); in handle_eth_egr_update()
6373 if (mp_ring_is_idle(txq->r)) in handle_eth_egr_update()
6374 taskqueue_enqueue(sc->tq[eq->port_id], &txq->tx_reclaim_task); in handle_eth_egr_update()
6376 mp_ring_check_drainage(txq->r, 64); in handle_eth_egr_update()
6384 unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid)); in handle_sge_egr_update()
6385 struct adapter *sc = iq->adapter; in handle_sge_egr_update()
6386 struct sge *s = &sc->sge; in handle_sge_egr_update()
6393 rss->opcode)); in handle_sge_egr_update()
6395 eq = s->eqmap[qid - s->eq_start - s->eq_base]; in handle_sge_egr_update()
6396 (*h[eq->type])(sc, eq); in handle_sge_egr_update()
6408 struct adapter *sc = iq->adapter; in handle_fw_msg()
6412 rss->opcode)); in handle_fw_msg()
6414 if (cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL) { in handle_fw_msg()
6417 rss2 = (const struct rss_header *)&cpl->data[0]; in handle_fw_msg()
6418 return (t4_cpl_handler[rss2->opcode](iq, rss2, m)); in handle_fw_msg()
6421 return (t4_fw_msg_handler[cpl->type](sc, &cpl->data[0])); in handle_fw_msg()
6425 * t4_handle_wrerr_rpl - process a FW work request error message
6439 device_get_nameunit(adap->dev), opcode); in t4_handle_wrerr_rpl()
6442 log(LOG_ERR, "%s: FW_ERROR (%s) ", device_get_nameunit(adap->dev), in t4_handle_wrerr_rpl()
6443 G_FW_ERROR_CMD_FATAL(be32toh(e->op_to_type)) ? "fatal" : in t4_handle_wrerr_rpl()
6444 "non-fatal"); in t4_handle_wrerr_rpl()
6445 switch (G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type))) { in t4_handle_wrerr_rpl()
6448 for (i = 0; i < nitems(e->u.exception.info); i++) in t4_handle_wrerr_rpl()
6450 be32toh(e->u.exception.info[i])); in t4_handle_wrerr_rpl()
6455 be32toh(e->u.hwmodule.regaddr), in t4_handle_wrerr_rpl()
6456 be32toh(e->u.hwmodule.regval)); in t4_handle_wrerr_rpl()
6459 log(LOG_ERR, "WR cidx %d PF %d VF %d eqid %d hdr:\n", in t4_handle_wrerr_rpl()
6460 be16toh(e->u.wr.cidx), in t4_handle_wrerr_rpl()
6461 G_FW_ERROR_CMD_PFN(be16toh(e->u.wr.pfn_vfn)), in t4_handle_wrerr_rpl()
6462 G_FW_ERROR_CMD_VFN(be16toh(e->u.wr.pfn_vfn)), in t4_handle_wrerr_rpl()
6463 be32toh(e->u.wr.eqid)); in t4_handle_wrerr_rpl()
6464 for (i = 0; i < nitems(e->u.wr.wrhdr); i++) in t4_handle_wrerr_rpl()
6466 e->u.wr.wrhdr[i]); in t4_handle_wrerr_rpl()
6471 be16toh(e->u.acl.cidx), in t4_handle_wrerr_rpl()
6472 G_FW_ERROR_CMD_PFN(be16toh(e->u.acl.pfn_vfn)), in t4_handle_wrerr_rpl()
6473 G_FW_ERROR_CMD_VFN(be16toh(e->u.acl.pfn_vfn)), in t4_handle_wrerr_rpl()
6474 be32toh(e->u.acl.eqid), in t4_handle_wrerr_rpl()
6475 G_FW_ERROR_CMD_MV(be16toh(e->u.acl.mv_pkd)) ? "vlanid" : in t4_handle_wrerr_rpl()
6477 for (i = 0; i < nitems(e->u.acl.val); i++) in t4_handle_wrerr_rpl()
6478 log(LOG_ERR, " %02x", e->u.acl.val[i]); in t4_handle_wrerr_rpl()
6483 G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type))); in t4_handle_wrerr_rpl()
6492 struct rx_buf_info *rxb = &sc->sge.rx_buf_info[0]; in bufidx_used()
6496 if (rxb->size1 > largest_rx_cluster) in bufidx_used()
6498 if (rxb->hwidx1 == idx || rxb->hwidx2 == idx) in bufidx_used()
6509 struct sge_params *sp = &sc->params.sge; in sysctl_bufsizes()
6521 sbuf_printf(&sb, "%u%c ", sp->sge_fl_buffer_size[i], c); in sysctl_bufsizes()
6533 * len16 for a txpkt_eo WR with a GL. Includes the firmware work request header.
6547 nsegs--; /* first segment is part of ulptx_sgl */ in txpkt_eo_len16()
6568 u_int pfvf = pi->adapter->pf << S_FW_VIID_PFN; in send_etid_flowc_wr()
6571 mtx_assert(&cst->lock, MA_OWNED); in send_etid_flowc_wr()
6572 MPASS((cst->flags & (EO_FLOWC_PENDING | EO_FLOWC_RPL_PENDING)) == in send_etid_flowc_wr()
6575 flowc = start_wrq_wr(&cst->eo_txq->wrq, ETID_FLOWC_LEN16, &cookie); in send_etid_flowc_wr()
6580 flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) | in send_etid_flowc_wr()
6582 flowc->flowid_len16 = htonl(V_FW_WR_LEN16(ETID_FLOWC_LEN16) | in send_etid_flowc_wr()
6583 V_FW_WR_FLOWID(cst->etid)); in send_etid_flowc_wr()
6584 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; in send_etid_flowc_wr()
6585 flowc->mnemval[0].val = htobe32(pfvf); in send_etid_flowc_wr()
6586 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; in send_etid_flowc_wr()
6587 flowc->mnemval[1].val = htobe32(pi->tx_chan); in send_etid_flowc_wr()
6588 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; in send_etid_flowc_wr()
6589 flowc->mnemval[2].val = htobe32(pi->tx_chan); in send_etid_flowc_wr()
6590 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; in send_etid_flowc_wr()
6591 flowc->mnemval[3].val = htobe32(cst->iqid); in send_etid_flowc_wr()
6592 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_EOSTATE; in send_etid_flowc_wr()
6593 flowc->mnemval[4].val = htobe32(FW_FLOWC_MNEM_EOSTATE_ESTABLISHED); in send_etid_flowc_wr()
6594 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS; in send_etid_flowc_wr()
6595 flowc->mnemval[5].val = htobe32(cst->schedcl); in send_etid_flowc_wr()
6597 commit_wrq_wr(&cst->eo_txq->wrq, flowc, &cookie); in send_etid_flowc_wr()
6599 cst->flags &= ~EO_FLOWC_PENDING; in send_etid_flowc_wr()
6600 cst->flags |= EO_FLOWC_RPL_PENDING; in send_etid_flowc_wr()
6601 MPASS(cst->tx_credits >= ETID_FLOWC_LEN16); /* flowc is first WR. */ in send_etid_flowc_wr()
6602 cst->tx_credits -= ETID_FLOWC_LEN16; in send_etid_flowc_wr()
6616 mtx_assert(&cst->lock, MA_OWNED); in send_etid_flush_wr()
6618 flowc = start_wrq_wr(&cst->eo_txq->wrq, ETID_FLUSH_LEN16, &cookie); in send_etid_flush_wr()
6623 flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) | in send_etid_flush_wr()
6625 flowc->flowid_len16 = htobe32(V_FW_WR_LEN16(ETID_FLUSH_LEN16) | in send_etid_flush_wr()
6626 V_FW_WR_FLOWID(cst->etid)); in send_etid_flush_wr()
6628 commit_wrq_wr(&cst->eo_txq->wrq, flowc, &cookie); in send_etid_flush_wr()
6630 cst->flags |= EO_FLUSH_RPL_PENDING; in send_etid_flush_wr()
6631 MPASS(cst->tx_credits >= ETID_FLUSH_LEN16); in send_etid_flush_wr()
6632 cst->tx_credits -= ETID_FLUSH_LEN16; in send_etid_flush_wr()
6633 cst->ncompl++; in send_etid_flush_wr()
6637 write_ethofld_wr(struct cxgbe_rate_tag *cst, struct fw_eth_tx_eo_wr *wr, in write_ethofld_wr() argument
6649 mtx_assert(&cst->lock, MA_OWNED); in write_ethofld_wr()
6651 KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && in write_ethofld_wr()
6652 m0->m_pkthdr.l4hlen > 0, in write_ethofld_wr()
6657 pktlen = m0->m_pkthdr.len; in write_ethofld_wr()
6661 immhdrs = m0->m_pkthdr.l2hlen + m0->m_pkthdr.l3hlen + m0->m_pkthdr.l4hlen; in write_ethofld_wr()
6664 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_EO_WR) | in write_ethofld_wr()
6666 wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(len16) | in write_ethofld_wr()
6667 V_FW_WR_FLOWID(cst->etid)); in write_ethofld_wr()
6668 wr->r3 = 0; in write_ethofld_wr()
6670 wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG; in write_ethofld_wr()
6671 wr->u.udpseg.ethlen = m0->m_pkthdr.l2hlen; in write_ethofld_wr()
6672 wr->u.udpseg.iplen = htobe16(m0->m_pkthdr.l3hlen); in write_ethofld_wr()
6673 wr->u.udpseg.udplen = m0->m_pkthdr.l4hlen; in write_ethofld_wr()
6674 wr->u.udpseg.rtplen = 0; in write_ethofld_wr()
6675 wr->u.udpseg.r4 = 0; in write_ethofld_wr()
6676 wr->u.udpseg.mss = htobe16(pktlen - immhdrs); in write_ethofld_wr()
6677 wr->u.udpseg.schedpktsize = wr->u.udpseg.mss; in write_ethofld_wr()
6678 wr->u.udpseg.plen = htobe32(pktlen - immhdrs); in write_ethofld_wr()
6679 cpl = (void *)(wr + 1); in write_ethofld_wr()
6682 wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG; in write_ethofld_wr()
6683 wr->u.tcpseg.ethlen = m0->m_pkthdr.l2hlen; in write_ethofld_wr()
6684 wr->u.tcpseg.iplen = htobe16(m0->m_pkthdr.l3hlen); in write_ethofld_wr()
6685 wr->u.tcpseg.tcplen = m0->m_pkthdr.l4hlen; in write_ethofld_wr()
6686 wr->u.tcpseg.tsclk_tsoff = mbuf_eo_tsclk_tsoff(m0); in write_ethofld_wr()
6687 wr->u.tcpseg.r4 = 0; in write_ethofld_wr()
6688 wr->u.tcpseg.r5 = 0; in write_ethofld_wr()
6689 wr->u.tcpseg.plen = htobe32(pktlen - immhdrs); in write_ethofld_wr()
6692 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); in write_ethofld_wr()
6694 wr->u.tcpseg.mss = htobe16(m0->m_pkthdr.tso_segsz); in write_ethofld_wr()
6698 V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen - in write_ethofld_wr()
6700 V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) | in write_ethofld_wr()
6701 V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2); in write_ethofld_wr()
6702 if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) in write_ethofld_wr()
6704 lso->lso_ctrl = htobe32(ctrl); in write_ethofld_wr()
6705 lso->ipid_ofst = htobe16(0); in write_ethofld_wr()
6706 lso->mss = htobe16(m0->m_pkthdr.tso_segsz); in write_ethofld_wr()
6707 lso->seqno_offset = htobe32(0); in write_ethofld_wr()
6708 lso->len = htobe32(pktlen); in write_ethofld_wr()
6712 wr->u.tcpseg.mss = htobe16(0xffff); in write_ethofld_wr()
6713 cpl = (void *)(wr + 1); in write_ethofld_wr()
6719 ctrl1 = csum_to_ctrl(cst->adapter, m0); in write_ethofld_wr()
6724 V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); in write_ethofld_wr()
6728 cpl->ctrl0 = cst->ctrl0; in write_ethofld_wr()
6729 cpl->pack = 0; in write_ethofld_wr()
6730 cpl->len = htobe16(pktlen); in write_ethofld_wr()
6731 cpl->ctrl1 = htobe64(ctrl1); in write_ethofld_wr()
6741 /* zero-pad up to the next 16-byte boundary, if not 16-byte aligned */ in write_ethofld_wr()
6743 pad = 16 - (immhdrs & 0xf); in write_ethofld_wr()
6747 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | in write_ethofld_wr()
6751 for (; m0 != NULL; m0 = m0->m_next) { in write_ethofld_wr()
6752 if (__predict_false(m0->m_len == 0)) in write_ethofld_wr()
6754 if (immhdrs >= m0->m_len) { in write_ethofld_wr()
6755 immhdrs -= m0->m_len; in write_ethofld_wr()
6758 if (m0->m_flags & M_EXTPG) in write_ethofld_wr()
6760 mtod(m0, vm_offset_t), m0->m_len); in write_ethofld_wr()
6763 m0->m_len - immhdrs); in write_ethofld_wr()
6769 * Zero pad last 8B in case the WR doesn't end on a 16B in write_ethofld_wr()
6772 *(uint64_t *)((char *)wr + len16 * 16 - 8) = 0; in write_ethofld_wr()
6774 usgl->len0 = htobe32(segs[0].ss_len); in write_ethofld_wr()
6775 usgl->addr0 = htobe64(segs[0].ss_paddr); in write_ethofld_wr()
6776 for (i = 0; i < nsegs - 1; i++) { in write_ethofld_wr()
6777 usgl->sge[i / 2].len[i & 1] = htobe32(segs[i + 1].ss_len); in write_ethofld_wr()
6778 usgl->sge[i / 2].addr[i & 1] = htobe64(segs[i + 1].ss_paddr); in write_ethofld_wr()
6781 usgl->sge[i / 2].len[1] = htobe32(0); in write_ethofld_wr()
6792 struct fw_eth_tx_eo_wr *wr; in ethofld_tx() local
6794 mtx_assert(&cst->lock, MA_OWNED); in ethofld_tx()
6796 while ((m = mbufq_first(&cst->pending_tx)) != NULL) { in ethofld_tx()
6802 if (next_credits > cst->tx_credits) { in ethofld_tx()
6808 MPASS(cst->ncompl > 0); in ethofld_tx()
6811 wr = start_wrq_wr(&cst->eo_txq->wrq, next_credits, &cookie); in ethofld_tx()
6812 if (__predict_false(wr == NULL)) { in ethofld_tx()
6814 MPASS(cst->ncompl > 0); in ethofld_tx()
6817 cst->tx_credits -= next_credits; in ethofld_tx()
6818 cst->tx_nocompl += next_credits; in ethofld_tx()
6819 compl = cst->ncompl == 0 || cst->tx_nocompl >= cst->tx_total / 2; in ethofld_tx()
6820 ETHER_BPF_MTAP(cst->com.ifp, m); in ethofld_tx()
6821 write_ethofld_wr(cst, wr, m, compl); in ethofld_tx()
6822 commit_wrq_wr(&cst->eo_txq->wrq, wr, &cookie); in ethofld_tx()
6824 cst->ncompl++; in ethofld_tx()
6825 cst->tx_nocompl = 0; in ethofld_tx()
6827 (void) mbufq_dequeue(&cst->pending_tx); in ethofld_tx()
6839 m->m_pkthdr.snd_tag = NULL; in ethofld_tx()
6840 m->m_pkthdr.csum_flags &= ~CSUM_SND_TAG; in ethofld_tx()
6841 m_snd_tag_rele(&cst->com); in ethofld_tx()
6843 mbufq_enqueue(&cst->pending_fwack, m); in ethofld_tx()
6854 MPASS(m0->m_nextpkt == NULL); in ethofld_transmit()
6855 MPASS(m0->m_pkthdr.csum_flags & CSUM_SND_TAG); in ethofld_transmit()
6856 MPASS(m0->m_pkthdr.snd_tag != NULL); in ethofld_transmit()
6857 cst = mst_to_crt(m0->m_pkthdr.snd_tag); in ethofld_transmit()
6859 mtx_lock(&cst->lock); in ethofld_transmit()
6860 MPASS(cst->flags & EO_SND_TAG_REF); in ethofld_transmit()
6862 if (__predict_false(cst->flags & EO_FLOWC_PENDING)) { in ethofld_transmit()
6864 struct port_info *pi = vi->pi; in ethofld_transmit()
6865 struct adapter *sc = pi->adapter; in ethofld_transmit()
6866 const uint32_t rss_mask = vi->rss_size - 1; in ethofld_transmit()
6869 cst->eo_txq = &sc->sge.ofld_txq[vi->first_ofld_txq]; in ethofld_transmit()
6871 rss_hash = m0->m_pkthdr.flowid; in ethofld_transmit()
6875 cst->iqid = vi->rss[rss_hash & rss_mask]; in ethofld_transmit()
6876 cst->eo_txq += rss_hash % vi->nofldtxq; in ethofld_transmit()
6882 if (__predict_false(cst->plen + m0->m_pkthdr.len > eo_max_backlog)) { in ethofld_transmit()
6887 mbufq_enqueue(&cst->pending_tx, m0); in ethofld_transmit()
6888 cst->plen += m0->m_pkthdr.len; in ethofld_transmit()
6891 * Hold an extra reference on the tag while generating work in ethofld_transmit()
6896 m_snd_tag_ref(&cst->com); in ethofld_transmit()
6898 mtx_unlock(&cst->lock); in ethofld_transmit()
6899 m_snd_tag_rele(&cst->com); in ethofld_transmit()
6903 mtx_unlock(&cst->lock); in ethofld_transmit()
6911 struct adapter *sc = iq->adapter; in ethofld_fw4_ack()
6916 uint8_t credits = cpl->credits; in ethofld_fw4_ack()
6919 mtx_lock(&cst->lock); in ethofld_fw4_ack()
6920 if (__predict_false(cst->flags & EO_FLOWC_RPL_PENDING)) { in ethofld_fw4_ack()
6922 credits -= ETID_FLOWC_LEN16; in ethofld_fw4_ack()
6923 cst->flags &= ~EO_FLOWC_RPL_PENDING; in ethofld_fw4_ack()
6926 KASSERT(cst->ncompl > 0, in ethofld_fw4_ack()
6929 cst->ncompl--; in ethofld_fw4_ack()
6932 m = mbufq_dequeue(&cst->pending_fwack); in ethofld_fw4_ack()
6938 MPASS((cst->flags & in ethofld_fw4_ack()
6942 MPASS(cst->tx_credits + cpl->credits == cst->tx_total); in ethofld_fw4_ack()
6943 MPASS(cst->ncompl == 0); in ethofld_fw4_ack()
6945 cst->flags &= ~EO_FLUSH_RPL_PENDING; in ethofld_fw4_ack()
6946 cst->tx_credits += cpl->credits; in ethofld_fw4_ack()
6951 ("%s: too many credits (%u, %u)", __func__, cpl->credits, in ethofld_fw4_ack()
6955 cpl->credits, credits, mbuf_eo_len16(m))); in ethofld_fw4_ack()
6956 credits -= mbuf_eo_len16(m); in ethofld_fw4_ack()
6957 cst->plen -= m->m_pkthdr.len; in ethofld_fw4_ack()
6961 cst->tx_credits += cpl->credits; in ethofld_fw4_ack()
6962 MPASS(cst->tx_credits <= cst->tx_total); in ethofld_fw4_ack()
6964 if (cst->flags & EO_SND_TAG_REF) { in ethofld_fw4_ack()
6966 * As with ethofld_transmit(), hold an extra reference in ethofld_fw4_ack()
6969 m_snd_tag_ref(&cst->com); in ethofld_fw4_ack()
6970 m = mbufq_first(&cst->pending_tx); in ethofld_fw4_ack()
6971 if (m != NULL && cst->tx_credits >= mbuf_eo_len16(m)) in ethofld_fw4_ack()
6973 mtx_unlock(&cst->lock); in ethofld_fw4_ack()
6974 m_snd_tag_rele(&cst->com); in ethofld_fw4_ack()
6979 * should hold a reference to the tag. in ethofld_fw4_ack()
6981 MPASS(mbufq_first(&cst->pending_tx) == NULL); in ethofld_fw4_ack()
6982 mtx_unlock(&cst->lock); in ethofld_fw4_ack()
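/*
 * Note on the ethofld fragments above: cst->tx_credits is an account kept in
 * len16 units.  The initial FLOWC WR and any flush WR debit ETID_FLOWC_LEN16
 * and ETID_FLUSH_LEN16 respectively, and every packet WR debits its
 * mbuf_eo_len16() cost before it is committed.  The firmware's fw4_ack
 * returns cpl->credits, which ethofld_fw4_ack() matches against the mbufs on
 * the pending_fwack queue and adds back to tx_credits, restarting
 * transmission when the head of pending_tx fits within the credits again.
 */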