Lines Matching +full:rx +full:- +full:eq

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
85 * 0-7 are valid values.
89 "payload DMA offset in rx buffer (bytes)");
93 * -1: driver should figure out a good value.
97 int fl_pad = -1;
103 * -1: driver should figure out a good value.
106 static int spg_len = -1;
112 * -1: no congestion feedback (not recommended).
119 "Congestion control for NIC RX queues (0 = backpressure, 1 = drop, 2 = both");
123 "Congestion control for TOE RX queues (0 = backpressure, 1 = drop, 2 = both");
128 * -1: let the driver decide whether to enable buffer packing or not.
132 static int buffer_packing = -1;
138 * -1: driver should figure out a good value.
142 static int fl_pack = -1;
147 * Largest rx cluster size that the driver is allowed to allocate.
151 &largest_rx_cluster, 0, "Largest rx cluster (bytes)");
159 &safest_rx_cluster, 0, "Safe rx cluster (bytes)");
164 * for rewriting. -1 and 0-3 are all valid values.
165 * -1: hardware should leave the TCP timestamps alone.
171 static int tsclk = -1;
182 * 1 and 3-17 (both inclusive) are legal values.
189 * Number of LRO entries in the lro_ctrl structure per rx queue.
193 "Number of LRO entries per RX queue");
222 "# of consecutive packets (1 - 255) that will trigger tx coalescing");
408 if (is_hpftid(iq->adapter, tid) || is_ftid(iq->adapter, tid)) { in set_tcb_rpl_handler()
410 * The return code for filter-write is put in the CPL cookie so in set_tcb_rpl_handler()
416 cookie = G_COOKIE(cpl->cookie); in set_tcb_rpl_handler()
442 u_int cookie = G_TID_COOKIE(G_AOPEN_ATID(be32toh(cpl->atid_status))); in act_open_rpl_handler()
454 struct adapter *sc = iq->adapter; in abort_rpl_rss_handler()
469 struct adapter *sc = iq->adapter; in fw4_ack_handler()
493 cookie = be64toh(cpl->data[1]) & CPL_FW6_COOKIE_MASK; in fw6_pld_handler()
572 if (spg_len != -1) { in t4_sge_modload()
579 if (cong_drop < -1 || cong_drop > 2) { in t4_sge_modload()
585 if (ofld_cong_drop < -1 || ofld_cong_drop > 2) { in t4_sge_modload()
659 return (refs - rels); in t4_sge_extfree_refs()
691 device_printf(sc->dev, "Invalid hw.cxgbe.fl_pad value" in setup_pad_and_pack_boundaries()
696 v = V_INGPADBOUNDARY(ilog2(pad) - pad_shift); in setup_pad_and_pack_boundaries()
700 if (fl_pack != -1 && fl_pack != pad) { in setup_pad_and_pack_boundaries()
702 device_printf(sc->dev, "hw.cxgbe.fl_pack (%d) ignored," in setup_pad_and_pack_boundaries()
711 if (sc->params.pci.mps > MAX_PACK_BOUNDARY) in setup_pad_and_pack_boundaries()
714 pack = max(sc->params.pci.mps, CACHE_LINE_SIZE); in setup_pad_and_pack_boundaries()
722 if (fl_pack != -1) { in setup_pad_and_pack_boundaries()
723 device_printf(sc->dev, "Invalid hw.cxgbe.fl_pack value" in setup_pad_and_pack_boundaries()
731 v = V_INGPACKBOUNDARY(ilog2(pack) - 5); in setup_pad_and_pack_boundaries()
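The two boundary registers above encode a power-of-two byte count as a biased log2: the listing shows ilog2(pad) - pad_shift for the pad boundary and ilog2(pack) - 5 for the pack boundary. A minimal sketch of that mapping, assuming the boundary has already been validated as a power of two no smaller than the bias allows (the helper name is made up for illustration):

#include <assert.h>

/* Hypothetical helper, not driver code. */
static int
boundary_to_field(unsigned int boundary, int shift)
{
        int log2b = 0;

        assert(boundary != 0 && (boundary & (boundary - 1)) == 0);
        while ((1u << log2b) < boundary)
                log2b++;
        assert(log2b >= shift);
        return (log2b - shift);         /* e.g. 64-byte pad, shift 5 -> 1 */
}
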
738 * adap->params.vpd.cclk must be set up before this is called.
746 int timer_max = M_TIMERVALUE0 * 1000 / sc->params.vpd.cclk; in t4_tweak_chip_settings()
748 uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE); in t4_tweak_chip_settings()
758 KASSERT(sc->flags & MASTER_PF, in t4_tweak_chip_settings()
768 v = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) | in t4_tweak_chip_settings()
769 V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) | in t4_tweak_chip_settings()
770 V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) | in t4_tweak_chip_settings()
771 V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) | in t4_tweak_chip_settings()
772 V_HOSTPAGESIZEPF4(PAGE_SHIFT - 10) | in t4_tweak_chip_settings()
773 V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) | in t4_tweak_chip_settings()
774 V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) | in t4_tweak_chip_settings()
775 V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10); in t4_tweak_chip_settings()
786 t4_write_reg(sc, reg, sw_buf_sizes[i] - CL_METADATA_SIZE); in t4_tweak_chip_settings()
798 KASSERT(intr_timer[i] >= intr_timer[i - 1], in t4_tweak_chip_settings()
803 if (i == nitems(intr_timer) - 1) { in t4_tweak_chip_settings()
807 intr_timer[i] += intr_timer[i - 1]; in t4_tweak_chip_settings()
827 v = V_TSCALE(tscale - 2); in t4_tweak_chip_settings()
830 if (sc->debug_flags & DF_DISABLE_TCB_CACHE) { in t4_tweak_chip_settings()
853 /* We use multiple DDP page sizes both in plain-TOE and ISCSI modes. */ in t4_tweak_chip_settings()
855 if (sc->nvmecaps != 0) { in t4_tweak_chip_settings()
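The V_HOSTPAGESIZEPF* fields a few lines up store PAGE_SHIFT - 10, i.e. the host page size as a log2 relative to 1 KB; this reading is an assumption based only on the arithmetic shown. A one-line illustration:

/* Not driver code: with 4 KB pages (PAGE_SHIFT = 12) each PF field is 12 - 10 = 2. */
#define SGE_HOSTPAGESIZE_FIELD(page_shift)      ((page_shift) - 10)
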
878 int mask = fl_pad ? sc->params.sge.pad_boundary - 1 : 16 - 1; in hwsz_ok()
884 * Initialize the rx buffer sizes and figure out which zones the buffers will
890 struct sge *s = &sc->sge; in t4_init_rx_buf_info()
891 struct sge_params *sp = &sc->params.sge; in t4_init_rx_buf_info()
903 s->safe_zidx = -1; in t4_init_rx_buf_info()
904 rxb = &s->rx_buf_info[0]; in t4_init_rx_buf_info()
906 rxb->size1 = sw_buf_sizes[i]; in t4_init_rx_buf_info()
907 rxb->zone = m_getzone(rxb->size1); in t4_init_rx_buf_info()
908 rxb->type = m_gettype(rxb->size1); in t4_init_rx_buf_info()
909 rxb->size2 = 0; in t4_init_rx_buf_info()
910 rxb->hwidx1 = -1; in t4_init_rx_buf_info()
911 rxb->hwidx2 = -1; in t4_init_rx_buf_info()
913 int hwsize = sp->sge_fl_buffer_size[j]; in t4_init_rx_buf_info()
919 if (rxb->hwidx1 == -1 && rxb->size1 == hwsize) in t4_init_rx_buf_info()
920 rxb->hwidx1 = j; in t4_init_rx_buf_info()
923 if (rxb->size1 - CL_METADATA_SIZE < hwsize) in t4_init_rx_buf_info()
925 n = rxb->size1 - hwsize - CL_METADATA_SIZE; in t4_init_rx_buf_info()
927 rxb->hwidx2 = j; in t4_init_rx_buf_info()
928 rxb->size2 = hwsize; in t4_init_rx_buf_info()
931 if (rxb->hwidx2 != -1) { in t4_init_rx_buf_info()
932 if (n < sp->sge_fl_buffer_size[rxb->hwidx2] - in t4_init_rx_buf_info()
933 hwsize - CL_METADATA_SIZE) { in t4_init_rx_buf_info()
934 rxb->hwidx2 = j; in t4_init_rx_buf_info()
935 rxb->size2 = hwsize; in t4_init_rx_buf_info()
938 rxb->hwidx2 = j; in t4_init_rx_buf_info()
939 rxb->size2 = hwsize; in t4_init_rx_buf_info()
942 if (rxb->hwidx2 != -1) in t4_init_rx_buf_info()
943 sc->flags |= BUF_PACKING_OK; in t4_init_rx_buf_info()
944 if (s->safe_zidx == -1 && rxb->size1 == safest_rx_cluster) in t4_init_rx_buf_info()
945 s->safe_zidx = i; in t4_init_rx_buf_info()
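The loop above pairs each cluster size (size1) with size2, the hardware free-list buffer size it can expose when packing: a candidate is skipped unless it fits in the cluster with room left for the metadata, and the candidate that wastes the least space wins. A sketch of that selection, with hypothetical names and the metadata size passed in so the fragment stands alone:

/* Hypothetical helper, not driver code: returns 0 if no hardware size fits. */
static int
pick_size2(int size1, int cl_metadata_size, const int *hw_sizes, int nsizes)
{
        int i, best = 0;

        for (i = 0; i < nsizes; i++) {
                if (hw_sizes[i] <= size1 - cl_metadata_size &&
                    hw_sizes[i] > best)
                        best = hw_sizes[i];     /* smallest leftover so far */
        }
        return (best);
}
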
956 struct sge_params *sp = &sc->params.sge; in t4_verify_chip_settings()
959 const uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE); in t4_verify_chip_settings()
963 r = sp->sge_control; in t4_verify_chip_settings()
965 device_printf(sc->dev, "invalid SGE_CONTROL(0x%x)\n", r); in t4_verify_chip_settings()
971 * needs to be carefully reviewed for PAGE_SHIFT vs sp->page_shift. in t4_verify_chip_settings()
973 if (sp->page_shift != PAGE_SHIFT) { in t4_verify_chip_settings()
974 device_printf(sc->dev, "invalid SGE_HOST_PAGE_SIZE(0x%x)\n", r); in t4_verify_chip_settings()
978 if (sc->flags & IS_VF) in t4_verify_chip_settings()
984 device_printf(sc->dev, "invalid ULP_RX_TDDP_PSZ(0x%x)\n", r); in t4_verify_chip_settings()
985 if (sc->vres.ddp.size != 0) in t4_verify_chip_settings()
992 device_printf(sc->dev, "invalid ULP_RX_CTL(0x%x)\n", r); in t4_verify_chip_settings()
993 if (sc->vres.ddp.size != 0) in t4_verify_chip_settings()
1002 device_printf(sc->dev, "invalid TP_PARA_REG5(0x%x)\n", r); in t4_verify_chip_settings()
1003 if (sc->vres.ddp.size != 0) in t4_verify_chip_settings()
1015 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0, in t4_create_dma_tag()
1018 NULL, &sc->dmat); in t4_create_dma_tag()
1020 device_printf(sc->dev, in t4_create_dma_tag()
1031 struct sge_params *sp = &sc->params.sge; in t4_sge_sysctls()
1038 NULL, sp->fl_pktshift, "payload DMA offset in rx buffer (bytes)"); in t4_sge_sysctls()
1041 NULL, sp->pad_boundary, "payload pad boundary (bytes)"); in t4_sge_sysctls()
1044 NULL, sp->spg_len, "status page size (bytes)"); in t4_sge_sysctls()
1054 NULL, sp->pack_boundary, "payload pack boundary (bytes)"); in t4_sge_sysctls()
1060 if (sc->dmat) in t4_destroy_dma_tag()
1061 bus_dma_tag_destroy(sc->dmat); in t4_destroy_dma_tag()
1068 * purpose rx queues owned by the adapter.
1090 if (sc->flags & IS_VF) in t4_setup_adapter_queues()
1094 * XXX: General purpose rx queues, one per port. in t4_setup_adapter_queues()
1100 for (i = 0; i < sc->sge.nctrlq; i++) { in t4_setup_adapter_queues()
1119 if (sc->sge.ctrlq != NULL) { in t4_teardown_adapter_queues()
1120 MPASS(!(sc->flags & IS_VF)); /* VFs don't allocate ctrlq. */ in t4_teardown_adapter_queues()
1121 for (i = 0; i < sc->sge.nctrlq; i++) in t4_teardown_adapter_queues()
1136 maxp = sc->params.sge.fl_pktshift + ETHER_HDR_LEN + in max_rx_payload()
1138 if (ofld && sc->tt.tls && sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS && in max_rx_payload()
1139 maxp < sc->params.tp.max_rx_pdu) in max_rx_payload()
1140 maxp = sc->params.tp.max_rx_pdu; in max_rx_payload()
1161 struct adapter *sc = vi->adapter; in t4_setup_vi_queues()
1162 if_t ifp = vi->ifp; in t4_setup_vi_queues()
1166 intr_idx = vi->first_intr; in t4_setup_vi_queues()
1174 MPASS(vi->first_intr >= 0); in t4_setup_vi_queues()
1177 * We don't have buffers to back the netmap rx queues in t4_setup_vi_queues()
1189 iqidx = vi->first_nm_rxq + (i % vi->nnmrxq); in t4_setup_vi_queues()
1196 /* Normal rx queues and netmap rx queues share the same interrupts. */ in t4_setup_vi_queues()
1201 * Allocate rx queues first because a default iqid is required when in t4_setup_vi_queues()
1214 intr_idx = saved_idx + max(vi->nrxq, vi->nnmrxq); in t4_setup_vi_queues()
1270 if (if_getcapabilities(vi->ifp) & IFCAP_NETMAP) { in t4_teardown_vi_queues()
1282 * Take down all the tx queues first, as they reference the rx queues in t4_teardown_vi_queues()
1296 * Then take down the rx queues. in t4_teardown_vi_queues()
1323 struct sge_iq *fwq = &sc->sge.fwq; in t4_intr_all()
1325 MPASS(sc->intr_count == 1); in t4_intr_all()
1327 if (sc->intr_type == INTR_INTX) in t4_intr_all()
1344 if (atomic_load_int(&sc->error_flags) & ADAP_FATAL_ERR) in t4_intr_err()
1349 sc->swintr++; in t4_intr_err()
1353 if (t4_slow_intr_handler(sc, sc->intr_flags)) in t4_intr_err()
1358 * Interrupt handler for iq-only queues. The firmware event queue is the only
1366 if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) { in t4_intr_evt()
1368 (void) atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE); in t4_intr_evt()
1380 if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) { in t4_intr()
1382 (void) atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE); in t4_intr()
1388 * Interrupt handler for netmap rx queues.
1395 if (atomic_cmpset_int(&nm_rxq->nm_state, NM_ON, NM_BUSY)) { in t4_nm_intr()
1397 (void) atomic_cmpset_int(&nm_rxq->nm_state, NM_BUSY, NM_ON); in t4_nm_intr()
1402 * Interrupt handler for vectors shared between NIC and netmap rx queues.
1409 MPASS(irq->nm_rxq != NULL); in t4_vi_intr()
1410 t4_nm_intr(irq->nm_rxq); in t4_vi_intr()
1412 MPASS(irq->rxq != NULL); in t4_vi_intr()
1413 t4_intr(irq->rxq); in t4_vi_intr()
1418 * Deals with interrupts on an iq-only (no freelist) queue.
1424 struct adapter *sc = iq->adapter; in service_iq()
1425 struct iq_desc *d = &iq->desc[iq->cidx]; in service_iq()
1431 KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq)); in service_iq()
1432 KASSERT((iq->flags & IQ_HAS_FL) == 0, in service_iq()
1433 ("%s: called for iq %p with fl (iq->flags 0x%x)", __func__, iq, in service_iq()
1434 iq->flags)); in service_iq()
1435 MPASS((iq->flags & IQ_ADJ_CREDIT) == 0); in service_iq()
1436 MPASS((iq->flags & IQ_LRO_ENABLED) == 0); in service_iq()
1438 limit = budget ? budget : iq->qsize / 16; in service_iq()
1445 while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) { in service_iq()
1449 rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen); in service_iq()
1450 lq = be32toh(d->rsp.pldbuflen_qid); in service_iq()
1460 KASSERT(d->rss.opcode < NUM_CPL_CMDS, in service_iq()
1462 d->rss.opcode)); in service_iq()
1463 t4_cpl_handler[d->rss.opcode](iq, &d->rss, NULL); in service_iq()
1468 * There are 1K interrupt-capable queues (qids 0 in service_iq()
1474 t4_an_handler(iq, &d->rsp); in service_iq()
1478 q = sc->sge.iqmap[lq - sc->sge.iq_start - in service_iq()
1479 sc->sge.iq_base]; in service_iq()
1480 if (atomic_cmpset_int(&q->state, IQS_IDLE, in service_iq()
1482 if (service_iq_fl(q, q->qsize / 16) == 0) { in service_iq()
1483 (void) atomic_cmpset_int(&q->state, in service_iq()
1498 device_get_nameunit(sc->dev), rsp_type, iq); in service_iq()
1503 if (__predict_false(++iq->cidx == iq->sidx)) { in service_iq()
1504 iq->cidx = 0; in service_iq()
1505 iq->gen ^= F_RSPD_GEN; in service_iq()
1506 d = &iq->desc[0]; in service_iq()
1509 t4_write_reg(sc, sc->sge_gts_reg, in service_iq()
1511 V_INGRESSQID(iq->cntxt_id) | in service_iq()
1530 if (service_iq_fl(q, q->qsize / 8) == 0) in service_iq()
1531 (void) atomic_cmpset_int(&q->state, IQS_BUSY, IQS_IDLE); in service_iq()
1536 t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) | in service_iq()
1537 V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params)); in service_iq()
1547 return (lro->lro_mbuf_max != 0); in sort_before_lro()
1564 cur = &sc->cal_info[sc->cal_current]; in t4_tstmp_to_ns()
1565 gen = seqc_read(&cur->gen); in t4_tstmp_to_ns()
1569 if (seqc_consistent(&cur->gen, gen)) in t4_tstmp_to_ns()
1576 * ( (cur_time - prev_time) ) in t4_tstmp_to_ns()
1577 * ((hw_tstmp - hw_prev) * ----------------------------- ) + prev_time in t4_tstmp_to_ns()
1578 * ( (hw_cur - hw_prev) ) in t4_tstmp_to_ns()
1583 hw_clocks = hw_tstmp - dcur.hw_prev; in t4_tstmp_to_ns()
1584 sbt_cur_to_prev = (dcur.sbt_cur - dcur.sbt_prev); in t4_tstmp_to_ns()
1585 hw_clk_div = dcur.hw_cur - dcur.hw_prev; in t4_tstmp_to_ns()
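A compilable restatement of the interpolation spelled out in the comment above: scale the hardware tick delta by the ratio of elapsed wall time to elapsed hardware ticks between the two calibration points, then add the earlier time. Plain 64-bit integers stand in for the driver's sbintime_t bookkeeping here.

#include <stdint.h>

/* Not driver code. */
static uint64_t
interp_tstmp(uint64_t hw_tstmp, uint64_t hw_prev, uint64_t hw_cur,
    uint64_t time_prev, uint64_t time_cur)
{
        uint64_t hw_clocks = hw_tstmp - hw_prev;        /* ticks since prev point */
        uint64_t hw_clk_div = hw_cur - hw_prev;         /* ticks between points */

        return (hw_clocks * (time_cur - time_prev) / hw_clk_div + time_prev);
}
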
1594 fl->rx_offset = 0; in move_to_next_rxbuf()
1595 if (__predict_false((++fl->cidx & 7) == 0)) { in move_to_next_rxbuf()
1596 uint16_t cidx = fl->cidx >> 3; in move_to_next_rxbuf()
1598 if (__predict_false(cidx == fl->sidx)) in move_to_next_rxbuf()
1599 fl->cidx = cidx = 0; in move_to_next_rxbuf()
1600 fl->hw_cidx = cidx; in move_to_next_rxbuf()
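The "& 7" / ">> 3" arithmetic above reflects eight buffer addresses per 64-byte hardware free-list descriptor (an assumption drawn from the shifts shown): the software cidx counts individual buffers while hw_cidx counts descriptors.

#include <stdint.h>

/* Not driver code: hardware index corresponding to a software buffer index. */
static inline uint16_t
fl_hw_index(uint16_t sw_cidx)
{
        return (sw_cidx >> 3);  /* 8 buffers per hardware descriptor */
}
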
1612 struct adapter *sc = iq->adapter; in service_iq_fl()
1613 struct iq_desc *d = &iq->desc[iq->cidx]; in service_iq_fl()
1620 const struct timeval lro_timeout = {0, sc->lro_timeout}; in service_iq_fl()
1621 struct lro_ctrl *lro = &rxq->lro; in service_iq_fl()
1624 KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq)); in service_iq_fl()
1625 MPASS(iq->flags & IQ_HAS_FL); in service_iq_fl()
1629 if (iq->flags & IQ_ADJ_CREDIT) { in service_iq_fl()
1631 iq->flags &= ~IQ_ADJ_CREDIT; in service_iq_fl()
1632 if ((d->rsp.u.type_gen & F_RSPD_GEN) != iq->gen) { in service_iq_fl()
1634 t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(1) | in service_iq_fl()
1635 V_INGRESSQID((u32)iq->cntxt_id) | in service_iq_fl()
1636 V_SEINTARM(iq->intr_params)); in service_iq_fl()
1642 MPASS((iq->flags & IQ_ADJ_CREDIT) == 0); in service_iq_fl()
1645 limit = budget ? budget : iq->qsize / 16; in service_iq_fl()
1646 fl = &rxq->fl; in service_iq_fl()
1647 fl_hw_cidx = fl->hw_cidx; /* stable snapshot */ in service_iq_fl()
1648 while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) { in service_iq_fl()
1653 rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen); in service_iq_fl()
1654 lq = be32toh(d->rsp.pldbuflen_qid); in service_iq_fl()
1659 if (fl->rx_offset > 0) in service_iq_fl()
1663 if (IDXDIFF(fl->hw_cidx, fl_hw_cidx, fl->sidx) > 4) { in service_iq_fl()
1667 fl_hw_cidx = fl->hw_cidx; in service_iq_fl()
1670 if (d->rss.opcode == CPL_RX_PKT) { in service_iq_fl()
1682 KASSERT(d->rss.opcode < NUM_CPL_CMDS, in service_iq_fl()
1683 ("%s: bad opcode %02x.", __func__, d->rss.opcode)); in service_iq_fl()
1684 t4_cpl_handler[d->rss.opcode](iq, &d->rss, m0); in service_iq_fl()
1690 * There are 1K interrupt-capable queues (qids 0 in service_iq_fl()
1701 t4_an_handler(iq, &d->rsp); in service_iq_fl()
1708 device_get_nameunit(sc->dev), rsp_type, iq); in service_iq_fl()
1713 if (__predict_false(++iq->cidx == iq->sidx)) { in service_iq_fl()
1714 iq->cidx = 0; in service_iq_fl()
1715 iq->gen ^= F_RSPD_GEN; in service_iq_fl()
1716 d = &iq->desc[0]; in service_iq_fl()
1719 t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) | in service_iq_fl()
1720 V_INGRESSQID(iq->cntxt_id) | in service_iq_fl()
1724 if (iq->flags & IQ_LRO_ENABLED && in service_iq_fl()
1726 sc->lro_timeout != 0) { in service_iq_fl()
1737 if (iq->flags & IQ_LRO_ENABLED) { in service_iq_fl()
1738 if (ndescs > 0 && lro->lro_mbuf_count > 8) { in service_iq_fl()
1741 iq->flags |= IQ_ADJ_CREDIT; in service_iq_fl()
1742 ndescs--; in service_iq_fl()
1749 t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) | in service_iq_fl()
1750 V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params)); in service_iq_fl()
1765 return ((void *)(sd->cl + sd->moff)); in cl_metadata()
1771 struct cluster_metadata *clm = m->m_ext.ext_arg1; in rxb_free()
1773 uma_zfree(clm->zone, clm->cl); in rxb_free()
1789 struct fl_sdesc *sd = &fl->sdesc[fl->cidx]; in get_scatter_segment()
1790 struct rx_buf_info *rxb = &sc->sge.rx_buf_info[sd->zidx]; in get_scatter_segment()
1795 if (fl->flags & FL_BUF_PACKING) { in get_scatter_segment()
1798 blen = rxb->size2 - fl->rx_offset; /* max possible in this buf */ in get_scatter_segment()
1800 payload = sd->cl + fl->rx_offset; in get_scatter_segment()
1803 pad = roundup2(l, fl->buf_boundary) - l; in get_scatter_segment()
1804 if (fl->rx_offset + len + pad < rxb->size2) in get_scatter_segment()
1806 MPASS(fl->rx_offset + blen <= rxb->size2); in get_scatter_segment()
1808 MPASS(fl->rx_offset == 0); /* not packing */ in get_scatter_segment()
1809 blen = rxb->size1; in get_scatter_segment()
1811 payload = sd->cl; in get_scatter_segment()
1818 m->m_pkthdr.len = remaining; in get_scatter_segment()
1824 m->m_len = len; in get_scatter_segment()
1827 if (sc->sc_do_rxcopy && len < RX_COPY_THRESHOLD) { in get_scatter_segment()
1830 if (fl->flags & FL_BUF_PACKING) { in get_scatter_segment()
1831 fl->rx_offset += blen; in get_scatter_segment()
1832 MPASS(fl->rx_offset <= rxb->size2); in get_scatter_segment()
1833 if (fl->rx_offset < rxb->size2) in get_scatter_segment()
1836 } else if (fl->flags & FL_BUF_PACKING) { in get_scatter_segment()
1838 if (sd->nmbuf++ == 0) { in get_scatter_segment()
1839 clm->refcount = 1; in get_scatter_segment()
1840 clm->zone = rxb->zone; in get_scatter_segment()
1841 clm->cl = sd->cl; in get_scatter_segment()
1844 m_extaddref(m, payload, blen, &clm->refcount, rxb_free, clm, in get_scatter_segment()
1847 fl->rx_offset += blen; in get_scatter_segment()
1848 MPASS(fl->rx_offset <= rxb->size2); in get_scatter_segment()
1849 if (fl->rx_offset < rxb->size2) in get_scatter_segment()
1852 m_cljset(m, sd->cl, rxb->type); in get_scatter_segment()
1853 sd->cl = NULL; /* consumed, not a recycle candidate */ in get_scatter_segment()
1867 if (__predict_false(fl->flags & FL_BUF_RESUME)) { in get_fl_payload()
1868 M_ASSERTPKTHDR(fl->m0); in get_fl_payload()
1869 MPASS(fl->m0->m_pkthdr.len == plen); in get_fl_payload()
1870 MPASS(fl->remaining < plen); in get_fl_payload()
1872 m0 = fl->m0; in get_fl_payload()
1873 pnext = fl->pnext; in get_fl_payload()
1874 remaining = fl->remaining; in get_fl_payload()
1875 fl->flags &= ~FL_BUF_RESUME; in get_fl_payload()
1887 remaining = plen - m0->m_len; in get_fl_payload()
1888 pnext = &m0->m_next; in get_fl_payload()
1891 MPASS(fl->rx_offset == 0); in get_fl_payload()
1892 m = get_scatter_segment(sc, fl, plen - remaining, remaining); in get_fl_payload()
1894 fl->m0 = m0; in get_fl_payload()
1895 fl->pnext = pnext; in get_fl_payload()
1896 fl->remaining = remaining; in get_fl_payload()
1897 fl->flags |= FL_BUF_RESUME; in get_fl_payload()
1901 pnext = &m->m_next; in get_fl_payload()
1902 remaining -= m->m_len; in get_fl_payload()
1914 struct fl_sdesc *sd = &fl->sdesc[fl->cidx]; in skip_scatter_segment()
1915 struct rx_buf_info *rxb = &sc->sge.rx_buf_info[sd->zidx]; in skip_scatter_segment()
1918 if (fl->flags & FL_BUF_PACKING) { in skip_scatter_segment()
1921 blen = rxb->size2 - fl->rx_offset; /* max possible in this buf */ in skip_scatter_segment()
1925 pad = roundup2(l, fl->buf_boundary) - l; in skip_scatter_segment()
1926 if (fl->rx_offset + len + pad < rxb->size2) in skip_scatter_segment()
1928 fl->rx_offset += blen; in skip_scatter_segment()
1929 MPASS(fl->rx_offset <= rxb->size2); in skip_scatter_segment()
1930 if (fl->rx_offset < rxb->size2) in skip_scatter_segment()
1933 MPASS(fl->rx_offset == 0); /* not packing */ in skip_scatter_segment()
1934 blen = rxb->size1; in skip_scatter_segment()
1951 remaining -= len; in skip_fl_payload()
1959 struct fl_sdesc *sd = &fl->sdesc[fl->cidx]; in get_segment_len()
1960 struct rx_buf_info *rxb = &sc->sge.rx_buf_info[sd->zidx]; in get_segment_len()
1962 if (fl->flags & FL_BUF_PACKING) in get_segment_len()
1963 len = rxb->size2 - fl->rx_offset; in get_segment_len()
1965 len = rxb->size1; in get_segment_len()
1975 if_t ifp = rxq->ifp; in eth_rx()
1976 struct sge_fl *fl = &rxq->fl; in eth_rx()
1980 struct lro_ctrl *lro = &rxq->lro; in eth_rx()
2018 MPASS(plen > sc->params.sge.fl_pktshift); in eth_rx()
2019 if (vi->pfil != NULL && PFIL_HOOKED_IN(vi->pfil) && in eth_rx()
2020 __predict_true((fl->flags & FL_BUF_RESUME) == 0)) { in eth_rx()
2021 struct fl_sdesc *sd = &fl->sdesc[fl->cidx]; in eth_rx()
2025 slen = get_segment_len(sc, fl, plen) - in eth_rx()
2026 sc->params.sge.fl_pktshift; in eth_rx()
2027 frame = sd->cl + fl->rx_offset + sc->params.sge.fl_pktshift; in eth_rx()
2029 rc = pfil_mem_in(vi->pfil, frame, slen, ifp, &m0); in eth_rx()
2045 m0->m_pkthdr.len -= sc->params.sge.fl_pktshift; in eth_rx()
2046 m0->m_len -= sc->params.sge.fl_pktshift; in eth_rx()
2047 m0->m_data += sc->params.sge.fl_pktshift; in eth_rx()
2050 m0->m_pkthdr.rcvif = ifp; in eth_rx()
2051 M_HASHTYPE_SET(m0, sw_hashtype[d->rss.hash_type][d->rss.ipv6]); in eth_rx()
2052 m0->m_pkthdr.flowid = be32toh(d->rss.hash_val); in eth_rx()
2054 cpl = (const void *)(&d->rss + 1); in eth_rx()
2055 if (sc->params.tp.rx_pkt_encap) { in eth_rx()
2056 const uint16_t ev = be16toh(cpl->err_vec); in eth_rx()
2062 err_vec = be16toh(cpl->err_vec); in eth_rx()
2066 if (cpl->csum_calc && err_vec == 0) { in eth_rx()
2067 int ipv6 = !!(cpl->l2info & htobe32(F_RXF_IP6)); in eth_rx()
2071 MPASS((cpl->l2info & htobe32(F_RXF_IP)) ^ in eth_rx()
2072 (cpl->l2info & htobe32(F_RXF_IP6))); in eth_rx()
2073 m0->m_pkthdr.csum_data = be16toh(cpl->csum); in eth_rx()
2076 m0->m_pkthdr.csum_flags = CSUM_L3_CALC | in eth_rx()
2080 m0->m_pkthdr.csum_flags = CSUM_L4_CALC | in eth_rx()
2083 rxq->rxcsum++; in eth_rx()
2088 if (__predict_false(cpl->ip_frag)) { in eth_rx()
2099 * XXX: Need 32b for csum_data2 in an rx mbuf. in eth_rx()
2102 m0->m_pkthdr.csum_data = 0xffff; in eth_rx()
2104 m0->m_pkthdr.csum_flags = CSUM_L4_CALC | in eth_rx()
2107 m0->m_pkthdr.csum_flags = CSUM_L3_CALC | in eth_rx()
2114 MPASS(m0->m_pkthdr.csum_data == 0xffff); in eth_rx()
2119 m0->m_pkthdr.csum_flags = in eth_rx()
2122 rxq->vxlan_rxcsum++; in eth_rx()
2126 if (cpl->vlan_ex) { in eth_rx()
2127 if (sc->flags & IS_VF && sc->vlan_id) { in eth_rx()
2132 MPASS(be16toh(cpl->vlan) == sc->vlan_id); in eth_rx()
2134 m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan); in eth_rx()
2135 m0->m_flags |= M_VLANTAG; in eth_rx()
2136 rxq->vlan_extraction++; in eth_rx()
2140 if (rxq->iq.flags & IQ_RX_TIMESTAMP) { in eth_rx()
2143 * long as we get a non-zero back from t4_tstmp_to_ns(). in eth_rx()
2145 m0->m_pkthdr.rcv_tstmp = t4_tstmp_to_ns(sc, in eth_rx()
2146 be64toh(d->rsp.u.last_flit)); in eth_rx()
2147 if (m0->m_pkthdr.rcv_tstmp != 0) in eth_rx()
2148 m0->m_flags |= M_TSTMP; in eth_rx()
2152 m0->m_pkthdr.numa_domain = if_getnumadomain(ifp); in eth_rx()
2155 if (rxq->iq.flags & IQ_LRO_ENABLED && tnl_type == 0 && in eth_rx()
2178 struct sge_eq *eq = &wrq->eq; in wrq_tx_drain() local
2180 EQ_LOCK(eq); in wrq_tx_drain()
2181 if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) in wrq_tx_drain()
2182 drain_wrq_wr_list(wrq->adapter, wrq); in wrq_tx_drain()
2183 EQ_UNLOCK(eq); in wrq_tx_drain()
2189 struct sge_eq *eq = &wrq->eq; in drain_wrq_wr_list() local
2195 EQ_LOCK_ASSERT_OWNED(eq); in drain_wrq_wr_list()
2196 MPASS(TAILQ_EMPTY(&wrq->incomplete_wrs)); in drain_wrq_wr_list()
2197 wr = STAILQ_FIRST(&wrq->wr_list); in drain_wrq_wr_list()
2199 MPASS(eq->pidx == eq->dbidx); in drain_wrq_wr_list()
2203 eq->cidx = read_hw_cidx(eq); in drain_wrq_wr_list()
2204 if (eq->pidx == eq->cidx) in drain_wrq_wr_list()
2205 available = eq->sidx - 1; in drain_wrq_wr_list()
2207 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; in drain_wrq_wr_list()
2209 MPASS(wr->wrq == wrq); in drain_wrq_wr_list()
2210 n = howmany(wr->wr_len, EQ_ESIZE); in drain_wrq_wr_list()
2214 dst = (void *)&eq->desc[eq->pidx]; in drain_wrq_wr_list()
2215 if (__predict_true(eq->sidx - eq->pidx > n)) { in drain_wrq_wr_list()
2217 bcopy(&wr->wr[0], dst, wr->wr_len); in drain_wrq_wr_list()
2218 eq->pidx += n; in drain_wrq_wr_list()
2220 int first_portion = (eq->sidx - eq->pidx) * EQ_ESIZE; in drain_wrq_wr_list()
2222 bcopy(&wr->wr[0], dst, first_portion); in drain_wrq_wr_list()
2223 if (wr->wr_len > first_portion) { in drain_wrq_wr_list()
2224 bcopy(&wr->wr[first_portion], &eq->desc[0], in drain_wrq_wr_list()
2225 wr->wr_len - first_portion); in drain_wrq_wr_list()
2227 eq->pidx = n - (eq->sidx - eq->pidx); in drain_wrq_wr_list()
2229 wrq->tx_wrs_copied++; in drain_wrq_wr_list()
2231 if (available < eq->sidx / 4 && in drain_wrq_wr_list()
2232 atomic_cmpset_int(&eq->equiq, 0, 1)) { in drain_wrq_wr_list()
2238 dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ | in drain_wrq_wr_list()
2244 ring_eq_db(sc, eq, dbdiff); in drain_wrq_wr_list()
2248 STAILQ_REMOVE_HEAD(&wrq->wr_list, link); in drain_wrq_wr_list()
2250 MPASS(wrq->nwr_pending > 0); in drain_wrq_wr_list()
2251 wrq->nwr_pending--; in drain_wrq_wr_list()
2252 MPASS(wrq->ndesc_needed >= n); in drain_wrq_wr_list()
2253 wrq->ndesc_needed -= n; in drain_wrq_wr_list()
2254 } while ((wr = STAILQ_FIRST(&wrq->wr_list)) != NULL); in drain_wrq_wr_list()
2257 ring_eq_db(sc, eq, dbdiff); in drain_wrq_wr_list()
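The copy in drain_wrq_wr_list() above splits a work request into two bcopy calls when it wraps past the end of the descriptor ring. A generic sketch of that pattern (names and signature are made up; the driver operates on eq->desc and EQ_ESIZE-sized entries):

#include <string.h>

/* Not driver code: copy len bytes into a ring starting at entry pidx. */
static void
ring_copy(void *ring, int nentries, int esize, int pidx, const void *src, int len)
{
        char *dst = (char *)ring + pidx * esize;
        int first = (nentries - pidx) * esize;  /* bytes before the wrap */

        if (len <= first)
                memcpy(dst, src, len);
        else {
                memcpy(dst, src, first);
                memcpy(ring, (const char *)src + first, len - first);
        }
}
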
2267 struct sge_eq *eq = &wrq->eq; in t4_wrq_tx_locked() local
2270 EQ_LOCK_ASSERT_OWNED(eq); in t4_wrq_tx_locked()
2272 MPASS(wr->wr_len > 0 && wr->wr_len <= SGE_MAX_WR_LEN); in t4_wrq_tx_locked()
2273 MPASS((wr->wr_len & 0x7) == 0); in t4_wrq_tx_locked()
2275 STAILQ_INSERT_TAIL(&wrq->wr_list, wr, link); in t4_wrq_tx_locked()
2276 wrq->nwr_pending++; in t4_wrq_tx_locked()
2277 wrq->ndesc_needed += howmany(wr->wr_len, EQ_ESIZE); in t4_wrq_tx_locked()
2279 if (!TAILQ_EMPTY(&wrq->incomplete_wrs)) in t4_wrq_tx_locked()
2285 MPASS(eq->pidx == eq->dbidx); in t4_wrq_tx_locked()
2292 struct adapter *sc = vi->adapter; in t4_update_fl_bufsize()
2302 fl = &rxq->fl; in t4_update_fl_bufsize()
2305 fl->zidx = find_refill_source(sc, maxp, in t4_update_fl_bufsize()
2306 fl->flags & FL_BUF_PACKING); in t4_update_fl_bufsize()
2312 fl = &ofld_rxq->fl; in t4_update_fl_bufsize()
2315 fl->zidx = find_refill_source(sc, maxp, in t4_update_fl_bufsize()
2316 fl->flags & FL_BUF_PACKING); in t4_update_fl_bufsize()
2328 return (m->m_pkthdr.PH_loc.eight[1]); in mbuf_eo_nsegs()
2337 m->m_pkthdr.PH_loc.eight[1] = nsegs; in set_mbuf_eo_nsegs()
2347 n = m->m_pkthdr.PH_loc.eight[2]; in mbuf_eo_len16()
2359 m->m_pkthdr.PH_loc.eight[2] = len16; in set_mbuf_eo_len16()
2368 return (m->m_pkthdr.PH_loc.eight[3]); in mbuf_eo_tsclk_tsoff()
2377 m->m_pkthdr.PH_loc.eight[3] = tsclk_tsoff; in set_mbuf_eo_tsclk_tsoff()
2385 return (mst != NULL && mst->sw->type == IF_SND_TAG_TYPE_RATE_LIMIT); in needs_eo()
2407 m->m_pkthdr.len = len; in alloc_wr_mbuf()
2408 m->m_len = len; in alloc_wr_mbuf()
2425 return (m->m_pkthdr.csum_flags & csum_flags); in needs_hwcsum()
2436 return (m->m_pkthdr.csum_flags & csum_flags); in needs_tso()
2445 return (m->m_pkthdr.csum_flags & CSUM_ENCAP_VXLAN); in needs_vxlan_csum()
2456 return ((m->m_pkthdr.csum_flags & csum_flags) != 0 && in needs_vxlan_tso()
2457 (m->m_pkthdr.csum_flags & csum_flags) != CSUM_ENCAP_VXLAN); in needs_vxlan_tso()
2468 return (m->m_pkthdr.csum_flags & csum_flags); in needs_inner_tcp_csum()
2480 return (m->m_pkthdr.csum_flags & csum_flags); in needs_l3_csum()
2491 return (m->m_pkthdr.csum_flags & csum_flags); in needs_outer_tcp_csum()
2503 return (m->m_pkthdr.csum_flags & csum_flags); in needs_outer_l4_csum()
2513 return (m->m_pkthdr.csum_flags & csum_flags); in needs_outer_udp_csum()
2523 return (m->m_flags & M_VLANTAG); in needs_vlan_insertion()
2537 if (offset + len < m->m_len) { in m_advance()
2542 len -= m->m_len - offset; in m_advance()
2543 m = m->m_next; in m_advance()
2562 len = m->m_len; in count_mbuf_ext_pgs()
2564 len -= skip; in count_mbuf_ext_pgs()
2566 if (m->m_epg_hdrlen != 0) { in count_mbuf_ext_pgs()
2567 if (off >= m->m_epg_hdrlen) { in count_mbuf_ext_pgs()
2568 off -= m->m_epg_hdrlen; in count_mbuf_ext_pgs()
2570 seglen = m->m_epg_hdrlen - off; in count_mbuf_ext_pgs()
2574 len -= seglen; in count_mbuf_ext_pgs()
2576 (vm_offset_t)&m->m_epg_hdr[segoff]); in count_mbuf_ext_pgs()
2582 pgoff = m->m_epg_1st_off; in count_mbuf_ext_pgs()
2583 for (i = 0; i < m->m_epg_npgs && len > 0; i++) { in count_mbuf_ext_pgs()
2586 off -= pglen; in count_mbuf_ext_pgs()
2590 seglen = pglen - off; in count_mbuf_ext_pgs()
2594 len -= seglen; in count_mbuf_ext_pgs()
2595 paddr = m->m_epg_pa[i] + segoff; in count_mbuf_ext_pgs()
2602 seglen = min(len, m->m_epg_trllen - off); in count_mbuf_ext_pgs()
2603 len -= seglen; in count_mbuf_ext_pgs()
2604 paddr = pmap_kextract((vm_offset_t)&m->m_epg_trail[off]); in count_mbuf_ext_pgs()
2627 MPASS(m->m_pkthdr.len > 0); in count_mbuf_nsegs()
2628 MPASS(m->m_pkthdr.len >= skip); in count_mbuf_nsegs()
2632 for (; m; m = m->m_next) { in count_mbuf_nsegs()
2633 len = m->m_len; in count_mbuf_nsegs()
2637 skip -= len; in count_mbuf_nsegs()
2640 if ((m->m_flags & M_EXTPG) != 0) { in count_mbuf_nsegs()
2647 len -= skip; in count_mbuf_nsegs()
2652 nsegs--; in count_mbuf_nsegs()
2653 nextaddr = pmap_kextract(va + len - 1) + 1; in count_mbuf_nsegs()
2711 if (__predict_false(m0->m_pkthdr.len < ETHER_HDR_LEN)) { in parse_pkt()
2724 MPASS(m0->m_pkthdr.len > 0); in parse_pkt()
2727 if (m0->m_pkthdr.csum_flags & CSUM_SND_TAG) in parse_pkt()
2728 mst = m0->m_pkthdr.snd_tag; in parse_pkt()
2733 if (mst != NULL && mst->sw->type == IF_SND_TAG_TYPE_TLS) { in parse_pkt()
2734 struct vi_info *vi = if_getsoftc(mst->ifp); in parse_pkt()
2738 if (is_t6(vi->pi->adapter)) in parse_pkt()
2761 if (__predict_false(nsegs > 2 && m0->m_pkthdr.len <= MHLEN && in parse_pkt()
2764 m0 = m_pullup(m0, m0->m_pkthdr.len); in parse_pkt()
2784 m_snd_tag_rele(m0->m_pkthdr.snd_tag); in parse_pkt()
2785 m0->m_pkthdr.snd_tag = NULL; in parse_pkt()
2786 m0->m_pkthdr.csum_flags &= ~CSUM_SND_TAG; in parse_pkt()
2800 eh_type = ntohs(eh->ether_type); in parse_pkt()
2804 eh_type = ntohs(evh->evl_proto); in parse_pkt()
2805 m0->m_pkthdr.l2hlen = sizeof(*evh); in parse_pkt()
2807 m0->m_pkthdr.l2hlen = sizeof(*eh); in parse_pkt()
2812 l3hdr = m_advance(&m, &offset, m0->m_pkthdr.l2hlen); in parse_pkt()
2814 m_advance(&m, &offset, m0->m_pkthdr.l2hlen); in parse_pkt()
2821 m0->m_pkthdr.l3hlen = sizeof(struct ip6_hdr); in parse_pkt()
2831 ip->ip_sum = 0; in parse_pkt()
2833 const uint16_t ipl = ip->ip_len; in parse_pkt()
2835 ip->ip_len = 0; in parse_pkt()
2836 ip->ip_sum = ~in_cksum_hdr(ip); in parse_pkt()
2837 ip->ip_len = ipl; in parse_pkt()
2839 ip->ip_sum = in_cksum_hdr(ip); in parse_pkt()
2841 m0->m_pkthdr.l3hlen = ip->ip_hl << 2; in parse_pkt()
2858 m0->m_pkthdr.l4hlen = sizeof(struct udphdr); in parse_pkt()
2859 m0->m_pkthdr.l5hlen = sizeof(struct vxlan_header); in parse_pkt()
2862 eh = m_advance(&m, &offset, m0->m_pkthdr.l3hlen + in parse_pkt()
2864 eh_type = ntohs(eh->ether_type); in parse_pkt()
2868 eh_type = ntohs(evh->evl_proto); in parse_pkt()
2869 m0->m_pkthdr.inner_l2hlen = sizeof(*evh); in parse_pkt()
2871 m0->m_pkthdr.inner_l2hlen = sizeof(*eh); in parse_pkt()
2873 l3hdr = m_advance(&m, &offset, m0->m_pkthdr.inner_l2hlen); in parse_pkt()
2875 m_advance(&m, &offset, m0->m_pkthdr.inner_l2hlen); in parse_pkt()
2881 m0->m_pkthdr.inner_l3hlen = sizeof(struct ip6_hdr); in parse_pkt()
2889 m0->m_pkthdr.inner_l3hlen = ip->ip_hl << 2; in parse_pkt()
2905 tcp = m_advance(&m, &offset, m0->m_pkthdr.inner_l3hlen); in parse_pkt()
2906 m0->m_pkthdr.inner_l4hlen = tcp->th_off * 4; in parse_pkt()
2908 MPASS((m0->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0); in parse_pkt()
2909 m0->m_pkthdr.csum_flags &= CSUM_INNER_IP6_UDP | in parse_pkt()
2916 tcp = m_advance(&m, &offset, m0->m_pkthdr.l3hlen); in parse_pkt()
2917 m0->m_pkthdr.l4hlen = tcp->th_off * 4; in parse_pkt()
2926 m0->m_pkthdr.l4hlen = sizeof(struct udphdr); in parse_pkt()
2934 immhdrs = m0->m_pkthdr.l2hlen + m0->m_pkthdr.l3hlen + in parse_pkt()
2935 m0->m_pkthdr.l4hlen; in parse_pkt()
2942 rc = ethofld_transmit(mst->ifp, m0); in parse_pkt()
2956 struct sge_eq *eq = &wrq->eq; in start_wrq_wr() local
2957 struct adapter *sc = wrq->adapter; in start_wrq_wr()
2966 EQ_LOCK(eq); in start_wrq_wr()
2967 if (__predict_false((eq->flags & EQ_HW_ALLOCATED) == 0)) { in start_wrq_wr()
2968 EQ_UNLOCK(eq); in start_wrq_wr()
2972 if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) in start_wrq_wr()
2975 if (!STAILQ_EMPTY(&wrq->wr_list)) { in start_wrq_wr()
2977 EQ_UNLOCK(eq); in start_wrq_wr()
2981 cookie->pidx = -1; in start_wrq_wr()
2982 cookie->ndesc = ndesc; in start_wrq_wr()
2983 return (&wr->wr); in start_wrq_wr()
2986 eq->cidx = read_hw_cidx(eq); in start_wrq_wr()
2987 if (eq->pidx == eq->cidx) in start_wrq_wr()
2988 available = eq->sidx - 1; in start_wrq_wr()
2990 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; in start_wrq_wr()
2994 cookie->pidx = eq->pidx; in start_wrq_wr()
2995 cookie->ndesc = ndesc; in start_wrq_wr()
2996 TAILQ_INSERT_TAIL(&wrq->incomplete_wrs, cookie, link); in start_wrq_wr()
2998 w = &eq->desc[eq->pidx]; in start_wrq_wr()
2999 IDXINCR(eq->pidx, ndesc, eq->sidx); in start_wrq_wr()
3000 if (__predict_false(cookie->pidx + ndesc > eq->sidx)) { in start_wrq_wr()
3001 w = &wrq->ss[0]; in start_wrq_wr()
3002 wrq->ss_pidx = cookie->pidx; in start_wrq_wr()
3003 wrq->ss_len = len16 * 16; in start_wrq_wr()
3006 EQ_UNLOCK(eq); in start_wrq_wr()
3014 struct sge_eq *eq = &wrq->eq; in commit_wrq_wr() local
3015 struct adapter *sc = wrq->adapter; in commit_wrq_wr()
3019 if (cookie->pidx == -1) { in commit_wrq_wr()
3026 if (__predict_false(w == &wrq->ss[0])) { in commit_wrq_wr()
3027 int n = (eq->sidx - wrq->ss_pidx) * EQ_ESIZE; in commit_wrq_wr()
3029 MPASS(wrq->ss_len > n); /* WR had better wrap around. */ in commit_wrq_wr()
3030 bcopy(&wrq->ss[0], &eq->desc[wrq->ss_pidx], n); in commit_wrq_wr()
3031 bcopy(&wrq->ss[n], &eq->desc[0], wrq->ss_len - n); in commit_wrq_wr()
3032 wrq->tx_wrs_ss++; in commit_wrq_wr()
3034 wrq->tx_wrs_direct++; in commit_wrq_wr()
3036 EQ_LOCK(eq); in commit_wrq_wr()
3037 ndesc = cookie->ndesc; /* Can be more than SGE_MAX_WR_NDESC here. */ in commit_wrq_wr()
3038 pidx = cookie->pidx; in commit_wrq_wr()
3039 MPASS(pidx >= 0 && pidx < eq->sidx); in commit_wrq_wr()
3043 MPASS(pidx == eq->dbidx); in commit_wrq_wr()
3050 * is at pidx and not eq->pidx, which has moved on in commit_wrq_wr()
3053 dst = (void *)&eq->desc[pidx]; in commit_wrq_wr()
3054 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; in commit_wrq_wr()
3055 if (available < eq->sidx / 4 && in commit_wrq_wr()
3056 atomic_cmpset_int(&eq->equiq, 0, 1)) { in commit_wrq_wr()
3062 dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ | in commit_wrq_wr()
3066 if (__predict_true(eq->flags & EQ_HW_ALLOCATED)) in commit_wrq_wr()
3067 ring_eq_db(wrq->adapter, eq, ndesc); in commit_wrq_wr()
3069 IDXINCR(eq->dbidx, ndesc, eq->sidx); in commit_wrq_wr()
3071 MPASS(IDXDIFF(next->pidx, pidx, eq->sidx) == ndesc); in commit_wrq_wr()
3072 next->pidx = pidx; in commit_wrq_wr()
3073 next->ndesc += ndesc; in commit_wrq_wr()
3076 MPASS(IDXDIFF(pidx, prev->pidx, eq->sidx) == prev->ndesc); in commit_wrq_wr()
3077 prev->ndesc += ndesc; in commit_wrq_wr()
3079 TAILQ_REMOVE(&wrq->incomplete_wrs, cookie, link); in commit_wrq_wr()
3081 if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) in commit_wrq_wr()
3085 if (TAILQ_EMPTY(&wrq->incomplete_wrs)) { in commit_wrq_wr()
3087 MPASS(wrq->eq.pidx == wrq->eq.dbidx); in commit_wrq_wr()
3090 EQ_UNLOCK(eq); in commit_wrq_wr()
3096 struct sge_eq *eq = r->cookie; in can_resume_eth_tx() local
3098 return (total_available_tx_desc(eq) > eq->sidx / 8); in can_resume_eth_tx()
3110 discard_tx(struct sge_eq *eq) in discard_tx() argument
3113 return ((eq->flags & (EQ_ENABLED | EQ_QFLUSH)) != EQ_ENABLED); in discard_tx()
3121 switch (G_FW_WR_OP(be32toh(wr->op_pkd))) { in wr_can_update_eq()
3138 struct sge_eq *eq = &txq->eq; in set_txupdate_flags() local
3139 struct txpkts *txp = &txq->txp; in set_txupdate_flags()
3141 if ((txp->npkt > 0 || avail < eq->sidx / 2) && in set_txupdate_flags()
3142 atomic_cmpset_int(&eq->equiq, 0, 1)) { in set_txupdate_flags()
3143 wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ | F_FW_WR_EQUIQ); in set_txupdate_flags()
3144 eq->equeqidx = eq->pidx; in set_txupdate_flags()
3145 } else if (IDXDIFF(eq->pidx, eq->equeqidx, eq->sidx) >= 32) { in set_txupdate_flags()
3146 wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ); in set_txupdate_flags()
3147 eq->equeqidx = eq->pidx; in set_txupdate_flags()
3159 const uint64_t last_tx = txq->last_tx; in record_eth_tx_time()
3167 txq->last_tx = cycles; in record_eth_tx_time()
3168 return (cycles - last_tx < itg); in record_eth_tx_time()
3172 * r->items[cidx] to r->items[pidx], with a wraparound at r->size, are ready to
3178 struct sge_txq *txq = r->cookie; in eth_tx()
3179 if_t ifp = txq->ifp; in eth_tx()
3180 struct sge_eq *eq = &txq->eq; in eth_tx() local
3181 struct txpkts *txp = &txq->txp; in eth_tx()
3183 struct adapter *sc = vi->adapter; in eth_tx()
3194 remaining = IDXDIFF(pidx, cidx, r->size); in eth_tx()
3195 if (__predict_false(discard_tx(eq))) { in eth_tx()
3196 for (i = 0; i < txp->npkt; i++) in eth_tx()
3197 m_freem(txp->mb[i]); in eth_tx()
3198 txp->npkt = 0; in eth_tx()
3200 m0 = r->items[cidx]; in eth_tx()
3202 if (++cidx == r->size) in eth_tx()
3205 reclaim_tx_descs(txq, eq->sidx); in eth_tx()
3211 if (eq->pidx == eq->cidx) in eth_tx()
3212 avail = eq->sidx - 1; in eth_tx()
3214 avail = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; in eth_tx()
3218 txp->score = 0; in eth_tx()
3219 txq->txpkts_flush++; in eth_tx()
3226 m0 = r->items[cidx]; in eth_tx()
3228 MPASS(m0->m_nextpkt == NULL); in eth_tx()
3233 if (t4_tx_coalesce == 0 && txp->npkt == 0) in eth_tx()
3236 txp->score = 0; in eth_tx()
3238 if (++txp->score == 0) in eth_tx()
3239 txp->score = UINT8_MAX; in eth_tx()
3241 txp->score = 1; in eth_tx()
3242 if (txp->npkt > 0 || remaining > 1 || in eth_tx()
3243 txp->score >= t4_tx_coalesce_pkts || in eth_tx()
3244 atomic_load_int(&txq->eq.equiq) != 0) { in eth_tx()
3245 if (vi->flags & TX_USES_VM_WR) in eth_tx()
3254 MPASS(txp->npkt > 0); in eth_tx()
3255 for (i = 0; i < txp->npkt; i++) in eth_tx()
3256 ETHER_BPF_MTAP(ifp, txp->mb[i]); in eth_tx()
3257 if (txp->npkt > 1) { in eth_tx()
3258 MPASS(avail >= tx_len16_to_desc(txp->len16)); in eth_tx()
3259 if (vi->flags & TX_USES_VM_WR) in eth_tx()
3265 tx_len16_to_desc(mbuf_len16(txp->mb[0]))); in eth_tx()
3266 if (vi->flags & TX_USES_VM_WR) in eth_tx()
3268 txp->mb[0]); in eth_tx()
3270 n = write_txpkt_wr(sc, txq, txp->mb[0], in eth_tx()
3274 avail -= n; in eth_tx()
3276 wr = &eq->desc[eq->pidx]; in eth_tx()
3277 IDXINCR(eq->pidx, n, eq->sidx); in eth_tx()
3278 txp->npkt = 0; /* emptied */ in eth_tx()
3281 /* m0 was coalesced into txq->txpkts. */ in eth_tx()
3287 * combined with the existing txq->txpkts, which has now in eth_tx()
3291 MPASS(txp->npkt == 0); in eth_tx()
3296 MPASS(txp->npkt == 0); in eth_tx()
3305 wr = &eq->desc[eq->pidx]; in eth_tx()
3318 if (vi->flags & TX_USES_VM_WR) in eth_tx()
3327 avail -= n; in eth_tx()
3329 IDXINCR(eq->pidx, n, eq->sidx); in eth_tx()
3334 ring_eq_db(sc, eq, dbdiff); in eth_tx()
3340 remaining--; in eth_tx()
3341 if (__predict_false(++cidx == r->size)) in eth_tx()
3347 ring_eq_db(sc, eq, dbdiff); in eth_tx()
3349 } else if (eq->pidx == eq->cidx && txp->npkt > 0 && in eth_tx()
3350 atomic_load_int(&txq->eq.equiq) == 0) { in eth_tx()
3357 MPASS(txp->npkt > 0); in eth_tx()
3358 for (i = 0; i < txp->npkt; i++) in eth_tx()
3359 ETHER_BPF_MTAP(ifp, txp->mb[i]); in eth_tx()
3360 if (txp->npkt > 1) { in eth_tx()
3361 MPASS(avail >= tx_len16_to_desc(txp->len16)); in eth_tx()
3362 if (vi->flags & TX_USES_VM_WR) in eth_tx()
3368 tx_len16_to_desc(mbuf_len16(txp->mb[0]))); in eth_tx()
3369 if (vi->flags & TX_USES_VM_WR) in eth_tx()
3370 n = write_txpkt_vm_wr(sc, txq, txp->mb[0]); in eth_tx()
3372 n = write_txpkt_wr(sc, txq, txp->mb[0], avail); in eth_tx()
3375 wr = &eq->desc[eq->pidx]; in eth_tx()
3376 IDXINCR(eq->pidx, n, eq->sidx); in eth_tx()
3377 txp->npkt = 0; /* emptied */ in eth_tx()
3380 set_txupdate_flags(txq, avail - n, wr); in eth_tx()
3381 ring_eq_db(sc, eq, n); in eth_tx()
3384 *coalescing = txp->npkt > 0; in eth_tx()
3396 KASSERT(pktc_idx < SGE_NCOUNTERS, /* -ve is ok, means don't use */ in init_iq()
3398 KASSERT(intr_idx >= -1 && intr_idx < sc->intr_count, in init_iq()
3403 iq->flags = 0; in init_iq()
3404 iq->state = IQS_DISABLED; in init_iq()
3405 iq->adapter = sc; in init_iq()
3406 iq->qtype = qtype; in init_iq()
3407 iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx); in init_iq()
3408 iq->intr_pktc_idx = SGE_NCOUNTERS - 1; in init_iq()
3410 iq->intr_params |= F_QINTR_CNT_EN; in init_iq()
3411 iq->intr_pktc_idx = pktc_idx; in init_iq()
3413 iq->qsize = roundup2(qsize, 16); /* See FW_IQ_CMD/iqsize */ in init_iq()
3414 iq->sidx = iq->qsize - sc->params.sge.spg_len / IQ_ESIZE; in init_iq()
3415 iq->intr_idx = intr_idx; in init_iq()
3416 iq->cong_drop = cong; in init_iq()
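The sidx computed above excludes the entries consumed by the status page at the end of the ring. A hedged example, assuming 64-byte ingress queue entries and a 64-byte status page (both sizes are configuration-dependent):

/* Not driver code: e.g. qsize 1024, spg_len 64, entry 64 -> 1023 usable entries. */
static inline int
iq_usable_entries(int qsize, int spg_len, int entry_size)
{
        return (qsize - spg_len / entry_size);
}
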
3422 struct sge_params *sp = &sc->params.sge; in init_fl()
3424 fl->qsize = qsize; in init_fl()
3425 fl->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE; in init_fl()
3426 strlcpy(fl->lockname, name, sizeof(fl->lockname)); in init_fl()
3427 mtx_init(&fl->fl_lock, fl->lockname, NULL, MTX_DEF); in init_fl()
3428 if (sc->flags & BUF_PACKING_OK && in init_fl()
3431 fl->flags |= FL_BUF_PACKING; in init_fl()
3432 fl->zidx = find_refill_source(sc, maxp, fl->flags & FL_BUF_PACKING); in init_fl()
3433 fl->safe_zidx = sc->sge.safe_zidx; in init_fl()
3434 if (fl->flags & FL_BUF_PACKING) { in init_fl()
3435 fl->lowat = roundup2(sp->fl_starve_threshold2, 8); in init_fl()
3436 fl->buf_boundary = sp->pack_boundary; in init_fl()
3438 fl->lowat = roundup2(sp->fl_starve_threshold, 8); in init_fl()
3439 fl->buf_boundary = 16; in init_fl()
3441 if (fl_pad && fl->buf_boundary < sp->pad_boundary) in init_fl()
3442 fl->buf_boundary = sp->pad_boundary; in init_fl()
3446 init_eq(struct adapter *sc, struct sge_eq *eq, int eqtype, int qsize, in init_eq() argument
3452 eq->type = eqtype; in init_eq()
3453 eq->port_id = port_id; in init_eq()
3454 eq->tx_chan = sc->port[port_id]->tx_chan; in init_eq()
3455 eq->hw_port = sc->port[port_id]->hw_port; in init_eq()
3456 eq->iq = iq; in init_eq()
3457 eq->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE; in init_eq()
3458 strlcpy(eq->lockname, name, sizeof(eq->lockname)); in init_eq()
3459 mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF); in init_eq()
3468 rc = bus_dma_tag_create(sc->dmat, 512, 0, BUS_SPACE_MAXADDR, in alloc_ring()
3520 struct adapter *sc = vi->adapter; in alloc_iq_fl()
3522 MPASS(!(iq->flags & IQ_SW_ALLOCATED)); in alloc_iq_fl()
3524 len = iq->qsize * IQ_ESIZE; in alloc_iq_fl()
3525 rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba, in alloc_iq_fl()
3526 (void **)&iq->desc); in alloc_iq_fl()
3531 len = fl->qsize * EQ_ESIZE; in alloc_iq_fl()
3532 rc = alloc_ring(sc, len, &fl->desc_tag, &fl->desc_map, in alloc_iq_fl()
3533 &fl->ba, (void **)&fl->desc); in alloc_iq_fl()
3535 free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, in alloc_iq_fl()
3536 iq->desc); in alloc_iq_fl()
3541 fl->sdesc = malloc(fl->sidx * 8 * sizeof(struct fl_sdesc), in alloc_iq_fl()
3545 iq->flags |= IQ_HAS_FL; in alloc_iq_fl()
3548 iq->flags |= IQ_SW_ALLOCATED; in alloc_iq_fl()
3560 MPASS(iq->flags & IQ_SW_ALLOCATED); in free_iq_fl()
3563 MPASS(iq->flags & IQ_HAS_FL); in free_iq_fl()
3564 free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba, fl->desc); in free_iq_fl()
3566 free(fl->sdesc, M_CXGBE); in free_iq_fl()
3567 mtx_destroy(&fl->fl_lock); in free_iq_fl()
3570 free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, iq->desc); in free_iq_fl()
3586 struct adapter *sc = vi->adapter; in alloc_iq_fl_hwq()
3587 struct port_info *pi = vi->pi; in alloc_iq_fl_hwq()
3590 MPASS (!(iq->flags & IQ_HW_ALLOCATED)); in alloc_iq_fl_hwq()
3594 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) | in alloc_iq_fl_hwq()
3601 if (iq == &sc->sge.fwq) in alloc_iq_fl_hwq()
3604 if (iq->intr_idx < 0) { in alloc_iq_fl_hwq()
3607 v |= V_FW_IQ_CMD_IQANDSTINDEX(sc->sge.fwq.cntxt_id); in alloc_iq_fl_hwq()
3609 KASSERT(iq->intr_idx < sc->intr_count, in alloc_iq_fl_hwq()
3610 ("%s: invalid direct intr_idx %d", __func__, iq->intr_idx)); in alloc_iq_fl_hwq()
3611 v |= V_FW_IQ_CMD_IQANDSTINDEX(iq->intr_idx); in alloc_iq_fl_hwq()
3614 bzero(iq->desc, iq->qsize * IQ_ESIZE); in alloc_iq_fl_hwq()
3617 V_FW_IQ_CMD_VIID(vi->viid) | in alloc_iq_fl_hwq()
3619 c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->hw_port) | in alloc_iq_fl_hwq()
3621 V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) | in alloc_iq_fl_hwq()
3622 V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4)); in alloc_iq_fl_hwq()
3623 c.iqsize = htobe16(iq->qsize); in alloc_iq_fl_hwq()
3624 c.iqaddr = htobe64(iq->ba); in alloc_iq_fl_hwq()
3625 c.iqns_to_fl0congen = htobe32(V_FW_IQ_CMD_IQTYPE(iq->qtype)); in alloc_iq_fl_hwq()
3626 if (iq->cong_drop != -1) { in alloc_iq_fl_hwq()
3627 if (iq->qtype == IQ_ETH) { in alloc_iq_fl_hwq()
3629 cong_map = 1 << pi->hw_port; in alloc_iq_fl_hwq()
3631 cong_map = pi->rx_e_chan_map; in alloc_iq_fl_hwq()
3638 bzero(fl->desc, fl->sidx * EQ_ESIZE + sc->params.sge.spg_len); in alloc_iq_fl_hwq()
3643 (fl->flags & FL_BUF_PACKING ? F_FW_IQ_CMD_FL0PACKEN : in alloc_iq_fl_hwq()
3645 if (iq->cong_drop != -1) { in alloc_iq_fl_hwq()
3656 c.fl0size = htobe16(fl->qsize); in alloc_iq_fl_hwq()
3657 c.fl0addr = htobe64(fl->ba); in alloc_iq_fl_hwq()
3660 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); in alloc_iq_fl_hwq()
3666 iq->cidx = 0; in alloc_iq_fl_hwq()
3667 iq->gen = F_RSPD_GEN; in alloc_iq_fl_hwq()
3668 iq->cntxt_id = be16toh(c.iqid); in alloc_iq_fl_hwq()
3669 iq->abs_id = be16toh(c.physiqid); in alloc_iq_fl_hwq()
3671 cntxt_id = iq->cntxt_id - sc->sge.iq_start; in alloc_iq_fl_hwq()
3672 if (cntxt_id >= sc->sge.iqmap_sz) { in alloc_iq_fl_hwq()
3673 panic ("%s: iq->cntxt_id (%d) more than the max (%d)", __func__, in alloc_iq_fl_hwq()
3674 cntxt_id, sc->sge.iqmap_sz - 1); in alloc_iq_fl_hwq()
3676 sc->sge.iqmap[cntxt_id] = iq; in alloc_iq_fl_hwq()
3683 MPASS(!(fl->flags & FL_BUF_RESUME)); in alloc_iq_fl_hwq()
3684 for (i = 0; i < fl->sidx * 8; i++) in alloc_iq_fl_hwq()
3685 MPASS(fl->sdesc[i].cl == NULL); in alloc_iq_fl_hwq()
3687 fl->cntxt_id = be16toh(c.fl0id); in alloc_iq_fl_hwq()
3688 fl->pidx = fl->cidx = fl->hw_cidx = fl->dbidx = 0; in alloc_iq_fl_hwq()
3689 fl->rx_offset = 0; in alloc_iq_fl_hwq()
3690 fl->flags &= ~(FL_STARVING | FL_DOOMED); in alloc_iq_fl_hwq()
3692 cntxt_id = fl->cntxt_id - sc->sge.eq_start; in alloc_iq_fl_hwq()
3693 if (cntxt_id >= sc->sge.eqmap_sz) { in alloc_iq_fl_hwq()
3694 panic("%s: fl->cntxt_id (%d) more than the max (%d)", in alloc_iq_fl_hwq()
3695 __func__, cntxt_id, sc->sge.eqmap_sz - 1); in alloc_iq_fl_hwq()
3697 sc->sge.eqmap[cntxt_id] = (void *)fl; in alloc_iq_fl_hwq()
3699 qid = fl->cntxt_id; in alloc_iq_fl_hwq()
3700 if (isset(&sc->doorbells, DOORBELL_UDB)) { in alloc_iq_fl_hwq()
3701 uint32_t s_qpp = sc->params.sge.eq_s_qpp; in alloc_iq_fl_hwq()
3702 uint32_t mask = (1 << s_qpp) - 1; in alloc_iq_fl_hwq()
3705 udb = sc->udbs_base + UDBS_DB_OFFSET; in alloc_iq_fl_hwq()
3712 fl->udb = (volatile void *)udb; in alloc_iq_fl_hwq()
3714 fl->dbval = V_QID(qid) | sc->chip_params->sge_fl_db; in alloc_iq_fl_hwq()
3718 refill_fl(sc, fl, fl->lowat); in alloc_iq_fl_hwq()
3722 if (chip_id(sc) >= CHELSIO_T5 && !(sc->flags & IS_VF) && in alloc_iq_fl_hwq()
3723 iq->cong_drop != -1) { in alloc_iq_fl_hwq()
3724 t4_sge_set_conm_context(sc, iq->cntxt_id, iq->cong_drop, in alloc_iq_fl_hwq()
3729 atomic_store_rel_int(&iq->state, IQS_IDLE); in alloc_iq_fl_hwq()
3730 t4_write_reg(sc, sc->sge_gts_reg, V_SEINTARM(iq->intr_params) | in alloc_iq_fl_hwq()
3731 V_INGRESSQID(iq->cntxt_id)); in alloc_iq_fl_hwq()
3733 iq->flags |= IQ_HW_ALLOCATED; in alloc_iq_fl_hwq()
3743 MPASS(iq->flags & IQ_HW_ALLOCATED); in free_iq_fl_hwq()
3744 rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP, in free_iq_fl_hwq()
3745 iq->cntxt_id, fl ? fl->cntxt_id : 0xffff, 0xffff); in free_iq_fl_hwq()
3750 iq->flags &= ~IQ_HW_ALLOCATED; in free_iq_fl_hwq()
3765 SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, &iq->ba, in add_iq_sysctls()
3768 iq->qsize * IQ_ESIZE, "descriptor ring size in bytes"); in add_iq_sysctls()
3770 &iq->abs_id, 0, "absolute id of the queue"); in add_iq_sysctls()
3772 &iq->cntxt_id, 0, "SGE context id of the queue"); in add_iq_sysctls()
3773 SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &iq->cidx, in add_iq_sysctls()
3792 &fl->ba, "bus address of descriptor ring"); in add_fl_sysctls()
3794 fl->sidx * EQ_ESIZE + sc->params.sge.spg_len, in add_fl_sysctls()
3797 &fl->cntxt_id, 0, "SGE context id of the freelist"); in add_fl_sysctls()
3801 fl->flags & FL_BUF_PACKING ? 1 : 0, "packing enabled"); in add_fl_sysctls()
3802 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &fl->cidx, in add_fl_sysctls()
3804 if (fl->flags & FL_BUF_PACKING) { in add_fl_sysctls()
3806 CTLFLAG_RD, &fl->rx_offset, 0, "packing rx offset"); in add_fl_sysctls()
3808 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, &fl->pidx, in add_fl_sysctls()
3811 CTLFLAG_RD, &fl->cl_allocated, "# of clusters allocated"); in add_fl_sysctls()
3813 CTLFLAG_RD, &fl->cl_recycled, "# of clusters recycled"); in add_fl_sysctls()
3815 CTLFLAG_RD, &fl->cl_fast_recycled, "# of clusters recycled (fast)"); in add_fl_sysctls()
3825 struct sge_iq *fwq = &sc->sge.fwq; in alloc_fwq()
3826 struct vi_info *vi = &sc->port[0]->vi[0]; in alloc_fwq()
3828 if (!(fwq->flags & IQ_SW_ALLOCATED)) { in alloc_fwq()
3829 MPASS(!(fwq->flags & IQ_HW_ALLOCATED)); in alloc_fwq()
3831 if (sc->flags & IS_VF) in alloc_fwq()
3834 intr_idx = sc->intr_count > 1 ? 1 : 0; in alloc_fwq()
3835 init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE, intr_idx, -1, IQ_OTHER); in alloc_fwq()
3836 rc = alloc_iq_fl(vi, fwq, NULL, &sc->ctx, sc->fwq_oid); in alloc_fwq()
3841 MPASS(fwq->flags & IQ_SW_ALLOCATED); in alloc_fwq()
3844 if (!(fwq->flags & IQ_HW_ALLOCATED)) { in alloc_fwq()
3845 MPASS(fwq->flags & IQ_SW_ALLOCATED); in alloc_fwq()
3852 MPASS(fwq->flags & IQ_HW_ALLOCATED); in alloc_fwq()
3864 struct sge_iq *fwq = &sc->sge.fwq; in free_fwq()
3866 if (fwq->flags & IQ_HW_ALLOCATED) { in free_fwq()
3867 MPASS(fwq->flags & IQ_SW_ALLOCATED); in free_fwq()
3869 MPASS(!(fwq->flags & IQ_HW_ALLOCATED)); in free_fwq()
3872 if (fwq->flags & IQ_SW_ALLOCATED) { in free_fwq()
3873 MPASS(!(fwq->flags & IQ_HW_ALLOCATED)); in free_fwq()
3875 MPASS(!(fwq->flags & IQ_SW_ALLOCATED)); in free_fwq()
3888 struct sge_wrq *ctrlq = &sc->sge.ctrlq[idx]; in alloc_ctrlq()
3890 MPASS(idx < sc->sge.nctrlq); in alloc_ctrlq()
3892 if (!(ctrlq->eq.flags & EQ_SW_ALLOCATED)) { in alloc_ctrlq()
3893 MPASS(!(ctrlq->eq.flags & EQ_HW_ALLOCATED)); in alloc_ctrlq()
3896 oid = SYSCTL_ADD_NODE(&sc->ctx, SYSCTL_CHILDREN(sc->ctrlq_oid), in alloc_ctrlq()
3901 device_get_nameunit(sc->dev), idx); in alloc_ctrlq()
3902 init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, in alloc_ctrlq()
3903 idx % sc->params.nports, &sc->sge.fwq, name); in alloc_ctrlq()
3904 rc = alloc_wrq(sc, NULL, ctrlq, &sc->ctx, oid); in alloc_ctrlq()
3910 MPASS(ctrlq->eq.flags & EQ_SW_ALLOCATED); in alloc_ctrlq()
3913 if (!(ctrlq->eq.flags & EQ_HW_ALLOCATED)) { in alloc_ctrlq()
3914 MPASS(ctrlq->eq.flags & EQ_SW_ALLOCATED); in alloc_ctrlq()
3915 MPASS(ctrlq->nwr_pending == 0); in alloc_ctrlq()
3916 MPASS(ctrlq->ndesc_needed == 0); in alloc_ctrlq()
3918 rc = alloc_eq_hwq(sc, NULL, &ctrlq->eq, idx); in alloc_ctrlq()
3923 MPASS(ctrlq->eq.flags & EQ_HW_ALLOCATED); in alloc_ctrlq()
3935 struct sge_wrq *ctrlq = &sc->sge.ctrlq[idx]; in free_ctrlq()
3937 if (ctrlq->eq.flags & EQ_HW_ALLOCATED) { in free_ctrlq()
3938 MPASS(ctrlq->eq.flags & EQ_SW_ALLOCATED); in free_ctrlq()
3939 free_eq_hwq(sc, NULL, &ctrlq->eq); in free_ctrlq()
3940 MPASS(!(ctrlq->eq.flags & EQ_HW_ALLOCATED)); in free_ctrlq()
3943 if (ctrlq->eq.flags & EQ_SW_ALLOCATED) { in free_ctrlq()
3944 MPASS(!(ctrlq->eq.flags & EQ_HW_ALLOCATED)); in free_ctrlq()
3946 MPASS(!(ctrlq->eq.flags & EQ_SW_ALLOCATED)); in free_ctrlq()
3954 const int cng_ch_bits_log = sc->chip_params->cng_ch_bits_log; in t4_sge_set_conm_context()
3964 case -1: in t4_sge_set_conm_context()
4000 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); in t4_sge_set_conm_context()
4017 struct adapter *sc = vi->adapter; in alloc_rxq()
4018 if_t ifp = vi->ifp; in alloc_rxq()
4022 if (!(rxq->iq.flags & IQ_SW_ALLOCATED)) { in alloc_rxq()
4023 MPASS(!(rxq->iq.flags & IQ_HW_ALLOCATED)); in alloc_rxq()
4025 rc = tcp_lro_init_args(&rxq->lro, ifp, lro_entries, lro_mbufs); in alloc_rxq()
4028 MPASS(rxq->lro.ifp == ifp); /* also indicates LRO init'ed */ in alloc_rxq()
4030 rxq->ifp = ifp; in alloc_rxq()
4033 oid = SYSCTL_ADD_NODE(&vi->ctx, SYSCTL_CHILDREN(vi->rxq_oid), in alloc_rxq()
4035 "rx queue"); in alloc_rxq()
4037 init_iq(&rxq->iq, sc, vi->tmr_idx, vi->pktc_idx, vi->qsize_rxq, in alloc_rxq()
4041 rxq->iq.flags |= IQ_LRO_ENABLED; in alloc_rxq()
4044 rxq->iq.flags |= IQ_RX_TIMESTAMP; in alloc_rxq()
4045 snprintf(name, sizeof(name), "%s rxq%d-fl", in alloc_rxq()
4046 device_get_nameunit(vi->dev), idx); in alloc_rxq()
4047 init_fl(sc, &rxq->fl, vi->qsize_rxq / 8, maxp, name); in alloc_rxq()
4048 rc = alloc_iq_fl(vi, &rxq->iq, &rxq->fl, &vi->ctx, oid); in alloc_rxq()
4053 tcp_lro_free(&rxq->lro); in alloc_rxq()
4054 rxq->lro.ifp = NULL; in alloc_rxq()
4058 MPASS(rxq->iq.flags & IQ_SW_ALLOCATED); in alloc_rxq()
4059 add_rxq_sysctls(&vi->ctx, oid, rxq); in alloc_rxq()
4062 if (!(rxq->iq.flags & IQ_HW_ALLOCATED)) { in alloc_rxq()
4063 MPASS(rxq->iq.flags & IQ_SW_ALLOCATED); in alloc_rxq()
4064 rc = alloc_iq_fl_hwq(vi, &rxq->iq, &rxq->fl); in alloc_rxq()
4069 MPASS(rxq->iq.flags & IQ_HW_ALLOCATED); in alloc_rxq()
4072 sc->sge.iq_base = rxq->iq.abs_id - rxq->iq.cntxt_id; in alloc_rxq()
4074 KASSERT(rxq->iq.cntxt_id + sc->sge.iq_base == rxq->iq.abs_id, in alloc_rxq()
4076 KASSERT(sc->sge.iq_base == 0 || sc->flags & IS_VF, in alloc_rxq()
4077 ("PF with non-zero iq_base")); in alloc_rxq()
4083 FL_LOCK(&rxq->fl); in alloc_rxq()
4084 refill_fl(sc, &rxq->fl, 128); in alloc_rxq()
4085 FL_UNLOCK(&rxq->fl); in alloc_rxq()
4097 if (rxq->iq.flags & IQ_HW_ALLOCATED) { in free_rxq()
4098 MPASS(rxq->iq.flags & IQ_SW_ALLOCATED); in free_rxq()
4099 free_iq_fl_hwq(vi->adapter, &rxq->iq, &rxq->fl); in free_rxq()
4100 MPASS(!(rxq->iq.flags & IQ_HW_ALLOCATED)); in free_rxq()
4103 if (rxq->iq.flags & IQ_SW_ALLOCATED) { in free_rxq()
4104 MPASS(!(rxq->iq.flags & IQ_HW_ALLOCATED)); in free_rxq()
4106 tcp_lro_free(&rxq->lro); in free_rxq()
4108 free_iq_fl(vi->adapter, &rxq->iq, &rxq->fl); in free_rxq()
4109 MPASS(!(rxq->iq.flags & IQ_SW_ALLOCATED)); in free_rxq()
4126 &rxq->lro.lro_queued, 0, NULL); in add_rxq_sysctls()
4128 &rxq->lro.lro_flushed, 0, NULL); in add_rxq_sysctls()
4131 &rxq->rxcsum, "# of times hardware assisted with checksum"); in add_rxq_sysctls()
4133 &rxq->vlan_extraction, "# of times hardware extracted 802.1Q tag"); in add_rxq_sysctls()
4135 &rxq->vxlan_rxcsum, in add_rxq_sysctls()
4148 struct adapter *sc = vi->adapter; in alloc_ofld_rxq()
4152 if (!(ofld_rxq->iq.flags & IQ_SW_ALLOCATED)) { in alloc_ofld_rxq()
4153 MPASS(!(ofld_rxq->iq.flags & IQ_HW_ALLOCATED)); in alloc_ofld_rxq()
4156 oid = SYSCTL_ADD_NODE(&vi->ctx, in alloc_ofld_rxq()
4157 SYSCTL_CHILDREN(vi->ofld_rxq_oid), OID_AUTO, name, in alloc_ofld_rxq()
4158 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "offload rx queue"); in alloc_ofld_rxq()
4160 init_iq(&ofld_rxq->iq, sc, vi->ofld_tmr_idx, vi->ofld_pktc_idx, in alloc_ofld_rxq()
4161 vi->qsize_rxq, intr_idx, ofld_cong_drop, IQ_OFLD); in alloc_ofld_rxq()
4162 snprintf(name, sizeof(name), "%s ofld_rxq%d-fl", in alloc_ofld_rxq()
4163 device_get_nameunit(vi->dev), idx); in alloc_ofld_rxq()
4164 init_fl(sc, &ofld_rxq->fl, vi->qsize_rxq / 8, maxp, name); in alloc_ofld_rxq()
4165 rc = alloc_iq_fl(vi, &ofld_rxq->iq, &ofld_rxq->fl, &vi->ctx, in alloc_ofld_rxq()
4173 MPASS(ofld_rxq->iq.flags & IQ_SW_ALLOCATED); in alloc_ofld_rxq()
4174 ofld_rxq->rx_iscsi_ddp_setup_ok = counter_u64_alloc(M_WAITOK); in alloc_ofld_rxq()
4175 ofld_rxq->rx_iscsi_ddp_setup_error = in alloc_ofld_rxq()
4177 ofld_rxq->rx_nvme_ddp_setup_ok = counter_u64_alloc(M_WAITOK); in alloc_ofld_rxq()
4178 ofld_rxq->rx_nvme_ddp_setup_no_stag = in alloc_ofld_rxq()
4180 ofld_rxq->rx_nvme_ddp_setup_error = in alloc_ofld_rxq()
4182 ofld_rxq->rx_nvme_ddp_octets = counter_u64_alloc(M_WAITOK); in alloc_ofld_rxq()
4183 ofld_rxq->rx_nvme_ddp_pdus = counter_u64_alloc(M_WAITOK); in alloc_ofld_rxq()
4184 ofld_rxq->rx_nvme_fl_octets = counter_u64_alloc(M_WAITOK); in alloc_ofld_rxq()
4185 ofld_rxq->rx_nvme_fl_pdus = counter_u64_alloc(M_WAITOK); in alloc_ofld_rxq()
4186 ofld_rxq->rx_nvme_invalid_headers = counter_u64_alloc(M_WAITOK); in alloc_ofld_rxq()
4187 ofld_rxq->rx_nvme_header_digest_errors = in alloc_ofld_rxq()
4189 ofld_rxq->rx_nvme_data_digest_errors = in alloc_ofld_rxq()
4191 ofld_rxq->ddp_buffer_alloc = counter_u64_alloc(M_WAITOK); in alloc_ofld_rxq()
4192 ofld_rxq->ddp_buffer_reuse = counter_u64_alloc(M_WAITOK); in alloc_ofld_rxq()
4193 ofld_rxq->ddp_buffer_free = counter_u64_alloc(M_WAITOK); in alloc_ofld_rxq()
4194 add_ofld_rxq_sysctls(&vi->ctx, oid, ofld_rxq); in alloc_ofld_rxq()
4197 if (!(ofld_rxq->iq.flags & IQ_HW_ALLOCATED)) { in alloc_ofld_rxq()
4198 MPASS(ofld_rxq->iq.flags & IQ_SW_ALLOCATED); in alloc_ofld_rxq()
4199 rc = alloc_iq_fl_hwq(vi, &ofld_rxq->iq, &ofld_rxq->fl); in alloc_ofld_rxq()
4205 MPASS(ofld_rxq->iq.flags & IQ_HW_ALLOCATED); in alloc_ofld_rxq()
4216 if (ofld_rxq->iq.flags & IQ_HW_ALLOCATED) { in free_ofld_rxq()
4217 MPASS(ofld_rxq->iq.flags & IQ_SW_ALLOCATED); in free_ofld_rxq()
4218 free_iq_fl_hwq(vi->adapter, &ofld_rxq->iq, &ofld_rxq->fl); in free_ofld_rxq()
4219 MPASS(!(ofld_rxq->iq.flags & IQ_HW_ALLOCATED)); in free_ofld_rxq()
4222 if (ofld_rxq->iq.flags & IQ_SW_ALLOCATED) { in free_ofld_rxq()
4223 MPASS(!(ofld_rxq->iq.flags & IQ_HW_ALLOCATED)); in free_ofld_rxq()
4224 free_iq_fl(vi->adapter, &ofld_rxq->iq, &ofld_rxq->fl); in free_ofld_rxq()
4225 MPASS(!(ofld_rxq->iq.flags & IQ_SW_ALLOCATED)); in free_ofld_rxq()
4226 counter_u64_free(ofld_rxq->rx_iscsi_ddp_setup_ok); in free_ofld_rxq()
4227 counter_u64_free(ofld_rxq->rx_iscsi_ddp_setup_error); in free_ofld_rxq()
4228 counter_u64_free(ofld_rxq->rx_nvme_ddp_setup_ok); in free_ofld_rxq()
4229 counter_u64_free(ofld_rxq->rx_nvme_ddp_setup_no_stag); in free_ofld_rxq()
4230 counter_u64_free(ofld_rxq->rx_nvme_ddp_setup_error); in free_ofld_rxq()
4231 counter_u64_free(ofld_rxq->rx_nvme_ddp_octets); in free_ofld_rxq()
4232 counter_u64_free(ofld_rxq->rx_nvme_ddp_pdus); in free_ofld_rxq()
4233 counter_u64_free(ofld_rxq->rx_nvme_fl_octets); in free_ofld_rxq()
4234 counter_u64_free(ofld_rxq->rx_nvme_fl_pdus); in free_ofld_rxq()
4235 counter_u64_free(ofld_rxq->rx_nvme_invalid_headers); in free_ofld_rxq()
4236 counter_u64_free(ofld_rxq->rx_nvme_header_digest_errors); in free_ofld_rxq()
4237 counter_u64_free(ofld_rxq->rx_nvme_data_digest_errors); in free_ofld_rxq()
4238 counter_u64_free(ofld_rxq->ddp_buffer_alloc); in free_ofld_rxq()
4239 counter_u64_free(ofld_rxq->ddp_buffer_reuse); in free_ofld_rxq()
4240 counter_u64_free(ofld_rxq->ddp_buffer_free); in free_ofld_rxq()
4256 CTLFLAG_RD, &ofld_rxq->rx_aio_ddp_jobs, 0, in add_ofld_rxq_sysctls()
4259 CTLFLAG_RD, &ofld_rxq->rx_aio_ddp_octets, 0, in add_ofld_rxq_sysctls()
4262 "rx_toe_tls_records", CTLFLAG_RD, &ofld_rxq->rx_toe_tls_records, in add_ofld_rxq_sysctls()
4265 "rx_toe_tls_octets", CTLFLAG_RD, &ofld_rxq->rx_toe_tls_octets, in add_ofld_rxq_sysctls()
4268 "rx_toe_ddp_octets", CTLFLAG_RD, &ofld_rxq->rx_toe_ddp_octets, in add_ofld_rxq_sysctls()
4271 "ddp_buffer_alloc", CTLFLAG_RD, &ofld_rxq->ddp_buffer_alloc, in add_ofld_rxq_sysctls()
4274 "ddp_buffer_reuse", CTLFLAG_RD, &ofld_rxq->ddp_buffer_reuse, in add_ofld_rxq_sysctls()
4277 "ddp_buffer_free", CTLFLAG_RD, &ofld_rxq->ddp_buffer_free, in add_ofld_rxq_sysctls()
4285 CTLFLAG_RD, &ofld_rxq->rx_iscsi_ddp_setup_ok, in add_ofld_rxq_sysctls()
4288 CTLFLAG_RD, &ofld_rxq->rx_iscsi_ddp_setup_error, in add_ofld_rxq_sysctls()
4291 CTLFLAG_RD, &ofld_rxq->rx_iscsi_ddp_octets, 0, in add_ofld_rxq_sysctls()
4294 CTLFLAG_RD, &ofld_rxq->rx_iscsi_ddp_pdus, 0, in add_ofld_rxq_sysctls()
4297 CTLFLAG_RD, &ofld_rxq->rx_iscsi_fl_octets, 0, in add_ofld_rxq_sysctls()
4300 CTLFLAG_RD, &ofld_rxq->rx_iscsi_fl_pdus, 0, in add_ofld_rxq_sysctls()
4303 CTLFLAG_RD, &ofld_rxq->rx_iscsi_padding_errors, 0, in add_ofld_rxq_sysctls()
4306 CTLFLAG_RD, &ofld_rxq->rx_iscsi_header_digest_errors, 0, in add_ofld_rxq_sysctls()
4309 CTLFLAG_RD, &ofld_rxq->rx_iscsi_data_digest_errors, 0, in add_ofld_rxq_sysctls()
4317 CTLFLAG_RD, &ofld_rxq->rx_nvme_ddp_setup_ok, in add_ofld_rxq_sysctls()
4320 CTLFLAG_RD, &ofld_rxq->rx_nvme_ddp_setup_no_stag, in add_ofld_rxq_sysctls()
4323 CTLFLAG_RD, &ofld_rxq->rx_nvme_ddp_setup_error, in add_ofld_rxq_sysctls()
4326 CTLFLAG_RD, &ofld_rxq->rx_nvme_ddp_octets, in add_ofld_rxq_sysctls()
4329 CTLFLAG_RD, &ofld_rxq->rx_nvme_ddp_pdus, in add_ofld_rxq_sysctls()
4332 CTLFLAG_RD, &ofld_rxq->rx_nvme_fl_octets, in add_ofld_rxq_sysctls()
4335 CTLFLAG_RD, &ofld_rxq->rx_nvme_fl_pdus, in add_ofld_rxq_sysctls()
4338 CTLFLAG_RD, &ofld_rxq->rx_nvme_invalid_headers, in add_ofld_rxq_sysctls()
4341 CTLFLAG_RD, &ofld_rxq->rx_nvme_header_digest_errors, in add_ofld_rxq_sysctls()
4344 CTLFLAG_RD, &ofld_rxq->rx_nvme_data_digest_errors, in add_ofld_rxq_sysctls()
4365 ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq, int idx) in ctrl_eq_alloc() argument
4369 int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; in ctrl_eq_alloc()
4371 core = sc->params.tid_qid_sel_mask != 0 ? idx % sc->params.ncores : 0; in ctrl_eq_alloc()
4375 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) | in ctrl_eq_alloc()
4380 c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid)); in ctrl_eq_alloc()
4384 V_FW_EQ_CTRL_CMD_PCIECHN(eq->hw_port) | in ctrl_eq_alloc()
4385 F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid)); in ctrl_eq_alloc()
4392 c.eqaddr = htobe64(eq->ba); in ctrl_eq_alloc()
4394 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); in ctrl_eq_alloc()
4397 eq->port_id, rc); in ctrl_eq_alloc()
4401 eq->cntxt_id = G_FW_EQ_CTRL_CMD_EQID(be32toh(c.cmpliqid_eqid)); in ctrl_eq_alloc()
4402 eq->abs_id = G_FW_EQ_CTRL_CMD_PHYSEQID(be32toh(c.physeqid_pkd)); in ctrl_eq_alloc()
4403 cntxt_id = eq->cntxt_id - sc->sge.eq_start; in ctrl_eq_alloc()
4404 if (cntxt_id >= sc->sge.eqmap_sz) in ctrl_eq_alloc()
4405 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, in ctrl_eq_alloc()
4406 cntxt_id, sc->sge.eqmap_sz - 1); in ctrl_eq_alloc()
4407 sc->sge.eqmap[cntxt_id] = eq; in ctrl_eq_alloc()
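/*
 * Every EQ created here is recorded in sc->sge.eqmap, keyed by its
 * context id relative to sc->sge.eq_start.  handle_sge_egr_update()
 * further down uses the same map to translate the qid carried in a
 * CPL_SGE_EGR_UPDATE back into the sge_eq that needs attention.
 */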
4413 eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq, int idx) in eth_eq_alloc() argument
4417 int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; in eth_eq_alloc()
4419 core = sc->params.ncores > 1 ? idx % sc->params.ncores : 0; in eth_eq_alloc()
4423 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) | in eth_eq_alloc()
4429 F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid)); in eth_eq_alloc()
4432 V_FW_EQ_ETH_CMD_PCIECHN(eq->hw_port) | F_FW_EQ_ETH_CMD_FETCHRO | in eth_eq_alloc()
4433 V_FW_EQ_ETH_CMD_IQID(eq->iqid)); in eth_eq_alloc()
4439 c.eqaddr = htobe64(eq->ba); in eth_eq_alloc()
4441 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); in eth_eq_alloc()
4443 device_printf(vi->dev, in eth_eq_alloc()
4448 eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd)); in eth_eq_alloc()
4449 eq->abs_id = G_FW_EQ_ETH_CMD_PHYSEQID(be32toh(c.physeqid_pkd)); in eth_eq_alloc()
4450 cntxt_id = eq->cntxt_id - sc->sge.eq_start; in eth_eq_alloc()
4451 if (cntxt_id >= sc->sge.eqmap_sz) in eth_eq_alloc()
4452 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, in eth_eq_alloc()
4453 cntxt_id, sc->sge.eqmap_sz - 1); in eth_eq_alloc()
4454 sc->sge.eqmap[cntxt_id] = eq; in eth_eq_alloc()
4475 ofld_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq, in ofld_eq_alloc() argument
4480 int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; in ofld_eq_alloc()
4482 if (sc->params.tid_qid_sel_mask != 0) in ofld_eq_alloc()
4483 core = qidx_to_core(sc->params.ncores, vi->nofldtxq, idx); in ofld_eq_alloc()
4490 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(sc->pf) | in ofld_eq_alloc()
4497 V_FW_EQ_OFLD_CMD_PCIECHN(eq->hw_port) | in ofld_eq_alloc()
4498 F_FW_EQ_OFLD_CMD_FETCHRO | V_FW_EQ_OFLD_CMD_IQID(eq->iqid)); in ofld_eq_alloc()
4505 c.eqaddr = htobe64(eq->ba); in ofld_eq_alloc()
4507 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); in ofld_eq_alloc()
4509 device_printf(vi->dev, in ofld_eq_alloc()
4514 eq->cntxt_id = G_FW_EQ_OFLD_CMD_EQID(be32toh(c.eqid_pkd)); in ofld_eq_alloc()
4515 eq->abs_id = G_FW_EQ_OFLD_CMD_PHYSEQID(be32toh(c.physeqid_pkd)); in ofld_eq_alloc()
4516 cntxt_id = eq->cntxt_id - sc->sge.eq_start; in ofld_eq_alloc()
4517 if (cntxt_id >= sc->sge.eqmap_sz) in ofld_eq_alloc()
4518 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, in ofld_eq_alloc()
4519 cntxt_id, sc->sge.eqmap_sz - 1); in ofld_eq_alloc()
4520 sc->sge.eqmap[cntxt_id] = eq; in ofld_eq_alloc()
4528 alloc_eq(struct adapter *sc, struct sge_eq *eq, struct sysctl_ctx_list *ctx, in alloc_eq() argument
4534 MPASS(!(eq->flags & EQ_SW_ALLOCATED)); in alloc_eq()
4536 qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; in alloc_eq()
4538 rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map, &eq->ba, in alloc_eq()
4539 (void **)&eq->desc); in alloc_eq()
4543 add_eq_sysctls(sc, ctx, oid, eq); in alloc_eq()
4544 eq->flags |= EQ_SW_ALLOCATED; in alloc_eq()
4551 free_eq(struct adapter *sc, struct sge_eq *eq) in free_eq() argument
4553 MPASS(eq->flags & EQ_SW_ALLOCATED); in free_eq()
4554 if (eq->type == EQ_ETH) in free_eq()
4555 MPASS(eq->pidx == eq->cidx); in free_eq()
4557 free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc); in free_eq()
4558 mtx_destroy(&eq->eq_lock); in free_eq()
4559 bzero(eq, sizeof(*eq)); in free_eq()
4564 struct sysctl_oid *oid, struct sge_eq *eq) in add_eq_sysctls() argument
4568 SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, &eq->ba, in add_eq_sysctls()
4571 eq->sidx * EQ_ESIZE + sc->params.sge.spg_len, in add_eq_sysctls()
4574 &eq->abs_id, 0, "absolute id of the queue"); in add_eq_sysctls()
4576 &eq->cntxt_id, 0, "SGE context id of the queue"); in add_eq_sysctls()
4577 SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &eq->cidx, in add_eq_sysctls()
4579 SYSCTL_ADD_U16(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, &eq->pidx, in add_eq_sysctls()
4582 eq->sidx, "status page index"); in add_eq_sysctls()
4586 alloc_eq_hwq(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq, int idx) in alloc_eq_hwq() argument
4590 MPASS(!(eq->flags & EQ_HW_ALLOCATED)); in alloc_eq_hwq()
4592 eq->iqid = eq->iq->cntxt_id; in alloc_eq_hwq()
4593 eq->pidx = eq->cidx = eq->dbidx = 0; in alloc_eq_hwq()
4595 eq->equeqidx = 0; in alloc_eq_hwq()
4596 eq->doorbells = sc->doorbells; in alloc_eq_hwq()
4597 bzero(eq->desc, eq->sidx * EQ_ESIZE + sc->params.sge.spg_len); in alloc_eq_hwq()
4599 switch (eq->type) { in alloc_eq_hwq()
4601 rc = ctrl_eq_alloc(sc, eq, idx); in alloc_eq_hwq()
4605 rc = eth_eq_alloc(sc, vi, eq, idx); in alloc_eq_hwq()
4610 rc = ofld_eq_alloc(sc, vi, eq, idx); in alloc_eq_hwq()
4615 panic("%s: invalid eq type %d.", __func__, eq->type); in alloc_eq_hwq()
4619 eq->type, rc); in alloc_eq_hwq()
4623 if (isset(&eq->doorbells, DOORBELL_UDB) || in alloc_eq_hwq()
4624 isset(&eq->doorbells, DOORBELL_UDBWC) || in alloc_eq_hwq()
4625 isset(&eq->doorbells, DOORBELL_WCWR)) { in alloc_eq_hwq()
4626 uint32_t s_qpp = sc->params.sge.eq_s_qpp; in alloc_eq_hwq()
4627 uint32_t mask = (1 << s_qpp) - 1; in alloc_eq_hwq()
4630 udb = sc->udbs_base + UDBS_DB_OFFSET; in alloc_eq_hwq()
4631 udb += (eq->cntxt_id >> s_qpp) << PAGE_SHIFT; /* pg offset */ in alloc_eq_hwq()
4632 eq->udb_qid = eq->cntxt_id & mask; /* id in page */ in alloc_eq_hwq()
4633 if (eq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE) in alloc_eq_hwq()
4634 clrbit(&eq->doorbells, DOORBELL_WCWR); in alloc_eq_hwq()
4636 udb += eq->udb_qid << UDBS_SEG_SHIFT; /* seg offset */ in alloc_eq_hwq()
4637 eq->udb_qid = 0; in alloc_eq_hwq()
4639 eq->udb = (volatile void *)udb; in alloc_eq_hwq()
4642 eq->flags |= EQ_HW_ALLOCATED; in alloc_eq_hwq()
4647 free_eq_hwq(struct adapter *sc, struct vi_info *vi __unused, struct sge_eq *eq) in free_eq_hwq() argument
4651 MPASS(eq->flags & EQ_HW_ALLOCATED); in free_eq_hwq()
4653 switch (eq->type) { in free_eq_hwq()
4655 rc = -t4_ctrl_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id); in free_eq_hwq()
4658 rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id); in free_eq_hwq()
4662 rc = -t4_ofld_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id); in free_eq_hwq()
4666 panic("%s: invalid eq type %d.", __func__, eq->type); in free_eq_hwq()
4669 CH_ERR(sc, "failed to free eq (type %d): %d\n", eq->type, rc); in free_eq_hwq()
4672 eq->flags &= ~EQ_HW_ALLOCATED; in free_eq_hwq()
4681 struct sge_eq *eq = &wrq->eq; in alloc_wrq() local
4684 MPASS(!(eq->flags & EQ_SW_ALLOCATED)); in alloc_wrq()
4686 rc = alloc_eq(sc, eq, ctx, oid); in alloc_wrq()
4689 MPASS(eq->flags & EQ_SW_ALLOCATED); in alloc_wrq()
4692 wrq->adapter = sc; in alloc_wrq()
4693 TASK_INIT(&wrq->wrq_tx_task, 0, wrq_tx_drain, wrq); in alloc_wrq()
4694 TAILQ_INIT(&wrq->incomplete_wrs); in alloc_wrq()
4695 STAILQ_INIT(&wrq->wr_list); in alloc_wrq()
4696 wrq->nwr_pending = 0; in alloc_wrq()
4697 wrq->ndesc_needed = 0; in alloc_wrq()
4706 free_eq(sc, &wrq->eq); in free_wrq()
4707 MPASS(wrq->nwr_pending == 0); in free_wrq()
4708 MPASS(wrq->ndesc_needed == 0); in free_wrq()
4709 MPASS(TAILQ_EMPTY(&wrq->incomplete_wrs)); in free_wrq()
4710 MPASS(STAILQ_EMPTY(&wrq->wr_list)); in free_wrq()
4725 &wrq->tx_wrs_direct, "# of work requests (direct)"); in add_wrq_sysctls()
4727 &wrq->tx_wrs_copied, "# of work requests (copied)"); in add_wrq_sysctls()
4729 &wrq->tx_wrs_ss, "# of work requests (copied from scratch space)"); in add_wrq_sysctls()
4739 struct port_info *pi = vi->pi; in alloc_txq()
4740 struct adapter *sc = vi->adapter; in alloc_txq()
4741 struct sge_eq *eq = &txq->eq; in alloc_txq() local
4746 if (!(eq->flags & EQ_SW_ALLOCATED)) { in alloc_txq()
4747 MPASS(!(eq->flags & EQ_HW_ALLOCATED)); in alloc_txq()
4750 oid = SYSCTL_ADD_NODE(&vi->ctx, SYSCTL_CHILDREN(vi->txq_oid), in alloc_txq()
4754 iqidx = vi->first_rxq + (idx % vi->nrxq); in alloc_txq()
4756 device_get_nameunit(vi->dev), idx); in alloc_txq()
4757 init_eq(sc, &txq->eq, EQ_ETH, vi->qsize_txq, pi->port_id, in alloc_txq()
4758 &sc->sge.rxq[iqidx].iq, name); in alloc_txq()
4760 rc = mp_ring_alloc(&txq->r, eq->sidx, txq, eth_tx, in alloc_txq()
4761 can_resume_eth_tx, M_CXGBE, &eq->eq_lock, M_WAITOK); in alloc_txq()
4770 rc = alloc_eq(sc, eq, &vi->ctx, oid); in alloc_txq()
4773 mp_ring_free(txq->r); in alloc_txq()
4776 MPASS(eq->flags & EQ_SW_ALLOCATED); in alloc_txq()
4779 TASK_INIT(&txq->tx_reclaim_task, 0, tx_reclaim, eq); in alloc_txq()
4780 txq->ifp = vi->ifp; in alloc_txq()
4781 txq->gl = sglist_alloc(TX_SGL_SEGS, M_WAITOK); in alloc_txq()
4782 txq->sdesc = malloc(eq->sidx * sizeof(struct tx_sdesc), M_CXGBE, in alloc_txq()
4785 add_txq_sysctls(vi, &vi->ctx, oid, txq); in alloc_txq()
4788 if (!(eq->flags & EQ_HW_ALLOCATED)) { in alloc_txq()
4789 MPASS(eq->flags & EQ_SW_ALLOCATED); in alloc_txq()
4790 rc = alloc_eq_hwq(sc, vi, eq, idx); in alloc_txq()
4795 MPASS(eq->flags & EQ_HW_ALLOCATED); in alloc_txq()
4799 sc->sge.eq_base = eq->abs_id - eq->cntxt_id; in alloc_txq()
4801 KASSERT(eq->cntxt_id + sc->sge.eq_base == eq->abs_id, in alloc_txq()
4803 KASSERT(sc->sge.eq_base == 0 || sc->flags & IS_VF, in alloc_txq()
4804 ("PF with non-zero eq_base")); in alloc_txq()
4806 txp = &txq->txp; in alloc_txq()
4807 MPASS(nitems(txp->mb) >= sc->params.max_pkts_per_eth_tx_pkts_wr); in alloc_txq()
4808 txq->txp.max_npkt = min(nitems(txp->mb), in alloc_txq()
4809 sc->params.max_pkts_per_eth_tx_pkts_wr); in alloc_txq()
4810 if (vi->flags & TX_USES_VM_WR && !(sc->flags & IS_VF)) in alloc_txq()
4811 txq->txp.max_npkt--; in alloc_txq()
4813 if (vi->flags & TX_USES_VM_WR) in alloc_txq()
4814 txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) | in alloc_txq()
4815 V_TXPKT_INTF(pi->hw_port)); in alloc_txq()
4817 txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) | in alloc_txq()
4818 V_TXPKT_INTF(pi->hw_port) | V_TXPKT_PF(sc->pf) | in alloc_txq()
4819 V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld)); in alloc_txq()
4821 txq->tc_idx = -1; in alloc_txq()
4833 struct adapter *sc = vi->adapter; in free_txq()
4834 struct sge_eq *eq = &txq->eq; in free_txq() local
4836 if (eq->flags & EQ_HW_ALLOCATED) { in free_txq()
4837 MPASS(eq->flags & EQ_SW_ALLOCATED); in free_txq()
4838 free_eq_hwq(sc, NULL, eq); in free_txq()
4839 MPASS(!(eq->flags & EQ_HW_ALLOCATED)); in free_txq()
4842 if (eq->flags & EQ_SW_ALLOCATED) { in free_txq()
4843 MPASS(!(eq->flags & EQ_HW_ALLOCATED)); in free_txq()
4844 sglist_free(txq->gl); in free_txq()
4845 free(txq->sdesc, M_CXGBE); in free_txq()
4846 mp_ring_free(txq->r); in free_txq()
4847 free_eq(sc, eq); in free_txq()
4848 MPASS(!(eq->flags & EQ_SW_ALLOCATED)); in free_txq()
4863 sc = vi->adapter; in add_txq_sysctls()
4866 mp_ring_sysctls(txq->r, ctx, children); in add_txq_sysctls()
4869 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, txq - sc->sge.txq, in add_txq_sysctls()
4870 sysctl_tc, "I", "traffic class (-1 means none)"); in add_txq_sysctls()
4873 &txq->txcsum, "# of times hardware assisted with checksum"); in add_txq_sysctls()
4875 &txq->vlan_insertion, "# of times hardware inserted 802.1Q tag"); in add_txq_sysctls()
4877 &txq->tso_wrs, "# of TSO work requests"); in add_txq_sysctls()
4879 &txq->imm_wrs, "# of work requests with immediate data"); in add_txq_sysctls()
4881 &txq->sgl_wrs, "# of work requests with direct SGL"); in add_txq_sysctls()
4883 &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)"); in add_txq_sysctls()
4885 &txq->txpkts0_wrs, "# of txpkts (type 0) work requests"); in add_txq_sysctls()
4887 &txq->txpkts1_wrs, "# of txpkts (type 1) work requests"); in add_txq_sysctls()
4889 &txq->txpkts0_pkts, in add_txq_sysctls()
4892 &txq->txpkts1_pkts, in add_txq_sysctls()
4895 &txq->txpkts_flush, in add_txq_sysctls()
4896 "# of times txpkts had to be flushed out by an egress-update"); in add_txq_sysctls()
4898 &txq->raw_wrs, "# of raw work requests (non-packets)"); in add_txq_sysctls()
4900 &txq->vxlan_tso_wrs, "# of VXLAN TSO work requests"); in add_txq_sysctls()
4902 &txq->vxlan_txcsum, in add_txq_sysctls()
4908 CTLFLAG_RD, &txq->kern_tls_records, in add_txq_sysctls()
4911 CTLFLAG_RD, &txq->kern_tls_short, in add_txq_sysctls()
4914 CTLFLAG_RD, &txq->kern_tls_partial, in add_txq_sysctls()
4917 CTLFLAG_RD, &txq->kern_tls_full, in add_txq_sysctls()
4920 CTLFLAG_RD, &txq->kern_tls_octets, in add_txq_sysctls()
4923 CTLFLAG_RD, &txq->kern_tls_waste, in add_txq_sysctls()
4926 CTLFLAG_RD, &txq->kern_tls_header, in add_txq_sysctls()
4927 "# of NIC TLS header-only packets transmitted"); in add_txq_sysctls()
4929 CTLFLAG_RD, &txq->kern_tls_fin_short, in add_txq_sysctls()
4934 &txq->kern_tls_options, in add_txq_sysctls()
4935 "# of NIC TLS options-only packets transmitted"); in add_txq_sysctls()
4937 "kern_tls_fin", CTLFLAG_RD, &txq->kern_tls_fin, in add_txq_sysctls()
4938 "# of NIC TLS FIN-only packets transmitted"); in add_txq_sysctls()
4942 &txq->kern_tls_ghash_received, in add_txq_sysctls()
4946 &txq->kern_tls_ghash_requested, in add_txq_sysctls()
4950 &txq->kern_tls_lso, in add_txq_sysctls()
4954 &txq->kern_tls_partial_ghash, in add_txq_sysctls()
4958 &txq->kern_tls_splitmode, in add_txq_sysctls()
4962 &txq->kern_tls_trailer, in add_txq_sysctls()
4963 "# of NIC TLS trailer-only packets transmitted"); in add_txq_sysctls()
4966 CTLFLAG_RD, &txq->kern_tls_cbc, in add_txq_sysctls()
4967 "# of NIC TLS sessions using AES-CBC"); in add_txq_sysctls()
4969 CTLFLAG_RD, &txq->kern_tls_gcm, in add_txq_sysctls()
4970 "# of NIC TLS sessions using AES-GCM"); in add_txq_sysctls()
4983 struct port_info *pi = vi->pi; in alloc_ofld_txq()
4984 struct adapter *sc = vi->adapter; in alloc_ofld_txq()
4985 struct sge_eq *eq = &ofld_txq->wrq.eq; in alloc_ofld_txq() local
4990 MPASS(idx < vi->nofldtxq); in alloc_ofld_txq()
4992 if (!(eq->flags & EQ_SW_ALLOCATED)) { in alloc_ofld_txq()
4994 oid = SYSCTL_ADD_NODE(&vi->ctx, in alloc_ofld_txq()
4995 SYSCTL_CHILDREN(vi->ofld_txq_oid), OID_AUTO, name, in alloc_ofld_txq()
4999 device_get_nameunit(vi->dev), idx); in alloc_ofld_txq()
5000 if (vi->nofldrxq > 0) { in alloc_ofld_txq()
5001 iqidx = vi->first_ofld_rxq + (idx % vi->nofldrxq); in alloc_ofld_txq()
5002 init_eq(sc, eq, EQ_OFLD, vi->qsize_txq, pi->port_id, in alloc_ofld_txq()
5003 &sc->sge.ofld_rxq[iqidx].iq, name); in alloc_ofld_txq()
5005 iqidx = vi->first_rxq + (idx % vi->nrxq); in alloc_ofld_txq()
5006 init_eq(sc, eq, EQ_OFLD, vi->qsize_txq, pi->port_id, in alloc_ofld_txq()
5007 &sc->sge.rxq[iqidx].iq, name); in alloc_ofld_txq()
5010 rc = alloc_wrq(sc, vi, &ofld_txq->wrq, &vi->ctx, oid); in alloc_ofld_txq()
5017 MPASS(eq->flags & EQ_SW_ALLOCATED); in alloc_ofld_txq()
5020 ofld_txq->tx_iscsi_pdus = counter_u64_alloc(M_WAITOK); in alloc_ofld_txq()
5021 ofld_txq->tx_iscsi_octets = counter_u64_alloc(M_WAITOK); in alloc_ofld_txq()
5022 ofld_txq->tx_iscsi_iso_wrs = counter_u64_alloc(M_WAITOK); in alloc_ofld_txq()
5023 ofld_txq->tx_nvme_pdus = counter_u64_alloc(M_WAITOK); in alloc_ofld_txq()
5024 ofld_txq->tx_nvme_octets = counter_u64_alloc(M_WAITOK); in alloc_ofld_txq()
5025 ofld_txq->tx_nvme_iso_wrs = counter_u64_alloc(M_WAITOK); in alloc_ofld_txq()
5026 ofld_txq->tx_aio_jobs = counter_u64_alloc(M_WAITOK); in alloc_ofld_txq()
5027 ofld_txq->tx_aio_octets = counter_u64_alloc(M_WAITOK); in alloc_ofld_txq()
5028 ofld_txq->tx_toe_tls_records = counter_u64_alloc(M_WAITOK); in alloc_ofld_txq()
5029 ofld_txq->tx_toe_tls_octets = counter_u64_alloc(M_WAITOK); in alloc_ofld_txq()
5030 add_ofld_txq_sysctls(&vi->ctx, oid, ofld_txq); in alloc_ofld_txq()
5033 if (!(eq->flags & EQ_HW_ALLOCATED)) { in alloc_ofld_txq()
5034 MPASS(eq->flags & EQ_SW_ALLOCATED); in alloc_ofld_txq()
5035 MPASS(ofld_txq->wrq.nwr_pending == 0); in alloc_ofld_txq()
5036 MPASS(ofld_txq->wrq.ndesc_needed == 0); in alloc_ofld_txq()
5037 rc = alloc_eq_hwq(sc, vi, eq, idx); in alloc_ofld_txq()
5043 MPASS(eq->flags & EQ_HW_ALLOCATED); in alloc_ofld_txq()
5055 struct adapter *sc = vi->adapter; in free_ofld_txq()
5056 struct sge_eq *eq = &ofld_txq->wrq.eq; in free_ofld_txq() local
5058 if (eq->flags & EQ_HW_ALLOCATED) { in free_ofld_txq()
5059 MPASS(eq->flags & EQ_SW_ALLOCATED); in free_ofld_txq()
5060 free_eq_hwq(sc, NULL, eq); in free_ofld_txq()
5061 MPASS(!(eq->flags & EQ_HW_ALLOCATED)); in free_ofld_txq()
5064 if (eq->flags & EQ_SW_ALLOCATED) { in free_ofld_txq()
5065 MPASS(!(eq->flags & EQ_HW_ALLOCATED)); in free_ofld_txq()
5066 counter_u64_free(ofld_txq->tx_iscsi_pdus); in free_ofld_txq()
5067 counter_u64_free(ofld_txq->tx_iscsi_octets); in free_ofld_txq()
5068 counter_u64_free(ofld_txq->tx_iscsi_iso_wrs); in free_ofld_txq()
5069 counter_u64_free(ofld_txq->tx_nvme_pdus); in free_ofld_txq()
5070 counter_u64_free(ofld_txq->tx_nvme_octets); in free_ofld_txq()
5071 counter_u64_free(ofld_txq->tx_nvme_iso_wrs); in free_ofld_txq()
5072 counter_u64_free(ofld_txq->tx_aio_jobs); in free_ofld_txq()
5073 counter_u64_free(ofld_txq->tx_aio_octets); in free_ofld_txq()
5074 counter_u64_free(ofld_txq->tx_toe_tls_records); in free_ofld_txq()
5075 counter_u64_free(ofld_txq->tx_toe_tls_octets); in free_ofld_txq()
5076 free_wrq(sc, &ofld_txq->wrq); in free_ofld_txq()
5077 MPASS(!(eq->flags & EQ_SW_ALLOCATED)); in free_ofld_txq()
5093 CTLFLAG_RD, &ofld_txq->tx_iscsi_pdus, in add_ofld_txq_sysctls()
5096 CTLFLAG_RD, &ofld_txq->tx_iscsi_octets, in add_ofld_txq_sysctls()
5099 CTLFLAG_RD, &ofld_txq->tx_iscsi_iso_wrs, in add_ofld_txq_sysctls()
5102 CTLFLAG_RD, &ofld_txq->tx_nvme_pdus, in add_ofld_txq_sysctls()
5105 CTLFLAG_RD, &ofld_txq->tx_nvme_octets, in add_ofld_txq_sysctls()
5108 CTLFLAG_RD, &ofld_txq->tx_nvme_iso_wrs, in add_ofld_txq_sysctls()
5111 CTLFLAG_RD, &ofld_txq->tx_aio_jobs, in add_ofld_txq_sysctls()
5112 "# of zero-copy aio_write(2) jobs transmitted"); in add_ofld_txq_sysctls()
5114 CTLFLAG_RD, &ofld_txq->tx_aio_octets, in add_ofld_txq_sysctls()
5115 "# of payload octets in transmitted zero-copy aio_write(2) jobs"); in add_ofld_txq_sysctls()
5117 CTLFLAG_RD, &ofld_txq->tx_toe_tls_records, in add_ofld_txq_sysctls()
5120 CTLFLAG_RD, &ofld_txq->tx_toe_tls_octets, in add_ofld_txq_sysctls()
5133 *ba = error ? 0 : segs->ds_addr; in oneseg_dma_callback()
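/*
 * Freelist indices come in two granularities: fl->pidx counts individual
 * buffer slots, while fl->dbidx, fl->hw_cidx, and fl->sidx are kept in
 * units of 8 slots (the granularity the hardware producer index appears
 * to be advanced in).  That is why ring_fl_db() shifts pidx right by 3
 * and refill_fl() only considers the doorbell once (fl->pidx & 7) == 0.
 */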
5141 n = IDXDIFF(fl->pidx >> 3, fl->dbidx, fl->sidx); in ring_fl_db()
5145 v = fl->dbval | V_PIDX(n); in ring_fl_db()
5146 if (fl->udb) in ring_fl_db()
5147 *fl->udb = htole32(v); in ring_fl_db()
5149 t4_write_reg(sc, sc->sge_kdoorbell_reg, v); in ring_fl_db()
5150 IDXINCR(fl->dbidx, n, fl->sidx); in ring_fl_db()
5157 	 * Returns non-zero to indicate that this freelist should be added to the list of starving freelists.
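 *
 * The cluster already in a slot is recycled when possible: if no mbufs
 * still reference it (sd->nmbuf == 0) it is reused as-is, and otherwise
 * the cluster metadata refcount decides whether it can be taken back.  A
 * new cluster is allocated only when recycling fails, falling back to the
 * safe zone (fl->safe_zidx) if the preferred zone has run dry.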
5169 uint16_t max_pidx, zidx = fl->zidx; in refill_fl()
5170 uint16_t hw_cidx = fl->hw_cidx; /* stable snapshot */ in refill_fl()
5179 max_pidx = __predict_false(hw_cidx == 0) ? fl->sidx - 1 : hw_cidx - 1; in refill_fl()
5180 if (fl->pidx == max_pidx * 8) in refill_fl()
5183 d = &fl->desc[fl->pidx]; in refill_fl()
5184 sd = &fl->sdesc[fl->pidx]; in refill_fl()
5185 rxb = &sc->sge.rx_buf_info[zidx]; in refill_fl()
5189 if (sd->cl != NULL) { in refill_fl()
5191 if (sd->nmbuf == 0) { in refill_fl()
5199 fl->cl_fast_recycled++; in refill_fl()
5211 if (atomic_fetchadd_int(&clm->refcount, -1) == 1) { in refill_fl()
5212 fl->cl_recycled++; in refill_fl()
5216 sd->cl = NULL; /* gave up my reference */ in refill_fl()
5218 MPASS(sd->cl == NULL); in refill_fl()
5219 cl = uma_zalloc(rxb->zone, M_NOWAIT); in refill_fl()
5221 if (zidx != fl->safe_zidx) { in refill_fl()
5222 zidx = fl->safe_zidx; in refill_fl()
5223 rxb = &sc->sge.rx_buf_info[zidx]; in refill_fl()
5224 cl = uma_zalloc(rxb->zone, M_NOWAIT); in refill_fl()
5229 fl->cl_allocated++; in refill_fl()
5230 n--; in refill_fl()
5233 sd->cl = cl; in refill_fl()
5234 sd->zidx = zidx; in refill_fl()
5236 if (fl->flags & FL_BUF_PACKING) { in refill_fl()
5237 *d = htobe64(pa | rxb->hwidx2); in refill_fl()
5238 sd->moff = rxb->size2; in refill_fl()
5240 *d = htobe64(pa | rxb->hwidx1); in refill_fl()
5241 sd->moff = 0; in refill_fl()
5244 sd->nmbuf = 0; in refill_fl()
5247 if (__predict_false((++fl->pidx & 7) == 0)) { in refill_fl()
5248 uint16_t pidx = fl->pidx >> 3; in refill_fl()
5250 if (__predict_false(pidx == fl->sidx)) { in refill_fl()
5251 fl->pidx = 0; in refill_fl()
5253 sd = fl->sdesc; in refill_fl()
5254 d = fl->desc; in refill_fl()
5259 if (IDXDIFF(pidx, fl->dbidx, fl->sidx) >= 4) in refill_fl()
5264 if ((fl->pidx >> 3) != fl->dbidx) in refill_fl()
5267 return (FL_RUNNING_LOW(fl) && !(fl->flags & FL_STARVING)); in refill_fl()
5279 mtx_assert(&sc->sfl_lock, MA_OWNED); in refill_sfl()
5280 TAILQ_FOREACH_SAFE(fl, &sc->sfl, link, fl_temp) { in refill_sfl()
5283 if (FL_NOT_RUNNING_LOW(fl) || fl->flags & FL_DOOMED) { in refill_sfl()
5284 TAILQ_REMOVE(&sc->sfl, fl, link); in refill_sfl()
5285 fl->flags &= ~FL_STARVING; in refill_sfl()
5290 if (!TAILQ_EMPTY(&sc->sfl)) in refill_sfl()
5291 callout_schedule(&sc->sfl_callout, hz / 5); in refill_sfl()
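/*
 * A freelist that could not be refilled is parked on sc->sfl by
 * add_fl_to_sfl() further down and marked FL_STARVING; refill_sfl() runs
 * from sc->sfl_callout every hz / 5 ticks and keeps rescheduling itself
 * until every starving freelist has either been topped up or is being
 * torn down (FL_DOOMED).
 */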
5306 sd = fl->sdesc; in free_fl_buffers()
5307 for (i = 0; i < fl->sidx * 8; i++, sd++) { in free_fl_buffers()
5308 if (sd->cl == NULL) in free_fl_buffers()
5311 if (sd->nmbuf == 0) in free_fl_buffers()
5312 uma_zfree(sc->sge.rx_buf_info[sd->zidx].zone, sd->cl); in free_fl_buffers()
5313 else if (fl->flags & FL_BUF_PACKING) { in free_fl_buffers()
5315 if (atomic_fetchadd_int(&clm->refcount, -1) == 1) { in free_fl_buffers()
5316 uma_zfree(sc->sge.rx_buf_info[sd->zidx].zone, in free_fl_buffers()
5317 sd->cl); in free_fl_buffers()
5321 sd->cl = NULL; in free_fl_buffers()
5324 if (fl->flags & FL_BUF_RESUME) { in free_fl_buffers()
5325 m_freem(fl->m0); in free_fl_buffers()
5326 fl->flags &= ~FL_BUF_RESUME; in free_fl_buffers()
5344 KASSERT(gl->sg_nseg == mbuf_nsegs(m), in get_pkt_gl()
5346 mbuf_nsegs(m), gl->sg_nseg)); in get_pkt_gl()
5348 KASSERT(gl->sg_nseg > 0 && gl->sg_nseg <= max_nsegs_allowed(m, vm_wr), in get_pkt_gl()
5350 gl->sg_nseg, max_nsegs_allowed(m, vm_wr))); in get_pkt_gl()
5364 nsegs--; /* first segment is part of ulptx_sgl */ in txpkt_len16()
5383 nsegs--; /* first segment is part of ulptx_sgl */ in txpkt_vm_len16()
5425 nsegs--; /* first segment is part of ulptx_sgl */ in txpkts0_len16()
5452 n = ndesc * EQ_ESIZE - sizeof(struct fw_eth_tx_pkt_wr) - in imm_payload()
5475 MPASS(m->m_pkthdr.l2hlen >= ETHER_HDR_LEN); in csum_to_ctrl()
5476 MPASS(m->m_pkthdr.l3hlen >= sizeof(struct ip)); in csum_to_ctrl()
5479 MPASS(m->m_pkthdr.l4hlen > 0); in csum_to_ctrl()
5480 MPASS(m->m_pkthdr.l5hlen > 0); in csum_to_ctrl()
5481 MPASS(m->m_pkthdr.inner_l2hlen >= ETHER_HDR_LEN); in csum_to_ctrl()
5482 MPASS(m->m_pkthdr.inner_l3hlen >= sizeof(struct ip)); in csum_to_ctrl()
5484 l2hlen = m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen + in csum_to_ctrl()
5485 m->m_pkthdr.l4hlen + m->m_pkthdr.l5hlen + in csum_to_ctrl()
5486 m->m_pkthdr.inner_l2hlen - ETHER_HDR_LEN; in csum_to_ctrl()
5487 l3hlen = m->m_pkthdr.inner_l3hlen; in csum_to_ctrl()
5489 l2hlen = m->m_pkthdr.l2hlen - ETHER_HDR_LEN; in csum_to_ctrl()
5490 l3hlen = m->m_pkthdr.l3hlen; in csum_to_ctrl()
5497 if (m->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_INNER_IP_TCP | in csum_to_ctrl()
5500 else if (m->m_pkthdr.csum_flags & (CSUM_IP_UDP | CSUM_INNER_IP_UDP | in csum_to_ctrl()
5506 if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_IP_TCP | CSUM_IP_UDP | in csum_to_ctrl()
5510 MPASS(m->m_pkthdr.csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | in csum_to_ctrl()
5537 KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && in write_lso_cpl()
5538 m0->m_pkthdr.l4hlen > 0, in write_lso_cpl()
5544 V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen - ETHER_HDR_LEN) >> 2) | in write_lso_cpl()
5545 V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) | in write_lso_cpl()
5546 V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2); in write_lso_cpl()
5547 if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) in write_lso_cpl()
5551 lso->lso_ctrl = htobe32(ctrl); in write_lso_cpl()
5552 lso->ipid_ofst = htobe16(0); in write_lso_cpl()
5553 lso->mss = htobe16(m0->m_pkthdr.tso_segsz); in write_lso_cpl()
5554 lso->seqno_offset = htobe32(0); in write_lso_cpl()
5555 lso->len = htobe32(m0->m_pkthdr.len); in write_lso_cpl()
5566 KASSERT(m0->m_pkthdr.inner_l2hlen > 0 && in write_tnl_lso_cpl()
5567 m0->m_pkthdr.inner_l3hlen > 0 && m0->m_pkthdr.inner_l4hlen > 0 && in write_tnl_lso_cpl()
5568 m0->m_pkthdr.inner_l5hlen > 0, in write_tnl_lso_cpl()
5571 KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && in write_tnl_lso_cpl()
5572 m0->m_pkthdr.l4hlen > 0 && m0->m_pkthdr.l5hlen > 0, in write_tnl_lso_cpl()
5580 (m0->m_pkthdr.l2hlen - ETHER_HDR_LEN) >> 2) | in write_tnl_lso_cpl()
5581 V_CPL_TX_TNL_LSO_IPHDRLENOUT(m0->m_pkthdr.l3hlen >> 2) | in write_tnl_lso_cpl()
5583 if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) in write_tnl_lso_cpl()
5589 tnl_lso->op_to_IpIdSplitOut = htobe32(ctrl); in write_tnl_lso_cpl()
5590 tnl_lso->IpIdOffsetOut = 0; in write_tnl_lso_cpl()
5591 tnl_lso->UdpLenSetOut_to_TnlHdrLen = in write_tnl_lso_cpl()
5594 V_CPL_TX_TNL_LSO_TNLHDRLEN(m0->m_pkthdr.l2hlen + in write_tnl_lso_cpl()
5595 m0->m_pkthdr.l3hlen + m0->m_pkthdr.l4hlen + in write_tnl_lso_cpl()
5596 m0->m_pkthdr.l5hlen) | in write_tnl_lso_cpl()
5598 tnl_lso->ipsecen_to_rocev2 = 0; in write_tnl_lso_cpl()
5599 tnl_lso->roce_eth = 0; in write_tnl_lso_cpl()
5603 (m0->m_pkthdr.inner_l2hlen - ETHER_HDR_LEN) >> 2) | in write_tnl_lso_cpl()
5604 V_CPL_TX_TNL_LSO_IPHDRLEN(m0->m_pkthdr.inner_l3hlen >> 2) | in write_tnl_lso_cpl()
5605 V_CPL_TX_TNL_LSO_TCPHDRLEN(m0->m_pkthdr.inner_l4hlen >> 2); in write_tnl_lso_cpl()
5606 if (m0->m_pkthdr.inner_l3hlen == sizeof(struct ip6_hdr)) in write_tnl_lso_cpl()
5608 tnl_lso->Flow_to_TcpHdrLen = htobe32(ctrl); in write_tnl_lso_cpl()
5609 tnl_lso->IpIdOffset = 0; in write_tnl_lso_cpl()
5610 tnl_lso->IpIdSplit_to_Mss = in write_tnl_lso_cpl()
5611 htobe16(V_CPL_TX_TNL_LSO_MSS(m0->m_pkthdr.tso_segsz)); in write_tnl_lso_cpl()
5612 tnl_lso->TCPSeqOffset = 0; in write_tnl_lso_cpl()
5613 tnl_lso->EthLenOffset_Size = in write_tnl_lso_cpl()
5614 htobe32(V_CPL_TX_TNL_LSO_SIZE(m0->m_pkthdr.len)); in write_tnl_lso_cpl()
5631 struct sge_eq *eq; in write_txpkt_vm_wr() local
5644 pktlen = m0->m_pkthdr.len; in write_txpkt_vm_wr()
5651 eq = &txq->eq; in write_txpkt_vm_wr()
5652 wr = (void *)&eq->desc[eq->pidx]; in write_txpkt_vm_wr()
5653 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_VM_WR) | in write_txpkt_vm_wr()
5657 wr->equiq_to_len16 = htobe32(ctrl); in write_txpkt_vm_wr()
5658 wr->r3[0] = 0; in write_txpkt_vm_wr()
5659 wr->r3[1] = 0; in write_txpkt_vm_wr()
5668 m_copydata(m0, 0, VM_TX_L2HDR_LEN, wr->ethmacdst); in write_txpkt_vm_wr()
5672 txq->tso_wrs++; in write_txpkt_vm_wr()
5679 txq->txcsum++; /* some hardware assistance provided */ in write_txpkt_vm_wr()
5684 V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); in write_txpkt_vm_wr()
5685 txq->vlan_insertion++; in write_txpkt_vm_wr()
5686 } else if (sc->vlan_id) in write_txpkt_vm_wr()
5687 ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(sc->vlan_id); in write_txpkt_vm_wr()
5690 cpl->ctrl0 = txq->cpl_ctrl0; in write_txpkt_vm_wr()
5691 cpl->pack = 0; in write_txpkt_vm_wr()
5692 cpl->len = htobe16(pktlen); in write_txpkt_vm_wr()
5693 cpl->ctrl1 = htobe64(ctrl1); in write_txpkt_vm_wr()
5705 if (dst == (void *)&eq->desc[eq->sidx]) { in write_txpkt_vm_wr()
5706 dst = (void *)&eq->desc[0]; in write_txpkt_vm_wr()
5709 write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx); in write_txpkt_vm_wr()
5710 txq->sgl_wrs++; in write_txpkt_vm_wr()
5711 txq->txpkt_wrs++; in write_txpkt_vm_wr()
5713 txsd = &txq->sdesc[eq->pidx]; in write_txpkt_vm_wr()
5714 txsd->m = m0; in write_txpkt_vm_wr()
5715 txsd->desc_used = ndesc; in write_txpkt_vm_wr()
5730 struct sge_eq *eq = &txq->eq; in write_raw_wr() local
5741 for (m = m0; m != NULL; m = m->m_next) in write_raw_wr()
5742 copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len); in write_raw_wr()
5744 txq->raw_wrs++; in write_raw_wr()
5746 txsd = &txq->sdesc[eq->pidx]; in write_raw_wr()
5747 txsd->m = m0; in write_raw_wr()
5748 txsd->desc_used = ndesc; in write_raw_wr()
5764 struct sge_eq *eq; in write_txpkt_wr() local
5778 pktlen = m0->m_pkthdr.len; in write_txpkt_wr()
5797 eq = &txq->eq; in write_txpkt_wr()
5798 wr = (void *)&eq->desc[eq->pidx]; in write_txpkt_wr()
5799 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) | in write_txpkt_wr()
5803 wr->equiq_to_len16 = htobe32(ctrl); in write_txpkt_wr()
5804 wr->r3 = 0; in write_txpkt_wr()
5809 txq->vxlan_tso_wrs++; in write_txpkt_wr()
5812 txq->tso_wrs++; in write_txpkt_wr()
5822 txq->vxlan_txcsum++; in write_txpkt_wr()
5824 txq->txcsum++; in write_txpkt_wr()
5830 V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); in write_txpkt_wr()
5831 txq->vlan_insertion++; in write_txpkt_wr()
5835 cpl->ctrl0 = txq->cpl_ctrl0; in write_txpkt_wr()
5836 cpl->pack = 0; in write_txpkt_wr()
5837 cpl->len = htobe16(pktlen); in write_txpkt_wr()
5838 cpl->ctrl1 = htobe64(ctrl1); in write_txpkt_wr()
5842 if (__predict_false((uintptr_t)dst == (uintptr_t)&eq->desc[eq->sidx])) in write_txpkt_wr()
5843 dst = (caddr_t)&eq->desc[0]; in write_txpkt_wr()
5846 write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx); in write_txpkt_wr()
5847 txq->sgl_wrs++; in write_txpkt_wr()
5851 for (m = m0; m != NULL; m = m->m_next) { in write_txpkt_wr()
5852 copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len); in write_txpkt_wr()
5854 pktlen -= m->m_len; in write_txpkt_wr()
5860 txq->imm_wrs++; in write_txpkt_wr()
5863 txq->txpkt_wrs++; in write_txpkt_wr()
5865 txsd = &txq->sdesc[eq->pidx]; in write_txpkt_wr()
5866 txsd->m = m0; in write_txpkt_wr()
5867 txsd->desc_used = ndesc; in write_txpkt_wr()
5877 MPASS(txp->npkt > 0); in cmp_l2hdr()
5878 MPASS(m->m_len >= VM_TX_L2HDR_LEN); in cmp_l2hdr()
5880 if (txp->ethtype == be16toh(ETHERTYPE_VLAN)) in cmp_l2hdr()
5885 return (memcmp(m->m_data, &txp->ethmacdst[0], len) != 0); in cmp_l2hdr()
5891 MPASS(m->m_len >= VM_TX_L2HDR_LEN); in save_l2hdr()
5893 memcpy(&txp->ethmacdst[0], mtod(m, const void *), VM_TX_L2HDR_LEN); in save_l2hdr()
5900 struct txpkts *txp = &txq->txp; in add_to_txpkts_vf()
5905 *send = txp->npkt > 0; in add_to_txpkts_vf()
5914 if (txp->npkt > 0) { in add_to_txpkts_vf()
5915 MPASS(tx_len16_to_desc(txp->len16) <= avail); in add_to_txpkts_vf()
5916 MPASS(txp->npkt < txp->max_npkt); in add_to_txpkts_vf()
5917 MPASS(txp->wr_type == 1); /* VF supports type 1 only */ in add_to_txpkts_vf()
5919 if (tx_len16_to_desc(txp->len16 + txpkts1_len16()) > avail) { in add_to_txpkts_vf()
5924 if (m->m_pkthdr.len + txp->plen > 65535) in add_to_txpkts_vf()
5929 txp->len16 += txpkts1_len16(); in add_to_txpkts_vf()
5930 txp->plen += m->m_pkthdr.len; in add_to_txpkts_vf()
5931 txp->mb[txp->npkt++] = m; in add_to_txpkts_vf()
5932 if (txp->npkt == txp->max_npkt) in add_to_txpkts_vf()
5935 txp->len16 = howmany(sizeof(struct fw_eth_tx_pkts_vm_wr), 16) + in add_to_txpkts_vf()
5937 if (tx_len16_to_desc(txp->len16) > avail) in add_to_txpkts_vf()
5939 txp->npkt = 1; in add_to_txpkts_vf()
5940 txp->wr_type = 1; in add_to_txpkts_vf()
5941 txp->plen = m->m_pkthdr.len; in add_to_txpkts_vf()
5942 txp->mb[0] = m; in add_to_txpkts_vf()
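/*
 * add_to_txpkts_vf() above and add_to_txpkts_pf() below cap a coalesced
 * batch at 65535 payload bytes because the txpkts work request and its
 * CPLs carry the length in 16-bit fields (wr->plen and cpl->len are
 * htobe16'd).  They also stop once the batch would no longer fit in the
 * available descriptors or reaches txp->max_npkt packets.
 */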
5952 struct txpkts *txp = &txq->txp; in add_to_txpkts_pf()
5955 MPASS(!(sc->flags & IS_VF)); in add_to_txpkts_pf()
5960 *send = txp->npkt > 0; in add_to_txpkts_pf()
5966 if (txp->npkt == 0) { in add_to_txpkts_pf()
5967 if (m->m_pkthdr.len > 65535) in add_to_txpkts_pf()
5970 txp->wr_type = 0; in add_to_txpkts_pf()
5971 txp->len16 = in add_to_txpkts_pf()
5975 txp->wr_type = 1; in add_to_txpkts_pf()
5976 txp->len16 = in add_to_txpkts_pf()
5980 if (tx_len16_to_desc(txp->len16) > avail) in add_to_txpkts_pf()
5982 txp->npkt = 1; in add_to_txpkts_pf()
5983 txp->plen = m->m_pkthdr.len; in add_to_txpkts_pf()
5984 txp->mb[0] = m; in add_to_txpkts_pf()
5986 MPASS(tx_len16_to_desc(txp->len16) <= avail); in add_to_txpkts_pf()
5987 MPASS(txp->npkt < txp->max_npkt); in add_to_txpkts_pf()
5989 if (m->m_pkthdr.len + txp->plen > 65535) { in add_to_txpkts_pf()
5995 MPASS(txp->wr_type == 0 || txp->wr_type == 1); in add_to_txpkts_pf()
5996 if (txp->wr_type == 0) { in add_to_txpkts_pf()
5997 if (tx_len16_to_desc(txp->len16 + in add_to_txpkts_pf()
6000 txp->len16 += txpkts0_len16(nsegs); in add_to_txpkts_pf()
6004 if (tx_len16_to_desc(txp->len16 + txpkts1_len16()) > in add_to_txpkts_pf()
6007 txp->len16 += txpkts1_len16(); in add_to_txpkts_pf()
6010 txp->plen += m->m_pkthdr.len; in add_to_txpkts_pf()
6011 txp->mb[txp->npkt++] = m; in add_to_txpkts_pf()
6012 if (txp->npkt == txp->max_npkt) in add_to_txpkts_pf()
6028 const struct txpkts *txp = &txq->txp; in write_txpkts_wr()
6029 struct sge_eq *eq = &txq->eq; in write_txpkts_wr() local
6039 MPASS(txp->npkt > 0); in write_txpkts_wr()
6040 MPASS(txp->len16 <= howmany(SGE_MAX_WR_LEN, 16)); in write_txpkts_wr()
6042 wr = (void *)&eq->desc[eq->pidx]; in write_txpkts_wr()
6043 wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)); in write_txpkts_wr()
6044 wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(txp->len16)); in write_txpkts_wr()
6045 wr->plen = htobe16(txp->plen); in write_txpkts_wr()
6046 wr->npkt = txp->npkt; in write_txpkts_wr()
6047 wr->r3 = 0; in write_txpkts_wr()
6048 wr->type = txp->wr_type; in write_txpkts_wr()
6056 ndesc = tx_len16_to_desc(txp->len16); in write_txpkts_wr()
6058 checkwrap = eq->sidx - ndesc < eq->pidx; in write_txpkts_wr()
6059 for (i = 0; i < txp->npkt; i++) { in write_txpkts_wr()
6060 m = txp->mb[i]; in write_txpkts_wr()
6061 if (txp->wr_type == 0) { in write_txpkts_wr()
6067 ulpmc->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) | in write_txpkts_wr()
6068 V_ULP_TXPKT_DEST(0) | V_ULP_TXPKT_FID(eq->iqid)); in write_txpkts_wr()
6069 ulpmc->len = htobe32(txpkts0_len16(mbuf_nsegs(m))); in write_txpkts_wr()
6073 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) | in write_txpkts_wr()
6075 ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core)); in write_txpkts_wr()
6079 (uintptr_t)cpl == (uintptr_t)&eq->desc[eq->sidx]) in write_txpkts_wr()
6080 cpl = (void *)&eq->desc[0]; in write_txpkts_wr()
6090 txq->vxlan_txcsum++; in write_txpkts_wr()
6092 txq->txcsum++; in write_txpkts_wr()
6098 V_TXPKT_VLAN(m->m_pkthdr.ether_vtag); in write_txpkts_wr()
6099 txq->vlan_insertion++; in write_txpkts_wr()
6103 cpl->ctrl0 = txq->cpl_ctrl0; in write_txpkts_wr()
6104 cpl->pack = 0; in write_txpkts_wr()
6105 cpl->len = htobe16(m->m_pkthdr.len); in write_txpkts_wr()
6106 cpl->ctrl1 = htobe64(ctrl1); in write_txpkts_wr()
6110 (uintptr_t)flitp == (uintptr_t)&eq->desc[eq->sidx]) in write_txpkts_wr()
6111 flitp = (void *)&eq->desc[0]; in write_txpkts_wr()
6116 last->m_nextpkt = m; in write_txpkts_wr()
6120 txq->sgl_wrs++; in write_txpkts_wr()
6121 if (txp->wr_type == 0) { in write_txpkts_wr()
6122 txq->txpkts0_pkts += txp->npkt; in write_txpkts_wr()
6123 txq->txpkts0_wrs++; in write_txpkts_wr()
6125 txq->txpkts1_pkts += txp->npkt; in write_txpkts_wr()
6126 txq->txpkts1_wrs++; in write_txpkts_wr()
6129 txsd = &txq->sdesc[eq->pidx]; in write_txpkts_wr()
6130 txsd->m = txp->mb[0]; in write_txpkts_wr()
6131 txsd->desc_used = ndesc; in write_txpkts_wr()
6139 const struct txpkts *txp = &txq->txp; in write_txpkts_vm_wr()
6140 struct sge_eq *eq = &txq->eq; in write_txpkts_vm_wr() local
6150 MPASS(txp->npkt > 0); in write_txpkts_vm_wr()
6151 MPASS(txp->wr_type == 1); /* VF supports type 1 only */ in write_txpkts_vm_wr()
6152 MPASS(txp->mb[0] != NULL); in write_txpkts_vm_wr()
6153 MPASS(txp->len16 <= howmany(SGE_MAX_WR_LEN, 16)); in write_txpkts_vm_wr()
6155 wr = (void *)&eq->desc[eq->pidx]; in write_txpkts_vm_wr()
6156 wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_VM_WR)); in write_txpkts_vm_wr()
6157 wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(txp->len16)); in write_txpkts_vm_wr()
6158 wr->r3 = 0; in write_txpkts_vm_wr()
6159 wr->plen = htobe16(txp->plen); in write_txpkts_vm_wr()
6160 wr->npkt = txp->npkt; in write_txpkts_vm_wr()
6161 wr->r4 = 0; in write_txpkts_vm_wr()
6162 memcpy(&wr->ethmacdst[0], &txp->ethmacdst[0], 16); in write_txpkts_vm_wr()
6170 ndesc = tx_len16_to_desc(txp->len16); in write_txpkts_vm_wr()
6172 for (i = 0; i < txp->npkt; i++) { in write_txpkts_vm_wr()
6173 m = txp->mb[i]; in write_txpkts_vm_wr()
6174 if (i & 1 && (uintptr_t)flitp == (uintptr_t)&eq->desc[eq->sidx]) in write_txpkts_vm_wr()
6175 flitp = &eq->desc[0]; in write_txpkts_vm_wr()
6181 txq->txcsum++; /* some hardware assistance provided */ in write_txpkts_vm_wr()
6186 V_TXPKT_VLAN(m->m_pkthdr.ether_vtag); in write_txpkts_vm_wr()
6187 txq->vlan_insertion++; in write_txpkts_vm_wr()
6188 } else if (sc->vlan_id) in write_txpkts_vm_wr()
6189 ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(sc->vlan_id); in write_txpkts_vm_wr()
6192 cpl->ctrl0 = txq->cpl_ctrl0; in write_txpkts_vm_wr()
6193 cpl->pack = 0; in write_txpkts_vm_wr()
6194 cpl->len = htobe16(m->m_pkthdr.len); in write_txpkts_vm_wr()
6195 cpl->ctrl1 = htobe64(ctrl1); in write_txpkts_vm_wr()
6202 last->m_nextpkt = m; in write_txpkts_vm_wr()
6206 txq->sgl_wrs++; in write_txpkts_vm_wr()
6207 txq->txpkts1_pkts += txp->npkt; in write_txpkts_vm_wr()
6208 txq->txpkts1_wrs++; in write_txpkts_vm_wr()
6210 txsd = &txq->sdesc[eq->pidx]; in write_txpkts_vm_wr()
6211 txsd->m = txp->mb[0]; in write_txpkts_vm_wr()
6212 txsd->desc_used = ndesc; in write_txpkts_vm_wr()
6224 struct sge_eq *eq = &txq->eq; in write_gl_to_txd() local
6225 struct sglist *gl = txq->gl; in write_gl_to_txd()
6233 MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]); in write_gl_to_txd()
6234 MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]); in write_gl_to_txd()
6237 nsegs = gl->sg_nseg; in write_gl_to_txd()
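/*
 * A ulptx_sgl needs one flit for cmd_nsge/len0, one for addr0, and then
 * three flits for each additional pair of segments; a lone trailing
 * segment takes two flits, with its unused length slot zeroed further
 * down.  That is what the flit count computed next adds up to.
 */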
6240 nflits = (3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2; in write_gl_to_txd()
6242 wrap = (__be64 *)(&eq->desc[eq->sidx]); in write_gl_to_txd()
6243 seg = &gl->sg_segs[0]; in write_gl_to_txd()
6252 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | in write_gl_to_txd()
6254 usgl->len0 = htobe32(seg->ss_len); in write_gl_to_txd()
6255 usgl->addr0 = htobe64(seg->ss_paddr); in write_gl_to_txd()
6262 for (i = 0; i < nsegs - 1; i++, seg++) { in write_gl_to_txd()
6263 usgl->sge[i / 2].len[i & 1] = htobe32(seg->ss_len); in write_gl_to_txd()
6264 usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ss_paddr); in write_gl_to_txd()
6267 usgl->sge[i / 2].len[1] = htobe32(0); in write_gl_to_txd()
6275 for (i = 0; i < nflits - 2; i++) { in write_gl_to_txd()
6277 flitp = (void *)eq->desc; in write_gl_to_txd()
6278 *flitp++ = get_flit(seg, nsegs - 1, i); in write_gl_to_txd()
6289 *to = (void *)eq->desc; in write_gl_to_txd()
6295 copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len) in copy_to_txd() argument
6298 MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]); in copy_to_txd()
6299 MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]); in copy_to_txd()
6302 (uintptr_t)&eq->desc[eq->sidx])) { in copy_to_txd()
6306 int portion = (uintptr_t)&eq->desc[eq->sidx] - (uintptr_t)(*to); in copy_to_txd()
6310 portion = len - portion; /* remaining */ in copy_to_txd()
6311 bcopy(from, (void *)eq->desc, portion); in copy_to_txd()
6312 (*to) = (caddr_t)eq->desc + portion; in copy_to_txd()
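/*
 * ring_eq_db() notifies the hardware of new descriptors using whichever
 * doorbell mechanism is still enabled for this EQ (ffs() on the
 * eq->doorbells bitmap picks one): a write to the queue's user doorbell,
 * a write-combined copy of the whole descriptor (only valid for
 * single-descriptor updates, per the KASSERT), or the kernel doorbell
 * register.
 */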
6317 ring_eq_db(struct adapter *sc, struct sge_eq *eq, u_int n) in ring_eq_db() argument
6323 db = eq->doorbells; in ring_eq_db()
6328 switch (ffs(db) - 1) { in ring_eq_db()
6330 *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n)); in ring_eq_db()
6342 KASSERT(eq->udb_qid == 0 && n == 1, in ring_eq_db()
6343 ("%s: inappropriate doorbell (0x%x, %d, %d) for eq %p", in ring_eq_db()
6344 __func__, eq->doorbells, n, eq->dbidx, eq)); in ring_eq_db()
6346 dst = (volatile void *)((uintptr_t)eq->udb + UDBS_WR_OFFSET - in ring_eq_db()
6348 i = eq->dbidx; in ring_eq_db()
6349 src = (void *)&eq->desc[i]; in ring_eq_db()
6350 while (src != (void *)&eq->desc[i + 1]) in ring_eq_db()
6357 *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n)); in ring_eq_db()
6362 t4_write_reg(sc, sc->sge_kdoorbell_reg, in ring_eq_db()
6363 V_QID(eq->cntxt_id) | V_PIDX(n)); in ring_eq_db()
6367 IDXINCR(eq->dbidx, n, eq->sidx); in ring_eq_db()
6371 reclaimable_tx_desc(struct sge_eq *eq) in reclaimable_tx_desc() argument
6375 hw_cidx = read_hw_cidx(eq); in reclaimable_tx_desc()
6376 return (IDXDIFF(hw_cidx, eq->cidx, eq->sidx)); in reclaimable_tx_desc()
6380 total_available_tx_desc(struct sge_eq *eq) in total_available_tx_desc() argument
6384 hw_cidx = read_hw_cidx(eq); in total_available_tx_desc()
6385 pidx = eq->pidx; in total_available_tx_desc()
6388 return (eq->sidx - 1); in total_available_tx_desc()
6390 return (IDXDIFF(hw_cidx, pidx, eq->sidx) - 1); in total_available_tx_desc()
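/*
 * The EQ status page sits in the ring immediately after the last real
 * descriptor (&eq->desc[eq->sidx]); the hardware writes its consumer
 * index there, and read_hw_cidx() snapshots it so that the reclaim and
 * available-descriptor calculations above work without touching any
 * registers.
 */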
6394 read_hw_cidx(struct sge_eq *eq) in read_hw_cidx() argument
6396 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; in read_hw_cidx()
6397 uint16_t cidx = spg->cidx; /* stable snapshot */ in read_hw_cidx()
6409 struct sge_eq *eq = &txq->eq; in reclaim_tx_descs() local
6416 can_reclaim = reclaimable_tx_desc(eq); in reclaim_tx_descs()
6421 txsd = &txq->sdesc[eq->cidx]; in reclaim_tx_descs()
6422 ndesc = txsd->desc_used; in reclaim_tx_descs()
6430 __func__, eq->cidx)); in reclaim_tx_descs()
6432 for (m = txsd->m; m != NULL; m = nextpkt) { in reclaim_tx_descs()
6433 nextpkt = m->m_nextpkt; in reclaim_tx_descs()
6434 m->m_nextpkt = NULL; in reclaim_tx_descs()
6438 can_reclaim -= ndesc; in reclaim_tx_descs()
6439 IDXINCR(eq->cidx, ndesc, eq->sidx); in reclaim_tx_descs()
6449 struct sge_eq *eq = &txq->eq; in tx_reclaim() local
6455 if (eq->cidx == eq->pidx) in tx_reclaim()
6456 eq->equeqidx = eq->pidx; in tx_reclaim()
6488 int i, zidx = -1; in find_refill_source()
6489 struct rx_buf_info *rxb = &sc->sge.rx_buf_info[0]; in find_refill_source()
6493 if (rxb->hwidx2 == -1) in find_refill_source()
6495 if (rxb->size1 < PAGE_SIZE && in find_refill_source()
6496 rxb->size1 < largest_rx_cluster) in find_refill_source()
6498 if (rxb->size1 > largest_rx_cluster) in find_refill_source()
6500 MPASS(rxb->size1 - rxb->size2 >= CL_METADATA_SIZE); in find_refill_source()
6501 if (rxb->size2 >= maxp) in find_refill_source()
6507 if (rxb->hwidx1 == -1) in find_refill_source()
6509 if (rxb->size1 > largest_rx_cluster) in find_refill_source()
6511 if (rxb->size1 >= maxp) in find_refill_source()
6523 mtx_lock(&sc->sfl_lock); in add_fl_to_sfl()
6525 if ((fl->flags & FL_DOOMED) == 0) { in add_fl_to_sfl()
6526 fl->flags |= FL_STARVING; in add_fl_to_sfl()
6527 TAILQ_INSERT_TAIL(&sc->sfl, fl, link); in add_fl_to_sfl()
6528 callout_reset(&sc->sfl_callout, hz / 5, refill_sfl, sc); in add_fl_to_sfl()
6531 mtx_unlock(&sc->sfl_lock); in add_fl_to_sfl()
6535 handle_wrq_egr_update(struct adapter *sc, struct sge_eq *eq) in handle_wrq_egr_update() argument
6537 struct sge_wrq *wrq = (void *)eq; in handle_wrq_egr_update()
6539 atomic_readandclear_int(&eq->equiq); in handle_wrq_egr_update()
6540 taskqueue_enqueue(sc->tq[eq->port_id], &wrq->wrq_tx_task); in handle_wrq_egr_update()
6544 handle_eth_egr_update(struct adapter *sc, struct sge_eq *eq) in handle_eth_egr_update() argument
6546 struct sge_txq *txq = (void *)eq; in handle_eth_egr_update()
6548 MPASS(eq->type == EQ_ETH); in handle_eth_egr_update()
6550 atomic_readandclear_int(&eq->equiq); in handle_eth_egr_update()
6551 if (mp_ring_is_idle(txq->r)) in handle_eth_egr_update()
6552 taskqueue_enqueue(sc->tq[eq->port_id], &txq->tx_reclaim_task); in handle_eth_egr_update()
6554 mp_ring_check_drainage(txq->r, 64); in handle_eth_egr_update()
6562 unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid)); in handle_sge_egr_update()
6563 struct adapter *sc = iq->adapter; in handle_sge_egr_update()
6564 struct sge *s = &sc->sge; in handle_sge_egr_update()
6565 struct sge_eq *eq; in handle_sge_egr_update() local
6571 rss->opcode)); in handle_sge_egr_update()
6573 eq = s->eqmap[qid - s->eq_start - s->eq_base]; in handle_sge_egr_update()
6574 (*h[eq->type])(sc, eq); in handle_sge_egr_update()
6586 struct adapter *sc = iq->adapter; in handle_fw_msg()
6590 rss->opcode)); in handle_fw_msg()
6592 if (cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL) { in handle_fw_msg()
6595 rss2 = (const struct rss_header *)&cpl->data[0]; in handle_fw_msg()
6596 return (t4_cpl_handler[rss2->opcode](iq, rss2, m)); in handle_fw_msg()
6599 return (t4_fw_msg_handler[cpl->type](sc, &cpl->data[0])); in handle_fw_msg()
6603 * t4_handle_wrerr_rpl - process a FW work request error message
6617 device_get_nameunit(adap->dev), opcode); in t4_handle_wrerr_rpl()
6620 log(LOG_ERR, "%s: FW_ERROR (%s) ", device_get_nameunit(adap->dev), in t4_handle_wrerr_rpl()
6621 G_FW_ERROR_CMD_FATAL(be32toh(e->op_to_type)) ? "fatal" : in t4_handle_wrerr_rpl()
6622 "non-fatal"); in t4_handle_wrerr_rpl()
6623 switch (G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type))) { in t4_handle_wrerr_rpl()
6626 for (i = 0; i < nitems(e->u.exception.info); i++) in t4_handle_wrerr_rpl()
6628 be32toh(e->u.exception.info[i])); in t4_handle_wrerr_rpl()
6633 be32toh(e->u.hwmodule.regaddr), in t4_handle_wrerr_rpl()
6634 be32toh(e->u.hwmodule.regval)); in t4_handle_wrerr_rpl()
6638 be16toh(e->u.wr.cidx), in t4_handle_wrerr_rpl()
6639 G_FW_ERROR_CMD_PFN(be16toh(e->u.wr.pfn_vfn)), in t4_handle_wrerr_rpl()
6640 G_FW_ERROR_CMD_VFN(be16toh(e->u.wr.pfn_vfn)), in t4_handle_wrerr_rpl()
6641 be32toh(e->u.wr.eqid)); in t4_handle_wrerr_rpl()
6642 for (i = 0; i < nitems(e->u.wr.wrhdr); i++) in t4_handle_wrerr_rpl()
6644 e->u.wr.wrhdr[i]); in t4_handle_wrerr_rpl()
6649 be16toh(e->u.acl.cidx), in t4_handle_wrerr_rpl()
6650 G_FW_ERROR_CMD_PFN(be16toh(e->u.acl.pfn_vfn)), in t4_handle_wrerr_rpl()
6651 G_FW_ERROR_CMD_VFN(be16toh(e->u.acl.pfn_vfn)), in t4_handle_wrerr_rpl()
6652 be32toh(e->u.acl.eqid), in t4_handle_wrerr_rpl()
6653 G_FW_ERROR_CMD_MV(be16toh(e->u.acl.mv_pkd)) ? "vlanid" : in t4_handle_wrerr_rpl()
6655 for (i = 0; i < nitems(e->u.acl.val); i++) in t4_handle_wrerr_rpl()
6656 log(LOG_ERR, " %02x", e->u.acl.val[i]); in t4_handle_wrerr_rpl()
6661 G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type))); in t4_handle_wrerr_rpl()
6670 struct rx_buf_info *rxb = &sc->sge.rx_buf_info[0]; in bufidx_used()
6674 if (rxb->size1 > largest_rx_cluster) in bufidx_used()
6676 if (rxb->hwidx1 == idx || rxb->hwidx2 == idx) in bufidx_used()
6687 struct sge_params *sp = &sc->params.sge; in sysctl_bufsizes()
6699 sbuf_printf(&sb, "%u%c ", sp->sge_fl_buffer_size[i], c); in sysctl_bufsizes()
6725 nsegs--; /* first segment is part of ulptx_sgl */ in txpkt_eo_len16()
6746 u_int pfvf = pi->adapter->pf << S_FW_VIID_PFN; in send_etid_flowc_wr()
6749 mtx_assert(&cst->lock, MA_OWNED); in send_etid_flowc_wr()
6750 MPASS((cst->flags & (EO_FLOWC_PENDING | EO_FLOWC_RPL_PENDING)) == in send_etid_flowc_wr()
6753 flowc = start_wrq_wr(&cst->eo_txq->wrq, ETID_FLOWC_LEN16, &cookie); in send_etid_flowc_wr()
6758 flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) | in send_etid_flowc_wr()
6760 flowc->flowid_len16 = htonl(V_FW_WR_LEN16(ETID_FLOWC_LEN16) | in send_etid_flowc_wr()
6761 V_FW_WR_FLOWID(cst->etid)); in send_etid_flowc_wr()
6762 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; in send_etid_flowc_wr()
6763 flowc->mnemval[0].val = htobe32(pfvf); in send_etid_flowc_wr()
6765 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; in send_etid_flowc_wr()
6766 flowc->mnemval[1].val = htobe32(pi->hw_port); in send_etid_flowc_wr()
6767 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; in send_etid_flowc_wr()
6768 flowc->mnemval[2].val = htobe32(pi->hw_port); in send_etid_flowc_wr()
6769 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; in send_etid_flowc_wr()
6770 flowc->mnemval[3].val = htobe32(cst->iqid); in send_etid_flowc_wr()
6771 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_EOSTATE; in send_etid_flowc_wr()
6772 flowc->mnemval[4].val = htobe32(FW_FLOWC_MNEM_EOSTATE_ESTABLISHED); in send_etid_flowc_wr()
6773 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS; in send_etid_flowc_wr()
6774 flowc->mnemval[5].val = htobe32(cst->schedcl); in send_etid_flowc_wr()
6776 commit_wrq_wr(&cst->eo_txq->wrq, flowc, &cookie); in send_etid_flowc_wr()
6778 cst->flags &= ~EO_FLOWC_PENDING; in send_etid_flowc_wr()
6779 cst->flags |= EO_FLOWC_RPL_PENDING; in send_etid_flowc_wr()
6780 MPASS(cst->tx_credits >= ETID_FLOWC_LEN16); /* flowc is first WR. */ in send_etid_flowc_wr()
6781 cst->tx_credits -= ETID_FLOWC_LEN16; in send_etid_flowc_wr()
6795 mtx_assert(&cst->lock, MA_OWNED); in send_etid_flush_wr()
6797 flowc = start_wrq_wr(&cst->eo_txq->wrq, ETID_FLUSH_LEN16, &cookie); in send_etid_flush_wr()
6802 flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) | in send_etid_flush_wr()
6804 flowc->flowid_len16 = htobe32(V_FW_WR_LEN16(ETID_FLUSH_LEN16) | in send_etid_flush_wr()
6805 V_FW_WR_FLOWID(cst->etid)); in send_etid_flush_wr()
6807 commit_wrq_wr(&cst->eo_txq->wrq, flowc, &cookie); in send_etid_flush_wr()
6809 cst->flags |= EO_FLUSH_RPL_PENDING; in send_etid_flush_wr()
6810 MPASS(cst->tx_credits >= ETID_FLUSH_LEN16); in send_etid_flush_wr()
6811 cst->tx_credits -= ETID_FLUSH_LEN16; in send_etid_flush_wr()
6812 cst->ncompl++; in send_etid_flush_wr()
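/*
 * These send tags account for EQ space in 16-byte credits: the FLOWC and
 * FLUSH work requests above consume ETID_FLOWC_LEN16 and ETID_FLUSH_LEN16
 * credits, and ethofld_tx() below only dequeues a pending mbuf once
 * cst->tx_credits can cover its work request, asking the firmware for a
 * completion when too much uncredited work is outstanding.
 */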
        mtx_assert(&cst->lock, MA_OWNED);
        KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 &&
            m0->m_pkthdr.l4hlen > 0,
            ("%s: ethofld mbuf %p is missing header lengths", __func__, m0));

        len16 = mbuf_eo_len16(m0);
        nsegs = mbuf_eo_nsegs(m0);
        pktlen = m0->m_pkthdr.len;
        ctrl = sizeof(struct cpl_tx_pkt_core);
        if (needs_tso(m0))
                ctrl += sizeof(struct cpl_tx_pkt_lso_core);
        immhdrs = m0->m_pkthdr.l2hlen + m0->m_pkthdr.l3hlen + m0->m_pkthdr.l4hlen;
        ctrl += immhdrs;

        wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_EO_WR) |
            V_FW_ETH_TX_EO_WR_IMMDLEN(ctrl) | V_FW_WR_COMPL(!!compl));
        wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(len16) |
            V_FW_WR_FLOWID(cst->etid));
        wr->r3 = 0;

        if (needs_udp_csum(m0)) {
                /* UDP ratelimiting. */
                wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG;
                wr->u.udpseg.ethlen = m0->m_pkthdr.l2hlen;
                wr->u.udpseg.iplen = htobe16(m0->m_pkthdr.l3hlen);
                wr->u.udpseg.udplen = m0->m_pkthdr.l4hlen;
                wr->u.udpseg.rtplen = 0;
                wr->u.udpseg.r4 = 0;
                wr->u.udpseg.mss = htobe16(pktlen - immhdrs);
                wr->u.udpseg.schedpktsize = wr->u.udpseg.mss;
                wr->u.udpseg.plen = htobe32(pktlen - immhdrs);

                cpl = (void *)(wr + 1);
        } else {
                /* TCP ratelimiting, with or without TSO. */
                wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG;
                wr->u.tcpseg.ethlen = m0->m_pkthdr.l2hlen;
                wr->u.tcpseg.iplen = htobe16(m0->m_pkthdr.l3hlen);
                wr->u.tcpseg.tcplen = m0->m_pkthdr.l4hlen;
                wr->u.tcpseg.tsclk_tsoff = mbuf_eo_tsclk_tsoff(m0);
                wr->u.tcpseg.r4 = 0;
                wr->u.tcpseg.r5 = 0;
                wr->u.tcpseg.plen = htobe32(pktlen - immhdrs);

                if (needs_tso(m0)) {
                        struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);

                        wr->u.tcpseg.mss = htobe16(m0->m_pkthdr.tso_segsz);

                        ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) |
                            F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
                            V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen -
                            ETHER_HDR_LEN) >> 2) |
                            V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) |
                            V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2);
                        if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr))
                                ctrl |= F_LSO_IPV6;
                        lso->lso_ctrl = htobe32(ctrl);
                        lso->ipid_ofst = htobe16(0);
                        lso->mss = htobe16(m0->m_pkthdr.tso_segsz);
                        lso->seqno_offset = htobe32(0);
                        lso->len = htobe32(pktlen);

                        cpl = (void *)(lso + 1);
                } else {
                        wr->u.tcpseg.mss = htobe16(0xffff);
                        cpl = (void *)(wr + 1);
                }
        }

        /* Checksum offload and VLAN tag insertion. */
        ctrl1 = csum_to_ctrl(cst->adapter, m0);
        if (needs_vlan_insertion(m0))
                ctrl1 |= F_TXPKT_VLAN_VLD |
                    V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag);

        /* CPL header. */
        cpl->ctrl0 = cst->ctrl0;
        cpl->pack = 0;
        cpl->len = htobe16(pktlen);
        cpl->ctrl1 = htobe64(ctrl1);

        /* zero-pad upto next 16Byte boundary, if not 16Byte aligned */
        pad = 16 - (immhdrs & 0xf);

        usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
            V_ULPTX_NSGE(nsegs));

        /* Walk the chain, skipping the bytes already sent as immediate data. */
        for (; m0 != NULL; m0 = m0->m_next) {
                if (__predict_false(m0->m_len == 0))
                        continue;
                if (immhdrs >= m0->m_len) {
                        immhdrs -= m0->m_len;
                        continue;
                }
                if (m0->m_flags & M_EXTPG)
                        sglist_append_mbuf_epg(&sg, m0,
                            mtod(m0, vm_offset_t), m0->m_len);
                else
                        sglist_append(&sg, mtod(m0, char *) + immhdrs,
                            m0->m_len - immhdrs);
                immhdrs = 0;
        }

        /* Zero the last 8B of the WR in case the SGL ends just short of it. */
        *(uint64_t *)((char *)wr + len16 * 16 - 8) = 0;

        usgl->len0 = htobe32(segs[0].ss_len);
        usgl->addr0 = htobe64(segs[0].ss_paddr);
        for (i = 0; i < nsegs - 1; i++) {
                usgl->sge[i / 2].len[i & 1] = htobe32(segs[i + 1].ss_len);
                usgl->sge[i / 2].addr[i & 1] = htobe64(segs[i + 1].ss_paddr);
        }
        if (i & 1)
                usgl->sge[i / 2].len[1] = htobe32(0);
}
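/*
 * ethofld_tx: drain cst->pending_tx for as long as tx credits allow, writing
 * one FW_ETH_TX_EO_WR per mbuf.  A firmware completion is requested when
 * there are no completions outstanding or when half of the tag's total
 * credits have been consumed without one.  Mbufs that have been written to
 * the ring move to cst->pending_fwack to await the credit return.
 */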
        mtx_assert(&cst->lock, MA_OWNED);

        while ((m = mbufq_first(&cst->pending_tx)) != NULL) {
                /* Credits needed to send this mbuf as a single EO WR. */
                next_credits = mbuf_eo_len16(m);
                if (next_credits > cst->tx_credits) {
                        /* An outstanding fw4_ack will return credits and restart tx. */
                        MPASS(cst->ncompl > 0);
                        return;
                }
                wr = start_wrq_wr(&cst->eo_txq->wrq, next_credits, &cookie);
                if (__predict_false(wr == NULL)) {
                        MPASS(cst->ncompl > 0);
                        return;
                }
                cst->tx_credits -= next_credits;
                cst->tx_nocompl += next_credits;
                compl = cst->ncompl == 0 || cst->tx_nocompl >= cst->tx_total / 2;
                ETHER_BPF_MTAP(cst->com.ifp, m);
                write_ethofld_wr(cst, wr, m, compl);
                commit_wrq_wr(&cst->eo_txq->wrq, wr, &cookie);
                if (compl) {
                        cst->ncompl++;
                        cst->tx_nocompl = 0;
                }
                (void) mbufq_dequeue(&cst->pending_tx);

                /* Drop the mbuf's hold on the tag before parking it on pending_fwack. */
                m->m_pkthdr.snd_tag = NULL;
                m->m_pkthdr.csum_flags &= ~CSUM_SND_TAG;
                m_snd_tag_rele(&cst->com);

                mbufq_enqueue(&cst->pending_fwack, m);
        }
}
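/*
 * ethofld_transmit: queue an mbuf that carries an ethofld (rate-limit) send
 * tag.  The first packet on a tag selects the offload txq and ingress queue
 * from the RSS hash and causes the tag's FLOWC to be sent; packets that would
 * push the backlog past eo_max_backlog are not queued.  Queued mbufs are
 * handed to ethofld_tx() while an extra tag reference is held.
 */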
        MPASS(m0->m_nextpkt == NULL);
        MPASS(m0->m_pkthdr.csum_flags & CSUM_SND_TAG);
        MPASS(m0->m_pkthdr.snd_tag != NULL);
        cst = mst_to_crt(m0->m_pkthdr.snd_tag);

        mtx_lock(&cst->lock);
        MPASS(cst->flags & EO_SND_TAG_REF);

        if (__predict_false(cst->flags & EO_FLOWC_PENDING)) {
                struct port_info *pi = vi->pi;
                struct adapter *sc = pi->adapter;
                const uint32_t rss_mask = vi->rss_size - 1;

                /* First packet on the tag: pick the ofld txq and ingress queue. */
                cst->eo_txq = &sc->sge.ofld_txq[vi->first_ofld_txq];
                rss_hash = m0->m_pkthdr.flowid;
                cst->iqid = vi->rss[rss_hash & rss_mask];
                cst->eo_txq += rss_hash % vi->nofldtxq;
        }

        if (__predict_false(cst->plen + m0->m_pkthdr.len > eo_max_backlog)) {
                rc = ENOBUFS;
                goto done;
        }

        mbufq_enqueue(&cst->pending_tx, m0);
        cst->plen += m0->m_pkthdr.len;

        /* Hold an extra reference on the tag while work requests are written. */
        m_snd_tag_ref(&cst->com);
        ethofld_tx(cst);
        mtx_unlock(&cst->lock);
        m_snd_tag_rele(&cst->com);
        return (0);

done:
        mtx_unlock(&cst->lock);
        return (rc);
}
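/*
 * ethofld_fw4_ack: CPL_FW4_ACK handler for ethofld etids.  The firmware
 * returns tx credits here; they are matched against the mbufs parked on
 * cst->pending_fwack, which can now be freed.  Credits left over once that
 * queue is empty belong to the flush WR, at which point the tag itself is
 * released.  If the replenished credits cover the next pending packet,
 * transmission is kicked via ethofld_tx().
 */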
        struct adapter *sc = iq->adapter;
        uint8_t credits = cpl->credits;

        cst = lookup_etid(sc, etid);
        mtx_lock(&cst->lock);
        if (__predict_false(cst->flags & EO_FLOWC_RPL_PENDING)) {
                credits -= ETID_FLOWC_LEN16;
                cst->flags &= ~EO_FLOWC_RPL_PENDING;
        }

        KASSERT(cst->ncompl > 0,
            ("%s: etid %u (%p) wasn't expecting completion.", __func__, etid, cst));
        cst->ncompl--;

        while (credits > 0) {
                m = mbufq_dequeue(&cst->pending_fwack);
                if (__predict_false(m == NULL)) {
                        /* Leftover credits belong to the final flush WR. */
                        MPASS((cst->flags &
                            (EO_FLUSH_RPL_PENDING | EO_SND_TAG_REF)) ==
                            EO_FLUSH_RPL_PENDING);
                        MPASS(cst->tx_credits + cpl->credits == cst->tx_total);
                        MPASS(cst->ncompl == 0);

                        cst->flags &= ~EO_FLUSH_RPL_PENDING;
                        cst->tx_credits += cpl->credits;
                        cxgbe_rate_tag_free_locked(cst);
                        return (0);     /* cst is gone. */
                }
                KASSERT(m != NULL,
                    ("%s: too many credits (%u, %u)", __func__, cpl->credits,
                    credits));
                KASSERT(credits >= mbuf_eo_len16(m),
                    ("%s: too few credits (%u, %u, %u)", __func__,
                    cpl->credits, credits, mbuf_eo_len16(m)));
                credits -= mbuf_eo_len16(m);
                cst->plen -= m->m_pkthdr.len;
                m_freem(m);
        }

        cst->tx_credits += cpl->credits;
        MPASS(cst->tx_credits <= cst->tx_total);

        if (cst->flags & EO_SND_TAG_REF) {
                /* Hold a reference across ethofld_tx(), as in ethofld_transmit(). */
                m_snd_tag_ref(&cst->com);
                m = mbufq_first(&cst->pending_tx);
                if (m != NULL && cst->tx_credits >= mbuf_eo_len16(m))
                        ethofld_tx(cst);
                mtx_unlock(&cst->lock);
                m_snd_tag_rele(&cst->com);
        } else {
                /* A tag already freed by the stack has no pending packets. */
                MPASS(mbufq_first(&cst->pending_tx) == NULL);
                mtx_unlock(&cst->lock);
        }
        return (0);
}