Lines Matching +full:ring +full:-disable +full:-pullup

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
86 * 0-7 are valid values.
94 * -1: driver should figure out a good value.
95 * 0: disable padding.
98 int fl_pad = -1;
104 * -1: driver should figure out a good value.
107 static int spg_len = -1;
113 * -1: no congestion feedback (not recommended).
129 * -1: let the driver decide whether to enable buffer packing or not.
130 * 0: disable buffer packing.
133 static int buffer_packing = -1;
139 * -1: driver should figure out a good value.
143 static int fl_pack = -1;
165 * for rewriting. -1 and 0-3 are all valid values.
166 * -1: hardware should leave the TCP timestamps alone.
172 static int tsclk = -1;
183 * 1 and 3-17 (both inclusive) are legal values.
223 "# of consecutive packets (1 - 255) that will trigger tx coalescing");
405 if (is_hpftid(iq->adapter, tid) || is_ftid(iq->adapter, tid)) { in set_tcb_rpl_handler()
407 * The return code for filter-write is put in the CPL cookie so in set_tcb_rpl_handler()
413 cookie = G_COOKIE(cpl->cookie); in set_tcb_rpl_handler()
439 u_int cookie = G_TID_COOKIE(G_AOPEN_ATID(be32toh(cpl->atid_status))); in act_open_rpl_handler()
451 struct adapter *sc = iq->adapter; in abort_rpl_rss_handler()
466 struct adapter *sc = iq->adapter; in fw4_ack_handler()
546 if (spg_len != -1) { in t4_sge_modload()
553 if (cong_drop < -1 || cong_drop > 2) { in t4_sge_modload()
559 if (ofld_cong_drop < -1 || ofld_cong_drop > 2) { in t4_sge_modload()
629 return (refs - rels); in t4_sge_extfree_refs()
661 device_printf(sc->dev, "Invalid hw.cxgbe.fl_pad value" in setup_pad_and_pack_boundaries()
666 v = V_INGPADBOUNDARY(ilog2(pad) - pad_shift); in setup_pad_and_pack_boundaries()
670 if (fl_pack != -1 && fl_pack != pad) { in setup_pad_and_pack_boundaries()
672 device_printf(sc->dev, "hw.cxgbe.fl_pack (%d) ignored," in setup_pad_and_pack_boundaries()
681 if (sc->params.pci.mps > MAX_PACK_BOUNDARY) in setup_pad_and_pack_boundaries()
684 pack = max(sc->params.pci.mps, CACHE_LINE_SIZE); in setup_pad_and_pack_boundaries()
692 if (fl_pack != -1) { in setup_pad_and_pack_boundaries()
693 device_printf(sc->dev, "Invalid hw.cxgbe.fl_pack value" in setup_pad_and_pack_boundaries()
701 v = V_INGPACKBOUNDARY(ilog2(pack) - 5); in setup_pad_and_pack_boundaries()
708 * adap->params.vpd.cclk must be set up before this is called.
716 int timer_max = M_TIMERVALUE0 * 1000 / sc->params.vpd.cclk; in t4_tweak_chip_settings()
718 uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE); in t4_tweak_chip_settings()
726 KASSERT(sc->flags & MASTER_PF, in t4_tweak_chip_settings()
736 v = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) | in t4_tweak_chip_settings()
737 V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) | in t4_tweak_chip_settings()
738 V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) | in t4_tweak_chip_settings()
739 V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) | in t4_tweak_chip_settings()
740 V_HOSTPAGESIZEPF4(PAGE_SHIFT - 10) | in t4_tweak_chip_settings()
741 V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) | in t4_tweak_chip_settings()
742 V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) | in t4_tweak_chip_settings()
743 V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10); in t4_tweak_chip_settings()
754 t4_write_reg(sc, reg, sw_buf_sizes[i] - CL_METADATA_SIZE); in t4_tweak_chip_settings()
766 KASSERT(intr_timer[i] >= intr_timer[i - 1], in t4_tweak_chip_settings()
771 if (i == nitems(intr_timer) - 1) { in t4_tweak_chip_settings()
775 intr_timer[i] += intr_timer[i - 1]; in t4_tweak_chip_settings()
795 v = V_TSCALE(tscale - 2); in t4_tweak_chip_settings()
798 if (sc->debug_flags & DF_DISABLE_TCB_CACHE) { in t4_tweak_chip_settings()
821 /* We use multiple DDP page sizes both in plain-TOE and ISCSI modes. */ in t4_tweak_chip_settings()
841 int mask = fl_pad ? sc->params.sge.pad_boundary - 1 : 16 - 1; in hwsz_ok()
853 struct sge *s = &sc->sge; in t4_init_rx_buf_info()
854 struct sge_params *sp = &sc->params.sge; in t4_init_rx_buf_info()
864 s->safe_zidx = -1; in t4_init_rx_buf_info()
865 rxb = &s->rx_buf_info[0]; in t4_init_rx_buf_info()
867 rxb->size1 = sw_buf_sizes[i]; in t4_init_rx_buf_info()
868 rxb->zone = m_getzone(rxb->size1); in t4_init_rx_buf_info()
869 rxb->type = m_gettype(rxb->size1); in t4_init_rx_buf_info()
870 rxb->size2 = 0; in t4_init_rx_buf_info()
871 rxb->hwidx1 = -1; in t4_init_rx_buf_info()
872 rxb->hwidx2 = -1; in t4_init_rx_buf_info()
874 int hwsize = sp->sge_fl_buffer_size[j]; in t4_init_rx_buf_info()
880 if (rxb->hwidx1 == -1 && rxb->size1 == hwsize) in t4_init_rx_buf_info()
881 rxb->hwidx1 = j; in t4_init_rx_buf_info()
884 if (rxb->size1 - CL_METADATA_SIZE < hwsize) in t4_init_rx_buf_info()
886 n = rxb->size1 - hwsize - CL_METADATA_SIZE; in t4_init_rx_buf_info()
888 rxb->hwidx2 = j; in t4_init_rx_buf_info()
889 rxb->size2 = hwsize; in t4_init_rx_buf_info()
892 if (rxb->hwidx2 != -1) { in t4_init_rx_buf_info()
893 if (n < sp->sge_fl_buffer_size[rxb->hwidx2] - in t4_init_rx_buf_info()
894 hwsize - CL_METADATA_SIZE) { in t4_init_rx_buf_info()
895 rxb->hwidx2 = j; in t4_init_rx_buf_info()
896 rxb->size2 = hwsize; in t4_init_rx_buf_info()
899 rxb->hwidx2 = j; in t4_init_rx_buf_info()
900 rxb->size2 = hwsize; in t4_init_rx_buf_info()
903 if (rxb->hwidx2 != -1) in t4_init_rx_buf_info()
904 sc->flags |= BUF_PACKING_OK; in t4_init_rx_buf_info()
905 if (s->safe_zidx == -1 && rxb->size1 == safest_rx_cluster) in t4_init_rx_buf_info()
906 s->safe_zidx = i; in t4_init_rx_buf_info()
917 struct sge_params *sp = &sc->params.sge; in t4_verify_chip_settings()
920 const uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE); in t4_verify_chip_settings()
924 r = sp->sge_control; in t4_verify_chip_settings()
926 device_printf(sc->dev, "invalid SGE_CONTROL(0x%x)\n", r); in t4_verify_chip_settings()
932 * needs to be carefully reviewed for PAGE_SHIFT vs sp->page_shift. in t4_verify_chip_settings()
934 if (sp->page_shift != PAGE_SHIFT) { in t4_verify_chip_settings()
935 device_printf(sc->dev, "invalid SGE_HOST_PAGE_SIZE(0x%x)\n", r); in t4_verify_chip_settings()
939 if (sc->flags & IS_VF) in t4_verify_chip_settings()
945 device_printf(sc->dev, "invalid ULP_RX_TDDP_PSZ(0x%x)\n", r); in t4_verify_chip_settings()
946 if (sc->vres.ddp.size != 0) in t4_verify_chip_settings()
953 device_printf(sc->dev, "invalid ULP_RX_CTL(0x%x)\n", r); in t4_verify_chip_settings()
954 if (sc->vres.ddp.size != 0) in t4_verify_chip_settings()
963 device_printf(sc->dev, "invalid TP_PARA_REG5(0x%x)\n", r); in t4_verify_chip_settings()
964 if (sc->vres.ddp.size != 0) in t4_verify_chip_settings()
976 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0, in t4_create_dma_tag()
979 NULL, &sc->dmat); in t4_create_dma_tag()
981 device_printf(sc->dev, in t4_create_dma_tag()
992 struct sge_params *sp = &sc->params.sge; in t4_sge_sysctls()
999 NULL, sp->fl_pktshift, "payload DMA offset in rx buffer (bytes)"); in t4_sge_sysctls()
1002 NULL, sp->pad_boundary, "payload pad boundary (bytes)"); in t4_sge_sysctls()
1005 NULL, sp->spg_len, "status page size (bytes)"); in t4_sge_sysctls()
1015 NULL, sp->pack_boundary, "payload pack boundary (bytes)"); in t4_sge_sysctls()
1021 if (sc->dmat) in t4_destroy_dma_tag()
1022 bus_dma_tag_destroy(sc->dmat); in t4_destroy_dma_tag()
1051 if (sc->flags & IS_VF) in t4_setup_adapter_queues()
1080 if (sc->sge.ctrlq != NULL) { in t4_teardown_adapter_queues()
1081 MPASS(!(sc->flags & IS_VF)); /* VFs don't allocate ctrlq. */ in t4_teardown_adapter_queues()
1097 maxp = sc->params.sge.fl_pktshift + ETHER_HDR_LEN + in max_rx_payload()
1099 if (ofld && sc->tt.tls && sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS && in max_rx_payload()
1100 maxp < sc->params.tp.max_rx_pdu) in max_rx_payload()
1101 maxp = sc->params.tp.max_rx_pdu; in max_rx_payload()
1122 struct adapter *sc = vi->adapter; in t4_setup_vi_queues()
1123 if_t ifp = vi->ifp; in t4_setup_vi_queues()
1127 intr_idx = vi->first_intr; in t4_setup_vi_queues()
1135 MPASS(vi->first_intr >= 0); in t4_setup_vi_queues()
1150 iqidx = vi->first_nm_rxq + (i % vi->nnmrxq); in t4_setup_vi_queues()
1175 intr_idx = saved_idx + max(vi->nrxq, vi->nnmrxq); in t4_setup_vi_queues()
1231 if (if_getcapabilities(vi->ifp) & IFCAP_NETMAP) { in t4_teardown_vi_queues()
1284 struct sge_iq *fwq = &sc->sge.fwq; in t4_intr_all()
1286 MPASS(sc->intr_count == 1); in t4_intr_all()
1288 if (sc->intr_type == INTR_INTX) in t4_intr_all()
1304 const bool verbose = (sc->debug_flags & DF_VERBOSE_SLOWINTR) != 0; in t4_intr_err()
1306 if (atomic_load_int(&sc->error_flags) & ADAP_FATAL_ERR) in t4_intr_err()
1311 sc->swintr++; in t4_intr_err()
1320 * Interrupt handler for iq-only queues. The firmware event queue is the only
1328 if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) { in t4_intr_evt()
1330 (void) atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE); in t4_intr_evt()
1342 if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) { in t4_intr()
1344 (void) atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE); in t4_intr()
1357 if (atomic_cmpset_int(&nm_rxq->nm_state, NM_ON, NM_BUSY)) { in t4_nm_intr()
1359 (void) atomic_cmpset_int(&nm_rxq->nm_state, NM_BUSY, NM_ON); in t4_nm_intr()
1371 MPASS(irq->nm_rxq != NULL); in t4_vi_intr()
1372 t4_nm_intr(irq->nm_rxq); in t4_vi_intr()
1374 MPASS(irq->rxq != NULL); in t4_vi_intr()
1375 t4_intr(irq->rxq); in t4_vi_intr()
1380 * Deals with interrupts on an iq-only (no freelist) queue.
1386 struct adapter *sc = iq->adapter; in service_iq()
1387 struct iq_desc *d = &iq->desc[iq->cidx]; in service_iq()
1393 KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq)); in service_iq()
1394 KASSERT((iq->flags & IQ_HAS_FL) == 0, in service_iq()
1395 ("%s: called for iq %p with fl (iq->flags 0x%x)", __func__, iq, in service_iq()
1396 iq->flags)); in service_iq()
1397 MPASS((iq->flags & IQ_ADJ_CREDIT) == 0); in service_iq()
1398 MPASS((iq->flags & IQ_LRO_ENABLED) == 0); in service_iq()
1400 limit = budget ? budget : iq->qsize / 16; in service_iq()
1403 * We always come back and check the descriptor ring for new indirect in service_iq()
1407 while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) { in service_iq()
1411 rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen); in service_iq()
1412 lq = be32toh(d->rsp.pldbuflen_qid); in service_iq()
1422 KASSERT(d->rss.opcode < NUM_CPL_CMDS, in service_iq()
1424 d->rss.opcode)); in service_iq()
1425 t4_cpl_handler[d->rss.opcode](iq, &d->rss, NULL); in service_iq()
1430 * There are 1K interrupt-capable queues (qids 0 in service_iq()
1436 t4_an_handler(iq, &d->rsp); in service_iq()
1440 q = sc->sge.iqmap[lq - sc->sge.iq_start - in service_iq()
1441 sc->sge.iq_base]; in service_iq()
1442 if (atomic_cmpset_int(&q->state, IQS_IDLE, in service_iq()
1444 if (service_iq_fl(q, q->qsize / 16) == 0) { in service_iq()
1445 (void) atomic_cmpset_int(&q->state, in service_iq()
1460 device_get_nameunit(sc->dev), rsp_type, iq); in service_iq()
1465 if (__predict_false(++iq->cidx == iq->sidx)) { in service_iq()
1466 iq->cidx = 0; in service_iq()
1467 iq->gen ^= F_RSPD_GEN; in service_iq()
1468 d = &iq->desc[0]; in service_iq()
1471 t4_write_reg(sc, sc->sge_gts_reg, in service_iq()
1473 V_INGRESSQID(iq->cntxt_id) | in service_iq()
1492 if (service_iq_fl(q, q->qsize / 8) == 0) in service_iq()
1493 (void) atomic_cmpset_int(&q->state, IQS_BUSY, IQS_IDLE); in service_iq()
1498 t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) | in service_iq()
1499 V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params)); in service_iq()
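The service_iq() loop above (and the matching loop in service_iq_fl() further down) consumes response descriptors by comparing each descriptor's generation bit against the queue's expected value, wrapping cidx at sidx and flipping the expected generation on every wrap. A stand-alone sketch of that consumption pattern, using invented names (GEN_BIT, struct ring, handle()) rather than the driver's own types:

    /* Hypothetical illustration of gen-bit descriptor ring consumption. */
    #define GEN_BIT 0x80u

    struct desc {
            unsigned char type_gen;     /* generation bit lives here */
            /* ... response payload ... */
    };

    void handle(struct desc *);         /* consumer callback, assumed elsewhere */

    struct ring {
            unsigned int cidx;          /* consumer index */
            unsigned int sidx;          /* usable descriptors in the ring */
            unsigned int gen;           /* expected generation; flips on wrap */
            struct desc *desc;          /* descriptor array written by hardware */
    };

    static void
    drain_ring(struct ring *r)
    {
            while ((r->desc[r->cidx].type_gen & GEN_BIT) == r->gen) {
                    handle(&r->desc[r->cidx]);
                    if (++r->cidx == r->sidx) {
                            r->cidx = 0;
                            r->gen ^= GEN_BIT;  /* hw writes the opposite bit next pass */
                    }
            }
    }

Because the expected generation flips on every wrap, a stale descriptor left over from the previous pass can never be mistaken for a new one, so the consumer never has to read a producer index.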
1509 return (lro->lro_mbuf_max != 0); in sort_before_lro()
1526 cur = &sc->cal_info[sc->cal_current]; in t4_tstmp_to_ns()
1527 gen = seqc_read(&cur->gen); in t4_tstmp_to_ns()
1531 if (seqc_consistent(&cur->gen, gen)) in t4_tstmp_to_ns()
1538 * ( (cur_time - prev_time) ) in t4_tstmp_to_ns()
1539 * ((hw_tstmp - hw_prev) * ----------------------------- ) + prev_time in t4_tstmp_to_ns()
1540 * ( (hw_cur - hw_prev) ) in t4_tstmp_to_ns()
1545 hw_clocks = hw_tstmp - dcur.hw_prev; in t4_tstmp_to_ns()
1546 sbt_cur_to_prev = (dcur.sbt_cur - dcur.sbt_prev); in t4_tstmp_to_ns()
1547 hw_clk_div = dcur.hw_cur - dcur.hw_prev; in t4_tstmp_to_ns()
1556 fl->rx_offset = 0; in move_to_next_rxbuf()
1557 if (__predict_false((++fl->cidx & 7) == 0)) { in move_to_next_rxbuf()
1558 uint16_t cidx = fl->cidx >> 3; in move_to_next_rxbuf()
1560 if (__predict_false(cidx == fl->sidx)) in move_to_next_rxbuf()
1561 fl->cidx = cidx = 0; in move_to_next_rxbuf()
1562 fl->hw_cidx = cidx; in move_to_next_rxbuf()
1574 struct adapter *sc = iq->adapter; in service_iq_fl()
1575 struct iq_desc *d = &iq->desc[iq->cidx]; in service_iq_fl()
1582 const struct timeval lro_timeout = {0, sc->lro_timeout}; in service_iq_fl()
1583 struct lro_ctrl *lro = &rxq->lro; in service_iq_fl()
1586 KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq)); in service_iq_fl()
1587 MPASS(iq->flags & IQ_HAS_FL); in service_iq_fl()
1591 if (iq->flags & IQ_ADJ_CREDIT) { in service_iq_fl()
1593 iq->flags &= ~IQ_ADJ_CREDIT; in service_iq_fl()
1594 if ((d->rsp.u.type_gen & F_RSPD_GEN) != iq->gen) { in service_iq_fl()
1596 t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(1) | in service_iq_fl()
1597 V_INGRESSQID((u32)iq->cntxt_id) | in service_iq_fl()
1598 V_SEINTARM(iq->intr_params)); in service_iq_fl()
1604 MPASS((iq->flags & IQ_ADJ_CREDIT) == 0); in service_iq_fl()
1607 limit = budget ? budget : iq->qsize / 16; in service_iq_fl()
1608 fl = &rxq->fl; in service_iq_fl()
1609 fl_hw_cidx = fl->hw_cidx; /* stable snapshot */ in service_iq_fl()
1610 while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) { in service_iq_fl()
1615 rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen); in service_iq_fl()
1616 lq = be32toh(d->rsp.pldbuflen_qid); in service_iq_fl()
1621 if (fl->rx_offset > 0) in service_iq_fl()
1625 if (IDXDIFF(fl->hw_cidx, fl_hw_cidx, fl->sidx) > 4) { in service_iq_fl()
1629 fl_hw_cidx = fl->hw_cidx; in service_iq_fl()
1632 if (d->rss.opcode == CPL_RX_PKT) { in service_iq_fl()
1644 KASSERT(d->rss.opcode < NUM_CPL_CMDS, in service_iq_fl()
1645 ("%s: bad opcode %02x.", __func__, d->rss.opcode)); in service_iq_fl()
1646 t4_cpl_handler[d->rss.opcode](iq, &d->rss, m0); in service_iq_fl()
1652 * There are 1K interrupt-capable queues (qids 0 in service_iq_fl()
1663 t4_an_handler(iq, &d->rsp); in service_iq_fl()
1670 device_get_nameunit(sc->dev), rsp_type, iq); in service_iq_fl()
1675 if (__predict_false(++iq->cidx == iq->sidx)) { in service_iq_fl()
1676 iq->cidx = 0; in service_iq_fl()
1677 iq->gen ^= F_RSPD_GEN; in service_iq_fl()
1678 d = &iq->desc[0]; in service_iq_fl()
1681 t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) | in service_iq_fl()
1682 V_INGRESSQID(iq->cntxt_id) | in service_iq_fl()
1686 if (iq->flags & IQ_LRO_ENABLED && in service_iq_fl()
1688 sc->lro_timeout != 0) { in service_iq_fl()
1699 if (iq->flags & IQ_LRO_ENABLED) { in service_iq_fl()
1700 if (ndescs > 0 && lro->lro_mbuf_count > 8) { in service_iq_fl()
1703 iq->flags |= IQ_ADJ_CREDIT; in service_iq_fl()
1704 ndescs--; in service_iq_fl()
1711 t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) | in service_iq_fl()
1712 V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params)); in service_iq_fl()
1727 return ((void *)(sd->cl + sd->moff)); in cl_metadata()
1733 struct cluster_metadata *clm = m->m_ext.ext_arg1; in rxb_free()
1735 uma_zfree(clm->zone, clm->cl); in rxb_free()
1751 struct fl_sdesc *sd = &fl->sdesc[fl->cidx]; in get_scatter_segment()
1752 struct rx_buf_info *rxb = &sc->sge.rx_buf_info[sd->zidx]; in get_scatter_segment()
1757 if (fl->flags & FL_BUF_PACKING) { in get_scatter_segment()
1760 blen = rxb->size2 - fl->rx_offset; /* max possible in this buf */ in get_scatter_segment()
1762 payload = sd->cl + fl->rx_offset; in get_scatter_segment()
1765 pad = roundup2(l, fl->buf_boundary) - l; in get_scatter_segment()
1766 if (fl->rx_offset + len + pad < rxb->size2) in get_scatter_segment()
1768 MPASS(fl->rx_offset + blen <= rxb->size2); in get_scatter_segment()
1770 MPASS(fl->rx_offset == 0); /* not packing */ in get_scatter_segment()
1771 blen = rxb->size1; in get_scatter_segment()
1773 payload = sd->cl; in get_scatter_segment()
1780 m->m_pkthdr.len = remaining; in get_scatter_segment()
1786 m->m_len = len; in get_scatter_segment()
1789 if (sc->sc_do_rxcopy && len < RX_COPY_THRESHOLD) { in get_scatter_segment()
1792 if (fl->flags & FL_BUF_PACKING) { in get_scatter_segment()
1793 fl->rx_offset += blen; in get_scatter_segment()
1794 MPASS(fl->rx_offset <= rxb->size2); in get_scatter_segment()
1795 if (fl->rx_offset < rxb->size2) in get_scatter_segment()
1798 } else if (fl->flags & FL_BUF_PACKING) { in get_scatter_segment()
1800 if (sd->nmbuf++ == 0) { in get_scatter_segment()
1801 clm->refcount = 1; in get_scatter_segment()
1802 clm->zone = rxb->zone; in get_scatter_segment()
1803 clm->cl = sd->cl; in get_scatter_segment()
1806 m_extaddref(m, payload, blen, &clm->refcount, rxb_free, clm, in get_scatter_segment()
1809 fl->rx_offset += blen; in get_scatter_segment()
1810 MPASS(fl->rx_offset <= rxb->size2); in get_scatter_segment()
1811 if (fl->rx_offset < rxb->size2) in get_scatter_segment()
1814 m_cljset(m, sd->cl, rxb->type); in get_scatter_segment()
1815 sd->cl = NULL; /* consumed, not a recycle candidate */ in get_scatter_segment()
1829 if (__predict_false(fl->flags & FL_BUF_RESUME)) { in get_fl_payload()
1830 M_ASSERTPKTHDR(fl->m0); in get_fl_payload()
1831 MPASS(fl->m0->m_pkthdr.len == plen); in get_fl_payload()
1832 MPASS(fl->remaining < plen); in get_fl_payload()
1834 m0 = fl->m0; in get_fl_payload()
1835 pnext = fl->pnext; in get_fl_payload()
1836 remaining = fl->remaining; in get_fl_payload()
1837 fl->flags &= ~FL_BUF_RESUME; in get_fl_payload()
1849 remaining = plen - m0->m_len; in get_fl_payload()
1850 pnext = &m0->m_next; in get_fl_payload()
1853 MPASS(fl->rx_offset == 0); in get_fl_payload()
1854 m = get_scatter_segment(sc, fl, plen - remaining, remaining); in get_fl_payload()
1856 fl->m0 = m0; in get_fl_payload()
1857 fl->pnext = pnext; in get_fl_payload()
1858 fl->remaining = remaining; in get_fl_payload()
1859 fl->flags |= FL_BUF_RESUME; in get_fl_payload()
1863 pnext = &m->m_next; in get_fl_payload()
1864 remaining -= m->m_len; in get_fl_payload()
1876 struct fl_sdesc *sd = &fl->sdesc[fl->cidx]; in skip_scatter_segment()
1877 struct rx_buf_info *rxb = &sc->sge.rx_buf_info[sd->zidx]; in skip_scatter_segment()
1880 if (fl->flags & FL_BUF_PACKING) { in skip_scatter_segment()
1883 blen = rxb->size2 - fl->rx_offset; /* max possible in this buf */ in skip_scatter_segment()
1887 pad = roundup2(l, fl->buf_boundary) - l; in skip_scatter_segment()
1888 if (fl->rx_offset + len + pad < rxb->size2) in skip_scatter_segment()
1890 fl->rx_offset += blen; in skip_scatter_segment()
1891 MPASS(fl->rx_offset <= rxb->size2); in skip_scatter_segment()
1892 if (fl->rx_offset < rxb->size2) in skip_scatter_segment()
1895 MPASS(fl->rx_offset == 0); /* not packing */ in skip_scatter_segment()
1896 blen = rxb->size1; in skip_scatter_segment()
1913 remaining -= len; in skip_fl_payload()
1921 struct fl_sdesc *sd = &fl->sdesc[fl->cidx]; in get_segment_len()
1922 struct rx_buf_info *rxb = &sc->sge.rx_buf_info[sd->zidx]; in get_segment_len()
1924 if (fl->flags & FL_BUF_PACKING) in get_segment_len()
1925 len = rxb->size2 - fl->rx_offset; in get_segment_len()
1927 len = rxb->size1; in get_segment_len()
1937 if_t ifp = rxq->ifp; in eth_rx()
1938 struct sge_fl *fl = &rxq->fl; in eth_rx()
1942 struct lro_ctrl *lro = &rxq->lro; in eth_rx()
1980 MPASS(plen > sc->params.sge.fl_pktshift); in eth_rx()
1981 if (vi->pfil != NULL && PFIL_HOOKED_IN(vi->pfil) && in eth_rx()
1982 __predict_true((fl->flags & FL_BUF_RESUME) == 0)) { in eth_rx()
1983 struct fl_sdesc *sd = &fl->sdesc[fl->cidx]; in eth_rx()
1987 slen = get_segment_len(sc, fl, plen) - in eth_rx()
1988 sc->params.sge.fl_pktshift; in eth_rx()
1989 frame = sd->cl + fl->rx_offset + sc->params.sge.fl_pktshift; in eth_rx()
1991 rc = pfil_mem_in(vi->pfil, frame, slen, ifp, &m0); in eth_rx()
2007 m0->m_pkthdr.len -= sc->params.sge.fl_pktshift; in eth_rx()
2008 m0->m_len -= sc->params.sge.fl_pktshift; in eth_rx()
2009 m0->m_data += sc->params.sge.fl_pktshift; in eth_rx()
2012 m0->m_pkthdr.rcvif = ifp; in eth_rx()
2013 M_HASHTYPE_SET(m0, sw_hashtype[d->rss.hash_type][d->rss.ipv6]); in eth_rx()
2014 m0->m_pkthdr.flowid = be32toh(d->rss.hash_val); in eth_rx()
2016 cpl = (const void *)(&d->rss + 1); in eth_rx()
2017 if (sc->params.tp.rx_pkt_encap) { in eth_rx()
2018 const uint16_t ev = be16toh(cpl->err_vec); in eth_rx()
2024 err_vec = be16toh(cpl->err_vec); in eth_rx()
2028 if (cpl->csum_calc && err_vec == 0) { in eth_rx()
2029 int ipv6 = !!(cpl->l2info & htobe32(F_RXF_IP6)); in eth_rx()
2033 MPASS((cpl->l2info & htobe32(F_RXF_IP)) ^ in eth_rx()
2034 (cpl->l2info & htobe32(F_RXF_IP6))); in eth_rx()
2035 m0->m_pkthdr.csum_data = be16toh(cpl->csum); in eth_rx()
2038 m0->m_pkthdr.csum_flags = CSUM_L3_CALC | in eth_rx()
2042 m0->m_pkthdr.csum_flags = CSUM_L4_CALC | in eth_rx()
2045 rxq->rxcsum++; in eth_rx()
2050 if (__predict_false(cpl->ip_frag)) { in eth_rx()
2064 m0->m_pkthdr.csum_data = 0xffff; in eth_rx()
2066 m0->m_pkthdr.csum_flags = CSUM_L4_CALC | in eth_rx()
2069 m0->m_pkthdr.csum_flags = CSUM_L3_CALC | in eth_rx()
2076 MPASS(m0->m_pkthdr.csum_data == 0xffff); in eth_rx()
2081 m0->m_pkthdr.csum_flags = in eth_rx()
2084 rxq->vxlan_rxcsum++; in eth_rx()
2088 if (cpl->vlan_ex) { in eth_rx()
2089 if (sc->flags & IS_VF && sc->vlan_id) { in eth_rx()
2094 MPASS(be16toh(cpl->vlan) == sc->vlan_id); in eth_rx()
2096 m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan); in eth_rx()
2097 m0->m_flags |= M_VLANTAG; in eth_rx()
2098 rxq->vlan_extraction++; in eth_rx()
2102 if (rxq->iq.flags & IQ_RX_TIMESTAMP) { in eth_rx()
2105 * long as we get a non-zero back from t4_tstmp_to_ns(). in eth_rx()
2107 m0->m_pkthdr.rcv_tstmp = t4_tstmp_to_ns(sc, in eth_rx()
2108 be64toh(d->rsp.u.last_flit)); in eth_rx()
2109 if (m0->m_pkthdr.rcv_tstmp != 0) in eth_rx()
2110 m0->m_flags |= M_TSTMP; in eth_rx()
2114 m0->m_pkthdr.numa_domain = if_getnumadomain(ifp); in eth_rx()
2117 if (rxq->iq.flags & IQ_LRO_ENABLED && tnl_type == 0 && in eth_rx()
2140 struct sge_eq *eq = &wrq->eq; in wrq_tx_drain()
2143 if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) in wrq_tx_drain()
2144 drain_wrq_wr_list(wrq->adapter, wrq); in wrq_tx_drain()
2151 struct sge_eq *eq = &wrq->eq; in drain_wrq_wr_list()
2158 MPASS(TAILQ_EMPTY(&wrq->incomplete_wrs)); in drain_wrq_wr_list()
2159 wr = STAILQ_FIRST(&wrq->wr_list); in drain_wrq_wr_list()
2161 MPASS(eq->pidx == eq->dbidx); in drain_wrq_wr_list()
2165 eq->cidx = read_hw_cidx(eq); in drain_wrq_wr_list()
2166 if (eq->pidx == eq->cidx) in drain_wrq_wr_list()
2167 available = eq->sidx - 1; in drain_wrq_wr_list()
2169 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; in drain_wrq_wr_list()
2171 MPASS(wr->wrq == wrq); in drain_wrq_wr_list()
2172 n = howmany(wr->wr_len, EQ_ESIZE); in drain_wrq_wr_list()
2176 dst = (void *)&eq->desc[eq->pidx]; in drain_wrq_wr_list()
2177 if (__predict_true(eq->sidx - eq->pidx > n)) { in drain_wrq_wr_list()
2179 bcopy(&wr->wr[0], dst, wr->wr_len); in drain_wrq_wr_list()
2180 eq->pidx += n; in drain_wrq_wr_list()
2182 int first_portion = (eq->sidx - eq->pidx) * EQ_ESIZE; in drain_wrq_wr_list()
2184 bcopy(&wr->wr[0], dst, first_portion); in drain_wrq_wr_list()
2185 if (wr->wr_len > first_portion) { in drain_wrq_wr_list()
2186 bcopy(&wr->wr[first_portion], &eq->desc[0], in drain_wrq_wr_list()
2187 wr->wr_len - first_portion); in drain_wrq_wr_list()
2189 eq->pidx = n - (eq->sidx - eq->pidx); in drain_wrq_wr_list()
2191 wrq->tx_wrs_copied++; in drain_wrq_wr_list()
2193 if (available < eq->sidx / 4 && in drain_wrq_wr_list()
2194 atomic_cmpset_int(&eq->equiq, 0, 1)) { in drain_wrq_wr_list()
2200 dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ | in drain_wrq_wr_list()
2210 STAILQ_REMOVE_HEAD(&wrq->wr_list, link); in drain_wrq_wr_list()
2212 MPASS(wrq->nwr_pending > 0); in drain_wrq_wr_list()
2213 wrq->nwr_pending--; in drain_wrq_wr_list()
2214 MPASS(wrq->ndesc_needed >= n); in drain_wrq_wr_list()
2215 wrq->ndesc_needed -= n; in drain_wrq_wr_list()
2216 } while ((wr = STAILQ_FIRST(&wrq->wr_list)) != NULL); in drain_wrq_wr_list()
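Several of the fragments above size the remaining descriptor space with IDXDIFF(head, tail, size), e.g. available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1. A sketch of the usual circular-index difference those computations rely on (the helper name here is invented, not the driver's macro):

    /* Distance from tail to head on a ring of 'size' slots. */
    static inline unsigned int
    idx_diff(unsigned int head, unsigned int tail, unsigned int size)
    {
            return (head >= tail ? head - tail : size - tail + head);
    }

With one slot always kept unused, idx_diff(cidx, pidx, sidx) - 1 is the number of descriptors the producer may still write, which keeps a full ring distinguishable from an empty one.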
2229 struct sge_eq *eq = &wrq->eq; in t4_wrq_tx_locked()
2234 MPASS(wr->wr_len > 0 && wr->wr_len <= SGE_MAX_WR_LEN); in t4_wrq_tx_locked()
2235 MPASS((wr->wr_len & 0x7) == 0); in t4_wrq_tx_locked()
2237 STAILQ_INSERT_TAIL(&wrq->wr_list, wr, link); in t4_wrq_tx_locked()
2238 wrq->nwr_pending++; in t4_wrq_tx_locked()
2239 wrq->ndesc_needed += howmany(wr->wr_len, EQ_ESIZE); in t4_wrq_tx_locked()
2241 if (!TAILQ_EMPTY(&wrq->incomplete_wrs)) in t4_wrq_tx_locked()
2247 MPASS(eq->pidx == eq->dbidx); in t4_wrq_tx_locked()
2254 struct adapter *sc = vi->adapter; in t4_update_fl_bufsize()
2264 fl = &rxq->fl; in t4_update_fl_bufsize()
2267 fl->zidx = find_refill_source(sc, maxp, in t4_update_fl_bufsize()
2268 fl->flags & FL_BUF_PACKING); in t4_update_fl_bufsize()
2274 fl = &ofld_rxq->fl; in t4_update_fl_bufsize()
2277 fl->zidx = find_refill_source(sc, maxp, in t4_update_fl_bufsize()
2278 fl->flags & FL_BUF_PACKING); in t4_update_fl_bufsize()
2290 return (m->m_pkthdr.PH_loc.eight[1]); in mbuf_eo_nsegs()
2299 m->m_pkthdr.PH_loc.eight[1] = nsegs; in set_mbuf_eo_nsegs()
2309 n = m->m_pkthdr.PH_loc.eight[2]; in mbuf_eo_len16()
2321 m->m_pkthdr.PH_loc.eight[2] = len16; in set_mbuf_eo_len16()
2330 return (m->m_pkthdr.PH_loc.eight[3]); in mbuf_eo_tsclk_tsoff()
2339 m->m_pkthdr.PH_loc.eight[3] = tsclk_tsoff; in set_mbuf_eo_tsclk_tsoff()
2347 return (mst != NULL && mst->sw->type == IF_SND_TAG_TYPE_RATE_LIMIT); in needs_eo()
2369 m->m_pkthdr.len = len; in alloc_wr_mbuf()
2370 m->m_len = len; in alloc_wr_mbuf()
2387 return (m->m_pkthdr.csum_flags & csum_flags); in needs_hwcsum()
2398 return (m->m_pkthdr.csum_flags & csum_flags); in needs_tso()
2407 return (m->m_pkthdr.csum_flags & CSUM_ENCAP_VXLAN); in needs_vxlan_csum()
2418 return ((m->m_pkthdr.csum_flags & csum_flags) != 0 && in needs_vxlan_tso()
2419 (m->m_pkthdr.csum_flags & csum_flags) != CSUM_ENCAP_VXLAN); in needs_vxlan_tso()
2430 return (m->m_pkthdr.csum_flags & csum_flags); in needs_inner_tcp_csum()
2442 return (m->m_pkthdr.csum_flags & csum_flags); in needs_l3_csum()
2453 return (m->m_pkthdr.csum_flags & csum_flags); in needs_outer_tcp_csum()
2465 return (m->m_pkthdr.csum_flags & csum_flags); in needs_outer_l4_csum()
2475 return (m->m_pkthdr.csum_flags & csum_flags); in needs_outer_udp_csum()
2485 return (m->m_flags & M_VLANTAG); in needs_vlan_insertion()
2499 if (offset + len < m->m_len) { in m_advance()
2504 len -= m->m_len - offset; in m_advance()
2505 m = m->m_next; in m_advance()
2524 len = m->m_len; in count_mbuf_ext_pgs()
2526 len -= skip; in count_mbuf_ext_pgs()
2528 if (m->m_epg_hdrlen != 0) { in count_mbuf_ext_pgs()
2529 if (off >= m->m_epg_hdrlen) { in count_mbuf_ext_pgs()
2530 off -= m->m_epg_hdrlen; in count_mbuf_ext_pgs()
2532 seglen = m->m_epg_hdrlen - off; in count_mbuf_ext_pgs()
2536 len -= seglen; in count_mbuf_ext_pgs()
2538 (vm_offset_t)&m->m_epg_hdr[segoff]); in count_mbuf_ext_pgs()
2544 pgoff = m->m_epg_1st_off; in count_mbuf_ext_pgs()
2545 for (i = 0; i < m->m_epg_npgs && len > 0; i++) { in count_mbuf_ext_pgs()
2548 off -= pglen; in count_mbuf_ext_pgs()
2552 seglen = pglen - off; in count_mbuf_ext_pgs()
2556 len -= seglen; in count_mbuf_ext_pgs()
2557 paddr = m->m_epg_pa[i] + segoff; in count_mbuf_ext_pgs()
2564 seglen = min(len, m->m_epg_trllen - off); in count_mbuf_ext_pgs()
2565 len -= seglen; in count_mbuf_ext_pgs()
2566 paddr = pmap_kextract((vm_offset_t)&m->m_epg_trail[off]); in count_mbuf_ext_pgs()
2589 MPASS(m->m_pkthdr.len > 0); in count_mbuf_nsegs()
2590 MPASS(m->m_pkthdr.len >= skip); in count_mbuf_nsegs()
2594 for (; m; m = m->m_next) { in count_mbuf_nsegs()
2595 len = m->m_len; in count_mbuf_nsegs()
2599 skip -= len; in count_mbuf_nsegs()
2602 if ((m->m_flags & M_EXTPG) != 0) { in count_mbuf_nsegs()
2609 len -= skip; in count_mbuf_nsegs()
2614 nsegs--; in count_mbuf_nsegs()
2615 nextaddr = pmap_kextract(va + len - 1) + 1; in count_mbuf_nsegs()
2673 if (__predict_false(m0->m_pkthdr.len < ETHER_HDR_LEN)) { in parse_pkt()
2686 MPASS(m0->m_pkthdr.len > 0); in parse_pkt()
2689 if (m0->m_pkthdr.csum_flags & CSUM_SND_TAG) in parse_pkt()
2690 mst = m0->m_pkthdr.snd_tag; in parse_pkt()
2695 if (mst != NULL && mst->sw->type == IF_SND_TAG_TYPE_TLS) { in parse_pkt()
2718 if (__predict_false(nsegs > 2 && m0->m_pkthdr.len <= MHLEN && in parse_pkt()
2721 m0 = m_pullup(m0, m0->m_pkthdr.len); in parse_pkt()
2727 *mp = m0; /* update caller's copy after pullup */ in parse_pkt()
2741 m_snd_tag_rele(m0->m_pkthdr.snd_tag); in parse_pkt()
2742 m0->m_pkthdr.snd_tag = NULL; in parse_pkt()
2743 m0->m_pkthdr.csum_flags &= ~CSUM_SND_TAG; in parse_pkt()
2757 eh_type = ntohs(eh->ether_type); in parse_pkt()
2761 eh_type = ntohs(evh->evl_proto); in parse_pkt()
2762 m0->m_pkthdr.l2hlen = sizeof(*evh); in parse_pkt()
2764 m0->m_pkthdr.l2hlen = sizeof(*eh); in parse_pkt()
2769 l3hdr = m_advance(&m, &offset, m0->m_pkthdr.l2hlen); in parse_pkt()
2771 m_advance(&m, &offset, m0->m_pkthdr.l2hlen); in parse_pkt()
2778 m0->m_pkthdr.l3hlen = sizeof(struct ip6_hdr); in parse_pkt()
2788 ip->ip_sum = 0; in parse_pkt()
2790 const uint16_t ipl = ip->ip_len; in parse_pkt()
2792 ip->ip_len = 0; in parse_pkt()
2793 ip->ip_sum = ~in_cksum_hdr(ip); in parse_pkt()
2794 ip->ip_len = ipl; in parse_pkt()
2796 ip->ip_sum = in_cksum_hdr(ip); in parse_pkt()
2798 m0->m_pkthdr.l3hlen = ip->ip_hl << 2; in parse_pkt()
2815 m0->m_pkthdr.l4hlen = sizeof(struct udphdr); in parse_pkt()
2816 m0->m_pkthdr.l5hlen = sizeof(struct vxlan_header); in parse_pkt()
2819 eh = m_advance(&m, &offset, m0->m_pkthdr.l3hlen + in parse_pkt()
2821 eh_type = ntohs(eh->ether_type); in parse_pkt()
2825 eh_type = ntohs(evh->evl_proto); in parse_pkt()
2826 m0->m_pkthdr.inner_l2hlen = sizeof(*evh); in parse_pkt()
2828 m0->m_pkthdr.inner_l2hlen = sizeof(*eh); in parse_pkt()
2830 l3hdr = m_advance(&m, &offset, m0->m_pkthdr.inner_l2hlen); in parse_pkt()
2832 m_advance(&m, &offset, m0->m_pkthdr.inner_l2hlen); in parse_pkt()
2838 m0->m_pkthdr.inner_l3hlen = sizeof(struct ip6_hdr); in parse_pkt()
2846 m0->m_pkthdr.inner_l3hlen = ip->ip_hl << 2; in parse_pkt()
2862 tcp = m_advance(&m, &offset, m0->m_pkthdr.inner_l3hlen); in parse_pkt()
2863 m0->m_pkthdr.inner_l4hlen = tcp->th_off * 4; in parse_pkt()
2865 MPASS((m0->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0); in parse_pkt()
2866 m0->m_pkthdr.csum_flags &= CSUM_INNER_IP6_UDP | in parse_pkt()
2873 tcp = m_advance(&m, &offset, m0->m_pkthdr.l3hlen); in parse_pkt()
2874 m0->m_pkthdr.l4hlen = tcp->th_off * 4; in parse_pkt()
2883 m0->m_pkthdr.l4hlen = sizeof(struct udphdr); in parse_pkt()
2891 immhdrs = m0->m_pkthdr.l2hlen + m0->m_pkthdr.l3hlen + in parse_pkt()
2892 m0->m_pkthdr.l4hlen; in parse_pkt()
2899 rc = ethofld_transmit(mst->ifp, m0); in parse_pkt()
2913 struct sge_eq *eq = &wrq->eq; in start_wrq_wr()
2914 struct adapter *sc = wrq->adapter; in start_wrq_wr()
2924 if (__predict_false((eq->flags & EQ_HW_ALLOCATED) == 0)) { in start_wrq_wr()
2929 if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) in start_wrq_wr()
2932 if (!STAILQ_EMPTY(&wrq->wr_list)) { in start_wrq_wr()
2938 cookie->pidx = -1; in start_wrq_wr()
2939 cookie->ndesc = ndesc; in start_wrq_wr()
2940 return (&wr->wr); in start_wrq_wr()
2943 eq->cidx = read_hw_cidx(eq); in start_wrq_wr()
2944 if (eq->pidx == eq->cidx) in start_wrq_wr()
2945 available = eq->sidx - 1; in start_wrq_wr()
2947 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; in start_wrq_wr()
2951 cookie->pidx = eq->pidx; in start_wrq_wr()
2952 cookie->ndesc = ndesc; in start_wrq_wr()
2953 TAILQ_INSERT_TAIL(&wrq->incomplete_wrs, cookie, link); in start_wrq_wr()
2955 w = &eq->desc[eq->pidx]; in start_wrq_wr()
2956 IDXINCR(eq->pidx, ndesc, eq->sidx); in start_wrq_wr()
2957 if (__predict_false(cookie->pidx + ndesc > eq->sidx)) { in start_wrq_wr()
2958 w = &wrq->ss[0]; in start_wrq_wr()
2959 wrq->ss_pidx = cookie->pidx; in start_wrq_wr()
2960 wrq->ss_len = len16 * 16; in start_wrq_wr()
2971 struct sge_eq *eq = &wrq->eq; in commit_wrq_wr()
2972 struct adapter *sc = wrq->adapter; in commit_wrq_wr()
2976 if (cookie->pidx == -1) { in commit_wrq_wr()
2983 if (__predict_false(w == &wrq->ss[0])) { in commit_wrq_wr()
2984 int n = (eq->sidx - wrq->ss_pidx) * EQ_ESIZE; in commit_wrq_wr()
2986 MPASS(wrq->ss_len > n); /* WR had better wrap around. */ in commit_wrq_wr()
2987 bcopy(&wrq->ss[0], &eq->desc[wrq->ss_pidx], n); in commit_wrq_wr()
2988 bcopy(&wrq->ss[n], &eq->desc[0], wrq->ss_len - n); in commit_wrq_wr()
2989 wrq->tx_wrs_ss++; in commit_wrq_wr()
2991 wrq->tx_wrs_direct++; in commit_wrq_wr()
2994 ndesc = cookie->ndesc; /* Can be more than SGE_MAX_WR_NDESC here. */ in commit_wrq_wr()
2995 pidx = cookie->pidx; in commit_wrq_wr()
2996 MPASS(pidx >= 0 && pidx < eq->sidx); in commit_wrq_wr()
3000 MPASS(pidx == eq->dbidx); in commit_wrq_wr()
3007 * is at pidx and not eq->pidx, which has moved on in commit_wrq_wr()
3010 dst = (void *)&eq->desc[pidx]; in commit_wrq_wr()
3011 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; in commit_wrq_wr()
3012 if (available < eq->sidx / 4 && in commit_wrq_wr()
3013 atomic_cmpset_int(&eq->equiq, 0, 1)) { in commit_wrq_wr()
3019 dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ | in commit_wrq_wr()
3023 if (__predict_true(eq->flags & EQ_HW_ALLOCATED)) in commit_wrq_wr()
3024 ring_eq_db(wrq->adapter, eq, ndesc); in commit_wrq_wr()
3026 IDXINCR(eq->dbidx, ndesc, eq->sidx); in commit_wrq_wr()
3028 MPASS(IDXDIFF(next->pidx, pidx, eq->sidx) == ndesc); in commit_wrq_wr()
3029 next->pidx = pidx; in commit_wrq_wr()
3030 next->ndesc += ndesc; in commit_wrq_wr()
3033 MPASS(IDXDIFF(pidx, prev->pidx, eq->sidx) == prev->ndesc); in commit_wrq_wr()
3034 prev->ndesc += ndesc; in commit_wrq_wr()
3036 TAILQ_REMOVE(&wrq->incomplete_wrs, cookie, link); in commit_wrq_wr()
3038 if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) in commit_wrq_wr()
3042 if (TAILQ_EMPTY(&wrq->incomplete_wrs)) { in commit_wrq_wr()
3044 MPASS(wrq->eq.pidx == wrq->eq.dbidx); in commit_wrq_wr()
3053 struct sge_eq *eq = r->cookie; in can_resume_eth_tx()
3055 return (total_available_tx_desc(eq) > eq->sidx / 8); in can_resume_eth_tx()
3070 return ((eq->flags & (EQ_ENABLED | EQ_QFLUSH)) != EQ_ENABLED); in discard_tx()
3078 switch (G_FW_WR_OP(be32toh(wr->op_pkd))) { in wr_can_update_eq()
3095 struct sge_eq *eq = &txq->eq; in set_txupdate_flags()
3096 struct txpkts *txp = &txq->txp; in set_txupdate_flags()
3098 if ((txp->npkt > 0 || avail < eq->sidx / 2) && in set_txupdate_flags()
3099 atomic_cmpset_int(&eq->equiq, 0, 1)) { in set_txupdate_flags()
3100 wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ | F_FW_WR_EQUIQ); in set_txupdate_flags()
3101 eq->equeqidx = eq->pidx; in set_txupdate_flags()
3102 } else if (IDXDIFF(eq->pidx, eq->equeqidx, eq->sidx) >= 32) { in set_txupdate_flags()
3103 wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ); in set_txupdate_flags()
3104 eq->equeqidx = eq->pidx; in set_txupdate_flags()
3116 const uint64_t last_tx = txq->last_tx; in record_eth_tx_time()
3124 txq->last_tx = cycles; in record_eth_tx_time()
3125 return (cycles - last_tx < itg); in record_eth_tx_time()
3129 * r->items[cidx] to r->items[pidx], with a wraparound at r->size, are ready to
3135 struct sge_txq *txq = r->cookie; in eth_tx()
3136 if_t ifp = txq->ifp; in eth_tx()
3137 struct sge_eq *eq = &txq->eq; in eth_tx()
3138 struct txpkts *txp = &txq->txp; in eth_tx()
3140 struct adapter *sc = vi->adapter; in eth_tx()
3146 void *wr; /* start of the last WR written to the ring */ in eth_tx()
3151 remaining = IDXDIFF(pidx, cidx, r->size); in eth_tx()
3153 for (i = 0; i < txp->npkt; i++) in eth_tx()
3154 m_freem(txp->mb[i]); in eth_tx()
3155 txp->npkt = 0; in eth_tx()
3157 m0 = r->items[cidx]; in eth_tx()
3159 if (++cidx == r->size) in eth_tx()
3162 reclaim_tx_descs(txq, eq->sidx); in eth_tx()
3168 if (eq->pidx == eq->cidx) in eth_tx()
3169 avail = eq->sidx - 1; in eth_tx()
3171 avail = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; in eth_tx()
3175 txp->score = 0; in eth_tx()
3176 txq->txpkts_flush++; in eth_tx()
3183 m0 = r->items[cidx]; in eth_tx()
3185 MPASS(m0->m_nextpkt == NULL); in eth_tx()
3190 if (t4_tx_coalesce == 0 && txp->npkt == 0) in eth_tx()
3193 txp->score = 0; in eth_tx()
3195 if (++txp->score == 0) in eth_tx()
3196 txp->score = UINT8_MAX; in eth_tx()
3198 txp->score = 1; in eth_tx()
3199 if (txp->npkt > 0 || remaining > 1 || in eth_tx()
3200 txp->score >= t4_tx_coalesce_pkts || in eth_tx()
3201 atomic_load_int(&txq->eq.equiq) != 0) { in eth_tx()
3202 if (vi->flags & TX_USES_VM_WR) in eth_tx()
3211 MPASS(txp->npkt > 0); in eth_tx()
3212 for (i = 0; i < txp->npkt; i++) in eth_tx()
3213 ETHER_BPF_MTAP(ifp, txp->mb[i]); in eth_tx()
3214 if (txp->npkt > 1) { in eth_tx()
3215 MPASS(avail >= tx_len16_to_desc(txp->len16)); in eth_tx()
3216 if (vi->flags & TX_USES_VM_WR) in eth_tx()
3222 tx_len16_to_desc(mbuf_len16(txp->mb[0]))); in eth_tx()
3223 if (vi->flags & TX_USES_VM_WR) in eth_tx()
3225 txp->mb[0]); in eth_tx()
3227 n = write_txpkt_wr(sc, txq, txp->mb[0], in eth_tx()
3231 avail -= n; in eth_tx()
3233 wr = &eq->desc[eq->pidx]; in eth_tx()
3234 IDXINCR(eq->pidx, n, eq->sidx); in eth_tx()
3235 txp->npkt = 0; /* emptied */ in eth_tx()
3238 /* m0 was coalesced into txq->txpkts. */ in eth_tx()
3244 * combined with the existing txq->txpkts, which has now in eth_tx()
3248 MPASS(txp->npkt == 0); in eth_tx()
3253 MPASS(txp->npkt == 0); in eth_tx()
3262 wr = &eq->desc[eq->pidx]; in eth_tx()
3272 if (vi->flags & TX_USES_VM_WR) in eth_tx()
3281 avail -= n; in eth_tx()
3283 IDXINCR(eq->pidx, n, eq->sidx); in eth_tx()
3294 remaining--; in eth_tx()
3295 if (__predict_false(++cidx == r->size)) in eth_tx()
3303 } else if (eq->pidx == eq->cidx && txp->npkt > 0 && in eth_tx()
3304 atomic_load_int(&txq->eq.equiq) == 0) { in eth_tx()
3311 MPASS(txp->npkt > 0); in eth_tx()
3312 for (i = 0; i < txp->npkt; i++) in eth_tx()
3313 ETHER_BPF_MTAP(ifp, txp->mb[i]); in eth_tx()
3314 if (txp->npkt > 1) { in eth_tx()
3315 MPASS(avail >= tx_len16_to_desc(txp->len16)); in eth_tx()
3316 if (vi->flags & TX_USES_VM_WR) in eth_tx()
3322 tx_len16_to_desc(mbuf_len16(txp->mb[0]))); in eth_tx()
3323 if (vi->flags & TX_USES_VM_WR) in eth_tx()
3324 n = write_txpkt_vm_wr(sc, txq, txp->mb[0]); in eth_tx()
3326 n = write_txpkt_wr(sc, txq, txp->mb[0], avail); in eth_tx()
3329 wr = &eq->desc[eq->pidx]; in eth_tx()
3330 IDXINCR(eq->pidx, n, eq->sidx); in eth_tx()
3331 txp->npkt = 0; /* emptied */ in eth_tx()
3334 set_txupdate_flags(txq, avail - n, wr); in eth_tx()
3338 *coalescing = txp->npkt > 0; in eth_tx()
3350 KASSERT(pktc_idx < SGE_NCOUNTERS, /* -ve is ok, means don't use */ in init_iq()
3352 KASSERT(intr_idx >= -1 && intr_idx < sc->intr_count, in init_iq()
3357 iq->flags = 0; in init_iq()
3358 iq->state = IQS_DISABLED; in init_iq()
3359 iq->adapter = sc; in init_iq()
3360 iq->qtype = qtype; in init_iq()
3361 iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx); in init_iq()
3362 iq->intr_pktc_idx = SGE_NCOUNTERS - 1; in init_iq()
3364 iq->intr_params |= F_QINTR_CNT_EN; in init_iq()
3365 iq->intr_pktc_idx = pktc_idx; in init_iq()
3367 iq->qsize = roundup2(qsize, 16); /* See FW_IQ_CMD/iqsize */ in init_iq()
3368 iq->sidx = iq->qsize - sc->params.sge.spg_len / IQ_ESIZE; in init_iq()
3369 iq->intr_idx = intr_idx; in init_iq()
3370 iq->cong_drop = cong; in init_iq()
3376 struct sge_params *sp = &sc->params.sge; in init_fl()
3378 fl->qsize = qsize; in init_fl()
3379 fl->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE; in init_fl()
3380 strlcpy(fl->lockname, name, sizeof(fl->lockname)); in init_fl()
3381 mtx_init(&fl->fl_lock, fl->lockname, NULL, MTX_DEF); in init_fl()
3382 if (sc->flags & BUF_PACKING_OK && in init_fl()
3385 fl->flags |= FL_BUF_PACKING; in init_fl()
3386 fl->zidx = find_refill_source(sc, maxp, fl->flags & FL_BUF_PACKING); in init_fl()
3387 fl->safe_zidx = sc->sge.safe_zidx; in init_fl()
3388 if (fl->flags & FL_BUF_PACKING) { in init_fl()
3389 fl->lowat = roundup2(sp->fl_starve_threshold2, 8); in init_fl()
3390 fl->buf_boundary = sp->pack_boundary; in init_fl()
3392 fl->lowat = roundup2(sp->fl_starve_threshold, 8); in init_fl()
3393 fl->buf_boundary = 16; in init_fl()
3395 if (fl_pad && fl->buf_boundary < sp->pad_boundary) in init_fl()
3396 fl->buf_boundary = sp->pad_boundary; in init_fl()
3406 eq->type = eqtype; in init_eq()
3407 eq->port_id = port_id; in init_eq()
3408 eq->tx_chan = sc->port[port_id]->tx_chan; in init_eq()
3409 eq->iq = iq; in init_eq()
3410 eq->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE; in init_eq()
3411 strlcpy(eq->lockname, name, sizeof(eq->lockname)); in init_eq()
3412 mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF); in init_eq()
3421 rc = bus_dma_tag_create(sc->dmat, 512, 0, BUS_SPACE_MAXADDR, in alloc_ring()
3473 struct adapter *sc = vi->adapter; in alloc_iq_fl()
3475 MPASS(!(iq->flags & IQ_SW_ALLOCATED)); in alloc_iq_fl()
3477 len = iq->qsize * IQ_ESIZE; in alloc_iq_fl()
3478 rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba, in alloc_iq_fl()
3479 (void **)&iq->desc); in alloc_iq_fl()
3484 len = fl->qsize * EQ_ESIZE; in alloc_iq_fl()
3485 rc = alloc_ring(sc, len, &fl->desc_tag, &fl->desc_map, in alloc_iq_fl()
3486 &fl->ba, (void **)&fl->desc); in alloc_iq_fl()
3488 free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, in alloc_iq_fl()
3489 iq->desc); in alloc_iq_fl()
3494 fl->sdesc = malloc(fl->sidx * 8 * sizeof(struct fl_sdesc), in alloc_iq_fl()
3498 iq->flags |= IQ_HAS_FL; in alloc_iq_fl()
3501 iq->flags |= IQ_SW_ALLOCATED; in alloc_iq_fl()
3513 MPASS(iq->flags & IQ_SW_ALLOCATED); in free_iq_fl()
3516 MPASS(iq->flags & IQ_HAS_FL); in free_iq_fl()
3517 free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba, fl->desc); in free_iq_fl()
3519 free(fl->sdesc, M_CXGBE); in free_iq_fl()
3520 mtx_destroy(&fl->fl_lock); in free_iq_fl()
3523 free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, iq->desc); in free_iq_fl()
3539 struct adapter *sc = vi->adapter; in alloc_iq_fl_hwq()
3540 struct port_info *pi = vi->pi; in alloc_iq_fl_hwq()
3543 MPASS (!(iq->flags & IQ_HW_ALLOCATED)); in alloc_iq_fl_hwq()
3547 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) | in alloc_iq_fl_hwq()
3554 if (iq == &sc->sge.fwq) in alloc_iq_fl_hwq()
3557 if (iq->intr_idx < 0) { in alloc_iq_fl_hwq()
3560 v |= V_FW_IQ_CMD_IQANDSTINDEX(sc->sge.fwq.cntxt_id); in alloc_iq_fl_hwq()
3562 KASSERT(iq->intr_idx < sc->intr_count, in alloc_iq_fl_hwq()
3563 ("%s: invalid direct intr_idx %d", __func__, iq->intr_idx)); in alloc_iq_fl_hwq()
3564 v |= V_FW_IQ_CMD_IQANDSTINDEX(iq->intr_idx); in alloc_iq_fl_hwq()
3567 bzero(iq->desc, iq->qsize * IQ_ESIZE); in alloc_iq_fl_hwq()
3570 V_FW_IQ_CMD_VIID(vi->viid) | in alloc_iq_fl_hwq()
3572 c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) | in alloc_iq_fl_hwq()
3574 V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) | in alloc_iq_fl_hwq()
3575 V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4)); in alloc_iq_fl_hwq()
3576 c.iqsize = htobe16(iq->qsize); in alloc_iq_fl_hwq()
3577 c.iqaddr = htobe64(iq->ba); in alloc_iq_fl_hwq()
3578 c.iqns_to_fl0congen = htobe32(V_FW_IQ_CMD_IQTYPE(iq->qtype)); in alloc_iq_fl_hwq()
3579 if (iq->cong_drop != -1) { in alloc_iq_fl_hwq()
3580 cong_map = iq->qtype == IQ_ETH ? pi->rx_e_chan_map : 0; in alloc_iq_fl_hwq()
3585 bzero(fl->desc, fl->sidx * EQ_ESIZE + sc->params.sge.spg_len); in alloc_iq_fl_hwq()
3590 (fl->flags & FL_BUF_PACKING ? F_FW_IQ_CMD_FL0PACKEN : in alloc_iq_fl_hwq()
3592 if (iq->cong_drop != -1) { in alloc_iq_fl_hwq()
3603 c.fl0size = htobe16(fl->qsize); in alloc_iq_fl_hwq()
3604 c.fl0addr = htobe64(fl->ba); in alloc_iq_fl_hwq()
3607 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); in alloc_iq_fl_hwq()
3613 iq->cidx = 0; in alloc_iq_fl_hwq()
3614 iq->gen = F_RSPD_GEN; in alloc_iq_fl_hwq()
3615 iq->cntxt_id = be16toh(c.iqid); in alloc_iq_fl_hwq()
3616 iq->abs_id = be16toh(c.physiqid); in alloc_iq_fl_hwq()
3618 cntxt_id = iq->cntxt_id - sc->sge.iq_start; in alloc_iq_fl_hwq()
3619 if (cntxt_id >= sc->sge.iqmap_sz) { in alloc_iq_fl_hwq()
3620 panic ("%s: iq->cntxt_id (%d) more than the max (%d)", __func__, in alloc_iq_fl_hwq()
3621 cntxt_id, sc->sge.iqmap_sz - 1); in alloc_iq_fl_hwq()
3623 sc->sge.iqmap[cntxt_id] = iq; in alloc_iq_fl_hwq()
3630 MPASS(!(fl->flags & FL_BUF_RESUME)); in alloc_iq_fl_hwq()
3631 for (i = 0; i < fl->sidx * 8; i++) in alloc_iq_fl_hwq()
3632 MPASS(fl->sdesc[i].cl == NULL); in alloc_iq_fl_hwq()
3634 fl->cntxt_id = be16toh(c.fl0id); in alloc_iq_fl_hwq()
3635 fl->pidx = fl->cidx = fl->hw_cidx = fl->dbidx = 0; in alloc_iq_fl_hwq()
3636 fl->rx_offset = 0; in alloc_iq_fl_hwq()
3637 fl->flags &= ~(FL_STARVING | FL_DOOMED); in alloc_iq_fl_hwq()
3639 cntxt_id = fl->cntxt_id - sc->sge.eq_start; in alloc_iq_fl_hwq()
3640 if (cntxt_id >= sc->sge.eqmap_sz) { in alloc_iq_fl_hwq()
3641 panic("%s: fl->cntxt_id (%d) more than the max (%d)", in alloc_iq_fl_hwq()
3642 __func__, cntxt_id, sc->sge.eqmap_sz - 1); in alloc_iq_fl_hwq()
3644 sc->sge.eqmap[cntxt_id] = (void *)fl; in alloc_iq_fl_hwq()
3646 qid = fl->cntxt_id; in alloc_iq_fl_hwq()
3647 if (isset(&sc->doorbells, DOORBELL_UDB)) { in alloc_iq_fl_hwq()
3648 uint32_t s_qpp = sc->params.sge.eq_s_qpp; in alloc_iq_fl_hwq()
3649 uint32_t mask = (1 << s_qpp) - 1; in alloc_iq_fl_hwq()
3652 udb = sc->udbs_base + UDBS_DB_OFFSET; in alloc_iq_fl_hwq()
3659 fl->udb = (volatile void *)udb; in alloc_iq_fl_hwq()
3661 fl->dbval = V_QID(qid) | sc->chip_params->sge_fl_db; in alloc_iq_fl_hwq()
3665 refill_fl(sc, fl, fl->lowat); in alloc_iq_fl_hwq()
3669 if (chip_id(sc) >= CHELSIO_T5 && !(sc->flags & IS_VF) && in alloc_iq_fl_hwq()
3670 iq->cong_drop != -1) { in alloc_iq_fl_hwq()
3671 t4_sge_set_conm_context(sc, iq->cntxt_id, iq->cong_drop, in alloc_iq_fl_hwq()
3676 atomic_store_rel_int(&iq->state, IQS_IDLE); in alloc_iq_fl_hwq()
3677 t4_write_reg(sc, sc->sge_gts_reg, V_SEINTARM(iq->intr_params) | in alloc_iq_fl_hwq()
3678 V_INGRESSQID(iq->cntxt_id)); in alloc_iq_fl_hwq()
3680 iq->flags |= IQ_HW_ALLOCATED; in alloc_iq_fl_hwq()
3690 MPASS(iq->flags & IQ_HW_ALLOCATED); in free_iq_fl_hwq()
3691 rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP, in free_iq_fl_hwq()
3692 iq->cntxt_id, fl ? fl->cntxt_id : 0xffff, 0xffff); in free_iq_fl_hwq()
3697 iq->flags &= ~IQ_HW_ALLOCATED; in free_iq_fl_hwq()
3712 SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, &iq->ba, in add_iq_sysctls()
3713 "bus address of descriptor ring"); in add_iq_sysctls()
3715 iq->qsize * IQ_ESIZE, "descriptor ring size in bytes"); in add_iq_sysctls()
3717 &iq->abs_id, 0, "absolute id of the queue"); in add_iq_sysctls()
3719 &iq->cntxt_id, 0, "SGE context id of the queue"); in add_iq_sysctls()
3720 SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &iq->cidx, in add_iq_sysctls()
3739 &fl->ba, "bus address of descriptor ring"); in add_fl_sysctls()
3741 fl->sidx * EQ_ESIZE + sc->params.sge.spg_len, in add_fl_sysctls()
3742 "desc ring size in bytes"); in add_fl_sysctls()
3744 &fl->cntxt_id, 0, "SGE context id of the freelist"); in add_fl_sysctls()
3748 fl->flags & FL_BUF_PACKING ? 1 : 0, "packing enabled"); in add_fl_sysctls()
3749 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &fl->cidx, in add_fl_sysctls()
3751 if (fl->flags & FL_BUF_PACKING) { in add_fl_sysctls()
3753 CTLFLAG_RD, &fl->rx_offset, 0, "packing rx offset"); in add_fl_sysctls()
3755 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, &fl->pidx, in add_fl_sysctls()
3758 CTLFLAG_RD, &fl->cl_allocated, "# of clusters allocated"); in add_fl_sysctls()
3760 CTLFLAG_RD, &fl->cl_recycled, "# of clusters recycled"); in add_fl_sysctls()
3762 CTLFLAG_RD, &fl->cl_fast_recycled, "# of clusters recycled (fast)"); in add_fl_sysctls()
3772 struct sge_iq *fwq = &sc->sge.fwq; in alloc_fwq()
3773 struct vi_info *vi = &sc->port[0]->vi[0]; in alloc_fwq()
3775 if (!(fwq->flags & IQ_SW_ALLOCATED)) { in alloc_fwq()
3776 MPASS(!(fwq->flags & IQ_HW_ALLOCATED)); in alloc_fwq()
3778 if (sc->flags & IS_VF) in alloc_fwq()
3781 intr_idx = sc->intr_count > 1 ? 1 : 0; in alloc_fwq()
3782 init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE, intr_idx, -1, IQ_OTHER); in alloc_fwq()
3783 rc = alloc_iq_fl(vi, fwq, NULL, &sc->ctx, sc->fwq_oid); in alloc_fwq()
3788 MPASS(fwq->flags & IQ_SW_ALLOCATED); in alloc_fwq()
3791 if (!(fwq->flags & IQ_HW_ALLOCATED)) { in alloc_fwq()
3792 MPASS(fwq->flags & IQ_SW_ALLOCATED); in alloc_fwq()
3799 MPASS(fwq->flags & IQ_HW_ALLOCATED); in alloc_fwq()
3811 struct sge_iq *fwq = &sc->sge.fwq; in free_fwq()
3813 if (fwq->flags & IQ_HW_ALLOCATED) { in free_fwq()
3814 MPASS(fwq->flags & IQ_SW_ALLOCATED); in free_fwq()
3816 MPASS(!(fwq->flags & IQ_HW_ALLOCATED)); in free_fwq()
3819 if (fwq->flags & IQ_SW_ALLOCATED) { in free_fwq()
3820 MPASS(!(fwq->flags & IQ_HW_ALLOCATED)); in free_fwq()
3822 MPASS(!(fwq->flags & IQ_SW_ALLOCATED)); in free_fwq()
3835 struct sge_wrq *ctrlq = &sc->sge.ctrlq[idx]; in alloc_ctrlq()
3837 MPASS(idx < sc->params.nports); in alloc_ctrlq()
3839 if (!(ctrlq->eq.flags & EQ_SW_ALLOCATED)) { in alloc_ctrlq()
3840 MPASS(!(ctrlq->eq.flags & EQ_HW_ALLOCATED)); in alloc_ctrlq()
3843 oid = SYSCTL_ADD_NODE(&sc->ctx, SYSCTL_CHILDREN(sc->ctrlq_oid), in alloc_ctrlq()
3848 device_get_nameunit(sc->dev), idx); in alloc_ctrlq()
3849 init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, idx, in alloc_ctrlq()
3850 &sc->sge.fwq, name); in alloc_ctrlq()
3851 rc = alloc_wrq(sc, NULL, ctrlq, &sc->ctx, oid); in alloc_ctrlq()
3857 MPASS(ctrlq->eq.flags & EQ_SW_ALLOCATED); in alloc_ctrlq()
3860 if (!(ctrlq->eq.flags & EQ_HW_ALLOCATED)) { in alloc_ctrlq()
3861 MPASS(ctrlq->eq.flags & EQ_SW_ALLOCATED); in alloc_ctrlq()
3862 MPASS(ctrlq->nwr_pending == 0); in alloc_ctrlq()
3863 MPASS(ctrlq->ndesc_needed == 0); in alloc_ctrlq()
3865 rc = alloc_eq_hwq(sc, NULL, &ctrlq->eq); in alloc_ctrlq()
3870 MPASS(ctrlq->eq.flags & EQ_HW_ALLOCATED); in alloc_ctrlq()
3882 struct sge_wrq *ctrlq = &sc->sge.ctrlq[idx]; in free_ctrlq()
3884 if (ctrlq->eq.flags & EQ_HW_ALLOCATED) { in free_ctrlq()
3885 MPASS(ctrlq->eq.flags & EQ_SW_ALLOCATED); in free_ctrlq()
3886 free_eq_hwq(sc, NULL, &ctrlq->eq); in free_ctrlq()
3887 MPASS(!(ctrlq->eq.flags & EQ_HW_ALLOCATED)); in free_ctrlq()
3890 if (ctrlq->eq.flags & EQ_SW_ALLOCATED) { in free_ctrlq()
3891 MPASS(!(ctrlq->eq.flags & EQ_HW_ALLOCATED)); in free_ctrlq()
3893 MPASS(!(ctrlq->eq.flags & EQ_SW_ALLOCATED)); in free_ctrlq()
3901 const int cng_ch_bits_log = sc->chip_params->cng_ch_bits_log; in t4_sge_set_conm_context()
3911 case -1: in t4_sge_set_conm_context()
3942 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); in t4_sge_set_conm_context()
3959 struct adapter *sc = vi->adapter; in alloc_rxq()
3960 if_t ifp = vi->ifp; in alloc_rxq()
3964 if (!(rxq->iq.flags & IQ_SW_ALLOCATED)) { in alloc_rxq()
3965 MPASS(!(rxq->iq.flags & IQ_HW_ALLOCATED)); in alloc_rxq()
3967 rc = tcp_lro_init_args(&rxq->lro, ifp, lro_entries, lro_mbufs); in alloc_rxq()
3970 MPASS(rxq->lro.ifp == ifp); /* also indicates LRO init'ed */ in alloc_rxq()
3972 rxq->ifp = ifp; in alloc_rxq()
3975 oid = SYSCTL_ADD_NODE(&vi->ctx, SYSCTL_CHILDREN(vi->rxq_oid), in alloc_rxq()
3979 init_iq(&rxq->iq, sc, vi->tmr_idx, vi->pktc_idx, vi->qsize_rxq, in alloc_rxq()
3983 rxq->iq.flags |= IQ_LRO_ENABLED; in alloc_rxq()
3986 rxq->iq.flags |= IQ_RX_TIMESTAMP; in alloc_rxq()
3987 snprintf(name, sizeof(name), "%s rxq%d-fl", in alloc_rxq()
3988 device_get_nameunit(vi->dev), idx); in alloc_rxq()
3989 init_fl(sc, &rxq->fl, vi->qsize_rxq / 8, maxp, name); in alloc_rxq()
3990 rc = alloc_iq_fl(vi, &rxq->iq, &rxq->fl, &vi->ctx, oid); in alloc_rxq()
3995 tcp_lro_free(&rxq->lro); in alloc_rxq()
3996 rxq->lro.ifp = NULL; in alloc_rxq()
4000 MPASS(rxq->iq.flags & IQ_SW_ALLOCATED); in alloc_rxq()
4001 add_rxq_sysctls(&vi->ctx, oid, rxq); in alloc_rxq()
4004 if (!(rxq->iq.flags & IQ_HW_ALLOCATED)) { in alloc_rxq()
4005 MPASS(rxq->iq.flags & IQ_SW_ALLOCATED); in alloc_rxq()
4006 rc = alloc_iq_fl_hwq(vi, &rxq->iq, &rxq->fl); in alloc_rxq()
4011 MPASS(rxq->iq.flags & IQ_HW_ALLOCATED); in alloc_rxq()
4014 sc->sge.iq_base = rxq->iq.abs_id - rxq->iq.cntxt_id; in alloc_rxq()
4016 KASSERT(rxq->iq.cntxt_id + sc->sge.iq_base == rxq->iq.abs_id, in alloc_rxq()
4018 KASSERT(sc->sge.iq_base == 0 || sc->flags & IS_VF, in alloc_rxq()
4019 ("PF with non-zero iq_base")); in alloc_rxq()
4025 FL_LOCK(&rxq->fl); in alloc_rxq()
4026 refill_fl(sc, &rxq->fl, 128); in alloc_rxq()
4027 FL_UNLOCK(&rxq->fl); in alloc_rxq()
4039 if (rxq->iq.flags & IQ_HW_ALLOCATED) { in free_rxq()
4040 MPASS(rxq->iq.flags & IQ_SW_ALLOCATED); in free_rxq()
4041 free_iq_fl_hwq(vi->adapter, &rxq->iq, &rxq->fl); in free_rxq()
4042 MPASS(!(rxq->iq.flags & IQ_HW_ALLOCATED)); in free_rxq()
4045 if (rxq->iq.flags & IQ_SW_ALLOCATED) { in free_rxq()
4046 MPASS(!(rxq->iq.flags & IQ_HW_ALLOCATED)); in free_rxq()
4048 tcp_lro_free(&rxq->lro); in free_rxq()
4050 free_iq_fl(vi->adapter, &rxq->iq, &rxq->fl); in free_rxq()
4051 MPASS(!(rxq->iq.flags & IQ_SW_ALLOCATED)); in free_rxq()
4068 &rxq->lro.lro_queued, 0, NULL); in add_rxq_sysctls()
4070 &rxq->lro.lro_flushed, 0, NULL); in add_rxq_sysctls()
4073 &rxq->rxcsum, "# of times hardware assisted with checksum"); in add_rxq_sysctls()
4075 &rxq->vlan_extraction, "# of times hardware extracted 802.1Q tag"); in add_rxq_sysctls()
4077 &rxq->vxlan_rxcsum, in add_rxq_sysctls()
4090 struct adapter *sc = vi->adapter; in alloc_ofld_rxq()
4094 if (!(ofld_rxq->iq.flags & IQ_SW_ALLOCATED)) { in alloc_ofld_rxq()
4095 MPASS(!(ofld_rxq->iq.flags & IQ_HW_ALLOCATED)); in alloc_ofld_rxq()
4098 oid = SYSCTL_ADD_NODE(&vi->ctx, in alloc_ofld_rxq()
4099 SYSCTL_CHILDREN(vi->ofld_rxq_oid), OID_AUTO, name, in alloc_ofld_rxq()
4102 init_iq(&ofld_rxq->iq, sc, vi->ofld_tmr_idx, vi->ofld_pktc_idx, in alloc_ofld_rxq()
4103 vi->qsize_rxq, intr_idx, ofld_cong_drop, IQ_OFLD); in alloc_ofld_rxq()
4104 snprintf(name, sizeof(name), "%s ofld_rxq%d-fl", in alloc_ofld_rxq()
4105 device_get_nameunit(vi->dev), idx); in alloc_ofld_rxq()
4106 init_fl(sc, &ofld_rxq->fl, vi->qsize_rxq / 8, maxp, name); in alloc_ofld_rxq()
4107 rc = alloc_iq_fl(vi, &ofld_rxq->iq, &ofld_rxq->fl, &vi->ctx, in alloc_ofld_rxq()
4115 MPASS(ofld_rxq->iq.flags & IQ_SW_ALLOCATED); in alloc_ofld_rxq()
4116 ofld_rxq->rx_iscsi_ddp_setup_ok = counter_u64_alloc(M_WAITOK); in alloc_ofld_rxq()
4117 ofld_rxq->rx_iscsi_ddp_setup_error = in alloc_ofld_rxq()
4119 ofld_rxq->ddp_buffer_alloc = counter_u64_alloc(M_WAITOK); in alloc_ofld_rxq()
4120 ofld_rxq->ddp_buffer_reuse = counter_u64_alloc(M_WAITOK); in alloc_ofld_rxq()
4121 ofld_rxq->ddp_buffer_free = counter_u64_alloc(M_WAITOK); in alloc_ofld_rxq()
4122 add_ofld_rxq_sysctls(&vi->ctx, oid, ofld_rxq); in alloc_ofld_rxq()
4125 if (!(ofld_rxq->iq.flags & IQ_HW_ALLOCATED)) { in alloc_ofld_rxq()
4126 MPASS(ofld_rxq->iq.flags & IQ_SW_ALLOCATED); in alloc_ofld_rxq()
4127 rc = alloc_iq_fl_hwq(vi, &ofld_rxq->iq, &ofld_rxq->fl); in alloc_ofld_rxq()
4133 MPASS(ofld_rxq->iq.flags & IQ_HW_ALLOCATED); in alloc_ofld_rxq()
4144 if (ofld_rxq->iq.flags & IQ_HW_ALLOCATED) { in free_ofld_rxq()
4145 MPASS(ofld_rxq->iq.flags & IQ_SW_ALLOCATED); in free_ofld_rxq()
4146 free_iq_fl_hwq(vi->adapter, &ofld_rxq->iq, &ofld_rxq->fl); in free_ofld_rxq()
4147 MPASS(!(ofld_rxq->iq.flags & IQ_HW_ALLOCATED)); in free_ofld_rxq()
4150 if (ofld_rxq->iq.flags & IQ_SW_ALLOCATED) { in free_ofld_rxq()
4151 MPASS(!(ofld_rxq->iq.flags & IQ_HW_ALLOCATED)); in free_ofld_rxq()
4152 free_iq_fl(vi->adapter, &ofld_rxq->iq, &ofld_rxq->fl); in free_ofld_rxq()
4153 MPASS(!(ofld_rxq->iq.flags & IQ_SW_ALLOCATED)); in free_ofld_rxq()
4154 counter_u64_free(ofld_rxq->rx_iscsi_ddp_setup_ok); in free_ofld_rxq()
4155 counter_u64_free(ofld_rxq->rx_iscsi_ddp_setup_error); in free_ofld_rxq()
4156 counter_u64_free(ofld_rxq->ddp_buffer_alloc); in free_ofld_rxq()
4157 counter_u64_free(ofld_rxq->ddp_buffer_reuse); in free_ofld_rxq()
4158 counter_u64_free(ofld_rxq->ddp_buffer_free); in free_ofld_rxq()
4174 CTLFLAG_RD, &ofld_rxq->rx_aio_ddp_jobs, 0, in add_ofld_rxq_sysctls()
4177 CTLFLAG_RD, &ofld_rxq->rx_aio_ddp_octets, 0, in add_ofld_rxq_sysctls()
4180 "rx_toe_tls_records", CTLFLAG_RD, &ofld_rxq->rx_toe_tls_records, in add_ofld_rxq_sysctls()
4183 "rx_toe_tls_octets", CTLFLAG_RD, &ofld_rxq->rx_toe_tls_octets, in add_ofld_rxq_sysctls()
4186 "rx_toe_ddp_octets", CTLFLAG_RD, &ofld_rxq->rx_toe_ddp_octets, in add_ofld_rxq_sysctls()
4189 "ddp_buffer_alloc", CTLFLAG_RD, &ofld_rxq->ddp_buffer_alloc, in add_ofld_rxq_sysctls()
4192 "ddp_buffer_reuse", CTLFLAG_RD, &ofld_rxq->ddp_buffer_reuse, in add_ofld_rxq_sysctls()
4195 "ddp_buffer_free", CTLFLAG_RD, &ofld_rxq->ddp_buffer_free, in add_ofld_rxq_sysctls()
4203 CTLFLAG_RD, &ofld_rxq->rx_iscsi_ddp_setup_ok, in add_ofld_rxq_sysctls()
4206 CTLFLAG_RD, &ofld_rxq->rx_iscsi_ddp_setup_error, in add_ofld_rxq_sysctls()
4209 CTLFLAG_RD, &ofld_rxq->rx_iscsi_ddp_octets, 0, in add_ofld_rxq_sysctls()
4212 CTLFLAG_RD, &ofld_rxq->rx_iscsi_ddp_pdus, 0, in add_ofld_rxq_sysctls()
4215 CTLFLAG_RD, &ofld_rxq->rx_iscsi_fl_octets, 0, in add_ofld_rxq_sysctls()
4218 CTLFLAG_RD, &ofld_rxq->rx_iscsi_fl_pdus, 0, in add_ofld_rxq_sysctls()
4221 CTLFLAG_RD, &ofld_rxq->rx_iscsi_padding_errors, 0, in add_ofld_rxq_sysctls()
4224 CTLFLAG_RD, &ofld_rxq->rx_iscsi_header_digest_errors, 0, in add_ofld_rxq_sysctls()
4227 CTLFLAG_RD, &ofld_rxq->rx_iscsi_data_digest_errors, 0, in add_ofld_rxq_sysctls()
4252 int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; in ctrl_eq_alloc()
4257 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) | in ctrl_eq_alloc()
4261 c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid)); in ctrl_eq_alloc()
4265 V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) | in ctrl_eq_alloc()
4266 F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid)); in ctrl_eq_alloc()
4273 c.eqaddr = htobe64(eq->ba); in ctrl_eq_alloc()
4275 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); in ctrl_eq_alloc()
4278 eq->tx_chan, rc); in ctrl_eq_alloc()
4282 eq->cntxt_id = G_FW_EQ_CTRL_CMD_EQID(be32toh(c.cmpliqid_eqid)); in ctrl_eq_alloc()
4283 eq->abs_id = G_FW_EQ_CTRL_CMD_PHYSEQID(be32toh(c.physeqid_pkd)); in ctrl_eq_alloc()
4284 cntxt_id = eq->cntxt_id - sc->sge.eq_start; in ctrl_eq_alloc()
4285 if (cntxt_id >= sc->sge.eqmap_sz) in ctrl_eq_alloc()
4286 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, in ctrl_eq_alloc()
4287 cntxt_id, sc->sge.eqmap_sz - 1); in ctrl_eq_alloc()
4288 sc->sge.eqmap[cntxt_id] = eq; in ctrl_eq_alloc()
4298 int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; in eth_eq_alloc()
4303 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) | in eth_eq_alloc()
4308 F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid)); in eth_eq_alloc()
4311 V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO | in eth_eq_alloc()
4312 V_FW_EQ_ETH_CMD_IQID(eq->iqid)); in eth_eq_alloc()
4318 c.eqaddr = htobe64(eq->ba); in eth_eq_alloc()
4320 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); in eth_eq_alloc()
4322 device_printf(vi->dev, in eth_eq_alloc()
4327 eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd)); in eth_eq_alloc()
4328 eq->abs_id = G_FW_EQ_ETH_CMD_PHYSEQID(be32toh(c.physeqid_pkd)); in eth_eq_alloc()
4329 cntxt_id = eq->cntxt_id - sc->sge.eq_start; in eth_eq_alloc()
4330 if (cntxt_id >= sc->sge.eqmap_sz) in eth_eq_alloc()
4331 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, in eth_eq_alloc()
4332 cntxt_id, sc->sge.eqmap_sz - 1); in eth_eq_alloc()
4333 sc->sge.eqmap[cntxt_id] = eq; in eth_eq_alloc()
4344 int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; in ofld_eq_alloc()
4349 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(sc->pf) | in ofld_eq_alloc()
4355 V_FW_EQ_OFLD_CMD_PCIECHN(eq->tx_chan) | in ofld_eq_alloc()
4356 F_FW_EQ_OFLD_CMD_FETCHRO | V_FW_EQ_OFLD_CMD_IQID(eq->iqid)); in ofld_eq_alloc()
4363 c.eqaddr = htobe64(eq->ba); in ofld_eq_alloc()
4365 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); in ofld_eq_alloc()
4367 device_printf(vi->dev, in ofld_eq_alloc()
4372 eq->cntxt_id = G_FW_EQ_OFLD_CMD_EQID(be32toh(c.eqid_pkd)); in ofld_eq_alloc()
4373 eq->abs_id = G_FW_EQ_OFLD_CMD_PHYSEQID(be32toh(c.physeqid_pkd)); in ofld_eq_alloc()
4374 cntxt_id = eq->cntxt_id - sc->sge.eq_start; in ofld_eq_alloc()
4375 if (cntxt_id >= sc->sge.eqmap_sz) in ofld_eq_alloc()
4376 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, in ofld_eq_alloc()
4377 cntxt_id, sc->sge.eqmap_sz - 1); in ofld_eq_alloc()
4378 sc->sge.eqmap[cntxt_id] = eq; in ofld_eq_alloc()
4392 MPASS(!(eq->flags & EQ_SW_ALLOCATED)); in alloc_eq()
4394 qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; in alloc_eq()
4396 rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map, &eq->ba, in alloc_eq()
4397 (void **)&eq->desc); in alloc_eq()
4402 eq->flags |= EQ_SW_ALLOCATED; in alloc_eq()
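Every EQ here is sized the same way: the usable descriptors (eq->sidx) plus however many descriptor slots the status page occupies, qsize = sidx + spg_len / EQ_ESIZE, and the same expression shows up in the byte size reported by the sysctls below and in the bzero() done when the hardware queue is created. A worked example of that arithmetic, assuming 64-byte descriptors (EQ_ESIZE) and the 64- or 128-byte status page controlled by the spg_len tunable:

/* Illustrative arithmetic only. */
#define	EX_EQ_ESIZE	64		/* bytes per tx descriptor (assumed) */

static int
eq_qsize(int sidx, int spg_len)
{
	return (sidx + spg_len / EX_EQ_ESIZE);
}
/* sidx = 1024, spg_len = 64  -> qsize = 1025 (status page takes 1 slot)
 * sidx = 1024, spg_len = 128 -> qsize = 1026 (status page takes 2 slots)
 * and the ring occupies qsize * 64 bytes of host memory. */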
4411 MPASS(eq->flags & EQ_SW_ALLOCATED); in free_eq()
4412 if (eq->type == EQ_ETH) in free_eq()
4413 MPASS(eq->pidx == eq->cidx); in free_eq()
4415 free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc); in free_eq()
4416 mtx_destroy(&eq->eq_lock); in free_eq()
4426 SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, &eq->ba, in add_eq_sysctls()
4427 "bus address of descriptor ring"); in add_eq_sysctls()
4429 eq->sidx * EQ_ESIZE + sc->params.sge.spg_len, in add_eq_sysctls()
4430 "desc ring size in bytes"); in add_eq_sysctls()
4432 &eq->abs_id, 0, "absolute id of the queue"); in add_eq_sysctls()
4434 &eq->cntxt_id, 0, "SGE context id of the queue"); in add_eq_sysctls()
4435 SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &eq->cidx, in add_eq_sysctls()
4437 SYSCTL_ADD_U16(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, &eq->pidx, in add_eq_sysctls()
4440 eq->sidx, "status page index"); in add_eq_sysctls()
4448 MPASS(!(eq->flags & EQ_HW_ALLOCATED)); in alloc_eq_hwq()
4450 eq->iqid = eq->iq->cntxt_id; in alloc_eq_hwq()
4451 eq->pidx = eq->cidx = eq->dbidx = 0; in alloc_eq_hwq()
4453 eq->equeqidx = 0; in alloc_eq_hwq()
4454 eq->doorbells = sc->doorbells; in alloc_eq_hwq()
4455 bzero(eq->desc, eq->sidx * EQ_ESIZE + sc->params.sge.spg_len); in alloc_eq_hwq()
4457 switch (eq->type) { in alloc_eq_hwq()
4473 panic("%s: invalid eq type %d.", __func__, eq->type); in alloc_eq_hwq()
4477 eq->type, rc); in alloc_eq_hwq()
4481 if (isset(&eq->doorbells, DOORBELL_UDB) || in alloc_eq_hwq()
4482 isset(&eq->doorbells, DOORBELL_UDBWC) || in alloc_eq_hwq()
4483 isset(&eq->doorbells, DOORBELL_WCWR)) { in alloc_eq_hwq()
4484 uint32_t s_qpp = sc->params.sge.eq_s_qpp; in alloc_eq_hwq()
4485 uint32_t mask = (1 << s_qpp) - 1; in alloc_eq_hwq()
4488 udb = sc->udbs_base + UDBS_DB_OFFSET; in alloc_eq_hwq()
4489 udb += (eq->cntxt_id >> s_qpp) << PAGE_SHIFT; /* pg offset */ in alloc_eq_hwq()
4490 eq->udb_qid = eq->cntxt_id & mask; /* id in page */ in alloc_eq_hwq()
4491 if (eq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE) in alloc_eq_hwq()
4492 clrbit(&eq->doorbells, DOORBELL_WCWR); in alloc_eq_hwq()
4494 udb += eq->udb_qid << UDBS_SEG_SHIFT; /* seg offset */ in alloc_eq_hwq()
4495 eq->udb_qid = 0; in alloc_eq_hwq()
4497 eq->udb = (volatile void *)udb; in alloc_eq_hwq()
4500 eq->flags |= EQ_HW_ALLOCATED; in alloc_eq_hwq()
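When user doorbells are available, alloc_eq_hwq() derives the queue's doorbell address from its SGE context id: eq_s_qpp is log2 of the number of EQs that share one doorbell page, so the high bits of cntxt_id select the page and the low bits the queue within it; a queue that fits in the page is pointed at its own segment and uses a relative qid of 0, otherwise the relative qid is kept and the write-combined WR doorbell is disabled. A rough stand-alone sketch of that address math (the page and segment sizes below are examples, not the driver's constants, and the fixed register offset within a segment is ignored):

#include <stdint.h>

#define	EX_PAGE_SHIFT		12	/* 4 KB doorbell pages (example) */
#define	EX_UDBS_SEG_SHIFT	7	/* 128 B per-queue segment (example) */
#define	EX_PAGE_SIZE		(1 << EX_PAGE_SHIFT)
#define	EX_UDBS_SEG_SIZE	(1 << EX_UDBS_SEG_SHIFT)

static uintptr_t
udb_address(uintptr_t udbs_base, unsigned int cntxt_id, unsigned int s_qpp,
    unsigned int *udb_qid)
{
	uintptr_t udb = udbs_base;
	unsigned int qid = cntxt_id & ((1u << s_qpp) - 1);	/* id in page */

	udb += (uintptr_t)(cntxt_id >> s_qpp) << EX_PAGE_SHIFT;	/* page */
	if (qid < EX_PAGE_SIZE / EX_UDBS_SEG_SIZE) {
		udb += (uintptr_t)qid << EX_UDBS_SEG_SHIFT;	/* own segment */
		qid = 0;
	}
	*udb_qid = qid;
	return (udb);
}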
4509 MPASS(eq->flags & EQ_HW_ALLOCATED); in free_eq_hwq()
4511 switch (eq->type) { in free_eq_hwq()
4513 rc = -t4_ctrl_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id); in free_eq_hwq()
4516 rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id); in free_eq_hwq()
4520 rc = -t4_ofld_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id); in free_eq_hwq()
4524 panic("%s: invalid eq type %d.", __func__, eq->type); in free_eq_hwq()
4527 CH_ERR(sc, "failed to free eq (type %d): %d\n", eq->type, rc); in free_eq_hwq()
4530 eq->flags &= ~EQ_HW_ALLOCATED; in free_eq_hwq()
4539 struct sge_eq *eq = &wrq->eq; in alloc_wrq()
4542 MPASS(!(eq->flags & EQ_SW_ALLOCATED)); in alloc_wrq()
4547 MPASS(eq->flags & EQ_SW_ALLOCATED); in alloc_wrq()
4550 wrq->adapter = sc; in alloc_wrq()
4551 TASK_INIT(&wrq->wrq_tx_task, 0, wrq_tx_drain, wrq); in alloc_wrq()
4552 TAILQ_INIT(&wrq->incomplete_wrs); in alloc_wrq()
4553 STAILQ_INIT(&wrq->wr_list); in alloc_wrq()
4554 wrq->nwr_pending = 0; in alloc_wrq()
4555 wrq->ndesc_needed = 0; in alloc_wrq()
4564 free_eq(sc, &wrq->eq); in free_wrq()
4565 MPASS(wrq->nwr_pending == 0); in free_wrq()
4566 MPASS(wrq->ndesc_needed == 0); in free_wrq()
4567 MPASS(TAILQ_EMPTY(&wrq->incomplete_wrs)); in free_wrq()
4568 MPASS(STAILQ_EMPTY(&wrq->wr_list)); in free_wrq()
4583 &wrq->tx_wrs_direct, "# of work requests (direct)"); in add_wrq_sysctls()
4585 &wrq->tx_wrs_copied, "# of work requests (copied)"); in add_wrq_sysctls()
4587 &wrq->tx_wrs_ss, "# of work requests (copied from scratch space)"); in add_wrq_sysctls()
4597 struct port_info *pi = vi->pi; in alloc_txq()
4598 struct adapter *sc = vi->adapter; in alloc_txq()
4599 struct sge_eq *eq = &txq->eq; in alloc_txq()
4604 if (!(eq->flags & EQ_SW_ALLOCATED)) { in alloc_txq()
4605 MPASS(!(eq->flags & EQ_HW_ALLOCATED)); in alloc_txq()
4608 oid = SYSCTL_ADD_NODE(&vi->ctx, SYSCTL_CHILDREN(vi->txq_oid), in alloc_txq()
4612 iqidx = vi->first_rxq + (idx % vi->nrxq); in alloc_txq()
4614 device_get_nameunit(vi->dev), idx); in alloc_txq()
4615 init_eq(sc, &txq->eq, EQ_ETH, vi->qsize_txq, pi->port_id, in alloc_txq()
4616 &sc->sge.rxq[iqidx].iq, name); in alloc_txq()
4618 rc = mp_ring_alloc(&txq->r, eq->sidx, txq, eth_tx, in alloc_txq()
4619 can_resume_eth_tx, M_CXGBE, &eq->eq_lock, M_WAITOK); in alloc_txq()
4628 rc = alloc_eq(sc, eq, &vi->ctx, oid); in alloc_txq()
4631 mp_ring_free(txq->r); in alloc_txq()
4634 MPASS(eq->flags & EQ_SW_ALLOCATED); in alloc_txq()
4637 TASK_INIT(&txq->tx_reclaim_task, 0, tx_reclaim, eq); in alloc_txq()
4638 txq->ifp = vi->ifp; in alloc_txq()
4639 txq->gl = sglist_alloc(TX_SGL_SEGS, M_WAITOK); in alloc_txq()
4640 txq->sdesc = malloc(eq->sidx * sizeof(struct tx_sdesc), M_CXGBE, in alloc_txq()
4643 add_txq_sysctls(vi, &vi->ctx, oid, txq); in alloc_txq()
4646 if (!(eq->flags & EQ_HW_ALLOCATED)) { in alloc_txq()
4647 MPASS(eq->flags & EQ_SW_ALLOCATED); in alloc_txq()
4653 MPASS(eq->flags & EQ_HW_ALLOCATED); in alloc_txq()
4657 sc->sge.eq_base = eq->abs_id - eq->cntxt_id; in alloc_txq()
4659 KASSERT(eq->cntxt_id + sc->sge.eq_base == eq->abs_id, in alloc_txq()
4661 KASSERT(sc->sge.eq_base == 0 || sc->flags & IS_VF, in alloc_txq()
4662 ("PF with non-zero eq_base")); in alloc_txq()
4664 txp = &txq->txp; in alloc_txq()
4665 MPASS(nitems(txp->mb) >= sc->params.max_pkts_per_eth_tx_pkts_wr); in alloc_txq()
4666 txq->txp.max_npkt = min(nitems(txp->mb), in alloc_txq()
4667 sc->params.max_pkts_per_eth_tx_pkts_wr); in alloc_txq()
4668 if (vi->flags & TX_USES_VM_WR && !(sc->flags & IS_VF)) in alloc_txq()
4669 txq->txp.max_npkt--; in alloc_txq()
4671 if (vi->flags & TX_USES_VM_WR) in alloc_txq()
4672 txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) | in alloc_txq()
4673 V_TXPKT_INTF(pi->tx_chan)); in alloc_txq()
4675 txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) | in alloc_txq()
4676 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) | in alloc_txq()
4677 V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld)); in alloc_txq()
4679 txq->tc_idx = -1; in alloc_txq()
4691 struct adapter *sc = vi->adapter; in free_txq()
4692 struct sge_eq *eq = &txq->eq; in free_txq()
4694 if (eq->flags & EQ_HW_ALLOCATED) { in free_txq()
4695 MPASS(eq->flags & EQ_SW_ALLOCATED); in free_txq()
4697 MPASS(!(eq->flags & EQ_HW_ALLOCATED)); in free_txq()
4700 if (eq->flags & EQ_SW_ALLOCATED) { in free_txq()
4701 MPASS(!(eq->flags & EQ_HW_ALLOCATED)); in free_txq()
4702 sglist_free(txq->gl); in free_txq()
4703 free(txq->sdesc, M_CXGBE); in free_txq()
4704 mp_ring_free(txq->r); in free_txq()
4706 MPASS(!(eq->flags & EQ_SW_ALLOCATED)); in free_txq()
4721 sc = vi->adapter; in add_txq_sysctls()
4724 mp_ring_sysctls(txq->r, ctx, children); in add_txq_sysctls()
4727 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, txq - sc->sge.txq, in add_txq_sysctls()
4728 sysctl_tc, "I", "traffic class (-1 means none)"); in add_txq_sysctls()
4731 &txq->txcsum, "# of times hardware assisted with checksum"); in add_txq_sysctls()
4733 &txq->vlan_insertion, "# of times hardware inserted 802.1Q tag"); in add_txq_sysctls()
4735 &txq->tso_wrs, "# of TSO work requests"); in add_txq_sysctls()
4737 &txq->imm_wrs, "# of work requests with immediate data"); in add_txq_sysctls()
4739 &txq->sgl_wrs, "# of work requests with direct SGL"); in add_txq_sysctls()
4741 &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)"); in add_txq_sysctls()
4743 &txq->txpkts0_wrs, "# of txpkts (type 0) work requests"); in add_txq_sysctls()
4745 &txq->txpkts1_wrs, "# of txpkts (type 1) work requests"); in add_txq_sysctls()
4747 &txq->txpkts0_pkts, in add_txq_sysctls()
4750 &txq->txpkts1_pkts, in add_txq_sysctls()
4753 &txq->txpkts_flush, in add_txq_sysctls()
4754 "# of times txpkts had to be flushed out by an egress-update"); in add_txq_sysctls()
4756 &txq->raw_wrs, "# of raw work requests (non-packets)"); in add_txq_sysctls()
4758 &txq->vxlan_tso_wrs, "# of VXLAN TSO work requests"); in add_txq_sysctls()
4760 &txq->vxlan_txcsum, in add_txq_sysctls()
4766 CTLFLAG_RD, &txq->kern_tls_records, in add_txq_sysctls()
4769 CTLFLAG_RD, &txq->kern_tls_short, in add_txq_sysctls()
4772 CTLFLAG_RD, &txq->kern_tls_partial, in add_txq_sysctls()
4775 CTLFLAG_RD, &txq->kern_tls_full, in add_txq_sysctls()
4778 CTLFLAG_RD, &txq->kern_tls_octets, in add_txq_sysctls()
4781 CTLFLAG_RD, &txq->kern_tls_waste, in add_txq_sysctls()
4784 CTLFLAG_RD, &txq->kern_tls_options, in add_txq_sysctls()
4785 "# of NIC TLS options-only packets transmitted"); in add_txq_sysctls()
4787 CTLFLAG_RD, &txq->kern_tls_header, in add_txq_sysctls()
4788 "# of NIC TLS header-only packets transmitted"); in add_txq_sysctls()
4790 CTLFLAG_RD, &txq->kern_tls_fin, in add_txq_sysctls()
4791 "# of NIC TLS FIN-only packets transmitted"); in add_txq_sysctls()
4793 CTLFLAG_RD, &txq->kern_tls_fin_short, in add_txq_sysctls()
4796 CTLFLAG_RD, &txq->kern_tls_cbc, in add_txq_sysctls()
4797 "# of NIC TLS sessions using AES-CBC"); in add_txq_sysctls()
4799 CTLFLAG_RD, &txq->kern_tls_gcm, in add_txq_sysctls()
4800 "# of NIC TLS sessions using AES-GCM"); in add_txq_sysctls()
4813 struct port_info *pi = vi->pi; in alloc_ofld_txq()
4814 struct adapter *sc = vi->adapter; in alloc_ofld_txq()
4815 struct sge_eq *eq = &ofld_txq->wrq.eq; in alloc_ofld_txq()
4820 MPASS(idx < vi->nofldtxq); in alloc_ofld_txq()
4822 if (!(eq->flags & EQ_SW_ALLOCATED)) { in alloc_ofld_txq()
4824 oid = SYSCTL_ADD_NODE(&vi->ctx, in alloc_ofld_txq()
4825 SYSCTL_CHILDREN(vi->ofld_txq_oid), OID_AUTO, name, in alloc_ofld_txq()
4829 device_get_nameunit(vi->dev), idx); in alloc_ofld_txq()
4830 if (vi->nofldrxq > 0) { in alloc_ofld_txq()
4831 iqidx = vi->first_ofld_rxq + (idx % vi->nofldrxq); in alloc_ofld_txq()
4832 init_eq(sc, eq, EQ_OFLD, vi->qsize_txq, pi->port_id, in alloc_ofld_txq()
4833 &sc->sge.ofld_rxq[iqidx].iq, name); in alloc_ofld_txq()
4835 iqidx = vi->first_rxq + (idx % vi->nrxq); in alloc_ofld_txq()
4836 init_eq(sc, eq, EQ_OFLD, vi->qsize_txq, pi->port_id, in alloc_ofld_txq()
4837 &sc->sge.rxq[iqidx].iq, name); in alloc_ofld_txq()
4840 rc = alloc_wrq(sc, vi, &ofld_txq->wrq, &vi->ctx, oid); in alloc_ofld_txq()
4847 MPASS(eq->flags & EQ_SW_ALLOCATED); in alloc_ofld_txq()
4850 ofld_txq->tx_iscsi_pdus = counter_u64_alloc(M_WAITOK); in alloc_ofld_txq()
4851 ofld_txq->tx_iscsi_octets = counter_u64_alloc(M_WAITOK); in alloc_ofld_txq()
4852 ofld_txq->tx_iscsi_iso_wrs = counter_u64_alloc(M_WAITOK); in alloc_ofld_txq()
4853 ofld_txq->tx_aio_jobs = counter_u64_alloc(M_WAITOK); in alloc_ofld_txq()
4854 ofld_txq->tx_aio_octets = counter_u64_alloc(M_WAITOK); in alloc_ofld_txq()
4855 ofld_txq->tx_toe_tls_records = counter_u64_alloc(M_WAITOK); in alloc_ofld_txq()
4856 ofld_txq->tx_toe_tls_octets = counter_u64_alloc(M_WAITOK); in alloc_ofld_txq()
4857 add_ofld_txq_sysctls(&vi->ctx, oid, ofld_txq); in alloc_ofld_txq()
4860 if (!(eq->flags & EQ_HW_ALLOCATED)) { in alloc_ofld_txq()
4861 MPASS(eq->flags & EQ_SW_ALLOCATED); in alloc_ofld_txq()
4862 MPASS(ofld_txq->wrq.nwr_pending == 0); in alloc_ofld_txq()
4863 MPASS(ofld_txq->wrq.ndesc_needed == 0); in alloc_ofld_txq()
4870 MPASS(eq->flags & EQ_HW_ALLOCATED); in alloc_ofld_txq()
4882 struct adapter *sc = vi->adapter; in free_ofld_txq()
4883 struct sge_eq *eq = &ofld_txq->wrq.eq; in free_ofld_txq()
4885 if (eq->flags & EQ_HW_ALLOCATED) { in free_ofld_txq()
4886 MPASS(eq->flags & EQ_SW_ALLOCATED); in free_ofld_txq()
4888 MPASS(!(eq->flags & EQ_HW_ALLOCATED)); in free_ofld_txq()
4891 if (eq->flags & EQ_SW_ALLOCATED) { in free_ofld_txq()
4892 MPASS(!(eq->flags & EQ_HW_ALLOCATED)); in free_ofld_txq()
4893 counter_u64_free(ofld_txq->tx_iscsi_pdus); in free_ofld_txq()
4894 counter_u64_free(ofld_txq->tx_iscsi_octets); in free_ofld_txq()
4895 counter_u64_free(ofld_txq->tx_iscsi_iso_wrs); in free_ofld_txq()
4896 counter_u64_free(ofld_txq->tx_aio_jobs); in free_ofld_txq()
4897 counter_u64_free(ofld_txq->tx_aio_octets); in free_ofld_txq()
4898 counter_u64_free(ofld_txq->tx_toe_tls_records); in free_ofld_txq()
4899 counter_u64_free(ofld_txq->tx_toe_tls_octets); in free_ofld_txq()
4900 free_wrq(sc, &ofld_txq->wrq); in free_ofld_txq()
4901 MPASS(!(eq->flags & EQ_SW_ALLOCATED)); in free_ofld_txq()
4917 CTLFLAG_RD, &ofld_txq->tx_iscsi_pdus, in add_ofld_txq_sysctls()
4920 CTLFLAG_RD, &ofld_txq->tx_iscsi_octets, in add_ofld_txq_sysctls()
4923 CTLFLAG_RD, &ofld_txq->tx_iscsi_iso_wrs, in add_ofld_txq_sysctls()
4926 CTLFLAG_RD, &ofld_txq->tx_aio_jobs, in add_ofld_txq_sysctls()
4927 "# of zero-copy aio_write(2) jobs transmitted"); in add_ofld_txq_sysctls()
4929 CTLFLAG_RD, &ofld_txq->tx_aio_octets, in add_ofld_txq_sysctls()
4930 "# of payload octets in transmitted zero-copy aio_write(2) jobs"); in add_ofld_txq_sysctls()
4932 CTLFLAG_RD, &ofld_txq->tx_toe_tls_records, in add_ofld_txq_sysctls()
4935 CTLFLAG_RD, &ofld_txq->tx_toe_tls_octets, in add_ofld_txq_sysctls()
4948 *ba = error ? 0 : segs->ds_addr; in oneseg_dma_callback()
4956 n = IDXDIFF(fl->pidx >> 3, fl->dbidx, fl->sidx); in ring_fl_db()
4960 v = fl->dbval | V_PIDX(n); in ring_fl_db()
4961 if (fl->udb) in ring_fl_db()
4962 *fl->udb = htole32(v); in ring_fl_db()
4964 t4_write_reg(sc, sc->sge_kdoorbell_reg, v); in ring_fl_db()
4965 IDXINCR(fl->dbidx, n, fl->sidx); in ring_fl_db()
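ring_fl_db() (and the tx reclaim path later on) leans on two modular-index helpers: IDXDIFF() measures how far one index has advanced past another on a ring of a given size, and IDXINCR() advances an index with wrap-around. A self-contained sketch of that arithmetic (not the driver's exact macros):

/* Sketch of wrap-around index arithmetic on a ring with 'size' entries. */
static inline unsigned int
idx_diff(unsigned int head, unsigned int tail, unsigned int size)
{
	return (head >= tail ? head - tail : size - tail + head);
}

static inline unsigned int
idx_incr(unsigned int idx, unsigned int incr, unsigned int size)
{
	idx += incr;
	return (idx >= size ? idx - size : idx);
}

/* Example: with fl->sidx = 1024 groups of 8 descriptors, dbidx = 1020 and
 * pidx >> 3 = 4, idx_diff(4, 1020, 1024) == 8, so the doorbell announces
 * 8 new groups and idx_incr(1020, 8, 1024) wraps dbidx around to 4. */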
4972	 * Returns non-zero to indicate that this freelist should be added to the list of starving freelists.
4984 uint16_t max_pidx, zidx = fl->zidx; in refill_fl()
4985 uint16_t hw_cidx = fl->hw_cidx; /* stable snapshot */ in refill_fl()
4994 max_pidx = __predict_false(hw_cidx == 0) ? fl->sidx - 1 : hw_cidx - 1; in refill_fl()
4995 if (fl->pidx == max_pidx * 8) in refill_fl()
4998 d = &fl->desc[fl->pidx]; in refill_fl()
4999 sd = &fl->sdesc[fl->pidx]; in refill_fl()
5000 rxb = &sc->sge.rx_buf_info[zidx]; in refill_fl()
5004 if (sd->cl != NULL) { in refill_fl()
5006 if (sd->nmbuf == 0) { in refill_fl()
5014 fl->cl_fast_recycled++; in refill_fl()
5026 if (atomic_fetchadd_int(&clm->refcount, -1) == 1) { in refill_fl()
5027 fl->cl_recycled++; in refill_fl()
5031 sd->cl = NULL; /* gave up my reference */ in refill_fl()
5033 MPASS(sd->cl == NULL); in refill_fl()
5034 cl = uma_zalloc(rxb->zone, M_NOWAIT); in refill_fl()
5036 if (zidx != fl->safe_zidx) { in refill_fl()
5037 zidx = fl->safe_zidx; in refill_fl()
5038 rxb = &sc->sge.rx_buf_info[zidx]; in refill_fl()
5039 cl = uma_zalloc(rxb->zone, M_NOWAIT); in refill_fl()
5044 fl->cl_allocated++; in refill_fl()
5045 n--; in refill_fl()
5048 sd->cl = cl; in refill_fl()
5049 sd->zidx = zidx; in refill_fl()
5051 if (fl->flags & FL_BUF_PACKING) { in refill_fl()
5052 *d = htobe64(pa | rxb->hwidx2); in refill_fl()
5053 sd->moff = rxb->size2; in refill_fl()
5055 *d = htobe64(pa | rxb->hwidx1); in refill_fl()
5056 sd->moff = 0; in refill_fl()
5059 sd->nmbuf = 0; in refill_fl()
5062 if (__predict_false((++fl->pidx & 7) == 0)) { in refill_fl()
5063 uint16_t pidx = fl->pidx >> 3; in refill_fl()
5065 if (__predict_false(pidx == fl->sidx)) { in refill_fl()
5066 fl->pidx = 0; in refill_fl()
5068 sd = fl->sdesc; in refill_fl()
5069 d = fl->desc; in refill_fl()
5074 if (IDXDIFF(pidx, fl->dbidx, fl->sidx) >= 4) in refill_fl()
5079 if ((fl->pidx >> 3) != fl->dbidx) in refill_fl()
5082 return (FL_RUNNING_LOW(fl) && !(fl->flags & FL_STARVING)); in refill_fl()
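The recycling decisions in refill_fl() (and the mirror-image logic in free_fl_buffers() below) rely on atomic_fetchadd_int() returning the counter's value before the decrement: an old value of 1 means the freelist just dropped the last reference and may reuse or free the cluster, while anything larger means the stack still owns mbufs in it and only the reference is released. A tiny illustration of that idiom:

#include <sys/types.h>
#include <machine/atomic.h>

/* Returns non-zero if the caller released the last reference. */
static int
drop_ref_was_last(volatile u_int *refcount)
{
	return (atomic_fetchadd_int(refcount, -1) == 1);
}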
5094 mtx_assert(&sc->sfl_lock, MA_OWNED); in refill_sfl()
5095 TAILQ_FOREACH_SAFE(fl, &sc->sfl, link, fl_temp) { in refill_sfl()
5098 if (FL_NOT_RUNNING_LOW(fl) || fl->flags & FL_DOOMED) { in refill_sfl()
5099 TAILQ_REMOVE(&sc->sfl, fl, link); in refill_sfl()
5100 fl->flags &= ~FL_STARVING; in refill_sfl()
5105 if (!TAILQ_EMPTY(&sc->sfl)) in refill_sfl()
5106 callout_schedule(&sc->sfl_callout, hz / 5); in refill_sfl()
5121 sd = fl->sdesc; in free_fl_buffers()
5122 for (i = 0; i < fl->sidx * 8; i++, sd++) { in free_fl_buffers()
5123 if (sd->cl == NULL) in free_fl_buffers()
5126 if (sd->nmbuf == 0) in free_fl_buffers()
5127 uma_zfree(sc->sge.rx_buf_info[sd->zidx].zone, sd->cl); in free_fl_buffers()
5128 else if (fl->flags & FL_BUF_PACKING) { in free_fl_buffers()
5130 if (atomic_fetchadd_int(&clm->refcount, -1) == 1) { in free_fl_buffers()
5131 uma_zfree(sc->sge.rx_buf_info[sd->zidx].zone, in free_fl_buffers()
5132 sd->cl); in free_fl_buffers()
5136 sd->cl = NULL; in free_fl_buffers()
5139 if (fl->flags & FL_BUF_RESUME) { in free_fl_buffers()
5140 m_freem(fl->m0); in free_fl_buffers()
5141 fl->flags &= ~FL_BUF_RESUME; in free_fl_buffers()
5159 KASSERT(gl->sg_nseg == mbuf_nsegs(m), in get_pkt_gl()
5161 mbuf_nsegs(m), gl->sg_nseg)); in get_pkt_gl()
5163 KASSERT(gl->sg_nseg > 0 && gl->sg_nseg <= max_nsegs_allowed(m, vm_wr), in get_pkt_gl()
5165 gl->sg_nseg, max_nsegs_allowed(m, vm_wr))); in get_pkt_gl()
5179 nsegs--; /* first segment is part of ulptx_sgl */ in txpkt_len16()
5198 nsegs--; /* first segment is part of ulptx_sgl */ in txpkt_vm_len16()
5240 nsegs--; /* first segment is part of ulptx_sgl */ in txpkts0_len16()
5267 n = ndesc * EQ_ESIZE - sizeof(struct fw_eth_tx_pkt_wr) - in imm_payload()
5290 MPASS(m->m_pkthdr.l2hlen >= ETHER_HDR_LEN); in csum_to_ctrl()
5291 MPASS(m->m_pkthdr.l3hlen >= sizeof(struct ip)); in csum_to_ctrl()
5294 MPASS(m->m_pkthdr.l4hlen > 0); in csum_to_ctrl()
5295 MPASS(m->m_pkthdr.l5hlen > 0); in csum_to_ctrl()
5296 MPASS(m->m_pkthdr.inner_l2hlen >= ETHER_HDR_LEN); in csum_to_ctrl()
5297 MPASS(m->m_pkthdr.inner_l3hlen >= sizeof(struct ip)); in csum_to_ctrl()
5299 l2hlen = m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen + in csum_to_ctrl()
5300 m->m_pkthdr.l4hlen + m->m_pkthdr.l5hlen + in csum_to_ctrl()
5301 m->m_pkthdr.inner_l2hlen - ETHER_HDR_LEN; in csum_to_ctrl()
5302 l3hlen = m->m_pkthdr.inner_l3hlen; in csum_to_ctrl()
5304 l2hlen = m->m_pkthdr.l2hlen - ETHER_HDR_LEN; in csum_to_ctrl()
5305 l3hlen = m->m_pkthdr.l3hlen; in csum_to_ctrl()
5312 if (m->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_INNER_IP_TCP | in csum_to_ctrl()
5315 else if (m->m_pkthdr.csum_flags & (CSUM_IP_UDP | CSUM_INNER_IP_UDP | in csum_to_ctrl()
5321 if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_IP_TCP | CSUM_IP_UDP | in csum_to_ctrl()
5325 MPASS(m->m_pkthdr.csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | in csum_to_ctrl()
5352 KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && in write_lso_cpl()
5353 m0->m_pkthdr.l4hlen > 0, in write_lso_cpl()
5359 V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen - ETHER_HDR_LEN) >> 2) | in write_lso_cpl()
5360 V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) | in write_lso_cpl()
5361 V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2); in write_lso_cpl()
5362 if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) in write_lso_cpl()
5366 lso->lso_ctrl = htobe32(ctrl); in write_lso_cpl()
5367 lso->ipid_ofst = htobe16(0); in write_lso_cpl()
5368 lso->mss = htobe16(m0->m_pkthdr.tso_segsz); in write_lso_cpl()
5369 lso->seqno_offset = htobe32(0); in write_lso_cpl()
5370 lso->len = htobe32(m0->m_pkthdr.len); in write_lso_cpl()
5381 KASSERT(m0->m_pkthdr.inner_l2hlen > 0 && in write_tnl_lso_cpl()
5382 m0->m_pkthdr.inner_l3hlen > 0 && m0->m_pkthdr.inner_l4hlen > 0 && in write_tnl_lso_cpl()
5383 m0->m_pkthdr.inner_l5hlen > 0, in write_tnl_lso_cpl()
5386 KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && in write_tnl_lso_cpl()
5387 m0->m_pkthdr.l4hlen > 0 && m0->m_pkthdr.l5hlen > 0, in write_tnl_lso_cpl()
5395 (m0->m_pkthdr.l2hlen - ETHER_HDR_LEN) >> 2) | in write_tnl_lso_cpl()
5396 V_CPL_TX_TNL_LSO_IPHDRLENOUT(m0->m_pkthdr.l3hlen >> 2) | in write_tnl_lso_cpl()
5398 if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) in write_tnl_lso_cpl()
5404 tnl_lso->op_to_IpIdSplitOut = htobe32(ctrl); in write_tnl_lso_cpl()
5405 tnl_lso->IpIdOffsetOut = 0; in write_tnl_lso_cpl()
5406 tnl_lso->UdpLenSetOut_to_TnlHdrLen = in write_tnl_lso_cpl()
5409 V_CPL_TX_TNL_LSO_TNLHDRLEN(m0->m_pkthdr.l2hlen + in write_tnl_lso_cpl()
5410 m0->m_pkthdr.l3hlen + m0->m_pkthdr.l4hlen + in write_tnl_lso_cpl()
5411 m0->m_pkthdr.l5hlen) | in write_tnl_lso_cpl()
5413 tnl_lso->r1 = 0; in write_tnl_lso_cpl()
5417 (m0->m_pkthdr.inner_l2hlen - ETHER_HDR_LEN) >> 2) | in write_tnl_lso_cpl()
5418 V_CPL_TX_TNL_LSO_IPHDRLEN(m0->m_pkthdr.inner_l3hlen >> 2) | in write_tnl_lso_cpl()
5419 V_CPL_TX_TNL_LSO_TCPHDRLEN(m0->m_pkthdr.inner_l4hlen >> 2); in write_tnl_lso_cpl()
5420 if (m0->m_pkthdr.inner_l3hlen == sizeof(struct ip6_hdr)) in write_tnl_lso_cpl()
5422 tnl_lso->Flow_to_TcpHdrLen = htobe32(ctrl); in write_tnl_lso_cpl()
5423 tnl_lso->IpIdOffset = 0; in write_tnl_lso_cpl()
5424 tnl_lso->IpIdSplit_to_Mss = in write_tnl_lso_cpl()
5425 htobe16(V_CPL_TX_TNL_LSO_MSS(m0->m_pkthdr.tso_segsz)); in write_tnl_lso_cpl()
5426 tnl_lso->TCPSeqOffset = 0; in write_tnl_lso_cpl()
5427 tnl_lso->EthLenOffset_Size = in write_tnl_lso_cpl()
5428 htobe32(V_CPL_TX_TNL_LSO_SIZE(m0->m_pkthdr.len)); in write_tnl_lso_cpl()
5458 pktlen = m0->m_pkthdr.len; in write_txpkt_vm_wr()
5465 eq = &txq->eq; in write_txpkt_vm_wr()
5466 wr = (void *)&eq->desc[eq->pidx]; in write_txpkt_vm_wr()
5467 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_VM_WR) | in write_txpkt_vm_wr()
5471 wr->equiq_to_len16 = htobe32(ctrl); in write_txpkt_vm_wr()
5472 wr->r3[0] = 0; in write_txpkt_vm_wr()
5473 wr->r3[1] = 0; in write_txpkt_vm_wr()
5482 m_copydata(m0, 0, VM_TX_L2HDR_LEN, wr->ethmacdst); in write_txpkt_vm_wr()
5486 txq->tso_wrs++; in write_txpkt_vm_wr()
5493 txq->txcsum++; /* some hardware assistance provided */ in write_txpkt_vm_wr()
5498 V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); in write_txpkt_vm_wr()
5499 txq->vlan_insertion++; in write_txpkt_vm_wr()
5500 } else if (sc->vlan_id) in write_txpkt_vm_wr()
5501 ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(sc->vlan_id); in write_txpkt_vm_wr()
5504 cpl->ctrl0 = txq->cpl_ctrl0; in write_txpkt_vm_wr()
5505 cpl->pack = 0; in write_txpkt_vm_wr()
5506 cpl->len = htobe16(pktlen); in write_txpkt_vm_wr()
5507 cpl->ctrl1 = htobe64(ctrl1); in write_txpkt_vm_wr()
5515 * If this descriptor is the last descriptor in the ring, wrap in write_txpkt_vm_wr()
5516	 * around to the front of the ring explicitly for the start of the SGL.	 in write_txpkt_vm_wr()
5519 if (dst == (void *)&eq->desc[eq->sidx]) { in write_txpkt_vm_wr()
5520 dst = (void *)&eq->desc[0]; in write_txpkt_vm_wr()
5523 write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx); in write_txpkt_vm_wr()
5524 txq->sgl_wrs++; in write_txpkt_vm_wr()
5525 txq->txpkt_wrs++; in write_txpkt_vm_wr()
5527 txsd = &txq->sdesc[eq->pidx]; in write_txpkt_vm_wr()
5528 txsd->m = m0; in write_txpkt_vm_wr()
5529 txsd->desc_used = ndesc; in write_txpkt_vm_wr()
5544 struct sge_eq *eq = &txq->eq; in write_raw_wr()
5555 for (m = m0; m != NULL; m = m->m_next) in write_raw_wr()
5556 copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len); in write_raw_wr()
5558 txq->raw_wrs++; in write_raw_wr()
5560 txsd = &txq->sdesc[eq->pidx]; in write_raw_wr()
5561 txsd->m = m0; in write_raw_wr()
5562 txsd->desc_used = ndesc; in write_raw_wr()
5592 pktlen = m0->m_pkthdr.len; in write_txpkt_wr()
5611 eq = &txq->eq; in write_txpkt_wr()
5612 wr = (void *)&eq->desc[eq->pidx]; in write_txpkt_wr()
5613 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) | in write_txpkt_wr()
5617 wr->equiq_to_len16 = htobe32(ctrl); in write_txpkt_wr()
5618 wr->r3 = 0; in write_txpkt_wr()
5623 txq->vxlan_tso_wrs++; in write_txpkt_wr()
5626 txq->tso_wrs++; in write_txpkt_wr()
5636 txq->vxlan_txcsum++; in write_txpkt_wr()
5638 txq->txcsum++; in write_txpkt_wr()
5644 V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); in write_txpkt_wr()
5645 txq->vlan_insertion++; in write_txpkt_wr()
5649 cpl->ctrl0 = txq->cpl_ctrl0; in write_txpkt_wr()
5650 cpl->pack = 0; in write_txpkt_wr()
5651 cpl->len = htobe16(pktlen); in write_txpkt_wr()
5652 cpl->ctrl1 = htobe64(ctrl1); in write_txpkt_wr()
5656 if (__predict_false((uintptr_t)dst == (uintptr_t)&eq->desc[eq->sidx])) in write_txpkt_wr()
5657 dst = (caddr_t)&eq->desc[0]; in write_txpkt_wr()
5660 write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx); in write_txpkt_wr()
5661 txq->sgl_wrs++; in write_txpkt_wr()
5665 for (m = m0; m != NULL; m = m->m_next) { in write_txpkt_wr()
5666 copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len); in write_txpkt_wr()
5668 pktlen -= m->m_len; in write_txpkt_wr()
5674 txq->imm_wrs++; in write_txpkt_wr()
5677 txq->txpkt_wrs++; in write_txpkt_wr()
5679 txsd = &txq->sdesc[eq->pidx]; in write_txpkt_wr()
5680 txsd->m = m0; in write_txpkt_wr()
5681 txsd->desc_used = ndesc; in write_txpkt_wr()
5691 MPASS(txp->npkt > 0); in cmp_l2hdr()
5692 MPASS(m->m_len >= VM_TX_L2HDR_LEN); in cmp_l2hdr()
5694 if (txp->ethtype == be16toh(ETHERTYPE_VLAN)) in cmp_l2hdr()
5699 return (memcmp(m->m_data, &txp->ethmacdst[0], len) != 0); in cmp_l2hdr()
5705 MPASS(m->m_len >= VM_TX_L2HDR_LEN); in save_l2hdr()
5707 memcpy(&txp->ethmacdst[0], mtod(m, const void *), VM_TX_L2HDR_LEN); in save_l2hdr()
5714 struct txpkts *txp = &txq->txp; in add_to_txpkts_vf()
5719 *send = txp->npkt > 0; in add_to_txpkts_vf()
5728 if (txp->npkt > 0) { in add_to_txpkts_vf()
5729 MPASS(tx_len16_to_desc(txp->len16) <= avail); in add_to_txpkts_vf()
5730 MPASS(txp->npkt < txp->max_npkt); in add_to_txpkts_vf()
5731 MPASS(txp->wr_type == 1); /* VF supports type 1 only */ in add_to_txpkts_vf()
5733 if (tx_len16_to_desc(txp->len16 + txpkts1_len16()) > avail) { in add_to_txpkts_vf()
5738 if (m->m_pkthdr.len + txp->plen > 65535) in add_to_txpkts_vf()
5743 txp->len16 += txpkts1_len16(); in add_to_txpkts_vf()
5744 txp->plen += m->m_pkthdr.len; in add_to_txpkts_vf()
5745 txp->mb[txp->npkt++] = m; in add_to_txpkts_vf()
5746 if (txp->npkt == txp->max_npkt) in add_to_txpkts_vf()
5749 txp->len16 = howmany(sizeof(struct fw_eth_tx_pkts_vm_wr), 16) + in add_to_txpkts_vf()
5751 if (tx_len16_to_desc(txp->len16) > avail) in add_to_txpkts_vf()
5753 txp->npkt = 1; in add_to_txpkts_vf()
5754 txp->wr_type = 1; in add_to_txpkts_vf()
5755 txp->plen = m->m_pkthdr.len; in add_to_txpkts_vf()
5756 txp->mb[0] = m; in add_to_txpkts_vf()
5766 struct txpkts *txp = &txq->txp; in add_to_txpkts_pf()
5769 MPASS(!(sc->flags & IS_VF)); in add_to_txpkts_pf()
5774 *send = txp->npkt > 0; in add_to_txpkts_pf()
5780 if (txp->npkt == 0) { in add_to_txpkts_pf()
5781 if (m->m_pkthdr.len > 65535) in add_to_txpkts_pf()
5784 txp->wr_type = 0; in add_to_txpkts_pf()
5785 txp->len16 = in add_to_txpkts_pf()
5789 txp->wr_type = 1; in add_to_txpkts_pf()
5790 txp->len16 = in add_to_txpkts_pf()
5794 if (tx_len16_to_desc(txp->len16) > avail) in add_to_txpkts_pf()
5796 txp->npkt = 1; in add_to_txpkts_pf()
5797 txp->plen = m->m_pkthdr.len; in add_to_txpkts_pf()
5798 txp->mb[0] = m; in add_to_txpkts_pf()
5800 MPASS(tx_len16_to_desc(txp->len16) <= avail); in add_to_txpkts_pf()
5801 MPASS(txp->npkt < txp->max_npkt); in add_to_txpkts_pf()
5803 if (m->m_pkthdr.len + txp->plen > 65535) { in add_to_txpkts_pf()
5809 MPASS(txp->wr_type == 0 || txp->wr_type == 1); in add_to_txpkts_pf()
5810 if (txp->wr_type == 0) { in add_to_txpkts_pf()
5811 if (tx_len16_to_desc(txp->len16 + in add_to_txpkts_pf()
5814 txp->len16 += txpkts0_len16(nsegs); in add_to_txpkts_pf()
5818 if (tx_len16_to_desc(txp->len16 + txpkts1_len16()) > in add_to_txpkts_pf()
5821 txp->len16 += txpkts1_len16(); in add_to_txpkts_pf()
5824 txp->plen += m->m_pkthdr.len; in add_to_txpkts_pf()
5825 txp->mb[txp->npkt++] = m; in add_to_txpkts_pf()
5826 if (txp->npkt == txp->max_npkt) in add_to_txpkts_pf()
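Both coalescing helpers budget work-request space in 16-byte units: each packet adds txpkts0_len16(nsegs) or txpkts1_len16() to txp->len16, and a candidate is rejected once tx_len16_to_desc() of the running total no longer fits in the available descriptors, or the combined payload would exceed 65535 bytes. A small worked example of that unit conversion, assuming 64-byte tx descriptors:

/* Sketch: 16-byte WR units -> 64-byte tx descriptors. */
static inline unsigned int
len16_to_desc(unsigned int len16)
{
	return ((len16 + 3) / 4);	/* 4 x 16 B units per descriptor */
}
/* len16_to_desc(4) == 1, len16_to_desc(8) == 2, len16_to_desc(9) == 3 */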
5842 const struct txpkts *txp = &txq->txp; in write_txpkts_wr()
5843 struct sge_eq *eq = &txq->eq; in write_txpkts_wr()
5853 MPASS(txp->npkt > 0); in write_txpkts_wr()
5854 MPASS(txp->len16 <= howmany(SGE_MAX_WR_LEN, 16)); in write_txpkts_wr()
5856 wr = (void *)&eq->desc[eq->pidx]; in write_txpkts_wr()
5857 wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)); in write_txpkts_wr()
5858 wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(txp->len16)); in write_txpkts_wr()
5859 wr->plen = htobe16(txp->plen); in write_txpkts_wr()
5860 wr->npkt = txp->npkt; in write_txpkts_wr()
5861 wr->r3 = 0; in write_txpkts_wr()
5862 wr->type = txp->wr_type; in write_txpkts_wr()
5870 ndesc = tx_len16_to_desc(txp->len16); in write_txpkts_wr()
5872 checkwrap = eq->sidx - ndesc < eq->pidx; in write_txpkts_wr()
5873 for (i = 0; i < txp->npkt; i++) { in write_txpkts_wr()
5874 m = txp->mb[i]; in write_txpkts_wr()
5875 if (txp->wr_type == 0) { in write_txpkts_wr()
5881 ulpmc->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) | in write_txpkts_wr()
5882 V_ULP_TXPKT_DEST(0) | V_ULP_TXPKT_FID(eq->iqid)); in write_txpkts_wr()
5883 ulpmc->len = htobe32(txpkts0_len16(mbuf_nsegs(m))); in write_txpkts_wr()
5887 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) | in write_txpkts_wr()
5889 ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core)); in write_txpkts_wr()
5893 (uintptr_t)cpl == (uintptr_t)&eq->desc[eq->sidx]) in write_txpkts_wr()
5894 cpl = (void *)&eq->desc[0]; in write_txpkts_wr()
5904 txq->vxlan_txcsum++; in write_txpkts_wr()
5906 txq->txcsum++; in write_txpkts_wr()
5912 V_TXPKT_VLAN(m->m_pkthdr.ether_vtag); in write_txpkts_wr()
5913 txq->vlan_insertion++; in write_txpkts_wr()
5917 cpl->ctrl0 = txq->cpl_ctrl0; in write_txpkts_wr()
5918 cpl->pack = 0; in write_txpkts_wr()
5919 cpl->len = htobe16(m->m_pkthdr.len); in write_txpkts_wr()
5920 cpl->ctrl1 = htobe64(ctrl1); in write_txpkts_wr()
5924 (uintptr_t)flitp == (uintptr_t)&eq->desc[eq->sidx]) in write_txpkts_wr()
5925 flitp = (void *)&eq->desc[0]; in write_txpkts_wr()
5930 last->m_nextpkt = m; in write_txpkts_wr()
5934 txq->sgl_wrs++; in write_txpkts_wr()
5935 if (txp->wr_type == 0) { in write_txpkts_wr()
5936 txq->txpkts0_pkts += txp->npkt; in write_txpkts_wr()
5937 txq->txpkts0_wrs++; in write_txpkts_wr()
5939 txq->txpkts1_pkts += txp->npkt; in write_txpkts_wr()
5940 txq->txpkts1_wrs++; in write_txpkts_wr()
5943 txsd = &txq->sdesc[eq->pidx]; in write_txpkts_wr()
5944 txsd->m = txp->mb[0]; in write_txpkts_wr()
5945 txsd->desc_used = ndesc; in write_txpkts_wr()
5953 const struct txpkts *txp = &txq->txp; in write_txpkts_vm_wr()
5954 struct sge_eq *eq = &txq->eq; in write_txpkts_vm_wr()
5964 MPASS(txp->npkt > 0); in write_txpkts_vm_wr()
5965 MPASS(txp->wr_type == 1); /* VF supports type 1 only */ in write_txpkts_vm_wr()
5966 MPASS(txp->mb[0] != NULL); in write_txpkts_vm_wr()
5967 MPASS(txp->len16 <= howmany(SGE_MAX_WR_LEN, 16)); in write_txpkts_vm_wr()
5969 wr = (void *)&eq->desc[eq->pidx]; in write_txpkts_vm_wr()
5970 wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_VM_WR)); in write_txpkts_vm_wr()
5971 wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(txp->len16)); in write_txpkts_vm_wr()
5972 wr->r3 = 0; in write_txpkts_vm_wr()
5973 wr->plen = htobe16(txp->plen); in write_txpkts_vm_wr()
5974 wr->npkt = txp->npkt; in write_txpkts_vm_wr()
5975 wr->r4 = 0; in write_txpkts_vm_wr()
5976 memcpy(&wr->ethmacdst[0], &txp->ethmacdst[0], 16); in write_txpkts_vm_wr()
5981 * the WR will take 32B so we check for the end of the descriptor ring in write_txpkts_vm_wr()
5984 ndesc = tx_len16_to_desc(txp->len16); in write_txpkts_vm_wr()
5986 for (i = 0; i < txp->npkt; i++) { in write_txpkts_vm_wr()
5987 m = txp->mb[i]; in write_txpkts_vm_wr()
5988 if (i & 1 && (uintptr_t)flitp == (uintptr_t)&eq->desc[eq->sidx]) in write_txpkts_vm_wr()
5989 flitp = &eq->desc[0]; in write_txpkts_vm_wr()
5995 txq->txcsum++; /* some hardware assistance provided */ in write_txpkts_vm_wr()
6000 V_TXPKT_VLAN(m->m_pkthdr.ether_vtag); in write_txpkts_vm_wr()
6001 txq->vlan_insertion++; in write_txpkts_vm_wr()
6002 } else if (sc->vlan_id) in write_txpkts_vm_wr()
6003 ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(sc->vlan_id); in write_txpkts_vm_wr()
6006 cpl->ctrl0 = txq->cpl_ctrl0; in write_txpkts_vm_wr()
6007 cpl->pack = 0; in write_txpkts_vm_wr()
6008 cpl->len = htobe16(m->m_pkthdr.len); in write_txpkts_vm_wr()
6009 cpl->ctrl1 = htobe64(ctrl1); in write_txpkts_vm_wr()
6016 last->m_nextpkt = m; in write_txpkts_vm_wr()
6020 txq->sgl_wrs++; in write_txpkts_vm_wr()
6021 txq->txpkts1_pkts += txp->npkt; in write_txpkts_vm_wr()
6022 txq->txpkts1_wrs++; in write_txpkts_vm_wr()
6024 txsd = &txq->sdesc[eq->pidx]; in write_txpkts_vm_wr()
6025 txsd->m = txp->mb[0]; in write_txpkts_vm_wr()
6026 txsd->desc_used = ndesc; in write_txpkts_vm_wr()
6038 struct sge_eq *eq = &txq->eq; in write_gl_to_txd()
6039 struct sglist *gl = txq->gl; in write_gl_to_txd()
6047 MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]); in write_gl_to_txd()
6048 MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]); in write_gl_to_txd()
6051 nsegs = gl->sg_nseg; in write_gl_to_txd()
6054 nflits = (3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2; in write_gl_to_txd()
6056 wrap = (__be64 *)(&eq->desc[eq->sidx]); in write_gl_to_txd()
6057 seg = &gl->sg_segs[0]; in write_gl_to_txd()
6062 * ring, so we're at least 16 bytes away from the status page. There is in write_gl_to_txd()
6066 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | in write_gl_to_txd()
6068 usgl->len0 = htobe32(seg->ss_len); in write_gl_to_txd()
6069 usgl->addr0 = htobe64(seg->ss_paddr); in write_gl_to_txd()
6076 for (i = 0; i < nsegs - 1; i++, seg++) { in write_gl_to_txd()
6077 usgl->sge[i / 2].len[i & 1] = htobe32(seg->ss_len); in write_gl_to_txd()
6078 usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ss_paddr); in write_gl_to_txd()
6081 usgl->sge[i / 2].len[1] = htobe32(0); in write_gl_to_txd()
6089 for (i = 0; i < nflits - 2; i++) { in write_gl_to_txd()
6091 flitp = (void *)eq->desc; in write_gl_to_txd()
6092 *flitp++ = get_flit(seg, nsegs - 1, i); in write_gl_to_txd()
6103 *to = (void *)eq->desc; in write_gl_to_txd()
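write_gl_to_txd() sizes the ULP_TX_SC_DSGL it emits with nflits = (3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2: the command word, len0 and addr0 take the first two 8-byte flits, and every further pair of segments adds three more (two addresses plus a shared length flit). A quick check of the formula:

/* Sketch: flit count for an ulptx_sgl carrying nsegs DMA segments. */
static unsigned int
sgl_nflits(unsigned int nsegs)
{
	return ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2);
}
/* nsegs 1 -> 2 flits (cmd/len0 + addr0 only)
 * nsegs 2 -> 4 flits (one extra len/addr pair, half the length flit unused,
 *                     which is why the code zeroes sge[i / 2].len[1] above)
 * nsegs 3 -> 5 flits (the pair is full, nothing left to pad) */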
6112 MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]); in copy_to_txd()
6113 MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]); in copy_to_txd()
6116 (uintptr_t)&eq->desc[eq->sidx])) { in copy_to_txd()
6120 int portion = (uintptr_t)&eq->desc[eq->sidx] - (uintptr_t)(*to); in copy_to_txd()
6124 portion = len - portion; /* remaining */ in copy_to_txd()
6125 bcopy(from, (void *)eq->desc, portion); in copy_to_txd()
6126 (*to) = (caddr_t)eq->desc + portion; in copy_to_txd()
6137 db = eq->doorbells; in ring_eq_db()
6142 switch (ffs(db) - 1) { in ring_eq_db()
6144 *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n)); in ring_eq_db()
6156 KASSERT(eq->udb_qid == 0 && n == 1, in ring_eq_db()
6158 __func__, eq->doorbells, n, eq->dbidx, eq)); in ring_eq_db()
6160 dst = (volatile void *)((uintptr_t)eq->udb + UDBS_WR_OFFSET - in ring_eq_db()
6162 i = eq->dbidx; in ring_eq_db()
6163 src = (void *)&eq->desc[i]; in ring_eq_db()
6164 while (src != (void *)&eq->desc[i + 1]) in ring_eq_db()
6171 *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n)); in ring_eq_db()
6176 t4_write_reg(sc, sc->sge_kdoorbell_reg, in ring_eq_db()
6177 V_QID(eq->cntxt_id) | V_PIDX(n)); in ring_eq_db()
6181 IDXINCR(eq->dbidx, n, eq->sidx); in ring_eq_db()
6190 return (IDXDIFF(hw_cidx, eq->cidx, eq->sidx)); in reclaimable_tx_desc()
6199 pidx = eq->pidx; in total_available_tx_desc()
6202 return (eq->sidx - 1); in total_available_tx_desc()
6204 return (IDXDIFF(hw_cidx, pidx, eq->sidx) - 1); in total_available_tx_desc()
6210 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; in read_hw_cidx()
6211 uint16_t cidx = spg->cidx; /* stable snapshot */ in read_hw_cidx()
6223 struct sge_eq *eq = &txq->eq; in reclaim_tx_descs()
6235 txsd = &txq->sdesc[eq->cidx]; in reclaim_tx_descs()
6236 ndesc = txsd->desc_used; in reclaim_tx_descs()
6244 __func__, eq->cidx)); in reclaim_tx_descs()
6246 for (m = txsd->m; m != NULL; m = nextpkt) { in reclaim_tx_descs()
6247 nextpkt = m->m_nextpkt; in reclaim_tx_descs()
6248 m->m_nextpkt = NULL; in reclaim_tx_descs()
6252 can_reclaim -= ndesc; in reclaim_tx_descs()
6253 IDXINCR(eq->cidx, ndesc, eq->sidx); in reclaim_tx_descs()
6263 struct sge_eq *eq = &txq->eq; in tx_reclaim()
6269 if (eq->cidx == eq->pidx) in tx_reclaim()
6270 eq->equeqidx = eq->pidx; in tx_reclaim()
6302 int i, zidx = -1; in find_refill_source()
6303 struct rx_buf_info *rxb = &sc->sge.rx_buf_info[0]; in find_refill_source()
6307 if (rxb->hwidx2 == -1) in find_refill_source()
6309 if (rxb->size1 < PAGE_SIZE && in find_refill_source()
6310 rxb->size1 < largest_rx_cluster) in find_refill_source()
6312 if (rxb->size1 > largest_rx_cluster) in find_refill_source()
6314 MPASS(rxb->size1 - rxb->size2 >= CL_METADATA_SIZE); in find_refill_source()
6315 if (rxb->size2 >= maxp) in find_refill_source()
6321 if (rxb->hwidx1 == -1) in find_refill_source()
6323 if (rxb->size1 > largest_rx_cluster) in find_refill_source()
6325 if (rxb->size1 >= maxp) in find_refill_source()
6337 mtx_lock(&sc->sfl_lock); in add_fl_to_sfl()
6339 if ((fl->flags & FL_DOOMED) == 0) { in add_fl_to_sfl()
6340 fl->flags |= FL_STARVING; in add_fl_to_sfl()
6341 TAILQ_INSERT_TAIL(&sc->sfl, fl, link); in add_fl_to_sfl()
6342 callout_reset(&sc->sfl_callout, hz / 5, refill_sfl, sc); in add_fl_to_sfl()
6345 mtx_unlock(&sc->sfl_lock); in add_fl_to_sfl()
6353 atomic_readandclear_int(&eq->equiq); in handle_wrq_egr_update()
6354 taskqueue_enqueue(sc->tq[eq->port_id], &wrq->wrq_tx_task); in handle_wrq_egr_update()
6362 MPASS(eq->type == EQ_ETH); in handle_eth_egr_update()
6364 atomic_readandclear_int(&eq->equiq); in handle_eth_egr_update()
6365 if (mp_ring_is_idle(txq->r)) in handle_eth_egr_update()
6366 taskqueue_enqueue(sc->tq[eq->port_id], &txq->tx_reclaim_task); in handle_eth_egr_update()
6368 mp_ring_check_drainage(txq->r, 64); in handle_eth_egr_update()
6376 unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid)); in handle_sge_egr_update()
6377 struct adapter *sc = iq->adapter; in handle_sge_egr_update()
6378 struct sge *s = &sc->sge; in handle_sge_egr_update()
6385 rss->opcode)); in handle_sge_egr_update()
6387 eq = s->eqmap[qid - s->eq_start - s->eq_base]; in handle_sge_egr_update()
6388 (*h[eq->type])(sc, eq); in handle_sge_egr_update()
6400 struct adapter *sc = iq->adapter; in handle_fw_msg()
6404 rss->opcode)); in handle_fw_msg()
6406 if (cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL) { in handle_fw_msg()
6409 rss2 = (const struct rss_header *)&cpl->data[0]; in handle_fw_msg()
6410 return (t4_cpl_handler[rss2->opcode](iq, rss2, m)); in handle_fw_msg()
6413 return (t4_fw_msg_handler[cpl->type](sc, &cpl->data[0])); in handle_fw_msg()
6417 * t4_handle_wrerr_rpl - process a FW work request error message
6431 device_get_nameunit(adap->dev), opcode); in t4_handle_wrerr_rpl()
6434 log(LOG_ERR, "%s: FW_ERROR (%s) ", device_get_nameunit(adap->dev), in t4_handle_wrerr_rpl()
6435 G_FW_ERROR_CMD_FATAL(be32toh(e->op_to_type)) ? "fatal" : in t4_handle_wrerr_rpl()
6436 "non-fatal"); in t4_handle_wrerr_rpl()
6437 switch (G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type))) { in t4_handle_wrerr_rpl()
6440 for (i = 0; i < nitems(e->u.exception.info); i++) in t4_handle_wrerr_rpl()
6442 be32toh(e->u.exception.info[i])); in t4_handle_wrerr_rpl()
6447 be32toh(e->u.hwmodule.regaddr), in t4_handle_wrerr_rpl()
6448 be32toh(e->u.hwmodule.regval)); in t4_handle_wrerr_rpl()
6452 be16toh(e->u.wr.cidx), in t4_handle_wrerr_rpl()
6453 G_FW_ERROR_CMD_PFN(be16toh(e->u.wr.pfn_vfn)), in t4_handle_wrerr_rpl()
6454 G_FW_ERROR_CMD_VFN(be16toh(e->u.wr.pfn_vfn)), in t4_handle_wrerr_rpl()
6455 be32toh(e->u.wr.eqid)); in t4_handle_wrerr_rpl()
6456 for (i = 0; i < nitems(e->u.wr.wrhdr); i++) in t4_handle_wrerr_rpl()
6458 e->u.wr.wrhdr[i]); in t4_handle_wrerr_rpl()
6463 be16toh(e->u.acl.cidx), in t4_handle_wrerr_rpl()
6464 G_FW_ERROR_CMD_PFN(be16toh(e->u.acl.pfn_vfn)), in t4_handle_wrerr_rpl()
6465 G_FW_ERROR_CMD_VFN(be16toh(e->u.acl.pfn_vfn)), in t4_handle_wrerr_rpl()
6466 be32toh(e->u.acl.eqid), in t4_handle_wrerr_rpl()
6467 G_FW_ERROR_CMD_MV(be16toh(e->u.acl.mv_pkd)) ? "vlanid" : in t4_handle_wrerr_rpl()
6469 for (i = 0; i < nitems(e->u.acl.val); i++) in t4_handle_wrerr_rpl()
6470 log(LOG_ERR, " %02x", e->u.acl.val[i]); in t4_handle_wrerr_rpl()
6475 G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type))); in t4_handle_wrerr_rpl()
6484 struct rx_buf_info *rxb = &sc->sge.rx_buf_info[0]; in bufidx_used()
6488 if (rxb->size1 > largest_rx_cluster) in bufidx_used()
6490 if (rxb->hwidx1 == idx || rxb->hwidx2 == idx) in bufidx_used()
6501 struct sge_params *sp = &sc->params.sge; in sysctl_bufsizes()
6513 sbuf_printf(&sb, "%u%c ", sp->sge_fl_buffer_size[i], c); in sysctl_bufsizes()
6539 nsegs--; /* first segment is part of ulptx_sgl */ in txpkt_eo_len16()
6560 u_int pfvf = pi->adapter->pf << S_FW_VIID_PFN; in send_etid_flowc_wr()
6563 mtx_assert(&cst->lock, MA_OWNED); in send_etid_flowc_wr()
6564 MPASS((cst->flags & (EO_FLOWC_PENDING | EO_FLOWC_RPL_PENDING)) == in send_etid_flowc_wr()
6567 flowc = start_wrq_wr(&cst->eo_txq->wrq, ETID_FLOWC_LEN16, &cookie); in send_etid_flowc_wr()
6572 flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) | in send_etid_flowc_wr()
6574 flowc->flowid_len16 = htonl(V_FW_WR_LEN16(ETID_FLOWC_LEN16) | in send_etid_flowc_wr()
6575 V_FW_WR_FLOWID(cst->etid)); in send_etid_flowc_wr()
6576 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; in send_etid_flowc_wr()
6577 flowc->mnemval[0].val = htobe32(pfvf); in send_etid_flowc_wr()
6578 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; in send_etid_flowc_wr()
6579 flowc->mnemval[1].val = htobe32(pi->tx_chan); in send_etid_flowc_wr()
6580 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; in send_etid_flowc_wr()
6581 flowc->mnemval[2].val = htobe32(pi->tx_chan); in send_etid_flowc_wr()
6582 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; in send_etid_flowc_wr()
6583 flowc->mnemval[3].val = htobe32(cst->iqid); in send_etid_flowc_wr()
6584 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_EOSTATE; in send_etid_flowc_wr()
6585 flowc->mnemval[4].val = htobe32(FW_FLOWC_MNEM_EOSTATE_ESTABLISHED); in send_etid_flowc_wr()
6586 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS; in send_etid_flowc_wr()
6587 flowc->mnemval[5].val = htobe32(cst->schedcl); in send_etid_flowc_wr()
6589 commit_wrq_wr(&cst->eo_txq->wrq, flowc, &cookie); in send_etid_flowc_wr()
6591 cst->flags &= ~EO_FLOWC_PENDING; in send_etid_flowc_wr()
6592 cst->flags |= EO_FLOWC_RPL_PENDING; in send_etid_flowc_wr()
6593 MPASS(cst->tx_credits >= ETID_FLOWC_LEN16); /* flowc is first WR. */ in send_etid_flowc_wr()
6594 cst->tx_credits -= ETID_FLOWC_LEN16; in send_etid_flowc_wr()
6608 mtx_assert(&cst->lock, MA_OWNED); in send_etid_flush_wr()
6610 flowc = start_wrq_wr(&cst->eo_txq->wrq, ETID_FLUSH_LEN16, &cookie); in send_etid_flush_wr()
6615 flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) | in send_etid_flush_wr()
6617 flowc->flowid_len16 = htobe32(V_FW_WR_LEN16(ETID_FLUSH_LEN16) | in send_etid_flush_wr()
6618 V_FW_WR_FLOWID(cst->etid)); in send_etid_flush_wr()
6620 commit_wrq_wr(&cst->eo_txq->wrq, flowc, &cookie); in send_etid_flush_wr()
6622 cst->flags |= EO_FLUSH_RPL_PENDING; in send_etid_flush_wr()
6623 MPASS(cst->tx_credits >= ETID_FLUSH_LEN16); in send_etid_flush_wr()
6624 cst->tx_credits -= ETID_FLUSH_LEN16; in send_etid_flush_wr()
6625 cst->ncompl++; in send_etid_flush_wr()
6641 mtx_assert(&cst->lock, MA_OWNED); in write_ethofld_wr()
6643 KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && in write_ethofld_wr()
6644 m0->m_pkthdr.l4hlen > 0, in write_ethofld_wr()
6649 pktlen = m0->m_pkthdr.len; in write_ethofld_wr()
6653 immhdrs = m0->m_pkthdr.l2hlen + m0->m_pkthdr.l3hlen + m0->m_pkthdr.l4hlen; in write_ethofld_wr()
6656 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_EO_WR) | in write_ethofld_wr()
6658 wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(len16) | in write_ethofld_wr()
6659 V_FW_WR_FLOWID(cst->etid)); in write_ethofld_wr()
6660 wr->r3 = 0; in write_ethofld_wr()
6662 wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG; in write_ethofld_wr()
6663 wr->u.udpseg.ethlen = m0->m_pkthdr.l2hlen; in write_ethofld_wr()
6664 wr->u.udpseg.iplen = htobe16(m0->m_pkthdr.l3hlen); in write_ethofld_wr()
6665 wr->u.udpseg.udplen = m0->m_pkthdr.l4hlen; in write_ethofld_wr()
6666 wr->u.udpseg.rtplen = 0; in write_ethofld_wr()
6667 wr->u.udpseg.r4 = 0; in write_ethofld_wr()
6668 wr->u.udpseg.mss = htobe16(pktlen - immhdrs); in write_ethofld_wr()
6669 wr->u.udpseg.schedpktsize = wr->u.udpseg.mss; in write_ethofld_wr()
6670 wr->u.udpseg.plen = htobe32(pktlen - immhdrs); in write_ethofld_wr()
6674 wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG; in write_ethofld_wr()
6675 wr->u.tcpseg.ethlen = m0->m_pkthdr.l2hlen; in write_ethofld_wr()
6676 wr->u.tcpseg.iplen = htobe16(m0->m_pkthdr.l3hlen); in write_ethofld_wr()
6677 wr->u.tcpseg.tcplen = m0->m_pkthdr.l4hlen; in write_ethofld_wr()
6678 wr->u.tcpseg.tsclk_tsoff = mbuf_eo_tsclk_tsoff(m0); in write_ethofld_wr()
6679 wr->u.tcpseg.r4 = 0; in write_ethofld_wr()
6680 wr->u.tcpseg.r5 = 0; in write_ethofld_wr()
6681 wr->u.tcpseg.plen = htobe32(pktlen - immhdrs); in write_ethofld_wr()
6686 wr->u.tcpseg.mss = htobe16(m0->m_pkthdr.tso_segsz); in write_ethofld_wr()
6690 V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen - in write_ethofld_wr()
6692 V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) | in write_ethofld_wr()
6693 V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2); in write_ethofld_wr()
6694 if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) in write_ethofld_wr()
6696 lso->lso_ctrl = htobe32(ctrl); in write_ethofld_wr()
6697 lso->ipid_ofst = htobe16(0); in write_ethofld_wr()
6698 lso->mss = htobe16(m0->m_pkthdr.tso_segsz); in write_ethofld_wr()
6699 lso->seqno_offset = htobe32(0); in write_ethofld_wr()
6700 lso->len = htobe32(pktlen); in write_ethofld_wr()
6704 wr->u.tcpseg.mss = htobe16(0xffff); in write_ethofld_wr()
6711 ctrl1 = csum_to_ctrl(cst->adapter, m0); in write_ethofld_wr()
6716 V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); in write_ethofld_wr()
6720 cpl->ctrl0 = cst->ctrl0; in write_ethofld_wr()
6721 cpl->pack = 0; in write_ethofld_wr()
6722 cpl->len = htobe16(pktlen); in write_ethofld_wr()
6723 cpl->ctrl1 = htobe64(ctrl1); in write_ethofld_wr()
6733	/* zero-pad up to the next 16-byte boundary, if not 16-byte aligned */	 in write_ethofld_wr()
6735 pad = 16 - (immhdrs & 0xf); in write_ethofld_wr()
6739 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | in write_ethofld_wr()
6743 for (; m0 != NULL; m0 = m0->m_next) { in write_ethofld_wr()
6744 if (__predict_false(m0->m_len == 0)) in write_ethofld_wr()
6746 if (immhdrs >= m0->m_len) { in write_ethofld_wr()
6747 immhdrs -= m0->m_len; in write_ethofld_wr()
6750 if (m0->m_flags & M_EXTPG) in write_ethofld_wr()
6752 mtod(m0, vm_offset_t), m0->m_len); in write_ethofld_wr()
6755 m0->m_len - immhdrs); in write_ethofld_wr()
6764 *(uint64_t *)((char *)wr + len16 * 16 - 8) = 0; in write_ethofld_wr()
6766 usgl->len0 = htobe32(segs[0].ss_len); in write_ethofld_wr()
6767 usgl->addr0 = htobe64(segs[0].ss_paddr); in write_ethofld_wr()
6768 for (i = 0; i < nsegs - 1; i++) { in write_ethofld_wr()
6769 usgl->sge[i / 2].len[i & 1] = htobe32(segs[i + 1].ss_len); in write_ethofld_wr()
6770 usgl->sge[i / 2].addr[i & 1] = htobe64(segs[i + 1].ss_paddr); in write_ethofld_wr()
6773 usgl->sge[i / 2].len[1] = htobe32(0); in write_ethofld_wr()
6786 mtx_assert(&cst->lock, MA_OWNED); in ethofld_tx()
6788 while ((m = mbufq_first(&cst->pending_tx)) != NULL) { in ethofld_tx()
6794 if (next_credits > cst->tx_credits) { in ethofld_tx()
6800 MPASS(cst->ncompl > 0); in ethofld_tx()
6803 wr = start_wrq_wr(&cst->eo_txq->wrq, next_credits, &cookie); in ethofld_tx()
6806 MPASS(cst->ncompl > 0); in ethofld_tx()
6809 cst->tx_credits -= next_credits; in ethofld_tx()
6810 cst->tx_nocompl += next_credits; in ethofld_tx()
6811 compl = cst->ncompl == 0 || cst->tx_nocompl >= cst->tx_total / 2; in ethofld_tx()
6812 ETHER_BPF_MTAP(cst->com.ifp, m); in ethofld_tx()
6814 commit_wrq_wr(&cst->eo_txq->wrq, wr, &cookie); in ethofld_tx()
6816 cst->ncompl++; in ethofld_tx()
6817 cst->tx_nocompl = 0; in ethofld_tx()
6819 (void) mbufq_dequeue(&cst->pending_tx); in ethofld_tx()
6831 m->m_pkthdr.snd_tag = NULL; in ethofld_tx()
6832 m->m_pkthdr.csum_flags &= ~CSUM_SND_TAG; in ethofld_tx()
6833 m_snd_tag_rele(&cst->com); in ethofld_tx()
6835 mbufq_enqueue(&cst->pending_fwack, m); in ethofld_tx()
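ethofld_tx() runs on a credit budget measured in the same 16-byte units: each work request costs mbuf_eo_len16() credits (the initial FLOWC costs ETID_FLOWC_LEN16), credits only come back through the firmware acknowledgements handled in ethofld_fw4_ack() below, and a completion is requested whenever none are outstanding or half the total budget has been spent since the last one. A minimal sketch of that bookkeeping, with illustrative field names:

/* Sketch of the EO tx credit accounting (field names are illustrative). */
struct eo_state {
	int tx_total;		/* credit budget when the queue is idle */
	int tx_credits;		/* currently available */
	int tx_nocompl;		/* spent since the last completion request */
	int ncompl;		/* completion requests outstanding */
};

/* Charge one WR; returns non-zero if it should request a completion. */
static int
eo_charge(struct eo_state *st, int wr_len16)
{
	int compl;

	st->tx_credits -= wr_len16;
	st->tx_nocompl += wr_len16;
	compl = (st->ncompl == 0 || st->tx_nocompl >= st->tx_total / 2);
	if (compl) {
		st->ncompl++;
		st->tx_nocompl = 0;
	}
	return (compl);
}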
6846 MPASS(m0->m_nextpkt == NULL); in ethofld_transmit()
6847 MPASS(m0->m_pkthdr.csum_flags & CSUM_SND_TAG); in ethofld_transmit()
6848 MPASS(m0->m_pkthdr.snd_tag != NULL); in ethofld_transmit()
6849 cst = mst_to_crt(m0->m_pkthdr.snd_tag); in ethofld_transmit()
6851 mtx_lock(&cst->lock); in ethofld_transmit()
6852 MPASS(cst->flags & EO_SND_TAG_REF); in ethofld_transmit()
6854 if (__predict_false(cst->flags & EO_FLOWC_PENDING)) { in ethofld_transmit()
6856 struct port_info *pi = vi->pi; in ethofld_transmit()
6857 struct adapter *sc = pi->adapter; in ethofld_transmit()
6858 const uint32_t rss_mask = vi->rss_size - 1; in ethofld_transmit()
6861 cst->eo_txq = &sc->sge.ofld_txq[vi->first_ofld_txq]; in ethofld_transmit()
6863 rss_hash = m0->m_pkthdr.flowid; in ethofld_transmit()
6867 cst->iqid = vi->rss[rss_hash & rss_mask]; in ethofld_transmit()
6868 cst->eo_txq += rss_hash % vi->nofldtxq; in ethofld_transmit()
6874 if (__predict_false(cst->plen + m0->m_pkthdr.len > eo_max_backlog)) { in ethofld_transmit()
6879 mbufq_enqueue(&cst->pending_tx, m0); in ethofld_transmit()
6880 cst->plen += m0->m_pkthdr.len; in ethofld_transmit()
6888 m_snd_tag_ref(&cst->com); in ethofld_transmit()
6890 mtx_unlock(&cst->lock); in ethofld_transmit()
6891 m_snd_tag_rele(&cst->com); in ethofld_transmit()
6895 mtx_unlock(&cst->lock); in ethofld_transmit()
6903 struct adapter *sc = iq->adapter; in ethofld_fw4_ack()
6908 uint8_t credits = cpl->credits; in ethofld_fw4_ack()
6911 mtx_lock(&cst->lock); in ethofld_fw4_ack()
6912 if (__predict_false(cst->flags & EO_FLOWC_RPL_PENDING)) { in ethofld_fw4_ack()
6914 credits -= ETID_FLOWC_LEN16; in ethofld_fw4_ack()
6915 cst->flags &= ~EO_FLOWC_RPL_PENDING; in ethofld_fw4_ack()
6918 KASSERT(cst->ncompl > 0, in ethofld_fw4_ack()
6921 cst->ncompl--; in ethofld_fw4_ack()
6924 m = mbufq_dequeue(&cst->pending_fwack); in ethofld_fw4_ack()
6930 MPASS((cst->flags & in ethofld_fw4_ack()
6934 MPASS(cst->tx_credits + cpl->credits == cst->tx_total); in ethofld_fw4_ack()
6935 MPASS(cst->ncompl == 0); in ethofld_fw4_ack()
6937 cst->flags &= ~EO_FLUSH_RPL_PENDING; in ethofld_fw4_ack()
6938 cst->tx_credits += cpl->credits; in ethofld_fw4_ack()
6943 ("%s: too many credits (%u, %u)", __func__, cpl->credits, in ethofld_fw4_ack()
6947 cpl->credits, credits, mbuf_eo_len16(m))); in ethofld_fw4_ack()
6948 credits -= mbuf_eo_len16(m); in ethofld_fw4_ack()
6949 cst->plen -= m->m_pkthdr.len; in ethofld_fw4_ack()
6953 cst->tx_credits += cpl->credits; in ethofld_fw4_ack()
6954 MPASS(cst->tx_credits <= cst->tx_total); in ethofld_fw4_ack()
6956 if (cst->flags & EO_SND_TAG_REF) { in ethofld_fw4_ack()
6961 m_snd_tag_ref(&cst->com); in ethofld_fw4_ack()
6962 m = mbufq_first(&cst->pending_tx); in ethofld_fw4_ack()
6963 if (m != NULL && cst->tx_credits >= mbuf_eo_len16(m)) in ethofld_fw4_ack()
6965 mtx_unlock(&cst->lock); in ethofld_fw4_ack()
6966 m_snd_tag_rele(&cst->com); in ethofld_fw4_ack()
6973 MPASS(mbufq_first(&cst->pending_tx) == NULL); in ethofld_fw4_ack()
6974 mtx_unlock(&cst->lock); in ethofld_fw4_ack()