Lines Matching defs:cst
6664 send_etid_flowc_wr(struct cxgbe_rate_tag *cst, struct port_info *pi,
6671 mtx_assert(&cst->lock, MA_OWNED);
6672 MPASS((cst->flags & (EO_FLOWC_PENDING | EO_FLOWC_RPL_PENDING)) ==
6675 flowc = start_wrq_wr(&cst->eo_txq->wrq, ETID_FLOWC_LEN16, &cookie);
6683 V_FW_WR_FLOWID(cst->etid));
6692 flowc->mnemval[3].val = htobe32(cst->iqid);
6696 flowc->mnemval[5].val = htobe32(cst->schedcl);
6698 commit_wrq_wr(&cst->eo_txq->wrq, flowc, &cookie);
6700 cst->flags &= ~EO_FLOWC_PENDING;
6701 cst->flags |= EO_FLOWC_RPL_PENDING;
6702 MPASS(cst->tx_credits >= ETID_FLOWC_LEN16); /* flowc is first WR. */
6703 cst->tx_credits -= ETID_FLOWC_LEN16;
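The hits above cover all of send_etid_flowc_wr's use of cst: the tag arrives locked with EO_FLOWC_PENDING set and EO_FLOWC_RPL_PENDING clear, the FLOWC work request is built on cst->eo_txq->wrq with the flow id taken from cst->etid, and the tag is debited ETID_FLOWC_LEN16 credits for the WR itself. A minimal standalone sketch of that state transition (simplified types; the flag and LEN16 values are stand-ins, not the driver's definitions):

#include <assert.h>

#define EO_FLOWC_PENDING     0x01  /* FLOWC WR not sent yet (stand-in) */
#define EO_FLOWC_RPL_PENDING 0x02  /* FLOWC sent, reply outstanding */
#define ETID_FLOWC_LEN16     4     /* stand-in for the real WR size */

struct rate_tag {                  /* simplified cxgbe_rate_tag */
        unsigned int flags;
        int tx_credits;            /* len16 credits left on this etid */
};

/* Debit the tag for the FLOWC WR and flip the state flags. */
static void
flowc_sent(struct rate_tag *t)
{
        assert((t->flags & (EO_FLOWC_PENDING | EO_FLOWC_RPL_PENDING)) ==
            EO_FLOWC_PENDING);
        /* The FLOWC is the first WR, so the full pool must be there. */
        assert(t->tx_credits >= ETID_FLOWC_LEN16);

        t->flags &= ~EO_FLOWC_PENDING;
        t->flags |= EO_FLOWC_RPL_PENDING;
        t->tx_credits -= ETID_FLOWC_LEN16;
}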
6712 send_etid_flush_wr(struct cxgbe_rate_tag *cst)
6717 mtx_assert(&cst->lock, MA_OWNED);
6719 flowc = start_wrq_wr(&cst->eo_txq->wrq, ETID_FLUSH_LEN16, &cookie);
6727 V_FW_WR_FLOWID(cst->etid));
6729 commit_wrq_wr(&cst->eo_txq->wrq, flowc, &cookie);
6731 cst->flags |= EO_FLUSH_RPL_PENDING;
6732 MPASS(cst->tx_credits >= ETID_FLUSH_LEN16);
6733 cst->tx_credits -= ETID_FLUSH_LEN16;
6734 cst->ncompl++;
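send_etid_flush_wr repeats the pattern for teardown: a FLUSH WR on the same wrq, EO_FLUSH_RPL_PENDING set, credits debited, and cst->ncompl bumped because a flush always requests a firmware completion. Sketched under the same kind of simplified, stand-in types:

#include <assert.h>

#define EO_FLUSH_RPL_PENDING 0x04  /* stand-in flag value */
#define ETID_FLUSH_LEN16     2     /* stand-in for the real WR size */

struct rate_tag {                  /* simplified cxgbe_rate_tag */
        unsigned int flags;
        int tx_credits;
        int ncompl;                /* completions outstanding from fw */
};

/* Debit the tag for the FLUSH WR; a flush always asks for a reply. */
static void
flush_sent(struct rate_tag *t)
{
        assert(t->tx_credits >= ETID_FLUSH_LEN16);
        t->flags |= EO_FLUSH_RPL_PENDING;
        t->tx_credits -= ETID_FLUSH_LEN16;
        t->ncompl++;               /* a fw4_ack for the flush is now owed */
}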
6738 write_ethofld_wr(struct cxgbe_rate_tag *cst, struct fw_eth_tx_eo_wr *wr,
6750 mtx_assert(&cst->lock, MA_OWNED);
6768 V_FW_WR_FLOWID(cst->etid));
6820 ctrl1 = csum_to_ctrl(cst->adapter, m0);
6829 cpl->ctrl0 = cst->ctrl0;
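write_ethofld_wr only reads the tag: the flow id comes from cst->etid, the per-packet checksum word from csum_to_ctrl(cst->adapter, m0), and the invariant part of the CPL header from cst->ctrl0, precomputed when the tag was created. A tiny illustration of that precompute-once, copy-per-packet split (field layout is illustrative, not the real CPL):

#include <stdint.h>

struct tag {                       /* simplified: one per send tag */
        uint32_t ctrl0;            /* precomputed, invariant per tag */
};

struct cpl_hdr {                   /* illustrative, not the real CPL */
        uint32_t ctrl0;            /* opcode/interface, same every pkt */
        uint32_t ctrl1;            /* per-packet checksum offload bits */
};

/* Per-packet work: copy the cached word, compute only what varies. */
static void
fill_cpl(const struct tag *t, struct cpl_hdr *cpl, uint32_t csum_ctrl)
{
        cpl->ctrl0 = t->ctrl0;     /* mirrors cpl->ctrl0 = cst->ctrl0 */
        cpl->ctrl1 = csum_ctrl;    /* mirrors csum_to_ctrl(adapter, m) */
}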
6888 ethofld_tx(struct cxgbe_rate_tag *cst)
6895 mtx_assert(&cst->lock, MA_OWNED);
6897 while ((m = mbufq_first(&cst->pending_tx)) != NULL) {
6903 if (next_credits > cst->tx_credits) {
6909 MPASS(cst->ncompl > 0);
6912 wr = start_wrq_wr(&cst->eo_txq->wrq, next_credits, &cookie);
6915 MPASS(cst->ncompl > 0);
6918 cst->tx_credits -= next_credits;
6919 cst->tx_nocompl += next_credits;
6920 compl = cst->ncompl == 0 || cst->tx_nocompl >= cst->tx_total / 2;
6921 ETHER_BPF_MTAP(cst->com.ifp, m);
6922 write_ethofld_wr(cst, wr, m, compl);
6923 commit_wrq_wr(&cst->eo_txq->wrq, wr, &cookie);
6925 cst->ncompl++;
6926 cst->tx_nocompl = 0;
6928 (void) mbufq_dequeue(&cst->pending_tx);
6942 m_snd_tag_rele(&cst->com);
6944 mbufq_enqueue(&cst->pending_fwack, m);
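ethofld_tx is the drain loop. Each pending mbuf is costed in len16 credits up front (mbuf_eo_len16), the loop bails as soon as the next packet does not fit, and the MPASS(cst->ncompl > 0) on both bail-out paths documents why that is safe: an outstanding completion will return credits and restart the loop. A completion is requested whenever none is outstanding or once half of tx_total has been spent without one, which bounds both the ack rate and the worst-case stall. A compilable model of just that gating logic (queue and WR plumbing stubbed out; names are stand-ins):

#include <assert.h>
#include <stdbool.h>

struct tag {                        /* simplified cxgbe_rate_tag */
        int tx_credits;             /* credits currently available */
        int tx_total;               /* size of the full credit pool */
        int tx_nocompl;             /* credits spent since last compl req */
        int ncompl;                 /* completion requests outstanding */
};

/*
 * Account one packet costing `need` len16 credits.
 * Returns false when the caller must wait for a fw4_ack.
 */
static bool
tx_one(struct tag *t, int need)
{
        bool compl;

        assert(need > 0);
        if (need > t->tx_credits) {
                /* Progress is guaranteed only if an ack is in flight. */
                assert(t->ncompl > 0);
                return (false);
        }
        t->tx_credits -= need;
        t->tx_nocompl += need;

        /* Request a completion if none is pending, or at half-pool. */
        compl = t->ncompl == 0 || t->tx_nocompl >= t->tx_total / 2;
        if (compl) {
                t->ncompl++;
                t->tx_nocompl = 0;
        }
        return (true);
}

The m_snd_tag_rele/mbufq_enqueue pair at the tail of the matched loop shows the other half of the bookkeeping: each sent mbuf migrates from pending_tx to pending_fwack, and it appears to drop its reference on the send tag early so in-flight packets do not pin the tag's refcount.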
6952 struct cxgbe_rate_tag *cst;
6958 cst = mst_to_crt(m0->m_pkthdr.snd_tag);
6960 mtx_lock(&cst->lock);
6961 MPASS(cst->flags & EO_SND_TAG_REF);
6963 if (__predict_false(cst->flags & EO_FLOWC_PENDING)) {
6970 cst->eo_txq = &sc->sge.ofld_txq[vi->first_ofld_txq];
6976 cst->iqid = vi->rss[rss_hash & rss_mask];
6977 cst->eo_txq += rss_hash % vi->nofldtxq;
6978 rc = send_etid_flowc_wr(cst, pi, vi);
6983 if (__predict_false(cst->plen + m0->m_pkthdr.len > eo_max_backlog)) {
6988 mbufq_enqueue(&cst->pending_tx, m0);
6989 cst->plen += m0->m_pkthdr.len;
6997 m_snd_tag_ref(&cst->com);
6998 ethofld_tx(cst);
6999 mtx_unlock(&cst->lock);
7000 m_snd_tag_rele(&cst->com);
7004 mtx_unlock(&cst->lock);
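The transmit path matched above ends with a deliberate ref/drain/unlock/rele sequence: once the lock drops, a concurrent fw4_ack may release the reference that the queued mbufs were holding, so a private m_snd_tag_ref keeps cst alive until after mtx_unlock. A minimal sketch of the idiom (stubbed lock, and a plain counter standing in for what is an atomic refcount in the real m_snd_tag):

#include <pthread.h>

struct tag {
        pthread_mutex_t lock;
        int refs;                  /* stand-in for the tag refcount */
};

static void tag_ref(struct tag *t)  { t->refs++; }  /* m_snd_tag_ref */
static void tag_rele(struct tag *t) { if (--t->refs == 0) { /* free */ } }
static void drain(struct tag *t)    { (void)t; /* ethofld_tx stand-in */ }

/*
 * Called with t->lock held.  Hold a private ref across the unlock:
 * after the lock drops, another thread may drop the last reference
 * our queued mbufs were pinning.
 */
static void
transmit_tail(struct tag *t)
{
        tag_ref(t);                /* keep t alive past the unlock */
        drain(t);
        pthread_mutex_unlock(&t->lock);
        tag_rele(t);               /* t may be freed right here */
}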
7016 struct cxgbe_rate_tag *cst;
7019 cst = lookup_etid(sc, etid);
7020 mtx_lock(&cst->lock);
7021 if (__predict_false(cst->flags & EO_FLOWC_RPL_PENDING)) {
7024 cst->flags &= ~EO_FLOWC_RPL_PENDING;
7027 KASSERT(cst->ncompl > 0,
7029 __func__, etid, cst));
7030 cst->ncompl--;
7033 m = mbufq_dequeue(&cst->pending_fwack);
7039 MPASS((cst->flags &
7043 MPASS(cst->tx_credits + cpl->credits == cst->tx_total);
7044 MPASS(cst->ncompl == 0);
7046 cst->flags &= ~EO_FLUSH_RPL_PENDING;
7047 cst->tx_credits += cpl->credits;
7048 cxgbe_rate_tag_free_locked(cst);
7049 return (0); /* cst is gone. */
7058 cst->plen -= m->m_pkthdr.len;
7062 cst->tx_credits += cpl->credits;
7063 MPASS(cst->tx_credits <= cst->tx_total);
7065 if (cst->flags & EO_SND_TAG_REF) {
7070 m_snd_tag_ref(&cst->com);
7071 m = mbufq_first(&cst->pending_tx);
7072 if (m != NULL && cst->tx_credits >= mbuf_eo_len16(m))
7073 ethofld_tx(cst);
7074 mtx_unlock(&cst->lock);
7075 m_snd_tag_rele(&cst->com);
7082 MPASS(mbufq_first(&cst->pending_tx) == NULL);
7083 mtx_unlock(&cst->lock);
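ethofld_fw4_ack closes the loop. The first credit return retires EO_FLOWC_RPL_PENDING rather than ncompl (the FLOWC WR is not counted as a completion); later returns decrement ncompl, pop the covered mbufs off pending_fwack, shrink cst->plen, and refund tx_credits. A return that arrives with EO_FLUSH_RPL_PENDING set and brings tx_credits back to tx_total means the etid is idle, so the tag is freed under the lock and the handler returns with cst gone. A simplified model of that accounting (the real handler also frees the acked mbufs and may restart ethofld_tx; one aggregate plen decrement stands in for the per-mbuf loop):

#include <assert.h>
#include <stdbool.h>

#define EO_FLOWC_RPL_PENDING 0x02  /* stand-in flag values */
#define EO_FLUSH_RPL_PENDING 0x04

struct tag {
        unsigned int flags;
        int tx_credits, tx_total;
        int ncompl;
        int plen;                  /* payload bytes queued on this etid */
};

/*
 * Account one CPL_FW4_ACK returning `credits` len16 credits that
 * covered `acked_bytes` of now-transmitted payload.  Returns true
 * when this was the final flush reply and the tag should be freed.
 */
static bool
fw4_ack(struct tag *t, int credits, int acked_bytes)
{
        if (t->flags & EO_FLOWC_RPL_PENDING)
                t->flags &= ~EO_FLOWC_RPL_PENDING; /* flowc reply */
        else {
                assert(t->ncompl > 0);
                t->ncompl--;
        }
        if ((t->flags & EO_FLUSH_RPL_PENDING) &&
            t->tx_credits + credits == t->tx_total) {
                /* Flush reply: every credit is back, etid is idle. */
                assert(t->ncompl == 0);
                t->flags &= ~EO_FLUSH_RPL_PENDING;
                t->tx_credits += credits;
                return (true);     /* caller frees the tag */
        }
        t->plen -= acked_bytes;
        t->tx_credits += credits;
        assert(t->tx_credits <= t->tx_total);
        return (false);
}

Note that when the handler restarts ethofld_tx under EO_SND_TAG_REF it reuses the same ref/unlock/rele pattern shown in the transmit-path sketch above.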