Lines Matching +full:wr +full:- +full:hold

(Code-search listing: each entry below shows a matching source line, its line number in the searched file, and the enclosing function. The matches are from the FreeBSD cxgbe(4) TCP offload driver, apparently sys/dev/cxgbe/tom/t4_cpl_io.c; non-matching lines are elided, so many statements appear truncated.)

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
83 struct wrqe *wr; in send_flowc_wr() local
86 struct vi_info *vi = toep->vi; in send_flowc_wr()
87 struct port_info *pi = vi->pi; in send_flowc_wr()
88 struct adapter *sc = pi->adapter; in send_flowc_wr()
89 unsigned int pfvf = sc->pf << S_FW_VIID_PFN; in send_flowc_wr()
90 struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx]; in send_flowc_wr()
92 KASSERT(!(toep->flags & TPF_FLOWC_WR_SENT), in send_flowc_wr()
93 ("%s: flowc for tid %u sent already", __func__, toep->tid)); in send_flowc_wr()
99 if (toep->params.tc_idx != -1) { in send_flowc_wr()
100 MPASS(toep->params.tc_idx >= 0 && in send_flowc_wr()
101 toep->params.tc_idx < sc->params.nsched_cls); in send_flowc_wr()
107 wr = alloc_wrqe(roundup2(flowclen, 16), &toep->ofld_txq->wrq); in send_flowc_wr()
108 if (wr == NULL) { in send_flowc_wr()
112 flowc = wrtod(wr); in send_flowc_wr()
113 memset(flowc, 0, wr->wr_len); in send_flowc_wr()
115 flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) | in send_flowc_wr()
117 flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) | in send_flowc_wr()
118 V_FW_WR_FLOWID(toep->tid)); in send_flowc_wr()
122 flowc->mnemval[paramidx].mnemonic = FW_FLOWC_MNEM_##__m; \ in send_flowc_wr()
123 flowc->mnemval[paramidx].val = htobe32(__v); \ in send_flowc_wr()
130 FLOWC_PARAM(CH, pi->tx_chan); in send_flowc_wr()
131 FLOWC_PARAM(PORT, pi->tx_chan); in send_flowc_wr()
132 FLOWC_PARAM(IQID, toep->ofld_rxq->iq.abs_id); in send_flowc_wr()
133 FLOWC_PARAM(SNDBUF, toep->params.sndbuf); in send_flowc_wr()
135 FLOWC_PARAM(MSS, toep->params.emss); in send_flowc_wr()
136 FLOWC_PARAM(SNDNXT, tp->snd_nxt); in send_flowc_wr()
137 FLOWC_PARAM(RCVNXT, tp->rcv_nxt); in send_flowc_wr()
142 __func__, toep->tid, toep->params.emss, toep->params.sndbuf, in send_flowc_wr()
143 tp ? tp->snd_nxt : 0, tp ? tp->rcv_nxt : 0); in send_flowc_wr()
145 if (toep->params.tc_idx != -1) in send_flowc_wr()
146 FLOWC_PARAM(SCHEDCLASS, toep->params.tc_idx); in send_flowc_wr()
151 txsd->tx_credits = howmany(flowclen, 16); in send_flowc_wr()
152 txsd->plen = 0; in send_flowc_wr()
153 KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0, in send_flowc_wr()
154 ("%s: not enough credits (%d)", __func__, toep->tx_credits)); in send_flowc_wr()
155 toep->tx_credits -= txsd->tx_credits; in send_flowc_wr()
156 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) in send_flowc_wr()
157 toep->txsd_pidx = 0; in send_flowc_wr()
158 toep->txsd_avail--; in send_flowc_wr()
160 toep->flags |= TPF_FLOWC_WR_SENT; in send_flowc_wr()
161 t4_wrq_tx(sc, wr); in send_flowc_wr()
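The flowc WR just sent is charged against the connection's tx credits in 16-byte units: howmany(flowclen, 16) is the credit cost recorded in txsd->tx_credits, and roundup2(flowclen, 16) is the padded size passed to alloc_wrqe(). A minimal standalone sketch of that arithmetic (the macros mirror FreeBSD's sys/param.h; the 72-byte flowclen is an assumed example):

#include <stdio.h>

/* These mirror the FreeBSD sys/param.h macros used above. */
#define howmany(x, y)	(((x) + ((y) - 1)) / (y))
#define roundup2(x, y)	(((x) + ((y) - 1)) & ~((y) - 1))	/* y: power of 2 */

int
main(void)
{
	/* Assumed example: flowc header plus 8 mnemonic/value pairs. */
	int flowclen = 8 + 8 * 8;	/* 72 bytes */

	printf("alloc %d bytes, %d tx credits\n",
	    roundup2(flowclen, 16), howmany(flowclen, 16));	/* 80 bytes, 5 */
	return (0);
}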
173 const int port_id = toep->vi->pi->port_id; in update_tx_rate_limit()
175 CTR3(KTR_CXGBE, "%s: tid %u, rate %uKbps", __func__, toep->tid, kbps); in update_tx_rate_limit()
179 tc_idx = -1; in update_tx_rate_limit()
184 MPASS(tc_idx >= 0 && tc_idx < sc->params.nsched_cls); in update_tx_rate_limit()
187 if (toep->params.tc_idx != tc_idx) { in update_tx_rate_limit()
188 struct wrqe *wr; in update_tx_rate_limit() local
191 struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx]; in update_tx_rate_limit()
196 if (toep->tx_credits < flowclen16 || toep->txsd_avail == 0 || in update_tx_rate_limit()
197 (wr = alloc_wrqe(roundup2(flowclen, 16), in update_tx_rate_limit()
198 &toep->ofld_txq->wrq)) == NULL) { in update_tx_rate_limit()
204 flowc = wrtod(wr); in update_tx_rate_limit()
205 memset(flowc, 0, wr->wr_len); in update_tx_rate_limit()
207 flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) | in update_tx_rate_limit()
209 flowc->flowid_len16 = htonl(V_FW_WR_LEN16(flowclen16) | in update_tx_rate_limit()
210 V_FW_WR_FLOWID(toep->tid)); in update_tx_rate_limit()
212 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS; in update_tx_rate_limit()
213 if (tc_idx == -1) in update_tx_rate_limit()
214 flowc->mnemval[0].val = htobe32(0xff); in update_tx_rate_limit()
216 flowc->mnemval[0].val = htobe32(tc_idx); in update_tx_rate_limit()
218 txsd->tx_credits = flowclen16; in update_tx_rate_limit()
219 txsd->plen = 0; in update_tx_rate_limit()
220 toep->tx_credits -= txsd->tx_credits; in update_tx_rate_limit()
221 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) in update_tx_rate_limit()
222 toep->txsd_pidx = 0; in update_tx_rate_limit()
223 toep->txsd_avail--; in update_tx_rate_limit()
224 t4_wrq_tx(sc, wr); in update_tx_rate_limit()
227 if (toep->params.tc_idx >= 0) in update_tx_rate_limit()
228 t4_release_cl_rl(sc, port_id, toep->params.tc_idx); in update_tx_rate_limit()
229 toep->params.tc_idx = tc_idx; in update_tx_rate_limit()
238 struct wrqe *wr; in send_reset() local
240 int tid = toep->tid; in send_reset()
241 struct inpcb *inp = toep->inp; in send_reset()
247 __func__, toep->tid, in send_reset()
248 inp->inp_flags & INP_DROPPED ? "inp dropped" : in send_reset()
249 tcpstates[tp->t_state], in send_reset()
250 toep->flags, inp->inp_flags, in send_reset()
251 toep->flags & TPF_ABORT_SHUTDOWN ? in send_reset()
254 if (toep->flags & TPF_ABORT_SHUTDOWN) in send_reset()
257 toep->flags |= TPF_ABORT_SHUTDOWN; in send_reset()
259 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, in send_reset()
262 wr = alloc_wrqe(sizeof(*req), &toep->ofld_txq->wrq); in send_reset()
263 if (wr == NULL) { in send_reset()
267 req = wrtod(wr); in send_reset()
270 if (inp->inp_flags & INP_DROPPED) in send_reset()
271 req->rsvd0 = htobe32(snd_nxt); in send_reset()
273 req->rsvd0 = htobe32(tp->snd_nxt); in send_reset()
274 req->rsvd1 = !(toep->flags & TPF_TX_DATA_SENT); in send_reset()
275 req->cmd = CPL_ABORT_SEND_RST; in send_reset()
281 if ((inp->inp_flags & INP_DROPPED) == 0) { in send_reset()
282 struct socket *so = inp->inp_socket; in send_reset()
285 sbflush(&so->so_snd); in send_reset()
288 t4_l2t_send(sc, wr, toep->l2te); in send_reset()
298 struct toepcb *toep = tp->t_toe; in assign_rxopt()
300 struct adapter *sc = td_adapter(toep->td); in assign_rxopt()
304 toep->params.mtu_idx = G_TCPOPT_MSS(opt); in assign_rxopt()
305 tp->t_maxseg = sc->params.mtus[toep->params.mtu_idx]; in assign_rxopt()
306 if (inp->inp_inc.inc_flags & INC_ISIPV6) in assign_rxopt()
307 tp->t_maxseg -= sizeof(struct ip6_hdr) + sizeof(struct tcphdr); in assign_rxopt()
309 tp->t_maxseg -= sizeof(struct ip) + sizeof(struct tcphdr); in assign_rxopt()
311 toep->params.emss = tp->t_maxseg; in assign_rxopt()
313 toep->params.tstamp = 1; in assign_rxopt()
314 toep->params.emss -= TCPOLEN_TSTAMP_APPA; in assign_rxopt()
315 tp->t_flags |= TF_RCVD_TSTMP; /* timestamps ok */ in assign_rxopt()
316 tp->ts_recent = 0; /* hmmm */ in assign_rxopt()
317 tp->ts_recent_age = tcp_ts_getticks(); in assign_rxopt()
319 toep->params.tstamp = 0; in assign_rxopt()
322 toep->params.sack = 1; in assign_rxopt()
323 tp->t_flags |= TF_SACK_PERMIT; /* should already be set */ in assign_rxopt()
325 toep->params.sack = 0; in assign_rxopt()
326 tp->t_flags &= ~TF_SACK_PERMIT; /* sack disallowed by peer */ in assign_rxopt()
330 tp->t_flags |= TF_RCVD_SCALE; in assign_rxopt()
333 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == in assign_rxopt()
335 tp->rcv_scale = tp->request_r_scale; in assign_rxopt()
336 tp->snd_scale = G_TCPOPT_SND_WSCALE(opt); in assign_rxopt()
338 toep->params.wscale = 0; in assign_rxopt()
342 toep->tid, toep->params.mtu_idx, toep->params.emss, in assign_rxopt()
343 toep->params.tstamp, toep->params.sack, toep->params.wscale); in assign_rxopt()
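assign_rxopt() derives the effective MSS from the negotiated MTU-table entry minus IP/TCP headers, then shaves TCPOLEN_TSTAMP_APPA (12 bytes) when timestamps were negotiated. A standalone sketch of that derivation (the 1500-byte MTU entry is an assumed example):

#include <stdio.h>

#define TCPOLEN_TSTAMP_APPA	12	/* timestamp option incl. padding */

int
main(void)
{
	int mtu = 1500, ipv6 = 0, tstamp = 1;

	/* Mirrors the t_maxseg/emss derivation in assign_rxopt() above. */
	int maxseg = mtu - (ipv6 ? 40 + 20 : 20 + 20);	/* ip6/ip + tcp hdrs */
	int emss = maxseg - (tstamp ? TCPOLEN_TSTAMP_APPA : 0);

	printf("t_maxseg=%d emss=%d\n", maxseg, emss);	/* 1460, 1448 */
	return (0);
}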
355 struct inpcb *inp = toep->inp; in make_established()
356 struct socket *so = inp->inp_socket; in make_established()
361 KASSERT(tp->t_state == TCPS_SYN_SENT || in make_established()
362 tp->t_state == TCPS_SYN_RECEIVED, in make_established()
363 ("%s: TCP state %s", __func__, tcpstates[tp->t_state])); in make_established()
366 __func__, toep->tid, so, inp, tp, toep); in make_established()
369 tp->t_starttime = ticks; in make_established()
372 tp->irs = irs; in make_established()
374 tp->rcv_wnd = (u_int)toep->params.opt0_bufsize << 10; in make_established()
375 tp->rcv_adv += tp->rcv_wnd; in make_established()
376 tp->last_ack_sent = tp->rcv_nxt; in make_established()
378 tp->iss = iss; in make_established()
380 tp->snd_una = iss + 1; in make_established()
381 tp->snd_nxt = iss + 1; in make_established()
382 tp->snd_max = iss + 1; in make_established()
393 struct wrqe *wr; in send_rx_credits() local
399 wr = alloc_wrqe(sizeof(*req), toep->ctrlq); in send_rx_credits()
400 if (wr == NULL) in send_rx_credits()
402 req = wrtod(wr); in send_rx_credits()
404 INIT_TP_WR_MIT_CPL(req, CPL_RX_DATA_ACK, toep->tid); in send_rx_credits()
405 req->credit_dack = htobe32(dack | V_RX_CREDITS(credits)); in send_rx_credits()
407 t4_wrq_tx(sc, wr); in send_rx_credits()
414 struct adapter *sc = tod->tod_softc; in t4_rcvd_locked()
416 struct socket *so = inp->inp_socket; in t4_rcvd_locked()
417 struct sockbuf *sb = &so->so_rcv; in t4_rcvd_locked()
418 struct toepcb *toep = tp->t_toe; in t4_rcvd_locked()
424 rx_credits = sbspace(sb) > tp->rcv_wnd ? sbspace(sb) - tp->rcv_wnd : 0; in t4_rcvd_locked()
426 (tp->rcv_wnd <= 32 * 1024 || rx_credits >= 64 * 1024 || in t4_rcvd_locked()
427 (rx_credits >= 16 * 1024 && tp->rcv_wnd <= 128 * 1024) || in t4_rcvd_locked()
428 sbused(sb) + tp->rcv_wnd < sb->sb_lowat)) { in t4_rcvd_locked()
430 tp->rcv_wnd += rx_credits; in t4_rcvd_locked()
431 tp->rcv_adv += rx_credits; in t4_rcvd_locked()
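t4_rcvd_locked() returns receive-window space freed by the application as RX credits, but batches them so a WR is not sent on every read. A standalone sketch of the decision (the leading guard sits on a line this listing elides, so the rx_credits > 0 test here is an assumption):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch of the batching test visible in t4_rcvd_locked() above. */
static bool
should_send_rx_credits(uint32_t rx_credits, uint32_t rcv_wnd,
    uint32_t used, uint32_t lowat)
{
	return (rx_credits > 0 &&
	    (rcv_wnd <= 32 * 1024 || rx_credits >= 64 * 1024 ||
	    (rx_credits >= 16 * 1024 && rcv_wnd <= 128 * 1024) ||
	    used + rcv_wnd < lowat));
}

int
main(void)
{
	printf("%d %d\n",
	    should_send_rx_credits(8 * 1024, 24 * 1024, 0, 2048),	/* 1: small window */
	    should_send_rx_credits(8 * 1024, 256 * 1024, 0, 2048));	/* 0: keep batching */
	return (0);
}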
439 struct socket *so = inp->inp_socket; in t4_rcvd()
440 struct sockbuf *sb = &so->so_rcv; in t4_rcvd()
453 struct wrqe *wr; in t4_close_conn() local
455 unsigned int tid = toep->tid; in t4_close_conn()
457 CTR3(KTR_CXGBE, "%s: tid %u%s", __func__, toep->tid, in t4_close_conn()
458 toep->flags & TPF_FIN_SENT ? ", IGNORED" : ""); in t4_close_conn()
460 if (toep->flags & TPF_FIN_SENT) in t4_close_conn()
463 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, in t4_close_conn()
466 wr = alloc_wrqe(sizeof(*req), &toep->ofld_txq->wrq); in t4_close_conn()
467 if (wr == NULL) { in t4_close_conn()
471 req = wrtod(wr); in t4_close_conn()
473 req->wr.wr_hi = htonl(V_FW_WR_OP(FW_TP_WR) | in t4_close_conn()
474 V_FW_WR_IMMDLEN(sizeof(*req) - sizeof(req->wr))); in t4_close_conn()
475 req->wr.wr_mid = htonl(V_FW_WR_LEN16(howmany(sizeof(*req), 16)) | in t4_close_conn()
477 req->wr.wr_lo = cpu_to_be64(0); in t4_close_conn()
479 req->rsvd = 0; in t4_close_conn()
481 toep->flags |= TPF_FIN_SENT; in t4_close_conn()
482 toep->flags &= ~TPF_SEND_FIN; in t4_close_conn()
483 t4_l2t_send(sc, wr, toep->l2te); in t4_close_conn()
494 /* Maximum amount of immediate data we could stuff in a WR */
499 const int n = 1; /* Use no more than one desc for imm. data WR */ in max_imm_payload()
509 return ((n * EQ_ESIZE) - sizeof(struct fw_ofld_tx_data_wr) - in max_imm_payload()
512 return (tx_credits * 16 - sizeof(struct fw_ofld_tx_data_wr) - in max_imm_payload()
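Both returns above are truncated by the search, but the shape of max_imm_payload() is visible: one tx credit is 16 bytes of descriptor space, and the fw_ofld_tx_data_wr header (plus a CPL term the listing cuts off) comes off the top. A hedged sketch with assumed sizes:

#include <stdio.h>

/*
 * Sketch of the max_imm_payload() arithmetic.  The header size is an
 * assumption for illustration; the real code also subtracts a CPL
 * header term that this listing truncates.
 */
#define CREDIT_BYTES		16
#define FW_OFLD_TX_DATA_WR_LEN	24	/* assumed */

static int
imm_payload(int tx_credits)
{
	return (tx_credits * CREDIT_BYTES - FW_OFLD_TX_DATA_WR_LEN);
}

int
main(void)
{
	printf("8 credits -> %d immediate bytes (before CPL overhead)\n",
	    imm_payload(8));	/* 128 - 24 = 104 */
	return (0);
}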
516 /* Maximum number of SGL entries we could stuff in a WR */
521 int sge_pair_credits = tx_credits - MIN_TX_CREDITS(iso); in max_dsgl_nsegs()
544 txwr->op_to_immdlen = htobe32(V_WR_OP(fw_wr_opcode) | in write_tx_wr()
546 txwr->flowid_len16 = htobe32(V_FW_WR_FLOWID(toep->tid) | in write_tx_wr()
548 txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(ulp_mode(toep)) | in write_tx_wr()
550 txwr->plen = htobe32(plen); in write_tx_wr()
552 if (toep->params.tx_align > 0) { in write_tx_wr()
553 if (plen < 2 * toep->params.emss) in write_tx_wr()
554 txwr->lsodisable_to_flags |= in write_tx_wr()
557 txwr->lsodisable_to_flags |= in write_tx_wr()
559 (toep->params.nagle == 0 ? 0 : in write_tx_wr()
580 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | in write_tx_sgl()
583 i = -1; in write_tx_sgl()
584 for (m = start; m != stop; m = m->m_next) { in write_tx_sgl()
585 if (m->m_flags & M_EXTPG) in write_tx_sgl()
587 mtod(m, vm_offset_t), m->m_len); in write_tx_sgl()
589 rc = sglist_append(&sg, mtod(m, void *), m->m_len); in write_tx_sgl()
595 usgl->len0 = htobe32(segs[j].ss_len); in write_tx_sgl()
596 usgl->addr0 = htobe64(segs[j].ss_paddr); in write_tx_sgl()
598 usgl->sge[i / 2].len[i & 1] = in write_tx_sgl()
600 usgl->sge[i / 2].addr[i & 1] = in write_tx_sgl()
604 nsegs--; in write_tx_sgl()
610 usgl->sge[i / 2].len[1] = htobe32(0); in write_tx_sgl()
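write_tx_sgl() emits a ULP_TX_SC_DSGL: the first segment lands in len0/addr0 and the remainder are packed two per sge pair, with an odd trailing pair zero-padded (line 610 above). A host-endian sketch of that packing, with assumed stand-in structs (the real code byte-swaps via htobe32/htobe64 and uses the hardware's ulptx_sgl layout):

#include <stdint.h>
#include <stdio.h>

struct sge_pair {
	uint32_t len[2];
	uint64_t addr[2];
};

struct dsgl_sketch {
	uint32_t len0;
	uint64_t addr0;
	struct sge_pair sge[4];
};

static void
pack_dsgl(struct dsgl_sketch *u, const uint32_t *len, const uint64_t *addr,
    int nsegs)
{
	int i = 0, j;

	u->len0 = len[0];
	u->addr0 = addr[0];
	for (j = 1; j < nsegs; i++, j++) {
		u->sge[i / 2].len[i & 1] = len[j];
		u->sge[i / 2].addr[i & 1] = addr[j];
	}
	if (i & 1)
		u->sge[i / 2].len[1] = 0;	/* pad the odd half-pair */
}

int
main(void)
{
	uint32_t len[2] = { 100, 200 };
	uint64_t addr[2] = { 0x1000, 0x2000 };
	struct dsgl_sketch u = { 0 };

	pack_dsgl(&u, len, addr, 2);
	printf("len0=%u sge0.len={%u,%u}\n",
	    u.len0, u.sge[0].len[0], u.sge[0].len[1]);	/* 100 {200,0} */
	return (0);
}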
641 struct wrqe *wr; in t4_push_frames() local
643 struct inpcb *inp = toep->inp; in t4_push_frames()
645 struct socket *so = inp->inp_socket; in t4_push_frames()
646 struct sockbuf *sb = &so->so_snd; in t4_push_frames()
652 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, in t4_push_frames()
653 ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid)); in t4_push_frames()
663 __func__, toep->tid, toep->flags, tp->t_flags, drop); in t4_push_frames()
665 if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) in t4_push_frames()
669 if (__predict_false(inp->inp_flags2 & INP_RATE_LIMIT_CHANGED) && in t4_push_frames()
670 (update_tx_rate_limit(sc, toep, so->so_max_pacing_rate) == 0)) { in t4_push_frames()
671 inp->inp_flags2 &= ~INP_RATE_LIMIT_CHANGED; in t4_push_frames()
679 if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) { in t4_push_frames()
685 txsd = &toep->txsd[toep->txsd_pidx]; in t4_push_frames()
687 tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS); in t4_push_frames()
697 sb_sndptr = sb->sb_sndptr; in t4_push_frames()
698 sndptr = sb_sndptr ? sb_sndptr->m_next : sb->sb_mb; in t4_push_frames()
703 for (m = sndptr; m != NULL; m = m->m_next) { in t4_push_frames()
706 if ((m->m_flags & M_NOTREADY) != 0) in t4_push_frames()
708 if (m->m_flags & M_EXTPG) { in t4_push_frames()
710 if (m->m_epg_tls != NULL) { in t4_push_frames()
711 toep->flags |= TPF_KTLS; in t4_push_frames()
721 mtod(m, vm_offset_t), m->m_len); in t4_push_frames()
723 n = sglist_count(mtod(m, void *), m->m_len); in t4_push_frames()
726 plen += m->m_len; in t4_push_frames()
730 nsegs -= n; in t4_push_frames()
731 plen -= m->m_len; in t4_push_frames()
734 toep->flags |= TPF_TX_SUSPENDED; in t4_push_frames()
737 &toep->aiotx_jobq)) in t4_push_frames()
749 if (m->m_flags & M_EXTPG) in t4_push_frames()
753 sb_sndptr = m; /* new sb->sb_sndptr if all goes well */ in t4_push_frames()
757 m = m->m_next; in t4_push_frames()
762 if (sbused(sb) > sb->sb_hiwat * 5 / 8 && in t4_push_frames()
763 toep->plen_nocompl + plen >= sb->sb_hiwat / 4) in t4_push_frames()
768 if (sb->sb_flags & SB_AUTOSIZE && in t4_push_frames()
770 sb->sb_hiwat < V_tcp_autosndbuf_max && in t4_push_frames()
771 sbused(sb) >= sb->sb_hiwat * 7 / 8) { in t4_push_frames()
772 int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc, in t4_push_frames()
776 sb->sb_flags &= ~SB_AUTOSIZE; in t4_push_frames()
781 if (!TAILQ_EMPTY(&toep->aiotx_jobq)) in t4_push_frames()
790 KASSERT(m == NULL || (m->m_flags & M_NOTREADY) != 0, in t4_push_frames()
796 if (__predict_false(toep->flags & TPF_FIN_SENT)) in t4_push_frames()
799 shove = m == NULL && !(tp->t_flags & TF_MORETOCOME); in t4_push_frames()
804 wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16), in t4_push_frames()
805 &toep->ofld_txq->wrq); in t4_push_frames()
806 if (wr == NULL) { in t4_push_frames()
808 toep->flags |= TPF_TX_SUSPENDED; in t4_push_frames()
811 txwr = wrtod(wr); in t4_push_frames()
812 credits = howmany(wr->wr_len, 16); in t4_push_frames()
823 ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8; in t4_push_frames()
824 wr = alloc_wrqe(roundup2(wr_len, 16), in t4_push_frames()
825 &toep->ofld_txq->wrq); in t4_push_frames()
826 if (wr == NULL) { in t4_push_frames()
828 toep->flags |= TPF_TX_SUSPENDED; in t4_push_frames()
831 txwr = wrtod(wr); in t4_push_frames()
844 KASSERT(toep->tx_credits >= credits, in t4_push_frames()
847 toep->tx_credits -= credits; in t4_push_frames()
848 toep->tx_nocompl += credits; in t4_push_frames()
849 toep->plen_nocompl += plen; in t4_push_frames()
850 if (toep->tx_credits <= toep->tx_total * 3 / 8 && in t4_push_frames()
851 toep->tx_nocompl >= toep->tx_total / 4) in t4_push_frames()
855 txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL); in t4_push_frames()
856 toep->tx_nocompl = 0; in t4_push_frames()
857 toep->plen_nocompl = 0; in t4_push_frames()
860 tp->snd_nxt += plen; in t4_push_frames()
861 tp->snd_max += plen; in t4_push_frames()
865 sb->sb_sndptr = sb_sndptr; in t4_push_frames()
868 toep->flags |= TPF_TX_DATA_SENT; in t4_push_frames()
869 if (toep->tx_credits < MIN_OFLD_TX_CREDITS) in t4_push_frames()
870 toep->flags |= TPF_TX_SUSPENDED; in t4_push_frames()
872 KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__)); in t4_push_frames()
873 txsd->plen = plen; in t4_push_frames()
874 txsd->tx_credits = credits; in t4_push_frames()
876 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) { in t4_push_frames()
877 toep->txsd_pidx = 0; in t4_push_frames()
878 txsd = &toep->txsd[0]; in t4_push_frames()
880 toep->txsd_avail--; in t4_push_frames()
882 t4_l2t_send(sc, wr, toep->l2te); in t4_push_frames()
883 } while (m != NULL && (m->m_flags & M_NOTREADY) == 0); in t4_push_frames()
886 if (m == NULL && toep->flags & TPF_SEND_FIN) in t4_push_frames()
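The WR sizing at line 823 above is worth unpacking: after the 16-byte fixed SGL head (cmd_nsge, len0, addr0), segments travel in 24-byte pairs, and an odd remainder still costs a 16-byte half pair. A standalone check of that formula:

#include <stdio.h>

/* The 8-byte-word count from line 823: (3*(nsegs-1))/2 + ((nsegs-1)&1). */
static int
sgl_words(int nsegs)
{
	return ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1));
}

int
main(void)
{
	for (int nsegs = 1; nsegs <= 5; nsegs++)
		printf("%d segs -> 16 + %d bytes of SGL\n",
		    nsegs, sgl_words(nsegs) * 8);	/* +0, +16, +24, +40, +48 */
	return (0);
}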
903 MPASS(plen >= m->m_pkthdr.len); in rqdrop_locked()
905 plen -= m->m_pkthdr.len; in rqdrop_locked()
928 * - this large PDU is marked as the "last" slice in write_tx_data_iso()
930 * - the amount of data payload bytes equals the burst_size in write_tx_data_iso()
941 cpl->op_to_scsi = htonl(V_CPL_TX_DATA_ISO_OP(CPL_TX_DATA_ISO) | in write_tx_data_iso()
949 cpl->ahs_len = 0; in write_tx_data_iso()
950 cpl->mpdu = htons(DIV_ROUND_UP(mss, 4)); in write_tx_data_iso()
951 cpl->burst_size = htonl(DIV_ROUND_UP(burst_size, 4)); in write_tx_data_iso()
952 cpl->len = htonl(len); in write_tx_data_iso()
953 cpl->reserved2_seglen_offset = htonl(0); in write_tx_data_iso()
954 cpl->datasn_offset = htonl(0); in write_tx_data_iso()
955 cpl->buffer_offset = htonl(0); in write_tx_data_iso()
956 cpl->reserved3 = 0; in write_tx_data_iso()
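mpdu and burst_size in the CPL above are programmed in 4-byte units, hence the DIV_ROUND_UP by 4. A quick worked example (the mss and burst_size values are assumed):

#include <stdio.h>

#define DIV_ROUND_UP(x, y)	(((x) + (y) - 1) / (y))

int
main(void)
{
	int mss = 1460, burst_size = 8192;	/* assumed example values */

	/* cpl->mpdu and cpl->burst_size are in 4-byte units. */
	printf("mpdu=%d burst=%d\n",
	    DIV_ROUND_UP(mss, 4), DIV_ROUND_UP(burst_size, 4));	/* 365, 2048 */
	return (0);
}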
966 struct wrqe *wr; in write_iscsi_mbuf_wr() local
969 struct inpcb *inp = toep->inp; in write_iscsi_mbuf_wr()
978 tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS); in write_iscsi_mbuf_wr()
980 plen = sndptr->m_pkthdr.len; in write_iscsi_mbuf_wr()
982 ("raw WR len %u is greater than max WR len", plen)); in write_iscsi_mbuf_wr()
986 wr = alloc_wrqe(roundup2(plen, 16), &toep->ofld_txq->wrq); in write_iscsi_mbuf_wr()
987 if (__predict_false(wr == NULL)) in write_iscsi_mbuf_wr()
990 m_copydata(sndptr, 0, plen, wrtod(wr)); in write_iscsi_mbuf_wr()
991 return (wr); in write_iscsi_mbuf_wr()
1003 for (m = sndptr; m != NULL; m = m->m_next) { in write_iscsi_mbuf_wr()
1006 if (m->m_flags & M_EXTPG) in write_iscsi_mbuf_wr()
1008 m->m_len); in write_iscsi_mbuf_wr()
1010 n = sglist_count(mtod(m, void *), m->m_len); in write_iscsi_mbuf_wr()
1013 plen += m->m_len; in write_iscsi_mbuf_wr()
1022 if (m->m_flags & M_EXTPG) in write_iscsi_mbuf_wr()
1028 if (__predict_false(toep->flags & TPF_FIN_SENT)) in write_iscsi_mbuf_wr()
1032 * We have a PDU to send. All of it goes out in one WR so 'm' in write_iscsi_mbuf_wr()
1037 MPASS(sndptr->m_pkthdr.len == plen); in write_iscsi_mbuf_wr()
1039 shove = !(tp->t_flags & TF_MORETOCOME); in write_iscsi_mbuf_wr()
1049 npdu = iso ? howmany(plen - ISCSI_BHS_SIZE, iso_mss) : 1; in write_iscsi_mbuf_wr()
1052 adjusted_plen += ISCSI_BHS_SIZE * (npdu - 1); in write_iscsi_mbuf_wr()
1065 ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8; in write_iscsi_mbuf_wr()
1068 wr = alloc_wrqe(roundup2(wr_len, 16), &toep->ofld_txq->wrq); in write_iscsi_mbuf_wr()
1069 if (wr == NULL) { in write_iscsi_mbuf_wr()
1073 txwr = wrtod(wr); in write_iscsi_mbuf_wr()
1074 credits = howmany(wr->wr_len, 16); in write_iscsi_mbuf_wr()
1081 MPASS(plen == sndptr->m_pkthdr.len); in write_iscsi_mbuf_wr()
1101 KASSERT(toep->tx_credits >= credits, in write_iscsi_mbuf_wr()
1103 "toep->tx_credits %u tx_credits %u nsegs %u " in write_iscsi_mbuf_wr()
1105 toep->tx_credits, tx_credits, nsegs, max_nsegs, iso)); in write_iscsi_mbuf_wr()
1107 tp->snd_nxt += adjusted_plen; in write_iscsi_mbuf_wr()
1108 tp->snd_max += adjusted_plen; in write_iscsi_mbuf_wr()
1110 counter_u64_add(toep->ofld_txq->tx_iscsi_pdus, npdu); in write_iscsi_mbuf_wr()
1111 counter_u64_add(toep->ofld_txq->tx_iscsi_octets, plen); in write_iscsi_mbuf_wr()
1113 counter_u64_add(toep->ofld_txq->tx_iscsi_iso_wrs, 1); in write_iscsi_mbuf_wr()
1115 return (wr); in write_iscsi_mbuf_wr()
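For ISO (line 1049 onward), every slice after the first repeats the 48-byte iSCSI BHS on the wire, so sequence-space consumption exceeds the mbuf payload. A worked example, assuming adjusted_plen starts at plen (its initialization is elided from this listing):

#include <stdio.h>

#define howmany(x, y)	(((x) + ((y) - 1)) / (y))
#define ISCSI_BHS_SIZE	48

int
main(void)
{
	int plen = 8 * 1024 + ISCSI_BHS_SIZE;	/* assumed: 8KB data + BHS */
	int iso_mss = 4096;			/* assumed per-slice payload */

	int npdu = howmany(plen - ISCSI_BHS_SIZE, iso_mss);
	int adjusted_plen = plen + ISCSI_BHS_SIZE * (npdu - 1);

	printf("npdu=%d adjusted_plen=%d\n", npdu, adjusted_plen);	/* 2, 8288 */
	return (0);
}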
1123 struct wrqe *wr; in t4_push_pdus() local
1125 struct inpcb *inp = toep->inp; in t4_push_pdus()
1126 struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx]; in t4_push_pdus()
1127 struct mbufq *pduq = &toep->ulp_pduq; in t4_push_pdus()
1130 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, in t4_push_pdus()
1131 ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid)); in t4_push_pdus()
1135 if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) in t4_push_pdus()
1142 if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) { in t4_push_pdus()
1149 struct socket *so = inp->inp_socket; in t4_push_pdus()
1150 struct sockbuf *sb = &so->so_snd; in t4_push_pdus()
1155 * transition from a non-zero value to either another in t4_push_pdus()
1156 * non-zero value or zero. Once it is zero it should in t4_push_pdus()
1170 drop -= min(sbu, drop); in t4_push_pdus()
1174 rqdrop_locked(&toep->ulp_pdu_reclaimq, drop); in t4_push_pdus()
1178 wr = write_iscsi_mbuf_wr(toep, sndptr); in t4_push_pdus()
1179 if (wr == NULL) { in t4_push_pdus()
1180 toep->flags |= TPF_TX_SUSPENDED; in t4_push_pdus()
1184 plen = sndptr->m_pkthdr.len; in t4_push_pdus()
1185 credits = howmany(wr->wr_len, 16); in t4_push_pdus()
1186 KASSERT(toep->tx_credits >= credits, in t4_push_pdus()
1191 mbufq_enqueue(&toep->ulp_pdu_reclaimq, m); in t4_push_pdus()
1193 toep->tx_credits -= credits; in t4_push_pdus()
1194 toep->tx_nocompl += credits; in t4_push_pdus()
1195 toep->plen_nocompl += plen; in t4_push_pdus()
1198 * Ensure there are enough credits for a full-sized WR in t4_push_pdus()
1199 * as page pod WRs can be full-sized. in t4_push_pdus()
1201 if (toep->tx_credits <= SGE_MAX_WR_LEN * 5 / 4 && in t4_push_pdus()
1202 toep->tx_nocompl >= toep->tx_total / 4) { in t4_push_pdus()
1203 wrhdr = wrtod(wr); in t4_push_pdus()
1204 wrhdr->hi |= htobe32(F_FW_WR_COMPL); in t4_push_pdus()
1205 toep->tx_nocompl = 0; in t4_push_pdus()
1206 toep->plen_nocompl = 0; in t4_push_pdus()
1209 toep->flags |= TPF_TX_DATA_SENT; in t4_push_pdus()
1210 if (toep->tx_credits < MIN_OFLD_TX_CREDITS) in t4_push_pdus()
1211 toep->flags |= TPF_TX_SUSPENDED; in t4_push_pdus()
1213 KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__)); in t4_push_pdus()
1214 txsd->plen = plen; in t4_push_pdus()
1215 txsd->tx_credits = credits; in t4_push_pdus()
1217 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) { in t4_push_pdus()
1218 toep->txsd_pidx = 0; in t4_push_pdus()
1219 txsd = &toep->txsd[0]; in t4_push_pdus()
1221 toep->txsd_avail--; in t4_push_pdus()
1223 t4_l2t_send(sc, wr, toep->l2te); in t4_push_pdus()
1227 if (mbufq_first(pduq) == NULL && toep->flags & TPF_SEND_FIN) in t4_push_pdus()
1237 else if (toep->flags & TPF_KTLS) in t4_push_data()
1246 struct adapter *sc = tod->tod_softc; in t4_tod_output()
1250 struct toepcb *toep = tp->t_toe; in t4_tod_output()
1253 KASSERT((inp->inp_flags & INP_DROPPED) == 0, in t4_tod_output()
1265 struct adapter *sc = tod->tod_softc; in t4_send_fin()
1269 struct toepcb *toep = tp->t_toe; in t4_send_fin()
1272 KASSERT((inp->inp_flags & INP_DROPPED) == 0, in t4_send_fin()
1276 toep->flags |= TPF_SEND_FIN; in t4_send_fin()
1277 if (tp->t_state >= TCPS_ESTABLISHED) in t4_send_fin()
1286 struct adapter *sc = tod->tod_softc; in t4_send_rst()
1290 struct toepcb *toep = tp->t_toe; in t4_send_rst()
1293 KASSERT((inp->inp_flags & INP_DROPPED) == 0, in t4_send_rst()
1298 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, in t4_send_rst()
1300 __func__, toep->tid, tcpstates[tp->t_state])); in t4_send_rst()
1312 struct adapter *sc = iq->adapter; in do_peer_close()
1316 struct inpcb *inp = toep->inp; in do_peer_close()
1328 if (__predict_false(toep->flags & TPF_SYNQE)) { in do_peer_close()
1334 MPASS(toep->flags & TPF_ABORT_SHUTDOWN); in do_peer_close()
1336 toep, toep->flags); in do_peer_close()
1340 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); in do_peer_close()
1342 CURVNET_SET(toep->vnet); in do_peer_close()
1349 __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags, in do_peer_close()
1350 toep->ddp.flags, inp); in do_peer_close()
1352 if (toep->flags & TPF_ABORT_SHUTDOWN) in do_peer_close()
1357 if (__predict_false(toep->ddp.flags & in do_peer_close()
1359 handle_ddp_close(toep, tp, cpl->rcv_nxt); in do_peer_close()
1362 so = inp->inp_socket; in do_peer_close()
1374 KASSERT(tp->rcv_nxt + 1 == be32toh(cpl->rcv_nxt), in do_peer_close()
1375 ("%s: rcv_nxt mismatch: %u %u", __func__, tp->rcv_nxt, in do_peer_close()
1376 be32toh(cpl->rcv_nxt))); in do_peer_close()
1379 tp->rcv_nxt = be32toh(cpl->rcv_nxt); in do_peer_close()
1381 switch (tp->t_state) { in do_peer_close()
1383 tp->t_starttime = ticks; in do_peer_close()
1395 restore_so_proto(so, inp->inp_vflag & INP_IPV6); in do_peer_close()
1408 __func__, tid, tp->t_state); in do_peer_close()
1424 struct adapter *sc = iq->adapter; in do_close_con_rpl()
1428 struct inpcb *inp = toep->inp; in do_close_con_rpl()
1439 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); in do_close_con_rpl()
1441 CURVNET_SET(toep->vnet); in do_close_con_rpl()
1447 __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags); in do_close_con_rpl()
1449 if (toep->flags & TPF_ABORT_SHUTDOWN) in do_close_con_rpl()
1452 so = inp->inp_socket; in do_close_con_rpl()
1453 tp->snd_una = be32toh(cpl->snd_nxt) - 1; /* exclude FIN */ in do_close_con_rpl()
1455 switch (tp->t_state) { in do_close_con_rpl()
1457 restore_so_proto(so, inp->inp_vflag & INP_IPV6); in do_close_con_rpl()
1475 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) in do_close_con_rpl()
1483 __func__, tid, tcpstates[tp->t_state]); in do_close_con_rpl()
1496 struct wrqe *wr; in send_abort_rpl() local
1499 wr = alloc_wrqe(sizeof(*cpl), &ofld_txq->wrq); in send_abort_rpl()
1500 if (wr == NULL) { in send_abort_rpl()
1504 cpl = wrtod(wr); in send_abort_rpl()
1507 cpl->cmd = rst_status; in send_abort_rpl()
1509 t4_wrq_tx(sc, wr); in send_abort_rpl()
1518 return (tp->t_state == TCPS_CLOSE_WAIT ? EPIPE : ECONNRESET); in abort_status_to_errno()
1535 struct adapter *sc = iq->adapter; in do_abort_req()
1539 struct sge_ofld_txq *ofld_txq = toep->ofld_txq; in do_abort_req()
1551 if (toep->flags & TPF_SYNQE) in do_abort_req()
1554 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); in do_abort_req()
1556 if (negative_advice(cpl->status)) { in do_abort_req()
1558 __func__, cpl->status, tid, toep->flags); in do_abort_req()
1562 inp = toep->inp; in do_abort_req()
1563 CURVNET_SET(toep->vnet); in do_abort_req()
1571 __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags, in do_abort_req()
1572 inp->inp_flags, cpl->status); in do_abort_req()
1579 if (toep->flags & TPF_ABORT_SHUTDOWN) { in do_abort_req()
1583 toep->flags |= TPF_ABORT_SHUTDOWN; in do_abort_req()
1585 if ((inp->inp_flags & INP_DROPPED) == 0) { in do_abort_req()
1586 struct socket *so = inp->inp_socket; in do_abort_req()
1590 cpl->status)); in do_abort_req()
1593 INP_WLOCK(inp); /* re-acquire */ in do_abort_req()
1610 struct adapter *sc = iq->adapter; in do_abort_rpl()
1614 struct inpcb *inp = toep->inp; in do_abort_rpl()
1623 if (toep->flags & TPF_SYNQE) in do_abort_rpl()
1626 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); in do_abort_rpl()
1629 __func__, tid, toep, inp, cpl->status); in do_abort_rpl()
1631 KASSERT(toep->flags & TPF_ABORT_SHUTDOWN, in do_abort_rpl()
1643 struct adapter *sc = iq->adapter; in do_rx_data()
1647 struct inpcb *inp = toep->inp; in do_rx_data()
1655 if (__predict_false(toep->flags & TPF_SYNQE)) { in do_rx_data()
1661 MPASS(toep->flags & TPF_ABORT_SHUTDOWN); in do_rx_data()
1663 toep, toep->flags); in do_rx_data()
1668 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); in do_rx_data()
1672 len = m->m_pkthdr.len; in do_rx_data()
1675 if (inp->inp_flags & INP_DROPPED) { in do_rx_data()
1677 __func__, tid, len, inp->inp_flags); in do_rx_data()
1686 toep->flags & TPF_TLS_RECEIVE)) { in do_rx_data()
1694 if (__predict_false(tp->rcv_nxt != be32toh(cpl->seq))) in do_rx_data()
1695 ddp_placed = be32toh(cpl->seq) - tp->rcv_nxt; in do_rx_data()
1697 tp->rcv_nxt += len; in do_rx_data()
1698 if (tp->rcv_wnd < len) { in do_rx_data()
1703 tp->rcv_wnd -= len; in do_rx_data()
1704 tp->t_rcvtime = ticks; in do_rx_data()
1709 sb = &so->so_rcv; in do_rx_data()
1712 if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) { in do_rx_data()
1721 CURVNET_SET(toep->vnet); in do_rx_data()
1734 MPASS(toep->vnet == so->so_vnet); in do_rx_data()
1735 CURVNET_SET(toep->vnet); in do_rx_data()
1736 if (sb->sb_flags & SB_AUTOSIZE && in do_rx_data()
1738 sb->sb_hiwat < V_tcp_autorcvbuf_max && in do_rx_data()
1740 unsigned int hiwat = sb->sb_hiwat; in do_rx_data()
1741 unsigned int newsize = min(hiwat + sc->tt.autorcvbuf_inc, in do_rx_data()
1745 sb->sb_flags &= ~SB_AUTOSIZE; in do_rx_data()
1749 int changed = !(toep->ddp.flags & DDP_ON) ^ cpl->ddp_off; in do_rx_data()
1751 if (toep->ddp.waiting_count != 0 || toep->ddp.active_count != 0) in do_rx_data()
1752 CTR3(KTR_CXGBE, "%s: tid %u, non-ddp rx (%d bytes)", in do_rx_data()
1756 if (toep->ddp.flags & DDP_SC_REQ) in do_rx_data()
1757 toep->ddp.flags ^= DDP_ON | DDP_SC_REQ; in do_rx_data()
1758 else if (cpl->ddp_off == 1) { in do_rx_data()
1760 toep->ddp.flags &= ~DDP_ON; in do_rx_data()
1773 if (toep->ddp.flags & DDP_ON) { in do_rx_data()
1785 t4_rcvd_locked(&toep->td->tod, tp); in do_rx_data()
1788 (toep->ddp.flags & DDP_AIO) != 0 && toep->ddp.waiting_count > 0 && in do_rx_data()
1794 if (toep->flags & TPF_TLS_STARTING) in do_rx_data()
1809 struct adapter *sc = iq->adapter; in do_fw4_ack()
1816 uint8_t credits = cpl->credits; in do_fw4_ack()
1827 if (__predict_false(toep->flags & TPF_SYNQE)) { in do_fw4_ack()
1828 KASSERT(toep->flags & TPF_ABORT_SHUTDOWN, in do_fw4_ack()
1833 inp = toep->inp; in do_fw4_ack()
1838 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); in do_fw4_ack()
1842 if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) { in do_fw4_ack()
1847 KASSERT((inp->inp_flags & INP_DROPPED) == 0, in do_fw4_ack()
1848 ("%s: inp_flags 0x%x", __func__, inp->inp_flags)); in do_fw4_ack()
1852 if (cpl->flags & CPL_FW4_ACK_FLAGS_SEQVAL) { in do_fw4_ack()
1853 tcp_seq snd_una = be32toh(cpl->snd_una); in do_fw4_ack()
1856 if (__predict_false(SEQ_LT(snd_una, tp->snd_una))) { in do_fw4_ack()
1859 __func__, snd_una, toep->tid, tp->snd_una); in do_fw4_ack()
1863 if (tp->snd_una != snd_una) { in do_fw4_ack()
1864 tp->snd_una = snd_una; in do_fw4_ack()
1865 tp->ts_recent_age = tcp_ts_getticks(); in do_fw4_ack()
1872 so = inp->inp_socket; in do_fw4_ack()
1873 txsd = &toep->txsd[toep->txsd_cidx]; in do_fw4_ack()
1876 KASSERT(credits >= txsd->tx_credits, in do_fw4_ack()
1878 credits -= txsd->tx_credits; in do_fw4_ack()
1879 toep->tx_credits += txsd->tx_credits; in do_fw4_ack()
1880 plen += txsd->plen; in do_fw4_ack()
1882 toep->txsd_avail++; in do_fw4_ack()
1883 KASSERT(toep->txsd_avail <= toep->txsd_total, in do_fw4_ack()
1885 if (__predict_false(++toep->txsd_cidx == toep->txsd_total)) { in do_fw4_ack()
1886 txsd = &toep->txsd[0]; in do_fw4_ack()
1887 toep->txsd_cidx = 0; in do_fw4_ack()
1891 if (toep->tx_credits == toep->tx_total) { in do_fw4_ack()
1892 toep->tx_nocompl = 0; in do_fw4_ack()
1893 toep->plen_nocompl = 0; in do_fw4_ack()
1896 if (toep->flags & TPF_TX_SUSPENDED && in do_fw4_ack()
1897 toep->tx_credits >= toep->tx_total / 4) { in do_fw4_ack()
1902 toep->flags &= ~TPF_TX_SUSPENDED; in do_fw4_ack()
1903 CURVNET_SET(toep->vnet); in do_fw4_ack()
1907 struct sockbuf *sb = &so->so_snd; in do_fw4_ack()
1921 plen -= min(sbu, plen); in do_fw4_ack()
1924 rqdrop_locked(&toep->ulp_pdu_reclaimq, plen); in do_fw4_ack()
1931 if (!TAILQ_EMPTY(&toep->aiotx_jobq)) in do_fw4_ack()
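The credit-return path in do_fw4_ack() (lines 1873-1887 above) retires whole software tx descriptors from the consumer index until the ACKed credit count is consumed, accumulating the payload bytes they covered. A standalone sketch with assumed minimal types:

#include <stdio.h>

struct sdesc { int tx_credits, plen; };

struct ring {
	struct sdesc txsd[4];
	int cidx, total, avail, tx_credits;
};

/* 'credits' is the count the firmware just ACKed; returns bytes ACKed. */
static int
reclaim(struct ring *r, int credits)
{
	int plen = 0;

	while (credits > 0) {
		struct sdesc *txsd = &r->txsd[r->cidx];

		/* Firmware ACKs whole descriptors, never partial ones. */
		credits -= txsd->tx_credits;
		r->tx_credits += txsd->tx_credits;
		plen += txsd->plen;
		r->avail++;
		if (++r->cidx == r->total)
			r->cidx = 0;
	}
	return (plen);
}

int
main(void)
{
	struct ring r = { .txsd = { { 2, 100 }, { 3, 200 } }, .total = 4 };

	printf("plen=%d cidx=%d\n", reclaim(&r, 5), r.cidx);	/* 300, 2 */
	return (0);
}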
1947 struct wrqe *wr; in t4_set_tcb_field() local
1956 wr = alloc_wrqe(sizeof(*req), wrq); in t4_set_tcb_field()
1957 if (wr == NULL) { in t4_set_tcb_field()
1961 req = wrtod(wr); in t4_set_tcb_field()
1963 INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, toep->tid); in t4_set_tcb_field()
1964 req->reply_ctrl = htobe16(V_QUEUENO(toep->ofld_rxq->iq.abs_id)); in t4_set_tcb_field()
1966 req->reply_ctrl |= htobe16(F_NO_REPLY); in t4_set_tcb_field()
1967 req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(cookie)); in t4_set_tcb_field()
1968 req->mask = htobe64(mask); in t4_set_tcb_field()
1969 req->val = htobe64(val); in t4_set_tcb_field()
1970 if (wrq->eq.type == EQ_OFLD) { in t4_set_tcb_field()
1971 txsd = &toep->txsd[toep->txsd_pidx]; in t4_set_tcb_field()
1972 txsd->tx_credits = howmany(sizeof(*req), 16); in t4_set_tcb_field()
1973 txsd->plen = 0; in t4_set_tcb_field()
1974 KASSERT(toep->tx_credits >= txsd->tx_credits && in t4_set_tcb_field()
1975 toep->txsd_avail > 0, in t4_set_tcb_field()
1977 toep->tx_credits)); in t4_set_tcb_field()
1978 toep->tx_credits -= txsd->tx_credits; in t4_set_tcb_field()
1979 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) in t4_set_tcb_field()
1980 toep->txsd_pidx = 0; in t4_set_tcb_field()
1981 toep->txsd_avail--; in t4_set_tcb_field()
1984 t4_wrq_tx(sc, wr); in t4_set_tcb_field()
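CPL_SET_TCB_FIELD's mask/val pair updates a single 64-bit TCB word read-modify-write style: only bits under the mask change. The hardware performs this internally; the following is a conceptual model of those semantics (an assumption for illustration, not driver code):

#include <stdint.h>
#include <stdio.h>

static uint64_t
tcb_apply(uint64_t old, uint64_t mask, uint64_t val)
{
	return ((old & ~mask) | (val & mask));
}

int
main(void)
{
	uint64_t word = 0x1122334455667788ULL;

	printf("%#llx\n", (unsigned long long)
	    tcb_apply(word, 0xff00ULL, 0xaa00ULL));	/* 0x112233445566aa88 */
	return (0);
}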
2013 * Use the 'backend1' field in AIO jobs to hold an error that should
2016 * 'backend4' field to hold a reference count on the job.
2033 so = job->fd_file->f_data; in jobtotid()
2035 toep = tp->t_toe; in jobtotid()
2036 return (toep->tid); in jobtotid()
2046 if (refcount_release(&job->aio_refs) == 0) in aiotx_free_job()
2049 error = (intptr_t)job->aio_error; in aiotx_free_job()
2050 status = job->aio_sent; in aiotx_free_job()
2060 aio_complete(job, -1, error); in aiotx_free_job()
2062 job->msgsnd = 1; in aiotx_free_job()
2074 job = m->m_ext.ext_arg1; in aiotx_free_pgs()
2077 m->m_len, jobtotid(job)); in aiotx_free_pgs()
2080 for (int i = 0; i < m->m_epg_npgs; i++) { in aiotx_free_pgs()
2081 pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]); in aiotx_free_pgs()
2102 KASSERT(job->aio_sent + len <= job->uaiocb.aio_nbytes, in alloc_aiotx_mbuf()
2111 vm = job->userproc->p_vmspace; in alloc_aiotx_mbuf()
2112 map = &vm->vm_map; in alloc_aiotx_mbuf()
2113 start = (uintptr_t)job->uaiocb.aio_buf + job->aio_sent; in alloc_aiotx_mbuf()
2119 mlen = imin(len, MBUF_PEXT_MAX_PGS * PAGE_SIZE - pgoff); in alloc_aiotx_mbuf()
2130 m->m_epg_1st_off = pgoff; in alloc_aiotx_mbuf()
2131 m->m_epg_npgs = npages; in alloc_aiotx_mbuf()
2136 m->m_epg_last_len = mlen; in alloc_aiotx_mbuf()
2138 m->m_epg_last_len = mlen - (PAGE_SIZE - pgoff) - in alloc_aiotx_mbuf()
2139 (npages - 2) * PAGE_SIZE; in alloc_aiotx_mbuf()
2142 m->m_epg_pa[i] = VM_PAGE_TO_PHYS(pgs[i]); in alloc_aiotx_mbuf()
2144 m->m_len = mlen; in alloc_aiotx_mbuf()
2145 m->m_ext.ext_size = npages * PAGE_SIZE; in alloc_aiotx_mbuf()
2146 m->m_ext.ext_arg1 = job; in alloc_aiotx_mbuf()
2147 refcount_acquire(&job->aio_refs); in alloc_aiotx_mbuf()
2157 last->m_next = m; in alloc_aiotx_mbuf()
2160 len -= mlen; in alloc_aiotx_mbuf()
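The m_epg_last_len computation at lines 2136-2139 above falls out of the page geometry: the first page contributes PAGE_SIZE - pgoff bytes, the middle npages - 2 pages are full, and the last page holds the rest. A standalone check (the buffer offset and length are assumed examples):

#include <stdio.h>

#define PAGE_SIZE	4096

int
main(void)
{
	/* Assumed example: 10000 bytes starting 100 bytes into a page. */
	int pgoff = 100, mlen = 10000;
	int npages = (pgoff + mlen + PAGE_SIZE - 1) / PAGE_SIZE;	/* 3 */

	int last_len = (npages == 1) ? mlen :
	    mlen - (PAGE_SIZE - pgoff) - (npages - 2) * PAGE_SIZE;

	printf("npages=%d last_len=%d\n", npages, last_len);	/* 3, 1908 */
	return (0);
}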
2179 sb = &so->so_snd; in t4_aiotx_process_job()
2184 error = mac_socket_check_send(job->fd_file->f_cred, so); in t4_aiotx_process_job()
2196 if (so->so_snd.sb_state & SBS_CANTSENDMORE) { in t4_aiotx_process_job()
2199 if ((so->so_options & SO_NOSIGPIPE) == 0) { in t4_aiotx_process_job()
2200 PROC_LOCK(job->userproc); in t4_aiotx_process_job()
2201 kern_psignal(job->userproc, SIGPIPE); in t4_aiotx_process_job()
2202 PROC_UNLOCK(job->userproc); in t4_aiotx_process_job()
2207 if (so->so_error) { in t4_aiotx_process_job()
2208 error = so->so_error; in t4_aiotx_process_job()
2209 so->so_error = 0; in t4_aiotx_process_job()
2214 if ((so->so_state & SS_ISCONNECTED) == 0) { in t4_aiotx_process_job()
2220 if (sbspace(sb) < sb->sb_lowat) { in t4_aiotx_process_job()
2221 MPASS(job->aio_sent == 0 || !(so->so_state & SS_NBIO)); in t4_aiotx_process_job()
2233 TAILQ_INSERT_HEAD(&toep->aiotx_jobq, job, list); in t4_aiotx_process_job()
2244 if (len > job->uaiocb.aio_nbytes - job->aio_sent) { in t4_aiotx_process_job()
2245 len = job->uaiocb.aio_nbytes - job->aio_sent; in t4_aiotx_process_job()
2249 if (len > toep->params.sndbuf) { in t4_aiotx_process_job()
2250 len = toep->params.sndbuf; in t4_aiotx_process_job()
2255 if (!TAILQ_EMPTY(&toep->aiotx_jobq)) in t4_aiotx_process_job()
2269 inp = toep->inp; in t4_aiotx_process_job()
2271 if (inp->inp_flags & INP_DROPPED) { in t4_aiotx_process_job()
2279 job->aio_sent += sent; in t4_aiotx_process_job()
2280 counter_u64_add(toep->ofld_txq->tx_aio_octets, sent); in t4_aiotx_process_job()
2285 if (!(inp->inp_flags & INP_DROPPED)) { in t4_aiotx_process_job()
2288 tp->t_flags |= TF_MORETOCOME; in t4_aiotx_process_job()
2293 error = -error; in t4_aiotx_process_job()
2297 tp->t_flags &= ~TF_MORETOCOME; in t4_aiotx_process_job()
2313 if (job->aio_sent < job->uaiocb.aio_nbytes && in t4_aiotx_process_job()
2314 !(so->so_state & SS_NBIO)) { in t4_aiotx_process_job()
2321 TAILQ_INSERT_HEAD(&toep->aiotx_jobq, job, list); in t4_aiotx_process_job()
2328 * hold a reference, but this drops the reference that the in t4_aiotx_process_job()
2333 counter_u64_add(toep->ofld_txq->tx_aio_jobs, 1); in t4_aiotx_process_job()
2337 job->aio_error = (void *)(intptr_t)error; in t4_aiotx_process_job()
2352 so = toep->aiotx_so; in t4_aiotx_task()
2353 CURVNET_SET(toep->vnet); in t4_aiotx_task()
2355 SOCKBUF_LOCK(&so->so_snd); in t4_aiotx_task()
2356 while (!TAILQ_EMPTY(&toep->aiotx_jobq) && sowriteable(so)) { in t4_aiotx_task()
2357 job = TAILQ_FIRST(&toep->aiotx_jobq); in t4_aiotx_task()
2358 TAILQ_REMOVE(&toep->aiotx_jobq, job, list); in t4_aiotx_task()
2364 toep->aiotx_so = NULL; in t4_aiotx_task()
2365 SOCKBUF_UNLOCK(&so->so_snd); in t4_aiotx_task()
2377 SOCKBUF_LOCK_ASSERT(&toep->inp->inp_socket->so_snd); in t4_aiotx_queue_toep()
2380 __func__, toep->tid, toep->aiotx_so != NULL ? "true" : "false"); in t4_aiotx_queue_toep()
2382 if (toep->aiotx_so != NULL) in t4_aiotx_queue_toep()
2385 toep->aiotx_so = so; in t4_aiotx_queue_toep()
2387 soaio_enqueue(&toep->aiotx_task); in t4_aiotx_queue_toep()
2398 so = job->fd_file->f_data; in t4_aiotx_cancel()
2400 toep = tp->t_toe; in t4_aiotx_cancel()
2401 MPASS(job->uaiocb.aio_lio_opcode == LIO_WRITE); in t4_aiotx_cancel()
2402 sb = &so->so_snd; in t4_aiotx_cancel()
2406 TAILQ_REMOVE(&toep->aiotx_jobq, job, list); in t4_aiotx_cancel()
2409 job->aio_error = (void *)(intptr_t)ECANCELED; in t4_aiotx_cancel()
2417 struct toepcb *toep = tp->t_toe; in t4_aio_queue_aiotx()
2418 struct adapter *sc = td_adapter(toep->td); in t4_aio_queue_aiotx()
2421 if (job->uaiocb.aio_lio_opcode != LIO_WRITE) in t4_aio_queue_aiotx()
2424 if (!sc->tt.tx_zcopy) in t4_aio_queue_aiotx()
2430 SOCKBUF_LOCK(&so->so_snd); in t4_aio_queue_aiotx()
2432 CTR3(KTR_CXGBE, "%s: queueing %p for tid %u", __func__, job, toep->tid); in t4_aio_queue_aiotx()
2436 refcount_init(&job->aio_refs, 1); in t4_aio_queue_aiotx()
2437 TAILQ_INSERT_TAIL(&toep->aiotx_jobq, job, list); in t4_aio_queue_aiotx()
2440 SOCKBUF_UNLOCK(&so->so_snd); in t4_aio_queue_aiotx()
2448 TAILQ_INIT(&toep->aiotx_jobq); in aiotx_init_toep()
2449 TASK_INIT(&toep->aiotx_task, 0, t4_aiotx_task, toep); in aiotx_init_toep()