Lines Matching +full:wr +full:- +full:hold

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2017-2018 Chelsio Communications, Inc.
66 struct adapter *sc = td_adapter(toep->td); in t4_set_tls_tcb_field()
68 t4_set_tcb_field(sc, &toep->ofld_txq->wrq, toep, word, mask, val, 0, 0); in t4_set_tls_tcb_field()
76 return (sc->tt.tls && sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS); in can_tls_offload()
82 struct tls_ofld_info *tls_ofld = &toep->tls; in tls_tx_key()
84 return (tls_ofld->tx_key_addr >= 0); in tls_tx_key()
91 struct adapter *sc = td_adapter(toep->td); in t4_set_rx_quiesce()
93 t4_set_tcb_field(sc, &toep->ofld_txq->wrq, toep, W_TCB_T_FLAGS, in t4_set_rx_quiesce()
97 /* Clear TF_RX_QUIESCE to re-enable receive. */
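
The two snippets above pause and resume the receive side by flipping a single flag bit in the connection's hardware TCB. A minimal userspace sketch of the read-modify-write the firmware applies for such an update (the bit position below is hypothetical, purely for illustration):

#include <stdint.h>
#include <stdio.h>

#define S_TF_RX_QUIESCE		15ULL	/* hypothetical bit position */
#define V_TF_RX_QUIESCE(x)	((uint64_t)(x) << S_TF_RX_QUIESCE)

/* The firmware applies: word = (word & ~mask) | (val & mask). */
static void
set_tcb_field(uint64_t *word, uint64_t mask, uint64_t val)
{
	*word = (*word & ~mask) | (val & mask);
}

int
main(void)
{
	uint64_t t_flags = 0;

	/* Pause receive: set TF_RX_QUIESCE. */
	set_tcb_field(&t_flags, V_TF_RX_QUIESCE(1), V_TF_RX_QUIESCE(1));
	printf("quiesced: t_flags=%#llx\n", (unsigned long long)t_flags);

	/* Clear TF_RX_QUIESCE to re-enable receive. */
	set_tcb_field(&t_flags, V_TF_RX_QUIESCE(1), 0);
	printf("resumed:  t_flags=%#llx\n", (unsigned long long)t_flags);
	return (0);
}
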
127 struct tls_ofld_info *tls_ofld = &toep->tls; in clear_tls_keyid()
128 struct adapter *sc = td_adapter(toep->td); in clear_tls_keyid()
130 if (tls_ofld->rx_key_addr >= 0) { in clear_tls_keyid()
131 t4_free_tls_keyid(sc, tls_ofld->rx_key_addr); in clear_tls_keyid()
132 tls_ofld->rx_key_addr = -1; in clear_tls_keyid()
134 if (tls_ofld->tx_key_addr >= 0) { in clear_tls_keyid()
135 t4_free_tls_keyid(sc, tls_ofld->tx_key_addr); in clear_tls_keyid()
136 tls_ofld->tx_key_addr = -1; in clear_tls_keyid()
145 return (tls->params.max_frame_len <= 8192 ? plen : FC_TP_PLEN_MAX); in get_tp_plen_max()
148 /* Send a request to program the key ID. */
153 struct tls_ofld_info *tls_ofld = &toep->tls; in tls_program_key_id()
154 struct adapter *sc = td_adapter(toep->td); in tls_program_key_id()
157 struct wrqe *wr; in tls_program_key_id() local
169 if (toep->txsd_avail == 0) in tls_program_key_id()
176 wr = alloc_wrqe(TLS_KEY_WR_SZ, &toep->ofld_txq->wrq); in tls_program_key_id()
177 if (wr == NULL) { in tls_program_key_id()
181 kwr = wrtod(wr); in tls_program_key_id()
184 t4_write_tlskey_wr(tls, direction, toep->tid, F_FW_WR_COMPL, keyid, in tls_program_key_id()
188 tls_ofld->tx_key_addr = keyid; in tls_program_key_id()
190 tls_ofld->rx_key_addr = keyid; in tls_program_key_id()
193 txsd = &toep->txsd[toep->txsd_pidx]; in tls_program_key_id()
194 txsd->tx_credits = DIV_ROUND_UP(TLS_KEY_WR_SZ, 16); in tls_program_key_id()
195 txsd->plen = 0; in tls_program_key_id()
196 toep->tx_credits -= txsd->tx_credits; in tls_program_key_id()
197 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) in tls_program_key_id()
198 toep->txsd_pidx = 0; in tls_program_key_id()
199 toep->txsd_avail--; in tls_program_key_id()
201 t4_wrq_tx(sc, wr); in tls_program_key_id()
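
The tail of tls_program_key_id() charges the key work request against the connection's transmit credits (one credit per 16 bytes) and consumes one software tx descriptor from a circular ring. A runnable sketch of that accounting, with made-up sizes:

#include <stdio.h>

#define TLS_KEY_WR_SZ		368	/* hypothetical WR size in bytes */
#define DIV_ROUND_UP(x, y)	(((x) + (y) - 1) / (y))

int
main(void)
{
	unsigned tx_credits = 1024;	/* 16-byte credits available */
	unsigned txsd_pidx = 7, txsd_total = 8, txsd_avail = 3;

	/* One credit covers 16 bytes of work request. */
	unsigned credits = DIV_ROUND_UP(TLS_KEY_WR_SZ, 16);

	tx_credits -= credits;
	if (++txsd_pidx == txsd_total)	/* wrap the producer index */
		txsd_pidx = 0;
	txsd_avail--;

	printf("credits=%u pidx=%u avail=%u left=%u\n",
	    credits, txsd_pidx, txsd_avail, tx_credits);
	return (0);
}
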
209 struct adapter *sc = td_adapter(toep->td); in tls_alloc_ktls()
218 if ((toep->flags & TPF_TLS_STARTING) != 0) in tls_alloc_ktls()
231 switch (tls->params.cipher_algorithm) { in tls_alloc_ktls()
234 switch (tls->params.cipher_key_len) { in tls_alloc_ktls()
242 switch (tls->params.auth_algorithm) { in tls_alloc_ktls()
254 if (tls->params.iv_len != SALT_SIZE) { in tls_alloc_ktls()
257 switch (tls->params.cipher_key_len) { in tls_alloc_ktls()
273 if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE || in tls_alloc_ktls()
274 tls->params.tls_vminor < TLS_MINOR_VER_ONE || in tls_alloc_ktls()
275 tls->params.tls_vminor > TLS_MINOR_VER_TWO) { in tls_alloc_ktls()
281 if (toep->tls.tx_key_addr != -1) in tls_alloc_ktls()
284 if (toep->tls.rx_key_addr != -1) in tls_alloc_ktls()
293 toep->tls.scmd0.seqno_numivs = in tls_alloc_ktls()
303 toep->tls.scmd0.ivgen_hdrlen = in tls_alloc_ktls()
308 toep->tls.iv_len = explicit_iv_size; in tls_alloc_ktls()
309 toep->tls.frag_size = tls->params.max_frame_len; in tls_alloc_ktls()
310 toep->tls.fcplenmax = get_tp_plen_max(tls); in tls_alloc_ktls()
311 toep->tls.expn_per_ulp = tls->params.tls_hlen + in tls_alloc_ktls()
312 tls->params.tls_tlen; in tls_alloc_ktls()
313 toep->tls.pdus_per_ulp = 1; in tls_alloc_ktls()
314 toep->tls.adjusted_plen = toep->tls.expn_per_ulp + in tls_alloc_ktls()
315 tls->params.max_frame_len; in tls_alloc_ktls()
316 toep->tls.tx_key_info_size = t4_tls_key_info_size(tls); in tls_alloc_ktls()
318 toep->flags |= TPF_TLS_STARTING | TPF_TLS_RX_QUIESCING; in tls_alloc_ktls()
319 toep->tls.rx_version = tls->params.tls_vmajor << 8 | in tls_alloc_ktls()
320 tls->params.tls_vminor; in tls_alloc_ktls()
323 toep->tid); in tls_alloc_ktls()
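
tls_alloc_ktls() accepts only TLS 1.1 and 1.2 (major 3, minor 2..3 on the wire) and records the negotiated version as a packed 16-bit word. A sketch of the check and the packing; the constants below follow the values in sys/sys/ktls.h:

#include <stdint.h>
#include <stdio.h>

#define TLS_MAJOR_VER_ONE	3
#define TLS_MINOR_VER_ONE	2	/* TLS 1.1 */
#define TLS_MINOR_VER_TWO	3	/* TLS 1.2 */

int
main(void)
{
	uint8_t vmajor = TLS_MAJOR_VER_ONE, vminor = TLS_MINOR_VER_TWO;

	/* Reject anything outside TLS 1.1 .. 1.2. */
	if (vmajor != TLS_MAJOR_VER_ONE ||
	    vminor < TLS_MINOR_VER_ONE || vminor > TLS_MINOR_VER_TWO) {
		puts("unsupported version");
		return (1);
	}

	/* rx_version packs the two wire bytes into one 16-bit word. */
	uint16_t rx_version = (uint16_t)vmajor << 8 | vminor;
	printf("rx_version=%#x\n", rx_version);	/* 0x303 for TLS 1.2 */
	return (0);
}
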
333 struct tls_ofld_info *tls_ofld = &toep->tls; in tls_init_toep()
335 tls_ofld->rx_key_addr = -1; in tls_init_toep()
336 tls_ofld->tx_key_addr = -1; in tls_init_toep()
357 struct tls_ofld_info *tls_ofld = &toep->tls; in write_tlstx_wr()
360 txwr->op_to_immdlen = htobe32(V_WR_OP(FW_TLSTX_DATA_WR) | in write_tlstx_wr()
363 txwr->flowid_len16 = htobe32(V_FW_TLSTX_DATA_WR_FLOWID(toep->tid) | in write_tlstx_wr()
365 txwr->plen = htobe32(len); in write_tlstx_wr()
366 txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(ULP_MODE_TLS) | in write_tlstx_wr()
368 txwr->ctxloc_to_exp = htobe32(V_FW_TLSTX_DATA_WR_NUMIVS(1) | in write_tlstx_wr()
372 V_FW_TLSTX_DATA_WR_KEYSIZE(tls_ofld->tx_key_info_size >> 4)); in write_tlstx_wr()
373 txwr->mfs = htobe16(tls_ofld->frag_size); in write_tlstx_wr()
374 txwr->adjustedplen_pkd = htobe16( in write_tlstx_wr()
375 V_FW_TLSTX_DATA_WR_ADJUSTEDPLEN(tls_ofld->adjusted_plen)); in write_tlstx_wr()
376 txwr->expinplenmax_pkd = htobe16( in write_tlstx_wr()
377 V_FW_TLSTX_DATA_WR_EXPINPLENMAX(tls_ofld->expn_per_ulp)); in write_tlstx_wr()
378 txwr->pdusinplenmax_pkd = in write_tlstx_wr()
379 V_FW_TLSTX_DATA_WR_PDUSINPLENMAX(tls_ofld->pdus_per_ulp); in write_tlstx_wr()
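
Two unit conversions recur in these WR fields: lengths are carried in 16-byte units (the ">> 4" on tx_key_info_size here and in the ULP_TX_SC_MEMRD later in the listing), and key addresses in 32-byte units (the ">> 5" at file line 671). A trivial demonstration with hypothetical values:

#include <stdio.h>

int
main(void)
{
	unsigned tx_key_info_size = 160;	/* bytes, hypothetical */
	unsigned tx_key_addr = 0x8000;		/* byte address, hypothetical */

	printf("KEYSIZE field: %u (16-byte units)\n", tx_key_info_size >> 4);
	printf("MEMRD address: %#x (32-byte units)\n", tx_key_addr >> 5);
	return (0);
}
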
386 struct tls_ofld_info *tls_ofld = &toep->tls; in write_tlstx_cpl()
390 data_type = tls_content_type(tls_hdr->type); in write_tlstx_cpl()
391 cpl->op_to_seg_len = htobe32(V_CPL_TX_TLS_SFO_OPCODE(CPL_TX_TLS_SFO) | in write_tlstx_cpl()
394 cpl->pld_len = htobe32(plen); in write_tlstx_cpl()
396 cpl->type_protover = htobe32( in write_tlstx_cpl()
397 V_CPL_TX_TLS_SFO_TYPE(tls_hdr->type)); in write_tlstx_cpl()
398 cpl->seqno_numivs = htobe32(tls_ofld->scmd0.seqno_numivs | in write_tlstx_cpl()
400 cpl->ivgen_hdrlen = htobe32(tls_ofld->scmd0.ivgen_hdrlen); in write_tlstx_cpl()
401 cpl->scmd1 = htobe64(seqno); in write_tlstx_cpl()
410 MPASS(m->m_epg_npgs > 0); in count_ext_pgs_segs()
412 nextpa = m->m_epg_pa[0] + PAGE_SIZE; in count_ext_pgs_segs()
413 for (i = 1; i < m->m_epg_npgs; i++) { in count_ext_pgs_segs()
414 if (nextpa != m->m_epg_pa[i]) in count_ext_pgs_segs()
416 nextpa = m->m_epg_pa[i] + PAGE_SIZE; in count_ext_pgs_segs()
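
count_ext_pgs_segs() counts runs of physically contiguous pages in an M_EXTPG mbuf's page array; each discontiguity starts a new DSGL segment. A userspace rendering of the same loop (PAGE_SIZE assumed to be 4 KB):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096

static int
count_segs(const uint64_t *pa, int npgs)
{
	int i, nsegs = 1;			/* the first page opens a segment */
	uint64_t nextpa = pa[0] + PAGE_SIZE;

	for (i = 1; i < npgs; i++) {
		if (nextpa != pa[i])		/* discontiguity: new segment */
			nsegs++;
		nextpa = pa[i] + PAGE_SIZE;
	}
	return (nsegs);
}

int
main(void)
{
	/* Pages 0 and 1 are contiguous, page 2 is not: expect 2 segments. */
	uint64_t pa[] = { 0x10000, 0x11000, 0x40000 };

	printf("nsegs=%d\n", count_segs(pa, 3));
	return (0);
}
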
431 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | in write_ktlstx_sgl()
435 pa = m->m_epg_pa[0] + m->m_epg_1st_off; in write_ktlstx_sgl()
436 usgl->addr0 = htobe64(pa); in write_ktlstx_sgl()
437 len = m_epg_pagelen(m, 0, m->m_epg_1st_off); in write_ktlstx_sgl()
439 for (i = 1; i < m->m_epg_npgs; i++) { in write_ktlstx_sgl()
440 if (m->m_epg_pa[i] != pa) in write_ktlstx_sgl()
445 usgl->len0 = htobe32(len); in write_ktlstx_sgl()
447 nsegs--; in write_ktlstx_sgl()
450 j = -1; in write_ktlstx_sgl()
451 for (; i < m->m_epg_npgs; i++) { in write_ktlstx_sgl()
452 if (j == -1 || m->m_epg_pa[i] != pa) { in write_ktlstx_sgl()
454 usgl->sge[j / 2].len[j & 1] = htobe32(len); in write_ktlstx_sgl()
457 nsegs--; in write_ktlstx_sgl()
459 pa = m->m_epg_pa[i]; in write_ktlstx_sgl()
460 usgl->sge[j / 2].addr[j & 1] = htobe64(pa); in write_ktlstx_sgl()
469 usgl->sge[j / 2].len[j & 1] = htobe32(len); in write_ktlstx_sgl()
472 usgl->sge[j / 2].len[1] = htobe32(0); in write_ktlstx_sgl()
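
The DSGL layout being filled in write_ktlstx_sgl(): the first segment travels in the header as (addr0, len0); every later segment is packed two per slot, addressed as sge[j / 2].addr[j & 1] and .len[j & 1]; an odd tail gets a zero pad length. A simplified sketch that takes precomputed segments and skips the page-coalescing (the struct layout is illustrative, not the firmware's):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sge_pair {
	uint32_t len[2];
	uint64_t addr[2];
};

int
main(void)
{
	uint64_t seg_addr[] = { 0x10000, 0x40000, 0x80000 };
	uint32_t seg_len[] = { 8192, 4096, 2048 };
	int i, j, nsegs = 3;
	struct sge_pair sge[2];

	memset(sge, 0, sizeof(sge));

	/* Segment 0 rides in the DSGL header. */
	uint64_t addr0 = seg_addr[0];
	uint32_t len0 = seg_len[0];

	/* Remaining segments pack two per sge slot. */
	for (i = 1, j = 0; i < nsegs; i++, j++) {
		sge[j / 2].addr[j & 1] = seg_addr[i];
		sge[j / 2].len[j & 1] = seg_len[i];
	}
	if (j & 1)			/* pad an odd trailing count */
		sge[j / 2].len[1] = 0;

	printf("addr0=%#llx len0=%u\n", (unsigned long long)addr0, len0);
	for (i = 0; i < (j + 1) / 2; i++)
		printf("sge[%d]: %#llx/%u %#llx/%u\n", i,
		    (unsigned long long)sge[i].addr[0], sge[i].len[0],
		    (unsigned long long)sge[i].addr[1], sge[i].len[1]);
	return (0);
}
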
489 struct wrqe *wr; in t4_push_ktls() local
493 struct inpcb *inp = toep->inp; in t4_push_ktls()
495 struct socket *so = inp->inp_socket; in t4_push_ktls()
496 struct sockbuf *sb = &so->so_snd; in t4_push_ktls()
502 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, in t4_push_ktls()
503 ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid)); in t4_push_ktls()
513 __func__, toep->tid, toep->flags, tp->t_flags); in t4_push_ktls()
515 if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) in t4_push_ktls()
519 if (__predict_false(inp->inp_flags2 & INP_RATE_LIMIT_CHANGED) && in t4_push_ktls()
520 (update_tx_rate_limit(sc, toep, so->so_max_pacing_rate) == 0)) { in t4_push_ktls()
521 inp->inp_flags2 &= ~INP_RATE_LIMIT_CHANGED; in t4_push_ktls()
529 if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) { in t4_push_ktls()
535 txsd = &toep->txsd[toep->txsd_pidx]; in t4_push_ktls()
537 tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS); in t4_push_ktls()
546 m = sb->sb_sndptr != NULL ? sb->sb_sndptr->m_next : sb->sb_mb; in t4_push_ktls()
552 if (m == NULL && toep->flags & TPF_SEND_FIN) { in t4_push_ktls()
566 if (m == NULL || (m->m_flags & M_NOTREADY) != 0) { in t4_push_ktls()
574 __func__, toep->tid); in t4_push_ktls()
579 KASSERT(m->m_flags & M_EXTPG, ("%s: mbuf %p is not NOMAP", in t4_push_ktls()
581 KASSERT(m->m_epg_tls != NULL, in t4_push_ktls()
584 /* Calculate WR length. */ in t4_push_ktls()
589 /* Explicit IVs for AES-CBC and AES-GCM are <= 16. */ in t4_push_ktls()
590 MPASS(toep->tls.iv_len <= AES_BLOCK_LEN); in t4_push_ktls()
596 ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8; in t4_push_ktls()
608 __func__, toep->tid, m, howmany(wr_len, 16), in t4_push_ktls()
611 toep->flags |= TPF_TX_SUSPENDED; in t4_push_ktls()
616 shove = ((m->m_next == NULL || in t4_push_ktls()
617 (m->m_next->m_flags & M_NOTREADY) != 0)) && in t4_push_ktls()
618 (tp->t_flags & TF_MORETOCOME) == 0; in t4_push_ktls()
620 if (sb->sb_flags & SB_AUTOSIZE && in t4_push_ktls()
622 sb->sb_hiwat < V_tcp_autosndbuf_max && in t4_push_ktls()
623 sbused(sb) >= sb->sb_hiwat * 7 / 8) { in t4_push_ktls()
624 int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc, in t4_push_ktls()
628 sb->sb_flags &= ~SB_AUTOSIZE; in t4_push_ktls()
638 if (__predict_false(toep->flags & TPF_FIN_SENT)) in t4_push_ktls()
641 wr = alloc_wrqe(roundup2(wr_len, 16), &toep->ofld_txq->wrq); in t4_push_ktls()
642 if (wr == NULL) { in t4_push_ktls()
644 toep->flags |= TPF_TX_SUSPENDED; in t4_push_ktls()
648 thdr = (struct tls_hdr *)&m->m_epg_hdr; in t4_push_ktls()
651 __func__, toep->tid, m->m_epg_seqno, thdr->type, in t4_push_ktls()
652 m->m_len); in t4_push_ktls()
654 txwr = wrtod(wr); in t4_push_ktls()
658 expn_size = m->m_epg_hdrlen + in t4_push_ktls()
659 m->m_epg_trllen; in t4_push_ktls()
660 tls_size = m->m_len - expn_size; in t4_push_ktls()
662 write_tlstx_cpl(cpl, toep, thdr, tls_size, m->m_epg_seqno); in t4_push_ktls()
665 idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP)); in t4_push_ktls()
666 idata->len = htobe32(0); in t4_push_ktls()
668 memrd->cmd_to_len = htobe32(V_ULPTX_CMD(ULP_TX_SC_MEMRD) | in t4_push_ktls()
670 V_ULPTX_LEN16(toep->tls.tx_key_info_size >> 4)); in t4_push_ktls()
671 memrd->addr = htobe32(toep->tls.tx_key_addr >> 5); in t4_push_ktls()
675 memcpy(buf, thdr + 1, toep->tls.iv_len); in t4_push_ktls()
680 KASSERT(toep->tx_credits >= credits, in t4_push_ktls()
683 toep->tx_credits -= credits; in t4_push_ktls()
685 tp->snd_nxt += m->m_len; in t4_push_ktls()
686 tp->snd_max += m->m_len; in t4_push_ktls()
689 sb->sb_sndptr = m; in t4_push_ktls()
692 toep->flags |= TPF_TX_DATA_SENT; in t4_push_ktls()
693 if (toep->tx_credits < MIN_OFLD_TLSTX_CREDITS(toep)) in t4_push_ktls()
694 toep->flags |= TPF_TX_SUSPENDED; in t4_push_ktls()
696 KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__)); in t4_push_ktls()
697 txsd->plen = m->m_len; in t4_push_ktls()
698 txsd->tx_credits = credits; in t4_push_ktls()
700 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) { in t4_push_ktls()
701 toep->txsd_pidx = 0; in t4_push_ktls()
702 txsd = &toep->txsd[0]; in t4_push_ktls()
704 toep->txsd_avail--; in t4_push_ktls()
706 counter_u64_add(toep->ofld_txq->tx_toe_tls_records, 1); in t4_push_ktls()
707 counter_u64_add(toep->ofld_txq->tx_toe_tls_octets, m->m_len); in t4_push_ktls()
709 t4_l2t_send(sc, wr, toep->l2te); in t4_push_ktls()
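
The SGL term in the WR-length computation above (file line 596) follows from that pairing: the first segment lives in the DSGL header, and each later segment costs 1.5 eight-byte words, plus one padding word when their count is odd. A quick check of the formula:

#include <stdio.h>

/* ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8, from the listing. */
static int
sgl_bytes(int nsegs)
{
	int n = nsegs - 1;	/* segments beyond the one in the header */

	return (((3 * n) / 2 + (n & 1)) * 8);
}

int
main(void)
{
	int nsegs;

	for (nsegs = 1; nsegs <= 5; nsegs++)
		printf("nsegs=%d -> %d extra SGL bytes\n",
		    nsegs, sgl_bytes(nsegs));
	return (0);
}
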
719 * The TLS code reuses the ulp_pdu_reclaimq to hold the pending mbufs.
724 struct adapter *sc = iq->adapter; in do_tls_data()
728 struct inpcb *inp = toep->inp; in do_tls_data()
733 KASSERT(!(toep->flags & TPF_SYNQE), in do_tls_data()
736 KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__)); in do_tls_data()
740 len = m->m_pkthdr.len; in do_tls_data()
742 toep->ofld_rxq->rx_toe_tls_octets += len; in do_tls_data()
744 KASSERT(len == G_CPL_TLS_DATA_LENGTH(be32toh(cpl->length_pkd)), in do_tls_data()
748 if (inp->inp_flags & INP_DROPPED) { in do_tls_data()
750 __func__, tid, len, inp->inp_flags); in do_tls_data()
757 m->m_pkthdr.tls_tcp_seq = be32toh(cpl->seq); in do_tls_data()
759 if (mbufq_enqueue(&toep->ulp_pdu_reclaimq, m)) { in do_tls_data()
771 tp->t_rcvtime = ticks; in do_tls_data()
775 be32toh(cpl->seq)); in do_tls_data()
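
do_tls_data() stashes each plaintext payload on ulp_pdu_reclaimq tagged with its TCP sequence number; do_rx_tls_cmp() later dequeues it and asserts the sequence matches the completion. A toy FIFO standing in for the mbuf queue:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define QLEN 8

static uint32_t pending_seq[QLEN];
static int head, tail;

/* CPL_TLS_DATA: enqueue the payload, keyed by TCP sequence. */
static void
tls_data(uint32_t seq)
{
	pending_seq[tail++ % QLEN] = seq;
}

/* CPL_RX_TLS_CMP: dequeue and match, as the driver's KASSERT does. */
static void
rx_tls_cmp(uint32_t seq)
{
	uint32_t got = pending_seq[head++ % QLEN];

	assert(got == seq);
	printf("record at seq %u complete\n", seq);
}

int
main(void)
{
	tls_data(1000);
	tls_data(2400);
	rx_tls_cmp(1000);
	rx_tls_cmp(2400);
	return (0);
}
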
785 struct adapter *sc = iq->adapter; in do_rx_tls_cmp()
790 struct inpcb *inp = toep->inp; in do_rx_tls_cmp()
802 KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__)); in do_rx_tls_cmp()
803 KASSERT(!(toep->flags & TPF_SYNQE), in do_rx_tls_cmp()
809 len = m->m_pkthdr.len; in do_rx_tls_cmp()
812 toep->ofld_rxq->rx_toe_tls_records++; in do_rx_tls_cmp()
814 KASSERT(len == G_CPL_RX_TLS_CMP_LENGTH(be32toh(cpl->pdulength_length)), in do_rx_tls_cmp()
818 if (inp->inp_flags & INP_DROPPED) { in do_rx_tls_cmp()
820 __func__, tid, len, inp->inp_flags); in do_rx_tls_cmp()
826 pdu_length = G_CPL_RX_TLS_CMP_PDULENGTH(be32toh(cpl->pdulength_length)); in do_rx_tls_cmp()
833 __func__, tid, pdu_length, len, be32toh(cpl->seq), tp->rcv_nxt); in do_rx_tls_cmp()
836 tp->rcv_nxt += pdu_length; in do_rx_tls_cmp()
837 KASSERT(tp->rcv_wnd >= pdu_length, in do_rx_tls_cmp()
839 tp->rcv_wnd -= pdu_length; in do_rx_tls_cmp()
847 KASSERT(m->m_len >= sizeof(*tls_hdr_pkt), in do_rx_tls_cmp()
851 tls_data = mbufq_dequeue(&toep->ulp_pdu_reclaimq); in do_rx_tls_cmp()
853 KASSERT(be32toh(cpl->seq) == tls_data->m_pkthdr.tls_tcp_seq, in do_rx_tls_cmp()
858 if ((tls_hdr_pkt->res_to_mac_error & M_TLSRX_HDR_PKT_ERROR) != 0) { in do_rx_tls_cmp()
860 __func__, toep->tid, tls_hdr_pkt->res_to_mac_error, in do_rx_tls_cmp()
861 be32toh(cpl->ddp_valid)); in do_rx_tls_cmp()
865 CURVNET_SET(toep->vnet); in do_rx_tls_cmp()
866 so->so_error = EBADMSG; in do_rx_tls_cmp()
876 sb = &so->so_rcv; in do_rx_tls_cmp()
878 if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) { in do_rx_tls_cmp()
888 CURVNET_SET(toep->vnet); in do_rx_tls_cmp()
906 if (sb->sb_mtls != NULL) in do_rx_tls_cmp()
916 tgr->tls_type = tls_hdr_pkt->type; in do_rx_tls_cmp()
917 tgr->tls_vmajor = be16toh(tls_hdr_pkt->version) >> 8; in do_rx_tls_cmp()
918 tgr->tls_vminor = be16toh(tls_hdr_pkt->version) & 0xff; in do_rx_tls_cmp()
920 m_last(tls_data)->m_flags |= M_EOR; in do_rx_tls_cmp()
921 tgr->tls_length = htobe16(tls_data->m_pkthdr.len); in do_rx_tls_cmp()
923 tgr->tls_length = 0; in do_rx_tls_cmp()
931 m->m_len = sb->sb_tls_info->params.tls_hlen; in do_rx_tls_cmp()
932 m->m_pkthdr.csum_flags |= CSUM_TLS_DECRYPTED; in do_rx_tls_cmp()
933 m->m_pkthdr.len = m->m_len; in do_rx_tls_cmp()
935 m->m_pkthdr.len += tls_data->m_pkthdr.len; in do_rx_tls_cmp()
937 m->m_next = tls_data; in do_rx_tls_cmp()
948 trailer_len = sb->sb_tls_info->params.tls_tlen; in do_rx_tls_cmp()
950 m_last(tls_data)->m_len += trailer_len; in do_rx_tls_cmp()
953 m->m_len += trailer_len; in do_rx_tls_cmp()
954 m->m_pkthdr.len += trailer_len; in do_rx_tls_cmp()
955 tls_hdr_pkt->length = htobe16(m->m_pkthdr.len - in do_rx_tls_cmp()
960 MPASS(toep->vnet == so->so_vnet); in do_rx_tls_cmp()
961 CURVNET_SET(toep->vnet); in do_rx_tls_cmp()
962 if (sb->sb_flags & SB_AUTOSIZE && in do_rx_tls_cmp()
964 sb->sb_hiwat < V_tcp_autorcvbuf_max && in do_rx_tls_cmp()
965 m->m_pkthdr.len > (sbspace(sb) / 8 * 7)) { in do_rx_tls_cmp()
966 unsigned int hiwat = sb->sb_hiwat; in do_rx_tls_cmp()
967 unsigned int newsize = min(hiwat + sc->tt.autorcvbuf_inc, in do_rx_tls_cmp()
971 sb->sb_flags &= ~SB_AUTOSIZE; in do_rx_tls_cmp()
978 t4_rcvd_locked(&toep->td->tod, tp); in do_rx_tls_cmp()
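
Both socket-buffer autosize blocks in this listing (send side at file lines 620-628, receive side just above) use roughly the same heuristic: once the buffer is about 7/8 committed, grow it by a fixed increment up to a cap. A sketch with illustrative numbers, not the kernel defaults:

#include <stdio.h>

int
main(void)
{
	unsigned hiwat = 65536, inc = 16384, max = 2097152;
	unsigned used = 60000;

	/* Grow once usage crosses 7/8 of the high-water mark. */
	if (used >= hiwat / 8 * 7) {
		unsigned newsize = hiwat + inc;

		if (newsize > max)
			newsize = max;
		printf("grow sockbuf: %u -> %u\n", hiwat, newsize);
	}
	return (0);
}
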
992 struct inpcb *inp = toep->inp; in do_rx_data_tls()
993 struct tls_ofld_info *tls_ofld = &toep->tls; in do_rx_data_tls()
1000 len = m->m_pkthdr.len; in do_rx_data_tls()
1006 sb = &so->so_rcv; in do_rx_data_tls()
1008 CURVNET_SET(toep->vnet); in do_rx_data_tls()
1010 tp->rcv_nxt += len; in do_rx_data_tls()
1011 KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__)); in do_rx_data_tls()
1012 tp->rcv_wnd -= len; in do_rx_data_tls()
1017 __func__, toep->tid, len); in do_rx_data_tls()
1018 so->so_error = EMSGSIZE; in do_rx_data_tls()
1024 if (be16toh(hdr->version) != tls_ofld->rx_version) { in do_rx_data_tls()
1026 __func__, toep->tid, be16toh(hdr->version)); in do_rx_data_tls()
1027 so->so_error = EINVAL; in do_rx_data_tls()
1030 if (be16toh(hdr->length) < sizeof(*hdr)) { in do_rx_data_tls()
1032 __func__, toep->tid, be16toh(hdr->length)); in do_rx_data_tls()
1033 so->so_error = EBADMSG; in do_rx_data_tls()
1038 if (len < be16toh(hdr->length)) { in do_rx_data_tls()
1040 __func__, toep->tid, len, be16toh(hdr->length)); in do_rx_data_tls()
1042 so->so_error = EMSGSIZE; in do_rx_data_tls()
1047 switch (hdr->type) { in do_rx_data_tls()
1055 __func__, toep->tid, hdr->type); in do_rx_data_tls()
1056 so->so_error = EBADMSG; in do_rx_data_tls()
1065 __func__, toep->tid, hdr->type, be16toh(hdr->length)); in do_rx_data_tls()
1066 so->so_error = EBADMSG; in do_rx_data_tls()
1085 struct wrqe *wr; in tls_update_tcb() local
1091 ("%s: tid %d already ULP_MODE_TLS", __func__, toep->tid)); in tls_update_tcb()
1112 ("%s: WR with %d TCB field updates too large", __func__, fields)); in tls_update_tcb()
1114 wr = alloc_wrqe(len, toep->ctrlq); in tls_update_tcb()
1115 if (wr == NULL) { in tls_update_tcb()
1120 wrh = wrtod(wr); in tls_update_tcb()
1132 ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, 26, in tls_update_tcb()
1140 key_offset = toep->tls.rx_key_addr - sc->vres.key.start; in tls_update_tcb()
1141 ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, 30, in tls_update_tcb()
1146 toep->tid, seqno); in tls_update_tcb()
1147 ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_TLS_SEQ, in tls_update_tcb()
1149 ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_ULP_RAW, in tls_update_tcb()
1154 toep->flags &= ~TPF_TLS_STARTING; in tls_update_tcb()
1155 toep->flags |= TPF_TLS_RECEIVE; in tls_update_tcb()
1158 toep->params.ulp_mode = ULP_MODE_TLS; in tls_update_tcb()
1159 ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_ULP_TYPE, in tls_update_tcb()
1163 ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_T_FLAGS, in tls_update_tcb()
1166 t4_wrq_tx(sc, wr); in tls_update_tcb()
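
tls_update_tcb() batches several set-TCB-field commands into one control WR, sized from the field count and rounded to 16-byte units. A rough sizing sketch; the header and per-field command sizes below are stand-ins, not the firmware's:

#include <stdio.h>

#define WRH_SZ		16	/* hypothetical WR header size, bytes */
#define ULPMC_SZ	24	/* hypothetical per-field command size */

static unsigned
tcb_wr_len(unsigned fields)
{
	unsigned len = WRH_SZ + fields * ULPMC_SZ;

	return ((len + 15) & ~15u);	/* roundup2(len, 16) */
}

int
main(void)
{
	unsigned f;

	for (f = 1; f <= 5; f++)
		printf("%u fields -> %u bytes\n", f, tcb_wr_len(f));
	return (0);
}
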
1182 MPASS(toep->tls.rx_resid == 0); in tls_check_rx_sockbuf()
1186 __func__, toep->tid, have_header, seqno, resid); in tls_check_rx_sockbuf()
1190 * size of a TLS record, re-enable receive and pause again once in tls_check_rx_sockbuf()
1195 toep->tid); in tls_check_rx_sockbuf()
1196 toep->flags &= ~TPF_TLS_RX_QUIESCED; in tls_check_rx_sockbuf()
1208 MPASS(toep->flags & TPF_TLS_STARTING); in tls_received_starting_data()
1211 if ((toep->flags & TPF_TLS_RX_QUIESCING) != 0) in tls_received_starting_data()
1219 if ((toep->flags & TPF_TLS_RX_QUIESCED) == 0) { in tls_received_starting_data()
1220 CTR(KTR_CXGBE, "%s: tid %d quiescing", __func__, toep->tid); in tls_received_starting_data()
1221 toep->flags |= TPF_TLS_RX_QUIESCING; in tls_received_starting_data()
1226 KASSERT(len <= toep->tls.rx_resid, in tls_received_starting_data()
1228 toep->tls.rx_resid)); in tls_received_starting_data()
1229 toep->tls.rx_resid -= len; in tls_received_starting_data()
1230 if (toep->tls.rx_resid != 0) in tls_received_starting_data()
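
tls_received_starting_data() counts down rx_resid, the bytes still owed on the TLS record that was in flight when offload started; only when it reaches zero is the connection at a record boundary and ready to be quiesced for the TCB update. A sketch of that countdown:

#include <assert.h>
#include <stdio.h>

int
main(void)
{
	unsigned rx_resid = 1500;	/* bytes left in the partial record */
	unsigned chunks[] = { 700, 800 };
	int i;

	for (i = 0; i < 2; i++) {
		assert(chunks[i] <= rx_resid);	/* mirrors the KASSERT */
		rx_resid -= chunks[i];
		printf("after %u bytes: resid=%u%s\n", chunks[i], rx_resid,
		    rx_resid == 0 ? " (record boundary)" : "");
	}
	return (0);
}
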
1239 struct adapter *sc = iq->adapter; in do_tls_tcb_rpl()
1247 if (cpl->status != CPL_ERR_NONE) in do_tls_tcb_rpl()
1248 panic("XXX: tcp_rpl failed: %d", cpl->status); in do_tls_tcb_rpl()
1251 inp = toep->inp; in do_tls_tcb_rpl()
1252 switch (cpl->cookie) { in do_tls_tcb_rpl()
1255 if ((toep->flags & TPF_TLS_STARTING) == 0) in do_tls_tcb_rpl()
1258 MPASS((toep->flags & TPF_TLS_RX_QUIESCING) != 0); in do_tls_tcb_rpl()
1260 toep->flags &= ~TPF_TLS_RX_QUIESCING; in do_tls_tcb_rpl()
1261 toep->flags |= TPF_TLS_RX_QUIESCED; in do_tls_tcb_rpl()
1263 so = inp->inp_socket; in do_tls_tcb_rpl()
1264 sb = &so->so_rcv; in do_tls_tcb_rpl()
1272 G_WORD(cpl->cookie), G_COOKIE(cpl->cookie)); in do_tls_tcb_rpl()