xref: /freebsd/sys/dev/cxgbe/tom/t4_tls.c (revision ed549cb0c53f8438c52593ce811f6fcc812248e9)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2017-2018 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_inet.h"
#include "opt_kern_tls.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef KERN_TLS
#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/ktls.h>
#include <sys/sglist.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/systm.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_var.h>
#include <netinet/toecore.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#ifdef TCP_OFFLOAD
#include "common/common.h"
#include "common/t4_tcb.h"
#include "crypto/t4_crypto.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"

/*
 * The TCP sequence number of a CPL_TLS_DATA mbuf is saved here while
 * the mbuf is in the ulp_pdu_reclaimq.
 */
#define	tls_tcp_seq	PH_loc.thirtytwo[0]

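/*
 * Update a TCB field for this connection.  This is a thin wrapper
 * around t4_set_tcb_field() that issues the update on the offload tx
 * queue without requesting a reply.
 */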
static void
t4_set_tls_tcb_field(struct toepcb *toep, uint16_t word, uint64_t mask,
    uint64_t val)
{
	struct adapter *sc = td_adapter(toep->td);

	t4_set_tcb_field(sc, &toep->ofld_txq->wrq, toep, word, mask, val, 0, 0);
}

/* TLS and DTLS common routines */
bool
can_tls_offload(struct adapter *sc)
{

	return (sc->tt.tls && sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS);
}

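/* Returns non-zero once a transmit key has been programmed. */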
int
tls_tx_key(struct toepcb *toep)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;

	return (tls_ofld->tx_key_addr >= 0);
}

/* Set TF_RX_QUIESCE to pause receive. */
static void
t4_set_rx_quiesce(struct toepcb *toep)
{
	struct adapter *sc = td_adapter(toep->td);

	t4_set_tcb_field(sc, &toep->ofld_txq->wrq, toep, W_TCB_T_FLAGS,
	    V_TF_RX_QUIESCE(1), V_TF_RX_QUIESCE(1), 1, CPL_COOKIE_TOM);
}

/* Clear TF_RX_QUIESCE to re-enable receive. */
static void
t4_clear_rx_quiesce(struct toepcb *toep)
{

	t4_set_tls_tcb_field(toep, W_TCB_T_FLAGS, V_TF_RX_QUIESCE(1), 0);
}

/* TLS/DTLS content type for CPL SFO */
static inline unsigned char
tls_content_type(unsigned char content_type)
{
	switch (content_type) {
	case CONTENT_TYPE_CCS:
		return CPL_TX_TLS_SFO_TYPE_CCS;
	case CONTENT_TYPE_ALERT:
		return CPL_TX_TLS_SFO_TYPE_ALERT;
	case CONTENT_TYPE_HANDSHAKE:
		return CPL_TX_TLS_SFO_TYPE_HANDSHAKE;
	case CONTENT_TYPE_APP_DATA:
		return CPL_TX_TLS_SFO_TYPE_DATA;
	default:
		return CPL_TX_TLS_SFO_TYPE_CUSTOM;
	}
}

/* TLS Key memory management */
static void
clear_tls_keyid(struct toepcb *toep)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;
	struct adapter *sc = td_adapter(toep->td);

	if (tls_ofld->rx_key_addr >= 0) {
		t4_free_tls_keyid(sc, tls_ofld->rx_key_addr);
		tls_ofld->rx_key_addr = -1;
	}
	if (tls_ofld->tx_key_addr >= 0) {
		t4_free_tls_keyid(sc, tls_ofld->tx_key_addr);
		tls_ofld->tx_key_addr = -1;
	}
}

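/*
 * Compute the value stored in fcplenmax: for sessions with small
 * frames, the largest multiple of 1448 bytes (presumably a typical
 * TCP payload size) that fits in min(3 * 4096, TP_TX_PG_SZ).
 * Assuming TP_TX_PG_SZ >= 12288, this works out to 8 * 1448 = 11584.
 */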
static int
get_tp_plen_max(struct ktls_session *tls)
{
	int plen = ((min(3*4096, TP_TX_PG_SZ))/1448) * 1448;

	return (tls->params.max_frame_len <= 8192 ? plen : FC_TP_PLEN_MAX);
}

/* Send request to get the key-id */
static int
tls_program_key_id(struct toepcb *toep, struct ktls_session *tls,
    int direction)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;
	struct adapter *sc = td_adapter(toep->td);
	struct ofld_tx_sdesc *txsd;
	int keyid;
	struct wrqe *wr;
	struct tls_key_req *kwr;
	struct tls_keyctx *kctx;

#ifdef INVARIANTS
	int kwrlen, kctxlen, len;

	kwrlen = sizeof(*kwr);
	kctxlen = roundup2(sizeof(*kctx), 32);
	len = roundup2(kwrlen + kctxlen, 16);
	MPASS(TLS_KEY_WR_SZ == len);
#endif
	if (toep->txsd_avail == 0)
		return (EAGAIN);

	if ((keyid = t4_alloc_tls_keyid(sc)) < 0) {
		return (ENOSPC);
	}

	wr = alloc_wrqe(TLS_KEY_WR_SZ, &toep->ofld_txq->wrq);
	if (wr == NULL) {
		t4_free_tls_keyid(sc, keyid);
		return (ENOMEM);
	}
	kwr = wrtod(wr);
	memset(kwr, 0, TLS_KEY_WR_SZ);

	t4_write_tlskey_wr(tls, direction, toep->tid, F_FW_WR_COMPL, keyid,
	    kwr);
	kctx = (struct tls_keyctx *)(kwr + 1);
	if (direction == KTLS_TX)
		tls_ofld->tx_key_addr = keyid;
	else
		tls_ofld->rx_key_addr = keyid;
	t4_tls_key_ctx(tls, direction, kctx);

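	/*
	 * Charge the key work request to the next send descriptor;
	 * credits are in 16-byte units and are refunded when the
	 * firmware completion (F_FW_WR_COMPL) for this WR arrives.
	 */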
	txsd = &toep->txsd[toep->txsd_pidx];
	txsd->tx_credits = DIV_ROUND_UP(TLS_KEY_WR_SZ, 16);
	txsd->plen = 0;
	toep->tx_credits -= txsd->tx_credits;
	if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
		toep->txsd_pidx = 0;
	toep->txsd_avail--;

	t4_wrq_tx(sc, wr);

	return (0);
}

int
tls_alloc_ktls(struct toepcb *toep, struct ktls_session *tls, int direction)
{
	struct adapter *sc = td_adapter(toep->td);
	int error, explicit_iv_size, mac_first;

	if (!can_tls_offload(sc))
		return (EINVAL);

	if (direction == KTLS_RX) {
		if (ulp_mode(toep) != ULP_MODE_NONE)
			return (EINVAL);
		if ((toep->flags & TPF_TLS_STARTING) != 0)
			return (EINVAL);
	} else {
		switch (ulp_mode(toep)) {
		case ULP_MODE_NONE:
		case ULP_MODE_TLS:
		case ULP_MODE_TCPDDP:
			break;
		default:
			return (EINVAL);
		}
	}

	switch (tls->params.cipher_algorithm) {
	case CRYPTO_AES_CBC:
		/* XXX: Explicitly ignore any provided IV. */
		switch (tls->params.cipher_key_len) {
		case 128 / 8:
		case 192 / 8:
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}
		switch (tls->params.auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
			break;
		default:
			return (EPROTONOSUPPORT);
		}
		explicit_iv_size = AES_BLOCK_LEN;
		mac_first = 1;
		break;
	case CRYPTO_AES_NIST_GCM_16:
		if (tls->params.iv_len != SALT_SIZE) {
			return (EINVAL);
		}
		switch (tls->params.cipher_key_len) {
		case 128 / 8:
		case 192 / 8:
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}
		explicit_iv_size = 8;
		mac_first = 0;
		break;
	default:
		return (EPROTONOSUPPORT);
	}

	/* Only TLS 1.1 and TLS 1.2 are currently supported. */
	if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
	    tls->params.tls_vminor < TLS_MINOR_VER_ONE ||
	    tls->params.tls_vminor > TLS_MINOR_VER_TWO) {
		return (EPROTONOSUPPORT);
	}

	/* Bail if we already have a key. */
	if (direction == KTLS_TX) {
		if (toep->tls.tx_key_addr != -1)
			return (EOPNOTSUPP);
	} else {
		if (toep->tls.rx_key_addr != -1)
			return (EOPNOTSUPP);
	}

	error = tls_program_key_id(toep, tls, direction);
	if (error)
		return (error);

	if (direction == KTLS_TX) {
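		/*
		 * Prime the SCMD fields that write_tlstx_cpl() later
		 * copies into each CPL_TX_TLS_SFO.  The explicit IV
		 * size is halved because V_SCMD_IV_SIZE appears to
		 * count 2-byte units.
		 */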
		toep->tls.scmd0.seqno_numivs =
			(V_SCMD_SEQ_NO_CTRL(3) |
			 V_SCMD_PROTO_VERSION(t4_tls_proto_ver(tls)) |
			 V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
			 V_SCMD_CIPH_AUTH_SEQ_CTRL((mac_first == 0)) |
			 V_SCMD_CIPH_MODE(t4_tls_cipher_mode(tls)) |
			 V_SCMD_AUTH_MODE(t4_tls_auth_mode(tls)) |
			 V_SCMD_HMAC_CTRL(t4_tls_hmac_ctrl(tls)) |
			 V_SCMD_IV_SIZE(explicit_iv_size / 2));

		toep->tls.scmd0.ivgen_hdrlen =
			(V_SCMD_IV_GEN_CTRL(1) |
			 V_SCMD_KEY_CTX_INLINE(0) |
			 V_SCMD_TLS_FRAG_ENABLE(1));

		toep->tls.iv_len = explicit_iv_size;
		toep->tls.frag_size = tls->params.max_frame_len;
		toep->tls.fcplenmax = get_tp_plen_max(tls);
		toep->tls.expn_per_ulp = tls->params.tls_hlen +
		    tls->params.tls_tlen;
		toep->tls.pdus_per_ulp = 1;
		toep->tls.adjusted_plen = toep->tls.expn_per_ulp +
		    tls->params.max_frame_len;
		toep->tls.tx_key_info_size = t4_tls_key_info_size(tls);
	} else {
		toep->flags |= TPF_TLS_STARTING | TPF_TLS_RX_QUIESCED;
		toep->tls.rx_version = tls->params.tls_vmajor << 8 |
		    tls->params.tls_vminor;

		CTR2(KTR_CXGBE, "%s: tid %d setting RX_QUIESCE", __func__,
		    toep->tid);
		t4_set_rx_quiesce(toep);
	}

	return (0);
}

void
tls_init_toep(struct toepcb *toep)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;

	tls_ofld->rx_key_addr = -1;
	tls_ofld->tx_key_addr = -1;
}

void
tls_uninit_toep(struct toepcb *toep)
{

	clear_tls_keyid(toep);
}

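/*
 * Tx work request credits are in units of 16 bytes.  The minimum for
 * a TLS tx WR covers the work request header, the CPL, an immediate
 * data header, a memory read of the key context, and an explicit IV.
 */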
#define MAX_OFLD_TX_CREDITS (SGE_MAX_WR_LEN / 16)
#define	MIN_OFLD_TLSTX_CREDITS(toep)					\
	(howmany(sizeof(struct fw_tlstx_data_wr) +			\
	    sizeof(struct cpl_tx_tls_sfo) + sizeof(struct ulptx_idata) + \
	    sizeof(struct ulptx_sc_memrd) +				\
	    AES_BLOCK_LEN + 1, 16))

static void
write_tlstx_wr(struct fw_tlstx_data_wr *txwr, struct toepcb *toep,
    unsigned int plen, unsigned int expn, uint8_t credits, int shove)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;
	unsigned int len = plen + expn;

	txwr->op_to_immdlen = htobe32(V_WR_OP(FW_TLSTX_DATA_WR) |
	    V_FW_TLSTX_DATA_WR_COMPL(1) |
	    V_FW_TLSTX_DATA_WR_IMMDLEN(0));
	txwr->flowid_len16 = htobe32(V_FW_TLSTX_DATA_WR_FLOWID(toep->tid) |
	    V_FW_TLSTX_DATA_WR_LEN16(credits));
	txwr->plen = htobe32(len);
	txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(ULP_MODE_TLS) |
	    V_TX_URG(0) | /* F_T6_TX_FORCE | */ V_TX_SHOVE(shove));
	txwr->ctxloc_to_exp = htobe32(V_FW_TLSTX_DATA_WR_NUMIVS(1) |
	    V_FW_TLSTX_DATA_WR_EXP(expn) |
	    V_FW_TLSTX_DATA_WR_CTXLOC(TLS_SFO_WR_CONTEXTLOC_DDR) |
	    V_FW_TLSTX_DATA_WR_IVDSGL(0) |
	    V_FW_TLSTX_DATA_WR_KEYSIZE(tls_ofld->tx_key_info_size >> 4));
	txwr->mfs = htobe16(tls_ofld->frag_size);
	txwr->adjustedplen_pkd = htobe16(
	    V_FW_TLSTX_DATA_WR_ADJUSTEDPLEN(tls_ofld->adjusted_plen));
	txwr->expinplenmax_pkd = htobe16(
	    V_FW_TLSTX_DATA_WR_EXPINPLENMAX(tls_ofld->expn_per_ulp));
	txwr->pdusinplenmax_pkd =
	    V_FW_TLSTX_DATA_WR_PDUSINPLENMAX(tls_ofld->pdus_per_ulp);
}

static void
write_tlstx_cpl(struct cpl_tx_tls_sfo *cpl, struct toepcb *toep,
    struct tls_hdr *tls_hdr, unsigned int plen, uint64_t seqno)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;
	int data_type, seglen;

	seglen = plen;
	data_type = tls_content_type(tls_hdr->type);
	cpl->op_to_seg_len = htobe32(V_CPL_TX_TLS_SFO_OPCODE(CPL_TX_TLS_SFO) |
	    V_CPL_TX_TLS_SFO_DATA_TYPE(data_type) |
	    V_CPL_TX_TLS_SFO_CPL_LEN(2) | V_CPL_TX_TLS_SFO_SEG_LEN(seglen));
	cpl->pld_len = htobe32(plen);
	if (data_type == CPL_TX_TLS_SFO_TYPE_CUSTOM)
		cpl->type_protover = htobe32(
		    V_CPL_TX_TLS_SFO_TYPE(tls_hdr->type));
	cpl->seqno_numivs = htobe32(tls_ofld->scmd0.seqno_numivs |
	    V_SCMD_NUM_IVS(1));
	cpl->ivgen_hdrlen = htobe32(tls_ofld->scmd0.ivgen_hdrlen);
	cpl->scmd1 = htobe64(seqno);
}

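/*
 * Count the physically contiguous runs of pages in an unmapped
 * (M_EXTPG) mbuf.  Each run can be described by a single DSGL entry.
 */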
static int
count_ext_pgs_segs(struct mbuf *m)
{
	vm_paddr_t nextpa;
	u_int i, nsegs;

	MPASS(m->m_epg_npgs > 0);
	nsegs = 1;
	nextpa = m->m_epg_pa[0] + PAGE_SIZE;
	for (i = 1; i < m->m_epg_npgs; i++) {
		if (nextpa != m->m_epg_pa[i])
			nsegs++;
		nextpa = m->m_epg_pa[i] + PAGE_SIZE;
	}
	return (nsegs);
}

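/*
 * Construct the ULP_TX_SC_DSGL describing the record payload.  The
 * first segment is stored in addr0/len0 of the ulptx_sgl header and
 * the remaining segments are packed in pairs, with physically
 * adjacent pages coalesced into a single segment.
 */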
static void
write_ktlstx_sgl(void *dst, struct mbuf *m, int nsegs)
{
	struct ulptx_sgl *usgl = dst;
	vm_paddr_t pa;
	uint32_t len;
	int i, j;

	KASSERT(nsegs > 0, ("%s: nsegs 0", __func__));

	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
	    V_ULPTX_NSGE(nsegs));

	/* Figure out the first S/G length. */
	pa = m->m_epg_pa[0] + m->m_epg_1st_off;
	usgl->addr0 = htobe64(pa);
	len = m_epg_pagelen(m, 0, m->m_epg_1st_off);
	pa += len;
	for (i = 1; i < m->m_epg_npgs; i++) {
		if (m->m_epg_pa[i] != pa)
			break;
		len += m_epg_pagelen(m, i, 0);
		pa += m_epg_pagelen(m, i, 0);
	}
	usgl->len0 = htobe32(len);
#ifdef INVARIANTS
	nsegs--;
#endif

	j = -1;
	for (; i < m->m_epg_npgs; i++) {
		if (j == -1 || m->m_epg_pa[i] != pa) {
			if (j >= 0)
				usgl->sge[j / 2].len[j & 1] = htobe32(len);
			j++;
#ifdef INVARIANTS
			nsegs--;
#endif
			pa = m->m_epg_pa[i];
			usgl->sge[j / 2].addr[j & 1] = htobe64(pa);
			len = m_epg_pagelen(m, i, 0);
			pa += len;
		} else {
			len += m_epg_pagelen(m, i, 0);
			pa += m_epg_pagelen(m, i, 0);
		}
	}
	if (j >= 0) {
		usgl->sge[j / 2].len[j & 1] = htobe32(len);

		if ((j & 1) == 0)
			usgl->sge[j / 2].len[1] = htobe32(0);
	}
	KASSERT(nsegs == 0, ("%s: nsegs %d, m %p", __func__, nsegs, m));
}

/*
 * Similar to t4_push_frames() but handles sockets that contain TLS
 * record mbufs.
 */
void
t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
{
	struct tls_hdr *thdr;
	struct fw_tlstx_data_wr *txwr;
	struct cpl_tx_tls_sfo *cpl;
	struct ulptx_idata *idata;
	struct ulptx_sc_memrd *memrd;
	struct wrqe *wr;
	struct mbuf *m;
	u_int nsegs, credits, wr_len;
	u_int expn_size;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);
	struct socket *so = inp->inp_socket;
	struct sockbuf *sb = &so->so_snd;
	int tls_size, tx_credits, shove, sowwakeup;
	struct ofld_tx_sdesc *txsd;
	char *buf;

	INP_WLOCK_ASSERT(inp);
	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));

	KASSERT(ulp_mode(toep) == ULP_MODE_NONE ||
	    ulp_mode(toep) == ULP_MODE_TCPDDP || ulp_mode(toep) == ULP_MODE_TLS,
	    ("%s: ulp_mode %u for toep %p", __func__, ulp_mode(toep), toep));
	KASSERT(tls_tx_key(toep),
	    ("%s: TX key not set for toep %p", __func__, toep));

#ifdef VERBOSE_TRACES
	CTR5(KTR_CXGBE, "%s: tid %d toep flags %#x tp flags %#x drop %d",
	    __func__, toep->tid, toep->flags, tp->t_flags, drop);
#endif
	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN))
		return;

#ifdef RATELIMIT
	if (__predict_false(inp->inp_flags2 & INP_RATE_LIMIT_CHANGED) &&
	    (update_tx_rate_limit(sc, toep, so->so_max_pacing_rate) == 0)) {
		inp->inp_flags2 &= ~INP_RATE_LIMIT_CHANGED;
	}
#endif

	/*
	 * This function doesn't resume by itself.  Someone else must clear the
	 * flag and call this function.
	 */
	if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) {
		KASSERT(drop == 0,
		    ("%s: drop (%d) != 0 but tx is suspended", __func__, drop));
		return;
	}

	txsd = &toep->txsd[toep->txsd_pidx];
	for (;;) {
		tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);

		SOCKBUF_LOCK(sb);
		sowwakeup = drop;
		if (drop) {
			sbdrop_locked(sb, drop);
			drop = 0;
		}

		m = sb->sb_sndptr != NULL ? sb->sb_sndptr->m_next : sb->sb_mb;

		/*
		 * Send a FIN if requested, but only if there's no
		 * more data to send.
		 */
		if (m == NULL && toep->flags & TPF_SEND_FIN) {
			if (sowwakeup)
				sowwakeup_locked(so);
			else
				SOCKBUF_UNLOCK(sb);
			SOCKBUF_UNLOCK_ASSERT(sb);
			t4_close_conn(sc, toep);
			return;
		}

		/*
		 * If there is no ready data to send, wait until more
		 * data arrives.
		 */
		if (m == NULL || (m->m_flags & M_NOTAVAIL) != 0) {
			if (sowwakeup)
				sowwakeup_locked(so);
			else
				SOCKBUF_UNLOCK(sb);
			SOCKBUF_UNLOCK_ASSERT(sb);
#ifdef VERBOSE_TRACES
			CTR2(KTR_CXGBE, "%s: tid %d no ready data to send",
			    __func__, toep->tid);
#endif
			return;
		}

		KASSERT(m->m_flags & M_EXTPG, ("%s: mbuf %p is not NOMAP",
		    __func__, m));
		KASSERT(m->m_epg_tls != NULL,
		    ("%s: mbuf %p doesn't have TLS session", __func__, m));

		/* Calculate WR length. */
		wr_len = sizeof(struct fw_tlstx_data_wr) +
		    sizeof(struct cpl_tx_tls_sfo) +
		    sizeof(struct ulptx_idata) + sizeof(struct ulptx_sc_memrd);

		/* Explicit IVs for AES-CBC and AES-GCM are <= 16. */
		MPASS(toep->tls.iv_len <= AES_BLOCK_LEN);
		wr_len += AES_BLOCK_LEN;

		/* Account for SGL in work request length. */
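		/*
		 * The first segment lives in the ulptx_sgl header
		 * itself; each remaining pair of segments needs three
		 * 8-byte flits (two addresses and two packed lengths),
		 * and a final unpaired segment needs two.
		 */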
		nsegs = count_ext_pgs_segs(m);
		wr_len += sizeof(struct ulptx_sgl) +
		    ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;

		/* Not enough credits for this work request. */
		if (howmany(wr_len, 16) > tx_credits) {
			if (sowwakeup)
				sowwakeup_locked(so);
			else
				SOCKBUF_UNLOCK(sb);
			SOCKBUF_UNLOCK_ASSERT(sb);
#ifdef VERBOSE_TRACES
			CTR5(KTR_CXGBE,
	    "%s: tid %d mbuf %p requires %d credits, but only %d available",
			    __func__, toep->tid, m, howmany(wr_len, 16),
			    tx_credits);
#endif
			toep->flags |= TPF_TX_SUSPENDED;
			return;
		}

		/* Shove if there is no additional data pending. */
		shove = ((m->m_next == NULL ||
		    (m->m_next->m_flags & M_NOTAVAIL) != 0)) &&
		    (tp->t_flags & TF_MORETOCOME) == 0;

		if (sb->sb_flags & SB_AUTOSIZE &&
		    V_tcp_do_autosndbuf &&
		    sb->sb_hiwat < V_tcp_autosndbuf_max &&
		    sbused(sb) >= sb->sb_hiwat * 7 / 8) {
			int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc,
			    V_tcp_autosndbuf_max);

			if (!sbreserve_locked(so, SO_SND, newsize, NULL))
				sb->sb_flags &= ~SB_AUTOSIZE;
			else
				sowwakeup = 1;	/* room available */
		}
		if (sowwakeup)
			sowwakeup_locked(so);
		else
			SOCKBUF_UNLOCK(sb);
		SOCKBUF_UNLOCK_ASSERT(sb);

		if (__predict_false(toep->flags & TPF_FIN_SENT))
			panic("%s: excess tx.", __func__);

		wr = alloc_wrqe(roundup2(wr_len, 16), &toep->ofld_txq->wrq);
		if (wr == NULL) {
			/* XXX: how will we recover from this? */
			toep->flags |= TPF_TX_SUSPENDED;
			return;
		}

		thdr = (struct tls_hdr *)&m->m_epg_hdr;
#ifdef VERBOSE_TRACES
		CTR5(KTR_CXGBE, "%s: tid %d TLS record %ju type %d len %#x",
		    __func__, toep->tid, m->m_epg_seqno, thdr->type,
		    m->m_len);
#endif
		txwr = wrtod(wr);
		cpl = (struct cpl_tx_tls_sfo *)(txwr + 1);
		memset(txwr, 0, roundup2(wr_len, 16));
		credits = howmany(wr_len, 16);
		expn_size = m->m_epg_hdrlen +
		    m->m_epg_trllen;
		tls_size = m->m_len - expn_size;
		write_tlstx_wr(txwr, toep, tls_size, expn_size, credits, shove);
		write_tlstx_cpl(cpl, toep, thdr, tls_size, m->m_epg_seqno);

		idata = (struct ulptx_idata *)(cpl + 1);
		idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		idata->len = htobe32(0);
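		/*
		 * Reference the key context previously programmed into
		 * adapter memory.  The shift presumably converts the
		 * byte address into the 32-byte units the hardware
		 * expects.
		 */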
		memrd = (struct ulptx_sc_memrd *)(idata + 1);
		memrd->cmd_to_len = htobe32(V_ULPTX_CMD(ULP_TX_SC_MEMRD) |
		    V_ULP_TX_SC_MORE(1) |
		    V_ULPTX_LEN16(toep->tls.tx_key_info_size >> 4));
		memrd->addr = htobe32(toep->tls.tx_key_addr >> 5);

		/* Copy IV. */
		buf = (char *)(memrd + 1);
		memcpy(buf, thdr + 1, toep->tls.iv_len);
		buf += AES_BLOCK_LEN;

		write_ktlstx_sgl(buf, m, nsegs);

		KASSERT(toep->tx_credits >= credits,
			("%s: not enough credits", __func__));

		toep->tx_credits -= credits;

		tp->snd_nxt += m->m_len;
		tp->snd_max += m->m_len;

		SOCKBUF_LOCK(sb);
		sb->sb_sndptr = m;
		SOCKBUF_UNLOCK(sb);

		toep->flags |= TPF_TX_DATA_SENT;
		if (toep->tx_credits < MIN_OFLD_TLSTX_CREDITS(toep))
			toep->flags |= TPF_TX_SUSPENDED;

		KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
		txsd->plen = m->m_len;
		txsd->tx_credits = credits;
		txsd++;
		if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
			toep->txsd_pidx = 0;
			txsd = &toep->txsd[0];
		}
		toep->txsd_avail--;

		counter_u64_add(toep->ofld_txq->tx_toe_tls_records, 1);
		counter_u64_add(toep->ofld_txq->tx_toe_tls_octets, m->m_len);

		t4_l2t_send(sc, wr, toep->l2te);
	}
}

/*
 * For TLS data, mbufs received via CPL_TLS_DATA are placed into an
 * mbufq in the TLS offload state.  When CPL_RX_TLS_CMP is received,
 * the completed PDUs are placed into the socket receive buffer.
 *
 * The TLS code reuses the ulp_pdu_reclaimq to hold the pending mbufs.
 */
static int
do_tls_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_tls_data *cpl = mtod(m, const void *);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp;
	int len;

	/* XXX: Should this match do_rx_data instead? */
	KASSERT(!(toep->flags & TPF_SYNQE),
	    ("%s: toep %p claims to be a synq entry", __func__, toep));

	KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));

	/* strip off CPL header */
	m_adj(m, sizeof(*cpl));
	len = m->m_pkthdr.len;

	toep->ofld_rxq->rx_toe_tls_octets += len;

	KASSERT(len == G_CPL_TLS_DATA_LENGTH(be32toh(cpl->length_pkd)),
	    ("%s: payload length mismatch", __func__));

	INP_WLOCK(inp);
	if (inp->inp_flags & INP_DROPPED) {
		CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
		    __func__, tid, len, inp->inp_flags);
		INP_WUNLOCK(inp);
		m_freem(m);
		return (0);
	}

	/* Save TCP sequence number. */
	m->m_pkthdr.tls_tcp_seq = be32toh(cpl->seq);

	if (mbufq_enqueue(&toep->ulp_pdu_reclaimq, m)) {
#ifdef INVARIANTS
		panic("Failed to queue TLS data packet");
#else
		printf("%s: Failed to queue TLS data packet\n", __func__);
		INP_WUNLOCK(inp);
		m_freem(m);
		return (0);
#endif
	}

	tp = intotcpcb(inp);
	tp->t_rcvtime = ticks;

#ifdef VERBOSE_TRACES
	CTR4(KTR_CXGBE, "%s: tid %u len %d seq %u", __func__, tid, len,
	    be32toh(cpl->seq));
#endif

	INP_WUNLOCK(inp);
	return (0);
}

static int
do_rx_tls_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_rx_tls_cmp *cpl = mtod(m, const void *);
	struct tlsrx_hdr_pkt *tls_hdr_pkt;
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp;
	struct socket *so;
	struct sockbuf *sb;
	struct mbuf *tls_data;
	struct tls_get_record *tgr;
	struct mbuf *control;
	int pdu_length, rx_credits, trailer_len;
#if defined(KTR) || defined(INVARIANTS)
	int len;
#endif

	KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
	KASSERT(!(toep->flags & TPF_SYNQE),
	    ("%s: toep %p claims to be a synq entry", __func__, toep));

	/* strip off CPL header */
	m_adj(m, sizeof(*cpl));
#if defined(KTR) || defined(INVARIANTS)
	len = m->m_pkthdr.len;
#endif

	toep->ofld_rxq->rx_toe_tls_records++;

	KASSERT(len == G_CPL_RX_TLS_CMP_LENGTH(be32toh(cpl->pdulength_length)),
	    ("%s: payload length mismatch", __func__));

	INP_WLOCK(inp);
	if (inp->inp_flags & INP_DROPPED) {
		CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
		    __func__, tid, len, inp->inp_flags);
		INP_WUNLOCK(inp);
		m_freem(m);
		return (0);
	}

	pdu_length = G_CPL_RX_TLS_CMP_PDULENGTH(be32toh(cpl->pdulength_length));

	so = inp_inpcbtosocket(inp);
	tp = intotcpcb(inp);

#ifdef VERBOSE_TRACES
	CTR6(KTR_CXGBE, "%s: tid %u PDU len %d len %d seq %u, rcv_nxt %u",
	    __func__, tid, pdu_length, len, be32toh(cpl->seq), tp->rcv_nxt);
#endif

	tp->rcv_nxt += pdu_length;
	KASSERT(tp->rcv_wnd >= pdu_length,
	    ("%s: negative window size", __func__));
	tp->rcv_wnd -= pdu_length;

	/* XXX: Not sure what to do about urgent data. */

	/*
	 * The payload of this CPL is the TLS header followed by
	 * additional fields.
	 */
	KASSERT(m->m_len >= sizeof(*tls_hdr_pkt),
	    ("%s: payload too small", __func__));
	tls_hdr_pkt = mtod(m, void *);

	tls_data = mbufq_dequeue(&toep->ulp_pdu_reclaimq);
	if (tls_data != NULL) {
		KASSERT(be32toh(cpl->seq) == tls_data->m_pkthdr.tls_tcp_seq,
		    ("%s: sequence mismatch", __func__));
	}

	/* Report decryption errors as EBADMSG. */
	if ((tls_hdr_pkt->res_to_mac_error & M_TLSRX_HDR_PKT_ERROR) != 0) {
		CTR4(KTR_CXGBE, "%s: tid %u TLS error %#x ddp_vld %#x",
		    __func__, toep->tid, tls_hdr_pkt->res_to_mac_error,
		    be32toh(cpl->ddp_valid));
		m_freem(m);
		m_freem(tls_data);

		CURVNET_SET(toep->vnet);
		so->so_error = EBADMSG;
		sorwakeup(so);

		INP_WUNLOCK(inp);
		CURVNET_RESTORE();

		return (0);
	}

	/* Handle data received after the socket is closed. */
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) {
		struct epoch_tracker et;

		CTR3(KTR_CXGBE, "%s: tid %u, excess rx (%d bytes)",
		    __func__, tid, pdu_length);
		m_freem(m);
		m_freem(tls_data);
		SOCKBUF_UNLOCK(sb);
		INP_WUNLOCK(inp);

		CURVNET_SET(toep->vnet);
		NET_EPOCH_ENTER(et);
		INP_WLOCK(inp);
		tp = tcp_drop(tp, ECONNRESET);
		if (tp != NULL)
			INP_WUNLOCK(inp);
		NET_EPOCH_EXIT(et);
		CURVNET_RESTORE();

		return (0);
	}

	/*
	 * If there is any data in the 'sb_mtls' chain of the socket
	 * or we aren't able to allocate the control mbuf, append the
	 * record as a CSUM_TLS_DECRYPTED packet to 'sb_mtls' rather
	 * than as a decrypted record to 'sb_m'.
	 */
	if (sb->sb_mtls != NULL)
		control = NULL;
	else
		control = sbcreatecontrol(NULL, sizeof(*tgr), TLS_GET_RECORD,
		    IPPROTO_TCP, M_NOWAIT);

	if (control != NULL) {
		tgr = (struct tls_get_record *)
		    CMSG_DATA(mtod(control, struct cmsghdr *));
		memset(tgr, 0, sizeof(*tgr));
		tgr->tls_type = tls_hdr_pkt->type;
		tgr->tls_vmajor = be16toh(tls_hdr_pkt->version) >> 8;
		tgr->tls_vminor = be16toh(tls_hdr_pkt->version) & 0xff;
		if (tls_data != NULL) {
			m_last(tls_data)->m_flags |= M_EOR;
			tgr->tls_length = htobe16(tls_data->m_pkthdr.len);
		} else
			tgr->tls_length = 0;

		m_freem(m);
		m = tls_data;
	} else {
		M_ASSERTPKTHDR(m);

		/* It's ok that any explicit IV is missing. */
		m->m_len = sb->sb_tls_info->params.tls_hlen;
		m->m_pkthdr.csum_flags |= CSUM_TLS_DECRYPTED;
		m->m_pkthdr.len = m->m_len;
		if (tls_data != NULL) {
			m->m_pkthdr.len += tls_data->m_pkthdr.len;
			m_demote_pkthdr(tls_data);
			m->m_next = tls_data;
		}

		/*
		 * Grow the chain by the trailer, but without
		 * contents.  The trailer will be thrown away by
		 * ktls_decrypt.  Note that ktls_decrypt assumes the
		 * trailer is tls_tlen bytes long, so append that many
		 * bytes not the actual trailer size computed from
		 * pdu_length.
		 */
		trailer_len = sb->sb_tls_info->params.tls_tlen;
		if (tls_data != NULL) {
			m_last(tls_data)->m_len += trailer_len;
			tls_data = NULL;
		} else
			m->m_len += trailer_len;
		m->m_pkthdr.len += trailer_len;
		tls_hdr_pkt->length = htobe16(m->m_pkthdr.len -
		    sizeof(struct tls_record_layer));
	}

	/* receive buffer autosize */
	MPASS(toep->vnet == so->so_vnet);
	CURVNET_SET(toep->vnet);
	if (sb->sb_flags & SB_AUTOSIZE &&
	    V_tcp_do_autorcvbuf &&
	    sb->sb_hiwat < V_tcp_autorcvbuf_max &&
	    m->m_pkthdr.len > (sbspace(sb) / 8 * 7)) {
		unsigned int hiwat = sb->sb_hiwat;
		unsigned int newsize = min(hiwat + sc->tt.autorcvbuf_inc,
		    V_tcp_autorcvbuf_max);

		if (!sbreserve_locked(so, SO_RCV, newsize, NULL))
			sb->sb_flags &= ~SB_AUTOSIZE;
	}

	if (control != NULL)
		sbappendcontrol_locked(sb, m, control, 0);
	else
		sbappendstream_locked(sb, m, 0);
	rx_credits = sbspace(sb) > tp->rcv_wnd ? sbspace(sb) - tp->rcv_wnd : 0;
#ifdef VERBOSE_TRACES
	CTR4(KTR_CXGBE, "%s: tid %u rx_credits %u rcv_wnd %u",
	    __func__, tid, rx_credits, tp->rcv_wnd);
#endif
	if (rx_credits > 0 && sbused(sb) + tp->rcv_wnd < sb->sb_lowat) {
		rx_credits = send_rx_credits(sc, toep, rx_credits);
		tp->rcv_wnd += rx_credits;
		tp->rcv_adv += rx_credits;
	}

	sorwakeup_locked(so);
	SOCKBUF_UNLOCK_ASSERT(sb);

	INP_WUNLOCK(inp);
	CURVNET_RESTORE();
	return (0);
}

void
do_rx_data_tls(const struct cpl_rx_data *cpl, struct toepcb *toep,
    struct mbuf *m)
{
	struct inpcb *inp = toep->inp;
	struct tls_ofld_info *tls_ofld = &toep->tls;
	struct tls_hdr *hdr;
	struct tcpcb *tp;
	struct socket *so;
	struct sockbuf *sb;
	int len, rx_credits;

	len = m->m_pkthdr.len;

	INP_WLOCK_ASSERT(inp);

	so = inp_inpcbtosocket(inp);
	tp = intotcpcb(inp);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	CURVNET_SET(toep->vnet);

	tp->rcv_nxt += len;
	KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__));
	tp->rcv_wnd -= len;

	/* Do we have a full TLS header? */
	if (len < sizeof(*hdr)) {
		CTR3(KTR_CXGBE, "%s: tid %u len %d: too short for a TLS header",
		    __func__, toep->tid, len);
		so->so_error = EMSGSIZE;
		goto out;
	}
	hdr = mtod(m, struct tls_hdr *);

	/* Is the header valid? */
	if (be16toh(hdr->version) != tls_ofld->rx_version) {
		CTR3(KTR_CXGBE, "%s: tid %u invalid version %04x",
		    __func__, toep->tid, be16toh(hdr->version));
		so->so_error = EINVAL;
		goto out;
	}
	if (be16toh(hdr->length) < sizeof(*hdr)) {
		CTR3(KTR_CXGBE, "%s: tid %u invalid length %u",
		    __func__, toep->tid, be16toh(hdr->length));
		so->so_error = EBADMSG;
		goto out;
	}

	/* Did we get a truncated record? */
	if (len < be16toh(hdr->length)) {
		CTR4(KTR_CXGBE, "%s: tid %u truncated TLS record (%d vs %u)",
		    __func__, toep->tid, len, be16toh(hdr->length));

		so->so_error = EMSGSIZE;
		goto out;
	}

	/* Is the header type unknown? */
	switch (hdr->type) {
	case CONTENT_TYPE_CCS:
	case CONTENT_TYPE_ALERT:
	case CONTENT_TYPE_APP_DATA:
	case CONTENT_TYPE_HANDSHAKE:
		break;
	default:
		CTR3(KTR_CXGBE, "%s: tid %u invalid TLS record type %u",
		    __func__, toep->tid, hdr->type);
		so->so_error = EBADMSG;
		goto out;
	}

	/*
	 * Just punt.  Although this could fall back to software
	 * decryption, this case should never really happen.
	 */
	CTR4(KTR_CXGBE, "%s: tid %u dropping TLS record type %u, length %u",
	    __func__, toep->tid, hdr->type, be16toh(hdr->length));
	so->so_error = EBADMSG;

out:
	/*
	 * This connection is going to die anyway, so probably don't
	 * need to bother with returning credits.
	 */
	rx_credits = sbspace(sb) > tp->rcv_wnd ? sbspace(sb) - tp->rcv_wnd : 0;
#ifdef VERBOSE_TRACES
	CTR4(KTR_CXGBE, "%s: tid %u rx_credits %u rcv_wnd %u",
	    __func__, toep->tid, rx_credits, tp->rcv_wnd);
#endif
	if (rx_credits > 0 && sbused(sb) + tp->rcv_wnd < sb->sb_lowat) {
		rx_credits = send_rx_credits(toep->vi->adapter, toep,
		    rx_credits);
		tp->rcv_wnd += rx_credits;
		tp->rcv_adv += rx_credits;
	}

	sorwakeup_locked(so);
	SOCKBUF_UNLOCK_ASSERT(sb);

	INP_WUNLOCK(inp);
	CURVNET_RESTORE();

	m_freem(m);
}

/* SET_TCB_FIELD sent as a ULP command looks like this */
#define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))

static inline void *
mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, struct toepcb *toep,
    uint64_t word, uint64_t mask, uint64_t val)
{
	struct ulptx_idata *ulpsc;
	struct cpl_set_tcb_field_core *req;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*req));

	req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, toep->tid));
	req->reply_ctrl = htobe16(V_NO_REPLY(1) |
	    V_QUEUENO(toep->ofld_rxq->iq.abs_id));
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
	req->mask = htobe64(mask);
	req->val = htobe64(val);

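	/*
	 * ULPTX sub-commands must be 16-byte aligned, so pad the
	 * request out with a NOOP when its length is not a multiple
	 * of 16.
	 */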
	ulpsc = (struct ulptx_idata *)(req + 1);
	if (LEN__SET_TCB_FIELD_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
		return (ulpsc + 1);
	}
	return (ulpsc);
}

/*
 * Send a work request setting multiple TCB fields to enable
 * ULP_MODE_TLS.
 */
static void
tls_update_tcb(struct adapter *sc, struct toepcb *toep, uint64_t seqno)
{
	struct wrqe *wr;
	struct work_request_hdr *wrh;
	struct ulp_txpkt *ulpmc;
	int fields, key_offset, len;

	KASSERT(ulp_mode(toep) == ULP_MODE_NONE,
	    ("%s: tid %d already ULP_MODE_TLS", __func__, toep->tid));

	fields = 0;

	/* 2 writes for the overlay region */
	fields += 2;

	/* W_TCB_TLS_SEQ */
	fields++;

	/* W_TCB_ULP_RAW */
	fields++;

	/* W_TCB_ULP_TYPE */
	fields++;

	/* W_TCB_T_FLAGS */
	fields++;

	len = sizeof(*wrh) + fields * roundup2(LEN__SET_TCB_FIELD_ULP, 16);
	KASSERT(len <= SGE_MAX_WR_LEN,
	    ("%s: WR with %d TCB field updates too large", __func__, fields));

	wr = alloc_wrqe(len, toep->ctrlq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: out of memory", __func__);
	}

	wrh = wrtod(wr);
	INIT_ULPTX_WRH(wrh, len, 1, 0);	/* atomic */
	ulpmc = (struct ulp_txpkt *)(wrh + 1);

	/*
	 * Clear the TLS overlay region: 1023:832.
	 *
	 * Words 26/27 are always set to zero.  Words 28/29
	 * contain seqno and are set when enabling TLS
	 * decryption.  Word 30 is zero and Word 31 contains
	 * the keyid.
	 */
	ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, 26,
	    0xffffffffffffffff, 0);

	/*
	 * RX key tags are an index into the key portion of MA
	 * memory stored as an offset from the base address in
	 * units of 64 bytes.
	 */
	key_offset = toep->tls.rx_key_addr - sc->vres.key.start;
	ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, 30,
	    0xffffffffffffffff,
	    (uint64_t)V_TCB_RX_TLS_KEY_TAG(key_offset / 64) << 32);

	CTR3(KTR_CXGBE, "%s: tid %d enable TLS seqno %lu", __func__,
	    toep->tid, seqno);
	ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_TLS_SEQ,
	    V_TCB_TLS_SEQ(M_TCB_TLS_SEQ), V_TCB_TLS_SEQ(seqno));
	ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_ULP_RAW,
	    V_TCB_ULP_RAW(M_TCB_ULP_RAW),
	    V_TCB_ULP_RAW((V_TF_TLS_KEY_SIZE(3) | V_TF_TLS_CONTROL(1) |
	    V_TF_TLS_ACTIVE(1) | V_TF_TLS_ENABLE(1))));

	toep->flags &= ~TPF_TLS_STARTING;
	toep->flags |= TPF_TLS_RECEIVE;

	/* Set the ULP mode to ULP_MODE_TLS. */
	toep->params.ulp_mode = ULP_MODE_TLS;
	ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_ULP_TYPE,
	    V_TCB_ULP_TYPE(M_TCB_ULP_TYPE),
	    V_TCB_ULP_TYPE(ULP_MODE_TLS));

	/* Clear TF_RX_QUIESCE. */
	ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_T_FLAGS,
	    V_TF_RX_QUIESCE(1), 0);

	t4_wrq_tx(sc, wr);
}

/*
 * Examine the pending data in the socket buffer and either enable TLS
 * RX or request more encrypted data.
 */
static void
tls_check_rx_sockbuf(struct adapter *sc, struct toepcb *toep,
    struct sockbuf *sb)
{
	uint64_t seqno;
	size_t resid;
	bool have_header;

	SOCKBUF_LOCK_ASSERT(sb);
	MPASS(toep->tls.rx_resid == 0);

	have_header = ktls_pending_rx_info(sb, &seqno, &resid);
	CTR5(KTR_CXGBE, "%s: tid %d have_header %d seqno %lu resid %zu",
	    __func__, toep->tid, have_header, seqno, resid);

	/*
	 * If the socket buffer ends in a partial record (either an
	 * incomplete header or fewer payload bytes than the header
	 * calls for), re-enable receive and pause again once more
	 * data arrives so this check can be retried.
	 */
	if (!have_header || resid != 0) {
		CTR(KTR_CXGBE, "%s: tid %d waiting for more data", __func__,
		    toep->tid);
		toep->flags &= ~TPF_TLS_RX_QUIESCED;
		t4_clear_rx_quiesce(toep);
		return;
	}

	tls_update_tcb(sc, toep, seqno);
}

void
tls_received_starting_data(struct adapter *sc, struct toepcb *toep,
    struct sockbuf *sb, int len)
{
	MPASS(toep->flags & TPF_TLS_STARTING);

	/*
	 * A previous call to tls_check_rx_sockbuf needed more data.
	 * Now that more data has arrived, quiesce receive again and
	 * check the state once the quiesce has completed.
	 */
	if ((toep->flags & TPF_TLS_RX_QUIESCED) == 0) {
		CTR(KTR_CXGBE, "%s: tid %d quiescing", __func__, toep->tid);
		toep->flags |= TPF_TLS_RX_QUIESCED;
		t4_set_rx_quiesce(toep);
		return;
	}

	KASSERT(len <= toep->tls.rx_resid,
	    ("%s: received excess bytes %d (waiting for %zu)", __func__, len,
	    toep->tls.rx_resid));
	toep->tls.rx_resid -= len;
	if (toep->tls.rx_resid != 0)
		return;

	tls_check_rx_sockbuf(sc, toep, sb);
}

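/*
 * Handle the CPL_SET_TCB_RPL generated by the TF_RX_QUIESCE update
 * from t4_set_rx_quiesce().  Once the connection has quiesced, examine
 * the socket buffer to decide whether TLS RX can be enabled.
 */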
static int
do_tls_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep;
	struct inpcb *inp;
	struct socket *so;
	struct sockbuf *sb;

	if (cpl->status != CPL_ERR_NONE)
		panic("XXX: tcp_rpl failed: %d", cpl->status);

	toep = lookup_tid(sc, tid);
	inp = toep->inp;
	switch (cpl->cookie) {
	case V_WORD(W_TCB_T_FLAGS) | V_COOKIE(CPL_COOKIE_TOM):
		INP_WLOCK(inp);
		if ((toep->flags & TPF_TLS_STARTING) == 0)
			panic("%s: connection is not starting TLS RX\n",
			    __func__);

		so = inp->inp_socket;
		sb = &so->so_rcv;
		SOCKBUF_LOCK(sb);
		tls_check_rx_sockbuf(sc, toep, sb);
		SOCKBUF_UNLOCK(sb);
		INP_WUNLOCK(inp);
		break;
	default:
		panic("XXX: unknown tcb_rpl offset %#x, cookie %#x",
		    G_WORD(cpl->cookie), G_COOKIE(cpl->cookie));
	}

	return (0);
}

void
t4_tls_mod_load(void)
{

	t4_register_cpl_handler(CPL_TLS_DATA, do_tls_data);
	t4_register_cpl_handler(CPL_RX_TLS_CMP, do_rx_tls_cmp);
	t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, do_tls_tcb_rpl,
	    CPL_COOKIE_TOM);
}

void
t4_tls_mod_unload(void)
{

	t4_register_cpl_handler(CPL_TLS_DATA, NULL);
	t4_register_cpl_handler(CPL_RX_TLS_CMP, NULL);
	t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, NULL, CPL_COOKIE_TOM);
}
#endif	/* TCP_OFFLOAD */
#endif	/* KERN_TLS */