xref: /freebsd/sys/dev/cxgbe/tom/t4_tls.c (revision 59c8e88e72633afbc47a4ace0d2170d00d51f7dc)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2017-2018 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_inet.h"
#include "opt_kern_tls.h"

#include <sys/cdefs.h>
#ifdef KERN_TLS
#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/ktls.h>
#include <sys/sglist.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/systm.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_var.h>
#include <netinet/toecore.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#ifdef TCP_OFFLOAD
#include "common/common.h"
#include "common/t4_tcb.h"
#include "crypto/t4_crypto.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"

/*
 * The TCP sequence number of a CPL_TLS_DATA mbuf is saved here while
 * the mbuf is in the ulp_pdu_reclaimq.
 */
#define	tls_tcp_seq	PH_loc.thirtytwo[0]
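/*
 * (PH_loc is per-packet scratch space in the mbuf packet header that
 * the current owner of the mbuf may reuse; TOM owns these mbufs while
 * they sit on the reclaim queue, so stashing the sequence number there
 * is safe.)
 */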

static void
t4_set_tls_tcb_field(struct toepcb *toep, uint16_t word, uint64_t mask,
    uint64_t val)
{
	struct adapter *sc = td_adapter(toep->td);

	t4_set_tcb_field(sc, &toep->ofld_txq->wrq, toep, word, mask, val, 0, 0);
}

/* TLS and DTLS common routines */
bool
can_tls_offload(struct adapter *sc)
{

	return (sc->tt.tls && sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS);
}

int
tls_tx_key(struct toepcb *toep)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;

	return (tls_ofld->tx_key_addr >= 0);
}

/* Set TF_RX_QUIESCE to pause receive. */
static void
t4_set_rx_quiesce(struct toepcb *toep)
{
	struct adapter *sc = td_adapter(toep->td);

	t4_set_tcb_field(sc, &toep->ofld_txq->wrq, toep, W_TCB_T_FLAGS,
	    V_TF_RX_QUIESCE(1), V_TF_RX_QUIESCE(1), 1, CPL_COOKIE_TOM);
}

/* Clear TF_RX_QUIESCE to re-enable receive. */
static void
t4_clear_rx_quiesce(struct toepcb *toep)
{

	t4_set_tls_tcb_field(toep, W_TCB_T_FLAGS, V_TF_RX_QUIESCE(1), 0);
}

/* TLS/DTLS content type for CPL SFO */
static inline unsigned char
tls_content_type(unsigned char content_type)
{
	switch (content_type) {
	case CONTENT_TYPE_CCS:
		return (CPL_TX_TLS_SFO_TYPE_CCS);
	case CONTENT_TYPE_ALERT:
		return (CPL_TX_TLS_SFO_TYPE_ALERT);
	case CONTENT_TYPE_HANDSHAKE:
		return (CPL_TX_TLS_SFO_TYPE_HANDSHAKE);
	case CONTENT_TYPE_APP_DATA:
		return (CPL_TX_TLS_SFO_TYPE_DATA);
	default:
		return (CPL_TX_TLS_SFO_TYPE_CUSTOM);
	}
}

/* TLS Key memory management */
static void
clear_tls_keyid(struct toepcb *toep)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;
	struct adapter *sc = td_adapter(toep->td);

	if (tls_ofld->rx_key_addr >= 0) {
		t4_free_tls_keyid(sc, tls_ofld->rx_key_addr);
		tls_ofld->rx_key_addr = -1;
	}
	if (tls_ofld->tx_key_addr >= 0) {
		t4_free_tls_keyid(sc, tls_ofld->tx_key_addr);
		tls_ofld->tx_key_addr = -1;
	}
}

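/*
 * Worked example for get_tp_plen_max() below (a sketch; assumes
 * TP_TX_PG_SZ >= 3 * 4096): min(3*4096, TP_TX_PG_SZ) = 12288, and
 * (12288 / 1448) * 1448 = 8 * 1448 = 11584, i.e. the largest multiple
 * of a 1448-byte TCP payload that fits in three pages.  Sessions with
 * records larger than 8192 bytes use the firmware limit FC_TP_PLEN_MAX
 * instead.
 */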
static int
get_tp_plen_max(struct ktls_session *tls)
{
	int plen = ((min(3*4096, TP_TX_PG_SZ))/1448) * 1448;

	return (tls->params.max_frame_len <= 8192 ? plen : FC_TP_PLEN_MAX);
}

/* Send request to get the key-id */
static int
tls_program_key_id(struct toepcb *toep, struct ktls_session *tls,
    int direction)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;
	struct adapter *sc = td_adapter(toep->td);
	struct ofld_tx_sdesc *txsd;
	int keyid;
	struct wrqe *wr;
	struct tls_key_req *kwr;
	struct tls_keyctx *kctx;

#ifdef INVARIANTS
	int kwrlen, kctxlen, len;

	kwrlen = sizeof(*kwr);
	kctxlen = roundup2(sizeof(*kctx), 32);
	len = roundup2(kwrlen + kctxlen, 16);
	MPASS(TLS_KEY_WR_SZ == len);
#endif
	if (toep->txsd_avail == 0)
		return (EAGAIN);

	if ((keyid = t4_alloc_tls_keyid(sc)) < 0) {
		return (ENOSPC);
	}

	wr = alloc_wrqe(TLS_KEY_WR_SZ, &toep->ofld_txq->wrq);
	if (wr == NULL) {
		t4_free_tls_keyid(sc, keyid);
		return (ENOMEM);
	}
	kwr = wrtod(wr);
	memset(kwr, 0, TLS_KEY_WR_SZ);

	t4_write_tlskey_wr(tls, direction, toep->tid, F_FW_WR_COMPL, keyid,
	    kwr);
	kctx = (struct tls_keyctx *)(kwr + 1);
	if (direction == KTLS_TX)
		tls_ofld->tx_key_addr = keyid;
	else
		tls_ofld->rx_key_addr = keyid;
	t4_tls_key_ctx(tls, direction, kctx);

	txsd = &toep->txsd[toep->txsd_pidx];
	txsd->tx_credits = DIV_ROUND_UP(TLS_KEY_WR_SZ, 16);
	txsd->plen = 0;
	toep->tx_credits -= txsd->tx_credits;
	if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
		toep->txsd_pidx = 0;
	toep->txsd_avail--;

	t4_wrq_tx(sc, wr);

	return (0);
}

int
tls_alloc_ktls(struct toepcb *toep, struct ktls_session *tls, int direction)
{
	struct adapter *sc = td_adapter(toep->td);
	int error, explicit_iv_size, mac_first;

	if (!can_tls_offload(sc))
		return (EINVAL);

	if (direction == KTLS_RX) {
		if (ulp_mode(toep) != ULP_MODE_NONE)
			return (EINVAL);
		if ((toep->flags & TPF_TLS_STARTING) != 0)
			return (EINVAL);
	} else {
		switch (ulp_mode(toep)) {
		case ULP_MODE_NONE:
		case ULP_MODE_TLS:
		case ULP_MODE_TCPDDP:
			break;
		default:
			return (EINVAL);
		}
	}

	switch (tls->params.cipher_algorithm) {
	case CRYPTO_AES_CBC:
		/* XXX: Explicitly ignore any provided IV. */
		switch (tls->params.cipher_key_len) {
		case 128 / 8:
		case 192 / 8:
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}
		switch (tls->params.auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
			break;
		default:
			return (EPROTONOSUPPORT);
		}
		explicit_iv_size = AES_BLOCK_LEN;
		mac_first = 1;
		break;
	case CRYPTO_AES_NIST_GCM_16:
		if (tls->params.iv_len != SALT_SIZE) {
			return (EINVAL);
		}
		switch (tls->params.cipher_key_len) {
		case 128 / 8:
		case 192 / 8:
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}
		explicit_iv_size = 8;
		mac_first = 0;
		break;
	default:
		return (EPROTONOSUPPORT);
	}
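	/*
	 * Background on the values above (general TLS facts, not
	 * firmware documentation): TLS 1.1/1.2 AES-CBC records carry a
	 * full 16-byte explicit IV, while AES-GCM records carry an
	 * 8-byte explicit nonce; the iv_len == SALT_SIZE check accepts
	 * only the 4-byte implicit salt for GCM.
	 */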

	/* Only TLS 1.1 and TLS 1.2 are currently supported. */
	if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
	    tls->params.tls_vminor < TLS_MINOR_VER_ONE ||
	    tls->params.tls_vminor > TLS_MINOR_VER_TWO) {
		return (EPROTONOSUPPORT);
	}

	/* Bail if we already have a key. */
	if (direction == KTLS_TX) {
		if (toep->tls.tx_key_addr != -1)
			return (EOPNOTSUPP);
	} else {
		if (toep->tls.rx_key_addr != -1)
			return (EOPNOTSUPP);
	}

	error = tls_program_key_id(toep, tls, direction);
	if (error)
		return (error);

	if (direction == KTLS_TX) {
		toep->tls.scmd0.seqno_numivs =
			(V_SCMD_SEQ_NO_CTRL(3) |
			 V_SCMD_PROTO_VERSION(t4_tls_proto_ver(tls)) |
			 V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
			 V_SCMD_CIPH_AUTH_SEQ_CTRL((mac_first == 0)) |
			 V_SCMD_CIPH_MODE(t4_tls_cipher_mode(tls)) |
			 V_SCMD_AUTH_MODE(t4_tls_auth_mode(tls)) |
			 V_SCMD_HMAC_CTRL(t4_tls_hmac_ctrl(tls)) |
			 V_SCMD_IV_SIZE(explicit_iv_size / 2));

		toep->tls.scmd0.ivgen_hdrlen =
			(V_SCMD_IV_GEN_CTRL(1) |
			 V_SCMD_KEY_CTX_INLINE(0) |
			 V_SCMD_TLS_FRAG_ENABLE(1));

		toep->tls.iv_len = explicit_iv_size;
		toep->tls.frag_size = tls->params.max_frame_len;
		toep->tls.fcplenmax = get_tp_plen_max(tls);
		toep->tls.expn_per_ulp = tls->params.tls_hlen +
		    tls->params.tls_tlen;
		toep->tls.pdus_per_ulp = 1;
		toep->tls.adjusted_plen = toep->tls.expn_per_ulp +
		    tls->params.max_frame_len;
		toep->tls.tx_key_info_size = t4_tls_key_info_size(tls);
	} else {
		toep->flags |= TPF_TLS_STARTING | TPF_TLS_RX_QUIESCED;
		toep->tls.rx_version = tls->params.tls_vmajor << 8 |
		    tls->params.tls_vminor;

		CTR2(KTR_CXGBE, "%s: tid %d setting RX_QUIESCE", __func__,
		    toep->tid);
		t4_set_rx_quiesce(toep);
	}

	return (0);
}

void
tls_init_toep(struct toepcb *toep)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;

	tls_ofld->rx_key_addr = -1;
	tls_ofld->tx_key_addr = -1;
}

void
tls_uninit_toep(struct toepcb *toep)
{

	clear_tls_keyid(toep);
}

#define	MAX_OFLD_TX_CREDITS (SGE_MAX_WR_LEN / 16)
#define	MIN_OFLD_TLSTX_CREDITS(toep)					\
	(howmany(sizeof(struct fw_tlstx_data_wr) +			\
	    sizeof(struct cpl_tx_tls_sfo) + sizeof(struct ulptx_idata) + \
	    sizeof(struct ulptx_sc_memrd) +				\
	    AES_BLOCK_LEN + 1, 16))
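/*
 * A note on the units above (a sketch, not firmware documentation):
 * SGE work-request credits are 16-byte units, so MAX_OFLD_TX_CREDITS
 * is simply the largest WR divided by 16, and MIN_OFLD_TLSTX_CREDITS
 * is the smallest useful TLS TX WR -- the FW_TLSTX_DATA_WR and
 * CPL_TX_TLS_SFO headers, a ULPTX_IDATA/ULPTX_SC_MEMRD pair pointing
 * at the key, and an AES block of explicit IV -- rounded up to
 * credits by howmany().
 */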

static void
write_tlstx_wr(struct fw_tlstx_data_wr *txwr, struct toepcb *toep,
    unsigned int plen, unsigned int expn, uint8_t credits, int shove)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;
	unsigned int len = plen + expn;

	txwr->op_to_immdlen = htobe32(V_WR_OP(FW_TLSTX_DATA_WR) |
	    V_FW_TLSTX_DATA_WR_COMPL(1) |
	    V_FW_TLSTX_DATA_WR_IMMDLEN(0));
	txwr->flowid_len16 = htobe32(V_FW_TLSTX_DATA_WR_FLOWID(toep->tid) |
	    V_FW_TLSTX_DATA_WR_LEN16(credits));
	txwr->plen = htobe32(len);
	txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(ULP_MODE_TLS) |
	    V_TX_URG(0) | /* F_T6_TX_FORCE | */ V_TX_SHOVE(shove));
	txwr->ctxloc_to_exp = htobe32(V_FW_TLSTX_DATA_WR_NUMIVS(1) |
	    V_FW_TLSTX_DATA_WR_EXP(expn) |
	    V_FW_TLSTX_DATA_WR_CTXLOC(TLS_SFO_WR_CONTEXTLOC_DDR) |
	    V_FW_TLSTX_DATA_WR_IVDSGL(0) |
	    V_FW_TLSTX_DATA_WR_KEYSIZE(tls_ofld->tx_key_info_size >> 4));
	txwr->mfs = htobe16(tls_ofld->frag_size);
	txwr->adjustedplen_pkd = htobe16(
	    V_FW_TLSTX_DATA_WR_ADJUSTEDPLEN(tls_ofld->adjusted_plen));
	txwr->expinplenmax_pkd = htobe16(
	    V_FW_TLSTX_DATA_WR_EXPINPLENMAX(tls_ofld->expn_per_ulp));
	txwr->pdusinplenmax_pkd =
	    V_FW_TLSTX_DATA_WR_PDUSINPLENMAX(tls_ofld->pdus_per_ulp);
}

static void
write_tlstx_cpl(struct cpl_tx_tls_sfo *cpl, struct toepcb *toep,
    struct tls_hdr *tls_hdr, unsigned int plen, uint64_t seqno)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;
	int data_type, seglen;

	seglen = plen;
	data_type = tls_content_type(tls_hdr->type);
	cpl->op_to_seg_len = htobe32(V_CPL_TX_TLS_SFO_OPCODE(CPL_TX_TLS_SFO) |
	    V_CPL_TX_TLS_SFO_DATA_TYPE(data_type) |
	    V_CPL_TX_TLS_SFO_CPL_LEN(2) | V_CPL_TX_TLS_SFO_SEG_LEN(seglen));
	cpl->pld_len = htobe32(plen);
	if (data_type == CPL_TX_TLS_SFO_TYPE_CUSTOM)
		cpl->type_protover = htobe32(
		    V_CPL_TX_TLS_SFO_TYPE(tls_hdr->type));
	cpl->seqno_numivs = htobe32(tls_ofld->scmd0.seqno_numivs |
	    V_SCMD_NUM_IVS(1));
	cpl->ivgen_hdrlen = htobe32(tls_ofld->scmd0.ivgen_hdrlen);
	cpl->scmd1 = htobe64(seqno);
}

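/*
 * Worked example for count_ext_pgs_segs() below (illustrative
 * addresses): an M_EXTPG mbuf backed by pages at physical addresses
 * 0x10000, 0x11000 and 0x20000 yields nsegs = 2, since the first two
 * pages are physically contiguous and coalesce into one DMA segment.
 */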
static int
count_ext_pgs_segs(struct mbuf *m)
{
	vm_paddr_t nextpa;
	u_int i, nsegs;

	MPASS(m->m_epg_npgs > 0);
	nsegs = 1;
	nextpa = m->m_epg_pa[0] + PAGE_SIZE;
	for (i = 1; i < m->m_epg_npgs; i++) {
		if (nextpa != m->m_epg_pa[i])
			nsegs++;
		nextpa = m->m_epg_pa[i] + PAGE_SIZE;
	}
	return (nsegs);
}

static void
write_ktlstx_sgl(void *dst, struct mbuf *m, int nsegs)
{
	struct ulptx_sgl *usgl = dst;
	vm_paddr_t pa;
	uint32_t len;
	int i, j;

	KASSERT(nsegs > 0, ("%s: nsegs 0", __func__));

	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
	    V_ULPTX_NSGE(nsegs));

	/* Figure out the first S/G length. */
	pa = m->m_epg_pa[0] + m->m_epg_1st_off;
	usgl->addr0 = htobe64(pa);
	len = m_epg_pagelen(m, 0, m->m_epg_1st_off);
	pa += len;
	for (i = 1; i < m->m_epg_npgs; i++) {
		if (m->m_epg_pa[i] != pa)
			break;
		len += m_epg_pagelen(m, i, 0);
		pa += m_epg_pagelen(m, i, 0);
	}
	usgl->len0 = htobe32(len);
#ifdef INVARIANTS
	nsegs--;
#endif

	j = -1;
	for (; i < m->m_epg_npgs; i++) {
		if (j == -1 || m->m_epg_pa[i] != pa) {
			if (j >= 0)
				usgl->sge[j / 2].len[j & 1] = htobe32(len);
			j++;
#ifdef INVARIANTS
			nsegs--;
#endif
			pa = m->m_epg_pa[i];
			usgl->sge[j / 2].addr[j & 1] = htobe64(pa);
			len = m_epg_pagelen(m, i, 0);
			pa += len;
		} else {
			len += m_epg_pagelen(m, i, 0);
			pa += m_epg_pagelen(m, i, 0);
		}
	}
	if (j >= 0) {
		usgl->sge[j / 2].len[j & 1] = htobe32(len);

		if ((j & 1) == 0)
			usgl->sge[j / 2].len[1] = htobe32(0);
	}
	KASSERT(nsegs == 0, ("%s: nsegs %d, m %p", __func__, nsegs, m));
}

/*
 * Similar to t4_push_frames() but handles sockets that contain TLS
 * record mbufs.
 */
void
t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
{
	struct tls_hdr *thdr;
	struct fw_tlstx_data_wr *txwr;
	struct cpl_tx_tls_sfo *cpl;
	struct ulptx_idata *idata;
	struct ulptx_sc_memrd *memrd;
	struct wrqe *wr;
	struct mbuf *m;
	u_int nsegs, credits, wr_len;
	u_int expn_size;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);
	struct socket *so = inp->inp_socket;
	struct sockbuf *sb = &so->so_snd;
	int tls_size, tx_credits, shove, sowwakeup;
	struct ofld_tx_sdesc *txsd;
	char *buf;

	INP_WLOCK_ASSERT(inp);
	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));

	KASSERT(ulp_mode(toep) == ULP_MODE_NONE ||
	    ulp_mode(toep) == ULP_MODE_TCPDDP || ulp_mode(toep) == ULP_MODE_TLS,
	    ("%s: ulp_mode %u for toep %p", __func__, ulp_mode(toep), toep));
	KASSERT(tls_tx_key(toep),
	    ("%s: TX key not set for toep %p", __func__, toep));

#ifdef VERBOSE_TRACES
	CTR5(KTR_CXGBE, "%s: tid %d toep flags %#x tp flags %#x drop %d",
	    __func__, toep->tid, toep->flags, tp->t_flags, drop);
#endif
	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN))
		return;

#ifdef RATELIMIT
	if (__predict_false(inp->inp_flags2 & INP_RATE_LIMIT_CHANGED) &&
	    (update_tx_rate_limit(sc, toep, so->so_max_pacing_rate) == 0)) {
		inp->inp_flags2 &= ~INP_RATE_LIMIT_CHANGED;
	}
#endif

	/*
	 * This function doesn't resume by itself.  Someone else must clear the
	 * flag and call this function.
	 */
	if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) {
		KASSERT(drop == 0,
		    ("%s: drop (%d) != 0 but tx is suspended", __func__, drop));
		return;
	}

	txsd = &toep->txsd[toep->txsd_pidx];
	for (;;) {
		tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);

		SOCKBUF_LOCK(sb);
		sowwakeup = drop;
		if (drop) {
			sbdrop_locked(sb, drop);
			drop = 0;
		}

		m = sb->sb_sndptr != NULL ? sb->sb_sndptr->m_next : sb->sb_mb;

		/*
		 * Send a FIN if requested, but only if there's no
		 * more data to send.
		 */
		if (m == NULL && toep->flags & TPF_SEND_FIN) {
			if (sowwakeup)
				sowwakeup_locked(so);
			else
				SOCKBUF_UNLOCK(sb);
			SOCKBUF_UNLOCK_ASSERT(sb);
			t4_close_conn(sc, toep);
			return;
		}

		/*
		 * If there is no ready data to send, wait until more
		 * data arrives.
		 */
		if (m == NULL || (m->m_flags & M_NOTAVAIL) != 0) {
			if (sowwakeup)
				sowwakeup_locked(so);
			else
				SOCKBUF_UNLOCK(sb);
			SOCKBUF_UNLOCK_ASSERT(sb);
#ifdef VERBOSE_TRACES
			CTR2(KTR_CXGBE, "%s: tid %d no ready data to send",
			    __func__, toep->tid);
#endif
			return;
		}

		KASSERT(m->m_flags & M_EXTPG, ("%s: mbuf %p is not NOMAP",
		    __func__, m));
		KASSERT(m->m_epg_tls != NULL,
		    ("%s: mbuf %p doesn't have TLS session", __func__, m));

		/* Calculate WR length. */
		wr_len = sizeof(struct fw_tlstx_data_wr) +
		    sizeof(struct cpl_tx_tls_sfo) +
		    sizeof(struct ulptx_idata) + sizeof(struct ulptx_sc_memrd);

		/* Explicit IVs for AES-CBC and AES-GCM are <= 16. */
		MPASS(toep->tls.iv_len <= AES_BLOCK_LEN);
		wr_len += AES_BLOCK_LEN;

		/* Account for SGL in work request length. */
		nsegs = count_ext_pgs_segs(m);
		wr_len += sizeof(struct ulptx_sgl) +
		    ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
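
		/*
		 * Sketch of the SGL sizing above: the ulptx_sgl header
		 * carries the first address/length pair, and each
		 * following ulptx_sge_pair packs two more (two 8-byte
		 * addresses plus two 4-byte lengths = 24 bytes).  For
		 * nsegs = 3 the two extra segments cost
		 * ((3 * 2) / 2 + (2 & 1)) * 8 = 24 bytes, i.e. exactly
		 * one full pair.
		 */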

		/* Not enough credits for this work request. */
		if (howmany(wr_len, 16) > tx_credits) {
			if (sowwakeup)
				sowwakeup_locked(so);
			else
				SOCKBUF_UNLOCK(sb);
			SOCKBUF_UNLOCK_ASSERT(sb);
#ifdef VERBOSE_TRACES
			CTR5(KTR_CXGBE,
	    "%s: tid %d mbuf %p requires %d credits, but only %d available",
			    __func__, toep->tid, m, howmany(wr_len, 16),
			    tx_credits);
#endif
			toep->flags |= TPF_TX_SUSPENDED;
			return;
		}

		/* Shove if there is no additional data pending. */
		shove = ((m->m_next == NULL ||
		    (m->m_next->m_flags & M_NOTAVAIL) != 0)) &&
		    (tp->t_flags & TF_MORETOCOME) == 0;

		if (sb->sb_flags & SB_AUTOSIZE &&
		    V_tcp_do_autosndbuf &&
		    sb->sb_hiwat < V_tcp_autosndbuf_max &&
		    sbused(sb) >= sb->sb_hiwat * 7 / 8) {
			int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc,
			    V_tcp_autosndbuf_max);

			if (!sbreserve_locked(so, SO_SND, newsize, NULL))
				sb->sb_flags &= ~SB_AUTOSIZE;
			else
				sowwakeup = 1;	/* room available */
		}
		if (sowwakeup)
			sowwakeup_locked(so);
		else
			SOCKBUF_UNLOCK(sb);
		SOCKBUF_UNLOCK_ASSERT(sb);

		if (__predict_false(toep->flags & TPF_FIN_SENT))
			panic("%s: excess tx.", __func__);

		wr = alloc_wrqe(roundup2(wr_len, 16), &toep->ofld_txq->wrq);
		if (wr == NULL) {
			/* XXX: how will we recover from this? */
			toep->flags |= TPF_TX_SUSPENDED;
			return;
		}

		thdr = (struct tls_hdr *)&m->m_epg_hdr;
#ifdef VERBOSE_TRACES
		CTR5(KTR_CXGBE, "%s: tid %d TLS record %ju type %d len %#x",
		    __func__, toep->tid, m->m_epg_seqno, thdr->type,
		    m->m_len);
#endif
		txwr = wrtod(wr);
		cpl = (struct cpl_tx_tls_sfo *)(txwr + 1);
		memset(txwr, 0, roundup2(wr_len, 16));
		credits = howmany(wr_len, 16);
		expn_size = m->m_epg_hdrlen +
		    m->m_epg_trllen;
		tls_size = m->m_len - expn_size;
		write_tlstx_wr(txwr, toep, tls_size, expn_size, credits, shove);
		write_tlstx_cpl(cpl, toep, thdr, tls_size, m->m_epg_seqno);

		idata = (struct ulptx_idata *)(cpl + 1);
		idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		idata->len = htobe32(0);
		memrd = (struct ulptx_sc_memrd *)(idata + 1);
		memrd->cmd_to_len = htobe32(V_ULPTX_CMD(ULP_TX_SC_MEMRD) |
		    V_ULP_TX_SC_MORE(1) |
		    V_ULPTX_LEN16(toep->tls.tx_key_info_size >> 4));
		memrd->addr = htobe32(toep->tls.tx_key_addr >> 5);

		/* Copy IV. */
		buf = (char *)(memrd + 1);
		memcpy(buf, thdr + 1, toep->tls.iv_len);
		buf += AES_BLOCK_LEN;

		write_ktlstx_sgl(buf, m, nsegs);

		KASSERT(toep->tx_credits >= credits,
			("%s: not enough credits", __func__));

		toep->tx_credits -= credits;

		tp->snd_nxt += m->m_len;
		tp->snd_max += m->m_len;

		SOCKBUF_LOCK(sb);
		sb->sb_sndptr = m;
		SOCKBUF_UNLOCK(sb);

		toep->flags |= TPF_TX_DATA_SENT;
		if (toep->tx_credits < MIN_OFLD_TLSTX_CREDITS(toep))
			toep->flags |= TPF_TX_SUSPENDED;

		KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
		txsd->plen = m->m_len;
		txsd->tx_credits = credits;
		txsd++;
		if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
			toep->txsd_pidx = 0;
			txsd = &toep->txsd[0];
		}
		toep->txsd_avail--;

		counter_u64_add(toep->ofld_txq->tx_toe_tls_records, 1);
		counter_u64_add(toep->ofld_txq->tx_toe_tls_octets, m->m_len);

		t4_l2t_send(sc, wr, toep->l2te);
	}
}

/*
 * For TLS data we place mbufs received via CPL_TLS_DATA into an mbufq
 * in the TLS offload state.  When CPL_RX_TLS_CMP is received, the
 * completed PDUs are placed into the socket receive buffer.
 *
 * The TLS code reuses the ulp_pdu_reclaimq to hold the pending mbufs.
 */
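/*
 * A rough sketch of that flow, as implemented by the handlers below:
 *
 *   CPL_TLS_DATA  (payload) -> do_tls_data()   -> ulp_pdu_reclaimq
 *   CPL_RX_TLS_CMP (header) -> do_rx_tls_cmp() -> dequeue the payload,
 *                              pair it with a TLS_GET_RECORD control
 *                              message, append to so_rcv
 */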
static int
do_tls_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_tls_data *cpl = mtod(m, const void *);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp;
	int len;

	/* XXX: Should this match do_rx_data instead? */
	KASSERT(!(toep->flags & TPF_SYNQE),
	    ("%s: toep %p claims to be a synq entry", __func__, toep));

	KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));

	/* strip off CPL header */
	m_adj(m, sizeof(*cpl));
	len = m->m_pkthdr.len;

	toep->ofld_rxq->rx_toe_tls_octets += len;

	KASSERT(len == G_CPL_TLS_DATA_LENGTH(be32toh(cpl->length_pkd)),
	    ("%s: payload length mismatch", __func__));

	INP_WLOCK(inp);
	if (inp->inp_flags & INP_DROPPED) {
		CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
		    __func__, tid, len, inp->inp_flags);
		INP_WUNLOCK(inp);
		m_freem(m);
		return (0);
	}

	/* Save TCP sequence number. */
	m->m_pkthdr.tls_tcp_seq = be32toh(cpl->seq);

	if (mbufq_enqueue(&toep->ulp_pdu_reclaimq, m)) {
#ifdef INVARIANTS
		panic("Failed to queue TLS data packet");
#else
		printf("%s: Failed to queue TLS data packet\n", __func__);
		INP_WUNLOCK(inp);
		m_freem(m);
		return (0);
#endif
	}

	tp = intotcpcb(inp);
	tp->t_rcvtime = ticks;

#ifdef VERBOSE_TRACES
	CTR4(KTR_CXGBE, "%s: tid %u len %d seq %u", __func__, tid, len,
	    be32toh(cpl->seq));
#endif

	INP_WUNLOCK(inp);
	return (0);
}

static int
do_rx_tls_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_rx_tls_cmp *cpl = mtod(m, const void *);
	struct tlsrx_hdr_pkt *tls_hdr_pkt;
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp;
	struct socket *so;
	struct sockbuf *sb;
	struct mbuf *tls_data;
	struct tls_get_record *tgr;
	struct mbuf *control;
	int pdu_length, trailer_len;
#if defined(KTR) || defined(INVARIANTS)
	int len;
#endif

	KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
	KASSERT(!(toep->flags & TPF_SYNQE),
	    ("%s: toep %p claims to be a synq entry", __func__, toep));

	/* strip off CPL header */
	m_adj(m, sizeof(*cpl));
#if defined(KTR) || defined(INVARIANTS)
	len = m->m_pkthdr.len;
#endif

	toep->ofld_rxq->rx_toe_tls_records++;

	KASSERT(len == G_CPL_RX_TLS_CMP_LENGTH(be32toh(cpl->pdulength_length)),
	    ("%s: payload length mismatch", __func__));

	INP_WLOCK(inp);
	if (inp->inp_flags & INP_DROPPED) {
		CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
		    __func__, tid, len, inp->inp_flags);
		INP_WUNLOCK(inp);
		m_freem(m);
		return (0);
	}

	pdu_length = G_CPL_RX_TLS_CMP_PDULENGTH(be32toh(cpl->pdulength_length));

	so = inp_inpcbtosocket(inp);
	tp = intotcpcb(inp);

#ifdef VERBOSE_TRACES
	CTR6(KTR_CXGBE, "%s: tid %u PDU len %d len %d seq %u, rcv_nxt %u",
	    __func__, tid, pdu_length, len, be32toh(cpl->seq), tp->rcv_nxt);
#endif

	tp->rcv_nxt += pdu_length;
	KASSERT(tp->rcv_wnd >= pdu_length,
	    ("%s: negative window size", __func__));
	tp->rcv_wnd -= pdu_length;

	/* XXX: Not sure what to do about urgent data. */

	/*
	 * The payload of this CPL is the TLS header followed by
	 * additional fields.
	 */
	KASSERT(m->m_len >= sizeof(*tls_hdr_pkt),
	    ("%s: payload too small", __func__));
	tls_hdr_pkt = mtod(m, void *);

	tls_data = mbufq_dequeue(&toep->ulp_pdu_reclaimq);
	if (tls_data != NULL) {
		KASSERT(be32toh(cpl->seq) == tls_data->m_pkthdr.tls_tcp_seq,
		    ("%s: sequence mismatch", __func__));
	}

	/* Report decryption errors as EBADMSG. */
	if ((tls_hdr_pkt->res_to_mac_error & M_TLSRX_HDR_PKT_ERROR) != 0) {
		CTR4(KTR_CXGBE, "%s: tid %u TLS error %#x ddp_vld %#x",
		    __func__, toep->tid, tls_hdr_pkt->res_to_mac_error,
		    be32toh(cpl->ddp_valid));
		m_freem(m);
		m_freem(tls_data);

		CURVNET_SET(toep->vnet);
		so->so_error = EBADMSG;
		sorwakeup(so);

		INP_WUNLOCK(inp);
		CURVNET_RESTORE();

		return (0);
	}

	/* Handle data received after the socket is closed. */
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) {
		struct epoch_tracker et;

		CTR3(KTR_CXGBE, "%s: tid %u, excess rx (%d bytes)",
		    __func__, tid, pdu_length);
		m_freem(m);
		m_freem(tls_data);
		SOCKBUF_UNLOCK(sb);
		INP_WUNLOCK(inp);

		CURVNET_SET(toep->vnet);
		NET_EPOCH_ENTER(et);
		INP_WLOCK(inp);
		tp = tcp_drop(tp, ECONNRESET);
		if (tp != NULL)
			INP_WUNLOCK(inp);
		NET_EPOCH_EXIT(et);
		CURVNET_RESTORE();

		return (0);
	}

	/*
	 * If there is any data in the 'sb_mtls' chain of the socket
	 * or we aren't able to allocate the control mbuf, append the
	 * record as a CSUM_TLS_DECRYPTED packet to 'sb_mtls' rather
	 * than as a decrypted record to 'sb_m'.
	 */
	if (sb->sb_mtls != NULL)
		control = NULL;
	else
		control = sbcreatecontrol(NULL, sizeof(*tgr), TLS_GET_RECORD,
		    IPPROTO_TCP, M_NOWAIT);

	if (control != NULL) {
		tgr = (struct tls_get_record *)
		    CMSG_DATA(mtod(control, struct cmsghdr *));
		memset(tgr, 0, sizeof(*tgr));
		tgr->tls_type = tls_hdr_pkt->type;
		tgr->tls_vmajor = be16toh(tls_hdr_pkt->version) >> 8;
		tgr->tls_vminor = be16toh(tls_hdr_pkt->version) & 0xff;
		if (tls_data != NULL) {
			m_last(tls_data)->m_flags |= M_EOR;
			tgr->tls_length = htobe16(tls_data->m_pkthdr.len);
		} else
			tgr->tls_length = 0;

		m_freem(m);
		m = tls_data;
	} else {
		M_ASSERTPKTHDR(m);

		/* It's ok that any explicit IV is missing. */
		m->m_len = sb->sb_tls_info->params.tls_hlen;
		m->m_pkthdr.csum_flags |= CSUM_TLS_DECRYPTED;
		m->m_pkthdr.len = m->m_len;
		if (tls_data != NULL) {
			m->m_pkthdr.len += tls_data->m_pkthdr.len;
			m_demote_pkthdr(tls_data);
			m->m_next = tls_data;
		}

		/*
		 * Grow the chain by the trailer, but without
		 * contents.  The trailer will be thrown away by
		 * ktls_decrypt.  Note that ktls_decrypt assumes the
		 * trailer is tls_tlen bytes long, so append that many
		 * bytes not the actual trailer size computed from
		 * pdu_length.
		 */
		trailer_len = sb->sb_tls_info->params.tls_tlen;
		if (tls_data != NULL) {
			m_last(tls_data)->m_len += trailer_len;
			tls_data = NULL;
		} else
			m->m_len += trailer_len;
		m->m_pkthdr.len += trailer_len;
		tls_hdr_pkt->length = htobe16(m->m_pkthdr.len -
		    sizeof(struct tls_record_layer));
	}

	/* receive buffer autosize */
	MPASS(toep->vnet == so->so_vnet);
	CURVNET_SET(toep->vnet);
	if (sb->sb_flags & SB_AUTOSIZE &&
	    V_tcp_do_autorcvbuf &&
	    sb->sb_hiwat < V_tcp_autorcvbuf_max &&
	    m->m_pkthdr.len > (sbspace(sb) / 8 * 7)) {
		unsigned int hiwat = sb->sb_hiwat;
		unsigned int newsize = min(hiwat + sc->tt.autorcvbuf_inc,
		    V_tcp_autorcvbuf_max);

		if (!sbreserve_locked(so, SO_RCV, newsize, NULL))
			sb->sb_flags &= ~SB_AUTOSIZE;
	}

	if (control != NULL)
		sbappendcontrol_locked(sb, m, control, 0);
	else
		sbappendstream_locked(sb, m, 0);
	t4_rcvd_locked(&toep->td->tod, tp);

	sorwakeup_locked(so);
	SOCKBUF_UNLOCK_ASSERT(sb);

	INP_WUNLOCK(inp);
	CURVNET_RESTORE();
	return (0);
}

void
do_rx_data_tls(const struct cpl_rx_data *cpl, struct toepcb *toep,
    struct mbuf *m)
{
	struct inpcb *inp = toep->inp;
	struct tls_ofld_info *tls_ofld = &toep->tls;
	struct tls_hdr *hdr;
	struct tcpcb *tp;
	struct socket *so;
	struct sockbuf *sb;
	int len;

	len = m->m_pkthdr.len;

	INP_WLOCK_ASSERT(inp);

	so = inp_inpcbtosocket(inp);
	tp = intotcpcb(inp);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	CURVNET_SET(toep->vnet);

	tp->rcv_nxt += len;
	KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__));
	tp->rcv_wnd -= len;

	/* Do we have a full TLS header? */
	if (len < sizeof(*hdr)) {
		CTR3(KTR_CXGBE, "%s: tid %u len %d: too short for a TLS header",
		    __func__, toep->tid, len);
		so->so_error = EMSGSIZE;
		goto out;
	}
	hdr = mtod(m, struct tls_hdr *);

	/* Is the header valid? */
	if (be16toh(hdr->version) != tls_ofld->rx_version) {
		CTR3(KTR_CXGBE, "%s: tid %u invalid version %04x",
		    __func__, toep->tid, be16toh(hdr->version));
		so->so_error = EINVAL;
		goto out;
	}
	if (be16toh(hdr->length) < sizeof(*hdr)) {
		CTR3(KTR_CXGBE, "%s: tid %u invalid length %u",
		    __func__, toep->tid, be16toh(hdr->length));
		so->so_error = EBADMSG;
		goto out;
	}

	/* Did we get a truncated record? */
	if (len < be16toh(hdr->length)) {
		CTR4(KTR_CXGBE, "%s: tid %u truncated TLS record (%d vs %u)",
		    __func__, toep->tid, len, be16toh(hdr->length));

		so->so_error = EMSGSIZE;
		goto out;
	}

	/* Is the header type unknown? */
	switch (hdr->type) {
	case CONTENT_TYPE_CCS:
	case CONTENT_TYPE_ALERT:
	case CONTENT_TYPE_APP_DATA:
	case CONTENT_TYPE_HANDSHAKE:
		break;
	default:
		CTR3(KTR_CXGBE, "%s: tid %u invalid TLS record type %u",
		    __func__, toep->tid, hdr->type);
		so->so_error = EBADMSG;
		goto out;
	}

	/*
	 * Just punt.  Although this could fall back to software
	 * decryption, this case should never really happen.
	 */
	CTR4(KTR_CXGBE, "%s: tid %u dropping TLS record type %u, length %u",
	    __func__, toep->tid, hdr->type, be16toh(hdr->length));
	so->so_error = EBADMSG;

out:
	sorwakeup_locked(so);
	SOCKBUF_UNLOCK_ASSERT(sb);

	INP_WUNLOCK(inp);
	CURVNET_RESTORE();

	m_freem(m);
}

/* SET_TCB_FIELD sent as a ULP command looks like this */
#define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))
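/*
 * With the usual T4 structure sizes (8-byte ulp_txpkt, 8-byte
 * ulptx_idata, 24-byte cpl_set_tcb_field_core -- an assumption; check
 * t4_msg.h), LEN__SET_TCB_FIELD_ULP is 40 bytes, so
 * mk_set_tcb_field_ulp() below pads each command out to 48 bytes with
 * a trailing ULP_TX_SC_NOOP to keep commands 16-byte aligned.
 */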

static inline void *
mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, struct toepcb *toep,
    uint64_t word, uint64_t mask, uint64_t val)
{
	struct ulptx_idata *ulpsc;
	struct cpl_set_tcb_field_core *req;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*req));

	req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, toep->tid));
	req->reply_ctrl = htobe16(V_NO_REPLY(1) |
	    V_QUEUENO(toep->ofld_rxq->iq.abs_id));
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
	req->mask = htobe64(mask);
	req->val = htobe64(val);

	ulpsc = (struct ulptx_idata *)(req + 1);
	if (LEN__SET_TCB_FIELD_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
		return (ulpsc + 1);
	}
	return (ulpsc);
}

/*
 * Send a work request setting multiple TCB fields to enable
 * ULP_MODE_TLS.
 */
static void
tls_update_tcb(struct adapter *sc, struct toepcb *toep, uint64_t seqno)
{
	struct wrqe *wr;
	struct work_request_hdr *wrh;
	struct ulp_txpkt *ulpmc;
	int fields, key_offset, len;

	KASSERT(ulp_mode(toep) == ULP_MODE_NONE,
	    ("%s: tid %d already ULP_MODE_TLS", __func__, toep->tid));

	fields = 0;

	/* 2 writes for the overlay region */
	fields += 2;

	/* W_TCB_TLS_SEQ */
	fields++;

	/* W_TCB_ULP_RAW */
	fields++;

	/* W_TCB_ULP_TYPE */
	fields++;

	/* W_TCB_T_FLAGS */
	fields++;

	len = sizeof(*wrh) + fields * roundup2(LEN__SET_TCB_FIELD_ULP, 16);
	KASSERT(len <= SGE_MAX_WR_LEN,
	    ("%s: WR with %d TCB field updates too large", __func__, fields));

	wr = alloc_wrqe(len, toep->ctrlq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: out of memory", __func__);
	}

	wrh = wrtod(wr);
	INIT_ULPTX_WRH(wrh, len, 1, 0);	/* atomic */
	ulpmc = (struct ulp_txpkt *)(wrh + 1);

	/*
	 * Clear the TLS overlay region: 1023:832.
	 *
	 * Words 26/27 are always set to zero.  Words 28/29
	 * contain seqno and are set when enabling TLS
	 * decryption.  Word 30 is zero and Word 31 contains
	 * the keyid.
	 */
	ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, 26,
	    0xffffffffffffffff, 0);

	/*
	 * RX key tags are an index into the key portion of MA
	 * memory stored as an offset from the base address in
	 * units of 64 bytes.
	 */
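	/*
	 * E.g. (illustrative values): a key written 128 bytes past
	 * sc->vres.key.start yields key_offset = 128 and a tag of
	 * 128 / 64 = 2.
	 */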
	key_offset = toep->tls.rx_key_addr - sc->vres.key.start;
	ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, 30,
	    0xffffffffffffffff,
	    (uint64_t)V_TCB_RX_TLS_KEY_TAG(key_offset / 64) << 32);

	CTR3(KTR_CXGBE, "%s: tid %d enable TLS seqno %lu", __func__,
	    toep->tid, seqno);
	ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_TLS_SEQ,
	    V_TCB_TLS_SEQ(M_TCB_TLS_SEQ), V_TCB_TLS_SEQ(seqno));
	ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_ULP_RAW,
	    V_TCB_ULP_RAW(M_TCB_ULP_RAW),
	    V_TCB_ULP_RAW((V_TF_TLS_KEY_SIZE(3) | V_TF_TLS_CONTROL(1) |
	    V_TF_TLS_ACTIVE(1) | V_TF_TLS_ENABLE(1))));

	toep->flags &= ~TPF_TLS_STARTING;
	toep->flags |= TPF_TLS_RECEIVE;

	/* Set the ULP mode to ULP_MODE_TLS. */
	toep->params.ulp_mode = ULP_MODE_TLS;
	ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_ULP_TYPE,
	    V_TCB_ULP_TYPE(M_TCB_ULP_TYPE),
	    V_TCB_ULP_TYPE(ULP_MODE_TLS));

	/* Clear TF_RX_QUIESCE. */
	ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_T_FLAGS,
	    V_TF_RX_QUIESCE(1), 0);

	t4_wrq_tx(sc, wr);
}

/*
 * Examine the pending data in the socket buffer and either enable TLS
 * RX or request more encrypted data.
 */
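/*
 * Rough state flow for enabling TLS RX (a sketch of the code in this
 * file, not firmware documentation):
 *
 *   tls_alloc_ktls(KTLS_RX)  -> set TPF_TLS_STARTING, quiesce RX
 *   do_tls_tcb_rpl()         -> quiesce done, check the sockbuf
 *   tls_check_rx_sockbuf()   -> at a record boundary?
 *       yes: tls_update_tcb() switches the TCB to ULP_MODE_TLS
 *       no:  clear the quiesce; tls_received_starting_data() will
 *            quiesce again once enough new data has arrived
 */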
static void
tls_check_rx_sockbuf(struct adapter *sc, struct toepcb *toep,
    struct sockbuf *sb)
{
	uint64_t seqno;
	size_t resid;
	bool have_header;

	SOCKBUF_LOCK_ASSERT(sb);
	MPASS(toep->tls.rx_resid == 0);

	have_header = ktls_pending_rx_info(sb, &seqno, &resid);
	CTR5(KTR_CXGBE, "%s: tid %d have_header %d seqno %lu resid %zu",
	    __func__, toep->tid, have_header, seqno, resid);

	/*
	 * If we have a partial header or still need more payload bytes
	 * to complete the current TLS record, re-enable receive and
	 * quiesce again to retry once more data arrives.
	 */
	if (!have_header || resid != 0) {
		CTR(KTR_CXGBE, "%s: tid %d waiting for more data", __func__,
		    toep->tid);
		toep->flags &= ~TPF_TLS_RX_QUIESCED;
		t4_clear_rx_quiesce(toep);
		return;
	}

	tls_update_tcb(sc, toep, seqno);
}

void
tls_received_starting_data(struct adapter *sc, struct toepcb *toep,
    struct sockbuf *sb, int len)
{
	MPASS(toep->flags & TPF_TLS_STARTING);

	/*
	 * A previous call to tls_check_rx_sockbuf needed more data.
	 * Now that more data has arrived, quiesce receive again and
	 * check the state once the quiesce has completed.
	 */
	if ((toep->flags & TPF_TLS_RX_QUIESCED) == 0) {
		CTR(KTR_CXGBE, "%s: tid %d quiescing", __func__, toep->tid);
		toep->flags |= TPF_TLS_RX_QUIESCED;
		t4_set_rx_quiesce(toep);
		return;
	}

	KASSERT(len <= toep->tls.rx_resid,
	    ("%s: received excess bytes %d (waiting for %zu)", __func__, len,
	    toep->tls.rx_resid));
	toep->tls.rx_resid -= len;
	if (toep->tls.rx_resid != 0)
		return;

	tls_check_rx_sockbuf(sc, toep, sb);
}

static int
do_tls_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep;
	struct inpcb *inp;
	struct socket *so;
	struct sockbuf *sb;

	if (cpl->status != CPL_ERR_NONE)
		panic("XXX: tcp_rpl failed: %d", cpl->status);

	toep = lookup_tid(sc, tid);
	inp = toep->inp;
	switch (cpl->cookie) {
	case V_WORD(W_TCB_T_FLAGS) | V_COOKIE(CPL_COOKIE_TOM):
		INP_WLOCK(inp);
		if ((toep->flags & TPF_TLS_STARTING) == 0)
			panic("%s: connection is not starting TLS RX\n",
			    __func__);

		so = inp->inp_socket;
		sb = &so->so_rcv;
		SOCKBUF_LOCK(sb);
		tls_check_rx_sockbuf(sc, toep, sb);
		SOCKBUF_UNLOCK(sb);
		INP_WUNLOCK(inp);
		break;
	default:
		panic("XXX: unknown tcb_rpl offset %#x, cookie %#x",
		    G_WORD(cpl->cookie), G_COOKIE(cpl->cookie));
	}

	return (0);
}

void
t4_tls_mod_load(void)
{

	t4_register_cpl_handler(CPL_TLS_DATA, do_tls_data);
	t4_register_cpl_handler(CPL_RX_TLS_CMP, do_rx_tls_cmp);
	t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, do_tls_tcb_rpl,
	    CPL_COOKIE_TOM);
}

void
t4_tls_mod_unload(void)
{

	t4_register_cpl_handler(CPL_TLS_DATA, NULL);
	t4_register_cpl_handler(CPL_RX_TLS_CMP, NULL);
	t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, NULL, CPL_COOKIE_TOM);
}
#endif	/* TCP_OFFLOAD */
#endif	/* KERN_TLS */