xref: /freebsd/sys/dev/cxgbe/tom/t4_tls.c (revision c93b6e5fa24ba172ab271432c6692f9cc604e15a)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2017-2018 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_inet.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/sglist.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/systm.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_var.h>
#include <netinet/toecore.h>

#ifdef TCP_OFFLOAD
#include "common/common.h"
#include "common/t4_tcb.h"
#include "crypto/t4_crypto.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"

/*
 * The TCP sequence number of a CPL_TLS_DATA mbuf is saved here while
 * the mbuf is in the ulp_pdu_reclaimq.
 */
#define	tls_tcp_seq	PH_loc.thirtytwo[0]

/*
 * Handshake lock used for the handshake timer.  Having a global lock
 * is perhaps not ideal, but it avoids having to use callout_drain()
 * in tls_uninit_toep() which can't block.  Also, the timer shouldn't
 * actually fire for most connections.
 */
static struct mtx tls_handshake_lock;

static void
t4_set_tls_tcb_field(struct toepcb *toep, uint16_t word, uint64_t mask,
    uint64_t val)
{
	struct adapter *sc = td_adapter(toep->td);

	t4_set_tcb_field(sc, toep->ofld_txq, toep, word, mask, val, 0, 0);
}

/* TLS and DTLS common routines */
bool
can_tls_offload(struct adapter *sc)
{

	return (sc->tt.tls && sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS);
}

int
tls_tx_key(struct toepcb *toep)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;

	return (tls_ofld->tx_key_addr >= 0);
}

int
tls_rx_key(struct toepcb *toep)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;

	return (tls_ofld->rx_key_addr >= 0);
}

static int
key_size(struct toepcb *toep)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;

	return ((tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_IMMEDIATE) ?
		tls_ofld->k_ctx.tx_key_info_size : KEY_IN_DDR_SIZE);
}

/* Set TLS Key-Id in TCB */
static void
t4_set_tls_keyid(struct toepcb *toep, unsigned int key_id)
{

	t4_set_tls_tcb_field(toep, W_TCB_RX_TLS_KEY_TAG,
			 V_TCB_RX_TLS_KEY_TAG(M_TCB_RX_TLS_BUF_TAG),
			 V_TCB_RX_TLS_KEY_TAG(key_id));
}

/* Clear TF_RX_QUIESCE to re-enable receive. */
static void
t4_clear_rx_quiesce(struct toepcb *toep)
{

	t4_set_tls_tcb_field(toep, W_TCB_T_FLAGS, V_TF_RX_QUIESCE(1), 0);
}

static void
tls_clr_ofld_mode(struct toepcb *toep)
{

	tls_stop_handshake_timer(toep);

	/* Operate in PDU extraction mode only. */
	t4_set_tls_tcb_field(toep, W_TCB_ULP_RAW,
	    V_TCB_ULP_RAW(M_TCB_ULP_RAW),
	    V_TCB_ULP_RAW(V_TF_TLS_ENABLE(1)));
	t4_clear_rx_quiesce(toep);
}

static void
tls_clr_quiesce(struct toepcb *toep)
{

	tls_stop_handshake_timer(toep);
	t4_clear_rx_quiesce(toep);
}

/*
 * Calculate the TLS data expansion size
 */
static int
tls_expansion_size(struct toepcb *toep, int data_len, int full_pdus_only,
    unsigned short *pdus_per_ulp)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;
	struct tls_scmd *scmd = &tls_ofld->scmd0;
	int expn_size = 0, frag_count = 0, pad_per_pdu = 0,
	    pad_last_pdu = 0, last_frag_size = 0, max_frag_size = 0;
	int exp_per_pdu = 0;
	int hdr_len = TLS_HEADER_LENGTH;

	do {
		max_frag_size = tls_ofld->k_ctx.frag_size;
		if (G_SCMD_CIPH_MODE(scmd->seqno_numivs) ==
		   SCMD_CIPH_MODE_AES_GCM) {
			frag_count = (data_len / max_frag_size);
			exp_per_pdu = GCM_TAG_SIZE + AEAD_EXPLICIT_DATA_SIZE +
				hdr_len;
			expn_size = frag_count * exp_per_pdu;
			if (full_pdus_only) {
				*pdus_per_ulp = data_len / (exp_per_pdu +
					max_frag_size);
				if (*pdus_per_ulp > 32)
					*pdus_per_ulp = 32;
				else if (!*pdus_per_ulp)
					*pdus_per_ulp = 1;
				expn_size = (*pdus_per_ulp) * exp_per_pdu;
				break;
			}
			if ((last_frag_size = data_len % max_frag_size) > 0) {
				frag_count += 1;
				expn_size += exp_per_pdu;
			}
			break;
		} else if (G_SCMD_CIPH_MODE(scmd->seqno_numivs) !=
			   SCMD_CIPH_MODE_NOP) {
			/* Calculate the number of fragments we can make */
			frag_count = (data_len / max_frag_size);
			if (frag_count > 0) {
				pad_per_pdu = (((howmany((max_frag_size +
						       tls_ofld->mac_length),
						      CIPHER_BLOCK_SIZE)) *
						CIPHER_BLOCK_SIZE) -
					       (max_frag_size +
						tls_ofld->mac_length));
				if (!pad_per_pdu)
					pad_per_pdu = CIPHER_BLOCK_SIZE;
				exp_per_pdu = pad_per_pdu +
					tls_ofld->mac_length +
					hdr_len + CIPHER_BLOCK_SIZE;
				expn_size = frag_count * exp_per_pdu;
			}
			if (full_pdus_only) {
				*pdus_per_ulp = data_len / (exp_per_pdu +
					max_frag_size);
				if (*pdus_per_ulp > 32)
					*pdus_per_ulp = 32;
				else if (!*pdus_per_ulp)
					*pdus_per_ulp = 1;
				expn_size = (*pdus_per_ulp) * exp_per_pdu;
				break;
			}
			/* Consider the last fragment */
			if ((last_frag_size = data_len % max_frag_size) > 0) {
				pad_last_pdu = (((howmany((last_frag_size +
							tls_ofld->mac_length),
						       CIPHER_BLOCK_SIZE)) *
						 CIPHER_BLOCK_SIZE) -
						(last_frag_size +
						 tls_ofld->mac_length));
				if (!pad_last_pdu)
					pad_last_pdu = CIPHER_BLOCK_SIZE;
				expn_size += (pad_last_pdu +
					      tls_ofld->mac_length + hdr_len +
					      CIPHER_BLOCK_SIZE);
			}
		}
	} while (0);

	return (expn_size);
}
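/*
 * Worked example (illustrative only, assuming the usual constant
 * values TLS_HEADER_LENGTH = 5, GCM_TAG_SIZE = 16 and
 * AEAD_EXPLICIT_DATA_SIZE = 8): with AES-GCM and an 8192-byte
 * frag_size, each PDU expands by 16 + 8 + 5 = 29 bytes.  For
 * data_len = 20000 there are two full fragments plus a 3616-byte
 * tail, so frag_count ends up as 3 and the expansion is
 * 3 * 29 = 87 bytes.
 */
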
/* Copy Key to WR */
static void
tls_copy_tx_key(struct toepcb *toep, void *dst)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;
	struct ulptx_sc_memrd *sc_memrd;
	struct ulptx_idata *sc;

	if (tls_ofld->k_ctx.tx_key_info_size <= 0)
		return;

	if (tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_DDR) {
		sc = dst;
		sc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		sc->len = htobe32(0);
		sc_memrd = (struct ulptx_sc_memrd *)(sc + 1);
		sc_memrd->cmd_to_len = htobe32(V_ULPTX_CMD(ULP_TX_SC_MEMRD) |
		    V_ULP_TX_SC_MORE(1) |
		    V_ULPTX_LEN16(tls_ofld->k_ctx.tx_key_info_size >> 4));
		sc_memrd->addr = htobe32(tls_ofld->tx_key_addr >> 5);
	} else if (tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_IMMEDIATE) {
		memcpy(dst, &tls_ofld->k_ctx.tx,
		    tls_ofld->k_ctx.tx_key_info_size);
	}
}

/* TLS/DTLS content type for CPL SFO */
static inline unsigned char
tls_content_type(unsigned char content_type)
{
	/*
	 * XXX: Shouldn't this map CONTENT_TYPE_APP_DATA to DATA and
	 * default to "CUSTOM" for all other types including
	 * heartbeat?
	 */
	switch (content_type) {
	case CONTENT_TYPE_CCS:
		return CPL_TX_TLS_SFO_TYPE_CCS;
	case CONTENT_TYPE_ALERT:
		return CPL_TX_TLS_SFO_TYPE_ALERT;
	case CONTENT_TYPE_HANDSHAKE:
		return CPL_TX_TLS_SFO_TYPE_HANDSHAKE;
	case CONTENT_TYPE_HEARTBEAT:
		return CPL_TX_TLS_SFO_TYPE_HEARTBEAT;
	}
	return CPL_TX_TLS_SFO_TYPE_DATA;
}

static unsigned char
get_cipher_key_size(unsigned int ck_size)
{
	switch (ck_size) {
	case AES_NOP: /* NOP */
		return 15;
	case AES_128: /* AES128 */
		return CH_CK_SIZE_128;
	case AES_192: /* AES192 */
		return CH_CK_SIZE_192;
	case AES_256: /* AES256 */
		return CH_CK_SIZE_256;
	default:
		return CH_CK_SIZE_256;
	}
}

static unsigned char
get_mac_key_size(unsigned int mk_size)
{
	switch (mk_size) {
	case SHA_NOP: /* NOP */
		return CH_MK_SIZE_128;
	case SHA_GHASH: /* GHASH */
	case SHA_512: /* SHA512 */
		return CH_MK_SIZE_512;
	case SHA_224: /* SHA2-224 */
		return CH_MK_SIZE_192;
	case SHA_256: /* SHA2-256 */
		return CH_MK_SIZE_256;
	case SHA_384: /* SHA384 */
		return CH_MK_SIZE_512;
	case SHA1: /* SHA1 */
	default:
		return CH_MK_SIZE_160;
	}
}

static unsigned int
get_proto_ver(int proto_ver)
{
	switch (proto_ver) {
	case TLS1_2_VERSION:
		return TLS_1_2_VERSION;
	case TLS1_1_VERSION:
		return TLS_1_1_VERSION;
	case DTLS1_2_VERSION:
		return DTLS_1_2_VERSION;
	default:
		return TLS_VERSION_MAX;
	}
}

static void
tls_rxkey_flit1(struct tls_keyctx *kwr, struct tls_key_context *kctx)
{

	if (kctx->state.enc_mode == CH_EVP_CIPH_GCM_MODE) {
		kwr->u.rxhdr.ivinsert_to_authinsrt =
		    htobe64(V_TLS_KEYCTX_TX_WR_IVINSERT(6ULL) |
			V_TLS_KEYCTX_TX_WR_AADSTRTOFST(1ULL) |
			V_TLS_KEYCTX_TX_WR_AADSTOPOFST(5ULL) |
			V_TLS_KEYCTX_TX_WR_AUTHSRTOFST(14ULL) |
			V_TLS_KEYCTX_TX_WR_AUTHSTOPOFST(16ULL) |
			V_TLS_KEYCTX_TX_WR_CIPHERSRTOFST(14ULL) |
			V_TLS_KEYCTX_TX_WR_CIPHERSTOPOFST(0ULL) |
			V_TLS_KEYCTX_TX_WR_AUTHINSRT(16ULL));
		kwr->u.rxhdr.ivpresent_to_rxmk_size &=
			~(V_TLS_KEYCTX_TX_WR_RXOPAD_PRESENT(1));
		kwr->u.rxhdr.authmode_to_rxvalid &=
			~(V_TLS_KEYCTX_TX_WR_CIPHAUTHSEQCTRL(1));
	} else {
		kwr->u.rxhdr.ivinsert_to_authinsrt =
		    htobe64(V_TLS_KEYCTX_TX_WR_IVINSERT(6ULL) |
			V_TLS_KEYCTX_TX_WR_AADSTRTOFST(1ULL) |
			V_TLS_KEYCTX_TX_WR_AADSTOPOFST(5ULL) |
			V_TLS_KEYCTX_TX_WR_AUTHSRTOFST(22ULL) |
			V_TLS_KEYCTX_TX_WR_AUTHSTOPOFST(0ULL) |
			V_TLS_KEYCTX_TX_WR_CIPHERSRTOFST(22ULL) |
			V_TLS_KEYCTX_TX_WR_CIPHERSTOPOFST(0ULL) |
			V_TLS_KEYCTX_TX_WR_AUTHINSRT(0ULL));
	}
}

/* Rx key */
static void
prepare_rxkey_wr(struct tls_keyctx *kwr, struct tls_key_context *kctx)
{
	unsigned int ck_size = kctx->cipher_secret_size;
	unsigned int mk_size = kctx->mac_secret_size;
	int proto_ver = kctx->proto_ver;

	kwr->u.rxhdr.flitcnt_hmacctrl =
		((kctx->tx_key_info_size >> 4) << 3) | kctx->hmac_ctrl;

	kwr->u.rxhdr.protover_ciphmode =
		V_TLS_KEYCTX_TX_WR_PROTOVER(get_proto_ver(proto_ver)) |
		V_TLS_KEYCTX_TX_WR_CIPHMODE(kctx->state.enc_mode);

	kwr->u.rxhdr.authmode_to_rxvalid =
		V_TLS_KEYCTX_TX_WR_AUTHMODE(kctx->state.auth_mode) |
		V_TLS_KEYCTX_TX_WR_CIPHAUTHSEQCTRL(1) |
		V_TLS_KEYCTX_TX_WR_SEQNUMCTRL(3) |
		V_TLS_KEYCTX_TX_WR_RXVALID(1);

	kwr->u.rxhdr.ivpresent_to_rxmk_size =
		V_TLS_KEYCTX_TX_WR_IVPRESENT(0) |
		V_TLS_KEYCTX_TX_WR_RXOPAD_PRESENT(1) |
		V_TLS_KEYCTX_TX_WR_RXCK_SIZE(get_cipher_key_size(ck_size)) |
		V_TLS_KEYCTX_TX_WR_RXMK_SIZE(get_mac_key_size(mk_size));

	tls_rxkey_flit1(kwr, kctx);

	/* No key reversal for GCM */
	if (kctx->state.enc_mode != CH_EVP_CIPH_GCM_MODE) {
		t4_aes_getdeckey(kwr->keys.edkey, kctx->rx.key,
				 (kctx->cipher_secret_size << 3));
		memcpy(kwr->keys.edkey + kctx->cipher_secret_size,
		       kctx->rx.key + kctx->cipher_secret_size,
		       (IPAD_SIZE + OPAD_SIZE));
	} else {
		memcpy(kwr->keys.edkey, kctx->rx.key,
		       (kctx->tx_key_info_size - SALT_SIZE));
		memcpy(kwr->u.rxhdr.rxsalt, kctx->rx.salt, SALT_SIZE);
	}
}

/* Tx key */
static void
prepare_txkey_wr(struct tls_keyctx *kwr, struct tls_key_context *kctx)
{
	unsigned int ck_size = kctx->cipher_secret_size;
	unsigned int mk_size = kctx->mac_secret_size;

	kwr->u.txhdr.ctxlen =
		(kctx->tx_key_info_size >> 4);
	kwr->u.txhdr.dualck_to_txvalid =
		V_TLS_KEYCTX_TX_WR_TXOPAD_PRESENT(1) |
		V_TLS_KEYCTX_TX_WR_SALT_PRESENT(1) |
		V_TLS_KEYCTX_TX_WR_TXCK_SIZE(get_cipher_key_size(ck_size)) |
		V_TLS_KEYCTX_TX_WR_TXMK_SIZE(get_mac_key_size(mk_size)) |
		V_TLS_KEYCTX_TX_WR_TXVALID(1);

	memcpy(kwr->keys.edkey, kctx->tx.key, HDR_KCTX_SIZE);
	if (kctx->state.enc_mode == CH_EVP_CIPH_GCM_MODE) {
		memcpy(kwr->u.txhdr.txsalt, kctx->tx.salt, SALT_SIZE);
		kwr->u.txhdr.dualck_to_txvalid &=
			~(V_TLS_KEYCTX_TX_WR_TXOPAD_PRESENT(1));
	}
	kwr->u.txhdr.dualck_to_txvalid = htons(kwr->u.txhdr.dualck_to_txvalid);
}

/* TLS Key memory management */
static int
get_new_keyid(struct toepcb *toep, struct tls_key_context *k_ctx)
{
	struct adapter *sc = td_adapter(toep->td);
	vmem_addr_t addr;

	if (vmem_alloc(sc->key_map, TLS_KEY_CONTEXT_SZ, M_NOWAIT | M_FIRSTFIT,
	    &addr) != 0)
		return (-1);

	return (addr);
}

static void
free_keyid(struct toepcb *toep, int keyid)
{
	struct adapter *sc = td_adapter(toep->td);

	vmem_free(sc->key_map, keyid, TLS_KEY_CONTEXT_SZ);
}

static void
clear_tls_keyid(struct toepcb *toep)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;

	if (tls_ofld->rx_key_addr >= 0) {
		free_keyid(toep, tls_ofld->rx_key_addr);
		tls_ofld->rx_key_addr = -1;
	}
	if (tls_ofld->tx_key_addr >= 0) {
		free_keyid(toep, tls_ofld->tx_key_addr);
		tls_ofld->tx_key_addr = -1;
	}
}

static int
get_keyid(struct tls_ofld_info *tls_ofld, unsigned int ops)
{
	return (ops & KEY_WRITE_RX ? tls_ofld->rx_key_addr :
		((ops & KEY_WRITE_TX) ? tls_ofld->tx_key_addr : -1));
}

static int
get_tp_plen_max(struct tls_ofld_info *tls_ofld)
{
	int plen = ((min(3 * 4096, TP_TX_PG_SZ)) / 1448) * 1448;

	return (tls_ofld->k_ctx.frag_size <= 8192 ? plen : FC_TP_PLEN_MAX);
}

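/*
 * Worked example (illustrative only, assuming TP_TX_PG_SZ is at
 * least 12KB): min(3 * 4096, TP_TX_PG_SZ) is 12288, and
 * (12288 / 1448) * 1448 = 11584, i.e. the largest multiple of a
 * 1448-byte MSS payload that fits in three pages.  Connections
 * using fragments larger than 8192 bytes use FC_TP_PLEN_MAX
 * instead.
 */
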
/* Send request to get the key-id */
static int
tls_program_key_id(struct toepcb *toep, struct tls_key_context *k_ctx)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;
	struct adapter *sc = td_adapter(toep->td);
	struct ofld_tx_sdesc *txsd;
	int kwrlen, kctxlen, keyid, len;
	struct wrqe *wr;
	struct tls_key_req *kwr;
	struct tls_keyctx *kctx;

	kwrlen = sizeof(*kwr);
	kctxlen = roundup2(sizeof(*kctx), 32);
	len = roundup2(kwrlen + kctxlen, 16);

	if (toep->txsd_avail == 0)
		return (EAGAIN);

	/* Don't initialize the key for renegotiation. */
	if (!G_KEY_CLR_LOC(k_ctx->l_p_key)) {
		if ((keyid = get_new_keyid(toep, k_ctx)) < 0) {
			return (ENOSPC);
		}
	} else {
		keyid = get_keyid(tls_ofld, k_ctx->l_p_key);
	}

	wr = alloc_wrqe(len, toep->ofld_txq);
	if (wr == NULL) {
		free_keyid(toep, keyid);
		return (ENOMEM);
	}
	kwr = wrtod(wr);
	memset(kwr, 0, kwrlen);

	kwr->wr_hi = htobe32(V_FW_WR_OP(FW_ULPTX_WR) | F_FW_WR_COMPL |
	    F_FW_WR_ATOMIC);
	kwr->wr_mid = htobe32(V_FW_WR_LEN16(DIV_ROUND_UP(len, 16)) |
	    V_FW_WR_FLOWID(toep->tid));
	kwr->protocol = get_proto_ver(k_ctx->proto_ver);
	kwr->mfs = htons(k_ctx->frag_size);
	kwr->reneg_to_write_rx = k_ctx->l_p_key;

	/* master command */
	kwr->cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
	    V_T5_ULP_MEMIO_ORDER(1) | V_T5_ULP_MEMIO_IMM(1));
	kwr->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(kctxlen >> 5));
	kwr->len16 = htobe32((toep->tid << 8) |
	    DIV_ROUND_UP(len - sizeof(struct work_request_hdr), 16));
	kwr->kaddr = htobe32(V_ULP_MEMIO_ADDR(keyid >> 5));

	/* sub command */
	kwr->sc_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	kwr->sc_len = htobe32(kctxlen);

	kctx = (struct tls_keyctx *)(kwr + 1);
	memset(kctx, 0, kctxlen);

	if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_TX) {
		tls_ofld->tx_key_addr = keyid;
		prepare_txkey_wr(kctx, k_ctx);
	} else if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) {
		tls_ofld->rx_key_addr = keyid;
		prepare_rxkey_wr(kctx, k_ctx);
	}

	txsd = &toep->txsd[toep->txsd_pidx];
	txsd->tx_credits = DIV_ROUND_UP(len, 16);
	txsd->plen = 0;
	toep->tx_credits -= txsd->tx_credits;
	if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
		toep->txsd_pidx = 0;
	toep->txsd_avail--;

	t4_wrq_tx(sc, wr);

	return (0);
}

/* Store a key received from SSL in DDR. */
static int
program_key_context(struct tcpcb *tp, struct toepcb *toep,
    struct tls_key_context *uk_ctx)
{
	struct adapter *sc = td_adapter(toep->td);
	struct tls_ofld_info *tls_ofld = &toep->tls;
	struct tls_key_context *k_ctx;
	int error, key_offset;

	if (tp->t_state != TCPS_ESTABLISHED) {
		/*
		 * XXX: Matches Linux driver, but not sure this is a
		 * very appropriate error.
		 */
		return (ENOENT);
	}

	/* Stop timer on handshake completion */
	tls_stop_handshake_timer(toep);

	toep->flags &= ~TPF_FORCE_CREDITS;

	CTR4(KTR_CXGBE, "%s: tid %d %s proto_ver %#x", __func__, toep->tid,
	    G_KEY_GET_LOC(uk_ctx->l_p_key) == KEY_WRITE_RX ? "KEY_WRITE_RX" :
	    "KEY_WRITE_TX", uk_ctx->proto_ver);

	if (G_KEY_GET_LOC(uk_ctx->l_p_key) == KEY_WRITE_RX &&
	    toep->ulp_mode != ULP_MODE_TLS)
		return (EOPNOTSUPP);

	/* Don't copy the 'tx' and 'rx' fields. */
	k_ctx = &tls_ofld->k_ctx;
	memcpy(&k_ctx->l_p_key, &uk_ctx->l_p_key,
	    sizeof(*k_ctx) - offsetof(struct tls_key_context, l_p_key));

	/* Unsupported versions: TLS other than 1.1/1.2, DTLS other than 1.2 */
	if (get_proto_ver(k_ctx->proto_ver) > DTLS_1_2_VERSION) {
		if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) {
			tls_ofld->rx_key_addr = -1;
			t4_clear_rx_quiesce(toep);
		} else {
			tls_ofld->tx_key_addr = -1;
		}
		return (0);
	}

	if (k_ctx->state.enc_mode == CH_EVP_CIPH_GCM_MODE) {
		k_ctx->iv_size = 4;
		k_ctx->mac_first = 0;
		k_ctx->hmac_ctrl = 0;
	} else {
		k_ctx->iv_size = 8; /* CBC IV is 16B; iv_size is in 2B units */
		k_ctx->mac_first = 1;
	}

	tls_ofld->scmd0.seqno_numivs =
		(V_SCMD_SEQ_NO_CTRL(3) |
		 V_SCMD_PROTO_VERSION(get_proto_ver(k_ctx->proto_ver)) |
		 V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
		 V_SCMD_CIPH_AUTH_SEQ_CTRL((k_ctx->mac_first == 0)) |
		 V_SCMD_CIPH_MODE(k_ctx->state.enc_mode) |
		 V_SCMD_AUTH_MODE(k_ctx->state.auth_mode) |
		 V_SCMD_HMAC_CTRL(k_ctx->hmac_ctrl) |
		 V_SCMD_IV_SIZE(k_ctx->iv_size));

	tls_ofld->scmd0.ivgen_hdrlen =
		(V_SCMD_IV_GEN_CTRL(k_ctx->iv_ctrl) |
		 V_SCMD_KEY_CTX_INLINE(0) |
		 V_SCMD_TLS_FRAG_ENABLE(1));

	tls_ofld->mac_length = k_ctx->mac_secret_size;

	if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) {
		k_ctx->rx = uk_ctx->rx;
		/* Don't initialize the key for renegotiation. */
		if (!G_KEY_CLR_LOC(k_ctx->l_p_key))
			tls_ofld->rx_key_addr = -1;
	} else {
		k_ctx->tx = uk_ctx->tx;
		/* Don't initialize the key for renegotiation. */
		if (!G_KEY_CLR_LOC(k_ctx->l_p_key))
			tls_ofld->tx_key_addr = -1;
	}

	/* Flush pending data before new Tx key becomes active */
	if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_TX) {
		struct sockbuf *sb;

		/* XXX: This might not drain everything. */
		t4_push_frames(sc, toep, 0);
		sb = &toep->inp->inp_socket->so_snd;
		SOCKBUF_LOCK(sb);

		/* XXX: This asserts that everything has been pushed. */
		MPASS(sb->sb_sndptr == NULL || sb->sb_sndptr->m_next == NULL);
		sb->sb_sndptr = NULL;
		tls_ofld->sb_off = sbavail(sb);
		SOCKBUF_UNLOCK(sb);
		tls_ofld->tx_seq_no = 0;
	}

	if ((G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) ||
	    (tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_DDR)) {
		error = tls_program_key_id(toep, k_ctx);
		if (error) {
			/* XXX: Only clear quiesce for KEY_WRITE_RX? */
			t4_clear_rx_quiesce(toep);
			return (error);
		}
	}

	if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) {
		/*
		 * RX key tags are an index into the key portion of MA
		 * memory stored as an offset from the base address in
		 * units of 64 bytes.
		 */
		key_offset = tls_ofld->rx_key_addr - sc->vres.key.start;
		t4_set_tls_keyid(toep, key_offset / 64);
		t4_set_tls_tcb_field(toep, W_TCB_ULP_RAW,
				 V_TCB_ULP_RAW(M_TCB_ULP_RAW),
				 V_TCB_ULP_RAW((V_TF_TLS_KEY_SIZE(3) |
						V_TF_TLS_CONTROL(1) |
						V_TF_TLS_ACTIVE(1) |
						V_TF_TLS_ENABLE(1))));
		t4_set_tls_tcb_field(toep, W_TCB_TLS_SEQ,
				 V_TCB_TLS_SEQ(M_TCB_TLS_SEQ),
				 V_TCB_TLS_SEQ(0));
		t4_clear_rx_quiesce(toep);
	} else {
		unsigned short pdus_per_ulp;

		if (tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_IMMEDIATE)
			tls_ofld->tx_key_addr = 1;

		tls_ofld->fcplenmax = get_tp_plen_max(tls_ofld);
		tls_ofld->expn_per_ulp = tls_expansion_size(toep,
				tls_ofld->fcplenmax, 1, &pdus_per_ulp);
		tls_ofld->pdus_per_ulp = pdus_per_ulp;
		tls_ofld->adjusted_plen = tls_ofld->pdus_per_ulp *
			((tls_ofld->expn_per_ulp / tls_ofld->pdus_per_ulp) +
			 tls_ofld->k_ctx.frag_size);
	}

	return (0);
}

/*
 * In some cases a client connection can hang without sending the
 * ServerHelloDone message from the NIC to the host.  Send a dummy
 * RX_DATA_ACK with RX_MODULATE to unstick the connection.
 */
static void
tls_send_handshake_ack(void *arg)
{
	struct toepcb *toep = arg;
	struct tls_ofld_info *tls_ofld = &toep->tls;
	struct adapter *sc = td_adapter(toep->td);

	/*
	 * XXX: Does not have the t4_get_tcb() checks to refine the
	 * workaround.
	 */
	callout_schedule(&tls_ofld->handshake_timer, TLS_SRV_HELLO_RD_TM * hz);

	CTR2(KTR_CXGBE, "%s: tid %d sending RX_DATA_ACK", __func__, toep->tid);
	send_rx_modulate(sc, toep);
}

static void
tls_start_handshake_timer(struct toepcb *toep)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;

	mtx_lock(&tls_handshake_lock);
	callout_reset(&tls_ofld->handshake_timer, TLS_SRV_HELLO_BKOFF_TM * hz,
	    tls_send_handshake_ack, toep);
	mtx_unlock(&tls_handshake_lock);
}

void
tls_stop_handshake_timer(struct toepcb *toep)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;

	mtx_lock(&tls_handshake_lock);
	callout_stop(&tls_ofld->handshake_timer);
	mtx_unlock(&tls_handshake_lock);
}

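/*
 * Example userland usage (a minimal sketch, not part of this file):
 * once the TOE connection is established, a TLS-aware application
 * (e.g. a Chelsio-enabled OpenSSL) hands its session keys to the
 * NIC through the TCP-level socket options handled below:
 *
 *	struct tls_key_context ctx;
 *
 *	(fill ctx from the negotiated session keys)
 *	if (setsockopt(fd, IPPROTO_TCP, TCP_TLSOM_SET_TLS_CONTEXT,
 *	    &ctx, sizeof(ctx)) == -1)
 *		err(1, "TCP_TLSOM_SET_TLS_CONTEXT");
 *
 * TCP_TLSOM_GET_TLS_TOM reports TLS_TOM_NONE, TLS_TOM_TXONLY or
 * TLS_TOM_BOTH so the caller knows which directions can be offloaded.
 */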
int
t4_ctloutput_tls(struct socket *so, struct sockopt *sopt)
{
	struct tls_key_context uk_ctx;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct toepcb *toep;
	int error, optval;

	error = 0;
	if (sopt->sopt_dir == SOPT_SET &&
	    sopt->sopt_name == TCP_TLSOM_SET_TLS_CONTEXT) {
		error = sooptcopyin(sopt, &uk_ctx, sizeof(uk_ctx),
		    sizeof(uk_ctx));
		if (error)
			return (error);
	}

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("tcp_ctloutput: inp == NULL"));
	INP_WLOCK(inp);
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		INP_WUNLOCK(inp);
		return (ECONNRESET);
	}
	tp = intotcpcb(inp);
	toep = tp->t_toe;
	switch (sopt->sopt_dir) {
	case SOPT_SET:
		switch (sopt->sopt_name) {
		case TCP_TLSOM_SET_TLS_CONTEXT:
			error = program_key_context(tp, toep, &uk_ctx);
			INP_WUNLOCK(inp);
			break;
		case TCP_TLSOM_CLR_TLS_TOM:
			if (toep->ulp_mode == ULP_MODE_TLS) {
				CTR2(KTR_CXGBE, "%s: tid %d CLR_TLS_TOM",
				    __func__, toep->tid);
				tls_clr_ofld_mode(toep);
			} else
				error = EOPNOTSUPP;
			INP_WUNLOCK(inp);
			break;
		case TCP_TLSOM_CLR_QUIES:
			if (toep->ulp_mode == ULP_MODE_TLS) {
				CTR2(KTR_CXGBE, "%s: tid %d CLR_QUIES",
				    __func__, toep->tid);
				tls_clr_quiesce(toep);
			} else
				error = EOPNOTSUPP;
			INP_WUNLOCK(inp);
			break;
		default:
			INP_WUNLOCK(inp);
			error = EOPNOTSUPP;
			break;
		}
		break;
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case TCP_TLSOM_GET_TLS_TOM:
			/*
			 * TLS TX is permitted on any TOE socket, but
			 * TLS RX requires a TLS ULP mode.
			 */
			optval = TLS_TOM_NONE;
			if (can_tls_offload(td_adapter(toep->td))) {
				switch (toep->ulp_mode) {
				case ULP_MODE_NONE:
				case ULP_MODE_TCPDDP:
					optval = TLS_TOM_TXONLY;
					break;
				case ULP_MODE_TLS:
					optval = TLS_TOM_BOTH;
					break;
				}
			}
			CTR3(KTR_CXGBE, "%s: tid %d GET_TLS_TOM = %d",
			    __func__, toep->tid, optval);
			INP_WUNLOCK(inp);
			error = sooptcopyout(sopt, &optval, sizeof(optval));
			break;
		default:
			INP_WUNLOCK(inp);
			error = EOPNOTSUPP;
			break;
		}
		break;
	}
	return (error);
}

void
tls_init_toep(struct toepcb *toep)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;

	tls_ofld->key_location = TLS_SFO_WR_CONTEXTLOC_DDR;
	tls_ofld->rx_key_addr = -1;
	tls_ofld->tx_key_addr = -1;
	if (toep->ulp_mode == ULP_MODE_TLS)
		callout_init_mtx(&tls_ofld->handshake_timer,
		    &tls_handshake_lock, 0);
}

void
tls_establish(struct toepcb *toep)
{

	/*
	 * Enable PDU extraction.
	 *
	 * XXX: Supposedly this should be done by the firmware when
	 * the ULP_MODE FLOWC parameter is set in send_flowc_wr(), but
	 * in practice this seems to be required.
	 */
	CTR2(KTR_CXGBE, "%s: tid %d setting TLS_ENABLE", __func__, toep->tid);
	t4_set_tls_tcb_field(toep, W_TCB_ULP_RAW, V_TCB_ULP_RAW(M_TCB_ULP_RAW),
	    V_TCB_ULP_RAW(V_TF_TLS_ENABLE(1)));

	toep->flags |= TPF_FORCE_CREDITS;

	tls_start_handshake_timer(toep);
}

void
tls_uninit_toep(struct toepcb *toep)
{

	if (toep->ulp_mode == ULP_MODE_TLS)
		tls_stop_handshake_timer(toep);
	clear_tls_keyid(toep);
}

#define MAX_OFLD_TX_CREDITS (SGE_MAX_WR_LEN / 16)
#define	MIN_OFLD_TLSTX_CREDITS(toep)					\
	(howmany(sizeof(struct fw_tlstx_data_wr) +			\
	    sizeof(struct cpl_tx_tls_sfo) + key_size((toep)) +		\
	    CIPHER_BLOCK_SIZE + 1, 16))

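/*
 * Illustrative sizing note (not authoritative): credits are 16-byte
 * units, so MIN_OFLD_TLSTX_CREDITS() is the smallest work request
 * that still carries a full TLS send: the FW_TLSTX_DATA_WR and
 * CPL_TX_TLS_SFO headers, the key context (immediate or a DDR
 * reference), one cipher block of IV material and at least one byte
 * of payload, rounded up to whole credits.
 */
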
static inline u_int
max_imm_tls_space(int tx_credits)
{
	const int n = 2;	/* Use only up to 2 desc for imm. data WR */
	int space;

	KASSERT(tx_credits >= 0 &&
		tx_credits <= MAX_OFLD_TX_CREDITS,
		("%s: %d credits", __func__, tx_credits));

	if (tx_credits >= (n * EQ_ESIZE) / 16)
		space = (n * EQ_ESIZE);
	else
		space = tx_credits * 16;
	return (space);
}

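/*
 * Worked example (illustrative only, assuming the usual 64-byte
 * EQ_ESIZE): two descriptors give a 2 * 64 = 128-byte ceiling for
 * immediate data, which corresponds to 128 / 16 = 8 credits.  A
 * caller holding fewer than 8 credits is limited to
 * tx_credits * 16 bytes.
 */
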
static int
count_mbuf_segs(struct mbuf *m, int skip, int len, int *max_nsegs_1mbufp)
{
	int max_nsegs_1mbuf, n, nsegs;

	while (skip >= m->m_len) {
		skip -= m->m_len;
		m = m->m_next;
	}

	nsegs = 0;
	max_nsegs_1mbuf = 0;
	while (len > 0) {
		n = sglist_count(mtod(m, char *) + skip, m->m_len - skip);
		if (n > max_nsegs_1mbuf)
			max_nsegs_1mbuf = n;
		nsegs += n;
		len -= m->m_len - skip;
		skip = 0;
		m = m->m_next;
	}
	*max_nsegs_1mbufp = max_nsegs_1mbuf;
	return (nsegs);
}

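/*
 * Sizing note (illustrative only): the DSGL that write_tlstx_sgl()
 * emits grows with nsegs as
 *
 *	sizeof(struct ulptx_sgl) +
 *	    ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8
 *
 * bytes: the ulptx_sgl header holds the first address/length pair,
 * each further pair of segments takes a 24-byte ulptx_sge_pair, and
 * a trailing odd segment takes 16 more bytes.  E.g. nsegs = 5 adds
 * sizeof(struct ulptx_sgl) + 48 bytes.  This is the expression
 * t4_push_tls_records() uses when sizing the work request.
 */
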
static void
write_tlstx_wr(struct fw_tlstx_data_wr *txwr, struct toepcb *toep,
    unsigned int immdlen, unsigned int plen, unsigned int expn,
    unsigned int pdus, uint8_t credits, int shove, int imm_ivs)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;
	unsigned int len = plen + expn;

	txwr->op_to_immdlen = htobe32(V_WR_OP(FW_TLSTX_DATA_WR) |
	    V_FW_TLSTX_DATA_WR_COMPL(1) |
	    V_FW_TLSTX_DATA_WR_IMMDLEN(immdlen));
	txwr->flowid_len16 = htobe32(V_FW_TLSTX_DATA_WR_FLOWID(toep->tid) |
	    V_FW_TLSTX_DATA_WR_LEN16(credits));
	txwr->plen = htobe32(len);
	txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(ULP_MODE_TLS) |
	    V_TX_URG(0) | /* F_T6_TX_FORCE | */ V_TX_SHOVE(shove));
	txwr->ctxloc_to_exp = htobe32(V_FW_TLSTX_DATA_WR_NUMIVS(pdus) |
	    V_FW_TLSTX_DATA_WR_EXP(expn) |
	    V_FW_TLSTX_DATA_WR_CTXLOC(tls_ofld->key_location) |
	    V_FW_TLSTX_DATA_WR_IVDSGL(!imm_ivs) |
	    V_FW_TLSTX_DATA_WR_KEYSIZE(tls_ofld->k_ctx.tx_key_info_size >> 4));
	txwr->mfs = htobe16(tls_ofld->k_ctx.frag_size);
	txwr->adjustedplen_pkd = htobe16(
	    V_FW_TLSTX_DATA_WR_ADJUSTEDPLEN(tls_ofld->adjusted_plen));
	txwr->expinplenmax_pkd = htobe16(
	    V_FW_TLSTX_DATA_WR_EXPINPLENMAX(tls_ofld->expn_per_ulp));
	txwr->pdusinplenmax_pkd = htobe16(
	    V_FW_TLSTX_DATA_WR_PDUSINPLENMAX(tls_ofld->pdus_per_ulp));
}

static void
write_tlstx_cpl(struct cpl_tx_tls_sfo *cpl, struct toepcb *toep,
    struct tls_hdr *tls_hdr, unsigned int plen, unsigned int pdus)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;
	int data_type, seglen;

	if (plen < tls_ofld->k_ctx.frag_size)
		seglen = plen;
	else
		seglen = tls_ofld->k_ctx.frag_size;
	data_type = tls_content_type(tls_hdr->type);
	cpl->op_to_seg_len = htobe32(V_CPL_TX_TLS_SFO_OPCODE(CPL_TX_TLS_SFO) |
	    V_CPL_TX_TLS_SFO_DATA_TYPE(data_type) |
	    V_CPL_TX_TLS_SFO_CPL_LEN(2) | V_CPL_TX_TLS_SFO_SEG_LEN(seglen));
	cpl->pld_len = htobe32(plen);
	if (data_type == CPL_TX_TLS_SFO_TYPE_HEARTBEAT)
		cpl->type_protover = htobe32(
		    V_CPL_TX_TLS_SFO_TYPE(tls_hdr->type));
	cpl->seqno_numivs = htobe32(tls_ofld->scmd0.seqno_numivs |
	    V_SCMD_NUM_IVS(pdus));
	cpl->ivgen_hdrlen = htobe32(tls_ofld->scmd0.ivgen_hdrlen);
	cpl->scmd1 = htobe64(tls_ofld->tx_seq_no);
	tls_ofld->tx_seq_no += pdus;
}

/*
 * Similar to write_tx_sgl() except that it accepts an optional
 * trailer buffer for IVs.
 */
static void
write_tlstx_sgl(void *dst, struct mbuf *start, int skip, int plen,
    void *iv_buffer, int iv_len, int nsegs, int n)
{
	struct mbuf *m;
	struct ulptx_sgl *usgl = dst;
	int i, j, rc;
	struct sglist sg;
	struct sglist_seg segs[n];

	KASSERT(nsegs > 0, ("%s: nsegs 0", __func__));

	sglist_init(&sg, n, segs);
	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
	    V_ULPTX_NSGE(nsegs));

	for (m = start; skip >= m->m_len; m = m->m_next)
		skip -= m->m_len;

	i = -1;
	/* Continue from the mbuf located by the skip loop above. */
	for (; plen > 0; m = m->m_next) {
		rc = sglist_append(&sg, mtod(m, char *) + skip,
		    m->m_len - skip);
		if (__predict_false(rc != 0))
			panic("%s: sglist_append %d", __func__, rc);
		plen -= m->m_len - skip;
		skip = 0;

		for (j = 0; j < sg.sg_nseg; i++, j++) {
			if (i < 0) {
				usgl->len0 = htobe32(segs[j].ss_len);
				usgl->addr0 = htobe64(segs[j].ss_paddr);
			} else {
				usgl->sge[i / 2].len[i & 1] =
				    htobe32(segs[j].ss_len);
				usgl->sge[i / 2].addr[i & 1] =
				    htobe64(segs[j].ss_paddr);
			}
#ifdef INVARIANTS
			nsegs--;
#endif
		}
		sglist_reset(&sg);
	}
	if (iv_buffer != NULL) {
		rc = sglist_append(&sg, iv_buffer, iv_len);
		if (__predict_false(rc != 0))
			panic("%s: sglist_append %d", __func__, rc);

		for (j = 0; j < sg.sg_nseg; i++, j++) {
			if (i < 0) {
				usgl->len0 = htobe32(segs[j].ss_len);
				usgl->addr0 = htobe64(segs[j].ss_paddr);
			} else {
				usgl->sge[i / 2].len[i & 1] =
				    htobe32(segs[j].ss_len);
				usgl->sge[i / 2].addr[i & 1] =
				    htobe64(segs[j].ss_paddr);
			}
#ifdef INVARIANTS
			nsegs--;
#endif
		}
	}
	if (i & 1)
		usgl->sge[i / 2].len[1] = htobe32(0);
	KASSERT(nsegs == 0, ("%s: nsegs %d, start %p, iv_buffer %p",
	    __func__, nsegs, start, iv_buffer));
}

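/*
 * For reference, the header read at the start of each record below is
 * the standard 5-byte TLS record header (a sketch of the expected
 * layout, not a definition from this file):
 *
 *	struct tls_hdr {
 *		uint8_t  type;		(content type)
 *		uint16_t version;	(big-endian)
 *		uint16_t length;	(big-endian payload length)
 *	};
 *
 * so each record occupies TLS_HEADER_LENGTH + length bytes in the
 * socket buffer.
 */
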
/*
 * Similar to t4_push_frames() but handles TLS sockets when TLS offload
 * is enabled.  Rather than transmitting bulk data, the socket buffer
 * contains TLS records.  The work request requires a full TLS record,
 * so batch mbufs up until a full TLS record is seen.  This requires
 * reading the TLS header out of the start of each record to determine
 * its length.
 */
void
t4_push_tls_records(struct adapter *sc, struct toepcb *toep, int drop)
{
	struct tls_hdr thdr;
	struct mbuf *sndptr;
	struct fw_tlstx_data_wr *txwr;
	struct cpl_tx_tls_sfo *cpl;
	struct wrqe *wr;
	u_int plen, nsegs, credits, space, max_nsegs_1mbuf, wr_len;
	u_int expn_size, iv_len, pdus, sndptroff;
	struct tls_ofld_info *tls_ofld = &toep->tls;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);
	struct socket *so = inp->inp_socket;
	struct sockbuf *sb = &so->so_snd;
	int tls_size, tx_credits, shove, /* compl,*/ sowwakeup;
	struct ofld_tx_sdesc *txsd;
	bool imm_ivs, imm_payload;
	void *iv_buffer, *iv_dst, *buf;

	INP_WLOCK_ASSERT(inp);
	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));

	KASSERT(toep->ulp_mode == ULP_MODE_NONE ||
	    toep->ulp_mode == ULP_MODE_TCPDDP || toep->ulp_mode == ULP_MODE_TLS,
	    ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep));
	KASSERT(tls_tx_key(toep),
	    ("%s: TX key not set for toep %p", __func__, toep));

#ifdef VERBOSE_TRACES
	CTR5(KTR_CXGBE, "%s: tid %d toep flags %#x tp flags %#x drop %d",
	    __func__, toep->tid, toep->flags, tp->t_flags, drop);
#endif
	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN))
		return;

#ifdef RATELIMIT
	if (__predict_false(inp->inp_flags2 & INP_RATE_LIMIT_CHANGED) &&
	    (update_tx_rate_limit(sc, toep, so->so_max_pacing_rate) == 0)) {
		inp->inp_flags2 &= ~INP_RATE_LIMIT_CHANGED;
	}
#endif

	/*
	 * This function doesn't resume by itself.  Someone else must clear the
	 * flag and call this function.
	 */
	if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) {
		KASSERT(drop == 0,
		    ("%s: drop (%d) != 0 but tx is suspended", __func__, drop));
		return;
	}

	txsd = &toep->txsd[toep->txsd_pidx];
	for (;;) {
		tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
		space = max_imm_tls_space(tx_credits);
		wr_len = sizeof(struct fw_tlstx_data_wr) +
		    sizeof(struct cpl_tx_tls_sfo) + key_size(toep);
		if (wr_len + CIPHER_BLOCK_SIZE + 1 > space) {
#ifdef VERBOSE_TRACES
			CTR5(KTR_CXGBE,
			    "%s: tid %d tx_credits %d min_wr %d space %d",
			    __func__, toep->tid, tx_credits, wr_len +
			    CIPHER_BLOCK_SIZE + 1, space);
#endif
			return;
		}

		SOCKBUF_LOCK(sb);
		sowwakeup = drop;
		if (drop) {
			sbdrop_locked(sb, drop);
			MPASS(tls_ofld->sb_off >= drop);
			tls_ofld->sb_off -= drop;
			drop = 0;
		}

		/*
		 * Send a FIN if requested, but only if there's no
		 * more data to send.
		 */
		if (sbavail(sb) == tls_ofld->sb_off &&
		    toep->flags & TPF_SEND_FIN) {
			if (sowwakeup)
				sowwakeup_locked(so);
			else
				SOCKBUF_UNLOCK(sb);
			SOCKBUF_UNLOCK_ASSERT(sb);
			t4_close_conn(sc, toep);
			return;
		}

		if (sbavail(sb) < tls_ofld->sb_off + TLS_HEADER_LENGTH) {
			/*
			 * A full TLS header is not yet queued, stop
			 * for now until more data is added to the
			 * socket buffer.  However, if the connection
			 * has been closed, we will never get the rest
			 * of the header so just discard the partial
			 * header and close the connection.
			 */
#ifdef VERBOSE_TRACES
			CTR5(KTR_CXGBE, "%s: tid %d sbavail %d sb_off %d%s",
			    __func__, toep->tid, sbavail(sb), tls_ofld->sb_off,
			    toep->flags & TPF_SEND_FIN ? " SEND_FIN" : "");
#endif
			if (sowwakeup)
				sowwakeup_locked(so);
			else
				SOCKBUF_UNLOCK(sb);
			SOCKBUF_UNLOCK_ASSERT(sb);
			if (toep->flags & TPF_SEND_FIN)
				t4_close_conn(sc, toep);
			return;
		}

		/* Read the header of the next TLS record. */
		sndptr = sbsndmbuf(sb, tls_ofld->sb_off, &sndptroff);
		m_copydata(sndptr, sndptroff, sizeof(thdr), (caddr_t)&thdr);
		tls_size = ntohs(thdr.length);
		plen = TLS_HEADER_LENGTH + tls_size;
		pdus = howmany(tls_size, tls_ofld->k_ctx.frag_size);
		iv_len = pdus * CIPHER_BLOCK_SIZE;

		if (sbavail(sb) < tls_ofld->sb_off + plen) {
			/*
			 * The full TLS record is not yet queued, stop
			 * for now until more data is added to the
			 * socket buffer.  However, if the connection
			 * has been closed, we will never get the rest
			 * of the record so just discard the partial
			 * record and close the connection.
			 */
#ifdef VERBOSE_TRACES
			CTR6(KTR_CXGBE,
			    "%s: tid %d sbavail %d sb_off %d plen %d%s",
			    __func__, toep->tid, sbavail(sb), tls_ofld->sb_off,
			    plen, toep->flags & TPF_SEND_FIN ? " SEND_FIN" :
			    "");
#endif
			if (sowwakeup)
				sowwakeup_locked(so);
			else
				SOCKBUF_UNLOCK(sb);
			SOCKBUF_UNLOCK_ASSERT(sb);
			if (toep->flags & TPF_SEND_FIN)
				t4_close_conn(sc, toep);
			return;
		}

		/* Shove if there is no additional data pending. */
		shove = (sbavail(sb) == tls_ofld->sb_off + plen) &&
		    !(tp->t_flags & TF_MORETOCOME);

		if (sb->sb_flags & SB_AUTOSIZE &&
		    V_tcp_do_autosndbuf &&
		    sb->sb_hiwat < V_tcp_autosndbuf_max &&
		    sbused(sb) >= sb->sb_hiwat * 7 / 8) {
			int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc,
			    V_tcp_autosndbuf_max);

			if (!sbreserve_locked(sb, newsize, so, NULL))
				sb->sb_flags &= ~SB_AUTOSIZE;
			else
				sowwakeup = 1;	/* room available */
		}
		if (sowwakeup)
			sowwakeup_locked(so);
		else
			SOCKBUF_UNLOCK(sb);
		SOCKBUF_UNLOCK_ASSERT(sb);

		if (__predict_false(toep->flags & TPF_FIN_SENT))
			panic("%s: excess tx.", __func__);

		/* Determine whether to use immediate vs SGL. */
		imm_payload = false;
		imm_ivs = false;
		if (wr_len + iv_len <= space) {
			imm_ivs = true;
			wr_len += iv_len;
			if (wr_len + tls_size <= space) {
				wr_len += tls_size;
				imm_payload = true;
			}
		}

		/* Allocate space for IVs if needed. */
		if (!imm_ivs) {
			iv_buffer = malloc(iv_len, M_CXGBE, M_NOWAIT);
			if (iv_buffer == NULL) {
				/*
				 * XXX: How to restart this?  Note that
				 * the socket buffer was already unlocked
				 * above, so it must not be unlocked (or
				 * passed to sowwakeup_locked()) again.
				 */
				CTR3(KTR_CXGBE,
			    "%s: tid %d failed to alloc IV space len %d",
				    __func__, toep->tid, iv_len);
				return;
			}
		} else
			iv_buffer = NULL;

		/* Determine size of SGL. */
		nsegs = 0;
		max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */
		if (!imm_payload) {
			nsegs = count_mbuf_segs(sndptr, sndptroff +
			    TLS_HEADER_LENGTH, tls_size, &max_nsegs_1mbuf);
			if (!imm_ivs) {
				int n = sglist_count(iv_buffer, iv_len);
				nsegs += n;
				if (n > max_nsegs_1mbuf)
					max_nsegs_1mbuf = n;
			}

			/* Account for SGL in work request length. */
			wr_len += sizeof(struct ulptx_sgl) +
			    ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
		}

		wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq);
		if (wr == NULL) {
			/* XXX: how will we recover from this? */
			toep->flags |= TPF_TX_SUSPENDED;
			return;
		}

#ifdef VERBOSE_TRACES
		CTR5(KTR_CXGBE, "%s: tid %d TLS record %d len %#x pdus %d",
		    __func__, toep->tid, thdr.type, tls_size, pdus);
#endif
		txwr = wrtod(wr);
		cpl = (struct cpl_tx_tls_sfo *)(txwr + 1);
		memset(txwr, 0, roundup2(wr_len, 16));
		credits = howmany(wr_len, 16);
		expn_size = tls_expansion_size(toep, tls_size, 0, NULL);
		write_tlstx_wr(txwr, toep, imm_payload ? tls_size : 0,
		    tls_size, expn_size, pdus, credits, shove, imm_ivs ? 1 : 0);
		write_tlstx_cpl(cpl, toep, &thdr, tls_size, pdus);
		tls_copy_tx_key(toep, cpl + 1);

		/* Generate random IVs */
		buf = (char *)(cpl + 1) + key_size(toep);
		if (imm_ivs) {
			MPASS(iv_buffer == NULL);
			iv_dst = buf;
			buf = (char *)iv_dst + iv_len;
		} else
			iv_dst = iv_buffer;
		arc4rand(iv_dst, iv_len, 0);

		if (imm_payload) {
			m_copydata(sndptr, sndptroff + TLS_HEADER_LENGTH,
			    tls_size, buf);
		} else {
			write_tlstx_sgl(buf, sndptr,
			    sndptroff + TLS_HEADER_LENGTH, tls_size, iv_buffer,
			    iv_len, nsegs, max_nsegs_1mbuf);
		}

		KASSERT(toep->tx_credits >= credits,
			("%s: not enough credits", __func__));

		toep->tx_credits -= credits;

		tp->snd_nxt += plen;
		tp->snd_max += plen;

		SOCKBUF_LOCK(sb);
		sbsndptr_adv(sb, sb->sb_sndptr, plen);
		tls_ofld->sb_off += plen;
		SOCKBUF_UNLOCK(sb);

		toep->flags |= TPF_TX_DATA_SENT;
		if (toep->tx_credits < MIN_OFLD_TLSTX_CREDITS(toep))
			toep->flags |= TPF_TX_SUSPENDED;

		KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
		txsd->plen = plen;
		txsd->tx_credits = credits;
		txsd->iv_buffer = iv_buffer;
		txsd++;
		if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
			toep->txsd_pidx = 0;
			txsd = &toep->txsd[0];
		}
		toep->txsd_avail--;

		atomic_add_long(&toep->vi->pi->tx_tls_records, 1);
		atomic_add_long(&toep->vi->pi->tx_tls_octets, plen);

		t4_l2t_send(sc, wr, toep->l2te);
	}
}

/*
 * For TLS data we place mbufs received via CPL_TLS_DATA into an
 * mbufq in the TLS offload state.  When CPL_RX_TLS_CMP is received,
 * the completed PDUs are placed into the socket receive buffer.
 *
 * The TLS code reuses the ulp_pdu_reclaimq to hold the pending mbufs.
 */
static int
do_tls_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_tls_data *cpl = mtod(m, const void *);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp;
	int len;

	/* XXX: Should this match do_rx_data instead? */
	KASSERT(!(toep->flags & TPF_SYNQE),
	    ("%s: toep %p claims to be a synq entry", __func__, toep));

	KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));

	/* strip off CPL header */
	m_adj(m, sizeof(*cpl));
	len = m->m_pkthdr.len;

	atomic_add_long(&toep->vi->pi->rx_tls_octets, len);

	KASSERT(len == G_CPL_TLS_DATA_LENGTH(be32toh(cpl->length_pkd)),
	    ("%s: payload length mismatch", __func__));

	INP_WLOCK(inp);
	if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) {
		CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
		    __func__, tid, len, inp->inp_flags);
		INP_WUNLOCK(inp);
		m_freem(m);
		return (0);
	}

	/* Save TCP sequence number. */
	m->m_pkthdr.tls_tcp_seq = be32toh(cpl->seq);

	if (mbufq_enqueue(&toep->ulp_pdu_reclaimq, m)) {
#ifdef INVARIANTS
		panic("Failed to queue TLS data packet");
#else
		printf("%s: Failed to queue TLS data packet\n", __func__);
		INP_WUNLOCK(inp);
		m_freem(m);
		return (0);
#endif
	}

	tp = intotcpcb(inp);
	tp->t_rcvtime = ticks;

#ifdef VERBOSE_TRACES
	CTR4(KTR_CXGBE, "%s: tid %u len %d seq %u", __func__, tid, len,
	    be32toh(cpl->seq));
#endif

	INP_WUNLOCK(inp);
	return (0);
}
1446 
1447 static int
1448 do_rx_tls_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
1449 {
1450 	struct adapter *sc = iq->adapter;
1451 	const struct cpl_rx_tls_cmp *cpl = mtod(m, const void *);
1452 	struct tlsrx_hdr_pkt *tls_hdr_pkt;
1453 	unsigned int tid = GET_TID(cpl);
1454 	struct toepcb *toep = lookup_tid(sc, tid);
1455 	struct inpcb *inp = toep->inp;
1456 	struct tcpcb *tp;
1457 	struct socket *so;
1458 	struct sockbuf *sb;
1459 	struct mbuf *tls_data;
1460 	int len, pdu_length, rx_credits;
1461 
1462 	KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
1463 	KASSERT(!(toep->flags & TPF_SYNQE),
1464 	    ("%s: toep %p claims to be a synq entry", __func__, toep));
1465 
1466 	/* strip off CPL header */
1467 	m_adj(m, sizeof(*cpl));
1468 	len = m->m_pkthdr.len;
1469 
1470 	atomic_add_long(&toep->vi->pi->rx_tls_records, 1);
1471 
1472 	KASSERT(len == G_CPL_RX_TLS_CMP_LENGTH(be32toh(cpl->pdulength_length)),
1473 	    ("%s: payload length mismatch", __func__));
1474 
1475 	INP_WLOCK(inp);
1476 	if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) {
1477 		CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
1478 		    __func__, tid, len, inp->inp_flags);
1479 		INP_WUNLOCK(inp);
1480 		m_freem(m);
1481 		return (0);
1482 	}
1483 
1484 	pdu_length = G_CPL_RX_TLS_CMP_PDULENGTH(be32toh(cpl->pdulength_length));
1485 
1486 	tp = intotcpcb(inp);
1487 
1488 #ifdef VERBOSE_TRACES
1489 	CTR6(KTR_CXGBE, "%s: tid %u PDU len %d len %d seq %u, rcv_nxt %u",
1490 	    __func__, tid, pdu_length, len, be32toh(cpl->seq), tp->rcv_nxt);
1491 #endif
1492 
1493 	tp->rcv_nxt += pdu_length;
1494 	if (tp->rcv_wnd < pdu_length) {
1495 		toep->tls.rcv_over += pdu_length - tp->rcv_wnd;
1496 		tp->rcv_wnd = 0;
1497 	} else
1498 		tp->rcv_wnd -= pdu_length;
1499 
1500 	/* XXX: Not sure what to do about urgent data. */
1501 
1502 	/*
1503 	 * The payload of this CPL is the TLS header followed by
1504 	 * additional fields.
1505 	 */
1506 	KASSERT(m->m_len >= sizeof(*tls_hdr_pkt),
1507 	    ("%s: payload too small", __func__));
1508 	tls_hdr_pkt = mtod(m, void *);
1509 
1510 	/*
1511 	 * Only the TLS header is sent to OpenSSL, so report errors by
1512 	 * altering the record type.
1513 	 */
1514 	if ((tls_hdr_pkt->res_to_mac_error & M_TLSRX_HDR_PKT_ERROR) != 0)
1515 		tls_hdr_pkt->type = CONTENT_TYPE_ERROR;
1516 
1517 	/* Trim this CPL's mbuf to only include the TLS header. */
1518 	KASSERT(m->m_len == len && m->m_next == NULL,
1519 	    ("%s: CPL spans multiple mbufs", __func__));
1520 	m->m_len = TLS_HEADER_LENGTH;
1521 	m->m_pkthdr.len = TLS_HEADER_LENGTH;
1522 
1523 	tls_data = mbufq_dequeue(&toep->ulp_pdu_reclaimq);
1524 	if (tls_data != NULL) {
1525 		KASSERT(be32toh(cpl->seq) == tls_data->m_pkthdr.tls_tcp_seq,
1526 		    ("%s: sequence mismatch", __func__));
1527 
1528 		/*
1529 		 * Update the TLS header length to be the length of
1530 		 * the payload data.
1531 		 */
1532 		tls_hdr_pkt->length = htobe16(tls_data->m_pkthdr.len);
1533 
		m->m_next = tls_data;
		m->m_pkthdr.len += tls_data->m_pkthdr.len;
	}

	so = inp_inpcbtosocket(inp);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);

	if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) {
		struct epoch_tracker et;

		CTR3(KTR_CXGBE, "%s: tid %u, excess rx (%d bytes)",
		    __func__, tid, pdu_length);
		m_freem(m);
		SOCKBUF_UNLOCK(sb);
		INP_WUNLOCK(inp);

		CURVNET_SET(toep->vnet);
		INP_INFO_RLOCK_ET(&V_tcbinfo, et);
		INP_WLOCK(inp);
		tp = tcp_drop(tp, ECONNRESET);
		if (tp)
			INP_WUNLOCK(inp);
		INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
		CURVNET_RESTORE();

		return (0);
	}

	/*
	 * Not all of the bytes on the wire are included in the socket buffer
	 * (e.g. the MAC of the TLS record).  However, those bytes are included
	 * in the TCP sequence space.
	 */

	/* receive buffer autosize */
	MPASS(toep->vnet == so->so_vnet);
	CURVNET_SET(toep->vnet);
	if (sb->sb_flags & SB_AUTOSIZE &&
	    V_tcp_do_autorcvbuf &&
	    sb->sb_hiwat < V_tcp_autorcvbuf_max &&
	    m->m_pkthdr.len > (sbspace(sb) / 8 * 7)) {
		unsigned int hiwat = sb->sb_hiwat;
		unsigned int newsize = min(hiwat + sc->tt.autorcvbuf_inc,
		    V_tcp_autorcvbuf_max);

		if (!sbreserve_locked(sb, newsize, so, NULL))
			sb->sb_flags &= ~SB_AUTOSIZE;
	}

	sbappendstream_locked(sb, m, 0);
	rx_credits = sbspace(sb) > tp->rcv_wnd ? sbspace(sb) - tp->rcv_wnd : 0;
#ifdef VERBOSE_TRACES
	CTR4(KTR_CXGBE, "%s: tid %u rx_credits %u rcv_wnd %u",
	    __func__, tid, rx_credits, tp->rcv_wnd);
#endif
	if (rx_credits > 0 && sbused(sb) + tp->rcv_wnd < sb->sb_lowat) {
		rx_credits = send_rx_credits(sc, toep, rx_credits);
		tp->rcv_wnd += rx_credits;
		tp->rcv_adv += rx_credits;
	}

	sorwakeup_locked(so);
	SOCKBUF_UNLOCK_ASSERT(sb);

	INP_WUNLOCK(inp);
	CURVNET_RESTORE();
	return (0);
}

void
t4_tls_mod_load(void)
{

	mtx_init(&tls_handshake_lock, "t4tls handshake", NULL, MTX_DEF);
	t4_register_cpl_handler(CPL_TLS_DATA, do_tls_data);
	t4_register_cpl_handler(CPL_RX_TLS_CMP, do_rx_tls_cmp);
}

void
t4_tls_mod_unload(void)
{

	t4_register_cpl_handler(CPL_TLS_DATA, NULL);
	t4_register_cpl_handler(CPL_RX_TLS_CMP, NULL);
	mtx_destroy(&tls_handshake_lock);
}
#endif	/* TCP_OFFLOAD */