xref: /freebsd/sys/dev/cxgbe/tom/t4_tls.c (revision 19318a62d7f8cfe2f0f5c24178fa33e8844ae5d1)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2017-2018 Chelsio Communications, Inc.
5  * All rights reserved.
6  * Written by: John Baldwin <jhb@FreeBSD.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #include "opt_inet.h"
31 #include "opt_kern_tls.h"
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <sys/param.h>
37 #include <sys/ktr.h>
38 #ifdef KERN_TLS
39 #include <sys/ktls.h>
40 #endif
41 #include <sys/sglist.h>
42 #include <sys/socket.h>
43 #include <sys/socketvar.h>
44 #include <sys/systm.h>
45 #include <netinet/in.h>
46 #include <netinet/in_pcb.h>
47 #include <netinet/tcp_var.h>
48 #include <netinet/toecore.h>
49 #ifdef KERN_TLS
50 #include <opencrypto/cryptodev.h>
51 #include <opencrypto/xform.h>
52 #endif
53 
54 #ifdef TCP_OFFLOAD
55 #include "common/common.h"
56 #include "common/t4_tcb.h"
57 #include "crypto/t4_crypto.h"
58 #include "tom/t4_tom_l2t.h"
59 #include "tom/t4_tom.h"
60 
61 /*
62  * The TCP sequence number of a CPL_TLS_DATA mbuf is saved here while
63  * the mbuf is in the ulp_pdu_reclaimq.
64  */
65 #define	tls_tcp_seq	PH_loc.thirtytwo[0]
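
/*
 * Usage sketch (illustrative assumption, not code from this driver):
 * with the alias above, a receive handler can stash the sequence
 * number in the packet header before parking the mbuf on the
 * reclaim queue, and read it back when the mbuf is reclaimed:
 *
 *	m->m_pkthdr.tls_tcp_seq = be32toh(cpl->seq);	(on enqueue)
 *	seq = m->m_pkthdr.tls_tcp_seq;			(on reclaim)
 */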
66 
67 static void
68 t4_set_tls_tcb_field(struct toepcb *toep, uint16_t word, uint64_t mask,
69     uint64_t val)
70 {
71 	struct adapter *sc = td_adapter(toep->td);
72 
73 	t4_set_tcb_field(sc, &toep->ofld_txq->wrq, toep, word, mask, val, 0, 0);
74 }
75 
76 /* TLS and DTLS common routines */
77 bool
78 can_tls_offload(struct adapter *sc)
79 {
80 
81 	return (sc->tt.tls && sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS);
82 }
83 
84 int
85 tls_tx_key(struct toepcb *toep)
86 {
87 	struct tls_ofld_info *tls_ofld = &toep->tls;
88 
89 	return (tls_ofld->tx_key_addr >= 0);
90 }
91 
92 int
93 tls_rx_key(struct toepcb *toep)
94 {
95 	struct tls_ofld_info *tls_ofld = &toep->tls;
96 
97 	return (tls_ofld->rx_key_addr >= 0);
98 }
99 
100 static int
101 key_size(struct toepcb *toep)
102 {
103 	struct tls_ofld_info *tls_ofld = &toep->tls;
104 
105 	return ((tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_IMMEDIATE) ?
106 		tls_ofld->k_ctx.tx_key_info_size : KEY_IN_DDR_SIZE);
107 }
108 
109 /* Set TLS Key-Id in TCB */
110 static void
111 t4_set_tls_keyid(struct toepcb *toep, unsigned int key_id)
112 {
113 
114 	t4_set_tls_tcb_field(toep, W_TCB_RX_TLS_KEY_TAG,
115 			 V_TCB_RX_TLS_KEY_TAG(M_TCB_RX_TLS_BUF_TAG),
116 			 V_TCB_RX_TLS_KEY_TAG(key_id));
117 }
118 
119 /* Clear TF_RX_QUIESCE to re-enable receive. */
120 static void
121 t4_clear_rx_quiesce(struct toepcb *toep)
122 {
123 
124 	t4_set_tls_tcb_field(toep, W_TCB_T_FLAGS, V_TF_RX_QUIESCE(1), 0);
125 }
126 
127 static void
128 tls_clr_ofld_mode(struct toepcb *toep)
129 {
130 
131 	tls_stop_handshake_timer(toep);
132 
133 	KASSERT(toep->tls.rx_key_addr == -1,
134 	    ("%s: tid %d has RX key", __func__, toep->tid));
135 
136 	/* Switch to plain TOE mode. */
137 	t4_set_tls_tcb_field(toep, W_TCB_ULP_RAW,
138 	    V_TCB_ULP_RAW(V_TF_TLS_ENABLE(1)),
139 	    V_TCB_ULP_RAW(V_TF_TLS_ENABLE(0)));
140 	t4_set_tls_tcb_field(toep, W_TCB_ULP_TYPE,
141 	    V_TCB_ULP_TYPE(M_TCB_ULP_TYPE), V_TCB_ULP_TYPE(ULP_MODE_NONE));
142 	t4_clear_rx_quiesce(toep);
143 
144 	toep->flags &= ~(TPF_FORCE_CREDITS | TPF_TLS_ESTABLISHED);
145 	toep->params.ulp_mode = ULP_MODE_NONE;
146 }
147 
148 static void
149 tls_clr_quiesce(struct toepcb *toep)
150 {
151 
152 	tls_stop_handshake_timer(toep);
153 	t4_clear_rx_quiesce(toep);
154 }
155 
156 /*
157  * Calculate the TLS data expansion size
158  */
159 static int
160 tls_expansion_size(struct toepcb *toep, int data_len, int full_pdus_only,
161     unsigned short *pdus_per_ulp)
162 {
163 	struct tls_ofld_info *tls_ofld = &toep->tls;
164 	struct tls_scmd *scmd = &tls_ofld->scmd0;
165 	int expn_size = 0, frag_count = 0, pad_per_pdu = 0,
166 	    pad_last_pdu = 0, last_frag_size = 0, max_frag_size = 0;
167 	int exp_per_pdu = 0;
168 	int hdr_len = TLS_HEADER_LENGTH;
169 
170 	do {
171 		max_frag_size = tls_ofld->k_ctx.frag_size;
172 		if (G_SCMD_CIPH_MODE(scmd->seqno_numivs) ==
173 		   SCMD_CIPH_MODE_AES_GCM) {
174 			frag_count = (data_len / max_frag_size);
175 			exp_per_pdu = GCM_TAG_SIZE + AEAD_EXPLICIT_DATA_SIZE +
176 				hdr_len;
177 			expn_size =  frag_count * exp_per_pdu;
178 			if (full_pdus_only) {
179 				*pdus_per_ulp = data_len / (exp_per_pdu +
180 					max_frag_size);
181 				if (*pdus_per_ulp > 32)
182 					*pdus_per_ulp = 32;
183 				else if (!*pdus_per_ulp)
184 					*pdus_per_ulp = 1;
185 				expn_size = (*pdus_per_ulp) * exp_per_pdu;
186 				break;
187 			}
188 			if ((last_frag_size = data_len % max_frag_size) > 0) {
189 				frag_count += 1;
190 				expn_size += exp_per_pdu;
191 			}
192 			break;
193 		} else if (G_SCMD_CIPH_MODE(scmd->seqno_numivs) !=
194 			   SCMD_CIPH_MODE_NOP) {
195 			/* Calculate the number of fragments we can make */
196 			frag_count  = (data_len / max_frag_size);
197 			if (frag_count > 0) {
198 				pad_per_pdu = (((howmany((max_frag_size +
199 						       tls_ofld->mac_length),
200 						      CIPHER_BLOCK_SIZE)) *
201 						CIPHER_BLOCK_SIZE) -
202 					       (max_frag_size +
203 						tls_ofld->mac_length));
204 				if (!pad_per_pdu)
205 					pad_per_pdu = CIPHER_BLOCK_SIZE;
206 				exp_per_pdu = pad_per_pdu +
207 				       	tls_ofld->mac_length +
208 					hdr_len + CIPHER_BLOCK_SIZE;
209 				expn_size = frag_count * exp_per_pdu;
210 			}
211 			if (full_pdus_only) {
212 				*pdus_per_ulp = data_len / (exp_per_pdu +
213 					max_frag_size);
214 				if (*pdus_per_ulp > 32)
215 					*pdus_per_ulp = 32;
216 				else if (!*pdus_per_ulp)
217 					*pdus_per_ulp = 1;
218 				expn_size = (*pdus_per_ulp) * exp_per_pdu;
219 				break;
220 			}
221 			/* Consider the last fragment */
222 			if ((last_frag_size = data_len % max_frag_size) > 0) {
223 				pad_last_pdu = (((howmany((last_frag_size +
224 							tls_ofld->mac_length),
225 						       CIPHER_BLOCK_SIZE)) *
226 						 CIPHER_BLOCK_SIZE) -
227 						(last_frag_size +
228 						 tls_ofld->mac_length));
229 				if (!pad_last_pdu)
230 					pad_last_pdu = CIPHER_BLOCK_SIZE;
231 				expn_size += (pad_last_pdu +
232 					      tls_ofld->mac_length + hdr_len +
233 					      CIPHER_BLOCK_SIZE);
234 			}
235 		}
236 	} while (0);
237 
238 	return (expn_size);
239 }
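
/*
 * Worked example (illustrative): with the default 4096-byte fragment
 * size and AES-GCM, each PDU expands by TLS_HEADER_LENGTH (5) +
 * AEAD_EXPLICIT_DATA_SIZE (8) + GCM_TAG_SIZE (16) = 29 bytes, so a
 * 16000-byte payload spanning 4 PDUs (3 full plus 1 partial) grows
 * by 4 * 29 = 116 bytes.  The byte values are the standard TLS 1.2
 * AES-GCM sizes and are stated here as assumptions about the macros.
 */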
240 
241 /* Copy Key to WR */
242 static void
243 tls_copy_tx_key(struct toepcb *toep, void *dst)
244 {
245 	struct tls_ofld_info *tls_ofld = &toep->tls;
246 	struct ulptx_sc_memrd *sc_memrd;
247 	struct ulptx_idata *sc;
248 
249 	if (tls_ofld->k_ctx.tx_key_info_size <= 0)
250 		return;
251 
252 	if (tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_DDR) {
253 		sc = dst;
254 		sc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
255 		sc->len = htobe32(0);
256 		sc_memrd = (struct ulptx_sc_memrd *)(sc + 1);
257 		sc_memrd->cmd_to_len = htobe32(V_ULPTX_CMD(ULP_TX_SC_MEMRD) |
258 		    V_ULP_TX_SC_MORE(1) |
259 		    V_ULPTX_LEN16(tls_ofld->k_ctx.tx_key_info_size >> 4));
260 		sc_memrd->addr = htobe32(tls_ofld->tx_key_addr >> 5);
261 	} else if (tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_IMMEDIATE) {
262 		memcpy(dst, &tls_ofld->k_ctx.tx,
263 		    tls_ofld->k_ctx.tx_key_info_size);
264 	}
265 }
266 
267 /* TLS/DTLS content type for CPL SFO */
268 static inline unsigned char
269 tls_content_type(unsigned char content_type)
270 {
271 	/*
272 	 * XXX: Shouldn't this map CONTENT_TYPE_APP_DATA to DATA and
273 	 * default to "CUSTOM" for all other types including
274 	 * heartbeat?
275 	 */
276 	switch (content_type) {
277 	case CONTENT_TYPE_CCS:
278 		return CPL_TX_TLS_SFO_TYPE_CCS;
279 	case CONTENT_TYPE_ALERT:
280 		return CPL_TX_TLS_SFO_TYPE_ALERT;
281 	case CONTENT_TYPE_HANDSHAKE:
282 		return CPL_TX_TLS_SFO_TYPE_HANDSHAKE;
283 	case CONTENT_TYPE_HEARTBEAT:
284 		return CPL_TX_TLS_SFO_TYPE_HEARTBEAT;
285 	}
286 	return CPL_TX_TLS_SFO_TYPE_DATA;
287 }
288 
289 static unsigned char
290 get_cipher_key_size(unsigned int ck_size)
291 {
292 	switch (ck_size) {
293 	case AES_NOP: /* NOP */
294 		return 15;
295 	case AES_128: /* AES128 */
296 		return CH_CK_SIZE_128;
297 	case AES_192: /* AES192 */
298 		return CH_CK_SIZE_192;
299 	case AES_256: /* AES256 */
300 		return CH_CK_SIZE_256;
301 	default:
302 		return CH_CK_SIZE_256;
303 	}
304 }
305 
306 static unsigned char
307 get_mac_key_size(unsigned int mk_size)
308 {
309 	switch (mk_size) {
310 	case SHA_NOP: /* NOP */
311 		return CH_MK_SIZE_128;
312 	case SHA_GHASH: /* GHASH */
313 	case SHA_512: /* SHA512 */
314 		return CH_MK_SIZE_512;
315 	case SHA_224: /* SHA2-224 */
316 		return CH_MK_SIZE_192;
317 	case SHA_256: /* SHA2-256 */
318 		return CH_MK_SIZE_256;
319 	case SHA_384: /* SHA384 */
320 		return CH_MK_SIZE_512;
321 	case SHA1: /* SHA1 */
322 	default:
323 		return CH_MK_SIZE_160;
324 	}
325 }
326 
327 static unsigned int
328 get_proto_ver(int proto_ver)
329 {
330 	switch (proto_ver) {
331 	case TLS1_2_VERSION:
332 		return TLS_1_2_VERSION;
333 	case TLS1_1_VERSION:
334 		return TLS_1_1_VERSION;
335 	case DTLS1_2_VERSION:
336 		return DTLS_1_2_VERSION;
337 	default:
338 		return TLS_VERSION_MAX;
339 	}
340 }
341 
342 static void
343 tls_rxkey_flit1(struct tls_keyctx *kwr, struct tls_key_context *kctx)
344 {
345 
346 	if (kctx->state.enc_mode == CH_EVP_CIPH_GCM_MODE) {
347 		kwr->u.rxhdr.ivinsert_to_authinsrt =
348 		    htobe64(V_TLS_KEYCTX_TX_WR_IVINSERT(6ULL) |
349 			V_TLS_KEYCTX_TX_WR_AADSTRTOFST(1ULL) |
350 			V_TLS_KEYCTX_TX_WR_AADSTOPOFST(5ULL) |
351 			V_TLS_KEYCTX_TX_WR_AUTHSRTOFST(14ULL) |
352 			V_TLS_KEYCTX_TX_WR_AUTHSTOPOFST(16ULL) |
353 			V_TLS_KEYCTX_TX_WR_CIPHERSRTOFST(14ULL) |
354 			V_TLS_KEYCTX_TX_WR_CIPHERSTOPOFST(0ULL) |
355 			V_TLS_KEYCTX_TX_WR_AUTHINSRT(16ULL));
356 		kwr->u.rxhdr.ivpresent_to_rxmk_size &=
357 			~(V_TLS_KEYCTX_TX_WR_RXOPAD_PRESENT(1));
358 		kwr->u.rxhdr.authmode_to_rxvalid &=
359 			~(V_TLS_KEYCTX_TX_WR_CIPHAUTHSEQCTRL(1));
360 	} else {
361 		kwr->u.rxhdr.ivinsert_to_authinsrt =
362 		    htobe64(V_TLS_KEYCTX_TX_WR_IVINSERT(6ULL) |
363 			V_TLS_KEYCTX_TX_WR_AADSTRTOFST(1ULL) |
364 			V_TLS_KEYCTX_TX_WR_AADSTOPOFST(5ULL) |
365 			V_TLS_KEYCTX_TX_WR_AUTHSRTOFST(22ULL) |
366 			V_TLS_KEYCTX_TX_WR_AUTHSTOPOFST(0ULL) |
367 			V_TLS_KEYCTX_TX_WR_CIPHERSRTOFST(22ULL) |
368 			V_TLS_KEYCTX_TX_WR_CIPHERSTOPOFST(0ULL) |
369 			V_TLS_KEYCTX_TX_WR_AUTHINSRT(0ULL));
370 	}
371 }
372 
373 /* Rx key */
374 static void
375 prepare_rxkey_wr(struct tls_keyctx *kwr, struct tls_key_context *kctx)
376 {
377 	unsigned int ck_size = kctx->cipher_secret_size;
378 	unsigned int mk_size = kctx->mac_secret_size;
379 	int proto_ver = kctx->proto_ver;
380 
381 	kwr->u.rxhdr.flitcnt_hmacctrl =
382 		((kctx->rx_key_info_size >> 4) << 3) | kctx->hmac_ctrl;
383 
384 	kwr->u.rxhdr.protover_ciphmode =
385 		V_TLS_KEYCTX_TX_WR_PROTOVER(get_proto_ver(proto_ver)) |
386 		V_TLS_KEYCTX_TX_WR_CIPHMODE(kctx->state.enc_mode);
387 
388 	kwr->u.rxhdr.authmode_to_rxvalid =
389 		V_TLS_KEYCTX_TX_WR_AUTHMODE(kctx->state.auth_mode) |
390 		V_TLS_KEYCTX_TX_WR_CIPHAUTHSEQCTRL(1) |
391 		V_TLS_KEYCTX_TX_WR_SEQNUMCTRL(3) |
392 		V_TLS_KEYCTX_TX_WR_RXVALID(1);
393 
394 	kwr->u.rxhdr.ivpresent_to_rxmk_size =
395 		V_TLS_KEYCTX_TX_WR_IVPRESENT(0) |
396 		V_TLS_KEYCTX_TX_WR_RXOPAD_PRESENT(1) |
397 		V_TLS_KEYCTX_TX_WR_RXCK_SIZE(get_cipher_key_size(ck_size)) |
398 		V_TLS_KEYCTX_TX_WR_RXMK_SIZE(get_mac_key_size(mk_size));
399 
400 	tls_rxkey_flit1(kwr, kctx);
401 
402 	/* No key reversal for GCM */
403 	if (kctx->state.enc_mode != CH_EVP_CIPH_GCM_MODE) {
404 		t4_aes_getdeckey(kwr->keys.edkey, kctx->rx.key,
405 				 (kctx->cipher_secret_size << 3));
406 		memcpy(kwr->keys.edkey + kctx->cipher_secret_size,
407 		       kctx->rx.key + kctx->cipher_secret_size,
408 		       (IPAD_SIZE + OPAD_SIZE));
409 	} else {
410 		memcpy(kwr->keys.edkey, kctx->rx.key,
411 		       (kctx->rx_key_info_size - SALT_SIZE));
412 		memcpy(kwr->u.rxhdr.rxsalt, kctx->rx.salt, SALT_SIZE);
413 	}
414 }
415 
416 /* Tx key */
417 static void
418 prepare_txkey_wr(struct tls_keyctx *kwr, struct tls_key_context *kctx)
419 {
420 	unsigned int ck_size = kctx->cipher_secret_size;
421 	unsigned int mk_size = kctx->mac_secret_size;
422 
423 	kwr->u.txhdr.ctxlen =
424 		(kctx->tx_key_info_size >> 4);
425 	kwr->u.txhdr.dualck_to_txvalid =
426 		V_TLS_KEYCTX_TX_WR_TXOPAD_PRESENT(1) |
427 		V_TLS_KEYCTX_TX_WR_SALT_PRESENT(1) |
428 		V_TLS_KEYCTX_TX_WR_TXCK_SIZE(get_cipher_key_size(ck_size)) |
429 		V_TLS_KEYCTX_TX_WR_TXMK_SIZE(get_mac_key_size(mk_size)) |
430 		V_TLS_KEYCTX_TX_WR_TXVALID(1);
431 
432 	memcpy(kwr->keys.edkey, kctx->tx.key, HDR_KCTX_SIZE);
433 	if (kctx->state.enc_mode == CH_EVP_CIPH_GCM_MODE) {
434 		memcpy(kwr->u.txhdr.txsalt, kctx->tx.salt, SALT_SIZE);
435 		kwr->u.txhdr.dualck_to_txvalid &=
436 			~(V_TLS_KEYCTX_TX_WR_TXOPAD_PRESENT(1));
437 	}
438 	kwr->u.txhdr.dualck_to_txvalid = htons(kwr->u.txhdr.dualck_to_txvalid);
439 }
440 
441 /* TLS Key memory management */
442 static int
443 get_new_keyid(struct toepcb *toep)
444 {
445 	struct adapter *sc = td_adapter(toep->td);
446 	vmem_addr_t addr;
447 
448 	if (vmem_alloc(sc->key_map, TLS_KEY_CONTEXT_SZ, M_NOWAIT | M_FIRSTFIT,
449 	    &addr) != 0)
450 		return (-1);
451 
452 	return (addr);
453 }
454 
455 static void
456 free_keyid(struct toepcb *toep, int keyid)
457 {
458 	struct adapter *sc = td_adapter(toep->td);
459 
460 	vmem_free(sc->key_map, keyid, TLS_KEY_CONTEXT_SZ);
461 }
462 
463 static void
464 clear_tls_keyid(struct toepcb *toep)
465 {
466 	struct tls_ofld_info *tls_ofld = &toep->tls;
467 
468 	if (tls_ofld->rx_key_addr >= 0) {
469 		free_keyid(toep, tls_ofld->rx_key_addr);
470 		tls_ofld->rx_key_addr = -1;
471 	}
472 	if (tls_ofld->tx_key_addr >= 0) {
473 		free_keyid(toep, tls_ofld->tx_key_addr);
474 		tls_ofld->tx_key_addr = -1;
475 	}
476 }
477 
478 static int
479 get_keyid(struct tls_ofld_info *tls_ofld, unsigned int ops)
480 {
481 	return (ops & KEY_WRITE_RX ? tls_ofld->rx_key_addr :
482 		((ops & KEY_WRITE_TX) ? tls_ofld->tx_key_addr : -1));
483 }
484 
485 static int
486 get_tp_plen_max(struct tls_ofld_info *tls_ofld)
487 {
488 	int plen = ((min(3*4096, TP_TX_PG_SZ))/1448) * 1448;
489 
490 	return (tls_ofld->k_ctx.frag_size <= 8192 ? plen : FC_TP_PLEN_MAX);
491 }
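
/*
 * Worked example (illustrative, assuming TP_TX_PG_SZ >= 12KB):
 * min(3 * 4096, TP_TX_PG_SZ) = 12288 and 12288 / 1448 = 8, so plen
 * is 8 * 1448 = 11584 bytes, i.e. the largest multiple of a typical
 * 1448-byte TCP payload that fits in three pages.
 */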
492 
493 /* Send request to get the key-id */
494 static int
495 tls_program_key_id(struct toepcb *toep, struct tls_key_context *k_ctx)
496 {
497 	struct tls_ofld_info *tls_ofld = &toep->tls;
498 	struct adapter *sc = td_adapter(toep->td);
499 	struct ofld_tx_sdesc *txsd;
500 	int kwrlen, kctxlen, keyid, len;
501 	struct wrqe *wr;
502 	struct tls_key_req *kwr;
503 	struct tls_keyctx *kctx;
504 
505 	kwrlen = sizeof(*kwr);
506 	kctxlen = roundup2(sizeof(*kctx), 32);
507 	len = roundup2(kwrlen + kctxlen, 16);
508 
509 	if (toep->txsd_avail == 0)
510 		return (EAGAIN);
511 
512 	/* Don't initialize the key for renegotiation. */
513 	if (!G_KEY_CLR_LOC(k_ctx->l_p_key)) {
514 		if ((keyid = get_new_keyid(toep)) < 0) {
515 			return (ENOSPC);
516 		}
517 	} else {
518 		keyid = get_keyid(tls_ofld, k_ctx->l_p_key);
519 	}
520 
521 	wr = alloc_wrqe(len, &toep->ofld_txq->wrq);
522 	if (wr == NULL) {
523 		free_keyid(toep, keyid);
524 		return (ENOMEM);
525 	}
526 	kwr = wrtod(wr);
527 	memset(kwr, 0, kwrlen);
528 
529 	kwr->wr_hi = htobe32(V_FW_WR_OP(FW_ULPTX_WR) | F_FW_WR_COMPL |
530 	    F_FW_WR_ATOMIC);
531 	kwr->wr_mid = htobe32(V_FW_WR_LEN16(DIV_ROUND_UP(len, 16)) |
532 	    V_FW_WR_FLOWID(toep->tid));
533 	kwr->protocol = get_proto_ver(k_ctx->proto_ver);
534 	kwr->mfs = htons(k_ctx->frag_size);
535 	kwr->reneg_to_write_rx = k_ctx->l_p_key;
536 
537 	/* master command */
538 	kwr->cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
539 	    V_T5_ULP_MEMIO_ORDER(1) | V_T5_ULP_MEMIO_IMM(1));
540 	kwr->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(kctxlen >> 5));
541 	kwr->len16 = htobe32((toep->tid << 8) |
542 	    DIV_ROUND_UP(len - sizeof(struct work_request_hdr), 16));
543 	kwr->kaddr = htobe32(V_ULP_MEMIO_ADDR(keyid >> 5));
544 
545 	/* sub command */
546 	kwr->sc_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
547 	kwr->sc_len = htobe32(kctxlen);
548 
549 	kctx = (struct tls_keyctx *)(kwr + 1);
550 	memset(kctx, 0, kctxlen);
551 
552 	if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_TX) {
553 		tls_ofld->tx_key_addr = keyid;
554 		prepare_txkey_wr(kctx, k_ctx);
555 	} else if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) {
556 		tls_ofld->rx_key_addr = keyid;
557 		prepare_rxkey_wr(kctx, k_ctx);
558 	}
559 
560 	txsd = &toep->txsd[toep->txsd_pidx];
561 	txsd->tx_credits = DIV_ROUND_UP(len, 16);
562 	txsd->plen = 0;
563 	toep->tx_credits -= txsd->tx_credits;
564 	if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
565 		toep->txsd_pidx = 0;
566 	toep->txsd_avail--;
567 
568 	t4_wrq_tx(sc, wr);
569 
570 	return (0);
571 }
572 
573 /* Store a key received from SSL in DDR. */
574 static int
575 program_key_context(struct tcpcb *tp, struct toepcb *toep,
576     struct tls_key_context *uk_ctx)
577 {
578 	struct adapter *sc = td_adapter(toep->td);
579 	struct tls_ofld_info *tls_ofld = &toep->tls;
580 	struct tls_key_context *k_ctx;
581 	int error, key_offset;
582 
583 	if (tp->t_state != TCPS_ESTABLISHED) {
584 		/*
585 		 * XXX: Matches Linux driver, but not sure this is a
586 		 * very appropriate error.
587 		 */
588 		return (ENOENT);
589 	}
590 
591 	/* Stop timer on handshake completion */
592 	tls_stop_handshake_timer(toep);
593 
594 	toep->flags &= ~TPF_FORCE_CREDITS;
595 
596 	CTR4(KTR_CXGBE, "%s: tid %d %s proto_ver %#x", __func__, toep->tid,
597 	    G_KEY_GET_LOC(uk_ctx->l_p_key) == KEY_WRITE_RX ? "KEY_WRITE_RX" :
598 	    "KEY_WRITE_TX", uk_ctx->proto_ver);
599 
600 	if (G_KEY_GET_LOC(uk_ctx->l_p_key) == KEY_WRITE_RX &&
601 	    ulp_mode(toep) != ULP_MODE_TLS)
602 		return (EOPNOTSUPP);
603 
604 	/* Don't copy the 'tx' and 'rx' fields. */
605 	k_ctx = &tls_ofld->k_ctx;
606 	memcpy(&k_ctx->l_p_key, &uk_ctx->l_p_key,
607 	    sizeof(*k_ctx) - offsetof(struct tls_key_context, l_p_key));
608 
609 	/* TLS version is neither 1.1 nor 1.2, or DTLS version is not 1.2. */
610 	if (get_proto_ver(k_ctx->proto_ver) > DTLS_1_2_VERSION) {
611 		if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) {
612 			tls_ofld->rx_key_addr = -1;
613 			t4_clear_rx_quiesce(toep);
614 		} else {
615 			tls_ofld->tx_key_addr = -1;
616 		}
617 		return (0);
618 	}
619 
620 	if (k_ctx->state.enc_mode == CH_EVP_CIPH_GCM_MODE) {
621 		k_ctx->iv_size = 4;
622 		k_ctx->mac_first = 0;
623 		k_ctx->hmac_ctrl = 0;
624 	} else {
625 		k_ctx->iv_size = 8; /* CBC IV is 16 bytes; iv_size is in 2-byte units */
626 		k_ctx->mac_first = 1;
627 	}
628 
629 	tls_ofld->scmd0.seqno_numivs =
630 		(V_SCMD_SEQ_NO_CTRL(3) |
631 		 V_SCMD_PROTO_VERSION(get_proto_ver(k_ctx->proto_ver)) |
632 		 V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
633 		 V_SCMD_CIPH_AUTH_SEQ_CTRL((k_ctx->mac_first == 0)) |
634 		 V_SCMD_CIPH_MODE(k_ctx->state.enc_mode) |
635 		 V_SCMD_AUTH_MODE(k_ctx->state.auth_mode) |
636 		 V_SCMD_HMAC_CTRL(k_ctx->hmac_ctrl) |
637 		 V_SCMD_IV_SIZE(k_ctx->iv_size));
638 
639 	tls_ofld->scmd0.ivgen_hdrlen =
640 		(V_SCMD_IV_GEN_CTRL(k_ctx->iv_ctrl) |
641 		 V_SCMD_KEY_CTX_INLINE(0) |
642 		 V_SCMD_TLS_FRAG_ENABLE(1));
643 
644 	tls_ofld->mac_length = k_ctx->mac_secret_size;
645 
646 	if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) {
647 		k_ctx->rx = uk_ctx->rx;
648 		/* Don't initialize the key for renegotiation. */
649 		if (!G_KEY_CLR_LOC(k_ctx->l_p_key))
650 			tls_ofld->rx_key_addr = -1;
651 	} else {
652 		k_ctx->tx = uk_ctx->tx;
653 		/* Don't initialize the key for renegotiation. */
654 		if (!G_KEY_CLR_LOC(k_ctx->l_p_key))
655 			tls_ofld->tx_key_addr = -1;
656 	}
657 
658 	/* Flush pending data before new Tx key becomes active */
659 	if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_TX) {
660 		struct sockbuf *sb;
661 
662 		/* XXX: This might not drain everything. */
663 		t4_push_frames(sc, toep, 0);
664 		sb = &toep->inp->inp_socket->so_snd;
665 		SOCKBUF_LOCK(sb);
666 
667 		/* XXX: This asserts that everything has been pushed. */
668 		MPASS(sb->sb_sndptr == NULL || sb->sb_sndptr->m_next == NULL);
669 		sb->sb_sndptr = NULL;
670 		tls_ofld->sb_off = sbavail(sb);
671 		SOCKBUF_UNLOCK(sb);
672 		tls_ofld->tx_seq_no = 0;
673 	}
674 
675 	if ((G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) ||
676 	    (tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_DDR)) {
677 
678 		/*
679 		 * XXX: The userland library sets tx_key_info_size, not
680 		 * rx_key_info_size.
681 		 */
682 		k_ctx->rx_key_info_size = k_ctx->tx_key_info_size;
683 
684 		error = tls_program_key_id(toep, k_ctx);
685 		if (error) {
686 			/* XXX: Only clear quiesce for KEY_WRITE_RX? */
687 			t4_clear_rx_quiesce(toep);
688 			return (error);
689 		}
690 	}
691 
692 	if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) {
693 		/*
694 		 * RX key tags are an index into the key portion of MA
695 		 * memory stored as an offset from the base address in
696 		 * units of 64 bytes.
697 		 */
698 		key_offset = tls_ofld->rx_key_addr - sc->vres.key.start;
699 		t4_set_tls_keyid(toep, key_offset / 64);
700 		t4_set_tls_tcb_field(toep, W_TCB_ULP_RAW,
701 				 V_TCB_ULP_RAW(M_TCB_ULP_RAW),
702 				 V_TCB_ULP_RAW((V_TF_TLS_KEY_SIZE(3) |
703 						V_TF_TLS_CONTROL(1) |
704 						V_TF_TLS_ACTIVE(1) |
705 						V_TF_TLS_ENABLE(1))));
706 		t4_set_tls_tcb_field(toep, W_TCB_TLS_SEQ,
707 				 V_TCB_TLS_SEQ(M_TCB_TLS_SEQ),
708 				 V_TCB_TLS_SEQ(0));
709 		t4_clear_rx_quiesce(toep);
710 
711 		toep->flags |= TPF_TLS_RECEIVE;
712 	} else {
713 		unsigned short pdus_per_ulp;
714 
715 		if (tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_IMMEDIATE)
716 			tls_ofld->tx_key_addr = 1;
717 
718 		tls_ofld->fcplenmax = get_tp_plen_max(tls_ofld);
719 		tls_ofld->expn_per_ulp = tls_expansion_size(toep,
720 				tls_ofld->fcplenmax, 1, &pdus_per_ulp);
721 		tls_ofld->pdus_per_ulp = pdus_per_ulp;
722 		tls_ofld->adjusted_plen = tls_ofld->pdus_per_ulp *
723 			((tls_ofld->expn_per_ulp/tls_ofld->pdus_per_ulp) +
724 			 tls_ofld->k_ctx.frag_size);
725 	}
726 
727 	return (0);
728 }
729 
730 /*
731  * In some cases a client connection can hang because the NIC never
732  * delivers the ServerHelloDone message to the host.  Send a dummy
733  * RX_DATA_ACK with RX_MODULATE to unstick the connection.
734  */
735 static void
736 tls_send_handshake_ack(void *arg)
737 {
738 	struct toepcb *toep = arg;
739 	struct tls_ofld_info *tls_ofld = &toep->tls;
740 	struct adapter *sc = td_adapter(toep->td);
741 
742 	/* Bail without rescheduling if the connection has closed. */
743 	if ((toep->flags & (TPF_FIN_SENT | TPF_ABORT_SHUTDOWN)) != 0)
744 		return;
745 
746 	/*
747 	 * If this connection has timed out without receiving more
748 	 * data, downgrade to plain TOE mode and don't re-arm the
749 	 * timer.
750 	 */
751 	if (sc->tt.tls_rx_timeout != 0) {
752 		struct inpcb *inp;
753 		struct tcpcb *tp;
754 
755 		inp = toep->inp;
756 		tp = intotcpcb(inp);
757 		if ((ticks - tp->t_rcvtime) >= sc->tt.tls_rx_timeout) {
758 			CTR2(KTR_CXGBE, "%s: tid %d clr_ofld_mode", __func__,
759 			    toep->tid);
760 			tls_clr_ofld_mode(toep);
761 			return;
762 		}
763 	}
764 
765 	/*
766 	 * XXX: Does not have the t4_get_tcb() checks to refine the
767 	 * workaround.
768 	 */
769 	callout_schedule(&tls_ofld->handshake_timer, TLS_SRV_HELLO_RD_TM * hz);
770 
771 	CTR2(KTR_CXGBE, "%s: tid %d sending RX_DATA_ACK", __func__, toep->tid);
772 	send_rx_modulate(sc, toep);
773 }
774 
775 static void
776 tls_start_handshake_timer(struct toepcb *toep)
777 {
778 	struct tls_ofld_info *tls_ofld = &toep->tls;
779 
780 	INP_WLOCK_ASSERT(toep->inp);
781 	callout_reset(&tls_ofld->handshake_timer, TLS_SRV_HELLO_BKOFF_TM * hz,
782 	    tls_send_handshake_ack, toep);
783 }
784 
785 void
786 tls_stop_handshake_timer(struct toepcb *toep)
787 {
788 	struct tls_ofld_info *tls_ofld = &toep->tls;
789 
790 	INP_WLOCK_ASSERT(toep->inp);
791 	callout_stop(&tls_ofld->handshake_timer);
792 }
793 
794 int
795 t4_ctloutput_tls(struct socket *so, struct sockopt *sopt)
796 {
797 	struct tls_key_context uk_ctx;
798 	struct inpcb *inp;
799 	struct tcpcb *tp;
800 	struct toepcb *toep;
801 	int error, optval;
802 
803 	error = 0;
804 	if (sopt->sopt_dir == SOPT_SET &&
805 	    sopt->sopt_name == TCP_TLSOM_SET_TLS_CONTEXT) {
806 		error = sooptcopyin(sopt, &uk_ctx, sizeof(uk_ctx),
807 		    sizeof(uk_ctx));
808 		if (error)
809 			return (error);
810 	}
811 
812 	inp = sotoinpcb(so);
813 	KASSERT(inp != NULL, ("tcp_ctloutput: inp == NULL"));
814 	INP_WLOCK(inp);
815 	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
816 		INP_WUNLOCK(inp);
817 		return (ECONNRESET);
818 	}
819 	tp = intotcpcb(inp);
820 	toep = tp->t_toe;
821 	switch (sopt->sopt_dir) {
822 	case SOPT_SET:
823 		switch (sopt->sopt_name) {
824 		case TCP_TLSOM_SET_TLS_CONTEXT:
825 			if (toep->tls.mode == TLS_MODE_KTLS)
826 				error = EINVAL;
827 			else {
828 				error = program_key_context(tp, toep, &uk_ctx);
829 				if (error == 0)
830 					toep->tls.mode = TLS_MODE_TLSOM;
831 			}
832 			INP_WUNLOCK(inp);
833 			break;
834 		case TCP_TLSOM_CLR_TLS_TOM:
835 			if (toep->tls.mode == TLS_MODE_KTLS)
836 				error = EINVAL;
837 			else if (ulp_mode(toep) == ULP_MODE_TLS) {
838 				CTR2(KTR_CXGBE, "%s: tid %d CLR_TLS_TOM",
839 				    __func__, toep->tid);
840 				tls_clr_ofld_mode(toep);
841 			} else
842 				error = EOPNOTSUPP;
843 			INP_WUNLOCK(inp);
844 			break;
845 		case TCP_TLSOM_CLR_QUIES:
846 			if (toep->tls.mode == TLS_MODE_KTLS)
847 				error = EINVAL;
848 			else if (ulp_mode(toep) == ULP_MODE_TLS) {
849 				CTR2(KTR_CXGBE, "%s: tid %d CLR_QUIES",
850 				    __func__, toep->tid);
851 				tls_clr_quiesce(toep);
852 			} else
853 				error = EOPNOTSUPP;
854 			INP_WUNLOCK(inp);
855 			break;
856 		default:
857 			INP_WUNLOCK(inp);
858 			error = EOPNOTSUPP;
859 			break;
860 		}
861 		break;
862 	case SOPT_GET:
863 		switch (sopt->sopt_name) {
864 		case TCP_TLSOM_GET_TLS_TOM:
865 			/*
866 			 * TLS TX is permitted on any TOE socket, but
867 			 * TLS RX requires a TLS ULP mode.
868 			 */
869 			optval = TLS_TOM_NONE;
870 			if (can_tls_offload(td_adapter(toep->td)) &&
871 			    toep->tls.mode != TLS_MODE_KTLS) {
872 				switch (ulp_mode(toep)) {
873 				case ULP_MODE_NONE:
874 				case ULP_MODE_TCPDDP:
875 					optval = TLS_TOM_TXONLY;
876 					break;
877 				case ULP_MODE_TLS:
878 					optval = TLS_TOM_BOTH;
879 					break;
880 				}
881 			}
882 			CTR3(KTR_CXGBE, "%s: tid %d GET_TLS_TOM = %d",
883 			    __func__, toep->tid, optval);
884 			INP_WUNLOCK(inp);
885 			error = sooptcopyout(sopt, &optval, sizeof(optval));
886 			break;
887 		default:
888 			INP_WUNLOCK(inp);
889 			error = EOPNOTSUPP;
890 			break;
891 		}
892 		break;
893 	}
894 	return (error);
895 }
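
/*
 * Userland usage sketch (illustrative assumption): a TLSOM-aware
 * library could probe a connected TOE socket for offload support via
 * the option handled above:
 *
 *	int val;
 *	socklen_t len = sizeof(val);
 *
 *	if (getsockopt(s, IPPROTO_TCP, TCP_TLSOM_GET_TLS_TOM, &val,
 *	    &len) == 0 && val == TLS_TOM_BOTH)
 *		... full TLS RX/TX offload is available ...
 */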
896 
897 #ifdef KERN_TLS
898 static void
899 init_ktls_key_context(struct ktls_session *tls, struct tls_key_context *k_ctx,
900     int direction)
901 {
902 	struct auth_hash *axf;
903 	u_int key_info_size, mac_key_size;
904 	char *hash, *key;
905 
906 	k_ctx->l_p_key = V_KEY_GET_LOC(direction == KTLS_TX ? KEY_WRITE_TX :
907 	    KEY_WRITE_RX);
908 	k_ctx->proto_ver = tls->params.tls_vmajor << 8 | tls->params.tls_vminor;
909 	k_ctx->cipher_secret_size = tls->params.cipher_key_len;
910 	key_info_size = sizeof(struct tx_keyctx_hdr) +
911 	    k_ctx->cipher_secret_size;
912 	if (direction == KTLS_TX)
913 		key = k_ctx->tx.key;
914 	else
915 		key = k_ctx->rx.key;
916 	memcpy(key, tls->params.cipher_key, tls->params.cipher_key_len);
917 	hash = key + tls->params.cipher_key_len;
918 	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
919 		k_ctx->state.auth_mode = SCMD_AUTH_MODE_GHASH;
920 		k_ctx->state.enc_mode = SCMD_CIPH_MODE_AES_GCM;
921 		k_ctx->iv_size = 4;
922 		k_ctx->mac_first = 0;
923 		k_ctx->hmac_ctrl = SCMD_HMAC_CTRL_NOP;
924 		key_info_size += GMAC_BLOCK_LEN;
925 		k_ctx->mac_secret_size = 0;
926 		if (direction == KTLS_TX)
927 			memcpy(k_ctx->tx.salt, tls->params.iv, SALT_SIZE);
928 		else
929 			memcpy(k_ctx->rx.salt, tls->params.iv, SALT_SIZE);
930 		t4_init_gmac_hash(tls->params.cipher_key,
931 		    tls->params.cipher_key_len, hash);
932 	} else {
933 		switch (tls->params.auth_algorithm) {
934 		case CRYPTO_SHA1_HMAC:
935 			axf = &auth_hash_hmac_sha1;
936 			mac_key_size = SHA1_HASH_LEN;
937 			k_ctx->state.auth_mode = SCMD_AUTH_MODE_SHA1;
938 			break;
939 		case CRYPTO_SHA2_256_HMAC:
940 			axf = &auth_hash_hmac_sha2_256;
941 			mac_key_size = SHA2_256_HASH_LEN;
942 			k_ctx->state.auth_mode = SCMD_AUTH_MODE_SHA256;
943 			break;
944 		case CRYPTO_SHA2_384_HMAC:
945 			axf = &auth_hash_hmac_sha2_384;
946 			mac_key_size = SHA2_512_HASH_LEN;
947 			k_ctx->state.auth_mode = SCMD_AUTH_MODE_SHA512_384;
948 			break;
949 		default:
950 			panic("bad auth mode");
951 		}
952 		k_ctx->state.enc_mode = SCMD_CIPH_MODE_AES_CBC;
953 		k_ctx->iv_size = 8; /* CBC IV is 16 bytes; iv_size is in 2-byte units */
954 		k_ctx->mac_first = 1;
955 		k_ctx->hmac_ctrl = SCMD_HMAC_CTRL_NO_TRUNC;
956 		key_info_size += roundup2(mac_key_size, 16) * 2;
957 		k_ctx->mac_secret_size = mac_key_size;
958 		t4_init_hmac_digest(axf, mac_key_size, tls->params.auth_key,
959 		    tls->params.auth_key_len, hash);
960 	}
961 
962 	if (direction == KTLS_TX)
963 		k_ctx->tx_key_info_size = key_info_size;
964 	else
965 		k_ctx->rx_key_info_size = key_info_size;
966 	k_ctx->frag_size = tls->params.max_frame_len;
967 	k_ctx->iv_ctrl = 1;
968 }
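
/*
 * Worked sizes (illustrative, assuming a 16-byte tx_keyctx_hdr): for
 * AES-128-GCM, key_info_size = 16 + 16 (key) + GMAC_BLOCK_LEN (16) =
 * 48 bytes; for AES-128-CBC with HMAC-SHA256, key_info_size = 16 +
 * 16 (key) + 2 * roundup2(32, 16) (IPAD/OPAD digests) = 96 bytes.
 */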
969 
970 int
971 tls_alloc_ktls(struct toepcb *toep, struct ktls_session *tls, int direction)
972 {
973 	struct adapter *sc = td_adapter(toep->td);
974 	struct tls_key_context *k_ctx;
975 	int error, key_offset;
976 
977 	if (toep->tls.mode == TLS_MODE_TLSOM)
978 		return (EINVAL);
979 	if (!can_tls_offload(td_adapter(toep->td)))
980 		return (EINVAL);
981 	switch (ulp_mode(toep)) {
982 	case ULP_MODE_TLS:
983 		break;
984 	case ULP_MODE_NONE:
985 	case ULP_MODE_TCPDDP:
986 		if (direction != KTLS_TX)
987 			return (EINVAL);
988 		break;
989 	default:
990 		return (EINVAL);
991 	}
992 
993 	switch (tls->params.cipher_algorithm) {
994 	case CRYPTO_AES_CBC:
995 		/* XXX: Explicitly ignore any provided IV. */
996 		switch (tls->params.cipher_key_len) {
997 		case 128 / 8:
998 		case 192 / 8:
999 		case 256 / 8:
1000 			break;
1001 		default:
1002 			error = EINVAL;
1003 			goto clr_ofld;
1004 		}
1005 		switch (tls->params.auth_algorithm) {
1006 		case CRYPTO_SHA1_HMAC:
1007 		case CRYPTO_SHA2_256_HMAC:
1008 		case CRYPTO_SHA2_384_HMAC:
1009 			break;
1010 		default:
1011 			error = EPROTONOSUPPORT;
1012 			goto clr_ofld;
1013 		}
1014 		break;
1015 	case CRYPTO_AES_NIST_GCM_16:
1016 		if (tls->params.iv_len != SALT_SIZE) {
1017 			error = EINVAL;
1018 			goto clr_ofld;
1019 		}
1020 		switch (tls->params.cipher_key_len) {
1021 		case 128 / 8:
1022 		case 192 / 8:
1023 		case 256 / 8:
1024 			break;
1025 		default:
1026 			error = EINVAL;
1027 			goto clr_ofld;
1028 		}
1029 		break;
1030 	default:
1031 		error = EPROTONOSUPPORT;
1032 		goto clr_ofld;
1033 	}
1034 
1035 	/* Only TLS 1.1 and TLS 1.2 are currently supported. */
1036 	if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
1037 	    tls->params.tls_vminor < TLS_MINOR_VER_ONE ||
1038 	    tls->params.tls_vminor > TLS_MINOR_VER_TWO) {
1039 		error = EPROTONOSUPPORT;
1040 		goto clr_ofld;
1041 	}
1042 
1043 	/* Bail if we already have a key. */
1044 	if (direction == KTLS_TX) {
1045 		if (toep->tls.tx_key_addr != -1)
1046 			return (EOPNOTSUPP);
1047 	} else {
1048 		if (toep->tls.rx_key_addr != -1)
1049 			return (EOPNOTSUPP);
1050 	}
1051 
1052 	/*
1053 	 * XXX: This assumes no key renegotiation.  If KTLS ever supports
1054 	 * that we will want to allocate TLS sessions dynamically rather
1055 	 * than as a static member of toep.
1056 	 */
1057 	k_ctx = &toep->tls.k_ctx;
1058 	init_ktls_key_context(tls, k_ctx, direction);
1059 
1060 	error = tls_program_key_id(toep, k_ctx);
1061 	if (error) {
1062 		if (direction == KTLS_RX)
1063 			goto clr_ofld;
1064 		return (error);
1065 	}
1066 
1067 	if (direction == KTLS_TX) {
1068 		toep->tls.scmd0.seqno_numivs =
1069 			(V_SCMD_SEQ_NO_CTRL(3) |
1070 			 V_SCMD_PROTO_VERSION(get_proto_ver(k_ctx->proto_ver)) |
1071 			 V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
1072 			 V_SCMD_CIPH_AUTH_SEQ_CTRL((k_ctx->mac_first == 0)) |
1073 			 V_SCMD_CIPH_MODE(k_ctx->state.enc_mode) |
1074 			 V_SCMD_AUTH_MODE(k_ctx->state.auth_mode) |
1075 			 V_SCMD_HMAC_CTRL(k_ctx->hmac_ctrl) |
1076 			 V_SCMD_IV_SIZE(k_ctx->iv_size));
1077 
1078 		toep->tls.scmd0.ivgen_hdrlen =
1079 			(V_SCMD_IV_GEN_CTRL(k_ctx->iv_ctrl) |
1080 			 V_SCMD_KEY_CTX_INLINE(0) |
1081 			 V_SCMD_TLS_FRAG_ENABLE(1));
1082 
1083 		if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
1084 			toep->tls.iv_len = 8;
1085 		else
1086 			toep->tls.iv_len = AES_BLOCK_LEN;
1087 
1088 		toep->tls.mac_length = k_ctx->mac_secret_size;
1089 
1090 		toep->tls.fcplenmax = get_tp_plen_max(&toep->tls);
1091 		toep->tls.expn_per_ulp = tls->params.tls_hlen +
1092 		    tls->params.tls_tlen;
1093 		toep->tls.pdus_per_ulp = 1;
1094 		toep->tls.adjusted_plen = toep->tls.expn_per_ulp +
1095 		    toep->tls.k_ctx.frag_size;
1096 	} else {
1097 		/* Stop timer on handshake completion */
1098 		tls_stop_handshake_timer(toep);
1099 
1100 		toep->flags &= ~TPF_FORCE_CREDITS;
1101 		toep->flags |= TPF_TLS_RECEIVE;
1102 
1103 		/*
1104 		 * RX key tags are an index into the key portion of MA
1105 		 * memory stored as an offset from the base address in
1106 		 * units of 64 bytes.
1107 		 */
1108 		key_offset = toep->tls.rx_key_addr - sc->vres.key.start;
1109 		t4_set_tls_keyid(toep, key_offset / 64);
1110 		t4_set_tls_tcb_field(toep, W_TCB_ULP_RAW,
1111 				 V_TCB_ULP_RAW(M_TCB_ULP_RAW),
1112 				 V_TCB_ULP_RAW((V_TF_TLS_KEY_SIZE(3) |
1113 						V_TF_TLS_CONTROL(1) |
1114 						V_TF_TLS_ACTIVE(1) |
1115 						V_TF_TLS_ENABLE(1))));
1116 		t4_set_tls_tcb_field(toep, W_TCB_TLS_SEQ,
1117 				 V_TCB_TLS_SEQ(M_TCB_TLS_SEQ),
1118 				 V_TCB_TLS_SEQ(0));
1119 		t4_clear_rx_quiesce(toep);
1120 	}
1121 
1122 	toep->tls.mode = TLS_MODE_KTLS;
1123 
1124 	return (0);
1125 
1126 clr_ofld:
1127 	if (ulp_mode(toep) == ULP_MODE_TLS) {
1128 		CTR2(KTR_CXGBE, "%s: tid %d clr_ofld_mode", __func__,
1129 		    toep->tid);
1130 		tls_clr_ofld_mode(toep);
1131 	}
1132 	return (error);
1133 }
1134 #endif
1135 
1136 void
1137 tls_init_toep(struct toepcb *toep)
1138 {
1139 	struct tls_ofld_info *tls_ofld = &toep->tls;
1140 
1141 	tls_ofld->mode = TLS_MODE_OFF;
1142 	tls_ofld->key_location = TLS_SFO_WR_CONTEXTLOC_DDR;
1143 	tls_ofld->rx_key_addr = -1;
1144 	tls_ofld->tx_key_addr = -1;
1145 }
1146 
1147 void
1148 tls_establish(struct toepcb *toep)
1149 {
1150 
1151 	/*
1152 	 * Enable PDU extraction.
1153 	 *
1154 	 * XXX: Supposedly this should be done by the firmware when
1155 	 * the ULP_MODE FLOWC parameter is set in send_flowc_wr(), but
1156 	 * in practice this seems to be required.
1157 	 */
1158 	CTR2(KTR_CXGBE, "%s: tid %d setting TLS_ENABLE", __func__, toep->tid);
1159 	t4_set_tls_tcb_field(toep, W_TCB_ULP_RAW, V_TCB_ULP_RAW(M_TCB_ULP_RAW),
1160 	    V_TCB_ULP_RAW(V_TF_TLS_ENABLE(1)));
1161 
1162 	toep->flags |= TPF_FORCE_CREDITS | TPF_TLS_ESTABLISHED;
1163 
1164 	callout_init_rw(&toep->tls.handshake_timer, &toep->inp->inp_lock, 0);
1165 	tls_start_handshake_timer(toep);
1166 }
1167 
1168 void
1169 tls_detach(struct toepcb *toep)
1170 {
1171 
1172 	if (toep->flags & TPF_TLS_ESTABLISHED) {
1173 		tls_stop_handshake_timer(toep);
1174 		toep->flags &= ~TPF_TLS_ESTABLISHED;
1175 	}
1176 }
1177 
1178 void
1179 tls_uninit_toep(struct toepcb *toep)
1180 {
1181 
1182 	MPASS((toep->flags & TPF_TLS_ESTABLISHED) == 0);
1183 	clear_tls_keyid(toep);
1184 }
1185 
1186 #define MAX_OFLD_TX_CREDITS (SGE_MAX_WR_LEN / 16)
1187 #define	MIN_OFLD_TLSTX_CREDITS(toep)					\
1188 	(howmany(sizeof(struct fw_tlstx_data_wr) +			\
1189 	    sizeof(struct cpl_tx_tls_sfo) + key_size((toep)) +		\
1190 	    CIPHER_BLOCK_SIZE + 1, 16))
1191 
1192 static inline u_int
1193 max_imm_tls_space(int tx_credits)
1194 {
1195 	const int n = 2;	/* Use only up to 2 desc for imm. data WR */
1196 	int space;
1197 
1198 	KASSERT(tx_credits >= 0 &&
1199 		tx_credits <= MAX_OFLD_TX_CREDITS,
1200 		("%s: %d credits", __func__, tx_credits));
1201 
1202 	if (tx_credits >= (n * EQ_ESIZE) / 16)
1203 		space = (n * EQ_ESIZE);
1204 	else
1205 		space = tx_credits * 16;
1206 	return (space);
1207 }
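
/*
 * Worked example (illustrative, assuming the usual 64-byte hardware
 * descriptor, i.e. EQ_ESIZE == 64): with n = 2, any connection
 * holding at least (2 * 64) / 16 = 8 credits gets the full 128 bytes
 * of immediate-data space; below that, space is tx_credits * 16.
 */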
1208 
1209 static int
1210 count_mbuf_segs(struct mbuf *m, int skip, int len, int *max_nsegs_1mbufp)
1211 {
1212 	int max_nsegs_1mbuf, n, nsegs;
1213 
1214 	while (skip >= m->m_len) {
1215 		skip -= m->m_len;
1216 		m = m->m_next;
1217 	}
1218 
1219 	nsegs = 0;
1220 	max_nsegs_1mbuf = 0;
1221 	while (len > 0) {
1222 		n = sglist_count(mtod(m, char *) + skip, m->m_len - skip);
1223 		if (n > max_nsegs_1mbuf)
1224 			max_nsegs_1mbuf = n;
1225 		nsegs += n;
1226 		len -= m->m_len - skip;
1227 		skip = 0;
1228 		m = m->m_next;
1229 	}
1230 	*max_nsegs_1mbufp = max_nsegs_1mbuf;
1231 	return (nsegs);
1232 }
1233 
1234 static void
1235 write_tlstx_wr(struct fw_tlstx_data_wr *txwr, struct toepcb *toep,
1236     unsigned int immdlen, unsigned int plen, unsigned int expn,
1237     unsigned int pdus, uint8_t credits, int shove, int imm_ivs)
1238 {
1239 	struct tls_ofld_info *tls_ofld = &toep->tls;
1240 	unsigned int len = plen + expn;
1241 
1242 	txwr->op_to_immdlen = htobe32(V_WR_OP(FW_TLSTX_DATA_WR) |
1243 	    V_FW_TLSTX_DATA_WR_COMPL(1) |
1244 	    V_FW_TLSTX_DATA_WR_IMMDLEN(immdlen));
1245 	txwr->flowid_len16 = htobe32(V_FW_TLSTX_DATA_WR_FLOWID(toep->tid) |
1246 	    V_FW_TLSTX_DATA_WR_LEN16(credits));
1247 	txwr->plen = htobe32(len);
1248 	txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(ULP_MODE_TLS) |
1249 	    V_TX_URG(0) | /* F_T6_TX_FORCE | */ V_TX_SHOVE(shove));
1250 	txwr->ctxloc_to_exp = htobe32(V_FW_TLSTX_DATA_WR_NUMIVS(pdus) |
1251 	    V_FW_TLSTX_DATA_WR_EXP(expn) |
1252 	    V_FW_TLSTX_DATA_WR_CTXLOC(tls_ofld->key_location) |
1253 	    V_FW_TLSTX_DATA_WR_IVDSGL(!imm_ivs) |
1254 	    V_FW_TLSTX_DATA_WR_KEYSIZE(tls_ofld->k_ctx.tx_key_info_size >> 4));
1255 	txwr->mfs = htobe16(tls_ofld->k_ctx.frag_size);
1256 	txwr->adjustedplen_pkd = htobe16(
1257 	    V_FW_TLSTX_DATA_WR_ADJUSTEDPLEN(tls_ofld->adjusted_plen));
1258 	txwr->expinplenmax_pkd = htobe16(
1259 	    V_FW_TLSTX_DATA_WR_EXPINPLENMAX(tls_ofld->expn_per_ulp));
1260 	txwr->pdusinplenmax_pkd =
1261 	    V_FW_TLSTX_DATA_WR_PDUSINPLENMAX(tls_ofld->pdus_per_ulp);
1262 }
1263 
1264 static void
1265 write_tlstx_cpl(struct cpl_tx_tls_sfo *cpl, struct toepcb *toep,
1266     struct tls_hdr *tls_hdr, unsigned int plen, unsigned int pdus)
1267 {
1268 	struct tls_ofld_info *tls_ofld = &toep->tls;
1269 	int data_type, seglen;
1270 
1271 	if (plen < tls_ofld->k_ctx.frag_size)
1272 		seglen = plen;
1273 	else
1274 		seglen = tls_ofld->k_ctx.frag_size;
1275 	data_type = tls_content_type(tls_hdr->type);
1276 	cpl->op_to_seg_len = htobe32(V_CPL_TX_TLS_SFO_OPCODE(CPL_TX_TLS_SFO) |
1277 	    V_CPL_TX_TLS_SFO_DATA_TYPE(data_type) |
1278 	    V_CPL_TX_TLS_SFO_CPL_LEN(2) | V_CPL_TX_TLS_SFO_SEG_LEN(seglen));
1279 	cpl->pld_len = htobe32(plen);
1280 	if (data_type == CPL_TX_TLS_SFO_TYPE_HEARTBEAT)
1281 		cpl->type_protover = htobe32(
1282 		    V_CPL_TX_TLS_SFO_TYPE(tls_hdr->type));
1283 	cpl->seqno_numivs = htobe32(tls_ofld->scmd0.seqno_numivs |
1284 	    V_SCMD_NUM_IVS(pdus));
1285 	cpl->ivgen_hdrlen = htobe32(tls_ofld->scmd0.ivgen_hdrlen);
1286 	cpl->scmd1 = htobe64(tls_ofld->tx_seq_no);
1287 	tls_ofld->tx_seq_no += pdus;
1288 }
1289 
1290 /*
1291  * Similar to write_tx_sgl() except that it accepts an optional
1292  * trailer buffer for IVs.
1293  */
1294 static void
1295 write_tlstx_sgl(void *dst, struct mbuf *start, int skip, int plen,
1296     void *iv_buffer, int iv_len, int nsegs, int n)
1297 {
1298 	struct mbuf *m;
1299 	struct ulptx_sgl *usgl = dst;
1300 	int i, j, rc;
1301 	struct sglist sg;
1302 	struct sglist_seg segs[n];
1303 
1304 	KASSERT(nsegs > 0, ("%s: nsegs 0", __func__));
1305 
1306 	sglist_init(&sg, n, segs);
1307 	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
1308 	    V_ULPTX_NSGE(nsegs));
1309 
1310 	for (m = start; skip >= m->m_len; m = m->m_next)
1311 		skip -= m->m_len;
1312 
1313 	i = -1;
1314 	for (m = start; plen > 0; m = m->m_next) {
1315 		rc = sglist_append(&sg, mtod(m, char *) + skip,
1316 		    m->m_len - skip);
1317 		if (__predict_false(rc != 0))
1318 			panic("%s: sglist_append %d", __func__, rc);
1319 		plen -= m->m_len - skip;
1320 		skip = 0;
1321 
1322 		for (j = 0; j < sg.sg_nseg; i++, j++) {
1323 			if (i < 0) {
1324 				usgl->len0 = htobe32(segs[j].ss_len);
1325 				usgl->addr0 = htobe64(segs[j].ss_paddr);
1326 			} else {
1327 				usgl->sge[i / 2].len[i & 1] =
1328 				    htobe32(segs[j].ss_len);
1329 				usgl->sge[i / 2].addr[i & 1] =
1330 				    htobe64(segs[j].ss_paddr);
1331 			}
1332 #ifdef INVARIANTS
1333 			nsegs--;
1334 #endif
1335 		}
1336 		sglist_reset(&sg);
1337 	}
1338 	if (iv_buffer != NULL) {
1339 		rc = sglist_append(&sg, iv_buffer, iv_len);
1340 		if (__predict_false(rc != 0))
1341 			panic("%s: sglist_append %d", __func__, rc);
1342 
1343 		for (j = 0; j < sg.sg_nseg; i++, j++) {
1344 			if (i < 0) {
1345 				usgl->len0 = htobe32(segs[j].ss_len);
1346 				usgl->addr0 = htobe64(segs[j].ss_paddr);
1347 			} else {
1348 				usgl->sge[i / 2].len[i & 1] =
1349 				    htobe32(segs[j].ss_len);
1350 				usgl->sge[i / 2].addr[i & 1] =
1351 				    htobe64(segs[j].ss_paddr);
1352 			}
1353 #ifdef INVARIANTS
1354 			nsegs--;
1355 #endif
1356 		}
1357 	}
1358 	if (i & 1)
1359 		usgl->sge[i / 2].len[1] = htobe32(0);
1360 	KASSERT(nsegs == 0, ("%s: nsegs %d, start %p, iv_buffer %p",
1361 	    __func__, nsegs, start, iv_buffer));
1362 }
1363 
1364 /*
1365  * Similar to t4_push_frames() but handles TLS sockets when TLS offload
1366  * is enabled.  Rather than transmitting bulk data, the socket buffer
1367  * contains TLS records.  The work request requires a full TLS record,
1368  * so batch mbufs up until a full TLS record is seen.  This requires
1369  * reading the TLS header out of the start of each record to determine
1370  * its length.
1371  */
1372 void
1373 t4_push_tls_records(struct adapter *sc, struct toepcb *toep, int drop)
1374 {
1375 	struct tls_hdr thdr;
1376 	struct mbuf *sndptr;
1377 	struct fw_tlstx_data_wr *txwr;
1378 	struct cpl_tx_tls_sfo *cpl;
1379 	struct wrqe *wr;
1380 	u_int plen, nsegs, credits, space, max_nsegs_1mbuf, wr_len;
1381 	u_int expn_size, iv_len, pdus, sndptroff;
1382 	struct tls_ofld_info *tls_ofld = &toep->tls;
1383 	struct inpcb *inp = toep->inp;
1384 	struct tcpcb *tp = intotcpcb(inp);
1385 	struct socket *so = inp->inp_socket;
1386 	struct sockbuf *sb = &so->so_snd;
1387 	int tls_size, tx_credits, shove, /* compl,*/ sowwakeup;
1388 	struct ofld_tx_sdesc *txsd;
1389 	bool imm_ivs, imm_payload;
1390 	void *iv_buffer, *iv_dst, *buf;
1391 
1392 	INP_WLOCK_ASSERT(inp);
1393 	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
1394 	    ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));
1395 
1396 	KASSERT(ulp_mode(toep) == ULP_MODE_NONE ||
1397 	    ulp_mode(toep) == ULP_MODE_TCPDDP || ulp_mode(toep) == ULP_MODE_TLS,
1398 	    ("%s: ulp_mode %u for toep %p", __func__, ulp_mode(toep), toep));
1399 	KASSERT(tls_tx_key(toep),
1400 	    ("%s: TX key not set for toep %p", __func__, toep));
1401 
1402 #ifdef VERBOSE_TRACES
1403 	CTR5(KTR_CXGBE, "%s: tid %d toep flags %#x tp flags %#x drop %d",
1404 	    __func__, toep->tid, toep->flags, tp->t_flags, drop);
1405 #endif
1406 	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN))
1407 		return;
1408 
1409 #ifdef RATELIMIT
1410 	if (__predict_false(inp->inp_flags2 & INP_RATE_LIMIT_CHANGED) &&
1411 	    (update_tx_rate_limit(sc, toep, so->so_max_pacing_rate) == 0)) {
1412 		inp->inp_flags2 &= ~INP_RATE_LIMIT_CHANGED;
1413 	}
1414 #endif
1415 
1416 	/*
1417 	 * This function doesn't resume by itself.  Someone else must clear the
1418 	 * flag and call this function.
1419 	 */
1420 	if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) {
1421 		KASSERT(drop == 0,
1422 		    ("%s: drop (%d) != 0 but tx is suspended", __func__, drop));
1423 		return;
1424 	}
1425 
1426 	txsd = &toep->txsd[toep->txsd_pidx];
1427 	for (;;) {
1428 		tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
1429 		space = max_imm_tls_space(tx_credits);
1430 		wr_len = sizeof(struct fw_tlstx_data_wr) +
1431 		    sizeof(struct cpl_tx_tls_sfo) + key_size(toep);
1432 		if (wr_len + CIPHER_BLOCK_SIZE + 1 > space) {
1433 #ifdef VERBOSE_TRACES
1434 			CTR5(KTR_CXGBE,
1435 			    "%s: tid %d tx_credits %d min_wr %d space %d",
1436 			    __func__, toep->tid, tx_credits, wr_len +
1437 			    CIPHER_BLOCK_SIZE + 1, space);
1438 #endif
1439 			return;
1440 		}
1441 
1442 		SOCKBUF_LOCK(sb);
1443 		sowwakeup = drop;
1444 		if (drop) {
1445 			sbdrop_locked(sb, drop);
1446 			MPASS(tls_ofld->sb_off >= drop);
1447 			tls_ofld->sb_off -= drop;
1448 			drop = 0;
1449 		}
1450 
1451 		/*
1452 		 * Send a FIN if requested, but only if there's no
1453 		 * more data to send.
1454 		 */
1455 		if (sbavail(sb) == tls_ofld->sb_off &&
1456 		    toep->flags & TPF_SEND_FIN) {
1457 			if (sowwakeup)
1458 				sowwakeup_locked(so);
1459 			else
1460 				SOCKBUF_UNLOCK(sb);
1461 			SOCKBUF_UNLOCK_ASSERT(sb);
1462 			t4_close_conn(sc, toep);
1463 			return;
1464 		}
1465 
1466 		if (sbavail(sb) < tls_ofld->sb_off + TLS_HEADER_LENGTH) {
1467 			/*
1468 			 * A full TLS header is not yet queued; stop
1469 			 * for now until more data is added to the
1470 			 * socket buffer.  However, if the connection
1471 			 * has been closed, we will never get the rest
1472 			 * of the header so just discard the partial
1473 			 * header and close the connection.
1474 			 */
1475 #ifdef VERBOSE_TRACES
1476 			CTR5(KTR_CXGBE, "%s: tid %d sbavail %d sb_off %d%s",
1477 			    __func__, toep->tid, sbavail(sb), tls_ofld->sb_off,
1478 			    toep->flags & TPF_SEND_FIN ? " SEND_FIN" : "");
1479 #endif
1480 			if (sowwakeup)
1481 				sowwakeup_locked(so);
1482 			else
1483 				SOCKBUF_UNLOCK(sb);
1484 			SOCKBUF_UNLOCK_ASSERT(sb);
1485 			if (toep->flags & TPF_SEND_FIN)
1486 				t4_close_conn(sc, toep);
1487 			return;
1488 		}
1489 
1490 		/* Read the header of the next TLS record. */
1491 		sndptr = sbsndmbuf(sb, tls_ofld->sb_off, &sndptroff);
1492 		m_copydata(sndptr, sndptroff, sizeof(thdr), (caddr_t)&thdr);
1493 		tls_size = ntohs(thdr.length);
1494 		plen = TLS_HEADER_LENGTH + tls_size;
1495 		pdus = howmany(tls_size, tls_ofld->k_ctx.frag_size);
1496 		iv_len = pdus * CIPHER_BLOCK_SIZE;
1497 
1498 		if (sbavail(sb) < tls_ofld->sb_off + plen) {
1499 			/*
1500 			 * The full TLS record is not yet queued; stop
1501 			 * for now until more data is added to the
1502 			 * socket buffer.  However, if the connection
1503 			 * has been closed, we will never get the rest
1504 			 * of the record so just discard the partial
1505 			 * record and close the connection.
1506 			 */
1507 #ifdef VERBOSE_TRACES
1508 			CTR6(KTR_CXGBE,
1509 			    "%s: tid %d sbavail %d sb_off %d plen %d%s",
1510 			    __func__, toep->tid, sbavail(sb), tls_ofld->sb_off,
1511 			    plen, toep->flags & TPF_SEND_FIN ? " SEND_FIN" :
1512 			    "");
1513 #endif
1514 			if (sowwakeup)
1515 				sowwakeup_locked(so);
1516 			else
1517 				SOCKBUF_UNLOCK(sb);
1518 			SOCKBUF_UNLOCK_ASSERT(sb);
1519 			if (toep->flags & TPF_SEND_FIN)
1520 				t4_close_conn(sc, toep);
1521 			return;
1522 		}
1523 
1524 		/* Shove if there is no additional data pending. */
1525 		shove = (sbavail(sb) == tls_ofld->sb_off + plen) &&
1526 		    !(tp->t_flags & TF_MORETOCOME);
1527 
1528 		if (sb->sb_flags & SB_AUTOSIZE &&
1529 		    V_tcp_do_autosndbuf &&
1530 		    sb->sb_hiwat < V_tcp_autosndbuf_max &&
1531 		    sbused(sb) >= sb->sb_hiwat * 7 / 8) {
1532 			int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc,
1533 			    V_tcp_autosndbuf_max);
1534 
1535 			if (!sbreserve_locked(sb, newsize, so, NULL))
1536 				sb->sb_flags &= ~SB_AUTOSIZE;
1537 			else
1538 				sowwakeup = 1;	/* room available */
1539 		}
1540 		if (sowwakeup)
1541 			sowwakeup_locked(so);
1542 		else
1543 			SOCKBUF_UNLOCK(sb);
1544 		SOCKBUF_UNLOCK_ASSERT(sb);
1545 
1546 		if (__predict_false(toep->flags & TPF_FIN_SENT))
1547 			panic("%s: excess tx.", __func__);
1548 
1549 		/* Determine whether to use immediate vs SGL. */
1550 		imm_payload = false;
1551 		imm_ivs = false;
1552 		if (wr_len + iv_len <= space) {
1553 			imm_ivs = true;
1554 			wr_len += iv_len;
1555 			if (wr_len + tls_size <= space) {
1556 				wr_len += tls_size;
1557 				imm_payload = true;
1558 			}
1559 		}
1560 
1561 		/* Allocate space for IVs if needed. */
1562 		if (!imm_ivs) {
1563 			iv_buffer = malloc(iv_len, M_CXGBE, M_NOWAIT);
1564 			if (iv_buffer == NULL) {
1565 				/*
1566 				 * XXX: How to restart this?
1567 				 */
1568 				if (sowwakeup)
1569 					sowwakeup_locked(so);
1570 				else
1571 					SOCKBUF_UNLOCK(sb);
1572 				SOCKBUF_UNLOCK_ASSERT(sb);
1573 				CTR3(KTR_CXGBE,
1574 			    "%s: tid %d failed to alloc IV space len %d",
1575 				    __func__, toep->tid, iv_len);
1576 				return;
1577 			}
1578 		} else
1579 			iv_buffer = NULL;
1580 
1581 		/* Determine size of SGL. */
1582 		nsegs = 0;
1583 		max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */
1584 		if (!imm_payload) {
1585 			nsegs = count_mbuf_segs(sndptr, sndptroff +
1586 			    TLS_HEADER_LENGTH, tls_size, &max_nsegs_1mbuf);
1587 			if (!imm_ivs) {
1588 				int n = sglist_count(iv_buffer, iv_len);
1589 				nsegs += n;
1590 				if (n > max_nsegs_1mbuf)
1591 					max_nsegs_1mbuf = n;
1592 			}
1593 
1594 			/* Account for SGL in work request length. */
1595 			wr_len += sizeof(struct ulptx_sgl) +
1596 			    ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
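			/*
			 * Size check (illustrative): struct ulptx_sgl is 16
			 * bytes (cmd_nsge, len0, addr0) and each further
			 * pair of segments adds 24 bytes, with an odd
			 * trailing segment padded to 8-byte alignment, so
			 * nsegs = 4 yields 16 + 40 = 56 bytes here.
			 */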
1597 		}
1598 
1599 		wr = alloc_wrqe(roundup2(wr_len, 16), &toep->ofld_txq->wrq);
1600 		if (wr == NULL) {
1601 			/* XXX: how will we recover from this? */
1602 			toep->flags |= TPF_TX_SUSPENDED;
1603 			return;
1604 		}
1605 
1606 #ifdef VERBOSE_TRACES
1607 		CTR5(KTR_CXGBE, "%s: tid %d TLS record %d len %#x pdus %d",
1608 		    __func__, toep->tid, thdr.type, tls_size, pdus);
1609 #endif
1610 		txwr = wrtod(wr);
1611 		cpl = (struct cpl_tx_tls_sfo *)(txwr + 1);
1612 		memset(txwr, 0, roundup2(wr_len, 16));
1613 		credits = howmany(wr_len, 16);
1614 		expn_size = tls_expansion_size(toep, tls_size, 0, NULL);
1615 		write_tlstx_wr(txwr, toep, imm_payload ? tls_size : 0,
1616 		    tls_size, expn_size, pdus, credits, shove, imm_ivs ? 1 : 0);
1617 		write_tlstx_cpl(cpl, toep, &thdr, tls_size, pdus);
1618 		tls_copy_tx_key(toep, cpl + 1);
1619 
1620 		/* Generate random IVs */
1621 		buf = (char *)(cpl + 1) + key_size(toep);
1622 		if (imm_ivs) {
1623 			MPASS(iv_buffer == NULL);
1624 			iv_dst = buf;
1625 			buf = (char *)iv_dst + iv_len;
1626 		} else
1627 			iv_dst = iv_buffer;
1628 		arc4rand(iv_dst, iv_len, 0);
1629 
1630 		if (imm_payload) {
1631 			m_copydata(sndptr, sndptroff + TLS_HEADER_LENGTH,
1632 			    tls_size, buf);
1633 		} else {
1634 			write_tlstx_sgl(buf, sndptr,
1635 			    sndptroff + TLS_HEADER_LENGTH, tls_size, iv_buffer,
1636 			    iv_len, nsegs, max_nsegs_1mbuf);
1637 		}
1638 
1639 		KASSERT(toep->tx_credits >= credits,
1640 			("%s: not enough credits", __func__));
1641 
1642 		toep->tx_credits -= credits;
1643 
1644 		tp->snd_nxt += plen;
1645 		tp->snd_max += plen;
1646 
1647 		SOCKBUF_LOCK(sb);
1648 		sbsndptr_adv(sb, sb->sb_sndptr, plen);
1649 		tls_ofld->sb_off += plen;
1650 		SOCKBUF_UNLOCK(sb);
1651 
1652 		toep->flags |= TPF_TX_DATA_SENT;
1653 		if (toep->tx_credits < MIN_OFLD_TLSTX_CREDITS(toep))
1654 			toep->flags |= TPF_TX_SUSPENDED;
1655 
1656 		KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
1657 		txsd->plen = plen;
1658 		txsd->tx_credits = credits;
1659 		txsd->iv_buffer = iv_buffer;
1660 		txsd++;
1661 		if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
1662 			toep->txsd_pidx = 0;
1663 			txsd = &toep->txsd[0];
1664 		}
1665 		toep->txsd_avail--;
1666 
1667 		counter_u64_add(toep->ofld_txq->tx_toe_tls_records, 1);
1668 		counter_u64_add(toep->ofld_txq->tx_toe_tls_octets, plen);
1669 
1670 		t4_l2t_send(sc, wr, toep->l2te);
1671 	}
1672 }
1673 
1674 #ifdef KERN_TLS
1675 static int
1676 count_ext_pgs_segs(struct mbuf *m)
1677 {
1678 	vm_paddr_t nextpa;
1679 	u_int i, nsegs;
1680 
1681 	MPASS(m->m_epg_npgs > 0);
1682 	nsegs = 1;
1683 	nextpa = m->m_epg_pa[0] + PAGE_SIZE;
1684 	for (i = 1; i < m->m_epg_npgs; i++) {
1685 		if (nextpa != m->m_epg_pa[i])
1686 			nsegs++;
1687 		nextpa = m->m_epg_pa[i] + PAGE_SIZE;
1688 	}
1689 	return (nsegs);
1690 }
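
/*
 * Example (illustrative): an mbuf whose pages sit at physical
 * addresses P, P + PAGE_SIZE, and Q (discontiguous) coalesces into
 * two S/G segments: one spanning the first two pages and one for Q.
 */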
1691 
1692 static void
1693 write_ktlstx_sgl(void *dst, struct mbuf *m, int nsegs)
1694 {
1695 	struct ulptx_sgl *usgl = dst;
1696 	vm_paddr_t pa;
1697 	uint32_t len;
1698 	int i, j;
1699 
1700 	KASSERT(nsegs > 0, ("%s: nsegs 0", __func__));
1701 
1702 	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
1703 	    V_ULPTX_NSGE(nsegs));
1704 
1705 	/* Figure out the first S/G length. */
1706 	pa = m->m_epg_pa[0] + m->m_epg_1st_off;
1707 	usgl->addr0 = htobe64(pa);
1708 	len = m_epg_pagelen(m, 0, m->m_epg_1st_off);
1709 	pa += len;
1710 	for (i = 1; i < m->m_epg_npgs; i++) {
1711 		if (m->m_epg_pa[i] != pa)
1712 			break;
1713 		len += m_epg_pagelen(m, i, 0);
1714 		pa += m_epg_pagelen(m, i, 0);
1715 	}
1716 	usgl->len0 = htobe32(len);
1717 #ifdef INVARIANTS
1718 	nsegs--;
1719 #endif
1720 
1721 	j = -1;
1722 	for (; i < m->m_epg_npgs; i++) {
1723 		if (j == -1 || m->m_epg_pa[i] != pa) {
1724 			if (j >= 0)
1725 				usgl->sge[j / 2].len[j & 1] = htobe32(len);
1726 			j++;
1727 #ifdef INVARIANTS
1728 			nsegs--;
1729 #endif
1730 			pa = m->m_epg_pa[i];
1731 			usgl->sge[j / 2].addr[j & 1] = htobe64(pa);
1732 			len = m_epg_pagelen(m, i, 0);
1733 			pa += len;
1734 		} else {
1735 			len += m_epg_pagelen(m, i, 0);
1736 			pa += m_epg_pagelen(m, i, 0);
1737 		}
1738 	}
1739 	if (j >= 0) {
1740 		usgl->sge[j / 2].len[j & 1] = htobe32(len);
1741 
1742 		if ((j & 1) == 0)
1743 			usgl->sge[j / 2].len[1] = htobe32(0);
1744 	}
1745 	KASSERT(nsegs == 0, ("%s: nsegs %d, m %p", __func__, nsegs, m));
1746 }
1747 
1748 /*
1749  * Similar to t4_push_frames() but handles sockets that contain TLS
1750  * record mbufs.  Unlike TLSOM, each mbuf is a complete TLS record and
1751  * corresponds to a single work request.
1752  */
1753 void
1754 t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
1755 {
1756 	struct tls_hdr *thdr;
1757 	struct fw_tlstx_data_wr *txwr;
1758 	struct cpl_tx_tls_sfo *cpl;
1759 	struct wrqe *wr;
1760 	struct mbuf *m;
1761 	u_int nsegs, credits, wr_len;
1762 	u_int expn_size;
1763 	struct inpcb *inp = toep->inp;
1764 	struct tcpcb *tp = intotcpcb(inp);
1765 	struct socket *so = inp->inp_socket;
1766 	struct sockbuf *sb = &so->so_snd;
1767 	int tls_size, tx_credits, shove, sowwakeup;
1768 	struct ofld_tx_sdesc *txsd;
1769 	char *buf;
1770 
1771 	INP_WLOCK_ASSERT(inp);
1772 	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
1773 	    ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));
1774 
1775 	KASSERT(ulp_mode(toep) == ULP_MODE_NONE ||
1776 	    ulp_mode(toep) == ULP_MODE_TCPDDP || ulp_mode(toep) == ULP_MODE_TLS,
1777 	    ("%s: ulp_mode %u for toep %p", __func__, ulp_mode(toep), toep));
1778 	KASSERT(tls_tx_key(toep),
1779 	    ("%s: TX key not set for toep %p", __func__, toep));
1780 
1781 #ifdef VERBOSE_TRACES
1782 	CTR5(KTR_CXGBE, "%s: tid %d toep flags %#x tp flags %#x drop %d",
1783 	    __func__, toep->tid, toep->flags, tp->t_flags, drop);
1784 #endif
1785 	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN))
1786 		return;
1787 
1788 #ifdef RATELIMIT
1789 	if (__predict_false(inp->inp_flags2 & INP_RATE_LIMIT_CHANGED) &&
1790 	    (update_tx_rate_limit(sc, toep, so->so_max_pacing_rate) == 0)) {
1791 		inp->inp_flags2 &= ~INP_RATE_LIMIT_CHANGED;
1792 	}
1793 #endif
1794 
1795 	/*
1796 	 * This function doesn't resume by itself.  Someone else must clear the
1797 	 * flag and call this function.
1798 	 */
1799 	if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) {
1800 		KASSERT(drop == 0,
1801 		    ("%s: drop (%d) != 0 but tx is suspended", __func__, drop));
1802 		return;
1803 	}
1804 
1805 	txsd = &toep->txsd[toep->txsd_pidx];
1806 	for (;;) {
1807 		tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
1808 
1809 		SOCKBUF_LOCK(sb);
1810 		sowwakeup = drop;
1811 		if (drop) {
1812 			sbdrop_locked(sb, drop);
1813 			drop = 0;
1814 		}
1815 
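		/*
		 * Find the next unsent TLS record: the mbuf after the
		 * last one pushed (sb_sndptr), or the head of the send
		 * buffer on the first pass.
		 */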
1816 		m = sb->sb_sndptr != NULL ? sb->sb_sndptr->m_next : sb->sb_mb;
1817 
1818 		/*
1819 		 * Send a FIN if requested, but only if there's no
1820 		 * more data to send.
1821 		 */
1822 		if (m == NULL && toep->flags & TPF_SEND_FIN) {
1823 			if (sowwakeup)
1824 				sowwakeup_locked(so);
1825 			else
1826 				SOCKBUF_UNLOCK(sb);
1827 			SOCKBUF_UNLOCK_ASSERT(sb);
1828 			t4_close_conn(sc, toep);
1829 			return;
1830 		}
1831 
1832 		/*
1833 		 * If there is no ready data to send, wait until more
1834 		 * data arrives.
1835 		 */
1836 		if (m == NULL || (m->m_flags & M_NOTAVAIL) != 0) {
1837 			if (sowwakeup)
1838 				sowwakeup_locked(so);
1839 			else
1840 				SOCKBUF_UNLOCK(sb);
1841 			SOCKBUF_UNLOCK_ASSERT(sb);
1842 #ifdef VERBOSE_TRACES
1843 			CTR2(KTR_CXGBE, "%s: tid %d no ready data to send",
1844 			    __func__, toep->tid);
1845 #endif
1846 			return;
1847 		}
1848 
		KASSERT(m->m_flags & M_EXTPG, ("%s: mbuf %p is not M_EXTPG",
		    __func__, m));
1851 		KASSERT(m->m_epg_tls != NULL,
1852 		    ("%s: mbuf %p doesn't have TLS session", __func__, m));
1853 
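		/*
		 * The work request is laid out as: fw_tlstx_data_wr,
		 * then cpl_tx_tls_sfo, the key context, an explicit IV
		 * (always a full AES block), and finally the DSGL.
		 */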
1854 		/* Calculate WR length. */
1855 		wr_len = sizeof(struct fw_tlstx_data_wr) +
1856 		    sizeof(struct cpl_tx_tls_sfo) + key_size(toep);
1857 
		/* Explicit IVs for AES-CBC and AES-GCM are <= 16 bytes. */
1859 		MPASS(toep->tls.iv_len <= AES_BLOCK_LEN);
1860 		wr_len += AES_BLOCK_LEN;
1861 
1862 		/* Account for SGL in work request length. */
1863 		nsegs = count_ext_pgs_segs(m);
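		/*
		 * Each segment past the first needs an 8-byte address
		 * and a 4-byte length; segments are packed in pairs
		 * (struct ulptx_sge_pair, 24 bytes), and an odd final
		 * segment is padded to 16 bytes.
		 */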
1864 		wr_len += sizeof(struct ulptx_sgl) +
1865 		    ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
1866 
1867 		/* Not enough credits for this work request. */
1868 		if (howmany(wr_len, 16) > tx_credits) {
1869 			if (sowwakeup)
1870 				sowwakeup_locked(so);
1871 			else
1872 				SOCKBUF_UNLOCK(sb);
1873 			SOCKBUF_UNLOCK_ASSERT(sb);
1874 #ifdef VERBOSE_TRACES
1875 			CTR5(KTR_CXGBE,
1876 	    "%s: tid %d mbuf %p requires %d credits, but only %d available",
1877 			    __func__, toep->tid, m, howmany(wr_len, 16),
1878 			    tx_credits);
1879 #endif
1880 			toep->flags |= TPF_TX_SUSPENDED;
1881 			return;
1882 		}
1883 
1884 		/* Shove if there is no additional data pending. */
		shove = (m->m_next == NULL ||
		    (m->m_next->m_flags & M_NOTAVAIL) != 0) &&
		    (tp->t_flags & TF_MORETOCOME) == 0;
1888 
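		/*
		 * Grow the send buffer if autosizing is enabled and the
		 * buffer is nearly full.
		 */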
1889 		if (sb->sb_flags & SB_AUTOSIZE &&
1890 		    V_tcp_do_autosndbuf &&
1891 		    sb->sb_hiwat < V_tcp_autosndbuf_max &&
1892 		    sbused(sb) >= sb->sb_hiwat * 7 / 8) {
1893 			int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc,
1894 			    V_tcp_autosndbuf_max);
1895 
1896 			if (!sbreserve_locked(sb, newsize, so, NULL))
1897 				sb->sb_flags &= ~SB_AUTOSIZE;
1898 			else
1899 				sowwakeup = 1;	/* room available */
1900 		}
1901 		if (sowwakeup)
1902 			sowwakeup_locked(so);
1903 		else
1904 			SOCKBUF_UNLOCK(sb);
1905 		SOCKBUF_UNLOCK_ASSERT(sb);
1906 
1907 		if (__predict_false(toep->flags & TPF_FIN_SENT))
1908 			panic("%s: excess tx.", __func__);
1909 
1910 		wr = alloc_wrqe(roundup2(wr_len, 16), &toep->ofld_txq->wrq);
1911 		if (wr == NULL) {
1912 			/* XXX: how will we recover from this? */
1913 			toep->flags |= TPF_TX_SUSPENDED;
1914 			return;
1915 		}
1916 
1917 		thdr = (struct tls_hdr *)&m->m_epg_hdr;
1918 #ifdef VERBOSE_TRACES
1919 		CTR5(KTR_CXGBE, "%s: tid %d TLS record %ju type %d len %#x",
1920 		    __func__, toep->tid, m->m_epg_seqno, thdr->type,
1921 		    m->m_len);
1922 #endif
1923 		txwr = wrtod(wr);
1924 		cpl = (struct cpl_tx_tls_sfo *)(txwr + 1);
1925 		memset(txwr, 0, roundup2(wr_len, 16));
1926 		credits = howmany(wr_len, 16);
		expn_size = m->m_epg_hdrlen + m->m_epg_trllen;
1929 		tls_size = m->m_len - expn_size;
1930 		write_tlstx_wr(txwr, toep, 0,
1931 		    tls_size, expn_size, 1, credits, shove, 1);
1932 		toep->tls.tx_seq_no = m->m_epg_seqno;
1933 		write_tlstx_cpl(cpl, toep, thdr, tls_size, 1);
1934 		tls_copy_tx_key(toep, cpl + 1);
1935 
1936 		/* Copy IV. */
1937 		buf = (char *)(cpl + 1) + key_size(toep);
1938 		memcpy(buf, thdr + 1, toep->tls.iv_len);
1939 		buf += AES_BLOCK_LEN;
1940 
1941 		write_ktlstx_sgl(buf, m, nsegs);
1942 
		KASSERT(toep->tx_credits >= credits,
		    ("%s: not enough credits", __func__));
1945 
1946 		toep->tx_credits -= credits;
1947 
1948 		tp->snd_nxt += m->m_len;
1949 		tp->snd_max += m->m_len;
1950 
1951 		SOCKBUF_LOCK(sb);
1952 		sb->sb_sndptr = m;
1953 		SOCKBUF_UNLOCK(sb);
1954 
1955 		toep->flags |= TPF_TX_DATA_SENT;
1956 		if (toep->tx_credits < MIN_OFLD_TLSTX_CREDITS(toep))
1957 			toep->flags |= TPF_TX_SUSPENDED;
1958 
1959 		KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
1960 		txsd->plen = m->m_len;
1961 		txsd->tx_credits = credits;
1962 		txsd++;
1963 		if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
1964 			toep->txsd_pidx = 0;
1965 			txsd = &toep->txsd[0];
1966 		}
1967 		toep->txsd_avail--;
1968 
1969 		counter_u64_add(toep->ofld_txq->tx_toe_tls_records, 1);
1970 		counter_u64_add(toep->ofld_txq->tx_toe_tls_octets, m->m_len);
1971 
1972 		t4_l2t_send(sc, wr, toep->l2te);
1973 	}
1974 }
1975 #endif
1976 
1977 /*
 * For TLS data we place mbufs received via CPL_TLS_DATA into
1979  * an mbufq in the TLS offload state.  When CPL_RX_TLS_CMP is
1980  * received, the completed PDUs are placed into the socket receive
1981  * buffer.
1982  *
1983  * The TLS code reuses the ulp_pdu_reclaimq to hold the pending mbufs.
1984  */
1985 static int
1986 do_tls_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
1987 {
1988 	struct adapter *sc = iq->adapter;
1989 	const struct cpl_tls_data *cpl = mtod(m, const void *);
1990 	unsigned int tid = GET_TID(cpl);
1991 	struct toepcb *toep = lookup_tid(sc, tid);
1992 	struct inpcb *inp = toep->inp;
1993 	struct tcpcb *tp;
1994 	int len;
1995 
1996 	/* XXX: Should this match do_rx_data instead? */
1997 	KASSERT(!(toep->flags & TPF_SYNQE),
1998 	    ("%s: toep %p claims to be a synq entry", __func__, toep));
1999 
2000 	KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
2001 
2002 	/* strip off CPL header */
2003 	m_adj(m, sizeof(*cpl));
2004 	len = m->m_pkthdr.len;
2005 
2006 	toep->ofld_rxq->rx_toe_tls_octets += len;
2007 
2008 	KASSERT(len == G_CPL_TLS_DATA_LENGTH(be32toh(cpl->length_pkd)),
2009 	    ("%s: payload length mismatch", __func__));
2010 
2011 	INP_WLOCK(inp);
2012 	if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) {
2013 		CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
2014 		    __func__, tid, len, inp->inp_flags);
2015 		INP_WUNLOCK(inp);
2016 		m_freem(m);
2017 		return (0);
2018 	}
2019 
2020 	/* Save TCP sequence number. */
2021 	m->m_pkthdr.tls_tcp_seq = be32toh(cpl->seq);
2022 
2023 	if (mbufq_enqueue(&toep->ulp_pdu_reclaimq, m)) {
2024 #ifdef INVARIANTS
2025 		panic("Failed to queue TLS data packet");
2026 #else
2027 		printf("%s: Failed to queue TLS data packet\n", __func__);
2028 		INP_WUNLOCK(inp);
2029 		m_freem(m);
2030 		return (0);
2031 #endif
2032 	}
2033 
2034 	tp = intotcpcb(inp);
2035 	tp->t_rcvtime = ticks;
2036 
2037 #ifdef VERBOSE_TRACES
2038 	CTR4(KTR_CXGBE, "%s: tid %u len %d seq %u", __func__, tid, len,
2039 	    be32toh(cpl->seq));
2040 #endif
2041 
2042 	INP_WUNLOCK(inp);
2043 	return (0);
2044 }
2045 
2046 static int
2047 do_rx_tls_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
2048 {
2049 	struct adapter *sc = iq->adapter;
2050 	const struct cpl_rx_tls_cmp *cpl = mtod(m, const void *);
2051 	struct tlsrx_hdr_pkt *tls_hdr_pkt;
2052 	unsigned int tid = GET_TID(cpl);
2053 	struct toepcb *toep = lookup_tid(sc, tid);
2054 	struct inpcb *inp = toep->inp;
2055 	struct tcpcb *tp;
2056 	struct socket *so;
2057 	struct sockbuf *sb;
2058 	struct mbuf *tls_data;
2059 #ifdef KERN_TLS
2060 	struct tls_get_record *tgr;
2061 	struct mbuf *control;
2062 #endif
2063 	int len, pdu_length, rx_credits;
2064 
2065 	KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
2066 	KASSERT(!(toep->flags & TPF_SYNQE),
2067 	    ("%s: toep %p claims to be a synq entry", __func__, toep));
2068 
2069 	/* strip off CPL header */
2070 	m_adj(m, sizeof(*cpl));
2071 	len = m->m_pkthdr.len;
2072 
2073 	toep->ofld_rxq->rx_toe_tls_records++;
2074 
2075 	KASSERT(len == G_CPL_RX_TLS_CMP_LENGTH(be32toh(cpl->pdulength_length)),
2076 	    ("%s: payload length mismatch", __func__));
2077 
2078 	INP_WLOCK(inp);
2079 	if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) {
2080 		CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
2081 		    __func__, tid, len, inp->inp_flags);
2082 		INP_WUNLOCK(inp);
2083 		m_freem(m);
2084 		return (0);
2085 	}
2086 
2087 	pdu_length = G_CPL_RX_TLS_CMP_PDULENGTH(be32toh(cpl->pdulength_length));
2088 
2089 	so = inp_inpcbtosocket(inp);
2090 	tp = intotcpcb(inp);
2091 
2092 #ifdef VERBOSE_TRACES
2093 	CTR6(KTR_CXGBE, "%s: tid %u PDU len %d len %d seq %u, rcv_nxt %u",
2094 	    __func__, tid, pdu_length, len, be32toh(cpl->seq), tp->rcv_nxt);
2095 #endif
2096 
2097 	tp->rcv_nxt += pdu_length;
2098 	KASSERT(tp->rcv_wnd >= pdu_length,
2099 	    ("%s: negative window size", __func__));
2100 	tp->rcv_wnd -= pdu_length;
2101 
2102 	/* XXX: Not sure what to do about urgent data. */
2103 
2104 	/*
2105 	 * The payload of this CPL is the TLS header followed by
2106 	 * additional fields.
2107 	 */
2108 	KASSERT(m->m_len >= sizeof(*tls_hdr_pkt),
2109 	    ("%s: payload too small", __func__));
2110 	tls_hdr_pkt = mtod(m, void *);
2111 
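	/*
	 * The record payload arrived earlier in CPL_TLS_DATA messages
	 * and was stashed on the ulp_pdu_reclaimq; it should carry the
	 * same starting TCP sequence number as this completion.
	 */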
2112 	tls_data = mbufq_dequeue(&toep->ulp_pdu_reclaimq);
2113 	if (tls_data != NULL) {
2114 		KASSERT(be32toh(cpl->seq) == tls_data->m_pkthdr.tls_tcp_seq,
2115 		    ("%s: sequence mismatch", __func__));
2116 	}
2117 
2118 #ifdef KERN_TLS
2119 	if (toep->tls.mode == TLS_MODE_KTLS) {
2120 		/* Report decryption errors as EBADMSG. */
2121 		if ((tls_hdr_pkt->res_to_mac_error & M_TLSRX_HDR_PKT_ERROR) !=
2122 		    0) {
2123 			m_freem(m);
2124 			m_freem(tls_data);
2125 
2126 			CURVNET_SET(toep->vnet);
2127 			so->so_error = EBADMSG;
2128 			sorwakeup(so);
2129 
2130 			INP_WUNLOCK(inp);
2131 			CURVNET_RESTORE();
2132 
2133 			return (0);
2134 		}
2135 
2136 		/* Allocate the control message mbuf. */
2137 		control = sbcreatecontrol(NULL, sizeof(*tgr), TLS_GET_RECORD,
2138 		    IPPROTO_TCP);
2139 		if (control == NULL) {
2140 			m_freem(m);
2141 			m_freem(tls_data);
2142 
2143 			CURVNET_SET(toep->vnet);
2144 			so->so_error = ENOBUFS;
2145 			sorwakeup(so);
2146 
2147 			INP_WUNLOCK(inp);
2148 			CURVNET_RESTORE();
2149 
2150 			return (0);
2151 		}
2152 
2153 		tgr = (struct tls_get_record *)
2154 		    CMSG_DATA(mtod(control, struct cmsghdr *));
2155 		tgr->tls_type = tls_hdr_pkt->type;
2156 		tgr->tls_vmajor = be16toh(tls_hdr_pkt->version) >> 8;
2157 		tgr->tls_vminor = be16toh(tls_hdr_pkt->version) & 0xff;
2158 
2159 		m_freem(m);
2160 
2161 		if (tls_data != NULL) {
2162 			m_last(tls_data)->m_flags |= M_EOR;
2163 			tgr->tls_length = htobe16(tls_data->m_pkthdr.len);
2164 		} else
2165 			tgr->tls_length = 0;
2166 		m = tls_data;
2167 	} else
2168 #endif
2169 	{
2170 		/*
2171 		 * Only the TLS header is sent to OpenSSL, so report
2172 		 * errors by altering the record type.
2173 		 */
2174 		if ((tls_hdr_pkt->res_to_mac_error & M_TLSRX_HDR_PKT_ERROR) !=
2175 		    0)
2176 			tls_hdr_pkt->type = CONTENT_TYPE_ERROR;
2177 
2178 		/* Trim this CPL's mbuf to only include the TLS header. */
2179 		KASSERT(m->m_len == len && m->m_next == NULL,
2180 		    ("%s: CPL spans multiple mbufs", __func__));
2181 		m->m_len = TLS_HEADER_LENGTH;
2182 		m->m_pkthdr.len = TLS_HEADER_LENGTH;
2183 
2184 		if (tls_data != NULL) {
2185 			/*
2186 			 * Update the TLS header length to be the length of
2187 			 * the payload data.
2188 			 */
2189 			tls_hdr_pkt->length = htobe16(tls_data->m_pkthdr.len);
2190 
2191 			m->m_next = tls_data;
			m->m_pkthdr.len += tls_data->m_pkthdr.len;
2193 		}
2194 
2195 #ifdef KERN_TLS
2196 		control = NULL;
2197 #endif
2198 	}
2199 
2200 	sb = &so->so_rcv;
2201 	SOCKBUF_LOCK(sb);
2202 
2203 	if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) {
2204 		struct epoch_tracker et;
2205 
2206 		CTR3(KTR_CXGBE, "%s: tid %u, excess rx (%d bytes)",
2207 		    __func__, tid, pdu_length);
2208 		m_freem(m);
2209 #ifdef KERN_TLS
2210 		m_freem(control);
2211 #endif
2212 		SOCKBUF_UNLOCK(sb);
2213 		INP_WUNLOCK(inp);
2214 
2215 		CURVNET_SET(toep->vnet);
2216 		NET_EPOCH_ENTER(et);
2217 		INP_WLOCK(inp);
2218 		tp = tcp_drop(tp, ECONNRESET);
2219 		if (tp)
2220 			INP_WUNLOCK(inp);
2221 		NET_EPOCH_EXIT(et);
2222 		CURVNET_RESTORE();
2223 
2224 		return (0);
2225 	}
2226 
2227 	/*
2228 	 * Not all of the bytes on the wire are included in the socket buffer
2229 	 * (e.g. the MAC of the TLS record).  However, those bytes are included
2230 	 * in the TCP sequence space.
2231 	 */
2232 
2233 	/* receive buffer autosize */
2234 	MPASS(toep->vnet == so->so_vnet);
2235 	CURVNET_SET(toep->vnet);
2236 	if (sb->sb_flags & SB_AUTOSIZE &&
2237 	    V_tcp_do_autorcvbuf &&
2238 	    sb->sb_hiwat < V_tcp_autorcvbuf_max &&
2239 	    m->m_pkthdr.len > (sbspace(sb) / 8 * 7)) {
2240 		unsigned int hiwat = sb->sb_hiwat;
2241 		unsigned int newsize = min(hiwat + sc->tt.autorcvbuf_inc,
2242 		    V_tcp_autorcvbuf_max);
2243 
2244 		if (!sbreserve_locked(sb, newsize, so, NULL))
2245 			sb->sb_flags &= ~SB_AUTOSIZE;
2246 	}
2247 
2248 #ifdef KERN_TLS
2249 	if (control != NULL)
2250 		sbappendcontrol_locked(sb, m, control, 0);
2251 	else
2252 #endif
2253 		sbappendstream_locked(sb, m, 0);
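	/*
	 * Grow the advertised receive window if the socket buffer has
	 * space beyond what has already been announced to the peer.
	 */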
2254 	rx_credits = sbspace(sb) > tp->rcv_wnd ? sbspace(sb) - tp->rcv_wnd : 0;
2255 #ifdef VERBOSE_TRACES
2256 	CTR4(KTR_CXGBE, "%s: tid %u rx_credits %u rcv_wnd %u",
2257 	    __func__, tid, rx_credits, tp->rcv_wnd);
2258 #endif
2259 	if (rx_credits > 0 && sbused(sb) + tp->rcv_wnd < sb->sb_lowat) {
2260 		rx_credits = send_rx_credits(sc, toep, rx_credits);
2261 		tp->rcv_wnd += rx_credits;
2262 		tp->rcv_adv += rx_credits;
2263 	}
2264 
2265 	sorwakeup_locked(so);
2266 	SOCKBUF_UNLOCK_ASSERT(sb);
2267 
2268 	INP_WUNLOCK(inp);
2269 	CURVNET_RESTORE();
2270 	return (0);
2271 }
2272 
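/*
 * Handle a TLS record delivered via a plain CPL_RX_DATA message, i.e.
 * one that bypassed hardware decryption.  The record header is
 * validated, but the record itself is always dropped and an error is
 * reported to the socket.
 */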
2273 void
2274 do_rx_data_tls(const struct cpl_rx_data *cpl, struct toepcb *toep,
2275     struct mbuf *m)
2276 {
2277 	struct inpcb *inp = toep->inp;
2278 	struct tls_ofld_info *tls_ofld = &toep->tls;
2279 	struct tls_hdr *hdr;
2280 	struct tcpcb *tp;
2281 	struct socket *so;
2282 	struct sockbuf *sb;
2283 	int error, len, rx_credits;
2284 
2285 	len = m->m_pkthdr.len;
2286 
2287 	INP_WLOCK_ASSERT(inp);
2288 
2289 	so = inp_inpcbtosocket(inp);
2290 	tp = intotcpcb(inp);
2291 	sb = &so->so_rcv;
2292 	SOCKBUF_LOCK(sb);
2293 	CURVNET_SET(toep->vnet);
2294 
2295 	tp->rcv_nxt += len;
2296 	KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__));
2297 	tp->rcv_wnd -= len;
2298 
2299 	/* Do we have a full TLS header? */
2300 	if (len < sizeof(*hdr)) {
2301 		CTR3(KTR_CXGBE, "%s: tid %u len %d: too short for a TLS header",
2302 		    __func__, toep->tid, len);
2303 		so->so_error = EMSGSIZE;
2304 		goto out;
2305 	}
2306 	hdr = mtod(m, struct tls_hdr *);
2307 
2308 	/* Is the header valid? */
2309 	if (be16toh(hdr->version) != tls_ofld->k_ctx.proto_ver) {
2310 		CTR3(KTR_CXGBE, "%s: tid %u invalid version %04x",
2311 		    __func__, toep->tid, be16toh(hdr->version));
2312 		error = EINVAL;
2313 		goto report_error;
2314 	}
2315 	if (be16toh(hdr->length) < sizeof(*hdr)) {
2316 		CTR3(KTR_CXGBE, "%s: tid %u invalid length %u",
2317 		    __func__, toep->tid, be16toh(hdr->length));
2318 		error = EBADMSG;
2319 		goto report_error;
2320 	}
2321 
2322 	/* Did we get a truncated record? */
2323 	if (len < be16toh(hdr->length)) {
2324 		CTR4(KTR_CXGBE, "%s: tid %u truncated TLS record (%d vs %u)",
2325 		    __func__, toep->tid, len, be16toh(hdr->length));
2326 
2327 		error = EMSGSIZE;
2328 		goto report_error;
2329 	}
2330 
2331 	/* Is the header type unknown? */
2332 	switch (hdr->type) {
2333 	case CONTENT_TYPE_CCS:
2334 	case CONTENT_TYPE_ALERT:
2335 	case CONTENT_TYPE_APP_DATA:
2336 	case CONTENT_TYPE_HANDSHAKE:
2337 		break;
2338 	default:
2339 		CTR3(KTR_CXGBE, "%s: tid %u invalid TLS record type %u",
2340 		    __func__, toep->tid, hdr->type);
2341 		error = EBADMSG;
2342 		goto report_error;
2343 	}
2344 
2345 	/*
2346 	 * Just punt.  Although this could fall back to software
2347 	 * decryption, this case should never really happen.
2348 	 */
2349 	CTR4(KTR_CXGBE, "%s: tid %u dropping TLS record type %u, length %u",
2350 	    __func__, toep->tid, hdr->type, be16toh(hdr->length));
2351 	error = EBADMSG;
2352 
2353 report_error:
2354 #ifdef KERN_TLS
2355 	if (toep->tls.mode == TLS_MODE_KTLS)
2356 		so->so_error = error;
2357 	else
2358 #endif
2359 	{
2360 		/*
2361 		 * Report errors by sending an empty TLS record
2362 		 * with an error record type.
2363 		 */
2364 		hdr->type = CONTENT_TYPE_ERROR;
2365 
2366 		/* Trim this CPL's mbuf to only include the TLS header. */
2367 		KASSERT(m->m_len == len && m->m_next == NULL,
2368 		    ("%s: CPL spans multiple mbufs", __func__));
2369 		m->m_len = TLS_HEADER_LENGTH;
2370 		m->m_pkthdr.len = TLS_HEADER_LENGTH;
2371 
2372 		sbappendstream_locked(sb, m, 0);
2373 		m = NULL;
2374 	}
2375 
2376 out:
2377 	/*
2378 	 * This connection is going to die anyway, so probably don't
2379 	 * need to bother with returning credits.
2380 	 */
2381 	rx_credits = sbspace(sb) > tp->rcv_wnd ? sbspace(sb) - tp->rcv_wnd : 0;
2382 #ifdef VERBOSE_TRACES
2383 	CTR4(KTR_CXGBE, "%s: tid %u rx_credits %u rcv_wnd %u",
2384 	    __func__, toep->tid, rx_credits, tp->rcv_wnd);
2385 #endif
2386 	if (rx_credits > 0 && sbused(sb) + tp->rcv_wnd < sb->sb_lowat) {
2387 		rx_credits = send_rx_credits(toep->vi->adapter, toep,
2388 		    rx_credits);
2389 		tp->rcv_wnd += rx_credits;
2390 		tp->rcv_adv += rx_credits;
2391 	}
2392 
2393 	sorwakeup_locked(so);
2394 	SOCKBUF_UNLOCK_ASSERT(sb);
2395 
2396 	INP_WUNLOCK(inp);
2397 	CURVNET_RESTORE();
2398 
2399 	m_freem(m);
2400 }
2401 
2402 void
2403 t4_tls_mod_load(void)
2404 {
2405 
2406 	t4_register_cpl_handler(CPL_TLS_DATA, do_tls_data);
2407 	t4_register_cpl_handler(CPL_RX_TLS_CMP, do_rx_tls_cmp);
2408 }
2409 
2410 void
2411 t4_tls_mod_unload(void)
2412 {
2413 
2414 	t4_register_cpl_handler(CPL_TLS_DATA, NULL);
2415 	t4_register_cpl_handler(CPL_RX_TLS_CMP, NULL);
2416 }
2417 #endif	/* TCP_OFFLOAD */
2418