xref: /freebsd/sys/dev/cxgbe/crypto/t6_kern_tls.c (revision ccb59683b98360afaf5b5bb641a68fea22c68d0b)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2018-2019 Chelsio Communications, Inc.
5  * All rights reserved.
6  * Written by: John Baldwin <jhb@FreeBSD.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #include "opt_inet.h"
31 #include "opt_inet6.h"
32 #include "opt_kern_tls.h"
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 #include <sys/param.h>
38 #include <sys/ktr.h>
39 #include <sys/ktls.h>
40 #include <sys/sglist.h>
41 #include <sys/socket.h>
42 #include <sys/socketvar.h>
43 #include <sys/sockbuf.h>
44 #include <netinet/in.h>
45 #include <netinet/in_pcb.h>
46 #include <netinet/ip.h>
47 #include <netinet/ip6.h>
48 #include <netinet/tcp_var.h>
49 #include <opencrypto/cryptodev.h>
50 #include <opencrypto/xform.h>
51 
52 #include "common/common.h"
53 #include "common/t4_regs.h"
54 #include "common/t4_regs_values.h"
55 #include "common/t4_tcb.h"
56 #include "t4_l2t.h"
57 #include "t4_clip.h"
58 #include "t4_mp_ring.h"
59 #include "crypto/t4_crypto.h"
60 
61 #if defined(INET) || defined(INET6)
62 
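/* A TLS record header is 5 bytes: 1-byte type, 2-byte version, 2-byte length. */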
63 #define TLS_HEADER_LENGTH		5
64 
65 struct tls_scmd {
66 	__be32 seqno_numivs;
67 	__be32 ivgen_hdrlen;
68 };
69 
70 struct tlspcb {
71 	struct m_snd_tag com;
72 	struct vi_info *vi;	/* virtual interface */
73 	struct adapter *sc;
74 	struct l2t_entry *l2te;	/* L2 table entry used by this connection */
75 	struct sge_txq *txq;
76 	int tid;		/* Connection identifier */
77 
78 	int tx_key_addr;
79 	bool inline_key;
80 	bool using_timestamps;
81 	unsigned char enc_mode;
82 
83 	struct tls_scmd scmd0;
84 	struct tls_scmd scmd0_short;
85 
86 	unsigned int tx_key_info_size;
87 
88 	uint32_t prev_seq;
89 	uint32_t prev_ack;
90 	uint32_t prev_tsecr;
91 	uint16_t prev_win;
92 	uint16_t prev_mss;
93 
94 	/* Only used outside of setup and teardown when using inline keys. */
95 	struct tls_keyctx keyctx;
96 
97 	/* Fields only used during setup and teardown. */
98 	struct inpcb *inp;	/* backpointer to host stack's PCB */
99 	struct sge_wrq *ctrlq;
100 	struct clip_entry *ce;	/* CLIP table entry used by this tid */
101 
102 	bool open_pending;
103 };
104 
105 static void t6_tls_tag_free(struct m_snd_tag *mst);
106 static int ktls_setup_keys(struct tlspcb *tlsp,
107     const struct ktls_session *tls, struct sge_txq *txq);
108 
109 static const struct if_snd_tag_sw t6_tls_tag_sw = {
110 	.snd_tag_free = t6_tls_tag_free,
111 	.type = IF_SND_TAG_TYPE_TLS
112 };
113 
114 static inline struct tlspcb *
115 mst_to_tls(struct m_snd_tag *t)
116 {
117 	return (__containerof(t, struct tlspcb, com));
118 }
119 
120 static struct tlspcb *
121 alloc_tlspcb(if_t ifp, struct vi_info *vi, int flags)
122 {
123 	struct port_info *pi = vi->pi;
124 	struct adapter *sc = pi->adapter;
125 	struct tlspcb *tlsp;
126 
127 	tlsp = malloc(sizeof(*tlsp), M_CXGBE, M_ZERO | flags);
128 	if (tlsp == NULL)
129 		return (NULL);
130 
131 	m_snd_tag_init(&tlsp->com, ifp, &t6_tls_tag_sw);
132 	tlsp->vi = vi;
133 	tlsp->sc = sc;
134 	tlsp->ctrlq = &sc->sge.ctrlq[pi->port_id];
135 	tlsp->tid = -1;
136 	tlsp->tx_key_addr = -1;
137 
138 	return (tlsp);
139 }
140 
141 static int
142 ktls_act_open_cpl_size(bool isipv6)
143 {
144 
145 	if (isipv6)
146 		return (sizeof(struct cpl_t6_act_open_req6));
147 	else
148 		return (sizeof(struct cpl_t6_act_open_req));
149 }
150 
151 static void
152 mk_ktls_act_open_req(struct adapter *sc, struct vi_info *vi, struct inpcb *inp,
153     struct tlspcb *tlsp, int atid, void *dst)
154 {
155 	struct tcpcb *tp = intotcpcb(inp);
156 	struct cpl_t6_act_open_req *cpl6;
157 	struct cpl_act_open_req *cpl;
158 	uint64_t options;
159 	int qid_atid;
160 
161 	cpl6 = dst;
162 	cpl = (struct cpl_act_open_req *)cpl6;
163 	INIT_TP_WR(cpl6, 0);
164 	qid_atid = V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
165 	    V_TID_COOKIE(CPL_COOKIE_KERN_TLS);
166 	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
167 		qid_atid));
168 	inp_4tuple_get(inp, &cpl->local_ip, &cpl->local_port,
169 	    &cpl->peer_ip, &cpl->peer_port);
170 
171 	options = F_TCAM_BYPASS | V_ULP_MODE(ULP_MODE_NONE);
172 	options |= V_SMAC_SEL(vi->smt_idx) | V_TX_CHAN(vi->pi->tx_chan);
173 	options |= F_NON_OFFLOAD;
174 	cpl->opt0 = htobe64(options);
175 
176 	options = V_TX_QUEUE(sc->params.tp.tx_modq[vi->pi->tx_chan]);
177 	if (tp->t_flags & TF_REQ_TSTMP)
178 		options |= F_TSTAMPS_EN;
179 	cpl->opt2 = htobe32(options);
180 }
181 
182 static void
183 mk_ktls_act_open_req6(struct adapter *sc, struct vi_info *vi,
184     struct inpcb *inp, struct tlspcb *tlsp, int atid, void *dst)
185 {
186 	struct tcpcb *tp = intotcpcb(inp);
187 	struct cpl_t6_act_open_req6 *cpl6;
188 	struct cpl_act_open_req6 *cpl;
189 	uint64_t options;
190 	int qid_atid;
191 
192 	cpl6 = dst;
193 	cpl = (struct cpl_act_open_req6 *)cpl6;
194 	INIT_TP_WR(cpl6, 0);
195 	qid_atid = V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
196 	    V_TID_COOKIE(CPL_COOKIE_KERN_TLS);
197 	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
198 		qid_atid));
199 	cpl->local_port = inp->inp_lport;
200 	cpl->local_ip_hi = *(uint64_t *)&inp->in6p_laddr.s6_addr[0];
201 	cpl->local_ip_lo = *(uint64_t *)&inp->in6p_laddr.s6_addr[8];
202 	cpl->peer_port = inp->inp_fport;
203 	cpl->peer_ip_hi = *(uint64_t *)&inp->in6p_faddr.s6_addr[0];
204 	cpl->peer_ip_lo = *(uint64_t *)&inp->in6p_faddr.s6_addr[8];
205 
206 	options = F_TCAM_BYPASS | V_ULP_MODE(ULP_MODE_NONE);
207 	options |= V_SMAC_SEL(vi->smt_idx) | V_TX_CHAN(vi->pi->tx_chan);
208 	options |= F_NON_OFFLOAD;
209 	cpl->opt0 = htobe64(options);
210 
211 	options = V_TX_QUEUE(sc->params.tp.tx_modq[vi->pi->tx_chan]);
212 	if (tp->t_flags & TF_REQ_TSTMP)
213 		options |= F_TSTAMPS_EN;
214 	cpl->opt2 = htobe32(options);
215 }
216 
217 static int
218 send_ktls_act_open_req(struct adapter *sc, struct vi_info *vi,
219     struct inpcb *inp, struct tlspcb *tlsp, int atid)
220 {
221 	struct wrqe *wr;
222 	bool isipv6;
223 
224 	isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
225 	if (isipv6) {
226 		tlsp->ce = t4_get_clip_entry(sc, &inp->in6p_laddr, true);
227 		if (tlsp->ce == NULL)
228 			return (ENOENT);
229 	}
230 
231 	wr = alloc_wrqe(ktls_act_open_cpl_size(isipv6), tlsp->ctrlq);
232 	if (wr == NULL) {
233 		CTR2(KTR_CXGBE, "%s: atid %d failed to alloc WR", __func__,
234 		    atid);
235 		return (ENOMEM);
236 	}
237 
238 	if (isipv6)
239 		mk_ktls_act_open_req6(sc, vi, inp, tlsp, atid, wrtod(wr));
240 	else
241 		mk_ktls_act_open_req(sc, vi, inp, tlsp, atid, wrtod(wr));
242 
243 	tlsp->open_pending = true;
244 	t4_wrq_tx(sc, wr);
245 	return (0);
246 }
247 
248 static int
249 ktls_act_open_rpl(struct sge_iq *iq, const struct rss_header *rss,
250     struct mbuf *m)
251 {
252 	struct adapter *sc = iq->adapter;
253 	const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
254 	u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));
255 	u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));
256 	struct tlspcb *tlsp = lookup_atid(sc, atid);
257 	struct inpcb *inp = tlsp->inp;
258 
259 	CTR3(KTR_CXGBE, "%s: atid %d status %d", __func__, atid, status);
260 	free_atid(sc, atid);
261 	if (status == 0)
262 		tlsp->tid = GET_TID(cpl);
263 
264 	INP_WLOCK(inp);
265 	tlsp->open_pending = false;
266 	wakeup(tlsp);
267 	INP_WUNLOCK(inp);
268 	return (0);
269 }
270 
271 /* SET_TCB_FIELD sent as a ULP command looks like this */
272 #define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
273     sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))
274 
275 _Static_assert((LEN__SET_TCB_FIELD_ULP + sizeof(struct ulptx_idata)) % 16 == 0,
276     "CPL_SET_TCB_FIELD ULP command not 16-byte aligned");
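/*
 * For reference: the three structures above sum to 40 bytes (8 + 8 + 24),
 * and the trailing ULPTX_NOOP written by write_set_tcb_field_ulp() pads
 * each copy of the command out to 48 bytes, the 16-byte multiple that the
 * assertion above checks for.
 */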
277 
278 static void
279 write_set_tcb_field_ulp(struct tlspcb *tlsp, void *dst, struct sge_txq *txq,
280     uint16_t word, uint64_t mask, uint64_t val)
281 {
282 	struct ulp_txpkt *txpkt;
283 	struct ulptx_idata *idata;
284 	struct cpl_set_tcb_field_core *cpl;
285 
286 	/* ULP_TXPKT */
287 	txpkt = dst;
288 	txpkt->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
289 	    V_ULP_TXPKT_DATAMODIFY(0) |
290 	    V_ULP_TXPKT_CHANNELID(tlsp->vi->pi->port_id) | V_ULP_TXPKT_DEST(0) |
291 	    V_ULP_TXPKT_FID(txq->eq.cntxt_id) | V_ULP_TXPKT_RO(1));
292 	txpkt->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));
293 
294 	/* ULPTX_IDATA sub-command */
295 	idata = (struct ulptx_idata *)(txpkt + 1);
296 	idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
297 	idata->len = htobe32(sizeof(*cpl));
298 
299 	/* CPL_SET_TCB_FIELD */
300 	cpl = (struct cpl_set_tcb_field_core *)(idata + 1);
301 	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tlsp->tid));
302 	cpl->reply_ctrl = htobe16(F_NO_REPLY);
303 	cpl->word_cookie = htobe16(V_WORD(word));
304 	cpl->mask = htobe64(mask);
305 	cpl->val = htobe64(val);
306 
307 	/* ULPTX_NOOP */
308 	idata = (struct ulptx_idata *)(cpl + 1);
309 	idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
310 	idata->len = htobe32(0);
311 }
312 
313 static int
314 ktls_set_tcb_fields(struct tlspcb *tlsp, struct tcpcb *tp, struct sge_txq *txq)
315 {
316 	struct fw_ulptx_wr *wr;
317 	struct mbuf *m;
318 	char *dst;
319 	void *items[1];
320 	int error, len;
321 
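	/*
	 * One 16-byte FW_ULPTX_WR header plus three SET_TCB_FIELD ULP
	 * commands, each padded to 48 bytes; TCP timestamps add a
	 * fourth below, so len stays a multiple of 16.
	 */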
322 	len = sizeof(*wr) + 3 * roundup2(LEN__SET_TCB_FIELD_ULP, 16);
323 	if (tp->t_flags & TF_REQ_TSTMP)
324 		len += roundup2(LEN__SET_TCB_FIELD_ULP, 16);
325 	m = alloc_wr_mbuf(len, M_NOWAIT);
326 	if (m == NULL) {
327 		CTR2(KTR_CXGBE, "%s: tid %d failed to alloc WR mbuf", __func__,
328 		    tlsp->tid);
329 		return (ENOMEM);
330 	}
331 	m->m_pkthdr.snd_tag = m_snd_tag_ref(&tlsp->com);
332 	m->m_pkthdr.csum_flags |= CSUM_SND_TAG;
333 
334 	/* FW_ULPTX_WR */
335 	wr = mtod(m, void *);
336 	wr->op_to_compl = htobe32(V_FW_WR_OP(FW_ULPTX_WR));
337 	wr->flowid_len16 = htobe32(F_FW_ULPTX_WR_DATA |
338 	    V_FW_WR_LEN16(len / 16));
339 	wr->cookie = 0;
340 	dst = (char *)(wr + 1);
341 
342 	/* Clear TF_NON_OFFLOAD and set TF_CORE_BYPASS */
343 	write_set_tcb_field_ulp(tlsp, dst, txq, W_TCB_T_FLAGS,
344 	    V_TCB_T_FLAGS(V_TF_CORE_BYPASS(1) | V_TF_NON_OFFLOAD(1)),
345 	    V_TCB_T_FLAGS(V_TF_CORE_BYPASS(1)));
346 	dst += roundup2(LEN__SET_TCB_FIELD_ULP, 16);
347 
348 	/* Clear the SND_UNA_RAW, SND_NXT_RAW, and SND_MAX_RAW offsets. */
349 	write_set_tcb_field_ulp(tlsp, dst, txq, W_TCB_SND_UNA_RAW,
350 	    V_TCB_SND_NXT_RAW(M_TCB_SND_NXT_RAW) |
351 	    V_TCB_SND_UNA_RAW(M_TCB_SND_UNA_RAW),
352 	    V_TCB_SND_NXT_RAW(0) | V_TCB_SND_UNA_RAW(0));
353 	dst += roundup2(LEN__SET_TCB_FIELD_ULP, 16);
354 
355 	write_set_tcb_field_ulp(tlsp, dst, txq, W_TCB_SND_MAX_RAW,
356 	    V_TCB_SND_MAX_RAW(M_TCB_SND_MAX_RAW), V_TCB_SND_MAX_RAW(0));
357 	dst += roundup2(LEN__SET_TCB_FIELD_ULP, 16);
358 
359 	if (tp->t_flags & TF_REQ_TSTMP) {
360 		write_set_tcb_field_ulp(tlsp, dst, txq, W_TCB_TIMESTAMP_OFFSET,
361 		    V_TCB_TIMESTAMP_OFFSET(M_TCB_TIMESTAMP_OFFSET),
362 		    V_TCB_TIMESTAMP_OFFSET(tp->ts_offset >> 28));
363 		dst += roundup2(LEN__SET_TCB_FIELD_ULP, 16);
364 	}
365 
366 	KASSERT(dst - (char *)wr == len, ("%s: length mismatch", __func__));
367 
368 	items[0] = m;
369 	error = mp_ring_enqueue(txq->r, items, 1, 1);
370 	if (error)
371 		m_free(m);
372 	return (error);
373 }
374 
375 int
376 t6_tls_tag_alloc(if_t ifp, union if_snd_tag_alloc_params *params,
377     struct m_snd_tag **pt)
378 {
379 	const struct ktls_session *tls;
380 	struct tlspcb *tlsp;
381 	struct adapter *sc;
382 	struct vi_info *vi;
383 	struct inpcb *inp;
384 	struct tcpcb *tp;
385 	struct sge_txq *txq;
386 	int atid, error, explicit_iv_size, keyid, mac_first;
387 
388 	tls = params->tls.tls;
389 
390 	/* Only TLS 1.1 and TLS 1.2 are currently supported. */
391 	if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
392 	    tls->params.tls_vminor < TLS_MINOR_VER_ONE ||
393 	    tls->params.tls_vminor > TLS_MINOR_VER_TWO)
394 		return (EPROTONOSUPPORT);
395 
396 	/* Sanity check values in *tls. */
397 	switch (tls->params.cipher_algorithm) {
398 	case CRYPTO_AES_CBC:
399 		/* XXX: Explicitly ignore any provided IV. */
400 		switch (tls->params.cipher_key_len) {
401 		case 128 / 8:
402 		case 192 / 8:
403 		case 256 / 8:
404 			break;
405 		default:
406 			return (EINVAL);
407 		}
408 		switch (tls->params.auth_algorithm) {
409 		case CRYPTO_SHA1_HMAC:
410 		case CRYPTO_SHA2_256_HMAC:
411 		case CRYPTO_SHA2_384_HMAC:
412 			break;
413 		default:
414 			return (EPROTONOSUPPORT);
415 		}
416 		explicit_iv_size = AES_BLOCK_LEN;
417 		mac_first = 1;
418 		break;
419 	case CRYPTO_AES_NIST_GCM_16:
420 		if (tls->params.iv_len != SALT_SIZE)
421 			return (EINVAL);
422 		switch (tls->params.cipher_key_len) {
423 		case 128 / 8:
424 		case 192 / 8:
425 		case 256 / 8:
426 			break;
427 		default:
428 			return (EINVAL);
429 		}
430 		explicit_iv_size = 8;
431 		mac_first = 0;
432 		break;
433 	default:
434 		return (EPROTONOSUPPORT);
435 	}
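	/*
	 * At this point the session is either AES-CBC paired with a
	 * SHA-1, SHA2-256, or SHA2-384 HMAC, or AES-GCM, in both cases
	 * with a 128-, 192-, or 256-bit key.
	 */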
436 
437 	vi = if_getsoftc(ifp);
438 	sc = vi->adapter;
439 
440 	tlsp = alloc_tlspcb(ifp, vi, M_WAITOK);
441 
442 	atid = alloc_atid(sc, tlsp);
443 	if (atid < 0) {
444 		error = ENOMEM;
445 		goto failed;
446 	}
447 
448 	if (sc->tlst.inline_keys)
449 		keyid = -1;
450 	else
451 		keyid = t4_alloc_tls_keyid(sc);
452 	if (keyid < 0) {
453 		CTR2(KTR_CXGBE, "%s: atid %d using immediate key ctx", __func__,
454 		    atid);
455 		tlsp->inline_key = true;
456 	} else {
457 		tlsp->tx_key_addr = keyid;
458 		CTR3(KTR_CXGBE, "%s: atid %d allocated TX key addr %#x",
459 		    __func__, atid, tlsp->tx_key_addr);
461 	}
462 
463 	inp = params->tls.inp;
464 	INP_RLOCK(inp);
465 	if (inp->inp_flags & INP_DROPPED) {
466 		INP_RUNLOCK(inp);
467 		error = ECONNRESET;
468 		goto failed;
469 	}
470 	tlsp->inp = inp;
471 
472 	tp = intotcpcb(inp);
473 	if (tp->t_flags & TF_REQ_TSTMP) {
474 		tlsp->using_timestamps = true;
475 		if ((tp->ts_offset & 0xfffffff) != 0) {
476 			INP_RUNLOCK(inp);
477 			error = EINVAL;
478 			goto failed;
479 		}
480 	} else
481 		tlsp->using_timestamps = false;
482 
483 	error = send_ktls_act_open_req(sc, vi, inp, tlsp, atid);
484 	if (error) {
485 		INP_RUNLOCK(inp);
486 		goto failed;
487 	}
488 
489 	/* Wait for reply to active open. */
490 	CTR2(KTR_CXGBE, "%s: atid %d sent CPL_ACT_OPEN_REQ", __func__,
491 	    atid);
492 	while (tlsp->open_pending) {
493 		/*
494 		 * XXX: PCATCH?  We would then have to discard the PCB
495 		 * when the completion CPL arrived.
496 		 */
497 		error = rw_sleep(tlsp, &inp->inp_lock, 0, "t6tlsop", 0);
498 	}
499 
500 	atid = -1;
501 	if (tlsp->tid < 0) {
502 		INP_RUNLOCK(inp);
503 		error = ENOMEM;
504 		goto failed;
505 	}
506 
507 	if (inp->inp_flags & INP_DROPPED) {
508 		INP_RUNLOCK(inp);
509 		error = ECONNRESET;
510 		goto failed;
511 	}
512 
513 	txq = &sc->sge.txq[vi->first_txq];
514 	if (inp->inp_flowtype != M_HASHTYPE_NONE)
515 		txq += ((inp->inp_flowid % (vi->ntxq - vi->rsrv_noflowq)) +
516 		    vi->rsrv_noflowq);
517 	tlsp->txq = txq;
518 
519 	error = ktls_set_tcb_fields(tlsp, tp, txq);
520 	INP_RUNLOCK(inp);
521 	if (error)
522 		goto failed;
523 
524 	error = ktls_setup_keys(tlsp, tls, txq);
525 	if (error)
526 		goto failed;
527 
528 	tlsp->enc_mode = t4_tls_cipher_mode(tls);
529 	tlsp->tx_key_info_size = t4_tls_key_info_size(tls);
530 
531 	/* The SCMD fields used when encrypting a full TLS record. */
532 	tlsp->scmd0.seqno_numivs = htobe32(V_SCMD_SEQ_NO_CTRL(3) |
533 	    V_SCMD_PROTO_VERSION(t4_tls_proto_ver(tls)) |
534 	    V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
535 	    V_SCMD_CIPH_AUTH_SEQ_CTRL((mac_first == 0)) |
536 	    V_SCMD_CIPH_MODE(tlsp->enc_mode) |
537 	    V_SCMD_AUTH_MODE(t4_tls_auth_mode(tls)) |
538 	    V_SCMD_HMAC_CTRL(t4_tls_hmac_ctrl(tls)) |
539 	    V_SCMD_IV_SIZE(explicit_iv_size / 2) | V_SCMD_NUM_IVS(1));
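	/* The SCMD IV size field counts 2-byte units, hence the halving. */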
540 
541 	tlsp->scmd0.ivgen_hdrlen = V_SCMD_IV_GEN_CTRL(0) |
542 	    V_SCMD_TLS_FRAG_ENABLE(0);
543 	if (tlsp->inline_key)
544 		tlsp->scmd0.ivgen_hdrlen |= V_SCMD_KEY_CTX_INLINE(1);
545 	tlsp->scmd0.ivgen_hdrlen = htobe32(tlsp->scmd0.ivgen_hdrlen);
546 
547 	/*
548 	 * The SCMD fields used when encrypting a partial TLS record
549 	 * (no trailer and possibly a truncated payload).
550 	 */
551 	tlsp->scmd0_short.seqno_numivs = V_SCMD_SEQ_NO_CTRL(0) |
552 	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
553 	    V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
554 	    V_SCMD_CIPH_AUTH_SEQ_CTRL((mac_first == 0)) |
555 	    V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_NOP) |
556 	    V_SCMD_HMAC_CTRL(SCMD_HMAC_CTRL_NOP) |
557 	    V_SCMD_IV_SIZE(AES_BLOCK_LEN / 2) | V_SCMD_NUM_IVS(0);
558 	if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM)
559 		tlsp->scmd0_short.seqno_numivs |=
560 		    V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_CTR);
561 	else
562 		tlsp->scmd0_short.seqno_numivs |=
563 		    V_SCMD_CIPH_MODE(tlsp->enc_mode);
564 	tlsp->scmd0_short.seqno_numivs =
565 	    htobe32(tlsp->scmd0_short.seqno_numivs);
566 
567 	tlsp->scmd0_short.ivgen_hdrlen = V_SCMD_IV_GEN_CTRL(0) |
568 	    V_SCMD_TLS_FRAG_ENABLE(0) |
569 	    V_SCMD_AADIVDROP(1);
570 	if (tlsp->inline_key)
571 		tlsp->scmd0_short.ivgen_hdrlen |= V_SCMD_KEY_CTX_INLINE(1);
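	/*
	 * Unlike scmd0.ivgen_hdrlen, this value is kept in host order
	 * and is byte-swapped when combined with V_SCMD_HDR_LEN() for
	 * each record.
	 */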
572 
573 	TXQ_LOCK(txq);
574 	if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM)
575 		txq->kern_tls_gcm++;
576 	else
577 		txq->kern_tls_cbc++;
578 	TXQ_UNLOCK(txq);
579 	*pt = &tlsp->com;
580 	return (0);
581 
582 failed:
583 	if (atid >= 0)
584 		free_atid(sc, atid);
585 	m_snd_tag_rele(&tlsp->com);
586 	return (error);
587 }
588 
589 static int
590 ktls_setup_keys(struct tlspcb *tlsp, const struct ktls_session *tls,
591     struct sge_txq *txq)
592 {
593 	struct tls_key_req *kwr;
594 	struct tls_keyctx *kctx;
595 	void *items[1];
596 	struct mbuf *m;
597 	int error;
598 
599 	/*
600 	 * Store the salt and keys in the key context.  For
601 	 * connections with an inline key, this key context is passed
602 	 * as immediate data in each work request.  For connections
603 	 * storing the key in DDR, a work request is used to store a
604 	 * copy of the key context in DDR.
605 	 */
606 	t4_tls_key_ctx(tls, KTLS_TX, &tlsp->keyctx);
607 	if (tlsp->inline_key)
608 		return (0);
609 
610 	/* Populate key work request. */
611 	m = alloc_wr_mbuf(TLS_KEY_WR_SZ, M_NOWAIT);
612 	if (m == NULL) {
613 		CTR2(KTR_CXGBE, "%s: tid %d failed to alloc WR mbuf", __func__,
614 		    tlsp->tid);
615 		return (ENOMEM);
616 	}
617 	m->m_pkthdr.snd_tag = m_snd_tag_ref(&tlsp->com);
618 	m->m_pkthdr.csum_flags |= CSUM_SND_TAG;
619 	kwr = mtod(m, void *);
620 	memset(kwr, 0, TLS_KEY_WR_SZ);
621 
622 	t4_write_tlskey_wr(tls, KTLS_TX, tlsp->tid, 0, tlsp->tx_key_addr, kwr);
623 	kctx = (struct tls_keyctx *)(kwr + 1);
624 	memcpy(kctx, &tlsp->keyctx, sizeof(*kctx));
625 
626 	/*
627 	 * Place the key work request in the transmit queue.  It
628 	 * should be sent to the NIC before any TLS packets using this
629 	 * session.
630 	 */
631 	items[0] = m;
632 	error = mp_ring_enqueue(txq->r, items, 1, 1);
633 	if (error)
634 		m_free(m);
635 	else
636 		CTR2(KTR_CXGBE, "%s: tid %d sent key WR", __func__, tlsp->tid);
637 	return (error);
638 }
639 
640 static u_int
641 ktls_base_wr_size(struct tlspcb *tlsp)
642 {
643 	u_int wr_len;
644 
645 	wr_len = sizeof(struct fw_ulptx_wr);	// 16
646 	wr_len += sizeof(struct ulp_txpkt);	// 8
647 	wr_len += sizeof(struct ulptx_idata);	// 8
648 	wr_len += sizeof(struct cpl_tx_sec_pdu);// 32
649 	if (tlsp->inline_key)
650 		wr_len += tlsp->tx_key_info_size;
651 	else {
652 		wr_len += sizeof(struct ulptx_sc_memrd);// 8
653 		wr_len += sizeof(struct ulptx_idata);	// 8
654 	}
655 	wr_len += sizeof(struct cpl_tx_data);	// 16
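	/* e.g., 16 + 8 + 8 + 32 + 8 + 8 + 16 = 96 bytes with a key in DDR. */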
656 	return (wr_len);
657 }
658 
659 /* How many bytes of TCP payload to send for a given TLS record. */
660 static u_int
661 ktls_tcp_payload_length(struct tlspcb *tlsp, struct mbuf *m_tls)
662 {
663 	struct tls_record_layer *hdr;
664 	u_int plen, mlen;
665 
666 	M_ASSERTEXTPG(m_tls);
667 	hdr = (void *)m_tls->m_epg_hdr;
668 	plen = ntohs(hdr->tls_length);
669 
670 	/*
671 	 * Determine what range of the TLS record this mbuf is
672 	 * requesting to send.
673 	 */
674 	mlen = mtod(m_tls, vm_offset_t) + m_tls->m_len;
675 
676 	/* Always send complete records. */
677 	if (mlen == TLS_HEADER_LENGTH + plen)
678 		return (mlen);
679 
680 	/*
681 	 * If the host stack has asked to send part of the trailer,
682 	 * trim the length to avoid sending any of the trailer.  There
683 	 * is no way to send a partial trailer currently.
684 	 */
685 	if (mlen > TLS_HEADER_LENGTH + plen - m_tls->m_epg_trllen)
686 		mlen = TLS_HEADER_LENGTH + plen - m_tls->m_epg_trllen;
687 
689 	/*
690 	 * For AES-CBC adjust the ciphertext length for the block
691 	 * size.
692 	 */
693 	if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_CBC &&
694 	    mlen > TLS_HEADER_LENGTH) {
695 		mlen = TLS_HEADER_LENGTH + rounddown(mlen - TLS_HEADER_LENGTH,
696 		    AES_BLOCK_LEN);
697 	}
698 
699 #ifdef VERBOSE_TRACES
700 	CTR4(KTR_CXGBE, "%s: tid %d short TLS record (%u vs %u)",
701 	    __func__, tlsp->tid, mlen, TLS_HEADER_LENGTH + plen);
702 #endif
703 	return (mlen);
704 }
705 
706 /*
707  * For a "short" TLS record, determine the offset into the TLS record
708  * payload to send.  This offset does not include the TLS header, but
709  * a non-zero offset implies that the header will not be sent.
710  */
711 static u_int
712 ktls_payload_offset(struct tlspcb *tlsp, struct mbuf *m_tls)
713 {
714 	struct tls_record_layer *hdr;
715 	u_int offset, plen;
716 #ifdef INVARIANTS
717 	u_int mlen;
718 #endif
719 
720 	M_ASSERTEXTPG(m_tls);
721 	hdr = (void *)m_tls->m_epg_hdr;
722 	plen = ntohs(hdr->tls_length);
723 #ifdef INVARIANTS
724 	mlen = mtod(m_tls, vm_offset_t) + m_tls->m_len;
725 	MPASS(mlen < TLS_HEADER_LENGTH + plen);
726 #endif
727 	if (mtod(m_tls, vm_offset_t) <= m_tls->m_epg_hdrlen)
728 		return (0);
729 	if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) {
730 		/*
731 		 * Always send something.  This function is only called
732 		 * if we aren't sending the tag at all, but if the
733 		 * request starts in the tag then we are in an odd
734 		 * state where we would effectively send nothing.  Cap
735 		 * the offset at the last byte of the record payload
736 		 * to send the last cipher block.
737 		 */
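		/*
		 * e.g., a request beginning 100 bytes into the record
		 * payload is rounded down to offset 96, the previous
		 * AES block boundary.
		 */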
738 		offset = min(mtod(m_tls, vm_offset_t) - m_tls->m_epg_hdrlen,
739 		    (plen - TLS_HEADER_LENGTH - m_tls->m_epg_trllen) - 1);
740 		return (rounddown(offset, AES_BLOCK_LEN));
741 	}
742 	return (0);
743 }
744 
745 static u_int
746 ktls_sgl_size(u_int nsegs)
747 {
748 	u_int wr_len;
749 
750 	/* First segment is part of ulptx_sgl. */
751 	nsegs--;
752 
753 	wr_len = sizeof(struct ulptx_sgl);
754 	wr_len += 8 * ((3 * nsegs) / 2 + (nsegs & 1));
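	/*
	 * e.g., nsegs = 3 on entry: a 16-byte ulptx_sgl (first segment
	 * inline) plus one 24-byte ulptx_sge_pair = 40 bytes.
	 */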
755 	return (wr_len);
756 }
757 
758 static int
759 ktls_wr_len(struct tlspcb *tlsp, struct mbuf *m, struct mbuf *m_tls,
760     int *nsegsp)
761 {
762 	struct tls_record_layer *hdr;
763 	u_int imm_len, offset, plen, wr_len, tlen;
764 
765 	M_ASSERTEXTPG(m_tls);
766 
767 	/*
768 	 * Determine the size of the TLS record payload to send
769 	 * excluding header and trailer.
770 	 */
771 	tlen = ktls_tcp_payload_length(tlsp, m_tls);
772 	if (tlen <= m_tls->m_epg_hdrlen) {
773 		/*
774 		 * For requests that only want to send the TLS header,
775 		 * send a tunnelled packet as immediate data.
776 		 */
777 		wr_len = sizeof(struct fw_eth_tx_pkt_wr) +
778 		    sizeof(struct cpl_tx_pkt_core) +
779 		    roundup2(m->m_len + m_tls->m_len, 16);
780 		if (wr_len > SGE_MAX_WR_LEN) {
781 			CTR3(KTR_CXGBE,
782 		    "%s: tid %d TLS header-only packet too long (len %d)",
783 			    __func__, tlsp->tid, m->m_len + m_tls->m_len);
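			/* t6_ktls_parse_pkt() will reject this WR as too long. */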
784 		}
785 
786 		/* This should always be the last TLS record in a chain. */
787 		MPASS(m_tls->m_next == NULL);
788 		*nsegsp = 0;
789 		return (wr_len);
790 	}
791 
792 	hdr = (void *)m_tls->m_epg_hdr;
793 	plen = TLS_HEADER_LENGTH + ntohs(hdr->tls_length) - m_tls->m_epg_trllen;
794 	if (tlen < plen) {
795 		plen = tlen;
796 		offset = ktls_payload_offset(tlsp, m_tls);
797 	} else
798 		offset = 0;
799 
800 	/* Calculate the size of the work request. */
801 	wr_len = ktls_base_wr_size(tlsp);
802 
803 	/*
804 	 * Full records and short records with an offset of 0 include
805 	 * the TLS header as immediate data.  Short records include a
806 	 * raw AES IV as immediate data.
807 	 */
808 	imm_len = 0;
809 	if (offset == 0)
810 		imm_len += m_tls->m_epg_hdrlen;
811 	if (plen == tlen)
812 		imm_len += AES_BLOCK_LEN;
813 	wr_len += roundup2(imm_len, 16);
814 
815 	/* TLS record payload via DSGL. */
816 	*nsegsp = sglist_count_mbuf_epg(m_tls, m_tls->m_epg_hdrlen + offset,
817 	    plen - (m_tls->m_epg_hdrlen + offset));
818 	wr_len += ktls_sgl_size(*nsegsp);
819 
820 	wr_len = roundup2(wr_len, 16);
821 	return (wr_len);
822 }
823 
824 /*
825  * See if we have any TCP options requiring a dedicated options-only
826  * packet.
827  */
828 static int
829 ktls_has_tcp_options(struct tcphdr *tcp)
830 {
831 	u_char *cp;
832 	int cnt, opt, optlen;
833 
834 	cp = (u_char *)(tcp + 1);
835 	cnt = tcp->th_off * 4 - sizeof(struct tcphdr);
836 	for (; cnt > 0; cnt -= optlen, cp += optlen) {
837 		opt = cp[0];
838 		if (opt == TCPOPT_EOL)
839 			break;
840 		if (opt == TCPOPT_NOP)
841 			optlen = 1;
842 		else {
843 			if (cnt < 2)
844 				break;
845 			optlen = cp[1];
846 			if (optlen < 2 || optlen > cnt)
847 				break;
848 		}
849 		switch (opt) {
850 		case TCPOPT_NOP:
851 		case TCPOPT_TIMESTAMP:
852 			break;
853 		default:
854 			return (1);
855 		}
856 	}
857 	return (0);
858 }
859 
860 /*
861  * Find the TCP timestamp option.
862  */
863 static void *
864 ktls_find_tcp_timestamps(struct tcphdr *tcp)
865 {
866 	u_char *cp;
867 	int cnt, opt, optlen;
868 
869 	cp = (u_char *)(tcp + 1);
870 	cnt = tcp->th_off * 4 - sizeof(struct tcphdr);
871 	for (; cnt > 0; cnt -= optlen, cp += optlen) {
872 		opt = cp[0];
873 		if (opt == TCPOPT_EOL)
874 			break;
875 		if (opt == TCPOPT_NOP)
876 			optlen = 1;
877 		else {
878 			if (cnt < 2)
879 				break;
880 			optlen = cp[1];
881 			if (optlen < 2 || optlen > cnt)
882 				break;
883 		}
884 		if (opt == TCPOPT_TIMESTAMP && optlen == TCPOLEN_TIMESTAMP)
885 			return (cp + 2);
886 	}
887 	return (NULL);
888 }
889 
890 int
891 t6_ktls_parse_pkt(struct mbuf *m)
892 {
893 	struct tlspcb *tlsp;
894 	struct ether_header *eh;
895 	struct ip *ip;
896 	struct ip6_hdr *ip6;
897 	struct tcphdr *tcp;
898 	struct mbuf *m_tls;
899 	void *items[1];
900 	int nsegs;
901 	u_int wr_len, tot_len;
902 
903 	/*
904 	 * Locate headers in initial mbuf.
905 	 *
906 	 * XXX: This assumes all of the headers are in the initial mbuf.
907 	 * Could perhaps use m_advance() like parse_pkt() if that turns
908 	 * out to not be true.
909 	 */
910 	M_ASSERTPKTHDR(m);
911 	MPASS(m->m_pkthdr.snd_tag != NULL);
912 	tlsp = mst_to_tls(m->m_pkthdr.snd_tag);
913 
914 	if (m->m_len <= sizeof(*eh) + sizeof(*ip)) {
915 		CTR2(KTR_CXGBE, "%s: tid %d header mbuf too short", __func__,
916 		    tlsp->tid);
917 		return (EINVAL);
918 	}
919 	eh = mtod(m, struct ether_header *);
920 	if (ntohs(eh->ether_type) != ETHERTYPE_IP &&
921 	    ntohs(eh->ether_type) != ETHERTYPE_IPV6) {
922 		CTR2(KTR_CXGBE, "%s: tid %d mbuf not ETHERTYPE_IP{,V6}",
923 		    __func__, tlsp->tid);
924 		return (EINVAL);
925 	}
926 	m->m_pkthdr.l2hlen = sizeof(*eh);
927 
928 	/* XXX: Reject unsupported IP options? */
929 	if (ntohs(eh->ether_type) == ETHERTYPE_IP) {
930 		ip = (struct ip *)(eh + 1);
931 		if (ip->ip_p != IPPROTO_TCP) {
932 			CTR2(KTR_CXGBE, "%s: tid %d mbuf not IPPROTO_TCP",
933 			    __func__, tlsp->tid);
934 			return (EINVAL);
935 		}
936 		m->m_pkthdr.l3hlen = ip->ip_hl * 4;
937 	} else {
938 		ip6 = (struct ip6_hdr *)(eh + 1);
939 		if (ip6->ip6_nxt != IPPROTO_TCP) {
940 			CTR3(KTR_CXGBE, "%s: tid %d mbuf not IPPROTO_TCP (%u)",
941 			    __func__, tlsp->tid, ip6->ip6_nxt);
942 			return (EINVAL);
943 		}
944 		m->m_pkthdr.l3hlen = sizeof(struct ip6_hdr);
945 	}
946 	if (m->m_len < m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen +
947 	    sizeof(*tcp)) {
948 		CTR2(KTR_CXGBE, "%s: tid %d header mbuf too short (2)",
949 		    __func__, tlsp->tid);
950 		return (EINVAL);
951 	}
952 	tcp = (struct tcphdr *)((char *)(eh + 1) + m->m_pkthdr.l3hlen);
953 	m->m_pkthdr.l4hlen = tcp->th_off * 4;
954 
955 	/* Bail if there is TCP payload before the TLS record. */
956 	if (m->m_len != m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen +
957 	    m->m_pkthdr.l4hlen) {
958 		CTR6(KTR_CXGBE,
959 		    "%s: tid %d header mbuf bad length (%d + %d + %d != %d)",
960 		    __func__, tlsp->tid, m->m_pkthdr.l2hlen,
961 		    m->m_pkthdr.l3hlen, m->m_pkthdr.l4hlen, m->m_len);
962 		return (EINVAL);
963 	}
964 
965 	/* Assume all headers are in 'm' for now. */
966 	MPASS(m->m_next != NULL);
967 	MPASS(m->m_next->m_flags & M_EXTPG);
968 
969 	tot_len = 0;
970 
971 	/*
972 	 * Each of the remaining mbufs in the chain should reference a
973 	 * TLS record.
974 	 */
975 	for (m_tls = m->m_next; m_tls != NULL; m_tls = m_tls->m_next) {
976 		MPASS(m_tls->m_flags & M_EXTPG);
977 
978 		wr_len = ktls_wr_len(tlsp, m, m_tls, &nsegs);
979 #ifdef VERBOSE_TRACES
980 		CTR4(KTR_CXGBE, "%s: tid %d wr_len %d nsegs %d", __func__,
981 		    tlsp->tid, wr_len, nsegs);
982 #endif
983 		if (wr_len > SGE_MAX_WR_LEN || nsegs > TX_SGL_SEGS)
984 			return (EFBIG);
985 		tot_len += roundup2(wr_len, EQ_ESIZE);
986 
987 		/*
988 		 * Store 'nsegs' for the first TLS record in the
989 		 * header mbuf's metadata.
990 		 */
991 		if (m_tls == m->m_next)
992 			set_mbuf_nsegs(m, nsegs);
993 	}
994 
995 	MPASS(tot_len != 0);
996 
997 	/*
998 	 * See if we have any TCP options or a FIN requiring a
999 	 * dedicated packet.
1000 	 */
1001 	if ((tcp->th_flags & TH_FIN) != 0 || ktls_has_tcp_options(tcp)) {
1002 		wr_len = sizeof(struct fw_eth_tx_pkt_wr) +
1003 		    sizeof(struct cpl_tx_pkt_core) + roundup2(m->m_len, 16);
1004 		if (wr_len > SGE_MAX_WR_LEN) {
1005 			CTR3(KTR_CXGBE,
1006 			    "%s: tid %d options-only packet too long (len %d)",
1007 			    __func__, tlsp->tid, m->m_len);
1008 			return (EINVAL);
1009 		}
1010 		tot_len += roundup2(wr_len, EQ_ESIZE);
1011 	}
1012 
1013 	/* Include room for a TP work request to program an L2T entry. */
1014 	tot_len += EQ_ESIZE;
1015 
1016 	/*
1017 	 * Include room for a ULPTX work request including up to 5
1018 	 * CPL_SET_TCB_FIELD commands before the first TLS work
1019 	 * request.
1020 	 */
1021 	wr_len = sizeof(struct fw_ulptx_wr) +
1022 	    5 * roundup2(LEN__SET_TCB_FIELD_ULP, 16);
1023 
1024 	/*
1025 	 * If timestamps are present, reserve 1 more command for
1026 	 * setting the echoed timestamp.
1027 	 */
1028 	if (tlsp->using_timestamps)
1029 		wr_len += roundup2(LEN__SET_TCB_FIELD_ULP, 16);
1030 
1031 	tot_len += roundup2(wr_len, EQ_ESIZE);
1032 
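	/*
	 * tot_len is a worst-case budget in whole EQ_ESIZE (64-byte)
	 * descriptors; store it with the mbuf as a count of 16-byte
	 * units for the transmit path to reserve.
	 */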
1033 	set_mbuf_len16(m, tot_len / 16);
1034 #ifdef VERBOSE_TRACES
1035 	CTR4(KTR_CXGBE, "%s: tid %d len16 %d nsegs %d", __func__,
1036 	    tlsp->tid, mbuf_len16(m), mbuf_nsegs(m));
1037 #endif
1038 	items[0] = m;
1039 	return (mp_ring_enqueue(tlsp->txq->r, items, 1, 256));
1040 }
1041 
1042 /*
1043  * If the SGL ends on an address that is not 16-byte aligned, this
1044  * function will add a zero-filled flit at the end.
1045  */
1046 static void
1047 write_gl_to_buf(struct sglist *gl, caddr_t to)
1048 {
1049 	struct sglist_seg *seg;
1050 	__be64 *flitp;
1051 	struct ulptx_sgl *usgl;
1052 	int i, nflits, nsegs;
1053 
1054 	KASSERT(((uintptr_t)to & 0xf) == 0,
1055 	    ("%s: SGL must start at a 16 byte boundary: %p", __func__, to));
1056 
1057 	nsegs = gl->sg_nseg;
1058 	MPASS(nsegs > 0);
1059 
1060 	nflits = (3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2;
1061 	flitp = (__be64 *)to;
1062 	seg = &gl->sg_segs[0];
1063 	usgl = (void *)flitp;
1064 
1065 	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
1066 	    V_ULPTX_NSGE(nsegs));
1067 	usgl->len0 = htobe32(seg->ss_len);
1068 	usgl->addr0 = htobe64(seg->ss_paddr);
1069 	seg++;
1070 
1071 	for (i = 0; i < nsegs - 1; i++, seg++) {
1072 		usgl->sge[i / 2].len[i & 1] = htobe32(seg->ss_len);
1073 		usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ss_paddr);
1074 	}
1075 	if (i & 1)
1076 		usgl->sge[i / 2].len[1] = htobe32(0);
1077 	flitp += nflits;
1078 
1079 	if (nflits & 1) {
1080 		MPASS(((uintptr_t)flitp) & 0xf);
1081 		*flitp++ = 0;
1082 	}
1083 
1084 	MPASS((((uintptr_t)flitp) & 0xf) == 0);
1085 }
1086 
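/*
 * Copy into the descriptor ring, wrapping at the end of the ring: e.g.,
 * with 32 bytes left before &eq->desc[eq->sidx], a 48-byte copy places
 * 32 bytes at the tail of the ring and the remaining 16 at eq->desc[0].
 */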
1087 static inline void
1088 copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len)
1089 {
1090 
1091 	MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]);
1092 	MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]);
1093 
1094 	if (__predict_true((uintptr_t)(*to) + len <=
1095 	    (uintptr_t)&eq->desc[eq->sidx])) {
1096 		bcopy(from, *to, len);
1097 		(*to) += len;
1098 		if ((uintptr_t)(*to) == (uintptr_t)&eq->desc[eq->sidx])
1099 			(*to) = (caddr_t)eq->desc;
1100 	} else {
1101 		int portion = (uintptr_t)&eq->desc[eq->sidx] - (uintptr_t)(*to);
1102 
1103 		bcopy(from, *to, portion);
1104 		from += portion;
1105 		portion = len - portion;	/* remaining */
1106 		bcopy(from, (void *)eq->desc, portion);
1107 		(*to) = (caddr_t)eq->desc + portion;
1108 	}
1109 }
1110 
1111 static int
1112 ktls_write_tcp_options(struct sge_txq *txq, void *dst, struct mbuf *m,
1113     u_int available, u_int pidx)
1114 {
1115 	struct tx_sdesc *txsd;
1116 	struct fw_eth_tx_pkt_wr *wr;
1117 	struct cpl_tx_pkt_core *cpl;
1118 	uint32_t ctrl;
1119 	uint64_t ctrl1;
1120 	int len16, ndesc, pktlen;
1121 	struct ether_header *eh;
1122 	struct ip *ip, newip;
1123 	struct ip6_hdr *ip6, newip6;
1124 	struct tcphdr *tcp, newtcp;
1125 	caddr_t out;
1126 
1127 	TXQ_LOCK_ASSERT_OWNED(txq);
1128 	M_ASSERTPKTHDR(m);
1129 
1130 	wr = dst;
1131 	pktlen = m->m_len;
1132 	ctrl = sizeof(struct cpl_tx_pkt_core) + pktlen;
1133 	len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) + ctrl, 16);
1134 	ndesc = tx_len16_to_desc(len16);
1135 	MPASS(ndesc <= available);
1136 
1137 	/* Firmware work request header */
1138 	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
1139 	    V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl));
1140 
1141 	ctrl = V_FW_WR_LEN16(len16);
1142 	wr->equiq_to_len16 = htobe32(ctrl);
1143 	wr->r3 = 0;
1144 
1145 	cpl = (void *)(wr + 1);
1146 
1147 	/* CPL header */
1148 	cpl->ctrl0 = txq->cpl_ctrl0;
1149 	cpl->pack = 0;
1150 	cpl->len = htobe16(pktlen);
1151 
1152 	out = (void *)(cpl + 1);
1153 
1154 	/* Copy over Ethernet header. */
1155 	eh = mtod(m, struct ether_header *);
1156 	copy_to_txd(&txq->eq, (caddr_t)eh, &out, m->m_pkthdr.l2hlen);
1157 
1158 	/* Fixup length in IP header and copy out. */
1159 	if (ntohs(eh->ether_type) == ETHERTYPE_IP) {
1160 		ip = (void *)((char *)eh + m->m_pkthdr.l2hlen);
1161 		newip = *ip;
1162 		newip.ip_len = htons(pktlen - m->m_pkthdr.l2hlen);
1163 		copy_to_txd(&txq->eq, (caddr_t)&newip, &out, sizeof(newip));
1164 		if (m->m_pkthdr.l3hlen > sizeof(*ip))
1165 			copy_to_txd(&txq->eq, (caddr_t)(ip + 1), &out,
1166 			    m->m_pkthdr.l3hlen - sizeof(*ip));
1167 		ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP) |
1168 		    V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) |
1169 		    V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen);
1170 	} else {
1171 		ip6 = (void *)((char *)eh + m->m_pkthdr.l2hlen);
1172 		newip6 = *ip6;
		/* Note: ip6_plen excludes the IPv6 header itself. */
1173 		newip6.ip6_plen = htons(pktlen - m->m_pkthdr.l2hlen -
		    sizeof(*ip6));
1174 		copy_to_txd(&txq->eq, (caddr_t)&newip6, &out, sizeof(newip6));
1175 		MPASS(m->m_pkthdr.l3hlen == sizeof(*ip6));
1176 		ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP6) |
1177 		    V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) |
1178 		    V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen);
1179 	}
1180 	cpl->ctrl1 = htobe64(ctrl1);
1181 	txq->txcsum++;
1182 
1183 	/* Clear PUSH and FIN in the TCP header if present. */
1184 	tcp = (void *)((char *)eh + m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen);
1185 	newtcp = *tcp;
1186 	newtcp.th_flags &= ~(TH_PUSH | TH_FIN);
1187 	copy_to_txd(&txq->eq, (caddr_t)&newtcp, &out, sizeof(newtcp));
1188 
1189 	/* Copy rest of packet. */
1190 	copy_to_txd(&txq->eq, (caddr_t)(tcp + 1), &out, pktlen -
1191 	    (m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen + sizeof(*tcp)));
1192 	txq->imm_wrs++;
1193 
1194 	txq->txpkt_wrs++;
1195 
1196 	txq->kern_tls_options++;
1197 
1198 	txsd = &txq->sdesc[pidx];
1199 	txsd->m = NULL;
1200 	txsd->desc_used = ndesc;
1201 
1202 	return (ndesc);
1203 }
1204 
1205 static int
1206 ktls_write_tunnel_packet(struct sge_txq *txq, void *dst, struct mbuf *m,
1207     struct mbuf *m_tls, u_int available, tcp_seq tcp_seqno, u_int pidx)
1208 {
1209 	struct tx_sdesc *txsd;
1210 	struct fw_eth_tx_pkt_wr *wr;
1211 	struct cpl_tx_pkt_core *cpl;
1212 	uint32_t ctrl;
1213 	uint64_t ctrl1;
1214 	int len16, ndesc, pktlen;
1215 	struct ether_header *eh;
1216 	struct ip *ip, newip;
1217 	struct ip6_hdr *ip6, newip6;
1218 	struct tcphdr *tcp, newtcp;
1219 	caddr_t out;
1220 
1221 	TXQ_LOCK_ASSERT_OWNED(txq);
1222 	M_ASSERTPKTHDR(m);
1223 
1224 	/* Locate the template TLS header. */
1225 	M_ASSERTEXTPG(m_tls);
1226 
1227 	/* This should always be the last TLS record in a chain. */
1228 	MPASS(m_tls->m_next == NULL);
1229 
1230 	wr = dst;
1231 	pktlen = m->m_len + m_tls->m_len;
1232 	ctrl = sizeof(struct cpl_tx_pkt_core) + pktlen;
1233 	len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) + ctrl, 16);
1234 	ndesc = tx_len16_to_desc(len16);
1235 	MPASS(ndesc <= available);
1236 
1237 	/* Firmware work request header */
1238 	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
1239 	    V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl));
1240 
1241 	ctrl = V_FW_WR_LEN16(len16);
1242 	wr->equiq_to_len16 = htobe32(ctrl);
1243 	wr->r3 = 0;
1244 
1245 	cpl = (void *)(wr + 1);
1246 
1247 	/* CPL header */
1248 	cpl->ctrl0 = txq->cpl_ctrl0;
1249 	cpl->pack = 0;
1250 	cpl->len = htobe16(pktlen);
1251 
1252 	out = (void *)(cpl + 1);
1253 
1254 	/* Copy over Ethernet header. */
1255 	eh = mtod(m, struct ether_header *);
1256 	copy_to_txd(&txq->eq, (caddr_t)eh, &out, m->m_pkthdr.l2hlen);
1257 
1258 	/* Fixup length in IP header and copy out. */
1259 	if (ntohs(eh->ether_type) == ETHERTYPE_IP) {
1260 		ip = (void *)((char *)eh + m->m_pkthdr.l2hlen);
1261 		newip = *ip;
1262 		newip.ip_len = htons(pktlen - m->m_pkthdr.l2hlen);
1263 		copy_to_txd(&txq->eq, (caddr_t)&newip, &out, sizeof(newip));
1264 		if (m->m_pkthdr.l3hlen > sizeof(*ip))
1265 			copy_to_txd(&txq->eq, (caddr_t)(ip + 1), &out,
1266 			    m->m_pkthdr.l3hlen - sizeof(*ip));
1267 		ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP) |
1268 		    V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) |
1269 		    V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen);
1270 	} else {
1271 		ip6 = (void *)((char *)eh + m->m_pkthdr.l2hlen);
1272 		newip6 = *ip6;
		/* Note: ip6_plen excludes the IPv6 header itself. */
1273 		newip6.ip6_plen = htons(pktlen - m->m_pkthdr.l2hlen -
		    sizeof(*ip6));
1274 		copy_to_txd(&txq->eq, (caddr_t)&newip6, &out, sizeof(newip6));
1275 		MPASS(m->m_pkthdr.l3hlen == sizeof(*ip6));
1276 		ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP6) |
1277 		    V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) |
1278 		    V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen);
1279 	}
1280 	cpl->ctrl1 = htobe64(ctrl1);
1281 	txq->txcsum++;
1282 
1283 	/* Set sequence number in TCP header. */
1284 	tcp = (void *)((char *)eh + m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen);
1285 	newtcp = *tcp;
1286 	newtcp.th_seq = htonl(tcp_seqno + mtod(m_tls, vm_offset_t));
1287 	copy_to_txd(&txq->eq, (caddr_t)&newtcp, &out, sizeof(newtcp));
1288 
1289 	/* Copy rest of TCP header. */
1290 	copy_to_txd(&txq->eq, (caddr_t)(tcp + 1), &out, m->m_len -
1291 	    (m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen + sizeof(*tcp)));
1292 
1293 	/* Copy the subset of the TLS header requested. */
1294 	copy_to_txd(&txq->eq, (char *)m_tls->m_epg_hdr +
1295 	    mtod(m_tls, vm_offset_t), &out, m_tls->m_len);
1296 	txq->imm_wrs++;
1297 
1298 	txq->txpkt_wrs++;
1299 
1300 	txq->kern_tls_header++;
1301 
1302 	txsd = &txq->sdesc[pidx];
1303 	txsd->m = m;
1304 	txsd->desc_used = ndesc;
1305 
1306 	return (ndesc);
1307 }
1308 
1309 _Static_assert(sizeof(struct cpl_set_tcb_field) <= EQ_ESIZE,
1310     "CPL_SET_TCB_FIELD must be smaller than a single TX descriptor");
1311 _Static_assert(W_TCB_SND_UNA_RAW == W_TCB_SND_NXT_RAW,
1312     "SND_NXT_RAW and SND_UNA_RAW are in different words");
1313 
1314 static int
1315 ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq, void *dst,
1316     struct mbuf *m, struct tcphdr *tcp, struct mbuf *m_tls, u_int available,
1317     tcp_seq tcp_seqno, uint32_t *tsopt, u_int pidx, bool set_l2t_idx)
1318 {
1319 	struct sge_eq *eq = &txq->eq;
1320 	struct tx_sdesc *txsd;
1321 	struct fw_ulptx_wr *wr;
1322 	struct ulp_txpkt *txpkt;
1323 	struct ulptx_sc_memrd *memrd;
1324 	struct ulptx_idata *idata;
1325 	struct cpl_tx_sec_pdu *sec_pdu;
1326 	struct cpl_tx_data *tx_data;
1327 	struct tls_record_layer *hdr;
1328 	char *iv, *out;
1329 	u_int aad_start, aad_stop;
1330 	u_int auth_start, auth_stop, auth_insert;
1331 	u_int cipher_start, cipher_stop, iv_offset;
1332 	u_int imm_len, mss, ndesc, offset, plen, tlen, twr_len, wr_len;
1333 	u_int fields, nsegs, tx_max_offset, tx_max;
1334 	bool first_wr, last_wr, using_scratch;
1335 
1336 	ndesc = 0;
1337 	MPASS(tlsp->txq == txq);
1338 
1339 	first_wr = (tlsp->prev_seq == 0 && tlsp->prev_ack == 0 &&
1340 	    tlsp->prev_win == 0);
1341 
1342 	/*
1343 	 * Use the per-txq scratch pad if near the end of the ring to
1344 	 * simplify handling of wrap-around.  The test is simple but not
1345 	 * quite perfect: use the scratch buffer whenever a maximal work
1346 	 * request would not fit in the ring without wrapping.
1347 	 */
1348 	using_scratch = (eq->sidx - pidx < SGE_MAX_WR_LEN / EQ_ESIZE);
1349 
1350 	/* Locate the TLS header. */
1351 	M_ASSERTEXTPG(m_tls);
1352 	hdr = (void *)m_tls->m_epg_hdr;
1353 	plen = TLS_HEADER_LENGTH + ntohs(hdr->tls_length) - m_tls->m_epg_trllen;
1354 
1355 	/* Determine how much of the TLS record to send. */
1356 	tlen = ktls_tcp_payload_length(tlsp, m_tls);
1357 	if (tlen <= m_tls->m_epg_hdrlen) {
1358 		/*
1359 		 * For requests that only want to send the TLS header,
1360 		 * send a tunnelled packet as immediate data.
1361 		 */
1362 #ifdef VERBOSE_TRACES
1363 		CTR3(KTR_CXGBE, "%s: tid %d header-only TLS record %u",
1364 		    __func__, tlsp->tid, (u_int)m_tls->m_epg_seqno);
1365 #endif
1366 		return (ktls_write_tunnel_packet(txq, dst, m, m_tls, available,
1367 		    tcp_seqno, pidx));
1368 	}
1369 	if (tlen < plen) {
1370 		plen = tlen;
1371 		offset = ktls_payload_offset(tlsp, m_tls);
1372 #ifdef VERBOSE_TRACES
1373 		CTR4(KTR_CXGBE, "%s: tid %d short TLS record %u with offset %u",
1374 		    __func__, tlsp->tid, (u_int)m_tls->m_epg_seqno, offset);
1375 #endif
1376 		if (m_tls->m_next == NULL && (tcp->th_flags & TH_FIN) != 0) {
1377 			txq->kern_tls_fin_short++;
1378 #ifdef INVARIANTS
1379 			panic("%s: FIN on short TLS record", __func__);
1380 #endif
1381 		}
1382 	} else
1383 		offset = 0;
1384 
1385 	/*
1386 	 * This is the last work request for a given TLS mbuf chain if
1387 	 * it is the last mbuf in the chain and FIN is not set.  If
1388 	 * FIN is set, then ktls_write_tcp_fin() will write out the
1389 	 * last work request.
1390 	 */
1391 	last_wr = m_tls->m_next == NULL && (tcp->th_flags & TH_FIN) == 0;
1392 
1393 	/*
1394 	 * The host stack may ask us to not send part of the start of
1395 	 * a TLS record.  (For example, the stack might have
1396 	 * previously sent a "short" TLS record and might later send
1397 	 * down an mbuf that requests to send the remainder of the TLS
1398 	 * record.)  The crypto engine must process a TLS record from
1399 	 * the beginning if computing a GCM tag or HMAC, so we always
1400 	 * send the TLS record from the beginning as input to the
1401 	 * crypto engine and via CPL_TX_DATA to TP.  However, TP will
1402 	 * drop individual packets after they have been chopped up
1403 	 * into MSS-sized chunks if the entire sequence range of those
1404 	 * packets is less than SND_UNA.  SND_UNA is computed as
1405 	 * TX_MAX - SND_UNA_RAW.  Thus, use the offset stored in
1406 	 * m_data to set TX_MAX to the first byte in the TCP sequence
1407 	 * space the host actually wants us to send and set
1408 	 * SND_UNA_RAW to 0.
1409 	 *
1410 	 * If the host sends us back to back requests that span the
1411 	 * trailer of a single TLS record (first request ends "in" the
1412 	 * trailer and second request starts at the next byte but
1413 	 * still "in" the trailer), the initial bytes of the trailer
1414 	 * that the first request drops will not be retransmitted.  If
1415 	 * the host uses the same requests when retransmitting, the
1416 	 * connection will hang.  To handle this, always transmit the
1417 	 * full trailer for a request that begins "in" the trailer
1418 	 * (the second request in the example above).  This should
1419 	 * also help to avoid retransmits for the common case.
1420 	 *
1421 	 * A similar condition exists when using CBC for back to back
1422 	 * requests that span a single AES block.  The first request
1423 	 * will be truncated to end at the end of the previous AES
1424 	 * block.  To handle this, always begin transmission at the
1425 	 * start of the current AES block.
1426 	 */
1427 	tx_max_offset = mtod(m_tls, vm_offset_t);
1428 	if (tx_max_offset > TLS_HEADER_LENGTH + ntohs(hdr->tls_length) -
1429 	    m_tls->m_epg_trllen) {
1430 		/* Always send the full trailer. */
1431 		tx_max_offset = TLS_HEADER_LENGTH + ntohs(hdr->tls_length) -
1432 		    m_tls->m_epg_trllen;
1433 	}
1434 	if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_CBC &&
1435 	    tx_max_offset > TLS_HEADER_LENGTH) {
1436 		/* Always send all of the first AES block. */
1437 		tx_max_offset = TLS_HEADER_LENGTH +
1438 		    rounddown(tx_max_offset - TLS_HEADER_LENGTH,
1439 		    AES_BLOCK_LEN);
1440 	}
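	/*
	 * e.g., a retransmit whose m_data starts 1000 bytes into a CBC
	 * record is clamped to 5 + rounddown(995, 16) = 997, so that
	 * transmission resumes at an AES block boundary.
	 */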
1441 	tx_max = tcp_seqno + tx_max_offset;
1442 
1443 	/*
1444 	 * Update TCB fields.  Reserve space for the FW_ULPTX_WR header
1445 	 * but don't populate it until we know how many field updates
1446 	 * are required.
1447 	 */
1448 	if (using_scratch)
1449 		wr = (void *)txq->ss;
1450 	else
1451 		wr = dst;
1452 	out = (void *)(wr + 1);
1453 	fields = 0;
1454 	if (set_l2t_idx) {
1455 		KASSERT(m->m_next == m_tls,
1456 		    ("trying to set L2T_IX for subsequent TLS WR"));
1457 #ifdef VERBOSE_TRACES
1458 		CTR3(KTR_CXGBE, "%s: tid %d set L2T_IX to %d", __func__,
1459 		    tlsp->tid, tlsp->l2te->idx);
1460 #endif
1461 		write_set_tcb_field_ulp(tlsp, out, txq, W_TCB_L2T_IX,
1462 		    V_TCB_L2T_IX(M_TCB_L2T_IX), V_TCB_L2T_IX(tlsp->l2te->idx));
1463 		out += roundup2(LEN__SET_TCB_FIELD_ULP, 16);
1464 		fields++;
1465 	}
1466 	if (tsopt != NULL && tlsp->prev_tsecr != ntohl(tsopt[1])) {
1467 		KASSERT(m->m_next == m_tls,
1468 		    ("trying to set T_RTSEQ_RECENT for subsequent TLS WR"));
1469 #ifdef VERBOSE_TRACES
1470 		CTR2(KTR_CXGBE, "%s: tid %d wrote updated T_RTSEQ_RECENT",
1471 		    __func__, tlsp->tid);
1472 #endif
1473 		write_set_tcb_field_ulp(tlsp, out, txq, W_TCB_T_RTSEQ_RECENT,
1474 		    V_TCB_T_RTSEQ_RECENT(M_TCB_T_RTSEQ_RECENT),
1475 		    V_TCB_T_RTSEQ_RECENT(ntohl(tsopt[1])));
1476 		out += roundup2(LEN__SET_TCB_FIELD_ULP, 16);
1477 		fields++;
1478 
1479 		tlsp->prev_tsecr = ntohl(tsopt[1]);
1480 	}
1481 
1482 	if (first_wr || tlsp->prev_seq != tx_max) {
1483 		KASSERT(m->m_next == m_tls,
1484 		    ("trying to set TX_MAX for subsequent TLS WR"));
1485 #ifdef VERBOSE_TRACES
1486 		CTR4(KTR_CXGBE,
1487 		    "%s: tid %d setting TX_MAX to %u (tcp_seqno %u)",
1488 		    __func__, tlsp->tid, tx_max, tcp_seqno);
1489 #endif
1490 		write_set_tcb_field_ulp(tlsp, out, txq, W_TCB_TX_MAX,
1491 		    V_TCB_TX_MAX(M_TCB_TX_MAX), V_TCB_TX_MAX(tx_max));
1492 		out += roundup2(LEN__SET_TCB_FIELD_ULP, 16);
1493 		fields++;
1494 	}
1495 
1496 	/*
1497 	 * If there is data to drop at the beginning of this TLS
1498 	 * record or if this is a retransmit, reset SND_UNA_RAW to 0 so
1499 	 * that SND_UNA == TX_MAX.
1500 	 */
1501 	if (tlsp->prev_seq != tx_max || mtod(m_tls, vm_offset_t) != 0) {
1502 		KASSERT(m->m_next == m_tls,
1503 		    ("trying to clear SND_UNA_RAW for subsequent TLS WR"));
1504 #ifdef VERBOSE_TRACES
1505 		CTR2(KTR_CXGBE, "%s: tid %d clearing SND_UNA_RAW", __func__,
1506 		    tlsp->tid);
1507 #endif
1508 		write_set_tcb_field_ulp(tlsp, out, txq, W_TCB_SND_UNA_RAW,
1509 		    V_TCB_SND_UNA_RAW(M_TCB_SND_UNA_RAW),
1510 		    V_TCB_SND_UNA_RAW(0));
1511 		out += roundup2(LEN__SET_TCB_FIELD_ULP, 16);
1512 		fields++;
1513 	}
1514 
1515 	/*
1516 	 * Store the expected sequence number of the next byte after
1517 	 * this record.
1518 	 */
1519 	tlsp->prev_seq = tcp_seqno + tlen;
1520 
1521 	if (first_wr || tlsp->prev_ack != ntohl(tcp->th_ack)) {
1522 		KASSERT(m->m_next == m_tls,
1523 		    ("trying to set RCV_NXT for subsequent TLS WR"));
1524 		write_set_tcb_field_ulp(tlsp, out, txq, W_TCB_RCV_NXT,
1525 		    V_TCB_RCV_NXT(M_TCB_RCV_NXT),
1526 		    V_TCB_RCV_NXT(ntohl(tcp->th_ack)));
1527 		out += roundup2(LEN__SET_TCB_FIELD_ULP, 16);
1528 		fields++;
1529 
1530 		tlsp->prev_ack = ntohl(tcp->th_ack);
1531 	}
1532 
1533 	if (first_wr || tlsp->prev_win != ntohs(tcp->th_win)) {
1534 		KASSERT(m->m_next == m_tls,
1535 		    ("trying to set RCV_WND for subsequent TLS WR"));
1536 		write_set_tcb_field_ulp(tlsp, out, txq, W_TCB_RCV_WND,
1537 		    V_TCB_RCV_WND(M_TCB_RCV_WND),
1538 		    V_TCB_RCV_WND(ntohs(tcp->th_win)));
1539 		out += roundup2(LEN__SET_TCB_FIELD_ULP, 16);
1540 		fields++;
1541 
1542 		tlsp->prev_win = ntohs(tcp->th_win);
1543 	}
1544 
1545 	/* Use cached value for first record in chain. */
1546 	if (m->m_next == m_tls)
1547 		nsegs = mbuf_nsegs(m);
1548 	else
1549 		nsegs = sglist_count_mbuf_epg(m_tls, m_tls->m_epg_hdrlen +
1550 		    offset, plen - (m_tls->m_epg_hdrlen + offset));
1551 
1552 	/* Calculate the size of the TLS work request. */
1553 	twr_len = ktls_base_wr_size(tlsp);
1554 
1555 	imm_len = 0;
1556 	if (offset == 0)
1557 		imm_len += m_tls->m_epg_hdrlen;
1558 	if (plen == tlen)
1559 		imm_len += AES_BLOCK_LEN;
1560 	twr_len += roundup2(imm_len, 16);
1561 	twr_len += ktls_sgl_size(nsegs);
1562 
1563 	/*
1564 	 * If any field updates were required, determine if they can
1565 	 * be included in the TLS work request.  If not, use the
1566 	 * FW_ULPTX_WR work request header at 'wr' as a dedicated work
1567 	 * request for the field updates and start a new work request
1568 	 * for the TLS work request afterward.
1569 	 */
1570 	if (fields != 0) {
1571 		wr_len = fields * roundup2(LEN__SET_TCB_FIELD_ULP, 16);
1572 		if (twr_len + wr_len <= SGE_MAX_WR_LEN &&
1573 		    tlsp->sc->tlst.combo_wrs) {
1574 			wr_len += twr_len;
1575 			txpkt = (void *)out;
1576 		} else {
1577 			wr_len += sizeof(*wr);
1578 			wr->op_to_compl = htobe32(V_FW_WR_OP(FW_ULPTX_WR));
1579 			wr->flowid_len16 = htobe32(F_FW_ULPTX_WR_DATA |
1580 			    V_FW_WR_LEN16(wr_len / 16));
1581 			wr->cookie = 0;
1582 
1583 			/*
1584 			 * If we were using scratch space, copy the
1585 			 * field updates work request to the ring.
1586 			 */
1587 			if (using_scratch) {
1588 				out = dst;
1589 				copy_to_txd(eq, txq->ss, &out, wr_len);
1590 			}
1591 
1592 			ndesc = howmany(wr_len, EQ_ESIZE);
1593 			MPASS(ndesc <= available);
1594 
1595 			txq->raw_wrs++;
1596 			txsd = &txq->sdesc[pidx];
1597 			txsd->m = NULL;
1598 			txsd->desc_used = ndesc;
1599 			IDXINCR(pidx, ndesc, eq->sidx);
1600 			dst = &eq->desc[pidx];
1601 
1602 			/*
1603 			 * Determine if we should use scratch space
1604 			 * for the TLS work request based on the
1605 			 * available space after advancing pidx for
1606 			 * the field updates work request.
1607 			 */
1608 			wr_len = twr_len;
1609 			using_scratch = (eq->sidx - pidx <
1610 			    howmany(wr_len, EQ_ESIZE));
1611 			if (using_scratch)
1612 				wr = (void *)txq->ss;
1613 			else
1614 				wr = dst;
1615 			txpkt = (void *)(wr + 1);
1616 		}
1617 	} else {
1618 		wr_len = twr_len;
1619 		txpkt = (void *)out;
1620 	}
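	/*
	 * At this point 'wr' and 'txpkt' describe either a single combo
	 * work request carrying both the field updates and the TLS
	 * request, or a standalone TLS work request (any field updates
	 * were flushed as their own WR above).
	 */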
1621 
1622 	wr_len = roundup2(wr_len, 16);
1623 	MPASS(ndesc + howmany(wr_len, EQ_ESIZE) <= available);
1624 
1625 	/* FW_ULPTX_WR */
1626 	wr->op_to_compl = htobe32(V_FW_WR_OP(FW_ULPTX_WR));
1627 	wr->flowid_len16 = htobe32(F_FW_ULPTX_WR_DATA |
1628 	    V_FW_WR_LEN16(wr_len / 16));
1629 	wr->cookie = 0;
1630 
1631 	/* ULP_TXPKT */
1632 	txpkt->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
1633 	    V_ULP_TXPKT_DATAMODIFY(0) |
1634 	    V_ULP_TXPKT_CHANNELID(tlsp->vi->pi->port_id) | V_ULP_TXPKT_DEST(0) |
1635 	    V_ULP_TXPKT_FID(txq->eq.cntxt_id) | V_ULP_TXPKT_RO(1));
1636 	txpkt->len = htobe32(howmany(twr_len - sizeof(*wr), 16));
1637 
1638 	/* ULPTX_IDATA sub-command */
1639 	idata = (void *)(txpkt + 1);
1640 	idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
1641 	    V_ULP_TX_SC_MORE(1));
1642 	idata->len = sizeof(struct cpl_tx_sec_pdu);
1643 
1644 	/*
1645 	 * The key context, CPL_TX_DATA, and immediate data are part
1646 	 * of this ULPTX_IDATA when using an inline key.  When reading
1647 	 * the key from memory, the CPL_TX_DATA and immediate data are
1648 	 * part of a separate ULPTX_IDATA.
1649 	 */
1650 	if (tlsp->inline_key)
1651 		idata->len += tlsp->tx_key_info_size +
1652 		    sizeof(struct cpl_tx_data) + imm_len;
1653 	idata->len = htobe32(idata->len);
1654 
1655 	/* CPL_TX_SEC_PDU */
1656 	sec_pdu = (void *)(idata + 1);
1657 
1658 	/*
1659 	 * For short records, AAD is counted as header data in SCMD0,
1660 	 * the IV is next followed by a cipher region for the payload.
1661 	 */
1662 	if (plen == tlen) {
1663 		aad_start = 0;
1664 		aad_stop = 0;
1665 		iv_offset = 1;
1666 		auth_start = 0;
1667 		auth_stop = 0;
1668 		auth_insert = 0;
1669 		cipher_start = AES_BLOCK_LEN + 1;
1670 		cipher_stop = 0;
1671 
1672 		sec_pdu->pldlen = htobe32(16 + plen -
1673 		    (m_tls->m_epg_hdrlen + offset));
1674 
1675 		/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
1676 		sec_pdu->seqno_numivs = tlsp->scmd0_short.seqno_numivs;
1677 		sec_pdu->ivgen_hdrlen = htobe32(
1678 		    tlsp->scmd0_short.ivgen_hdrlen |
1679 		    V_SCMD_HDR_LEN(offset == 0 ? m_tls->m_epg_hdrlen : 0));
1680 
1681 		txq->kern_tls_short++;
1682 	} else {
1683 		/*
1684 		 * AAD is TLS header.  IV is after AAD.  The cipher region
1685 		 * starts after the IV.  See comments in ccr_authenc() and
1686 		 * ccr_gmac() in t4_crypto.c regarding cipher and auth
1687 		 * start/stop values.
1688 		 */
1689 		aad_start = 1;
1690 		aad_stop = TLS_HEADER_LENGTH;
1691 		iv_offset = TLS_HEADER_LENGTH + 1;
1692 		cipher_start = m_tls->m_epg_hdrlen + 1;
1693 		/* These values are identical for GCM and CBC. */
1694 		cipher_stop = 0;
1695 		auth_start = cipher_start;
1696 		auth_stop = 0;
1697 		auth_insert = 0;
1704 
1705 		sec_pdu->pldlen = htobe32(plen);
1706 
1707 		/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
1708 		sec_pdu->seqno_numivs = tlsp->scmd0.seqno_numivs;
1709 		sec_pdu->ivgen_hdrlen = tlsp->scmd0.ivgen_hdrlen;
1710 
1711 		if (mtod(m_tls, vm_offset_t) == 0)
1712 			txq->kern_tls_full++;
1713 		else
1714 			txq->kern_tls_partial++;
1715 	}
1716 	sec_pdu->op_ivinsrtofst = htobe32(
1717 	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
1718 	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
1719 	    V_CPL_TX_SEC_PDU_IVINSRTOFST(iv_offset));
1720 	sec_pdu->aadstart_cipherstop_hi = htobe32(
1721 	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
1722 	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
1723 	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
1724 	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4));
1725 	sec_pdu->cipherstop_lo_authinsert = htobe32(
1726 	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) |
1727 	    V_CPL_TX_SEC_PDU_AUTHSTART(auth_start) |
1728 	    V_CPL_TX_SEC_PDU_AUTHSTOP(auth_stop) |
1729 	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));
1730 
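	/*
	 * SCMD1 carries the 64-bit TLS record sequence number, which
	 * the crypto engine uses as per-record state (e.g. as part of
	 * the AES-GCM nonce).
	 */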
1731 	sec_pdu->scmd1 = htobe64(m_tls->m_epg_seqno);
1732 
1733 	/* Key context */
1734 	out = (void *)(sec_pdu + 1);
1735 	if (tlsp->inline_key) {
1736 		memcpy(out, &tlsp->keyctx, tlsp->tx_key_info_size);
1737 		out += tlsp->tx_key_info_size;
1738 	} else {
1739 		/* ULPTX_SC_MEMRD to read key context. */
1740 		memrd = (void *)out;
1741 		memrd->cmd_to_len = htobe32(V_ULPTX_CMD(ULP_TX_SC_MEMRD) |
1742 		    V_ULP_TX_SC_MORE(1) |
1743 		    V_ULPTX_LEN16(tlsp->tx_key_info_size >> 4));
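		/* Key context addresses are in 32-byte units. */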
1744 		memrd->addr = htobe32(tlsp->tx_key_addr >> 5);
1745 
1746 		/* ULPTX_IDATA for CPL_TX_DATA and TLS header. */
1747 		idata = (void *)(memrd + 1);
1748 		idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
1749 		    V_ULP_TX_SC_MORE(1));
1750 		idata->len = htobe32(sizeof(struct cpl_tx_data) + imm_len);
1751 
1752 		out = (void *)(idata + 1);
1753 	}
1754 
1755 	/* CPL_TX_DATA */
1756 	tx_data = (void *)out;
1757 	OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tlsp->tid));
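	/*
	 * Select the MSS: prefer the TSO segment size, then the MSS
	 * from a previous request on this connection, and finally
	 * fall back to the interface MTU less the IP and TCP header
	 * lengths.
	 */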
1758 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1759 		mss = m->m_pkthdr.tso_segsz;
1760 		tlsp->prev_mss = mss;
1761 	} else if (tlsp->prev_mss != 0)
1762 		mss = tlsp->prev_mss;
1763 	else
1764 		mss = if_getmtu(tlsp->vi->ifp) -
1765 		    (m->m_pkthdr.l3hlen + m->m_pkthdr.l4hlen);
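	/*
	 * When resuming mid-record, only the bytes past the TLS
	 * header and resumption offset are sent, so both the length
	 * and the starting sequence number are advanced accordingly.
	 */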
1766 	if (offset == 0) {
1767 		tx_data->len = htobe32(V_TX_DATA_MSS(mss) | V_TX_LENGTH(tlen));
1768 		tx_data->rsvd = htobe32(tcp_seqno);
1769 	} else {
1770 		tx_data->len = htobe32(V_TX_DATA_MSS(mss) |
1771 		    V_TX_LENGTH(tlen - (m_tls->m_epg_hdrlen + offset)));
1772 		tx_data->rsvd = htobe32(tcp_seqno + m_tls->m_epg_hdrlen + offset);
1773 	}
1774 	tx_data->flags = htobe32(F_TX_BYPASS);
1775 	if (last_wr && tcp->th_flags & TH_PUSH)
1776 		tx_data->flags |= htobe32(F_TX_PUSH | F_TX_SHOVE);
1777 
1778 	/* Populate the TLS header */
1779 	out = (void *)(tx_data + 1);
1780 	if (offset == 0) {
1781 		memcpy(out, m_tls->m_epg_hdr, m_tls->m_epg_hdrlen);
1782 		out += m_tls->m_epg_hdrlen;
1783 	}
1784 
1785 	/* AES IV for a short record. */
1786 	if (plen == tlen) {
1787 		iv = out;
1788 		if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) {
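			/*
			 * Construct the GCM IV block: the 4-byte salt
			 * from the key context, the 8-byte explicit
			 * nonce from the TLS record header, and a
			 * 4-byte big-endian block counter.  GCM
			 * numbers payload blocks starting at 2, so
			 * the counter is advanced past any AES blocks
			 * skipped when resuming mid-record.
			 */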
1789 			memcpy(iv, tlsp->keyctx.u.txhdr.txsalt, SALT_SIZE);
1790 			memcpy(iv + 4, hdr + 1, 8);
1791 			*(uint32_t *)(iv + 12) = htobe32(2 +
1792 			    offset / AES_BLOCK_LEN);
1793 		} else
1794 			memcpy(iv, hdr + 1, AES_BLOCK_LEN);
1795 		out += AES_BLOCK_LEN;
1796 	}
1797 
1798 	if (imm_len % 16 != 0) {
1799 		/* Zero pad to an 8-byte boundary. */
1800 		memset(out, 0, 8 - (imm_len % 8));
1801 		out += 8 - (imm_len % 8);
1802 
1803 		/*
1804 		 * Insert a ULP_TX_SC_NOOP if needed so the SGL is
1805 		 * 16-byte aligned.
1806 		 */
1807 		if (imm_len % 16 <= 8) {
1808 			idata = (void *)out;
1809 			idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
1810 			idata->len = htobe32(0);
1811 			out = (void *)(idata + 1);
1812 		}
1813 	}
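	/*
	 * For example, an immediate length of 21 bytes is padded
	 * with 3 zero bytes to 24; since 21 % 16 <= 8, an 8-byte
	 * NOOP then brings the total to 32, restoring the 16-byte
	 * alignment required for the SGL.
	 */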
1814 
1815 	/* SGL for record payload */
1816 	sglist_reset(txq->gl);
1817 	if (sglist_append_mbuf_epg(txq->gl, m_tls, m_tls->m_epg_hdrlen + offset,
1818 	    plen - (m_tls->m_epg_hdrlen + offset)) != 0) {
1819 #ifdef INVARIANTS
1820 		panic("%s: failed to append sglist", __func__);
1821 #endif
1822 	}
1823 	write_gl_to_buf(txq->gl, out);
1824 
1825 	if (using_scratch) {
1826 		out = dst;
1827 		copy_to_txd(eq, txq->ss, &out, wr_len);
1828 	}
1829 
1830 	ndesc += howmany(wr_len, EQ_ESIZE);
1831 	MPASS(ndesc <= available);
1832 
1833 	txq->kern_tls_records++;
1834 	txq->kern_tls_octets += tlen - mtod(m_tls, vm_offset_t);
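	/*
	 * Bytes sent before the start of the requested range, either
	 * the record prefix when the full record is resent or the
	 * span between the resumption point and the requested start,
	 * count as waste.
	 */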
1835 	if (mtod(m_tls, vm_offset_t) != 0) {
1836 		if (offset == 0)
1837 			txq->kern_tls_waste += mtod(m_tls, vm_offset_t);
1838 		else
1839 			txq->kern_tls_waste += mtod(m_tls, vm_offset_t) -
1840 			    (m_tls->m_epg_hdrlen + offset);
1841 	}
1842 
1843 	txsd = &txq->sdesc[pidx];
1844 	if (last_wr)
1845 		txsd->m = m;
1846 	else
1847 		txsd->m = NULL;
1848 	txsd->desc_used = howmany(wr_len, EQ_ESIZE);
1849 
1850 	return (ndesc);
1851 }
1852 
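/*
 * Write a work request sending a short TCP segment that carries the
 * FIN for a TLS packet chain.  The packet headers are sent as
 * immediate data with the IP length and TCP sequence number rewritten
 * along the way.
 */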
1853 static int
1854 ktls_write_tcp_fin(struct sge_txq *txq, void *dst, struct mbuf *m,
1855     u_int available, tcp_seq tcp_seqno, u_int pidx)
1856 {
1857 	struct tx_sdesc *txsd;
1858 	struct fw_eth_tx_pkt_wr *wr;
1859 	struct cpl_tx_pkt_core *cpl;
1860 	uint32_t ctrl;
1861 	uint64_t ctrl1;
1862 	int len16, ndesc, pktlen;
1863 	struct ether_header *eh;
1864 	struct ip *ip, newip;
1865 	struct ip6_hdr *ip6, newip6;
1866 	struct tcphdr *tcp, newtcp;
1867 	caddr_t out;
1868 
1869 	TXQ_LOCK_ASSERT_OWNED(txq);
1870 	M_ASSERTPKTHDR(m);
1871 
1872 	wr = dst;
1873 	pktlen = m->m_len;
1874 	ctrl = sizeof(struct cpl_tx_pkt_core) + pktlen;
1875 	len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) + ctrl, 16);
1876 	ndesc = tx_len16_to_desc(len16);
1877 	MPASS(ndesc <= available);
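	/*
	 * For example, a 54-byte IPv4 FIN segment (14-byte Ethernet,
	 * 20-byte IP, and 20-byte TCP headers) gives ctrl = 16 + 54 =
	 * 70 and len16 = howmany(16 + 70, 16) = 6, which fits in two
	 * 64-byte descriptors.
	 */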
1878 
1879 	/* Firmware work request header */
1880 	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
1881 	    V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl));
1882 
1883 	ctrl = V_FW_WR_LEN16(len16);
1884 	wr->equiq_to_len16 = htobe32(ctrl);
1885 	wr->r3 = 0;
1886 
1887 	cpl = (void *)(wr + 1);
1888 
1889 	/* CPL header */
1890 	cpl->ctrl0 = txq->cpl_ctrl0;
1891 	cpl->pack = 0;
1892 	cpl->len = htobe16(pktlen);
1893 
1894 	out = (void *)(cpl + 1);
1895 
1896 	/* Copy over Ethernet header. */
1897 	eh = mtod(m, struct ether_header *);
1898 	copy_to_txd(&txq->eq, (caddr_t)eh, &out, m->m_pkthdr.l2hlen);
1899 
1900 	/* Fixup length in IP header and copy out. */
1901 	if (ntohs(eh->ether_type) == ETHERTYPE_IP) {
1902 		ip = (void *)((char *)eh + m->m_pkthdr.l2hlen);
1903 		newip = *ip;
1904 		newip.ip_len = htons(pktlen - m->m_pkthdr.l2hlen);
1905 		copy_to_txd(&txq->eq, (caddr_t)&newip, &out, sizeof(newip));
1906 		if (m->m_pkthdr.l3hlen > sizeof(*ip))
1907 			copy_to_txd(&txq->eq, (caddr_t)(ip + 1), &out,
1908 			    m->m_pkthdr.l3hlen - sizeof(*ip));
1909 		ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP) |
1910 		    V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) |
1911 		    V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen);
1912 	} else {
1913 		ip6 = (void *)((char *)eh + m->m_pkthdr.l2hlen);
1914 		newip6 = *ip6;
1915 		newip6.ip6_plen = htons(pktlen - m->m_pkthdr.l2hlen -
 		    sizeof(*ip6));
1916 		copy_to_txd(&txq->eq, (caddr_t)&newip6, &out, sizeof(newip6));
1917 		MPASS(m->m_pkthdr.l3hlen == sizeof(*ip6));
1918 		ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP6) |
1919 		    V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) |
1920 		    V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen);
1921 	}
1922 	cpl->ctrl1 = htobe64(ctrl1);
1923 	txq->txcsum++;
1924 
1925 	/* Set sequence number in TCP header. */
1926 	tcp = (void *)((char *)eh + m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen);
1927 	newtcp = *tcp;
1928 	newtcp.th_seq = htonl(tcp_seqno);
1929 	copy_to_txd(&txq->eq, (caddr_t)&newtcp, &out, sizeof(newtcp));
1930 
1931 	/* Copy rest of packet. */
1932 	copy_to_txd(&txq->eq, (caddr_t)(tcp + 1), &out, m->m_len -
1933 	    (m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen + sizeof(*tcp)));
1934 	txq->imm_wrs++;
1935 
1936 	txq->txpkt_wrs++;
1937 
1938 	txq->kern_tls_fin++;
1939 
1940 	txsd = &txq->sdesc[pidx];
1941 	txsd->m = m;
1942 	txsd->desc_used = ndesc;
1943 
1944 	return (ndesc);
1945 }
1946 
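/*
 * Write the work requests for a queued kernel TLS packet: an optional
 * TCP-options-only packet, an optional raw L2T update, one TLS work
 * request per TLS record in the chain, and an optional FIN packet.
 * Returns the total number of hardware descriptors used.
 */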
1947 int
1948 t6_ktls_write_wr(struct sge_txq *txq, void *dst, struct mbuf *m,
1949     u_int available)
1950 {
1951 	struct sge_eq *eq = &txq->eq;
1952 	struct tx_sdesc *txsd;
1953 	struct tlspcb *tlsp;
1954 	struct tcphdr *tcp;
1955 	struct mbuf *m_tls;
1956 	struct ether_header *eh;
1957 	tcp_seq tcp_seqno;
1958 	u_int ndesc, pidx, totdesc;
1959 	uint16_t vlan_tag;
1960 	bool has_fin, set_l2t_idx;
1961 	void *tsopt;
1962 
1963 	M_ASSERTPKTHDR(m);
1964 	MPASS(m->m_pkthdr.snd_tag != NULL);
1965 	tlsp = mst_to_tls(m->m_pkthdr.snd_tag);
1966 
1967 	totdesc = 0;
1968 	eh = mtod(m, struct ether_header *);
1969 	tcp = (struct tcphdr *)((char *)eh + m->m_pkthdr.l2hlen +
1970 	    m->m_pkthdr.l3hlen);
1971 	pidx = eq->pidx;
1972 	has_fin = (tcp->th_flags & TH_FIN) != 0;
1973 
1974 	/*
1975 	 * If this TLS record has a FIN, then we will send any
1976 	 * requested options as part of the FIN packet.
1977 	 */
1978 	if (!has_fin && ktls_has_tcp_options(tcp)) {
1979 		ndesc = ktls_write_tcp_options(txq, dst, m, available, pidx);
1980 		totdesc += ndesc;
1981 		IDXINCR(pidx, ndesc, eq->sidx);
1982 		dst = &eq->desc[pidx];
1983 #ifdef VERBOSE_TRACES
1984 		CTR2(KTR_CXGBE, "%s: tid %d wrote TCP options packet", __func__,
1985 		    tlsp->tid);
1986 #endif
1987 	}
1988 
1989 	/*
1990 	 * Allocate a new L2T entry if necessary.  This may write out
1991 	 * a work request to the txq.
1992 	 */
1993 	if (m->m_flags & M_VLANTAG)
1994 		vlan_tag = m->m_pkthdr.ether_vtag;
1995 	else
1996 		vlan_tag = 0xfff;
1997 	set_l2t_idx = false;
1998 	if (tlsp->l2te == NULL || tlsp->l2te->vlan != vlan_tag ||
1999 	    memcmp(tlsp->l2te->dmac, eh->ether_dhost, ETHER_ADDR_LEN) != 0) {
2000 		set_l2t_idx = true;
2001 		if (tlsp->l2te)
2002 			t4_l2t_release(tlsp->l2te);
2003 		tlsp->l2te = t4_l2t_alloc_tls(tlsp->sc, txq, dst, &ndesc,
2004 		    vlan_tag, tlsp->vi->pi->lport, eh->ether_dhost);
2005 		if (tlsp->l2te == NULL)
2006 			CXGBE_UNIMPLEMENTED("failed to allocate TLS L2TE");
2007 		if (ndesc != 0) {
2008 			MPASS(ndesc <= available - totdesc);
2009 
2010 			txq->raw_wrs++;
2011 			txsd = &txq->sdesc[pidx];
2012 			txsd->m = NULL;
2013 			txsd->desc_used = ndesc;
2014 			totdesc += ndesc;
2015 			IDXINCR(pidx, ndesc, eq->sidx);
2016 			dst = &eq->desc[pidx];
2017 		}
2018 	}
2019 
2020 	/*
2021 	 * Iterate over each TLS record constructing a work request
2022 	 * for that record.
2023 	 */
2024 	for (m_tls = m->m_next; m_tls != NULL; m_tls = m_tls->m_next) {
2025 		MPASS(m_tls->m_flags & M_EXTPG);
2026 
2027 		/*
2028 		 * Determine the initial TCP sequence number for this
2029 		 * record.
2030 		 */
2031 		tsopt = NULL;
2032 		if (m_tls == m->m_next) {
2033 			tcp_seqno = ntohl(tcp->th_seq) -
2034 			    mtod(m_tls, vm_offset_t);
2035 			if (tlsp->using_timestamps)
2036 				tsopt = ktls_find_tcp_timestamps(tcp);
2037 		} else {
2038 			MPASS(mtod(m_tls, vm_offset_t) == 0);
2039 			tcp_seqno = tlsp->prev_seq;
2040 		}
2041 
2042 		ndesc = ktls_write_tls_wr(tlsp, txq, dst, m, tcp, m_tls,
2043 		    available - totdesc, tcp_seqno, tsopt, pidx, set_l2t_idx);
2044 		totdesc += ndesc;
2045 		IDXINCR(pidx, ndesc, eq->sidx);
2046 		dst = &eq->desc[pidx];
2047 
2048 		/* Only need to set the L2T index once. */
2049 		set_l2t_idx = false;
2050 	}
2051 
2052 	if (has_fin) {
2053 		/*
2054 		 * If the TCP header for this chain has FIN set, then
2055 		 * explicitly send a packet that has FIN set.  This
2056 		 * will also have PUSH set if requested.  This assumes
2057 		 * we sent at least one TLS record work request and
2058 		 * uses the TCP sequence number after that request as
2059 		 * the sequence number for the FIN packet.
2060 		 */
2061 		ndesc = ktls_write_tcp_fin(txq, dst, m, available,
2062 		    tlsp->prev_seq, pidx);
2063 		totdesc += ndesc;
2064 	}
2065 
2066 	MPASS(totdesc <= available);
2067 	return (totdesc);
2068 }
2069 
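/*
 * Release the connection's L2T entry, tid, CLIP entry, and key-memory
 * allocation when the last reference to the send tag is dropped.
 */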
2070 static void
2071 t6_tls_tag_free(struct m_snd_tag *mst)
2072 {
2073 	struct adapter *sc;
2074 	struct tlspcb *tlsp;
2075 
2076 	tlsp = mst_to_tls(mst);
2077 	sc = tlsp->sc;
2078 
2079 	CTR2(KTR_CXGBE, "%s: tid %d", __func__, tlsp->tid);
2080 
2081 	if (tlsp->l2te)
2082 		t4_l2t_release(tlsp->l2te);
2083 	if (tlsp->tid >= 0)
2084 		release_tid(sc, tlsp->tid, tlsp->ctrlq);
2085 	if (tlsp->ce)
2086 		t4_release_clip_entry(sc, tlsp->ce);
2087 	if (tlsp->tx_key_addr >= 0)
2088 		t4_free_tls_keyid(sc, tlsp->tx_key_addr);
2089 
2090 	zfree(tlsp, M_CXGBE);
2091 }
2092 
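/*
 * Register the shared CPL handler used to complete TLS tid allocation
 * at module load; passing NULL at unload removes it.
 */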
2093 void
2094 t6_ktls_modload(void)
2095 {
2096 
2097 	t4_register_shared_cpl_handler(CPL_ACT_OPEN_RPL, ktls_act_open_rpl,
2098 	    CPL_COOKIE_KERN_TLS);
2099 }
2100 
2101 void
2102 t6_ktls_modunload(void)
2103 {
2104 
2105 	t4_register_shared_cpl_handler(CPL_ACT_OPEN_RPL, NULL,
2106 	    CPL_COOKIE_KERN_TLS);
2107 }
2108 
2109 #else
2110 
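/*
 * Stubs used when the kernel is built without INET or INET6; NIC TLS
 * requires TCP/IP support.
 */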
2111 int
2112 t6_tls_tag_alloc(if_t ifp, union if_snd_tag_alloc_params *params,
2113     struct m_snd_tag **pt)
2114 {
2115 	return (ENXIO);
2116 }
2117 
2118 int
2119 t6_ktls_parse_pkt(struct mbuf *m)
2120 {
2121 	return (EINVAL);
2122 }
2123 
2124 int
2125 t6_ktls_write_wr(struct sge_txq *txq, void *dst, struct mbuf *m,
2126     u_int available)
2127 {
2128 	panic("can't happen");
2129 }
2130 
2131 void
2132 t6_ktls_modload(void)
2133 {
2134 }
2135 
2136 void
2137 t6_ktls_modunload(void)
2138 {
2139 }
2140 
2141 #endif
2142