xref: /freebsd/sys/dev/cxgbe/crypto/t6_kern_tls.c (revision 55141f2c8991b2a6adbf30bb0fe3e6cbc303f06d)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2018-2019 Chelsio Communications, Inc.
5  * All rights reserved.
6  * Written by: John Baldwin <jhb@FreeBSD.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #include "opt_inet.h"
31 #include "opt_inet6.h"
32 #include "opt_kern_tls.h"
33 
34 #include <sys/cdefs.h>
35 #include <sys/param.h>
36 #include <sys/ktr.h>
37 #include <sys/ktls.h>
38 #include <sys/sglist.h>
39 #include <sys/socket.h>
40 #include <sys/socketvar.h>
41 #include <sys/sockbuf.h>
42 #include <netinet/in.h>
43 #include <netinet/in_pcb.h>
44 #include <netinet/ip.h>
45 #include <netinet/ip6.h>
46 #include <netinet/tcp_var.h>
47 #include <opencrypto/cryptodev.h>
48 #include <opencrypto/xform.h>
49 
50 #include "common/common.h"
51 #include "common/t4_regs.h"
52 #include "common/t4_regs_values.h"
53 #include "common/t4_tcb.h"
54 #include "t4_l2t.h"
55 #include "t4_clip.h"
56 #include "t4_mp_ring.h"
57 #include "crypto/t4_crypto.h"
58 
59 #if defined(INET) || defined(INET6)
60 
61 #define TLS_HEADER_LENGTH		5
62 
/*
 * Cached SCMD (security command) words used when building
 * CPL_TX_SEC_PDU work requests for a session.  Both words are stored
 * in big-endian (wire) order once initialized in t6_tls_tag_alloc().
 */
struct tls_scmd {
	__be32 seqno_numivs;	/* V_SCMD_SEQ_NO_CTRL, cipher/auth modes, IV sizes */
	__be32 ivgen_hdrlen;	/* V_SCMD_IV_GEN_CTRL, frag enable, inline key ctx */
};
67 
/*
 * Per-session state for a NIC TLS transmit offload connection.  One is
 * allocated per TLS send tag (see t6_tls_tag_alloc()) and released via
 * t6_tls_tag_free().
 */
struct tlspcb {
	struct m_snd_tag com;	/* generic send tag; freed via t6_tls_tag_sw */
	struct vi_info *vi;	/* virtual interface */
	struct adapter *sc;
	struct l2t_entry *l2te;	/* L2 table entry used by this connection */
	struct sge_txq *txq;	/* TX queue all WRs for this session use */
	int tid;		/* Connection identifier (-1 until the active
				   open completes successfully) */

	int tx_key_addr;	/* key address in adapter memory, -1 if inline */
	bool inline_key;	/* key context sent as immediate data per WR */
	bool using_timestamps;	/* connection had TF_REQ_TSTMP set */
	unsigned char enc_mode;	/* SCMD_CIPH_MODE_* cipher mode */

	struct tls_scmd scmd0;		/* SCMD words for full TLS records */
	struct tls_scmd scmd0_short;	/* SCMD words for partial records */

	unsigned int tx_key_info_size;	/* size of the TX key context */

	/*
	 * NOTE(review): presumably the most recent values written to the
	 * hardware TCB / sent on the wire; maintained by the transmit path
	 * outside this view — confirm against t6_ktls_write_wr().
	 */
	uint32_t prev_seq;
	uint32_t prev_ack;
	uint32_t prev_tsecr;
	uint16_t prev_win;
	uint16_t prev_mss;

	/* Only used outside of setup and teardown when using inline keys. */
	struct tls_keyctx keyctx;

	/* Fields only used during setup and teardown. */
	struct inpcb *inp;	/* backpointer to host stack's PCB */
	struct sge_wrq *ctrlq;	/* control queue for the active open CPL */
	struct clip_entry *ce;	/* CLIP table entry used by this tid */

	bool open_pending;	/* waiting for CPL_ACT_OPEN_RPL */
};
102 
static void t6_tls_tag_free(struct m_snd_tag *mst);
static int ktls_setup_keys(struct tlspcb *tlsp,
    const struct ktls_session *tls, struct sge_txq *txq);

/* Send tag method table for NIC TLS send tags. */
static const struct if_snd_tag_sw t6_tls_tag_sw = {
	.snd_tag_free = t6_tls_tag_free,
	.type = IF_SND_TAG_TYPE_TLS
};
111 
112 static inline struct tlspcb *
113 mst_to_tls(struct m_snd_tag *t)
114 {
115 	return (__containerof(t, struct tlspcb, com));
116 }
117 
118 static struct tlspcb *
119 alloc_tlspcb(if_t ifp, struct vi_info *vi, int flags)
120 {
121 	struct port_info *pi = vi->pi;
122 	struct adapter *sc = pi->adapter;
123 	struct tlspcb *tlsp;
124 
125 	tlsp = malloc(sizeof(*tlsp), M_CXGBE, M_ZERO | flags);
126 	if (tlsp == NULL)
127 		return (NULL);
128 
129 	m_snd_tag_init(&tlsp->com, ifp, &t6_tls_tag_sw);
130 	tlsp->vi = vi;
131 	tlsp->sc = sc;
132 	tlsp->ctrlq = &sc->sge.ctrlq[pi->port_id];
133 	tlsp->tid = -1;
134 	tlsp->tx_key_addr = -1;
135 
136 	return (tlsp);
137 }
138 
139 static int
140 ktls_act_open_cpl_size(bool isipv6)
141 {
142 
143 	if (isipv6)
144 		return (sizeof(struct cpl_t6_act_open_req6));
145 	else
146 		return (sizeof(struct cpl_t6_act_open_req));
147 }
148 
/*
 * Build a T6 CPL_ACT_OPEN_REQ for an IPv4 connection into 'dst'.  This
 * claims a hardware tid for an already-established host-stack
 * connection; F_NON_OFFLOAD in opt0 keeps TCP processing on the host.
 */
static void
mk_ktls_act_open_req(struct adapter *sc, struct vi_info *vi, struct inpcb *inp,
    struct tlspcb *tlsp, int atid, void *dst)
{
	struct tcpcb *tp = intotcpcb(inp);
	struct cpl_t6_act_open_req *cpl6;
	struct cpl_act_open_req *cpl;
	uint64_t options;
	int qid_atid;

	cpl6 = dst;
	cpl = (struct cpl_act_open_req *)cpl6;
	INIT_TP_WR(cpl6, 0);
	/*
	 * Steer the reply to the firmware event queue and tag it with our
	 * atid and the kernel TLS cookie so it reaches ktls_act_open_rpl().
	 */
	qid_atid = V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
	    V_TID_COOKIE(CPL_COOKIE_KERN_TLS);
	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
		qid_atid));
	inp_4tuple_get(inp, &cpl->local_ip, &cpl->local_port,
	    &cpl->peer_ip, &cpl->peer_port);

	options = F_TCAM_BYPASS | V_ULP_MODE(ULP_MODE_NONE);
	options |= V_SMAC_SEL(vi->smt_idx) | V_TX_CHAN(vi->pi->tx_chan);
	options |= F_NON_OFFLOAD;
	cpl->opt0 = htobe64(options);

	options = V_TX_QUEUE(sc->params.tp.tx_modq[vi->pi->tx_chan]);
	/* Mirror the host connection's use of TCP timestamps. */
	if (tp->t_flags & TF_REQ_TSTMP)
		options |= F_TSTAMPS_EN;
	cpl->opt2 = htobe32(options);
}
179 
/*
 * IPv6 variant of mk_ktls_act_open_req(): build a T6 CPL_ACT_OPEN_REQ6
 * into 'dst'.  Same opt0/opt2 settings; the 4-tuple is copied directly
 * from the inpcb as two 64-bit halves per address.
 */
static void
mk_ktls_act_open_req6(struct adapter *sc, struct vi_info *vi,
    struct inpcb *inp, struct tlspcb *tlsp, int atid, void *dst)
{
	struct tcpcb *tp = intotcpcb(inp);
	struct cpl_t6_act_open_req6 *cpl6;
	struct cpl_act_open_req6 *cpl;
	uint64_t options;
	int qid_atid;

	cpl6 = dst;
	cpl = (struct cpl_act_open_req6 *)cpl6;
	INIT_TP_WR(cpl6, 0);
	/* Route the reply to the firmware queue, tagged with our atid. */
	qid_atid = V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
	    V_TID_COOKIE(CPL_COOKIE_KERN_TLS);
	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
		qid_atid));
	cpl->local_port = inp->inp_lport;
	cpl->local_ip_hi = *(uint64_t *)&inp->in6p_laddr.s6_addr[0];
	cpl->local_ip_lo = *(uint64_t *)&inp->in6p_laddr.s6_addr[8];
	cpl->peer_port = inp->inp_fport;
	cpl->peer_ip_hi = *(uint64_t *)&inp->in6p_faddr.s6_addr[0];
	cpl->peer_ip_lo = *(uint64_t *)&inp->in6p_faddr.s6_addr[8];

	options = F_TCAM_BYPASS | V_ULP_MODE(ULP_MODE_NONE);
	options |= V_SMAC_SEL(vi->smt_idx) | V_TX_CHAN(vi->pi->tx_chan);
	options |= F_NON_OFFLOAD;
	cpl->opt0 = htobe64(options);

	options = V_TX_QUEUE(sc->params.tp.tx_modq[vi->pi->tx_chan]);
	/* Mirror the host connection's use of TCP timestamps. */
	if (tp->t_flags & TF_REQ_TSTMP)
		options |= F_TSTAMPS_EN;
	cpl->opt2 = htobe32(options);
}
214 
/*
 * Allocate and transmit the active open request for this session on
 * the control queue.  For IPv6, a CLIP entry for the local address is
 * claimed first and stored in tlsp->ce.  On success, open_pending is
 * set and the caller must wait for ktls_act_open_rpl() to clear it.
 */
static int
send_ktls_act_open_req(struct adapter *sc, struct vi_info *vi,
    struct inpcb *inp, struct tlspcb *tlsp, int atid)
{
	struct wrqe *wr;
	bool isipv6;

	isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
	if (isipv6) {
		tlsp->ce = t4_get_clip_entry(sc, &inp->in6p_laddr, true);
		if (tlsp->ce == NULL)
			return (ENOENT);
	}

	wr = alloc_wrqe(ktls_act_open_cpl_size(isipv6), tlsp->ctrlq);
	if (wr == NULL) {
		CTR2(KTR_CXGBE, "%s: atid %d failed to alloc WR", __func__,
		    atid);
		return (ENOMEM);
	}

	if (isipv6)
		mk_ktls_act_open_req6(sc, vi, inp, tlsp, atid, wrtod(wr));
	else
		mk_ktls_act_open_req(sc, vi, inp, tlsp, atid, wrtod(wr));

	/* Cleared by ktls_act_open_rpl() when the reply arrives. */
	tlsp->open_pending = true;
	t4_wrq_tx(sc, wr);
	return (0);
}
245 
/*
 * Handle CPL_ACT_OPEN_RPL for a kernel TLS active open.  On success
 * the allocated hardware tid is recorded; on failure tlsp->tid stays
 * -1, which t6_tls_tag_alloc() treats as an error.  Either way the
 * atid is released and the thread sleeping in t6_tls_tag_alloc() is
 * woken under the inp lock.
 */
static int
ktls_act_open_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
	u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));
	u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));
	struct tlspcb *tlsp = lookup_atid(sc, atid);
	struct inpcb *inp = tlsp->inp;

	CTR3(KTR_CXGBE, "%s: atid %d status %d", __func__, atid, status);
	free_atid(sc, atid);
	if (status == 0)
		tlsp->tid = GET_TID(cpl);

	INP_WLOCK(inp);
	tlsp->open_pending = false;
	wakeup(tlsp);
	INP_WUNLOCK(inp);
	return (0);
}
268 
269 /* SET_TCB_FIELD sent as a ULP command looks like this */
270 #define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
271     sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))
272 
273 _Static_assert((LEN__SET_TCB_FIELD_ULP + sizeof(struct ulptx_idata)) % 16 == 0,
274     "CPL_SET_TCB_FIELD ULP command not 16-byte aligned");
275 
/*
 * Write one CPL_SET_TCB_FIELD for this connection's tid at 'dst',
 * wrapped as a ULP_TX_PKT carrying an immediate-data sub-command.  A
 * trailing ULPTX_NOOP pads the command to a 16-byte multiple (see the
 * _Static_assert above).  The command requests no reply (F_NO_REPLY).
 */
static void
write_set_tcb_field_ulp(struct tlspcb *tlsp, void *dst, struct sge_txq *txq,
    uint16_t word, uint64_t mask, uint64_t val)
{
	struct ulp_txpkt *txpkt;
	struct ulptx_idata *idata;
	struct cpl_set_tcb_field_core *cpl;

	/* ULP_TXPKT */
	txpkt = dst;
	txpkt->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
	    V_ULP_TXPKT_DATAMODIFY(0) |
	    V_ULP_TXPKT_CHANNELID(tlsp->vi->pi->port_id) | V_ULP_TXPKT_DEST(0) |
	    V_ULP_TXPKT_FID(txq->eq.cntxt_id) | V_ULP_TXPKT_RO(1));
	txpkt->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));

	/* ULPTX_IDATA sub-command */
	idata = (struct ulptx_idata *)(txpkt + 1);
	idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	idata->len = htobe32(sizeof(*cpl));

	/* CPL_SET_TCB_FIELD */
	cpl = (struct cpl_set_tcb_field_core *)(idata + 1);
	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tlsp->tid));
	cpl->reply_ctrl = htobe16(F_NO_REPLY);
	cpl->word_cookie = htobe16(V_WORD(word));
	cpl->mask = htobe64(mask);
	cpl->val = htobe64(val);

	/* ULPTX_NOOP for 16-byte alignment */
	idata = (struct ulptx_idata *)(cpl + 1);
	idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
	idata->len = htobe32(0);
}
310 
/*
 * Queue a FW_ULPTX_WR that prepares the hardware TCB for TLS
 * transmission: switch the TCB into bypass mode, zero the raw send
 * sequence offsets, and (when TCP timestamps are in use) program the
 * timestamp offset.  The WR is built in an mbuf tagged with our send
 * tag and enqueued on the connection's TX queue ring.
 */
static int
ktls_set_tcb_fields(struct tlspcb *tlsp, struct tcpcb *tp, struct sge_txq *txq)
{
	struct fw_ulptx_wr *wr;
	struct mbuf *m;
	char *dst;
	void *items[1];
	int error, len;

	/* Three SET_TCB_FIELD commands, plus one more for timestamps. */
	len = sizeof(*wr) + 3 * roundup2(LEN__SET_TCB_FIELD_ULP, 16);
	if (tp->t_flags & TF_REQ_TSTMP)
		len += roundup2(LEN__SET_TCB_FIELD_ULP, 16);
	m = alloc_wr_mbuf(len, M_NOWAIT);
	if (m == NULL) {
		CTR2(KTR_CXGBE, "%s: tid %d failed to alloc WR mbuf", __func__,
		    tlsp->tid);
		return (ENOMEM);
	}
	m->m_pkthdr.snd_tag = m_snd_tag_ref(&tlsp->com);
	m->m_pkthdr.csum_flags |= CSUM_SND_TAG;

	/* FW_ULPTX_WR */
	wr = mtod(m, void *);
	wr->op_to_compl = htobe32(V_FW_WR_OP(FW_ULPTX_WR));
	wr->flowid_len16 = htobe32(F_FW_ULPTX_WR_DATA |
	    V_FW_WR_LEN16(len / 16));
	wr->cookie = 0;
	dst = (char *)(wr + 1);

	/* Clear TF_NON_OFFLOAD and set TF_CORE_BYPASS */
	write_set_tcb_field_ulp(tlsp, dst, txq, W_TCB_T_FLAGS,
	    V_TCB_T_FLAGS(V_TF_CORE_BYPASS(1) | V_TF_NON_OFFLOAD(1)),
	    V_TCB_T_FLAGS(V_TF_CORE_BYPASS(1)));
	dst += roundup2(LEN__SET_TCB_FIELD_ULP, 16);

	/* Clear the SND_UNA_RAW, SND_NXT_RAW, and SND_MAX_RAW offsets. */
	write_set_tcb_field_ulp(tlsp, dst, txq, W_TCB_SND_UNA_RAW,
	    V_TCB_SND_NXT_RAW(M_TCB_SND_NXT_RAW) |
	    V_TCB_SND_UNA_RAW(M_TCB_SND_UNA_RAW),
	    V_TCB_SND_NXT_RAW(0) | V_TCB_SND_UNA_RAW(0));
	dst += roundup2(LEN__SET_TCB_FIELD_ULP, 16);

	write_set_tcb_field_ulp(tlsp, dst, txq, W_TCB_SND_MAX_RAW,
	    V_TCB_SND_MAX_RAW(M_TCB_SND_MAX_RAW), V_TCB_SND_MAX_RAW(0));
	dst += roundup2(LEN__SET_TCB_FIELD_ULP, 16);

	if (tp->t_flags & TF_REQ_TSTMP) {
		/*
		 * Only the top bits are programmed; t6_tls_tag_alloc()
		 * rejected connections whose low 28 offset bits are nonzero.
		 */
		write_set_tcb_field_ulp(tlsp, dst, txq, W_TCB_TIMESTAMP_OFFSET,
		    V_TCB_TIMESTAMP_OFFSET(M_TCB_TIMESTAMP_OFFSET),
		    V_TCB_TIMESTAMP_OFFSET(tp->ts_offset >> 28));
		dst += roundup2(LEN__SET_TCB_FIELD_ULP, 16);
	}

	KASSERT(dst - (char *)wr == len, ("%s: length mismatch", __func__));

	items[0] = m;
	error = mp_ring_enqueue(txq->r, items, 1, 1);
	if (error)
		m_free(m);
	return (error);
}
372 
/*
 * Allocate a NIC TLS send tag for the connection described in
 * 'params'.  Validates the TLS session parameters, performs a hardware
 * active open to claim a tid, programs the TCB, installs the key
 * context (inline or in adapter key memory), and precomputes the SCMD
 * words used by the transmit path.  On success, *pt is set to the new
 * tag and 0 is returned; on failure the partially constructed tlspcb
 * is released via its send tag reference.
 */
int
t6_tls_tag_alloc(if_t ifp, union if_snd_tag_alloc_params *params,
    struct m_snd_tag **pt)
{
	const struct ktls_session *tls;
	struct tlspcb *tlsp;
	struct adapter *sc;
	struct vi_info *vi;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct sge_txq *txq;
	int atid, error, explicit_iv_size, keyid, mac_first;

	tls = params->tls.tls;

	/* Only TLS 1.1 and TLS 1.2 are currently supported. */
	if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
	    tls->params.tls_vminor < TLS_MINOR_VER_ONE ||
	    tls->params.tls_vminor > TLS_MINOR_VER_TWO)
		return (EPROTONOSUPPORT);

	/* Sanity check values in *tls. */
	switch (tls->params.cipher_algorithm) {
	case CRYPTO_AES_CBC:
		/* XXX: Explicitly ignore any provided IV. */
		switch (tls->params.cipher_key_len) {
		case 128 / 8:
		case 192 / 8:
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}
		switch (tls->params.auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
			break;
		default:
			return (EPROTONOSUPPORT);
		}
		explicit_iv_size = AES_BLOCK_LEN;
		mac_first = 1;
		break;
	case CRYPTO_AES_NIST_GCM_16:
		if (tls->params.iv_len != SALT_SIZE)
			return (EINVAL);
		switch (tls->params.cipher_key_len) {
		case 128 / 8:
		case 192 / 8:
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}
		/* 8-byte explicit nonce for AES-GCM TLS 1.2 records. */
		explicit_iv_size = 8;
		mac_first = 0;
		break;
	default:
		return (EPROTONOSUPPORT);
	}

	vi = if_getsoftc(ifp);
	sc = vi->adapter;

	tlsp = alloc_tlspcb(ifp, vi, M_WAITOK);

	atid = alloc_atid(sc, tlsp);
	if (atid < 0) {
		error = ENOMEM;
		goto failed;
	}

	/*
	 * Use an inline (immediate) key context when configured to, or
	 * when no key memory could be allocated.
	 */
	if (sc->tlst.inline_keys)
		keyid = -1;
	else
		keyid = t4_alloc_tls_keyid(sc);
	if (keyid < 0) {
		CTR2(KTR_CXGBE, "%s: atid %d using immediate key ctx", __func__,
		    atid);
		tlsp->inline_key = true;
	} else {
		tlsp->tx_key_addr = keyid;
		CTR3(KTR_CXGBE, "%s: atid %d allocated TX key addr %#x",
		    __func__,
		    atid, tlsp->tx_key_addr);
	}

	inp = params->tls.inp;
	INP_RLOCK(inp);
	if (inp->inp_flags & INP_DROPPED) {
		INP_RUNLOCK(inp);
		error = ECONNRESET;
		goto failed;
	}
	tlsp->inp = inp;

	tp = intotcpcb(inp);
	if (tp->t_flags & TF_REQ_TSTMP) {
		tlsp->using_timestamps = true;
		/*
		 * The TCB only stores the upper bits of the timestamp
		 * offset (see ktls_set_tcb_fields()), so reject offsets
		 * with any of the low 28 bits set.
		 */
		if ((tp->ts_offset & 0xfffffff) != 0) {
			INP_RUNLOCK(inp);
			error = EINVAL;
			goto failed;
		}
	} else
		tlsp->using_timestamps = false;

	error = send_ktls_act_open_req(sc, vi, inp, tlsp, atid);
	if (error) {
		INP_RUNLOCK(inp);
		goto failed;
	}

	/* Wait for reply to active open. */
	CTR2(KTR_CXGBE, "%s: atid %d sent CPL_ACT_OPEN_REQ", __func__,
	    atid);
	while (tlsp->open_pending) {
		/*
		 * XXX: PCATCH?  We would then have to discard the PCB
		 * when the completion CPL arrived.
		 */
		error = rw_sleep(tlsp, &inp->inp_lock, 0, "t6tlsop", 0);
	}

	/* The atid was freed by ktls_act_open_rpl(). */
	atid = -1;
	if (tlsp->tid < 0) {
		/* The active open failed (status != 0 in the reply). */
		INP_RUNLOCK(inp);
		error = ENOMEM;
		goto failed;
	}

	if (inp->inp_flags & INP_DROPPED) {
		INP_RUNLOCK(inp);
		error = ECONNRESET;
		goto failed;
	}

	/*
	 * Select a TX queue from the flow id hash, skipping the queues
	 * reserved for traffic without a flow id.
	 */
	txq = &sc->sge.txq[vi->first_txq];
	if (inp->inp_flowtype != M_HASHTYPE_NONE)
		txq += ((inp->inp_flowid % (vi->ntxq - vi->rsrv_noflowq)) +
		    vi->rsrv_noflowq);
	tlsp->txq = txq;

	error = ktls_set_tcb_fields(tlsp, tp, txq);
	INP_RUNLOCK(inp);
	if (error)
		goto failed;

	error = ktls_setup_keys(tlsp, tls, txq);
	if (error)
		goto failed;

	tlsp->enc_mode = t4_tls_cipher_mode(tls);
	tlsp->tx_key_info_size = t4_tls_key_info_size(tls);

	/* The SCMD fields used when encrypting a full TLS record. */
	tlsp->scmd0.seqno_numivs = htobe32(V_SCMD_SEQ_NO_CTRL(3) |
	    V_SCMD_PROTO_VERSION(t4_tls_proto_ver(tls)) |
	    V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
	    V_SCMD_CIPH_AUTH_SEQ_CTRL((mac_first == 0)) |
	    V_SCMD_CIPH_MODE(tlsp->enc_mode) |
	    V_SCMD_AUTH_MODE(t4_tls_auth_mode(tls)) |
	    V_SCMD_HMAC_CTRL(t4_tls_hmac_ctrl(tls)) |
	    V_SCMD_IV_SIZE(explicit_iv_size / 2) | V_SCMD_NUM_IVS(1));

	tlsp->scmd0.ivgen_hdrlen = V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_TLS_FRAG_ENABLE(0);
	if (tlsp->inline_key)
		tlsp->scmd0.ivgen_hdrlen |= V_SCMD_KEY_CTX_INLINE(1);
	tlsp->scmd0.ivgen_hdrlen = htobe32(tlsp->scmd0.ivgen_hdrlen);

	/*
	 * The SCMD fields used when encrypting a partial TLS record
	 * (no trailer and possibly a truncated payload).
	 */
	tlsp->scmd0_short.seqno_numivs = V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
	    V_SCMD_CIPH_AUTH_SEQ_CTRL((mac_first == 0)) |
	    V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_NOP) |
	    V_SCMD_HMAC_CTRL(SCMD_HMAC_CTRL_NOP) |
	    V_SCMD_IV_SIZE(AES_BLOCK_LEN / 2) | V_SCMD_NUM_IVS(0);
	/* Partial GCM records are encrypted as raw AES-CTR. */
	if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM)
		tlsp->scmd0_short.seqno_numivs |=
		    V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_CTR);
	else
		tlsp->scmd0_short.seqno_numivs |=
		    V_SCMD_CIPH_MODE(tlsp->enc_mode);
	tlsp->scmd0_short.seqno_numivs =
	    htobe32(tlsp->scmd0_short.seqno_numivs);

	tlsp->scmd0_short.ivgen_hdrlen = V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_TLS_FRAG_ENABLE(0) |
	    V_SCMD_AADIVDROP(1);
	if (tlsp->inline_key)
		tlsp->scmd0_short.ivgen_hdrlen |= V_SCMD_KEY_CTX_INLINE(1);

	TXQ_LOCK(txq);
	if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM)
		txq->kern_tls_gcm++;
	else
		txq->kern_tls_cbc++;
	TXQ_UNLOCK(txq);
	*pt = &tlsp->com;
	return (0);

failed:
	if (atid >= 0)
		free_atid(sc, atid);
	/* Dropping the last tag reference frees the tlspcb. */
	m_snd_tag_rele(&tlsp->com);
	return (error);
}
586 
/*
 * Prepare the key context for this session.  For inline keys nothing
 * is sent to the NIC here; otherwise a TLS key work request carrying a
 * copy of the key context is queued on the same TX queue as the TLS
 * data so it reaches the NIC first.
 */
static int
ktls_setup_keys(struct tlspcb *tlsp, const struct ktls_session *tls,
    struct sge_txq *txq)
{
	struct tls_key_req *kwr;
	struct tls_keyctx *kctx;
	void *items[1];
	struct mbuf *m;
	int error;

	/*
	 * Store the salt and keys in the key context.  For
	 * connections with an inline key, this key context is passed
	 * as immediate data in each work request.  For connections
	 * storing the key in DDR, a work request is used to store a
	 * copy of the key context in DDR.
	 */
	t4_tls_key_ctx(tls, KTLS_TX, &tlsp->keyctx);
	if (tlsp->inline_key)
		return (0);

	/* Populate key work request. */
	m = alloc_wr_mbuf(TLS_KEY_WR_SZ, M_NOWAIT);
	if (m == NULL) {
		CTR2(KTR_CXGBE, "%s: tid %d failed to alloc WR mbuf", __func__,
		    tlsp->tid);
		return (ENOMEM);
	}
	m->m_pkthdr.snd_tag = m_snd_tag_ref(&tlsp->com);
	m->m_pkthdr.csum_flags |= CSUM_SND_TAG;
	kwr = mtod(m, void *);
	memset(kwr, 0, TLS_KEY_WR_SZ);

	t4_write_tlskey_wr(tls, KTLS_TX, tlsp->tid, 0, tlsp->tx_key_addr, kwr);
	kctx = (struct tls_keyctx *)(kwr + 1);
	memcpy(kctx, &tlsp->keyctx, sizeof(*kctx));

	/*
	 * Place the key work request in the transmit queue.  It
	 * should be sent to the NIC before any TLS packets using this
	 * session.
	 */
	items[0] = m;
	error = mp_ring_enqueue(txq->r, items, 1, 1);
	if (error)
		m_free(m);
	else
		CTR2(KTR_CXGBE, "%s: tid %d sent key WR", __func__, tlsp->tid);
	return (error);
}
637 
638 static u_int
639 ktls_base_wr_size(struct tlspcb *tlsp)
640 {
641 	u_int wr_len;
642 
643 	wr_len = sizeof(struct fw_ulptx_wr);	// 16
644 	wr_len += sizeof(struct ulp_txpkt);	// 8
645 	wr_len += sizeof(struct ulptx_idata);	// 8
646 	wr_len += sizeof(struct cpl_tx_sec_pdu);// 32
647 	if (tlsp->inline_key)
648 		wr_len += tlsp->tx_key_info_size;
649 	else {
650 		wr_len += sizeof(struct ulptx_sc_memrd);// 8
651 		wr_len += sizeof(struct ulptx_idata);	// 8
652 	}
653 	wr_len += sizeof(struct cpl_tx_data);	// 16
654 	return (wr_len);
655 }
656 
/*
 * How many bytes of TCP payload to send for a given TLS record.
 * Returns the record-relative end offset (header included) of the data
 * to transmit: the full record length when the mbuf covers the whole
 * record, otherwise a "short" length that excludes the trailer and,
 * for AES-CBC, is trimmed to a cipher-block boundary.
 */
static u_int
ktls_tcp_payload_length(struct tlspcb *tlsp, struct mbuf *m_tls)
{
	struct tls_record_layer *hdr;
	u_int plen, mlen;

	M_ASSERTEXTPG(m_tls);
	hdr = (void *)m_tls->m_epg_hdr;
	plen = ntohs(hdr->tls_length);

	/*
	 * What range of the TLS record is the mbuf requesting to be
	 * sent.  (The mbuf's data offset is its start within the record.)
	 */
	mlen = mtod(m_tls, vm_offset_t) + m_tls->m_len;

	/* Always send complete records. */
	if (mlen == TLS_HEADER_LENGTH + plen)
		return (mlen);

	/*
	 * If the host stack has asked to send part of the trailer,
	 * trim the length to avoid sending any of the trailer.  There
	 * is no way to send a partial trailer currently.
	 */
	if (mlen > TLS_HEADER_LENGTH + plen - m_tls->m_epg_trllen)
		mlen = TLS_HEADER_LENGTH + plen - m_tls->m_epg_trllen;


	/*
	 * For AES-CBC adjust the ciphertext length for the block
	 * size.
	 */
	if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_CBC &&
	    mlen > TLS_HEADER_LENGTH) {
		mlen = TLS_HEADER_LENGTH + rounddown(mlen - TLS_HEADER_LENGTH,
		    AES_BLOCK_LEN);
	}

#ifdef VERBOSE_TRACES
	CTR4(KTR_CXGBE, "%s: tid %d short TLS record (%u vs %u)",
	    __func__, tlsp->tid, mlen, TLS_HEADER_LENGTH + plen);
#endif
	return (mlen);
}
703 
/*
 * For a "short" TLS record, determine the offset into the TLS record
 * payload to send.  This offset does not include the TLS header, but
 * a non-zero offset implies that a header will not be sent.
 * Non-zero offsets are only produced for AES-GCM (sent as AES-CTR),
 * where the offset is rounded down to a cipher-block boundary.
 */
static u_int
ktls_payload_offset(struct tlspcb *tlsp, struct mbuf *m_tls)
{
	struct tls_record_layer *hdr;
	u_int offset, plen;
#ifdef INVARIANTS
	u_int mlen;
#endif

	M_ASSERTEXTPG(m_tls);
	hdr = (void *)m_tls->m_epg_hdr;
	plen = ntohs(hdr->tls_length);
#ifdef INVARIANTS
	/* Callers only use this for partial (short) records. */
	mlen = mtod(m_tls, vm_offset_t) + m_tls->m_len;
	MPASS(mlen < TLS_HEADER_LENGTH + plen);
#endif
	/* Requests starting inside the TLS header send from offset 0. */
	if (mtod(m_tls, vm_offset_t) <= m_tls->m_epg_hdrlen)
		return (0);
	if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) {
		/*
		 * Always send something.  This function is only called
		 * if we aren't sending the tag at all, but if the
		 * request starts in the tag then we are in an odd
		 * state where would effectively send nothing.  Cap
		 * the offset at the last byte of the record payload
		 * to send the last cipher block.
		 */
		offset = min(mtod(m_tls, vm_offset_t) - m_tls->m_epg_hdrlen,
		    (plen - TLS_HEADER_LENGTH - m_tls->m_epg_trllen) - 1);
		return (rounddown(offset, AES_BLOCK_LEN));
	}
	return (0);
}
742 
743 static u_int
744 ktls_sgl_size(u_int nsegs)
745 {
746 	u_int wr_len;
747 
748 	/* First segment is part of ulptx_sgl. */
749 	nsegs--;
750 
751 	wr_len = sizeof(struct ulptx_sgl);
752 	wr_len += 8 * ((3 * nsegs) / 2 + (nsegs & 1));
753 	return (wr_len);
754 }
755 
/*
 * Compute the work request length needed to transmit the portion of
 * the TLS record referenced by 'm_tls' ('m' holds the packet headers),
 * and store the number of DSGL segments required in *nsegsp.  The
 * accounting here must match how the transmit path actually builds the
 * WR: header-only requests become a plain tunnelled packet; otherwise
 * the base crypto WR plus immediate data (TLS header and/or raw IV)
 * plus a DSGL for the payload, rounded to 16 bytes.
 */
static int
ktls_wr_len(struct tlspcb *tlsp, struct mbuf *m, struct mbuf *m_tls,
    int *nsegsp)
{
	struct tls_record_layer *hdr;
	u_int imm_len, offset, plen, wr_len, tlen;

	M_ASSERTEXTPG(m_tls);

	/*
	 * Determine the size of the TLS record payload to send
	 * excluding header and trailer.
	 */
	tlen = ktls_tcp_payload_length(tlsp, m_tls);
	if (tlen <= m_tls->m_epg_hdrlen) {
		/*
		 * For requests that only want to send the TLS header,
		 * send a tunnelled packet as immediate data.
		 */
		wr_len = sizeof(struct fw_eth_tx_pkt_wr) +
		    sizeof(struct cpl_tx_pkt_core) +
		    roundup2(m->m_len + m_tls->m_len, 16);
		if (wr_len > SGE_MAX_WR_LEN) {
			/* NOTE(review): logged but not rejected here. */
			CTR3(KTR_CXGBE,
		    "%s: tid %d TLS header-only packet too long (len %d)",
			    __func__, tlsp->tid, m->m_len + m_tls->m_len);
		}

		/* This should always be the last TLS record in a chain. */
		MPASS(m_tls->m_next == NULL);
		*nsegsp = 0;
		return (wr_len);
	}

	hdr = (void *)m_tls->m_epg_hdr;
	plen = TLS_HEADER_LENGTH + ntohs(hdr->tls_length) - m_tls->m_epg_trllen;
	if (tlen < plen) {
		/* Short record: compute where the sent payload starts. */
		plen = tlen;
		offset = ktls_payload_offset(tlsp, m_tls);
	} else
		offset = 0;

	/* Calculate the size of the work request. */
	wr_len = ktls_base_wr_size(tlsp);

	/*
	 * Full records and short records with an offset of 0 include
	 * the TLS header as immediate data.  Short records include a
	 * raw AES IV as immediate data.
	 */
	imm_len = 0;
	if (offset == 0)
		imm_len += m_tls->m_epg_hdrlen;
	if (plen == tlen)
		imm_len += AES_BLOCK_LEN;
	wr_len += roundup2(imm_len, 16);

	/* TLS record payload via DSGL. */
	*nsegsp = sglist_count_mbuf_epg(m_tls, m_tls->m_epg_hdrlen + offset,
	    plen - (m_tls->m_epg_hdrlen + offset));
	wr_len += ktls_sgl_size(*nsegsp);

	wr_len = roundup2(wr_len, 16);
	return (wr_len);
}
821 
822 /*
823  * See if we have any TCP options requiring a dedicated options-only
824  * packet.
825  */
826 static int
827 ktls_has_tcp_options(struct tcphdr *tcp)
828 {
829 	u_char *cp;
830 	int cnt, opt, optlen;
831 
832 	cp = (u_char *)(tcp + 1);
833 	cnt = tcp->th_off * 4 - sizeof(struct tcphdr);
834 	for (; cnt > 0; cnt -= optlen, cp += optlen) {
835 		opt = cp[0];
836 		if (opt == TCPOPT_EOL)
837 			break;
838 		if (opt == TCPOPT_NOP)
839 			optlen = 1;
840 		else {
841 			if (cnt < 2)
842 				break;
843 			optlen = cp[1];
844 			if (optlen < 2 || optlen > cnt)
845 				break;
846 		}
847 		switch (opt) {
848 		case TCPOPT_NOP:
849 		case TCPOPT_TIMESTAMP:
850 			break;
851 		default:
852 			return (1);
853 		}
854 	}
855 	return (0);
856 }
857 
858 /*
859  * Find the TCP timestamp option.
860  */
861 static void *
862 ktls_find_tcp_timestamps(struct tcphdr *tcp)
863 {
864 	u_char *cp;
865 	int cnt, opt, optlen;
866 
867 	cp = (u_char *)(tcp + 1);
868 	cnt = tcp->th_off * 4 - sizeof(struct tcphdr);
869 	for (; cnt > 0; cnt -= optlen, cp += optlen) {
870 		opt = cp[0];
871 		if (opt == TCPOPT_EOL)
872 			break;
873 		if (opt == TCPOPT_NOP)
874 			optlen = 1;
875 		else {
876 			if (cnt < 2)
877 				break;
878 			optlen = cp[1];
879 			if (optlen < 2 || optlen > cnt)
880 				break;
881 		}
882 		if (opt == TCPOPT_TIMESTAMP && optlen == TCPOLEN_TIMESTAMP)
883 			return (cp + 2);
884 	}
885 	return (NULL);
886 }
887 
/*
 * Validate an outgoing NIC TLS packet chain and enqueue it on the
 * session's TX queue ring.  The first mbuf must contain exactly the
 * Ethernet + IP(v6) + TCP headers (no payload); every subsequent mbuf
 * must be an unmapped (M_EXTPG) mbuf referencing a TLS record.  The
 * transmit metadata (nsegs for the first record, total len16 budget)
 * is stored on the header mbuf.  Returns 0 on success or an errno.
 */
int
t6_ktls_parse_pkt(struct mbuf *m)
{
	struct tlspcb *tlsp;
	struct ether_header *eh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct tcphdr *tcp;
	struct mbuf *m_tls;
	void *items[1];
	int nsegs;
	u_int wr_len, tot_len;

	/*
	 * Locate headers in initial mbuf.
	 *
	 * XXX: This assumes all of the headers are in the initial mbuf.
	 * Could perhaps use m_advance() like parse_pkt() if that turns
	 * out to not be true.
	 */
	M_ASSERTPKTHDR(m);
	MPASS(m->m_pkthdr.snd_tag != NULL);
	tlsp = mst_to_tls(m->m_pkthdr.snd_tag);

	if (m->m_len <= sizeof(*eh) + sizeof(*ip)) {
		CTR2(KTR_CXGBE, "%s: tid %d header mbuf too short", __func__,
		    tlsp->tid);
		return (EINVAL);
	}
	eh = mtod(m, struct ether_header *);
	if (ntohs(eh->ether_type) != ETHERTYPE_IP &&
	    ntohs(eh->ether_type) != ETHERTYPE_IPV6) {
		CTR2(KTR_CXGBE, "%s: tid %d mbuf not ETHERTYPE_IP{,V6}",
		    __func__, tlsp->tid);
		return (EINVAL);
	}
	m->m_pkthdr.l2hlen = sizeof(*eh);

	/* XXX: Reject unsupported IP options? */
	if (ntohs(eh->ether_type) == ETHERTYPE_IP) {
		ip = (struct ip *)(eh + 1);
		if (ip->ip_p != IPPROTO_TCP) {
			CTR2(KTR_CXGBE, "%s: tid %d mbuf not IPPROTO_TCP",
			    __func__, tlsp->tid);
			return (EINVAL);
		}
		m->m_pkthdr.l3hlen = ip->ip_hl * 4;
	} else {
		ip6 = (struct ip6_hdr *)(eh + 1);
		if (ip6->ip6_nxt != IPPROTO_TCP) {
			CTR3(KTR_CXGBE, "%s: tid %d mbuf not IPPROTO_TCP (%u)",
			    __func__, tlsp->tid, ip6->ip6_nxt);
			return (EINVAL);
		}
		m->m_pkthdr.l3hlen = sizeof(struct ip6_hdr);
	}
	if (m->m_len < m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen +
	    sizeof(*tcp)) {
		CTR2(KTR_CXGBE, "%s: tid %d header mbuf too short (2)",
		    __func__, tlsp->tid);
		return (EINVAL);
	}
	tcp = (struct tcphdr *)((char *)(eh + 1) + m->m_pkthdr.l3hlen);
	m->m_pkthdr.l4hlen = tcp->th_off * 4;

	/* Bail if there is TCP payload before the TLS record. */
	if (m->m_len != m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen +
	    m->m_pkthdr.l4hlen) {
		CTR6(KTR_CXGBE,
		    "%s: tid %d header mbuf bad length (%d + %d + %d != %d)",
		    __func__, tlsp->tid, m->m_pkthdr.l2hlen,
		    m->m_pkthdr.l3hlen, m->m_pkthdr.l4hlen, m->m_len);
		return (EINVAL);
	}

	/* Assume all headers are in 'm' for now. */
	MPASS(m->m_next != NULL);
	MPASS(m->m_next->m_flags & M_EXTPG);

	tot_len = 0;

	/*
	 * Each of the remaining mbufs in the chain should reference a
	 * TLS record.
	 */
	for (m_tls = m->m_next; m_tls != NULL; m_tls = m_tls->m_next) {
		MPASS(m_tls->m_flags & M_EXTPG);

		wr_len = ktls_wr_len(tlsp, m, m_tls, &nsegs);
#ifdef VERBOSE_TRACES
		CTR4(KTR_CXGBE, "%s: tid %d wr_len %d nsegs %d", __func__,
		    tlsp->tid, wr_len, nsegs);
#endif
		if (wr_len > SGE_MAX_WR_LEN || nsegs > TX_SGL_SEGS)
			return (EFBIG);
		/* Budget whole EQ descriptors per work request. */
		tot_len += roundup2(wr_len, EQ_ESIZE);

		/*
		 * Store 'nsegs' for the first TLS record in the
		 * header mbuf's metadata.
		 */
		if (m_tls == m->m_next)
			set_mbuf_nsegs(m, nsegs);
	}

	MPASS(tot_len != 0);

	/*
	 * See if we have any TCP options or a FIN requiring a
	 * dedicated packet.
	 */
	if ((tcp->th_flags & TH_FIN) != 0 || ktls_has_tcp_options(tcp)) {
		wr_len = sizeof(struct fw_eth_tx_pkt_wr) +
		    sizeof(struct cpl_tx_pkt_core) + roundup2(m->m_len, 16);
		if (wr_len > SGE_MAX_WR_LEN) {
			CTR3(KTR_CXGBE,
			    "%s: tid %d options-only packet too long (len %d)",
			    __func__, tlsp->tid, m->m_len);
			return (EINVAL);
		}
		tot_len += roundup2(wr_len, EQ_ESIZE);
	}

	/* Include room for a TP work request to program an L2T entry. */
	tot_len += EQ_ESIZE;

	/*
	 * Include room for a ULPTX work request including up to 5
	 * CPL_SET_TCB_FIELD commands before the first TLS work
	 * request.
	 */
	wr_len = sizeof(struct fw_ulptx_wr) +
	    5 * roundup2(LEN__SET_TCB_FIELD_ULP, 16);

	/*
	 * If timestamps are present, reserve 1 more command for
	 * setting the echoed timestamp.
	 */
	if (tlsp->using_timestamps)
		wr_len += roundup2(LEN__SET_TCB_FIELD_ULP, 16);

	tot_len += roundup2(wr_len, EQ_ESIZE);

	set_mbuf_len16(m, tot_len / 16);
#ifdef VERBOSE_TRACES
	CTR4(KTR_CXGBE, "%s: tid %d len16 %d nsegs %d", __func__,
	    tlsp->tid, mbuf_len16(m), mbuf_nsegs(m));
#endif
	items[0] = m;
	return (mp_ring_enqueue(tlsp->txq->r, items, 1, 256));
}
1039 
1040 /*
1041  * If the SGL ends on an address that is not 16 byte aligned, this function will
1042  * add a 0 filled flit at the end.
1043  */
1044 static void
1045 write_gl_to_buf(struct sglist *gl, caddr_t to)
1046 {
1047 	struct sglist_seg *seg;
1048 	__be64 *flitp;
1049 	struct ulptx_sgl *usgl;
1050 	int i, nflits, nsegs;
1051 
1052 	KASSERT(((uintptr_t)to & 0xf) == 0,
1053 	    ("%s: SGL must start at a 16 byte boundary: %p", __func__, to));
1054 
1055 	nsegs = gl->sg_nseg;
1056 	MPASS(nsegs > 0);
1057 
1058 	nflits = (3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2;
1059 	flitp = (__be64 *)to;
1060 	seg = &gl->sg_segs[0];
1061 	usgl = (void *)flitp;
1062 
1063 	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
1064 	    V_ULPTX_NSGE(nsegs));
1065 	usgl->len0 = htobe32(seg->ss_len);
1066 	usgl->addr0 = htobe64(seg->ss_paddr);
1067 	seg++;
1068 
1069 	for (i = 0; i < nsegs - 1; i++, seg++) {
1070 		usgl->sge[i / 2].len[i & 1] = htobe32(seg->ss_len);
1071 		usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ss_paddr);
1072 	}
1073 	if (i & 1)
1074 		usgl->sge[i / 2].len[1] = htobe32(0);
1075 	flitp += nflits;
1076 
1077 	if (nflits & 1) {
1078 		MPASS(((uintptr_t)flitp) & 0xf);
1079 		*flitp++ = 0;
1080 	}
1081 
1082 	MPASS((((uintptr_t)flitp) & 0xf) == 0);
1083 }
1084 
1085 static inline void
1086 copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len)
1087 {
1088 
1089 	MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]);
1090 	MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]);
1091 
1092 	if (__predict_true((uintptr_t)(*to) + len <=
1093 	    (uintptr_t)&eq->desc[eq->sidx])) {
1094 		bcopy(from, *to, len);
1095 		(*to) += len;
1096 		if ((uintptr_t)(*to) == (uintptr_t)&eq->desc[eq->sidx])
1097 			(*to) = (caddr_t)eq->desc;
1098 	} else {
1099 		int portion = (uintptr_t)&eq->desc[eq->sidx] - (uintptr_t)(*to);
1100 
1101 		bcopy(from, *to, portion);
1102 		from += portion;
1103 		portion = len - portion;	/* remaining */
1104 		bcopy(from, (void *)eq->desc, portion);
1105 		(*to) = (caddr_t)eq->desc + portion;
1106 	}
1107 }
1108 
/*
 * Transmit the head mbuf's TCP header (including its options) as a
 * plain FW_ETH_TX_PKT_WR with immediate data, bypassing the crypto
 * engine.  PUSH and FIN are cleared in the copied TCP header; the TLS
 * payload itself is sent by separate TLS work requests.  Returns the
 * number of TX descriptors consumed.
 */
static int
ktls_write_tcp_options(struct sge_txq *txq, void *dst, struct mbuf *m,
    u_int available, u_int pidx)
{
	struct tx_sdesc *txsd;
	struct fw_eth_tx_pkt_wr *wr;
	struct cpl_tx_pkt_core *cpl;
	uint32_t ctrl;
	uint64_t ctrl1;
	int len16, ndesc, pktlen;
	struct ether_header *eh;
	struct ip *ip, newip;
	struct ip6_hdr *ip6, newip6;
	struct tcphdr *tcp, newtcp;
	caddr_t out;

	TXQ_LOCK_ASSERT_OWNED(txq);
	M_ASSERTPKTHDR(m);

	/* Entire packet (headers only) travels as immediate data. */
	wr = dst;
	pktlen = m->m_len;
	ctrl = sizeof(struct cpl_tx_pkt_core) + pktlen;
	len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) + ctrl, 16);
	ndesc = tx_len16_to_desc(len16);
	MPASS(ndesc <= available);

	/* Firmware work request header */
	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
	    V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl));

	ctrl = V_FW_WR_LEN16(len16);
	wr->equiq_to_len16 = htobe32(ctrl);
	wr->r3 = 0;

	cpl = (void *)(wr + 1);

	/* CPL header */
	cpl->ctrl0 = txq->cpl_ctrl0;
	cpl->pack = 0;
	cpl->len = htobe16(pktlen);

	out = (void *)(cpl + 1);

	/* Copy over Ethernet header. */
	eh = mtod(m, struct ether_header *);
	copy_to_txd(&txq->eq, (caddr_t)eh, &out, m->m_pkthdr.l2hlen);

	/* Fixup length in IP header and copy out. */
	if (ntohs(eh->ether_type) == ETHERTYPE_IP) {
		ip = (void *)((char *)eh + m->m_pkthdr.l2hlen);
		newip = *ip;
		newip.ip_len = htons(pktlen - m->m_pkthdr.l2hlen);
		copy_to_txd(&txq->eq, (caddr_t)&newip, &out, sizeof(newip));
		if (m->m_pkthdr.l3hlen > sizeof(*ip))
			copy_to_txd(&txq->eq, (caddr_t)(ip + 1), &out,
			    m->m_pkthdr.l3hlen - sizeof(*ip));
		/* Request IPv4 + TCP checksum insertion. */
		ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP) |
		    V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) |
		    V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen);
	} else {
		ip6 = (void *)((char *)eh + m->m_pkthdr.l2hlen);
		newip6 = *ip6;
		newip6.ip6_plen = htons(pktlen - m->m_pkthdr.l2hlen);
		copy_to_txd(&txq->eq, (caddr_t)&newip6, &out, sizeof(newip6));
		MPASS(m->m_pkthdr.l3hlen == sizeof(*ip6));
		/* Request TCP-over-IPv6 checksum insertion. */
		ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP6) |
		    V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) |
		    V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen);
	}
	cpl->ctrl1 = htobe64(ctrl1);
	txq->txcsum++;

	/* Clear PUSH and FIN in the TCP header if present. */
	tcp = (void *)((char *)eh + m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen);
	newtcp = *tcp;
	newtcp.th_flags &= ~(TH_PUSH | TH_FIN);
	copy_to_txd(&txq->eq, (caddr_t)&newtcp, &out, sizeof(newtcp));

	/* Copy rest of packet. */
	copy_to_txd(&txq->eq, (caddr_t)(tcp + 1), &out, pktlen -
	    (m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen + sizeof(*tcp)));
	txq->imm_wrs++;

	txq->txpkt_wrs++;

	txq->kern_tls_options++;

	/* No mbuf is stashed; the chain is released by a later WR. */
	txsd = &txq->sdesc[pidx];
	txsd->m = NULL;
	txsd->desc_used = ndesc;

	return (ndesc);
}
1202 
/*
 * Transmit a request that covers only (part of) a TLS record's header
 * as a plain FW_ETH_TX_PKT_WR with immediate data, bypassing the
 * crypto engine.  The TCP sequence number is advanced by the offset of
 * m_tls within the record so it lines up with what has already been
 * sent.  Returns the number of TX descriptors consumed.
 */
static int
ktls_write_tunnel_packet(struct sge_txq *txq, void *dst, struct mbuf *m,
    struct mbuf *m_tls, u_int available, tcp_seq tcp_seqno, u_int pidx)
{
	struct tx_sdesc *txsd;
	struct fw_eth_tx_pkt_wr *wr;
	struct cpl_tx_pkt_core *cpl;
	uint32_t ctrl;
	uint64_t ctrl1;
	int len16, ndesc, pktlen;
	struct ether_header *eh;
	struct ip *ip, newip;
	struct ip6_hdr *ip6, newip6;
	struct tcphdr *tcp, newtcp;
	caddr_t out;

	TXQ_LOCK_ASSERT_OWNED(txq);
	M_ASSERTPKTHDR(m);

	/* Locate the template TLS header. */
	M_ASSERTEXTPG(m_tls);

	/* This should always be the last TLS record in a chain. */
	MPASS(m_tls->m_next == NULL);

	/* Headers plus the requested slice of the TLS header. */
	wr = dst;
	pktlen = m->m_len + m_tls->m_len;
	ctrl = sizeof(struct cpl_tx_pkt_core) + pktlen;
	len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) + ctrl, 16);
	ndesc = tx_len16_to_desc(len16);
	MPASS(ndesc <= available);

	/* Firmware work request header */
	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
	    V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl));

	ctrl = V_FW_WR_LEN16(len16);
	wr->equiq_to_len16 = htobe32(ctrl);
	wr->r3 = 0;

	cpl = (void *)(wr + 1);

	/* CPL header */
	cpl->ctrl0 = txq->cpl_ctrl0;
	cpl->pack = 0;
	cpl->len = htobe16(pktlen);

	out = (void *)(cpl + 1);

	/* Copy over Ethernet header. */
	eh = mtod(m, struct ether_header *);
	copy_to_txd(&txq->eq, (caddr_t)eh, &out, m->m_pkthdr.l2hlen);

	/* Fixup length in IP header and copy out. */
	if (ntohs(eh->ether_type) == ETHERTYPE_IP) {
		ip = (void *)((char *)eh + m->m_pkthdr.l2hlen);
		newip = *ip;
		newip.ip_len = htons(pktlen - m->m_pkthdr.l2hlen);
		copy_to_txd(&txq->eq, (caddr_t)&newip, &out, sizeof(newip));
		if (m->m_pkthdr.l3hlen > sizeof(*ip))
			copy_to_txd(&txq->eq, (caddr_t)(ip + 1), &out,
			    m->m_pkthdr.l3hlen - sizeof(*ip));
		/* Request IPv4 + TCP checksum insertion. */
		ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP) |
		    V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) |
		    V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen);
	} else {
		ip6 = (void *)((char *)eh + m->m_pkthdr.l2hlen);
		newip6 = *ip6;
		newip6.ip6_plen = htons(pktlen - m->m_pkthdr.l2hlen);
		copy_to_txd(&txq->eq, (caddr_t)&newip6, &out, sizeof(newip6));
		MPASS(m->m_pkthdr.l3hlen == sizeof(*ip6));
		/* Request TCP-over-IPv6 checksum insertion. */
		ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP6) |
		    V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) |
		    V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen);
	}
	cpl->ctrl1 = htobe64(ctrl1);
	txq->txcsum++;

	/* Set sequence number in TCP header. */
	tcp = (void *)((char *)eh + m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen);
	newtcp = *tcp;
	newtcp.th_seq = htonl(tcp_seqno + mtod(m_tls, vm_offset_t));
	copy_to_txd(&txq->eq, (caddr_t)&newtcp, &out, sizeof(newtcp));

	/* Copy rest of TCP header. */
	copy_to_txd(&txq->eq, (caddr_t)(tcp + 1), &out, m->m_len -
	    (m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen + sizeof(*tcp)));

	/* Copy the subset of the TLS header requested. */
	copy_to_txd(&txq->eq, (char *)m_tls->m_epg_hdr +
	    mtod(m_tls, vm_offset_t), &out, m_tls->m_len);
	txq->imm_wrs++;

	txq->txpkt_wrs++;

	txq->kern_tls_header++;

	/* Last record in the chain, so stash the mbuf chain here. */
	txsd = &txq->sdesc[pidx];
	txsd->m = m;
	txsd->desc_used = ndesc;

	return (ndesc);
}
1306 
/*
 * Compile-time layout assumptions relied upon by ktls_write_tls_wr():
 * a CPL_SET_TCB_FIELD command must fit in a single TX descriptor, and
 * SND_UNA_RAW must live in the same TCB word as SND_NXT_RAW.
 */
_Static_assert(sizeof(struct cpl_set_tcb_field) <= EQ_ESIZE,
    "CPL_SET_TCB_FIELD must be smaller than a single TX descriptor");
_Static_assert(W_TCB_SND_UNA_RAW == W_TCB_SND_NXT_RAW,
    "SND_NXT_RAW and SND_UNA_RAW are in different words");
1311 
/*
 * Write the work request(s) needed to transmit one TLS record mbuf
 * 'm_tls' from the chain headed by 'm': a series of CPL_SET_TCB_FIELD
 * updates (combined with the TLS WR or emitted as a separate raw WR),
 * then a FW_ULPTX_WR containing a CPL_TX_SEC_PDU crypto request, the
 * key context (inline or read from memory), CPL_TX_DATA, the TLS
 * header/IV as immediate data, and an SGL covering the record
 * payload.  Header-only requests are diverted to
 * ktls_write_tunnel_packet().  Returns the total number of TX
 * descriptors consumed.
 */
static int
ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq, void *dst,
    struct mbuf *m, struct tcphdr *tcp, struct mbuf *m_tls, u_int available,
    tcp_seq tcp_seqno, uint32_t *tsopt, u_int pidx, bool set_l2t_idx)
{
	struct sge_eq *eq = &txq->eq;
	struct tx_sdesc *txsd;
	struct fw_ulptx_wr *wr;
	struct ulp_txpkt *txpkt;
	struct ulptx_sc_memrd *memrd;
	struct ulptx_idata *idata;
	struct cpl_tx_sec_pdu *sec_pdu;
	struct cpl_tx_data *tx_data;
	struct tls_record_layer *hdr;
	char *iv, *out;
	u_int aad_start, aad_stop;
	u_int auth_start, auth_stop, auth_insert;
	u_int cipher_start, cipher_stop, iv_offset;
	u_int imm_len, mss, ndesc, offset, plen, tlen, twr_len, wr_len;
	u_int fields, nsegs, tx_max_offset, tx_max;
	bool first_wr, last_wr, using_scratch;

	ndesc = 0;
	MPASS(tlsp->txq == txq);

	/* All-zero saved state means no prior WR has been sent for this tid. */
	first_wr = (tlsp->prev_seq == 0 && tlsp->prev_ack == 0 &&
	    tlsp->prev_win == 0);

	/*
	 * Use the per-txq scratch pad if near the end of the ring to
	 * simplify handling of wrap-around.  This uses a simple but
	 * not quite perfect test of using the scratch buffer if we
	 * can't fit a maximal work request in without wrapping.
	 */
	using_scratch = (eq->sidx - pidx < SGE_MAX_WR_LEN / EQ_ESIZE);

	/* Locate the TLS header. */
	M_ASSERTEXTPG(m_tls);
	hdr = (void *)m_tls->m_epg_hdr;
	/* Record length through the payload, excluding the trailer. */
	plen = TLS_HEADER_LENGTH + ntohs(hdr->tls_length) - m_tls->m_epg_trllen;

	/* Determine how much of the TLS record to send. */
	tlen = ktls_tcp_payload_length(tlsp, m_tls);
	if (tlen <= m_tls->m_epg_hdrlen) {
		/*
		 * For requests that only want to send the TLS header,
		 * send a tunnelled packet as immediate data.
		 */
#ifdef VERBOSE_TRACES
		CTR3(KTR_CXGBE, "%s: tid %d header-only TLS record %u",
		    __func__, tlsp->tid, (u_int)m_tls->m_epg_seqno);
#endif
		return (ktls_write_tunnel_packet(txq, dst, m, m_tls, available,
		    tcp_seqno, pidx));
	}
	if (tlen < plen) {
		/* "Short" record: ends before the full payload. */
		plen = tlen;
		offset = ktls_payload_offset(tlsp, m_tls);
#ifdef VERBOSE_TRACES
		CTR4(KTR_CXGBE, "%s: tid %d short TLS record %u with offset %u",
		    __func__, tlsp->tid, (u_int)m_tls->m_epg_seqno, offset);
#endif
		if (m_tls->m_next == NULL && (tcp->th_flags & TH_FIN) != 0) {
			txq->kern_tls_fin_short++;
#ifdef INVARIANTS
			panic("%s: FIN on short TLS record", __func__);
#endif
		}
	} else
		offset = 0;

	/*
	 * This is the last work request for a given TLS mbuf chain if
	 * it is the last mbuf in the chain and FIN is not set.  If
	 * FIN is set, then ktls_write_tcp_fin() will write out the
	 * last work request.
	 */
	last_wr = m_tls->m_next == NULL && (tcp->th_flags & TH_FIN) == 0;

	/*
	 * The host stack may ask us to not send part of the start of
	 * a TLS record.  (For example, the stack might have
	 * previously sent a "short" TLS record and might later send
	 * down an mbuf that requests to send the remainder of the TLS
	 * record.)  The crypto engine must process a TLS record from
	 * the beginning if computing a GCM tag or HMAC, so we always
	 * send the TLS record from the beginning as input to the
	 * crypto engine and via CPL_TX_DATA to TP.  However, TP will
	 * drop individual packets after they have been chopped up
	 * into MSS-sized chunks if the entire sequence range of those
	 * packets is less than SND_UNA.  SND_UNA is computed as
	 * TX_MAX - SND_UNA_RAW.  Thus, use the offset stored in
	 * m_data to set TX_MAX to the first byte in the TCP sequence
	 * space the host actually wants us to send and set
	 * SND_UNA_RAW to 0.
	 *
	 * If the host sends us back to back requests that span the
	 * trailer of a single TLS record (first request ends "in" the
	 * trailer and second request starts at the next byte but
	 * still "in" the trailer), the initial bytes of the trailer
	 * that the first request drops will not be retransmitted.  If
	 * the host uses the same requests when retransmitting the
	 * connection will hang.  To handle this, always transmit the
	 * full trailer for a request that begins "in" the trailer
	 * (the second request in the example above).  This should
	 * also help to avoid retransmits for the common case.
	 *
	 * A similar condition exists when using CBC for back to back
	 * requests that span a single AES block.  The first request
	 * will be truncated to end at the end of the previous AES
	 * block.  To handle this, always begin transmission at the
	 * start of the current AES block.
	 */
	tx_max_offset = mtod(m_tls, vm_offset_t);
	if (tx_max_offset > TLS_HEADER_LENGTH + ntohs(hdr->tls_length) -
	    m_tls->m_epg_trllen) {
		/* Always send the full trailer. */
		tx_max_offset = TLS_HEADER_LENGTH + ntohs(hdr->tls_length) -
		    m_tls->m_epg_trllen;
	}
	if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_CBC &&
	    tx_max_offset > TLS_HEADER_LENGTH) {
		/* Always send all of the first AES block. */
		tx_max_offset = TLS_HEADER_LENGTH +
		    rounddown(tx_max_offset - TLS_HEADER_LENGTH,
		    AES_BLOCK_LEN);
	}
	tx_max = tcp_seqno + tx_max_offset;

	/*
	 * Update TCB fields.  Reserve space for the FW_ULPTX_WR header
	 * but don't populate it until we know how many field updates
	 * are required.
	 */
	if (using_scratch)
		wr = (void *)txq->ss;
	else
		wr = dst;
	out = (void *)(wr + 1);
	fields = 0;
	if (set_l2t_idx) {
		KASSERT(m->m_next == m_tls,
		    ("trying to set L2T_IX for subsequent TLS WR"));
#ifdef VERBOSE_TRACES
		CTR3(KTR_CXGBE, "%s: tid %d set L2T_IX to %d", __func__,
		    tlsp->tid, tlsp->l2te->idx);
#endif
		write_set_tcb_field_ulp(tlsp, out, txq, W_TCB_L2T_IX,
		    V_TCB_L2T_IX(M_TCB_L2T_IX), V_TCB_L2T_IX(tlsp->l2te->idx));
		out += roundup2(LEN__SET_TCB_FIELD_ULP, 16);
		fields++;
	}
	if (tsopt != NULL && tlsp->prev_tsecr != ntohl(tsopt[1])) {
		KASSERT(m->m_next == m_tls,
		    ("trying to set T_RTSEQ_RECENT for subsequent TLS WR"));
#ifdef VERBOSE_TRACES
		CTR2(KTR_CXGBE, "%s: tid %d wrote updated T_RTSEQ_RECENT",
		    __func__, tlsp->tid);
#endif
		write_set_tcb_field_ulp(tlsp, out, txq, W_TCB_T_RTSEQ_RECENT,
		    V_TCB_T_RTSEQ_RECENT(M_TCB_T_RTSEQ_RECENT),
		    V_TCB_T_RTSEQ_RECENT(ntohl(tsopt[1])));
		out += roundup2(LEN__SET_TCB_FIELD_ULP, 16);
		fields++;

		tlsp->prev_tsecr = ntohl(tsopt[1]);
	}

	if (first_wr || tlsp->prev_seq != tx_max) {
		KASSERT(m->m_next == m_tls,
		    ("trying to set TX_MAX for subsequent TLS WR"));
#ifdef VERBOSE_TRACES
		CTR4(KTR_CXGBE,
		    "%s: tid %d setting TX_MAX to %u (tcp_seqno %u)",
		    __func__, tlsp->tid, tx_max, tcp_seqno);
#endif
		write_set_tcb_field_ulp(tlsp, out, txq, W_TCB_TX_MAX,
		    V_TCB_TX_MAX(M_TCB_TX_MAX), V_TCB_TX_MAX(tx_max));
		out += roundup2(LEN__SET_TCB_FIELD_ULP, 16);
		fields++;
	}

	/*
	 * If there is data to drop at the beginning of this TLS
	 * record or if this is a retransmit,
	 * reset SND_UNA_RAW to 0 so that SND_UNA == TX_MAX.
	 */
	if (tlsp->prev_seq != tx_max || mtod(m_tls, vm_offset_t) != 0) {
		KASSERT(m->m_next == m_tls,
		    ("trying to clear SND_UNA_RAW for subsequent TLS WR"));
#ifdef VERBOSE_TRACES
		CTR2(KTR_CXGBE, "%s: tid %d clearing SND_UNA_RAW", __func__,
		    tlsp->tid);
#endif
		write_set_tcb_field_ulp(tlsp, out, txq, W_TCB_SND_UNA_RAW,
		    V_TCB_SND_UNA_RAW(M_TCB_SND_UNA_RAW),
		    V_TCB_SND_UNA_RAW(0));
		out += roundup2(LEN__SET_TCB_FIELD_ULP, 16);
		fields++;
	}

	/*
	 * Store the expected sequence number of the next byte after
	 * this record.
	 */
	tlsp->prev_seq = tcp_seqno + tlen;

	if (first_wr || tlsp->prev_ack != ntohl(tcp->th_ack)) {
		KASSERT(m->m_next == m_tls,
		    ("trying to set RCV_NXT for subsequent TLS WR"));
		write_set_tcb_field_ulp(tlsp, out, txq, W_TCB_RCV_NXT,
		    V_TCB_RCV_NXT(M_TCB_RCV_NXT),
		    V_TCB_RCV_NXT(ntohl(tcp->th_ack)));
		out += roundup2(LEN__SET_TCB_FIELD_ULP, 16);
		fields++;

		tlsp->prev_ack = ntohl(tcp->th_ack);
	}

	if (first_wr || tlsp->prev_win != ntohs(tcp->th_win)) {
		KASSERT(m->m_next == m_tls,
		    ("trying to set RCV_WND for subsequent TLS WR"));
		write_set_tcb_field_ulp(tlsp, out, txq, W_TCB_RCV_WND,
		    V_TCB_RCV_WND(M_TCB_RCV_WND),
		    V_TCB_RCV_WND(ntohs(tcp->th_win)));
		out += roundup2(LEN__SET_TCB_FIELD_ULP, 16);
		fields++;

		tlsp->prev_win = ntohs(tcp->th_win);
	}

	/* Use cached value for first record in chain. */
	if (m->m_next == m_tls)
		nsegs = mbuf_nsegs(m);
	else
		nsegs = sglist_count_mbuf_epg(m_tls, m_tls->m_epg_hdrlen +
		    offset, plen - (m_tls->m_epg_hdrlen + offset));

	/* Calculate the size of the TLS work request. */
	twr_len = ktls_base_wr_size(tlsp);

	/* Immediate data: TLS header (full records) and/or IV (short). */
	imm_len = 0;
	if (offset == 0)
		imm_len += m_tls->m_epg_hdrlen;
	if (plen == tlen)
		imm_len += AES_BLOCK_LEN;
	twr_len += roundup2(imm_len, 16);
	twr_len += ktls_sgl_size(nsegs);

	/*
	 * If any field updates were required, determine if they can
	 * be included in the TLS work request.  If not, use the
	 * FW_ULPTX_WR work request header at 'wr' as a dedicated work
	 * request for the field updates and start a new work request
	 * for the TLS work request afterward.
	 */
	if (fields != 0) {
		wr_len = fields * roundup2(LEN__SET_TCB_FIELD_ULP, 16);
		if (twr_len + wr_len <= SGE_MAX_WR_LEN &&
		    tlsp->sc->tlst.combo_wrs) {
			wr_len += twr_len;
			txpkt = (void *)out;
		} else {
			wr_len += sizeof(*wr);
			wr->op_to_compl = htobe32(V_FW_WR_OP(FW_ULPTX_WR));
			wr->flowid_len16 = htobe32(F_FW_ULPTX_WR_DATA |
			    V_FW_WR_LEN16(wr_len / 16));
			wr->cookie = 0;

			/*
			 * If we were using scratch space, copy the
			 * field updates work request to the ring.
			 */
			if (using_scratch) {
				out = dst;
				copy_to_txd(eq, txq->ss, &out, wr_len);
			}

			ndesc = howmany(wr_len, EQ_ESIZE);
			MPASS(ndesc <= available);

			txq->raw_wrs++;
			txsd = &txq->sdesc[pidx];
			txsd->m = NULL;
			txsd->desc_used = ndesc;
			IDXINCR(pidx, ndesc, eq->sidx);
			dst = &eq->desc[pidx];

			/*
			 * Determine if we should use scratch space
			 * for the TLS work request based on the
			 * available space after advancing pidx for
			 * the field updates work request.
			 */
			wr_len = twr_len;
			using_scratch = (eq->sidx - pidx <
			    howmany(wr_len, EQ_ESIZE));
			if (using_scratch)
				wr = (void *)txq->ss;
			else
				wr = dst;
			txpkt = (void *)(wr + 1);
		}
	} else {
		wr_len = twr_len;
		txpkt = (void *)out;
	}

	wr_len = roundup2(wr_len, 16);
	MPASS(ndesc + howmany(wr_len, EQ_ESIZE) <= available);

	/* FW_ULPTX_WR */
	wr->op_to_compl = htobe32(V_FW_WR_OP(FW_ULPTX_WR));
	wr->flowid_len16 = htobe32(F_FW_ULPTX_WR_DATA |
	    V_FW_WR_LEN16(wr_len / 16));
	wr->cookie = 0;

	/* ULP_TXPKT */
	txpkt->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
	    V_ULP_TXPKT_DATAMODIFY(0) |
	    V_ULP_TXPKT_CHANNELID(tlsp->vi->pi->port_id) | V_ULP_TXPKT_DEST(0) |
	    V_ULP_TXPKT_FID(txq->eq.cntxt_id) | V_ULP_TXPKT_RO(1));
	txpkt->len = htobe32(howmany(twr_len - sizeof(*wr), 16));

	/* ULPTX_IDATA sub-command */
	idata = (void *)(txpkt + 1);
	idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
	    V_ULP_TX_SC_MORE(1));
	idata->len = sizeof(struct cpl_tx_sec_pdu);

	/*
	 * The key context, CPL_TX_DATA, and immediate data are part
	 * of this ULPTX_IDATA when using an inline key.  When reading
	 * the key from memory, the CPL_TX_DATA and immediate data are
	 * part of a separate ULPTX_IDATA.
	 */
	if (tlsp->inline_key)
		idata->len += tlsp->tx_key_info_size +
		    sizeof(struct cpl_tx_data) + imm_len;
	idata->len = htobe32(idata->len);

	/* CPL_TX_SEC_PDU */
	sec_pdu = (void *)(idata + 1);

	/*
	 * For short records, AAD is counted as header data in SCMD0,
	 * the IV is next followed by a cipher region for the payload.
	 */
	if (plen == tlen) {
		aad_start = 0;
		aad_stop = 0;
		iv_offset = 1;
		auth_start = 0;
		auth_stop = 0;
		auth_insert = 0;
		cipher_start = AES_BLOCK_LEN + 1;
		cipher_stop = 0;

		/* AES IV (16 bytes) plus the payload bytes being sent. */
		sec_pdu->pldlen = htobe32(16 + plen -
		    (m_tls->m_epg_hdrlen + offset));

		/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
		sec_pdu->seqno_numivs = tlsp->scmd0_short.seqno_numivs;
		sec_pdu->ivgen_hdrlen = htobe32(
		    tlsp->scmd0_short.ivgen_hdrlen |
		    V_SCMD_HDR_LEN(offset == 0 ? m_tls->m_epg_hdrlen : 0));

		txq->kern_tls_short++;
	} else {
		/*
		 * AAD is TLS header.  IV is after AAD.  The cipher region
		 * starts after the IV.  See comments in ccr_authenc() and
		 * ccr_gmac() in t4_crypto.c regarding cipher and auth
		 * start/stop values.
		 */
		aad_start = 1;
		aad_stop = TLS_HEADER_LENGTH;
		iv_offset = TLS_HEADER_LENGTH + 1;
		cipher_start = m_tls->m_epg_hdrlen + 1;
		if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) {
			cipher_stop = 0;
			auth_start = cipher_start;
			auth_stop = 0;
			auth_insert = 0;
		} else {
			/* NOTE(review): currently identical to the GCM case. */
			cipher_stop = 0;
			auth_start = cipher_start;
			auth_stop = 0;
			auth_insert = 0;
		}

		sec_pdu->pldlen = htobe32(plen);

		/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
		sec_pdu->seqno_numivs = tlsp->scmd0.seqno_numivs;
		sec_pdu->ivgen_hdrlen = tlsp->scmd0.ivgen_hdrlen;

		if (mtod(m_tls, vm_offset_t) == 0)
			txq->kern_tls_full++;
		else
			txq->kern_tls_partial++;
	}
	sec_pdu->op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(iv_offset));
	sec_pdu->aadstart_cipherstop_hi = htobe32(
	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4));
	sec_pdu->cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) |
	    V_CPL_TX_SEC_PDU_AUTHSTART(auth_start) |
	    V_CPL_TX_SEC_PDU_AUTHSTOP(auth_stop) |
	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));

	/* TLS record sequence number. */
	sec_pdu->scmd1 = htobe64(m_tls->m_epg_seqno);

	/* Key context */
	out = (void *)(sec_pdu + 1);
	if (tlsp->inline_key) {
		memcpy(out, &tlsp->keyctx, tlsp->tx_key_info_size);
		out += tlsp->tx_key_info_size;
	} else {
		/* ULPTX_SC_MEMRD to read key context. */
		memrd = (void *)out;
		memrd->cmd_to_len = htobe32(V_ULPTX_CMD(ULP_TX_SC_MEMRD) |
		    V_ULP_TX_SC_MORE(1) |
		    V_ULPTX_LEN16(tlsp->tx_key_info_size >> 4));
		memrd->addr = htobe32(tlsp->tx_key_addr >> 5);

		/* ULPTX_IDATA for CPL_TX_DATA and TLS header. */
		idata = (void *)(memrd + 1);
		idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
		    V_ULP_TX_SC_MORE(1));
		idata->len = htobe32(sizeof(struct cpl_tx_data) + imm_len);

		out = (void *)(idata + 1);
	}

	/* CPL_TX_DATA */
	tx_data = (void *)out;
	OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tlsp->tid));
	/* Derive MSS from TSO metadata, the last seen value, or the MTU. */
	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		mss = m->m_pkthdr.tso_segsz;
		tlsp->prev_mss = mss;
	} else if (tlsp->prev_mss != 0)
		mss = tlsp->prev_mss;
	else
		mss = if_getmtu(tlsp->vi->ifp) -
		    (m->m_pkthdr.l3hlen + m->m_pkthdr.l4hlen);
	if (offset == 0) {
		tx_data->len = htobe32(V_TX_DATA_MSS(mss) | V_TX_LENGTH(tlen));
		tx_data->rsvd = htobe32(tcp_seqno);
	} else {
		/* Short record with offset: only the tail is sent. */
		tx_data->len = htobe32(V_TX_DATA_MSS(mss) |
		    V_TX_LENGTH(tlen - (m_tls->m_epg_hdrlen + offset)));
		tx_data->rsvd = htobe32(tcp_seqno + m_tls->m_epg_hdrlen + offset);
	}
	tx_data->flags = htobe32(F_TX_BYPASS);
	if (last_wr && tcp->th_flags & TH_PUSH)
		tx_data->flags |= htobe32(F_TX_PUSH | F_TX_SHOVE);

	/* Populate the TLS header */
	out = (void *)(tx_data + 1);
	if (offset == 0) {
		memcpy(out, m_tls->m_epg_hdr, m_tls->m_epg_hdrlen);
		out += m_tls->m_epg_hdrlen;
	}

	/* AES IV for a short record. */
	if (plen == tlen) {
		iv = out;
		if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) {
			/* Salt + explicit nonce + block counter. */
			memcpy(iv, tlsp->keyctx.u.txhdr.txsalt, SALT_SIZE);
			memcpy(iv + 4, hdr + 1, 8);
			*(uint32_t *)(iv + 12) = htobe32(2 +
			    offset / AES_BLOCK_LEN);
		} else
			memcpy(iv, hdr + 1, AES_BLOCK_LEN);
		out += AES_BLOCK_LEN;
	}

	if (imm_len % 16 != 0) {
		/* Zero pad to an 8-byte boundary. */
		memset(out, 0, 8 - (imm_len % 8));
		out += 8 - (imm_len % 8);

		/*
		 * Insert a ULP_TX_SC_NOOP if needed so the SGL is
		 * 16-byte aligned.
		 */
		if (imm_len % 16 <= 8) {
			idata = (void *)out;
			idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
			idata->len = htobe32(0);
			out = (void *)(idata + 1);
		}
	}

	/* SGL for record payload */
	sglist_reset(txq->gl);
	if (sglist_append_mbuf_epg(txq->gl, m_tls, m_tls->m_epg_hdrlen + offset,
	    plen - (m_tls->m_epg_hdrlen + offset)) != 0) {
#ifdef INVARIANTS
		panic("%s: failed to append sglist", __func__);
#endif
	}
	write_gl_to_buf(txq->gl, out);

	/* If built in the scratch pad, copy the finished WR to the ring. */
	if (using_scratch) {
		out = dst;
		copy_to_txd(eq, txq->ss, &out, wr_len);
	}

	ndesc += howmany(wr_len, EQ_ESIZE);
	MPASS(ndesc <= available);

	/* Bytes resent from before the requested offset count as waste. */
	txq->kern_tls_records++;
	txq->kern_tls_octets += tlen - mtod(m_tls, vm_offset_t);
	if (mtod(m_tls, vm_offset_t) != 0) {
		if (offset == 0)
			txq->kern_tls_waste += mtod(m_tls, vm_offset_t);
		else
			txq->kern_tls_waste += mtod(m_tls, vm_offset_t) -
			    (m_tls->m_epg_hdrlen + offset);
	}

	/* Only stash the mbuf chain with the chain's final WR. */
	txsd = &txq->sdesc[pidx];
	if (last_wr)
		txsd->m = m;
	else
		txsd->m = NULL;
	txsd->desc_used = howmany(wr_len, EQ_ESIZE);

	return (ndesc);
}
1850 
/*
 * Transmit the head mbuf as a plain FW_ETH_TX_PKT_WR with immediate
 * data, overwriting the TCP sequence number with 'tcp_seqno'.  This
 * writes the final work request for a chain whose TCP header has FIN
 * set (see ktls_write_tls_wr()).  Returns the number of TX
 * descriptors consumed.
 */
static int
ktls_write_tcp_fin(struct sge_txq *txq, void *dst, struct mbuf *m,
    u_int available, tcp_seq tcp_seqno, u_int pidx)
{
	struct tx_sdesc *txsd;
	struct fw_eth_tx_pkt_wr *wr;
	struct cpl_tx_pkt_core *cpl;
	uint32_t ctrl;
	uint64_t ctrl1;
	int len16, ndesc, pktlen;
	struct ether_header *eh;
	struct ip *ip, newip;
	struct ip6_hdr *ip6, newip6;
	struct tcphdr *tcp, newtcp;
	caddr_t out;

	TXQ_LOCK_ASSERT_OWNED(txq);
	M_ASSERTPKTHDR(m);

	/* Entire packet travels as immediate data. */
	wr = dst;
	pktlen = m->m_len;
	ctrl = sizeof(struct cpl_tx_pkt_core) + pktlen;
	len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) + ctrl, 16);
	ndesc = tx_len16_to_desc(len16);
	MPASS(ndesc <= available);

	/* Firmware work request header */
	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
	    V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl));

	ctrl = V_FW_WR_LEN16(len16);
	wr->equiq_to_len16 = htobe32(ctrl);
	wr->r3 = 0;

	cpl = (void *)(wr + 1);

	/* CPL header */
	cpl->ctrl0 = txq->cpl_ctrl0;
	cpl->pack = 0;
	cpl->len = htobe16(pktlen);

	out = (void *)(cpl + 1);

	/* Copy over Ethernet header. */
	eh = mtod(m, struct ether_header *);
	copy_to_txd(&txq->eq, (caddr_t)eh, &out, m->m_pkthdr.l2hlen);

	/* Fixup length in IP header and copy out. */
	if (ntohs(eh->ether_type) == ETHERTYPE_IP) {
		ip = (void *)((char *)eh + m->m_pkthdr.l2hlen);
		newip = *ip;
		newip.ip_len = htons(pktlen - m->m_pkthdr.l2hlen);
		copy_to_txd(&txq->eq, (caddr_t)&newip, &out, sizeof(newip));
		if (m->m_pkthdr.l3hlen > sizeof(*ip))
			copy_to_txd(&txq->eq, (caddr_t)(ip + 1), &out,
			    m->m_pkthdr.l3hlen - sizeof(*ip));
		/* Request IPv4 + TCP checksum insertion. */
		ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP) |
		    V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) |
		    V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen);
	} else {
		ip6 = (void *)((char *)eh + m->m_pkthdr.l2hlen);
		newip6 = *ip6;
		newip6.ip6_plen = htons(pktlen - m->m_pkthdr.l2hlen);
		copy_to_txd(&txq->eq, (caddr_t)&newip6, &out, sizeof(newip6));
		MPASS(m->m_pkthdr.l3hlen == sizeof(*ip6));
		/* Request TCP-over-IPv6 checksum insertion. */
		ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP6) |
		    V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) |
		    V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen);
	}
	cpl->ctrl1 = htobe64(ctrl1);
	txq->txcsum++;

	/* Set sequence number in TCP header. */
	tcp = (void *)((char *)eh + m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen);
	newtcp = *tcp;
	newtcp.th_seq = htonl(tcp_seqno);
	copy_to_txd(&txq->eq, (caddr_t)&newtcp, &out, sizeof(newtcp));

	/* Copy rest of packet. */
	copy_to_txd(&txq->eq, (caddr_t)(tcp + 1), &out, m->m_len -
	    (m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen + sizeof(*tcp)));
	txq->imm_wrs++;

	txq->txpkt_wrs++;

	txq->kern_tls_fin++;

	/* Final WR for the chain, so stash the mbuf chain here. */
	txsd = &txq->sdesc[pidx];
	txsd->m = m;
	txsd->desc_used = ndesc;

	return (ndesc);
}
1944 
/*
 * Emit the work requests needed to transmit the packet chain 'm' to
 * the NIC TX queue 'txq'.  'm' is a packet-header mbuf holding the
 * Ethernet/IP/TCP headers, followed by one or more M_EXTPG mbufs each
 * describing a TLS record.  'dst' points at the first free hardware
 * descriptor and 'available' is the number of free descriptors the
 * caller guarantees.  Returns the total number of descriptors
 * consumed.
 */
int
t6_ktls_write_wr(struct sge_txq *txq, void *dst, struct mbuf *m,
    u_int available)
{
	struct sge_eq *eq = &txq->eq;
	struct tx_sdesc *txsd;
	struct tlspcb *tlsp;
	struct tcphdr *tcp;
	struct mbuf *m_tls;
	struct ether_header *eh;
	tcp_seq tcp_seqno;
	u_int ndesc, pidx, totdesc;
	uint16_t vlan_tag;
	bool has_fin, set_l2t_idx;
	void *tsopt;

	M_ASSERTPKTHDR(m);
	MPASS(m->m_pkthdr.snd_tag != NULL);
	tlsp = mst_to_tls(m->m_pkthdr.snd_tag);

	totdesc = 0;
	eh = mtod(m, struct ether_header *);
	/* The TCP header sits after the L2 and L3 headers in the lead mbuf. */
	tcp = (struct tcphdr *)((char *)eh + m->m_pkthdr.l2hlen +
	    m->m_pkthdr.l3hlen);
	pidx = eq->pidx;
	has_fin = (tcp->th_flags & TH_FIN) != 0;

	/*
	 * If this TLS record has a FIN, then we will send any
	 * requested options as part of the FIN packet.
	 */
	if (!has_fin && ktls_has_tcp_options(tcp)) {
		ndesc = ktls_write_tcp_options(txq, dst, m, available, pidx);
		totdesc += ndesc;
		/* Advance past the descriptors just consumed. */
		IDXINCR(pidx, ndesc, eq->sidx);
		dst = &eq->desc[pidx];
#ifdef VERBOSE_TRACES
		CTR2(KTR_CXGBE, "%s: tid %d wrote TCP options packet", __func__,
		    tlsp->tid);
#endif
	}

	/*
	 * Allocate a new L2T entry if necessary.  This may write out
	 * a work request to the txq.
	 */
	if (m->m_flags & M_VLANTAG)
		vlan_tag = m->m_pkthdr.ether_vtag;
	else
		vlan_tag = 0xfff;
	set_l2t_idx = false;
	/* (Re)allocate when there is no entry or the VLAN/dmac changed. */
	if (tlsp->l2te == NULL || tlsp->l2te->vlan != vlan_tag ||
	    memcmp(tlsp->l2te->dmac, eh->ether_dhost, ETHER_ADDR_LEN) != 0) {
		set_l2t_idx = true;
		if (tlsp->l2te)
			t4_l2t_release(tlsp->l2te);
		tlsp->l2te = t4_l2t_alloc_tls(tlsp->sc, txq, dst, &ndesc,
		    vlan_tag, tlsp->vi->pi->lport, eh->ether_dhost);
		if (tlsp->l2te == NULL)
			CXGBE_UNIMPLEMENTED("failed to allocate TLS L2TE");
		/* ndesc != 0 means the allocation wrote a WR to the queue. */
		if (ndesc != 0) {
			MPASS(ndesc <= available - totdesc);

			txq->raw_wrs++;
			txsd = &txq->sdesc[pidx];
			txsd->m = NULL;
			txsd->desc_used = ndesc;
			totdesc += ndesc;
			IDXINCR(pidx, ndesc, eq->sidx);
			dst = &eq->desc[pidx];
		}
	}

	/*
	 * Iterate over each TLS record constructing a work request
	 * for that record.
	 */
	for (m_tls = m->m_next; m_tls != NULL; m_tls = m_tls->m_next) {
		MPASS(m_tls->m_flags & M_EXTPG);

		/*
		 * Determine the initial TCP sequence number for this
		 * record.
		 */
		tsopt = NULL;
		if (m_tls == m->m_next) {
			/*
			 * First record: back the header's sequence
			 * number up by the offset into the record.
			 */
			tcp_seqno = ntohl(tcp->th_seq) -
			    mtod(m_tls, vm_offset_t);
			if (tlsp->using_timestamps)
				tsopt = ktls_find_tcp_timestamps(tcp);
		} else {
			/* Later records continue from the previous one. */
			MPASS(mtod(m_tls, vm_offset_t) == 0);
			tcp_seqno = tlsp->prev_seq;
		}

		ndesc = ktls_write_tls_wr(tlsp, txq, dst, m, tcp, m_tls,
		    available - totdesc, tcp_seqno, tsopt, pidx, set_l2t_idx);
		totdesc += ndesc;
		IDXINCR(pidx, ndesc, eq->sidx);
		dst = &eq->desc[pidx];

		/* Only need to set the L2T index once. */
		set_l2t_idx = false;
	}

	if (has_fin) {
		/*
		 * If the TCP header for this chain has FIN sent, then
		 * explicitly send a packet that has FIN set.  This
		 * will also have PUSH set if requested.  This assumes
		 * we sent at least one TLS record work request and
		 * uses the TCP sequence number after that request as
		 * the sequence number for the FIN packet.
		 */
		ndesc = ktls_write_tcp_fin(txq, dst, m, available,
		    tlsp->prev_seq, pidx);
		totdesc += ndesc;
	}

	MPASS(totdesc <= available);
	return (totdesc);
}
2067 
2068 static void
2069 t6_tls_tag_free(struct m_snd_tag *mst)
2070 {
2071 	struct adapter *sc;
2072 	struct tlspcb *tlsp;
2073 
2074 	tlsp = mst_to_tls(mst);
2075 	sc = tlsp->sc;
2076 
2077 	CTR2(KTR_CXGBE, "%s: tid %d", __func__, tlsp->tid);
2078 
2079 	if (tlsp->l2te)
2080 		t4_l2t_release(tlsp->l2te);
2081 	if (tlsp->tid >= 0)
2082 		release_tid(sc, tlsp->tid, tlsp->ctrlq);
2083 	if (tlsp->ce)
2084 		t4_release_clip_entry(sc, tlsp->ce);
2085 	if (tlsp->tx_key_addr >= 0)
2086 		t4_free_tls_keyid(sc, tlsp->tx_key_addr);
2087 
2088 	zfree(tlsp, M_CXGBE);
2089 }
2090 
2091 void
2092 t6_ktls_modload(void)
2093 {
2094 
2095 	t4_register_shared_cpl_handler(CPL_ACT_OPEN_RPL, ktls_act_open_rpl,
2096 	    CPL_COOKIE_KERN_TLS);
2097 }
2098 
2099 void
2100 t6_ktls_modunload(void)
2101 {
2102 
2103 	t4_register_shared_cpl_handler(CPL_ACT_OPEN_RPL, NULL,
2104 	    CPL_COOKIE_KERN_TLS);
2105 }
2106 
2107 #else
2108 
/* Stub used when the kernel is built without KERN_TLS support. */
int
t6_tls_tag_alloc(if_t ifp, union if_snd_tag_alloc_params *params,
    struct m_snd_tag **pt)
{
	/* NIC TLS offload is unavailable in this configuration. */
	return (ENXIO);
}
2115 
/* Stub used when the kernel is built without KERN_TLS support. */
int
t6_ktls_parse_pkt(struct mbuf *m)
{
	/* No TLS mbufs can exist without KERN_TLS; reject the packet. */
	return (EINVAL);
}
2121 
/*
 * Stub used when the kernel is built without KERN_TLS support.  Since
 * t6_ktls_parse_pkt() always fails in this configuration, no TLS mbuf
 * can ever reach the transmit path, so this is unreachable.
 */
int
t6_ktls_write_wr(struct sge_txq *txq, void *dst, struct mbuf *m,
    u_int available)
{
	panic("can't happen");
}
2128 
/* No-op module load hook for kernels built without KERN_TLS. */
void
t6_ktls_modload(void)
{
}
2133 
/* No-op module unload hook for kernels built without KERN_TLS. */
void
t6_ktls_modunload(void)
{
}
2138 
2139 #endif
2140