xref: /freebsd/sys/dev/cxgbe/crypto/t7_kern_tls.c (revision 9e269eafebfca6c876be76a78e4bda621a921e45)
14f272a5eSJohn Baldwin /*-
24f272a5eSJohn Baldwin  * SPDX-License-Identifier: BSD-2-Clause
34f272a5eSJohn Baldwin  *
44f272a5eSJohn Baldwin  * Copyright (c) 2025 Chelsio Communications
54f272a5eSJohn Baldwin  * Written by: John Baldwin <jhb@FreeBSD.org>
64f272a5eSJohn Baldwin  *
74f272a5eSJohn Baldwin  * Redistribution and use in source and binary forms, with or without
84f272a5eSJohn Baldwin  * modification, are permitted provided that the following conditions
94f272a5eSJohn Baldwin  * are met:
104f272a5eSJohn Baldwin  * 1. Redistributions of source code must retain the above copyright
114f272a5eSJohn Baldwin  *    notice, this list of conditions and the following disclaimer.
124f272a5eSJohn Baldwin  * 2. Redistributions in binary form must reproduce the above copyright
134f272a5eSJohn Baldwin  *    notice, this list of conditions and the following disclaimer in the
144f272a5eSJohn Baldwin  *    documentation and/or other materials provided with the distribution.
154f272a5eSJohn Baldwin  *
164f272a5eSJohn Baldwin  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
174f272a5eSJohn Baldwin  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
184f272a5eSJohn Baldwin  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
194f272a5eSJohn Baldwin  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
204f272a5eSJohn Baldwin  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
214f272a5eSJohn Baldwin  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
224f272a5eSJohn Baldwin  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
234f272a5eSJohn Baldwin  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
244f272a5eSJohn Baldwin  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
254f272a5eSJohn Baldwin  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
264f272a5eSJohn Baldwin  * SUCH DAMAGE.
274f272a5eSJohn Baldwin  */
284f272a5eSJohn Baldwin 
294f272a5eSJohn Baldwin #include "opt_inet.h"
304f272a5eSJohn Baldwin #include "opt_inet6.h"
314f272a5eSJohn Baldwin #include "opt_kern_tls.h"
324f272a5eSJohn Baldwin 
334f272a5eSJohn Baldwin #include <sys/cdefs.h>
344f272a5eSJohn Baldwin __FBSDID("$FreeBSD$");
354f272a5eSJohn Baldwin 
364f272a5eSJohn Baldwin #include <sys/param.h>
374f272a5eSJohn Baldwin #include <sys/ktr.h>
384f272a5eSJohn Baldwin #include <sys/ktls.h>
394f272a5eSJohn Baldwin #include <sys/sglist.h>
404f272a5eSJohn Baldwin #include <sys/socket.h>
414f272a5eSJohn Baldwin #include <sys/socketvar.h>
424f272a5eSJohn Baldwin #include <sys/sockbuf.h>
434f272a5eSJohn Baldwin #include <netinet/in.h>
444f272a5eSJohn Baldwin #include <netinet/in_pcb.h>
454f272a5eSJohn Baldwin #include <netinet/ip.h>
464f272a5eSJohn Baldwin #include <netinet/ip6.h>
474f272a5eSJohn Baldwin #include <netinet/tcp_var.h>
484f272a5eSJohn Baldwin #include <opencrypto/cryptodev.h>
494f272a5eSJohn Baldwin #include <opencrypto/xform.h>
50*9e269eafSJohn Baldwin #include <vm/vm.h>
51*9e269eafSJohn Baldwin #include <vm/pmap.h>
524f272a5eSJohn Baldwin 
534f272a5eSJohn Baldwin #include "common/common.h"
544f272a5eSJohn Baldwin #include "common/t4_regs.h"
554f272a5eSJohn Baldwin #include "common/t4_regs_values.h"
564f272a5eSJohn Baldwin #include "common/t4_tcb.h"
574f272a5eSJohn Baldwin #include "t4_l2t.h"
584f272a5eSJohn Baldwin #include "t4_clip.h"
594f272a5eSJohn Baldwin #include "t4_mp_ring.h"
604f272a5eSJohn Baldwin #include "crypto/t4_crypto.h"
614f272a5eSJohn Baldwin 
624f272a5eSJohn Baldwin #if defined(INET) || defined(INET6)
634f272a5eSJohn Baldwin 
644f272a5eSJohn Baldwin #define TLS_HEADER_LENGTH		5
654f272a5eSJohn Baldwin 
/*
 * Pre-computed values for the two SCMD control words of a crypto work
 * request.  These are built once at send-tag allocation time (already
 * converted to big-endian where noted) and copied into each WR.
 */
struct tls_scmd {
	__be32 seqno_numivs;
	__be32 ivgen_hdrlen;
};
704f272a5eSJohn Baldwin 
/*
 * Per-connection state for a kernel TLS transmit send tag.  Embeds the
 * generic m_snd_tag as its first member so mst_to_tls() can recover the
 * tlspcb from a struct m_snd_tag pointer.
 */
struct tlspcb {
	struct m_snd_tag com;	/* must be first; generic snd tag */
	struct vi_info *vi;	/* virtual interface */
	struct adapter *sc;
	struct sge_txq *txq;	/* TX queue chosen from the inp flowid */

	/* Key context address in adapter key memory, or -1 if unused. */
	int tx_key_addr;
	/* True if the key context is sent inline in each work request. */
	bool inline_key;
	bool tls13;		/* session negotiated TLS 1.3 */
	unsigned char enc_mode;	/* SCMD_CIPH_MODE_* from t4_tls_cipher_mode() */

	struct tls_scmd scmd0;		/* full-record requests */
	struct tls_scmd scmd0_partial;	/* short records using partial GHASH */
	struct tls_scmd scmd0_short;	/* short records without a trailer */

	/* Size of the key context, from t4_tls_key_info_size(). */
	unsigned int tx_key_info_size;

	/* NOTE(review): presumably caches the last MSS used so TCB/LSO
	 * state is only updated on change — confirm in the transmit path. */
	uint16_t prev_mss;

	/* Fields used for GCM records using GHASH state. */
	uint16_t ghash_offset;
	uint64_t ghash_tls_seqno;
	char ghash[AES_GMAC_HASH_LEN];
	bool ghash_valid;
	bool ghash_pending;
	bool ghash_lcb;
	bool queue_mbufs;
	uint8_t rx_chid;	/* channel for GHASH replies (pi->rx_chan) */
	uint16_t rx_qid;	/* absolute rx queue id for GHASH replies */
	/* Mbufs held while waiting for a pending GHASH reply. */
	struct mbufq pending_mbufs;

	/*
	 * Only used outside of setup and teardown when using inline
	 * keys or for partial GCM mode.
	 */
	struct tls_keyctx keyctx;
};
1084f272a5eSJohn Baldwin 
1094f272a5eSJohn Baldwin static void t7_tls_tag_free(struct m_snd_tag *mst);
1104f272a5eSJohn Baldwin static int ktls_setup_keys(struct tlspcb *tlsp,
1114f272a5eSJohn Baldwin     const struct ktls_session *tls, struct sge_txq *txq);
1124f272a5eSJohn Baldwin 
113*9e269eafSJohn Baldwin static void *zero_buffer;
114*9e269eafSJohn Baldwin static vm_paddr_t zero_buffer_pa;
115*9e269eafSJohn Baldwin 
/* Switch structure describing this send-tag type to the mbuf layer. */
static const struct if_snd_tag_sw t7_tls_tag_sw = {
	.snd_tag_free = t7_tls_tag_free,
	.type = IF_SND_TAG_TYPE_TLS
};
1204f272a5eSJohn Baldwin 
/* Recover the containing tlspcb from its embedded generic send tag. */
static inline struct tlspcb *
mst_to_tls(struct m_snd_tag *t)
{
	return (__containerof(t, struct tlspcb, com));
}
1264f272a5eSJohn Baldwin 
/*
 * Allocate and minimally initialize a tlspcb for the given interface.
 *
 * 'flags' is passed through to malloc() (M_WAITOK or M_NOWAIT), so this
 * can return NULL only for M_NOWAIT.  The returned tag holds a single
 * snd-tag reference; release it with m_snd_tag_rele() on error paths.
 */
static struct tlspcb *
alloc_tlspcb(struct ifnet *ifp, struct vi_info *vi, int flags)
{
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct tlspcb *tlsp;

	tlsp = malloc(sizeof(*tlsp), M_CXGBE, M_ZERO | flags);
	if (tlsp == NULL)
		return (NULL);

	m_snd_tag_init(&tlsp->com, ifp, &t7_tls_tag_sw);
	tlsp->vi = vi;
	tlsp->sc = sc;
	tlsp->tx_key_addr = -1;		/* no key in adapter memory yet */
	tlsp->ghash_offset = -1;
	tlsp->rx_chid = pi->rx_chan;
	/*
	 * NOTE(review): this indexes via pi->vi->first_rxq (the port's
	 * first VI) rather than the 'vi' argument — confirm that GHASH
	 * replies are intentionally steered to the port's first VI.
	 */
	tlsp->rx_qid = sc->sge.rxq[pi->vi->first_rxq].iq.abs_id;
	mbufq_init(&tlsp->pending_mbufs, INT_MAX);

	return (tlsp);
}
1494f272a5eSJohn Baldwin 
/*
 * Allocate a TLS send tag for the connection described by 'params'.
 *
 * Validates the session's TLS version and cipher/auth parameters,
 * allocates a tlspcb, programs (or prepares inline) the key context,
 * and pre-computes the SCMD control words used by the three flavors of
 * crypto work request (full record, short record, partial-GHASH short
 * record).  On success *pt holds a referenced send tag; on failure the
 * tag reference is released and an errno value is returned.
 */
int
t7_tls_tag_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params,
    struct m_snd_tag **pt)
{
	const struct ktls_session *tls;
	struct tlspcb *tlsp;
	struct adapter *sc;
	struct vi_info *vi;
	struct inpcb *inp;
	struct sge_txq *txq;
	int error, iv_size, keyid, mac_first;

	tls = params->tls.tls;

	/* TLS 1.1 through TLS 1.3 are currently supported. */
	if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
	    tls->params.tls_vminor < TLS_MINOR_VER_ONE ||
	    tls->params.tls_vminor > TLS_MINOR_VER_THREE)
		return (EPROTONOSUPPORT);

	/* Sanity check values in *tls. */
	switch (tls->params.cipher_algorithm) {
	case CRYPTO_AES_CBC:
		/* XXX: Explicitly ignore any provided IV. */
		switch (tls->params.cipher_key_len) {
		case 128 / 8:
		case 192 / 8:
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}
		switch (tls->params.auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
			break;
		default:
			return (EPROTONOSUPPORT);
		}
		iv_size = AES_BLOCK_LEN;
		mac_first = 1;	/* MAC-then-encrypt for CBC cipher suites */
		break;
	case CRYPTO_AES_NIST_GCM_16:
		switch (tls->params.cipher_key_len) {
		case 128 / 8:
		case 192 / 8:
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}

		/*
		 * The IV size for TLS 1.2 is the explicit IV in the
		 * record header.  For TLS 1.3 it is the size of the
		 * sequence number.
		 */
		iv_size = 8;
		mac_first = 0;	/* encrypt-then-MAC for AEAD */
		break;
	default:
		return (EPROTONOSUPPORT);
	}

	vi = if_getsoftc(ifp);
	sc = vi->adapter;

	tlsp = alloc_tlspcb(ifp, vi, M_WAITOK);

	/*
	 * Pointers with the low bit set in the pointer can't
	 * be stored as the cookie in the CPL_FW6_PLD reply.
	 */
	if (((uintptr_t)tlsp & CPL_FW6_COOKIE_MASK) != 0) {
		error = EINVAL;
		goto failed;
	}

	tlsp->tls13 = tls->params.tls_vminor == TLS_MINOR_VER_THREE;

	/* Prefer a key in adapter memory unless inline keys are forced. */
	if (sc->tlst.inline_keys)
		keyid = -1;
	else
		keyid = t4_alloc_tls_keyid(sc);
	if (keyid < 0) {
		CTR(KTR_CXGBE, "%s: %p using immediate key ctx", __func__,
		    tlsp);
		tlsp->inline_key = true;
	} else {
		tlsp->tx_key_addr = keyid;
		CTR(KTR_CXGBE, "%s: %p allocated TX key addr %#x", __func__,
		    tlsp, tlsp->tx_key_addr);
	}

	inp = params->tls.inp;
	INP_RLOCK(inp);
	if (inp->inp_flags & INP_DROPPED) {
		INP_RUNLOCK(inp);
		error = ECONNRESET;
		goto failed;
	}

	/* Pick a TX queue from the connection's flowid, if it has one. */
	txq = &sc->sge.txq[vi->first_txq];
	if (inp->inp_flowtype != M_HASHTYPE_NONE)
		txq += ((inp->inp_flowid % (vi->ntxq - vi->rsrv_noflowq)) +
		    vi->rsrv_noflowq);
	tlsp->txq = txq;
	INP_RUNLOCK(inp);

	error = ktls_setup_keys(tlsp, tls, txq);
	if (error)
		goto failed;

	tlsp->enc_mode = t4_tls_cipher_mode(tls);
	tlsp->tx_key_info_size = t4_tls_key_info_size(tls);

	/* The SCMD fields used when encrypting a full TLS record. */
	if (tlsp->tls13)
		tlsp->scmd0.seqno_numivs = V_SCMD_SEQ_NO_CTRL(0);
	else
		tlsp->scmd0.seqno_numivs = V_SCMD_SEQ_NO_CTRL(3);
	tlsp->scmd0.seqno_numivs |=
	    V_SCMD_PROTO_VERSION(t4_tls_proto_ver(tls)) |
	    V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
	    V_SCMD_CIPH_AUTH_SEQ_CTRL((mac_first == 0)) |
	    V_SCMD_CIPH_MODE(tlsp->enc_mode) |
	    V_SCMD_AUTH_MODE(t4_tls_auth_mode(tls)) |
	    V_SCMD_HMAC_CTRL(t4_tls_hmac_ctrl(tls)) |
	    V_SCMD_IV_SIZE(iv_size / 2) | V_SCMD_NUM_IVS(1);
	tlsp->scmd0.seqno_numivs = htobe32(tlsp->scmd0.seqno_numivs);

	tlsp->scmd0.ivgen_hdrlen = V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_TLS_FRAG_ENABLE(0);
	if (tlsp->inline_key)
		tlsp->scmd0.ivgen_hdrlen |= V_SCMD_KEY_CTX_INLINE(1);

	/*
	 * The SCMD fields used when encrypting a short TLS record
	 * (no trailer and possibly a truncated payload).
	 */
	tlsp->scmd0_short.seqno_numivs = V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
	    V_SCMD_CIPH_AUTH_SEQ_CTRL((mac_first == 0)) |
	    V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_NOP) |
	    V_SCMD_HMAC_CTRL(SCMD_HMAC_CTRL_NOP) |
	    V_SCMD_IV_SIZE(AES_BLOCK_LEN / 2) | V_SCMD_NUM_IVS(0);
	/* Short GCM records are encrypted as raw AES-CTR (no GHASH). */
	if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM)
		tlsp->scmd0_short.seqno_numivs |=
		    V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_CTR);
	else
		tlsp->scmd0_short.seqno_numivs |=
		    V_SCMD_CIPH_MODE(tlsp->enc_mode);
	tlsp->scmd0_short.seqno_numivs =
	    htobe32(tlsp->scmd0_short.seqno_numivs);

	tlsp->scmd0_short.ivgen_hdrlen = V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_TLS_FRAG_ENABLE(0) | V_SCMD_AADIVDROP(1);
	if (tlsp->inline_key)
		tlsp->scmd0_short.ivgen_hdrlen |= V_SCMD_KEY_CTX_INLINE(1);

	/*
	 * The SCMD fields used when encrypting a short TLS record
	 * using a partial GHASH.
	 */
	tlsp->scmd0_partial.seqno_numivs = V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
	    V_SCMD_CIPH_AUTH_SEQ_CTRL((mac_first == 0)) |
	    V_SCMD_CIPH_MODE(tlsp->enc_mode) |
	    V_SCMD_AUTH_MODE(t4_tls_auth_mode(tls)) |
	    V_SCMD_HMAC_CTRL(t4_tls_hmac_ctrl(tls)) |
	    V_SCMD_IV_SIZE(AES_BLOCK_LEN / 2) | V_SCMD_NUM_IVS(1);
	tlsp->scmd0_partial.seqno_numivs =
	    htobe32(tlsp->scmd0_partial.seqno_numivs);

	/* Partial-GHASH requests always carry the key context inline. */
	tlsp->scmd0_partial.ivgen_hdrlen = V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_TLS_FRAG_ENABLE(0) | V_SCMD_AADIVDROP(1) |
	    V_SCMD_KEY_CTX_INLINE(1);

	TXQ_LOCK(txq);
	if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM)
		txq->kern_tls_gcm++;
	else
		txq->kern_tls_cbc++;
	TXQ_UNLOCK(txq);
	*pt = &tlsp->com;
	return (0);

failed:
	/* Dropping the last reference frees the tlspcb and any key addr. */
	m_snd_tag_rele(&tlsp->com);
	return (error);
}
3444f272a5eSJohn Baldwin 
/*
 * Build the key context for this session and, for non-inline keys,
 * queue a work request that writes the context into adapter memory.
 *
 * Returns 0 on success or an errno value.  On enqueue failure the WR
 * mbuf (and the snd-tag reference it holds) is freed here.
 */
static int
ktls_setup_keys(struct tlspcb *tlsp, const struct ktls_session *tls,
    struct sge_txq *txq)
{
	struct tls_key_req *kwr;
	struct tls_keyctx *kctx;
	void *items[1];
	struct mbuf *m;
	int error;

	/*
	 * Store the salt and keys in the key context.  For
	 * connections with an inline key, this key context is passed
	 * as immediate data in each work request.  For connections
	 * storing the key in DDR, a work request is used to store a
	 * copy of the key context in DDR.
	 */
	t4_tls_key_ctx(tls, KTLS_TX, &tlsp->keyctx);
	if (tlsp->inline_key)
		return (0);

	/* Populate key work request. */
        m = alloc_wr_mbuf(TLS_KEY_WR_SZ, M_NOWAIT);
	if (m == NULL) {
		CTR(KTR_CXGBE, "%s: %p failed to alloc WR mbuf", __func__,
		    tlsp);
		return (ENOMEM);
	}
	/* The WR mbuf carries a reference on the send tag. */
	m->m_pkthdr.snd_tag = m_snd_tag_ref(&tlsp->com);
	m->m_pkthdr.csum_flags |= CSUM_SND_TAG;
	kwr = mtod(m, void *);
	memset(kwr, 0, TLS_KEY_WR_SZ);

	t4_write_tlskey_wr(tls, KTLS_TX, 0, 0, tlsp->tx_key_addr, kwr);
	kctx = (struct tls_keyctx *)(kwr + 1);
	memcpy(kctx, &tlsp->keyctx, sizeof(*kctx));

	/*
	 * Place the key work request in the transmit queue.  It
	 * should be sent to the NIC before any TLS packets using this
	 * session.
	 */
	items[0] = m;
	error = mp_ring_enqueue(txq->r, items, 1, 1);
	if (error)
		m_free(m);
	else
		CTR(KTR_CXGBE, "%s: %p sent key WR", __func__, tlsp);
	return (error);
}
3954f272a5eSJohn Baldwin 
3964f272a5eSJohn Baldwin static u_int
397*9e269eafSJohn Baldwin ktls_base_wr_size(struct tlspcb *tlsp, bool inline_key)
3984f272a5eSJohn Baldwin {
3994f272a5eSJohn Baldwin 	u_int wr_len;
4004f272a5eSJohn Baldwin 
4014f272a5eSJohn Baldwin 	wr_len = sizeof(struct fw_ulptx_wr);	// 16
4024f272a5eSJohn Baldwin 	wr_len += sizeof(struct ulp_txpkt);	// 8
4034f272a5eSJohn Baldwin 	wr_len += sizeof(struct ulptx_idata);	// 8
4044f272a5eSJohn Baldwin 	wr_len += sizeof(struct cpl_tx_sec_pdu);// 32
405*9e269eafSJohn Baldwin 	if (inline_key)
4064f272a5eSJohn Baldwin 		wr_len += tlsp->tx_key_info_size;
4074f272a5eSJohn Baldwin 	else {
4084f272a5eSJohn Baldwin 		wr_len += sizeof(struct ulptx_sc_memrd);// 8
4094f272a5eSJohn Baldwin 		wr_len += sizeof(struct ulptx_idata);	// 8
4104f272a5eSJohn Baldwin 	}
4114f272a5eSJohn Baldwin 	/* SplitMode CPL_RX_PHYS_DSGL here if needed. */
4124f272a5eSJohn Baldwin 	/* CPL_TX_*_LSO here if needed. */
4134f272a5eSJohn Baldwin 	wr_len += sizeof(struct cpl_tx_pkt_core);// 16
4144f272a5eSJohn Baldwin 	return (wr_len);
4154f272a5eSJohn Baldwin }
4164f272a5eSJohn Baldwin 
4174f272a5eSJohn Baldwin static u_int
4184f272a5eSJohn Baldwin ktls_sgl_size(u_int nsegs)
4194f272a5eSJohn Baldwin {
4204f272a5eSJohn Baldwin 	u_int wr_len;
4214f272a5eSJohn Baldwin 
4224f272a5eSJohn Baldwin 	/* First segment is part of ulptx_sgl. */
4234f272a5eSJohn Baldwin 	nsegs--;
4244f272a5eSJohn Baldwin 
4254f272a5eSJohn Baldwin 	wr_len = sizeof(struct ulptx_sgl);
4264f272a5eSJohn Baldwin 	wr_len += 8 * ((3 * nsegs) / 2 + (nsegs & 1));
4274f272a5eSJohn Baldwin 	return (wr_len);
4284f272a5eSJohn Baldwin }
4294f272a5eSJohn Baldwin 
4304f272a5eSJohn Baldwin /*
4314f272a5eSJohn Baldwin  * A request that doesn't need to generate the TLS trailer is a short
4324f272a5eSJohn Baldwin  * record.  For these requests, part of the TLS record payload is
4334f272a5eSJohn Baldwin  * encrypted without invoking the MAC.
4344f272a5eSJohn Baldwin  *
4354f272a5eSJohn Baldwin  * Returns true if this record should be sent as a short record.  In
4364f272a5eSJohn Baldwin  * either case, the remaining outputs describe the how much of the
4374f272a5eSJohn Baldwin  * TLS record to send as input to the crypto block and the amount of
4384f272a5eSJohn Baldwin  * crypto output to trim via SplitMode:
4394f272a5eSJohn Baldwin  *
4404f272a5eSJohn Baldwin  * *header_len - Number of bytes of TLS header to pass as immediate
4414f272a5eSJohn Baldwin  *               data
4424f272a5eSJohn Baldwin  *
4434f272a5eSJohn Baldwin  * *offset - Start offset of TLS record payload to pass as DSGL data
4444f272a5eSJohn Baldwin  *
4454f272a5eSJohn Baldwin  * *plen - Length of TLS record payload to pass as DSGL data
4464f272a5eSJohn Baldwin  *
4474f272a5eSJohn Baldwin  * *leading_waste - amount of non-packet-header bytes to drop at the
4484f272a5eSJohn Baldwin  *                  start of the crypto output
4494f272a5eSJohn Baldwin  *
4504f272a5eSJohn Baldwin  * *trailing_waste - amount of crypto output to drop from the end
4514f272a5eSJohn Baldwin  */
static bool
ktls_is_short_record(struct tlspcb *tlsp, struct mbuf *m_tls, u_int tlen,
    u_int rlen, u_int *header_len, u_int *offset, u_int *plen,
    u_int *leading_waste, u_int *trailing_waste, bool send_partial_ghash,
    bool request_ghash)
{
	u_int new_tlen, trailer_len;

	/* Header-only requests are handled by the caller, not here. */
	MPASS(tlen > m_tls->m_epg_hdrlen);

	/*
	 * For TLS 1.3 treat the inner record type stored as the first
	 * byte of the trailer as part of the payload rather than part
	 * of the trailer.
	 */
	trailer_len = m_tls->m_epg_trllen;
	if (tlsp->tls13)
		trailer_len--;

	/*
	 * Default to sending the full record as input to the crypto
	 * engine and relying on SplitMode to drop any waste.
	 */
	*header_len = m_tls->m_epg_hdrlen;
	*offset = 0;
	*plen = rlen - (m_tls->m_epg_hdrlen + trailer_len);
	*leading_waste = mtod(m_tls, vm_offset_t);
	*trailing_waste = rlen - tlen;
	if (!tlsp->sc->tlst.short_records)
		return (false);

	if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_CBC) {
		/*
		 * For AES-CBC we have to send input from the start of
		 * the TLS record payload that is a multiple of the
		 * block size.  new_tlen rounds up tlen to the end of
		 * the containing AES block.  If this last block
		 * overlaps with the trailer, send the full record to
		 * generate the MAC.
		 */
		new_tlen = TLS_HEADER_LENGTH +
		    roundup2(tlen - TLS_HEADER_LENGTH, AES_BLOCK_LEN);
		if (rlen - new_tlen < trailer_len)
			return (false);

		*trailing_waste = new_tlen - tlen;
		*plen = new_tlen - m_tls->m_epg_hdrlen;
	} else {
		if (rlen - tlen < trailer_len ||
		    (rlen - tlen == trailer_len && request_ghash)) {
			/*
			 * For AES-GCM we have to send the full record
			 * if the end overlaps with the trailer and a
			 * partial GHASH isn't being sent.
			 */
			if (!send_partial_ghash)
				return (false);

			/*
			 * Will need to treat any excess trailer bytes as
			 * trailing waste.  *trailing_waste is already
			 * correct.
			 */
		} else {
			/*
			 * We can use AES-CTR or AES-GCM in partial GHASH
			 * mode to encrypt a partial PDU.
			 *
			 * The last block can be partially encrypted
			 * without any trailing waste.
			 */
			*trailing_waste = 0;
			*plen = tlen - m_tls->m_epg_hdrlen;
		}

		/*
		 * If this request starts at the first byte of the
		 * payload (so the previous request sent the full TLS
		 * header as a tunnel packet) and a partial GHASH is
		 * being requested, the full TLS header must be sent
		 * as input for the GHASH.
		 */
		if (mtod(m_tls, vm_offset_t) == m_tls->m_epg_hdrlen &&
		    request_ghash)
			return (true);

		/*
		 * In addition, we can minimize leading waste by
		 * starting encryption at the start of the closest AES
		 * block.
		 */
		if (mtod(m_tls, vm_offset_t) >= m_tls->m_epg_hdrlen) {
			/* Payload start is in hand; header not needed. */
			*header_len = 0;
			*offset = mtod(m_tls, vm_offset_t) -
			    m_tls->m_epg_hdrlen;
			if (*offset >= *plen)
				*offset = *plen;
			else
				*offset = rounddown2(*offset, AES_BLOCK_LEN);

			/*
			 * If the request is just bytes from the trailer,
			 * trim the offset to the end of the payload.
			 */
			*offset = min(*offset, *plen);
			*plen -= *offset;
			*leading_waste -= (m_tls->m_epg_hdrlen + *offset);
		}
	}
	return (true);
}
5634f272a5eSJohn Baldwin 
564*9e269eafSJohn Baldwin /* Size of the AES-GCM TLS AAD for a given connection. */
565*9e269eafSJohn Baldwin static int
566*9e269eafSJohn Baldwin ktls_gcm_aad_len(struct tlspcb *tlsp)
567*9e269eafSJohn Baldwin {
568*9e269eafSJohn Baldwin 	return (tlsp->tls13 ? sizeof(struct tls_aead_data_13) :
569*9e269eafSJohn Baldwin 	    sizeof(struct tls_aead_data));
570*9e269eafSJohn Baldwin }
571*9e269eafSJohn Baldwin 
5724f272a5eSJohn Baldwin static int
5734f272a5eSJohn Baldwin ktls_wr_len(struct tlspcb *tlsp, struct mbuf *m, struct mbuf *m_tls,
5744f272a5eSJohn Baldwin     int *nsegsp)
5754f272a5eSJohn Baldwin {
576*9e269eafSJohn Baldwin 	const struct tls_record_layer *hdr;
577*9e269eafSJohn Baldwin 	u_int header_len, imm_len, offset, plen, rlen, tlen, wr_len;
5784f272a5eSJohn Baldwin 	u_int leading_waste, trailing_waste;
579*9e269eafSJohn Baldwin 	bool inline_key, last_ghash_frag, request_ghash, send_partial_ghash;
5804f272a5eSJohn Baldwin 	bool short_record;
5814f272a5eSJohn Baldwin 
5824f272a5eSJohn Baldwin 	M_ASSERTEXTPG(m_tls);
5834f272a5eSJohn Baldwin 
5844f272a5eSJohn Baldwin 	/*
5854f272a5eSJohn Baldwin 	 * The relative offset of the last byte to send from the TLS
5864f272a5eSJohn Baldwin 	 * record.
5874f272a5eSJohn Baldwin 	 */
5884f272a5eSJohn Baldwin 	tlen = mtod(m_tls, vm_offset_t) + m_tls->m_len;
5894f272a5eSJohn Baldwin 	if (tlen <= m_tls->m_epg_hdrlen) {
5904f272a5eSJohn Baldwin 		/*
5914f272a5eSJohn Baldwin 		 * For requests that only want to send the TLS header,
5924f272a5eSJohn Baldwin 		 * send a tunnelled packet as immediate data.
5934f272a5eSJohn Baldwin 		 */
5944f272a5eSJohn Baldwin 		wr_len = sizeof(struct fw_eth_tx_pkt_wr) +
5954f272a5eSJohn Baldwin 		    sizeof(struct cpl_tx_pkt_core) +
5964f272a5eSJohn Baldwin 		    roundup2(m->m_len + m_tls->m_len, 16);
5974f272a5eSJohn Baldwin 		if (wr_len > SGE_MAX_WR_LEN) {
5984f272a5eSJohn Baldwin 			CTR(KTR_CXGBE,
5994f272a5eSJohn Baldwin 		    "%s: %p TLS header-only packet too long (len %d)",
6004f272a5eSJohn Baldwin 			    __func__, tlsp, m->m_len + m_tls->m_len);
6014f272a5eSJohn Baldwin 		}
6024f272a5eSJohn Baldwin 
6034f272a5eSJohn Baldwin 		/* This should always be the last TLS record in a chain. */
6044f272a5eSJohn Baldwin 		MPASS(m_tls->m_next == NULL);
6054f272a5eSJohn Baldwin 		*nsegsp = 0;
6064f272a5eSJohn Baldwin 		return (wr_len);
6074f272a5eSJohn Baldwin 	}
6084f272a5eSJohn Baldwin 
609*9e269eafSJohn Baldwin 	hdr = (void *)m_tls->m_epg_hdr;
610*9e269eafSJohn Baldwin 	rlen = TLS_HEADER_LENGTH + ntohs(hdr->tls_length);
611*9e269eafSJohn Baldwin 
612*9e269eafSJohn Baldwin 	/*
613*9e269eafSJohn Baldwin 	 * See if this request might make use of GHASH state.  This
614*9e269eafSJohn Baldwin 	 * errs on the side of over-budgeting the WR size.
615*9e269eafSJohn Baldwin 	 */
616*9e269eafSJohn Baldwin 	last_ghash_frag = false;
617*9e269eafSJohn Baldwin 	request_ghash = false;
618*9e269eafSJohn Baldwin 	send_partial_ghash = false;
619*9e269eafSJohn Baldwin 	if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM &&
620*9e269eafSJohn Baldwin 	    tlsp->sc->tlst.partial_ghash && tlsp->sc->tlst.short_records) {
621*9e269eafSJohn Baldwin 		u_int trailer_len;
622*9e269eafSJohn Baldwin 
623*9e269eafSJohn Baldwin 		trailer_len = m_tls->m_epg_trllen;
624*9e269eafSJohn Baldwin 		if (tlsp->tls13)
625*9e269eafSJohn Baldwin 			trailer_len--;
626*9e269eafSJohn Baldwin 		KASSERT(trailer_len == AES_GMAC_HASH_LEN,
627*9e269eafSJohn Baldwin 		    ("invalid trailer length for AES-GCM"));
628*9e269eafSJohn Baldwin 
629*9e269eafSJohn Baldwin 		/* Is this the start of a TLS record? */
630*9e269eafSJohn Baldwin 		if (mtod(m_tls, vm_offset_t) <= m_tls->m_epg_hdrlen) {
631*9e269eafSJohn Baldwin 			/*
632*9e269eafSJohn Baldwin 			 * Might use partial GHASH if this doesn't
633*9e269eafSJohn Baldwin 			 * send the full record.
634*9e269eafSJohn Baldwin 			 */
635*9e269eafSJohn Baldwin 			if (tlen < rlen) {
636*9e269eafSJohn Baldwin 				if (tlen < (rlen - trailer_len))
637*9e269eafSJohn Baldwin 					send_partial_ghash = true;
638*9e269eafSJohn Baldwin 				request_ghash = true;
639*9e269eafSJohn Baldwin 			}
640*9e269eafSJohn Baldwin 		} else {
641*9e269eafSJohn Baldwin 			send_partial_ghash = true;
642*9e269eafSJohn Baldwin 			if (tlen < rlen)
643*9e269eafSJohn Baldwin 				request_ghash = true;
644*9e269eafSJohn Baldwin 			if (tlen >= (rlen - trailer_len))
645*9e269eafSJohn Baldwin 				last_ghash_frag = true;
646*9e269eafSJohn Baldwin 		}
647*9e269eafSJohn Baldwin 	}
648*9e269eafSJohn Baldwin 
649*9e269eafSJohn Baldwin 	/*
650*9e269eafSJohn Baldwin 	 * Assume not sending partial GHASH for this call to get the
651*9e269eafSJohn Baldwin 	 * larger size.
652*9e269eafSJohn Baldwin 	 */
653*9e269eafSJohn Baldwin 	short_record = ktls_is_short_record(tlsp, m_tls, tlen, rlen,
654*9e269eafSJohn Baldwin 	    &header_len, &offset, &plen, &leading_waste, &trailing_waste,
655*9e269eafSJohn Baldwin 	    false, request_ghash);
656*9e269eafSJohn Baldwin 
657*9e269eafSJohn Baldwin 	inline_key = send_partial_ghash || tlsp->inline_key;
6584f272a5eSJohn Baldwin 
6594f272a5eSJohn Baldwin 	/* Calculate the size of the work request. */
660*9e269eafSJohn Baldwin 	wr_len = ktls_base_wr_size(tlsp, inline_key);
661*9e269eafSJohn Baldwin 
662*9e269eafSJohn Baldwin 	if (send_partial_ghash)
663*9e269eafSJohn Baldwin 		wr_len += AES_GMAC_HASH_LEN;
6644f272a5eSJohn Baldwin 
6654f272a5eSJohn Baldwin 	if (leading_waste != 0 || trailing_waste != 0) {
6664f272a5eSJohn Baldwin 		/*
6674f272a5eSJohn Baldwin 		 * Partial records might require a SplitMode
6684f272a5eSJohn Baldwin 		 * CPL_RX_PHYS_DSGL.
6694f272a5eSJohn Baldwin 		 */
6704f272a5eSJohn Baldwin 		wr_len += sizeof(struct cpl_t7_rx_phys_dsgl);
6714f272a5eSJohn Baldwin 	}
6724f272a5eSJohn Baldwin 
6734f272a5eSJohn Baldwin 	/* Budget for an LSO header even if we don't use it. */
6744f272a5eSJohn Baldwin 	wr_len += sizeof(struct cpl_tx_pkt_lso_core);
6754f272a5eSJohn Baldwin 
6764f272a5eSJohn Baldwin 	/*
6774f272a5eSJohn Baldwin 	 * Headers (including the TLS header) are always sent as
6784f272a5eSJohn Baldwin 	 * immediate data.  Short records include a raw AES IV as
6795b9dc22cSJohn Baldwin 	 * immediate data.  TLS 1.3 non-short records include a
6805b9dc22cSJohn Baldwin 	 * placeholder for the sequence number as immediate data.
681*9e269eafSJohn Baldwin 	 * Short records using a partial hash may also need to send
682*9e269eafSJohn Baldwin 	 * TLS AAD.  If a partial hash might be sent, assume a short
683*9e269eafSJohn Baldwin 	 * record to get the larger size.
6844f272a5eSJohn Baldwin 	 */
6854f272a5eSJohn Baldwin 	imm_len = m->m_len + header_len;
686*9e269eafSJohn Baldwin 	if (short_record || send_partial_ghash) {
6874f272a5eSJohn Baldwin 		imm_len += AES_BLOCK_LEN;
688*9e269eafSJohn Baldwin 		if (send_partial_ghash && header_len != 0)
689*9e269eafSJohn Baldwin 			imm_len += ktls_gcm_aad_len(tlsp);
690*9e269eafSJohn Baldwin 	} else if (tlsp->tls13)
6915b9dc22cSJohn Baldwin 		imm_len += sizeof(uint64_t);
6924f272a5eSJohn Baldwin 	wr_len += roundup2(imm_len, 16);
6934f272a5eSJohn Baldwin 
694*9e269eafSJohn Baldwin 	/*
695*9e269eafSJohn Baldwin 	 * TLS record payload via DSGL.  For partial GCM mode we
696*9e269eafSJohn Baldwin 	 * might need an extra SG entry for a placeholder.
697*9e269eafSJohn Baldwin 	 */
6984f272a5eSJohn Baldwin 	*nsegsp = sglist_count_mbuf_epg(m_tls, m_tls->m_epg_hdrlen + offset,
6994f272a5eSJohn Baldwin 	    plen);
700*9e269eafSJohn Baldwin 	wr_len += ktls_sgl_size(*nsegsp + (last_ghash_frag ? 1 : 0));
701*9e269eafSJohn Baldwin 
702*9e269eafSJohn Baldwin 	if (request_ghash) {
703*9e269eafSJohn Baldwin 		/* AES-GCM records might return a partial hash. */
704*9e269eafSJohn Baldwin 		wr_len += sizeof(struct ulp_txpkt);
705*9e269eafSJohn Baldwin 		wr_len += sizeof(struct ulptx_idata);
706*9e269eafSJohn Baldwin 		wr_len += sizeof(struct cpl_tx_tls_ack);
707*9e269eafSJohn Baldwin 		wr_len += sizeof(struct rss_header) +
708*9e269eafSJohn Baldwin 		    sizeof(struct cpl_fw6_pld);
709*9e269eafSJohn Baldwin 		wr_len += AES_GMAC_HASH_LEN;
710*9e269eafSJohn Baldwin 	}
7114f272a5eSJohn Baldwin 
7124f272a5eSJohn Baldwin 	wr_len = roundup2(wr_len, 16);
7134f272a5eSJohn Baldwin 	return (wr_len);
7144f272a5eSJohn Baldwin }
7154f272a5eSJohn Baldwin 
716*9e269eafSJohn Baldwin /* Queue the next pending packet. */
717*9e269eafSJohn Baldwin static void
718*9e269eafSJohn Baldwin ktls_queue_next_packet(struct tlspcb *tlsp, bool enqueue_only)
719*9e269eafSJohn Baldwin {
720*9e269eafSJohn Baldwin #ifdef KTR
721*9e269eafSJohn Baldwin 	struct ether_header *eh;
722*9e269eafSJohn Baldwin 	struct tcphdr *tcp;
723*9e269eafSJohn Baldwin 	tcp_seq tcp_seqno;
724*9e269eafSJohn Baldwin #endif
725*9e269eafSJohn Baldwin 	struct mbuf *m;
726*9e269eafSJohn Baldwin 	void *items[1];
727*9e269eafSJohn Baldwin 	int rc;
728*9e269eafSJohn Baldwin 
729*9e269eafSJohn Baldwin 	TXQ_LOCK_ASSERT_OWNED(tlsp->txq);
730*9e269eafSJohn Baldwin 	KASSERT(tlsp->queue_mbufs, ("%s: mbufs not being queued for %p",
731*9e269eafSJohn Baldwin 	    __func__, tlsp));
732*9e269eafSJohn Baldwin 	for (;;) {
733*9e269eafSJohn Baldwin 		m = mbufq_dequeue(&tlsp->pending_mbufs);
734*9e269eafSJohn Baldwin 		if (m == NULL) {
735*9e269eafSJohn Baldwin 			tlsp->queue_mbufs = false;
736*9e269eafSJohn Baldwin 			return;
737*9e269eafSJohn Baldwin 		}
738*9e269eafSJohn Baldwin 
739*9e269eafSJohn Baldwin #ifdef KTR
740*9e269eafSJohn Baldwin 		eh = mtod(m, struct ether_header *);
741*9e269eafSJohn Baldwin 		tcp = (struct tcphdr *)((char *)eh + m->m_pkthdr.l2hlen +
742*9e269eafSJohn Baldwin 		    m->m_pkthdr.l3hlen);
743*9e269eafSJohn Baldwin 		tcp_seqno = ntohl(tcp->th_seq);
744*9e269eafSJohn Baldwin #ifdef VERBOSE_TRACES
745*9e269eafSJohn Baldwin 		CTR(KTR_CXGBE, "%s: pkt len %d TCP seq %u", __func__,
746*9e269eafSJohn Baldwin 		    m->m_pkthdr.len, tcp_seqno);
747*9e269eafSJohn Baldwin #endif
748*9e269eafSJohn Baldwin #endif
749*9e269eafSJohn Baldwin 
750*9e269eafSJohn Baldwin 		items[0] = m;
751*9e269eafSJohn Baldwin 		if (enqueue_only)
752*9e269eafSJohn Baldwin 			rc = mp_ring_enqueue_only(tlsp->txq->r, items, 1);
753*9e269eafSJohn Baldwin 		else {
754*9e269eafSJohn Baldwin 			TXQ_UNLOCK(tlsp->txq);
755*9e269eafSJohn Baldwin 			rc = mp_ring_enqueue(tlsp->txq->r, items, 1, 256);
756*9e269eafSJohn Baldwin 			TXQ_LOCK(tlsp->txq);
757*9e269eafSJohn Baldwin 		}
758*9e269eafSJohn Baldwin 		if (__predict_true(rc == 0))
759*9e269eafSJohn Baldwin 			return;
760*9e269eafSJohn Baldwin 
761*9e269eafSJohn Baldwin 		CTR(KTR_CXGBE, "%s: pkt len %d TCP seq %u dropped", __func__,
762*9e269eafSJohn Baldwin 		    m->m_pkthdr.len, tcp_seqno);
763*9e269eafSJohn Baldwin 		m_freem(m);
764*9e269eafSJohn Baldwin 	}
765*9e269eafSJohn Baldwin }
766*9e269eafSJohn Baldwin 
7674f272a5eSJohn Baldwin int
7684f272a5eSJohn Baldwin t7_ktls_parse_pkt(struct mbuf *m)
7694f272a5eSJohn Baldwin {
7704f272a5eSJohn Baldwin 	struct tlspcb *tlsp;
7714f272a5eSJohn Baldwin 	struct ether_header *eh;
7724f272a5eSJohn Baldwin 	struct ip *ip;
7734f272a5eSJohn Baldwin 	struct ip6_hdr *ip6;
7744f272a5eSJohn Baldwin 	struct tcphdr *tcp;
7754f272a5eSJohn Baldwin 	struct mbuf *m_tls;
7764f272a5eSJohn Baldwin 	void *items[1];
777*9e269eafSJohn Baldwin 	int error, nsegs;
7784f272a5eSJohn Baldwin 	u_int wr_len, tot_len;
7794f272a5eSJohn Baldwin 	uint16_t eh_type;
7804f272a5eSJohn Baldwin 
7814f272a5eSJohn Baldwin 	/*
7824f272a5eSJohn Baldwin 	 * Locate headers in initial mbuf.
7834f272a5eSJohn Baldwin 	 *
7844f272a5eSJohn Baldwin 	 * XXX: This assumes all of the headers are in the initial mbuf.
7854f272a5eSJohn Baldwin 	 * Could perhaps use m_advance() like parse_pkt() if that turns
7864f272a5eSJohn Baldwin 	 * out to not be true.
7874f272a5eSJohn Baldwin 	 */
7884f272a5eSJohn Baldwin 	M_ASSERTPKTHDR(m);
7894f272a5eSJohn Baldwin 	MPASS(m->m_pkthdr.snd_tag != NULL);
7904f272a5eSJohn Baldwin 	tlsp = mst_to_tls(m->m_pkthdr.snd_tag);
7914f272a5eSJohn Baldwin 
7924f272a5eSJohn Baldwin 	if (m->m_len <= sizeof(*eh) + sizeof(*ip)) {
7934f272a5eSJohn Baldwin 		CTR(KTR_CXGBE, "%s: %p header mbuf too short", __func__, tlsp);
7944f272a5eSJohn Baldwin 		return (EINVAL);
7954f272a5eSJohn Baldwin 	}
7964f272a5eSJohn Baldwin 	eh = mtod(m, struct ether_header *);
7974f272a5eSJohn Baldwin 	eh_type = ntohs(eh->ether_type);
7984f272a5eSJohn Baldwin 	if (eh_type == ETHERTYPE_VLAN) {
7994f272a5eSJohn Baldwin 		struct ether_vlan_header *evh = (void *)eh;
8004f272a5eSJohn Baldwin 
8014f272a5eSJohn Baldwin 		eh_type = ntohs(evh->evl_proto);
8024f272a5eSJohn Baldwin 		m->m_pkthdr.l2hlen = sizeof(*evh);
8034f272a5eSJohn Baldwin 	} else
8044f272a5eSJohn Baldwin 		m->m_pkthdr.l2hlen = sizeof(*eh);
8054f272a5eSJohn Baldwin 
8064f272a5eSJohn Baldwin 	switch (eh_type) {
8074f272a5eSJohn Baldwin 	case ETHERTYPE_IP:
8084f272a5eSJohn Baldwin 		ip = (struct ip *)(eh + 1);
8094f272a5eSJohn Baldwin 		if (ip->ip_p != IPPROTO_TCP) {
8104f272a5eSJohn Baldwin 			CTR(KTR_CXGBE, "%s: %p mbuf not IPPROTO_TCP", __func__,
8114f272a5eSJohn Baldwin 			    tlsp);
8124f272a5eSJohn Baldwin 			return (EINVAL);
8134f272a5eSJohn Baldwin 		}
8144f272a5eSJohn Baldwin 		m->m_pkthdr.l3hlen = ip->ip_hl * 4;
8154f272a5eSJohn Baldwin 		break;
8164f272a5eSJohn Baldwin 	case ETHERTYPE_IPV6:
8174f272a5eSJohn Baldwin 		ip6 = (struct ip6_hdr *)(eh + 1);
8184f272a5eSJohn Baldwin 		if (ip6->ip6_nxt != IPPROTO_TCP) {
8194f272a5eSJohn Baldwin 			CTR(KTR_CXGBE, "%s: %p, mbuf not IPPROTO_TCP (%u)",
8204f272a5eSJohn Baldwin 			    __func__, tlsp, ip6->ip6_nxt);
8214f272a5eSJohn Baldwin 			return (EINVAL);
8224f272a5eSJohn Baldwin 		}
8234f272a5eSJohn Baldwin 		m->m_pkthdr.l3hlen = sizeof(struct ip6_hdr);
8244f272a5eSJohn Baldwin 		break;
8254f272a5eSJohn Baldwin 	default:
8264f272a5eSJohn Baldwin 		CTR(KTR_CXGBE, "%s: %p mbuf not ETHERTYPE_IP{,V6}", __func__,
8274f272a5eSJohn Baldwin 		    tlsp);
8284f272a5eSJohn Baldwin 		return (EINVAL);
8294f272a5eSJohn Baldwin 	}
8304f272a5eSJohn Baldwin 	if (m->m_len < m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen +
8314f272a5eSJohn Baldwin 	    sizeof(*tcp)) {
8324f272a5eSJohn Baldwin 		CTR(KTR_CXGBE, "%s: %p header mbuf too short (2)", __func__,
8334f272a5eSJohn Baldwin 		    tlsp);
8344f272a5eSJohn Baldwin 		return (EINVAL);
8354f272a5eSJohn Baldwin 	}
8364f272a5eSJohn Baldwin 	tcp = (struct tcphdr *)((char *)(eh + 1) + m->m_pkthdr.l3hlen);
8374f272a5eSJohn Baldwin 	m->m_pkthdr.l4hlen = tcp->th_off * 4;
8384f272a5eSJohn Baldwin 
8394f272a5eSJohn Baldwin 	/* Bail if there is TCP payload before the TLS record. */
8404f272a5eSJohn Baldwin 	if (m->m_len != m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen +
8414f272a5eSJohn Baldwin 	    m->m_pkthdr.l4hlen) {
8424f272a5eSJohn Baldwin 		CTR(KTR_CXGBE,
8434f272a5eSJohn Baldwin 		    "%s: %p header mbuf bad length (%d + %d + %d != %d)",
8444f272a5eSJohn Baldwin 		    __func__, tlsp, m->m_pkthdr.l2hlen, m->m_pkthdr.l3hlen,
8454f272a5eSJohn Baldwin 		    m->m_pkthdr.l4hlen, m->m_len);
8464f272a5eSJohn Baldwin 		return (EINVAL);
8474f272a5eSJohn Baldwin 	}
8484f272a5eSJohn Baldwin 
8494f272a5eSJohn Baldwin 	/* Assume all headers are in 'm' for now. */
8504f272a5eSJohn Baldwin 	MPASS(m->m_next != NULL);
8514f272a5eSJohn Baldwin 	MPASS(m->m_next->m_flags & M_EXTPG);
8524f272a5eSJohn Baldwin 
8534f272a5eSJohn Baldwin 	tot_len = 0;
8544f272a5eSJohn Baldwin 
8554f272a5eSJohn Baldwin 	/*
8564f272a5eSJohn Baldwin 	 * Each of the remaining mbufs in the chain should reference a
8574f272a5eSJohn Baldwin 	 * TLS record.
8584f272a5eSJohn Baldwin 	 */
8594f272a5eSJohn Baldwin 	for (m_tls = m->m_next; m_tls != NULL; m_tls = m_tls->m_next) {
8604f272a5eSJohn Baldwin 		MPASS(m_tls->m_flags & M_EXTPG);
8614f272a5eSJohn Baldwin 
8624f272a5eSJohn Baldwin 		wr_len = ktls_wr_len(tlsp, m, m_tls, &nsegs);
8634f272a5eSJohn Baldwin #ifdef VERBOSE_TRACES
8644f272a5eSJohn Baldwin 		CTR(KTR_CXGBE, "%s: %p wr_len %d nsegs %d", __func__, tlsp,
8654f272a5eSJohn Baldwin 		    wr_len, nsegs);
8664f272a5eSJohn Baldwin #endif
8674f272a5eSJohn Baldwin 		if (wr_len > SGE_MAX_WR_LEN || nsegs > TX_SGL_SEGS)
8684f272a5eSJohn Baldwin 			return (EFBIG);
8694f272a5eSJohn Baldwin 		tot_len += roundup2(wr_len, EQ_ESIZE);
8704f272a5eSJohn Baldwin 
8714f272a5eSJohn Baldwin 		/*
8724f272a5eSJohn Baldwin 		 * Store 'nsegs' for the first TLS record in the
8734f272a5eSJohn Baldwin 		 * header mbuf's metadata.
8744f272a5eSJohn Baldwin 		 */
8754f272a5eSJohn Baldwin 		if (m_tls == m->m_next)
8764f272a5eSJohn Baldwin 			set_mbuf_nsegs(m, nsegs);
8774f272a5eSJohn Baldwin 	}
8784f272a5eSJohn Baldwin 
8794f272a5eSJohn Baldwin 	MPASS(tot_len != 0);
8804f272a5eSJohn Baldwin 	set_mbuf_len16(m, tot_len / 16);
881*9e269eafSJohn Baldwin 
882*9e269eafSJohn Baldwin 	if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) {
883*9e269eafSJohn Baldwin 		/* Defer packets beyond what has been sent so far. */
884*9e269eafSJohn Baldwin 		TXQ_LOCK(tlsp->txq);
885*9e269eafSJohn Baldwin 		if (tlsp->queue_mbufs) {
886*9e269eafSJohn Baldwin 			error = mbufq_enqueue(&tlsp->pending_mbufs, m);
887*9e269eafSJohn Baldwin 			if (error == 0) {
888*9e269eafSJohn Baldwin #ifdef VERBOSE_TRACES
889*9e269eafSJohn Baldwin 				CTR(KTR_CXGBE,
890*9e269eafSJohn Baldwin 				    "%s: %p len16 %d nsegs %d TCP seq %u deferred",
891*9e269eafSJohn Baldwin 				    __func__, tlsp, mbuf_len16(m),
892*9e269eafSJohn Baldwin 				    mbuf_nsegs(m), ntohl(tcp->th_seq));
893*9e269eafSJohn Baldwin #endif
894*9e269eafSJohn Baldwin 			}
895*9e269eafSJohn Baldwin 			TXQ_UNLOCK(tlsp->txq);
896*9e269eafSJohn Baldwin 			return (error);
897*9e269eafSJohn Baldwin 		}
898*9e269eafSJohn Baldwin 		tlsp->queue_mbufs = true;
899*9e269eafSJohn Baldwin 		TXQ_UNLOCK(tlsp->txq);
900*9e269eafSJohn Baldwin 	}
901*9e269eafSJohn Baldwin 
9024f272a5eSJohn Baldwin #ifdef VERBOSE_TRACES
9034f272a5eSJohn Baldwin 	CTR(KTR_CXGBE, "%s: %p len16 %d nsegs %d", __func__, tlsp,
9044f272a5eSJohn Baldwin 	    mbuf_len16(m), mbuf_nsegs(m));
9054f272a5eSJohn Baldwin #endif
9064f272a5eSJohn Baldwin 	items[0] = m;
907*9e269eafSJohn Baldwin 	error = mp_ring_enqueue(tlsp->txq->r, items, 1, 256);
908*9e269eafSJohn Baldwin 	if (__predict_false(error != 0)) {
909*9e269eafSJohn Baldwin 		if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) {
910*9e269eafSJohn Baldwin 			TXQ_LOCK(tlsp->txq);
911*9e269eafSJohn Baldwin 			ktls_queue_next_packet(tlsp, false);
912*9e269eafSJohn Baldwin 			TXQ_UNLOCK(tlsp->txq);
913*9e269eafSJohn Baldwin 		}
914*9e269eafSJohn Baldwin 	}
915*9e269eafSJohn Baldwin 	return (error);
9164f272a5eSJohn Baldwin }
9174f272a5eSJohn Baldwin 
9184f272a5eSJohn Baldwin static inline bool
9194f272a5eSJohn Baldwin needs_vlan_insertion(struct mbuf *m)
9204f272a5eSJohn Baldwin {
9214f272a5eSJohn Baldwin 
9224f272a5eSJohn Baldwin 	M_ASSERTPKTHDR(m);
9234f272a5eSJohn Baldwin 
9244f272a5eSJohn Baldwin 	return (m->m_flags & M_VLANTAG);
9254f272a5eSJohn Baldwin }
9264f272a5eSJohn Baldwin 
9274f272a5eSJohn Baldwin static inline uint64_t
9284f272a5eSJohn Baldwin pkt_ctrl1(struct sge_txq *txq, struct mbuf *m, uint16_t eh_type)
9294f272a5eSJohn Baldwin {
9304f272a5eSJohn Baldwin 	uint64_t ctrl1;
9314f272a5eSJohn Baldwin 
9324f272a5eSJohn Baldwin 	/* Checksums are always offloaded */
9334f272a5eSJohn Baldwin 	if (eh_type == ETHERTYPE_IP) {
9344f272a5eSJohn Baldwin 		ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP) |
9354f272a5eSJohn Baldwin 		    V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) |
9364f272a5eSJohn Baldwin 		    V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen);
9374f272a5eSJohn Baldwin 	} else {
9384f272a5eSJohn Baldwin 		MPASS(m->m_pkthdr.l3hlen == sizeof(struct ip6_hdr));
9394f272a5eSJohn Baldwin 		ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP6) |
9404f272a5eSJohn Baldwin 		    V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) |
9414f272a5eSJohn Baldwin 		    V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen);
9424f272a5eSJohn Baldwin 	}
9434f272a5eSJohn Baldwin 	txq->txcsum++;
9444f272a5eSJohn Baldwin 
9454f272a5eSJohn Baldwin 	/* VLAN tag insertion */
9464f272a5eSJohn Baldwin 	if (needs_vlan_insertion(m)) {
9474f272a5eSJohn Baldwin 		ctrl1 |= F_TXPKT_VLAN_VLD |
9484f272a5eSJohn Baldwin 		    V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
9494f272a5eSJohn Baldwin 		txq->vlan_insertion++;
9504f272a5eSJohn Baldwin 	}
9514f272a5eSJohn Baldwin 
9524f272a5eSJohn Baldwin 	return (ctrl1);
9534f272a5eSJohn Baldwin }
9544f272a5eSJohn Baldwin 
9554f272a5eSJohn Baldwin static inline void *
9564f272a5eSJohn Baldwin write_lso_cpl(void *cpl, struct mbuf *m0, uint16_t mss, uint16_t eh_type,
9574f272a5eSJohn Baldwin     int total_len)
9584f272a5eSJohn Baldwin {
9594f272a5eSJohn Baldwin 	struct cpl_tx_pkt_lso_core *lso;
9604f272a5eSJohn Baldwin 	uint32_t ctrl;
9614f272a5eSJohn Baldwin 
9624f272a5eSJohn Baldwin 	KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 &&
9634f272a5eSJohn Baldwin 	    m0->m_pkthdr.l4hlen > 0,
9644f272a5eSJohn Baldwin 	    ("%s: mbuf %p needs TSO but missing header lengths",
9654f272a5eSJohn Baldwin 		__func__, m0));
9664f272a5eSJohn Baldwin 
9674f272a5eSJohn Baldwin 	ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) |
9684f272a5eSJohn Baldwin 	    F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
9694f272a5eSJohn Baldwin 	    V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen - ETHER_HDR_LEN) >> 2) |
9704f272a5eSJohn Baldwin 	    V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) |
9714f272a5eSJohn Baldwin 	    V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2);
9724f272a5eSJohn Baldwin 	if (eh_type == ETHERTYPE_IPV6)
9734f272a5eSJohn Baldwin 		ctrl |= F_LSO_IPV6;
9744f272a5eSJohn Baldwin 
9754f272a5eSJohn Baldwin 	lso = cpl;
9764f272a5eSJohn Baldwin 	lso->lso_ctrl = htobe32(ctrl);
9774f272a5eSJohn Baldwin 	lso->ipid_ofst = htobe16(0);
9784f272a5eSJohn Baldwin 	lso->mss = htobe16(mss);
9794f272a5eSJohn Baldwin 	lso->seqno_offset = htobe32(0);
9804f272a5eSJohn Baldwin 	lso->len = htobe32(total_len);
9814f272a5eSJohn Baldwin 
9824f272a5eSJohn Baldwin 	return (lso + 1);
9834f272a5eSJohn Baldwin }
9844f272a5eSJohn Baldwin 
9854f272a5eSJohn Baldwin static inline void *
986*9e269eafSJohn Baldwin write_tx_tls_ack(void *dst, u_int rx_chid, u_int hash_len, bool ghash_lcb)
987*9e269eafSJohn Baldwin {
988*9e269eafSJohn Baldwin 	struct cpl_tx_tls_ack *cpl;
989*9e269eafSJohn Baldwin 	uint32_t flags;
990*9e269eafSJohn Baldwin 
991*9e269eafSJohn Baldwin 	flags = ghash_lcb ? F_CPL_TX_TLS_ACK_LCB : F_CPL_TX_TLS_ACK_PHASH;
992*9e269eafSJohn Baldwin 	cpl = dst;
993*9e269eafSJohn Baldwin 	cpl->op_to_Rsvd2 = htobe32(V_CPL_TX_TLS_ACK_OPCODE(CPL_TX_TLS_ACK) |
994*9e269eafSJohn Baldwin 	    V_T7_CPL_TX_TLS_ACK_RXCHID(rx_chid) | F_CPL_TX_TLS_ACK_ULPTXLPBK |
995*9e269eafSJohn Baldwin 	    flags);
996*9e269eafSJohn Baldwin 
997*9e269eafSJohn Baldwin 	/* 32 == AckEncCpl, 16 == LCB */
998*9e269eafSJohn Baldwin 	cpl->PldLen = htobe32(V_CPL_TX_TLS_ACK_PLDLEN(32 + 16 + hash_len));
999*9e269eafSJohn Baldwin 	cpl->Rsvd3 = 0;
1000*9e269eafSJohn Baldwin 
1001*9e269eafSJohn Baldwin 	return (cpl + 1);
1002*9e269eafSJohn Baldwin }
1003*9e269eafSJohn Baldwin 
1004*9e269eafSJohn Baldwin static inline void *
1005*9e269eafSJohn Baldwin write_fw6_pld(void *dst, u_int rx_chid, u_int rx_qid, u_int hash_len,
1006*9e269eafSJohn Baldwin     uint64_t cookie)
1007*9e269eafSJohn Baldwin {
1008*9e269eafSJohn Baldwin 	struct rss_header *rss;
1009*9e269eafSJohn Baldwin 	struct cpl_fw6_pld *cpl;
1010*9e269eafSJohn Baldwin 
1011*9e269eafSJohn Baldwin 	rss = dst;
1012*9e269eafSJohn Baldwin 	memset(rss, 0, sizeof(*rss));
1013*9e269eafSJohn Baldwin 	rss->opcode = CPL_FW6_PLD;
1014*9e269eafSJohn Baldwin 	rss->qid = htobe16(rx_qid);
1015*9e269eafSJohn Baldwin 	rss->channel = rx_chid;
1016*9e269eafSJohn Baldwin 
1017*9e269eafSJohn Baldwin 	cpl = (void *)(rss + 1);
1018*9e269eafSJohn Baldwin 	memset(cpl, 0, sizeof(*cpl));
1019*9e269eafSJohn Baldwin 	cpl->opcode = CPL_FW6_PLD;
1020*9e269eafSJohn Baldwin 	cpl->len = htobe16(hash_len);
1021*9e269eafSJohn Baldwin 	cpl->data[1] = htobe64(cookie);
1022*9e269eafSJohn Baldwin 
1023*9e269eafSJohn Baldwin 	return (cpl + 1);
1024*9e269eafSJohn Baldwin }
1025*9e269eafSJohn Baldwin 
1026*9e269eafSJohn Baldwin static inline void *
10274f272a5eSJohn Baldwin write_split_mode_rx_phys(void *dst, struct mbuf *m, struct mbuf *m_tls,
10284f272a5eSJohn Baldwin     u_int crypto_hdr_len, u_int leading_waste, u_int trailing_waste)
10294f272a5eSJohn Baldwin {
10304f272a5eSJohn Baldwin 	struct cpl_t7_rx_phys_dsgl *cpl;
10314f272a5eSJohn Baldwin 	uint16_t *len;
10324f272a5eSJohn Baldwin 	uint8_t numsge;
10334f272a5eSJohn Baldwin 
10344f272a5eSJohn Baldwin 	/* Forward first (3) and third (1) segments. */
10354f272a5eSJohn Baldwin 	numsge = 0xa;
10364f272a5eSJohn Baldwin 
10374f272a5eSJohn Baldwin 	cpl = dst;
10384f272a5eSJohn Baldwin 	cpl->ot.opcode = CPL_RX_PHYS_DSGL;
10394f272a5eSJohn Baldwin 	cpl->PhysAddrFields_lo_to_NumSGE =
10404f272a5eSJohn Baldwin 	    htobe32(F_CPL_T7_RX_PHYS_DSGL_SPLITMODE |
10414f272a5eSJohn Baldwin 	    V_CPL_T7_RX_PHYS_DSGL_NUMSGE(numsge));
10424f272a5eSJohn Baldwin 
10434f272a5eSJohn Baldwin 	len = (uint16_t *)(cpl->RSSCopy);
10444f272a5eSJohn Baldwin 
10454f272a5eSJohn Baldwin 	/*
10464f272a5eSJohn Baldwin 	 * First segment always contains packet headers as well as
10474f272a5eSJohn Baldwin 	 * transmit-related CPLs.
10484f272a5eSJohn Baldwin 	 */
10494f272a5eSJohn Baldwin 	len[0] = htobe16(crypto_hdr_len);
10504f272a5eSJohn Baldwin 
10514f272a5eSJohn Baldwin 	/*
10524f272a5eSJohn Baldwin 	 * Second segment is "gap" of data to drop at the front of the
10534f272a5eSJohn Baldwin 	 * TLS record.
10544f272a5eSJohn Baldwin 	 */
10554f272a5eSJohn Baldwin 	len[1] = htobe16(leading_waste);
10564f272a5eSJohn Baldwin 
10574f272a5eSJohn Baldwin 	/* Third segment is how much of the TLS record to send. */
10584f272a5eSJohn Baldwin 	len[2] = htobe16(m_tls->m_len);
10594f272a5eSJohn Baldwin 
10604f272a5eSJohn Baldwin 	/* Fourth segment is how much data to drop at the end. */
10614f272a5eSJohn Baldwin 	len[3] = htobe16(trailing_waste);
10624f272a5eSJohn Baldwin 
10634f272a5eSJohn Baldwin #ifdef VERBOSE_TRACES
10644f272a5eSJohn Baldwin 	CTR(KTR_CXGBE, "%s: forward %u skip %u forward %u skip %u",
10654f272a5eSJohn Baldwin 	    __func__, be16toh(len[0]), be16toh(len[1]), be16toh(len[2]),
10664f272a5eSJohn Baldwin 	    be16toh(len[3]));
10674f272a5eSJohn Baldwin #endif
10684f272a5eSJohn Baldwin 	return (cpl + 1);
10694f272a5eSJohn Baldwin }
10704f272a5eSJohn Baldwin 
10714f272a5eSJohn Baldwin /*
10724f272a5eSJohn Baldwin  * If the SGL ends on an address that is not 16 byte aligned, this function will
10734f272a5eSJohn Baldwin  * add a 0 filled flit at the end.
10744f272a5eSJohn Baldwin  */
1075*9e269eafSJohn Baldwin static void *
10764f272a5eSJohn Baldwin write_gl_to_buf(struct sglist *gl, caddr_t to)
10774f272a5eSJohn Baldwin {
10784f272a5eSJohn Baldwin 	struct sglist_seg *seg;
10794f272a5eSJohn Baldwin 	__be64 *flitp;
10804f272a5eSJohn Baldwin 	struct ulptx_sgl *usgl;
10814f272a5eSJohn Baldwin 	int i, nflits, nsegs;
10824f272a5eSJohn Baldwin 
10834f272a5eSJohn Baldwin 	KASSERT(((uintptr_t)to & 0xf) == 0,
10844f272a5eSJohn Baldwin 	    ("%s: SGL must start at a 16 byte boundary: %p", __func__, to));
10854f272a5eSJohn Baldwin 
10864f272a5eSJohn Baldwin 	nsegs = gl->sg_nseg;
10874f272a5eSJohn Baldwin 	MPASS(nsegs > 0);
10884f272a5eSJohn Baldwin 
10894f272a5eSJohn Baldwin 	nflits = (3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2;
10904f272a5eSJohn Baldwin 	flitp = (__be64 *)to;
10914f272a5eSJohn Baldwin 	seg = &gl->sg_segs[0];
10924f272a5eSJohn Baldwin 	usgl = (void *)flitp;
10934f272a5eSJohn Baldwin 
10944f272a5eSJohn Baldwin 	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
10954f272a5eSJohn Baldwin 	    V_ULPTX_NSGE(nsegs));
10964f272a5eSJohn Baldwin 	usgl->len0 = htobe32(seg->ss_len);
10974f272a5eSJohn Baldwin 	usgl->addr0 = htobe64(seg->ss_paddr);
10984f272a5eSJohn Baldwin 	seg++;
10994f272a5eSJohn Baldwin 
11004f272a5eSJohn Baldwin 	for (i = 0; i < nsegs - 1; i++, seg++) {
11014f272a5eSJohn Baldwin 		usgl->sge[i / 2].len[i & 1] = htobe32(seg->ss_len);
11024f272a5eSJohn Baldwin 		usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ss_paddr);
11034f272a5eSJohn Baldwin 	}
11044f272a5eSJohn Baldwin 	if (i & 1)
11054f272a5eSJohn Baldwin 		usgl->sge[i / 2].len[1] = htobe32(0);
11064f272a5eSJohn Baldwin 	flitp += nflits;
11074f272a5eSJohn Baldwin 
11084f272a5eSJohn Baldwin 	if (nflits & 1) {
11094f272a5eSJohn Baldwin 		MPASS(((uintptr_t)flitp) & 0xf);
11104f272a5eSJohn Baldwin 		*flitp++ = 0;
11114f272a5eSJohn Baldwin 	}
11124f272a5eSJohn Baldwin 
11134f272a5eSJohn Baldwin 	MPASS((((uintptr_t)flitp) & 0xf) == 0);
1114*9e269eafSJohn Baldwin 	return (flitp);
11154f272a5eSJohn Baldwin }
11164f272a5eSJohn Baldwin 
11174f272a5eSJohn Baldwin static inline void
1118*9e269eafSJohn Baldwin copy_to_txd(struct sge_eq *eq, const char *from, caddr_t *to, int len)
11194f272a5eSJohn Baldwin {
11204f272a5eSJohn Baldwin 
11214f272a5eSJohn Baldwin 	MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]);
11224f272a5eSJohn Baldwin 	MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]);
11234f272a5eSJohn Baldwin 
11244f272a5eSJohn Baldwin 	if (__predict_true((uintptr_t)(*to) + len <=
11254f272a5eSJohn Baldwin 	    (uintptr_t)&eq->desc[eq->sidx])) {
11264f272a5eSJohn Baldwin 		bcopy(from, *to, len);
11274f272a5eSJohn Baldwin 		(*to) += len;
11284f272a5eSJohn Baldwin 		if ((uintptr_t)(*to) == (uintptr_t)&eq->desc[eq->sidx])
11294f272a5eSJohn Baldwin 			(*to) = (caddr_t)eq->desc;
11304f272a5eSJohn Baldwin 	} else {
11314f272a5eSJohn Baldwin 		int portion = (uintptr_t)&eq->desc[eq->sidx] - (uintptr_t)(*to);
11324f272a5eSJohn Baldwin 
11334f272a5eSJohn Baldwin 		bcopy(from, *to, portion);
11344f272a5eSJohn Baldwin 		from += portion;
11354f272a5eSJohn Baldwin 		portion = len - portion;	/* remaining */
11364f272a5eSJohn Baldwin 		bcopy(from, (void *)eq->desc, portion);
11374f272a5eSJohn Baldwin 		(*to) = (caddr_t)eq->desc + portion;
11384f272a5eSJohn Baldwin 	}
11394f272a5eSJohn Baldwin }
11404f272a5eSJohn Baldwin 
11414f272a5eSJohn Baldwin static int
11424f272a5eSJohn Baldwin ktls_write_tunnel_packet(struct sge_txq *txq, void *dst, struct mbuf *m,
1143*9e269eafSJohn Baldwin     const void *src, u_int len, u_int available, tcp_seq tcp_seqno, u_int pidx,
1144*9e269eafSJohn Baldwin     uint16_t eh_type, bool last_wr)
11454f272a5eSJohn Baldwin {
11464f272a5eSJohn Baldwin 	struct tx_sdesc *txsd;
11474f272a5eSJohn Baldwin 	struct fw_eth_tx_pkt_wr *wr;
11484f272a5eSJohn Baldwin 	struct cpl_tx_pkt_core *cpl;
11494f272a5eSJohn Baldwin 	uint32_t ctrl;
11504f272a5eSJohn Baldwin 	int len16, ndesc, pktlen;
11514f272a5eSJohn Baldwin 	struct ether_header *eh;
11524f272a5eSJohn Baldwin 	struct ip *ip, newip;
11534f272a5eSJohn Baldwin 	struct ip6_hdr *ip6, newip6;
11544f272a5eSJohn Baldwin 	struct tcphdr *tcp, newtcp;
11554f272a5eSJohn Baldwin 	caddr_t out;
11564f272a5eSJohn Baldwin 
11574f272a5eSJohn Baldwin 	TXQ_LOCK_ASSERT_OWNED(txq);
11584f272a5eSJohn Baldwin 	M_ASSERTPKTHDR(m);
11594f272a5eSJohn Baldwin 
11604f272a5eSJohn Baldwin 	wr = dst;
1161*9e269eafSJohn Baldwin 	pktlen = m->m_len + len;
11624f272a5eSJohn Baldwin 	ctrl = sizeof(struct cpl_tx_pkt_core) + pktlen;
11634f272a5eSJohn Baldwin 	len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) + ctrl, 16);
11644f272a5eSJohn Baldwin 	ndesc = tx_len16_to_desc(len16);
11654f272a5eSJohn Baldwin 	MPASS(ndesc <= available);
11664f272a5eSJohn Baldwin 
11674f272a5eSJohn Baldwin 	/* Firmware work request header */
11684f272a5eSJohn Baldwin 	/* TODO: Handle VF work request. */
11694f272a5eSJohn Baldwin 	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
11704f272a5eSJohn Baldwin 	    V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl));
11714f272a5eSJohn Baldwin 
11724f272a5eSJohn Baldwin 	ctrl = V_FW_WR_LEN16(len16);
11734f272a5eSJohn Baldwin 	wr->equiq_to_len16 = htobe32(ctrl);
11744f272a5eSJohn Baldwin 	wr->r3 = 0;
11754f272a5eSJohn Baldwin 
11764f272a5eSJohn Baldwin 	cpl = (void *)(wr + 1);
11774f272a5eSJohn Baldwin 
11784f272a5eSJohn Baldwin 	/* CPL header */
11794f272a5eSJohn Baldwin 	cpl->ctrl0 = txq->cpl_ctrl0;
11804f272a5eSJohn Baldwin 	cpl->pack = 0;
11814f272a5eSJohn Baldwin 	cpl->len = htobe16(pktlen);
11824f272a5eSJohn Baldwin 
11834f272a5eSJohn Baldwin 	out = (void *)(cpl + 1);
11844f272a5eSJohn Baldwin 
11854f272a5eSJohn Baldwin 	/* Copy over Ethernet header. */
11864f272a5eSJohn Baldwin 	eh = mtod(m, struct ether_header *);
11874f272a5eSJohn Baldwin 	copy_to_txd(&txq->eq, (caddr_t)eh, &out, m->m_pkthdr.l2hlen);
11884f272a5eSJohn Baldwin 
11894f272a5eSJohn Baldwin 	/* Fixup length in IP header and copy out. */
11904f272a5eSJohn Baldwin 	if (eh_type == ETHERTYPE_IP) {
11914f272a5eSJohn Baldwin 		ip = (void *)((char *)eh + m->m_pkthdr.l2hlen);
11924f272a5eSJohn Baldwin 		newip = *ip;
11934f272a5eSJohn Baldwin 		newip.ip_len = htons(pktlen - m->m_pkthdr.l2hlen);
11944f272a5eSJohn Baldwin 		copy_to_txd(&txq->eq, (caddr_t)&newip, &out, sizeof(newip));
11954f272a5eSJohn Baldwin 		if (m->m_pkthdr.l3hlen > sizeof(*ip))
11964f272a5eSJohn Baldwin 			copy_to_txd(&txq->eq, (caddr_t)(ip + 1), &out,
11974f272a5eSJohn Baldwin 			    m->m_pkthdr.l3hlen - sizeof(*ip));
11984f272a5eSJohn Baldwin 	} else {
11994f272a5eSJohn Baldwin 		ip6 = (void *)((char *)eh + m->m_pkthdr.l2hlen);
12004f272a5eSJohn Baldwin 		newip6 = *ip6;
12014f272a5eSJohn Baldwin 		newip6.ip6_plen = htons(pktlen - m->m_pkthdr.l2hlen -
12024f272a5eSJohn Baldwin 		    sizeof(*ip6));
12034f272a5eSJohn Baldwin 		copy_to_txd(&txq->eq, (caddr_t)&newip6, &out, sizeof(newip6));
12044f272a5eSJohn Baldwin 		MPASS(m->m_pkthdr.l3hlen == sizeof(*ip6));
12054f272a5eSJohn Baldwin 	}
12064f272a5eSJohn Baldwin 	cpl->ctrl1 = htobe64(pkt_ctrl1(txq, m, eh_type));
12074f272a5eSJohn Baldwin 
12084f272a5eSJohn Baldwin 	/* Set sequence number in TCP header. */
12094f272a5eSJohn Baldwin 	tcp = (void *)((char *)eh + m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen);
12104f272a5eSJohn Baldwin 	newtcp = *tcp;
12114f272a5eSJohn Baldwin 	newtcp.th_seq = htonl(tcp_seqno);
12124f272a5eSJohn Baldwin 	copy_to_txd(&txq->eq, (caddr_t)&newtcp, &out, sizeof(newtcp));
12134f272a5eSJohn Baldwin 
12144f272a5eSJohn Baldwin 	/* Copy rest of TCP header. */
12154f272a5eSJohn Baldwin 	copy_to_txd(&txq->eq, (caddr_t)(tcp + 1), &out, m->m_len -
12164f272a5eSJohn Baldwin 	    (m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen + sizeof(*tcp)));
12174f272a5eSJohn Baldwin 
1218*9e269eafSJohn Baldwin 	/* Copy the payload data. */
1219*9e269eafSJohn Baldwin 	copy_to_txd(&txq->eq, src, &out, len);
12204f272a5eSJohn Baldwin 	txq->imm_wrs++;
12214f272a5eSJohn Baldwin 
12224f272a5eSJohn Baldwin 	txq->txpkt_wrs++;
12234f272a5eSJohn Baldwin 
12244f272a5eSJohn Baldwin 	txsd = &txq->sdesc[pidx];
1225*9e269eafSJohn Baldwin 	if (last_wr)
12264f272a5eSJohn Baldwin 		txsd->m = m;
1227*9e269eafSJohn Baldwin 	else
1228*9e269eafSJohn Baldwin 		txsd->m = NULL;
12294f272a5eSJohn Baldwin 	txsd->desc_used = ndesc;
12304f272a5eSJohn Baldwin 
12314f272a5eSJohn Baldwin 	return (ndesc);
12324f272a5eSJohn Baldwin }
12334f272a5eSJohn Baldwin 
12344f272a5eSJohn Baldwin static int
12354f272a5eSJohn Baldwin ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
12364f272a5eSJohn Baldwin     void *dst, struct mbuf *m, struct tcphdr *tcp, struct mbuf *m_tls,
12374f272a5eSJohn Baldwin     u_int available, tcp_seq tcp_seqno, u_int pidx, uint16_t eh_type,
12384f272a5eSJohn Baldwin     uint16_t mss)
12394f272a5eSJohn Baldwin {
12404f272a5eSJohn Baldwin 	struct sge_eq *eq = &txq->eq;
12414f272a5eSJohn Baldwin 	struct tx_sdesc *txsd;
12424f272a5eSJohn Baldwin 	struct fw_ulptx_wr *wr;
12434f272a5eSJohn Baldwin 	struct ulp_txpkt *txpkt;
12444f272a5eSJohn Baldwin 	struct ulptx_sc_memrd *memrd;
12454f272a5eSJohn Baldwin 	struct ulptx_idata *idata;
12464f272a5eSJohn Baldwin 	struct cpl_tx_sec_pdu *sec_pdu;
12474f272a5eSJohn Baldwin 	struct cpl_tx_pkt_core *tx_pkt;
12484f272a5eSJohn Baldwin 	const struct tls_record_layer *hdr;
12494f272a5eSJohn Baldwin 	struct ip *ip;
12504f272a5eSJohn Baldwin 	struct ip6_hdr *ip6;
12514f272a5eSJohn Baldwin 	struct tcphdr *newtcp;
12524f272a5eSJohn Baldwin 	char *iv, *out;
12534f272a5eSJohn Baldwin 	u_int aad_start, aad_stop;
12544f272a5eSJohn Baldwin 	u_int auth_start, auth_stop, auth_insert;
12554f272a5eSJohn Baldwin 	u_int cipher_start, cipher_stop, iv_offset;
1256*9e269eafSJohn Baldwin 	u_int header_len, offset, plen, rlen, tlen;
1257*9e269eafSJohn Baldwin 	u_int imm_len, ndesc, nsegs, txpkt_lens[2], wr_len;
12584f272a5eSJohn Baldwin 	u_int cpl_len, crypto_hdr_len, post_key_context_len;
12594f272a5eSJohn Baldwin 	u_int leading_waste, trailing_waste;
12604f272a5eSJohn Baldwin 	u_short ip_len;
1261*9e269eafSJohn Baldwin 	bool inline_key, ghash_lcb, last_ghash_frag, last_wr, need_lso;
1262*9e269eafSJohn Baldwin 	bool request_ghash, send_partial_ghash, short_record, split_mode;
1263*9e269eafSJohn Baldwin 	bool using_scratch;
12644f272a5eSJohn Baldwin 
12654f272a5eSJohn Baldwin 	MPASS(tlsp->txq == txq);
12664f272a5eSJohn Baldwin 	M_ASSERTEXTPG(m_tls);
12674f272a5eSJohn Baldwin 
1268*9e269eafSJohn Baldwin 	/* Final work request for this mbuf chain? */
1269*9e269eafSJohn Baldwin 	last_wr = (m_tls->m_next == NULL);
1270*9e269eafSJohn Baldwin 
12714f272a5eSJohn Baldwin 	/*
12724f272a5eSJohn Baldwin 	 * The relative offset of the last byte to send from the TLS
12734f272a5eSJohn Baldwin 	 * record.
12744f272a5eSJohn Baldwin 	 */
12754f272a5eSJohn Baldwin 	tlen = mtod(m_tls, vm_offset_t) + m_tls->m_len;
12764f272a5eSJohn Baldwin 	if (tlen <= m_tls->m_epg_hdrlen) {
12774f272a5eSJohn Baldwin 		/*
12784f272a5eSJohn Baldwin 		 * For requests that only want to send the TLS header,
12794f272a5eSJohn Baldwin 		 * send a tunnelled packet as immediate data.
12804f272a5eSJohn Baldwin 		 */
12814f272a5eSJohn Baldwin #ifdef VERBOSE_TRACES
12824f272a5eSJohn Baldwin 		CTR(KTR_CXGBE, "%s: %p header-only TLS record %u", __func__,
12834f272a5eSJohn Baldwin 		    tlsp, (u_int)m_tls->m_epg_seqno);
12844f272a5eSJohn Baldwin #endif
1285*9e269eafSJohn Baldwin 		/* This should always be the last TLS record in a chain. */
1286*9e269eafSJohn Baldwin 		MPASS(last_wr);
1287*9e269eafSJohn Baldwin 
1288*9e269eafSJohn Baldwin 		txq->kern_tls_header++;
1289*9e269eafSJohn Baldwin 
1290*9e269eafSJohn Baldwin 		return (ktls_write_tunnel_packet(txq, dst, m,
1291*9e269eafSJohn Baldwin 		    (char *)m_tls->m_epg_hdr + mtod(m_tls, vm_offset_t),
1292*9e269eafSJohn Baldwin 		    m_tls->m_len, available, tcp_seqno, pidx, eh_type,
1293*9e269eafSJohn Baldwin 		    last_wr));
12944f272a5eSJohn Baldwin 	}
12954f272a5eSJohn Baldwin 
12964f272a5eSJohn Baldwin 	/* Locate the TLS header. */
12974f272a5eSJohn Baldwin 	hdr = (void *)m_tls->m_epg_hdr;
1298*9e269eafSJohn Baldwin 	rlen = TLS_HEADER_LENGTH + ntohs(hdr->tls_length);
12994f272a5eSJohn Baldwin 
13004f272a5eSJohn Baldwin #ifdef VERBOSE_TRACES
13014f272a5eSJohn Baldwin 	CTR(KTR_CXGBE, "%s: offset %lu len %u TCP seq %u TLS record %u",
13024f272a5eSJohn Baldwin 	    __func__, mtod(m_tls, vm_offset_t), m_tls->m_len, tcp_seqno,
13034f272a5eSJohn Baldwin 	    (u_int)m_tls->m_epg_seqno);
13044f272a5eSJohn Baldwin #endif
13054f272a5eSJohn Baldwin 
1306*9e269eafSJohn Baldwin 	/* Should this request make use of GHASH state? */
1307*9e269eafSJohn Baldwin 	ghash_lcb = false;
1308*9e269eafSJohn Baldwin 	last_ghash_frag = false;
1309*9e269eafSJohn Baldwin 	request_ghash = false;
1310*9e269eafSJohn Baldwin 	send_partial_ghash = false;
1311*9e269eafSJohn Baldwin 	if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM &&
1312*9e269eafSJohn Baldwin 	    tlsp->sc->tlst.partial_ghash && tlsp->sc->tlst.short_records) {
1313*9e269eafSJohn Baldwin 		u_int trailer_len;
1314*9e269eafSJohn Baldwin 
1315*9e269eafSJohn Baldwin 		trailer_len = m_tls->m_epg_trllen;
1316*9e269eafSJohn Baldwin 		if (tlsp->tls13)
1317*9e269eafSJohn Baldwin 			trailer_len--;
1318*9e269eafSJohn Baldwin 		KASSERT(trailer_len == AES_GMAC_HASH_LEN,
1319*9e269eafSJohn Baldwin 		    ("invalid trailer length for AES-GCM"));
1320*9e269eafSJohn Baldwin 
1321*9e269eafSJohn Baldwin 		/* Is this the start of a TLS record? */
1322*9e269eafSJohn Baldwin 		if (mtod(m_tls, vm_offset_t) <= m_tls->m_epg_hdrlen) {
1323*9e269eafSJohn Baldwin 			/*
1324*9e269eafSJohn Baldwin 			 * If this is the very first TLS record or
1325*9e269eafSJohn Baldwin 			 * if this is a newer TLS record, request a partial
1326*9e269eafSJohn Baldwin 			 * hash, but not if we are going to send the whole
1327*9e269eafSJohn Baldwin 			 * thing.
1328*9e269eafSJohn Baldwin 			 */
1329*9e269eafSJohn Baldwin 			if ((tlsp->ghash_tls_seqno == 0 ||
1330*9e269eafSJohn Baldwin 			    tlsp->ghash_tls_seqno < m_tls->m_epg_seqno) &&
1331*9e269eafSJohn Baldwin 			    tlen < rlen) {
1332*9e269eafSJohn Baldwin 				/*
1333*9e269eafSJohn Baldwin 				 * If we are only missing part or all
1334*9e269eafSJohn Baldwin 				 * of the trailer, send a normal full
1335*9e269eafSJohn Baldwin 				 * record but request the hash.
1336*9e269eafSJohn Baldwin 				 * Otherwise, use partial GHASH mode.
1337*9e269eafSJohn Baldwin 				 */
1338*9e269eafSJohn Baldwin 				if (tlen >= (rlen - trailer_len))
1339*9e269eafSJohn Baldwin 					ghash_lcb = true;
1340*9e269eafSJohn Baldwin 				else
1341*9e269eafSJohn Baldwin 					send_partial_ghash = true;
1342*9e269eafSJohn Baldwin 				request_ghash = true;
1343*9e269eafSJohn Baldwin 				tlsp->ghash_tls_seqno = m_tls->m_epg_seqno;
1344*9e269eafSJohn Baldwin 			}
1345*9e269eafSJohn Baldwin 		} else if (tlsp->ghash_tls_seqno == m_tls->m_epg_seqno &&
1346*9e269eafSJohn Baldwin 		    tlsp->ghash_valid) {
1347*9e269eafSJohn Baldwin 			/*
1348*9e269eafSJohn Baldwin 			 * Compute the offset of the first AES block as
1349*9e269eafSJohn Baldwin 			 * is done in ktls_is_short_record.
1350*9e269eafSJohn Baldwin 			 */
1351*9e269eafSJohn Baldwin 			if (rlen - tlen < trailer_len)
1352*9e269eafSJohn Baldwin 				plen = rlen - (m_tls->m_epg_hdrlen +
1353*9e269eafSJohn Baldwin 				    trailer_len);
1354*9e269eafSJohn Baldwin 			else
1355*9e269eafSJohn Baldwin 				plen = tlen - m_tls->m_epg_hdrlen;
1356*9e269eafSJohn Baldwin 			offset = mtod(m_tls, vm_offset_t) - m_tls->m_epg_hdrlen;
1357*9e269eafSJohn Baldwin 			if (offset >= plen)
1358*9e269eafSJohn Baldwin 				offset = plen;
1359*9e269eafSJohn Baldwin 			else
1360*9e269eafSJohn Baldwin 				offset = rounddown2(offset, AES_BLOCK_LEN);
1361*9e269eafSJohn Baldwin 			if (tlsp->ghash_offset == offset) {
1362*9e269eafSJohn Baldwin 				if (offset == plen) {
1363*9e269eafSJohn Baldwin 					/*
1364*9e269eafSJohn Baldwin 					 * Send a partial trailer as a
1365*9e269eafSJohn Baldwin 					 * tunnelled packet as
1366*9e269eafSJohn Baldwin 					 * immediate data.
1367*9e269eafSJohn Baldwin 					 */
1368*9e269eafSJohn Baldwin #ifdef VERBOSE_TRACES
1369*9e269eafSJohn Baldwin 					CTR(KTR_CXGBE,
1370*9e269eafSJohn Baldwin 					    "%s: %p trailer-only TLS record %u",
1371*9e269eafSJohn Baldwin 					    __func__, tlsp,
1372*9e269eafSJohn Baldwin 					    (u_int)m_tls->m_epg_seqno);
1373*9e269eafSJohn Baldwin #endif
1374*9e269eafSJohn Baldwin 
1375*9e269eafSJohn Baldwin 					txq->kern_tls_trailer++;
1376*9e269eafSJohn Baldwin 
1377*9e269eafSJohn Baldwin 					offset = mtod(m_tls, vm_offset_t) -
1378*9e269eafSJohn Baldwin 					    (m_tls->m_epg_hdrlen + plen);
1379*9e269eafSJohn Baldwin 					KASSERT(offset <= AES_GMAC_HASH_LEN,
1380*9e269eafSJohn Baldwin 					    ("offset outside of trailer"));
1381*9e269eafSJohn Baldwin 					return (ktls_write_tunnel_packet(txq,
1382*9e269eafSJohn Baldwin 					    dst, m, tlsp->ghash + offset,
1383*9e269eafSJohn Baldwin 					    m_tls->m_len, available, tcp_seqno,
1384*9e269eafSJohn Baldwin 					    pidx, eh_type, last_wr));
1385*9e269eafSJohn Baldwin 				}
1386*9e269eafSJohn Baldwin 
1387*9e269eafSJohn Baldwin 				/*
1388*9e269eafSJohn Baldwin 				 * If this request sends the end of
1389*9e269eafSJohn Baldwin 				 * the payload, it is the last
1390*9e269eafSJohn Baldwin 				 * fragment.
1391*9e269eafSJohn Baldwin 				 */
1392*9e269eafSJohn Baldwin 				if (tlen >= (rlen - trailer_len)) {
1393*9e269eafSJohn Baldwin 					last_ghash_frag = true;
1394*9e269eafSJohn Baldwin 					ghash_lcb = true;
1395*9e269eafSJohn Baldwin 				}
1396*9e269eafSJohn Baldwin 
1397*9e269eafSJohn Baldwin 				/*
1398*9e269eafSJohn Baldwin 				 * Only use partial GCM mode (rather
1399*9e269eafSJohn Baldwin 				 * than an AES-CTR short record) if
1400*9e269eafSJohn Baldwin 				 * there is input auth data to pass to
1401*9e269eafSJohn Baldwin 				 * the GHASH.  That is true so long as
1402*9e269eafSJohn Baldwin 				 * there is at least one full block of
1403*9e269eafSJohn Baldwin 				 * payload data, or if the remaining
1404*9e269eafSJohn Baldwin 				 * payload data is the final partial
1405*9e269eafSJohn Baldwin 				 * block.
1406*9e269eafSJohn Baldwin 				 */
1407*9e269eafSJohn Baldwin 				if (plen - offset >= GMAC_BLOCK_LEN ||
1408*9e269eafSJohn Baldwin 				    last_ghash_frag) {
1409*9e269eafSJohn Baldwin 					send_partial_ghash = true;
1410*9e269eafSJohn Baldwin 
1411*9e269eafSJohn Baldwin 					/*
1412*9e269eafSJohn Baldwin 					 * If not sending the complete
1413*9e269eafSJohn Baldwin 					 * end of the record, this is
1414*9e269eafSJohn Baldwin 					 * a middle request so needs
1415*9e269eafSJohn Baldwin 					 * to request an updated
1416*9e269eafSJohn Baldwin 					 * partial hash.
1417*9e269eafSJohn Baldwin 					 */
1418*9e269eafSJohn Baldwin 					if (tlen < rlen)
1419*9e269eafSJohn Baldwin 						request_ghash = true;
1420*9e269eafSJohn Baldwin 				}
1421*9e269eafSJohn Baldwin 			}
1422*9e269eafSJohn Baldwin 		}
1423*9e269eafSJohn Baldwin 	}
1424*9e269eafSJohn Baldwin 
1425*9e269eafSJohn Baldwin 	short_record = ktls_is_short_record(tlsp, m_tls, tlen, rlen,
1426*9e269eafSJohn Baldwin 	    &header_len, &offset, &plen, &leading_waste, &trailing_waste,
1427*9e269eafSJohn Baldwin 	    send_partial_ghash, request_ghash);
1428*9e269eafSJohn Baldwin 
14294f272a5eSJohn Baldwin 	if (short_record) {
14304f272a5eSJohn Baldwin #ifdef VERBOSE_TRACES
14314f272a5eSJohn Baldwin 		CTR(KTR_CXGBE,
14324f272a5eSJohn Baldwin 		    "%s: %p short TLS record %u hdr %u offs %u plen %u",
14334f272a5eSJohn Baldwin 		    __func__, tlsp, (u_int)m_tls->m_epg_seqno, header_len,
14344f272a5eSJohn Baldwin 		    offset, plen);
1435*9e269eafSJohn Baldwin 		if (send_partial_ghash) {
1436*9e269eafSJohn Baldwin 			if (header_len != 0)
1437*9e269eafSJohn Baldwin 				CTR(KTR_CXGBE, "%s: %p sending initial GHASH",
1438*9e269eafSJohn Baldwin 				    __func__, tlsp);
1439*9e269eafSJohn Baldwin 			else
1440*9e269eafSJohn Baldwin 				CTR(KTR_CXGBE, "%s: %p sending partial GHASH for offset %u%s",
1441*9e269eafSJohn Baldwin 				    __func__, tlsp, tlsp->ghash_offset,
1442*9e269eafSJohn Baldwin 				    last_ghash_frag ? ", last_frag" : "");
1443*9e269eafSJohn Baldwin 		}
14444f272a5eSJohn Baldwin #endif
1445*9e269eafSJohn Baldwin 		KASSERT(send_partial_ghash || !request_ghash,
1446*9e269eafSJohn Baldwin 		    ("requesting but not sending partial hash for short record"));
1447*9e269eafSJohn Baldwin 	} else {
1448*9e269eafSJohn Baldwin 		KASSERT(!send_partial_ghash,
1449*9e269eafSJohn Baldwin 		    ("sending partial hash with full record"));
14504f272a5eSJohn Baldwin 	}
14514f272a5eSJohn Baldwin 
1452*9e269eafSJohn Baldwin 	if (tlen < rlen && m_tls->m_next == NULL &&
14534f272a5eSJohn Baldwin 	    (tcp->th_flags & TH_FIN) != 0) {
14544f272a5eSJohn Baldwin 		txq->kern_tls_fin_short++;
14554f272a5eSJohn Baldwin #ifdef INVARIANTS
14564f272a5eSJohn Baldwin 		panic("%s: FIN on short TLS record", __func__);
14574f272a5eSJohn Baldwin #endif
14584f272a5eSJohn Baldwin 	}
14594f272a5eSJohn Baldwin 
1460*9e269eafSJohn Baldwin 	/*
1461*9e269eafSJohn Baldwin 	 * Use cached value for first record in chain if not using
1462*9e269eafSJohn Baldwin 	 * partial GCM mode. ktls_parse_pkt() calculates nsegs based
1463*9e269eafSJohn Baldwin 	 * on send_partial_ghash being false.
1464*9e269eafSJohn Baldwin 	 */
1465*9e269eafSJohn Baldwin 	if (m->m_next == m_tls && !send_partial_ghash)
14664f272a5eSJohn Baldwin 		nsegs = mbuf_nsegs(m);
14674f272a5eSJohn Baldwin 	else
14684f272a5eSJohn Baldwin 		nsegs = sglist_count_mbuf_epg(m_tls,
14694f272a5eSJohn Baldwin 		    m_tls->m_epg_hdrlen + offset, plen);
14704f272a5eSJohn Baldwin 
14714f272a5eSJohn Baldwin 	/* Determine if we need an LSO header. */
14724f272a5eSJohn Baldwin 	need_lso = (m_tls->m_len > mss);
14734f272a5eSJohn Baldwin 
14744f272a5eSJohn Baldwin 	/* Calculate the size of the TLS work request. */
1475*9e269eafSJohn Baldwin 	inline_key = send_partial_ghash || tlsp->inline_key;
1476*9e269eafSJohn Baldwin 	wr_len = ktls_base_wr_size(tlsp, inline_key);
1477*9e269eafSJohn Baldwin 
1478*9e269eafSJohn Baldwin 	if (send_partial_ghash) {
1479*9e269eafSJohn Baldwin 		/* Inline key context includes partial hash in OPAD. */
1480*9e269eafSJohn Baldwin 		wr_len += AES_GMAC_HASH_LEN;
1481*9e269eafSJohn Baldwin 	}
14824f272a5eSJohn Baldwin 
14834f272a5eSJohn Baldwin 	/*
14844f272a5eSJohn Baldwin 	 * SplitMode is required if there is any thing we need to trim
14854f272a5eSJohn Baldwin 	 * from the crypto output, either at the front or end of the
14864f272a5eSJohn Baldwin 	 * record.  Note that short records might not need trimming.
14874f272a5eSJohn Baldwin 	 */
14884f272a5eSJohn Baldwin 	split_mode = leading_waste != 0 || trailing_waste != 0;
14894f272a5eSJohn Baldwin 	if (split_mode) {
14904f272a5eSJohn Baldwin 		/*
14914f272a5eSJohn Baldwin 		 * Partial records require a SplitMode
14924f272a5eSJohn Baldwin 		 * CPL_RX_PHYS_DSGL.
14934f272a5eSJohn Baldwin 		 */
14944f272a5eSJohn Baldwin 		wr_len += sizeof(struct cpl_t7_rx_phys_dsgl);
14954f272a5eSJohn Baldwin 	}
14964f272a5eSJohn Baldwin 
14974f272a5eSJohn Baldwin 	if (need_lso)
14984f272a5eSJohn Baldwin 		wr_len += sizeof(struct cpl_tx_pkt_lso_core);
14994f272a5eSJohn Baldwin 
15004f272a5eSJohn Baldwin 	imm_len = m->m_len + header_len;
1501*9e269eafSJohn Baldwin 	if (short_record) {
15024f272a5eSJohn Baldwin 		imm_len += AES_BLOCK_LEN;
1503*9e269eafSJohn Baldwin 		if (send_partial_ghash && header_len != 0)
1504*9e269eafSJohn Baldwin 			imm_len += ktls_gcm_aad_len(tlsp);
1505*9e269eafSJohn Baldwin 	} else if (tlsp->tls13)
15065b9dc22cSJohn Baldwin 		imm_len += sizeof(uint64_t);
15074f272a5eSJohn Baldwin 	wr_len += roundup2(imm_len, 16);
1508*9e269eafSJohn Baldwin 	wr_len += ktls_sgl_size(nsegs + (last_ghash_frag ? 1 : 0));
15094f272a5eSJohn Baldwin 	wr_len = roundup2(wr_len, 16);
1510*9e269eafSJohn Baldwin 	txpkt_lens[0] = wr_len - sizeof(*wr);
1511*9e269eafSJohn Baldwin 
1512*9e269eafSJohn Baldwin 	if (request_ghash) {
1513*9e269eafSJohn Baldwin 		/*
1514*9e269eafSJohn Baldwin 		 * Requesting the hash entails a second ULP_TX_PKT
1515*9e269eafSJohn Baldwin 		 * containing CPL_TX_TLS_ACK, CPL_FW6_PLD, and space
1516*9e269eafSJohn Baldwin 		 * for the hash.
1517*9e269eafSJohn Baldwin 		 */
1518*9e269eafSJohn Baldwin 		txpkt_lens[1] = sizeof(struct ulp_txpkt);
1519*9e269eafSJohn Baldwin 		txpkt_lens[1] += sizeof(struct ulptx_idata);
1520*9e269eafSJohn Baldwin 		txpkt_lens[1] += sizeof(struct cpl_tx_tls_ack);
1521*9e269eafSJohn Baldwin 		txpkt_lens[1] += sizeof(struct rss_header) +
1522*9e269eafSJohn Baldwin 		    sizeof(struct cpl_fw6_pld);
1523*9e269eafSJohn Baldwin 		txpkt_lens[1] += AES_GMAC_HASH_LEN;
1524*9e269eafSJohn Baldwin 		wr_len += txpkt_lens[1];
1525*9e269eafSJohn Baldwin 	} else
1526*9e269eafSJohn Baldwin 		txpkt_lens[1] = 0;
1527*9e269eafSJohn Baldwin 
15284f272a5eSJohn Baldwin 	ndesc = howmany(wr_len, EQ_ESIZE);
15294f272a5eSJohn Baldwin 	MPASS(ndesc <= available);
15304f272a5eSJohn Baldwin 
15314f272a5eSJohn Baldwin 	/*
15324f272a5eSJohn Baldwin 	 * Use the per-txq scratch pad if near the end of the ring to
15334f272a5eSJohn Baldwin 	 * simplify handling of wrap-around.
15344f272a5eSJohn Baldwin 	 */
15354f272a5eSJohn Baldwin 	using_scratch = (eq->sidx - pidx < ndesc);
15364f272a5eSJohn Baldwin 	if (using_scratch)
15374f272a5eSJohn Baldwin 		wr = (void *)txq->ss;
15384f272a5eSJohn Baldwin 	else
15394f272a5eSJohn Baldwin 		wr = dst;
15404f272a5eSJohn Baldwin 
15414f272a5eSJohn Baldwin 	/* FW_ULPTX_WR */
15424f272a5eSJohn Baldwin 	wr->op_to_compl = htobe32(V_FW_WR_OP(FW_ULPTX_WR));
15434f272a5eSJohn Baldwin 	wr->flowid_len16 = htobe32(F_FW_ULPTX_WR_DATA |
15444f272a5eSJohn Baldwin 	    V_FW_WR_LEN16(wr_len / 16));
15454f272a5eSJohn Baldwin 	wr->cookie = 0;
15464f272a5eSJohn Baldwin 
15474f272a5eSJohn Baldwin 	/* ULP_TXPKT */
15484f272a5eSJohn Baldwin 	txpkt = (void *)(wr + 1);
15494f272a5eSJohn Baldwin 	txpkt->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
15504f272a5eSJohn Baldwin 	    V_ULP_TXPKT_DATAMODIFY(0) |
15514f272a5eSJohn Baldwin 	    V_T7_ULP_TXPKT_CHANNELID(tlsp->vi->pi->port_id) |
15524f272a5eSJohn Baldwin 	    V_ULP_TXPKT_DEST(0) |
1553*9e269eafSJohn Baldwin 	    V_ULP_TXPKT_CMDMORE(request_ghash ? 1 : 0) |
15544f272a5eSJohn Baldwin 	    V_ULP_TXPKT_FID(txq->eq.cntxt_id) | V_ULP_TXPKT_RO(1));
1555*9e269eafSJohn Baldwin 	txpkt->len = htobe32(howmany(txpkt_lens[0], 16));
15564f272a5eSJohn Baldwin 
15574f272a5eSJohn Baldwin 	/* ULPTX_IDATA sub-command */
15584f272a5eSJohn Baldwin 	idata = (void *)(txpkt + 1);
15594f272a5eSJohn Baldwin 	idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
15604f272a5eSJohn Baldwin 	    V_ULP_TX_SC_MORE(1));
15614f272a5eSJohn Baldwin 	idata->len = sizeof(struct cpl_tx_sec_pdu);
15624f272a5eSJohn Baldwin 
15634f272a5eSJohn Baldwin 	/*
15644f272a5eSJohn Baldwin 	 * After the key context comes CPL_RX_PHYS_DSGL, CPL_TX_*, and
15654f272a5eSJohn Baldwin 	 * immediate data containing headers.  When using an inline
15664f272a5eSJohn Baldwin 	 * key, these are counted as part of this ULPTX_IDATA.  When
15674f272a5eSJohn Baldwin 	 * reading the key from memory, these are part of a separate
15684f272a5eSJohn Baldwin 	 * ULPTX_IDATA.
15694f272a5eSJohn Baldwin 	 */
15704f272a5eSJohn Baldwin 	cpl_len = sizeof(struct cpl_tx_pkt_core);
15714f272a5eSJohn Baldwin 	if (need_lso)
15724f272a5eSJohn Baldwin 		cpl_len += sizeof(struct cpl_tx_pkt_lso_core);
15734f272a5eSJohn Baldwin 	if (split_mode)
15744f272a5eSJohn Baldwin 		cpl_len += sizeof(struct cpl_t7_rx_phys_dsgl);
15754f272a5eSJohn Baldwin 	post_key_context_len = cpl_len + imm_len;
15764f272a5eSJohn Baldwin 
1577*9e269eafSJohn Baldwin 	if (inline_key) {
15784f272a5eSJohn Baldwin 		idata->len += tlsp->tx_key_info_size + post_key_context_len;
1579*9e269eafSJohn Baldwin 		if (send_partial_ghash) {
1580*9e269eafSJohn Baldwin 			/* Partial GHASH in key context. */
1581*9e269eafSJohn Baldwin 			idata->len += AES_GMAC_HASH_LEN;
1582*9e269eafSJohn Baldwin 		}
1583*9e269eafSJohn Baldwin 	}
15844f272a5eSJohn Baldwin 	idata->len = htobe32(idata->len);
15854f272a5eSJohn Baldwin 
15864f272a5eSJohn Baldwin 	/* CPL_TX_SEC_PDU */
15874f272a5eSJohn Baldwin 	sec_pdu = (void *)(idata + 1);
15884f272a5eSJohn Baldwin 
15894f272a5eSJohn Baldwin 	/*
15904f272a5eSJohn Baldwin 	 * Packet headers are passed through unchanged by the crypto
15914f272a5eSJohn Baldwin 	 * engine by marking them as header data in SCMD0.
15924f272a5eSJohn Baldwin 	 */
15934f272a5eSJohn Baldwin 	crypto_hdr_len = m->m_len;
15944f272a5eSJohn Baldwin 
1595*9e269eafSJohn Baldwin 	if (send_partial_ghash) {
15964f272a5eSJohn Baldwin 		/*
1597*9e269eafSJohn Baldwin 		 * For short records using a partial hash, the TLS
1598*9e269eafSJohn Baldwin 		 * header is counted as header data in SCMD0.  TLS AAD
1599*9e269eafSJohn Baldwin 		 * is next (if AAD is present) followed by the AES-CTR
1600*9e269eafSJohn Baldwin 		 * IV.  Last is the cipher region for the payload.
16014f272a5eSJohn Baldwin 		 */
1602*9e269eafSJohn Baldwin 		if (header_len != 0) {
1603*9e269eafSJohn Baldwin 			aad_start = 1;
1604*9e269eafSJohn Baldwin 			aad_stop = ktls_gcm_aad_len(tlsp);
1605*9e269eafSJohn Baldwin 		} else {
1606*9e269eafSJohn Baldwin 			aad_start = 0;
1607*9e269eafSJohn Baldwin 			aad_stop = 0;
1608*9e269eafSJohn Baldwin 		}
1609*9e269eafSJohn Baldwin 		iv_offset = aad_stop + 1;
1610*9e269eafSJohn Baldwin 		cipher_start = iv_offset + AES_BLOCK_LEN;
1611*9e269eafSJohn Baldwin 		cipher_stop = 0;
1612*9e269eafSJohn Baldwin 		if (last_ghash_frag) {
1613*9e269eafSJohn Baldwin 			auth_start = cipher_start;
1614*9e269eafSJohn Baldwin 			auth_stop = AES_GMAC_HASH_LEN;
1615*9e269eafSJohn Baldwin 			auth_insert = auth_stop;
1616*9e269eafSJohn Baldwin 		} else if (plen < GMAC_BLOCK_LEN) {
1617*9e269eafSJohn Baldwin 			/*
1618*9e269eafSJohn Baldwin 			 * A request that sends part of the first AES
1619*9e269eafSJohn Baldwin 			 * block will only have AAD.
1620*9e269eafSJohn Baldwin 			 */
1621*9e269eafSJohn Baldwin 			KASSERT(header_len != 0,
1622*9e269eafSJohn Baldwin 			    ("%s: partial GHASH with no auth", __func__));
1623*9e269eafSJohn Baldwin 			auth_start = 0;
1624*9e269eafSJohn Baldwin 			auth_stop = 0;
1625*9e269eafSJohn Baldwin 			auth_insert = 0;
1626*9e269eafSJohn Baldwin 		} else {
1627*9e269eafSJohn Baldwin 			auth_start = cipher_start;
1628*9e269eafSJohn Baldwin 			auth_stop = plen % GMAC_BLOCK_LEN;
1629*9e269eafSJohn Baldwin 			auth_insert = 0;
1630*9e269eafSJohn Baldwin 		}
1631*9e269eafSJohn Baldwin 
1632*9e269eafSJohn Baldwin 		sec_pdu->pldlen = htobe32(aad_stop + AES_BLOCK_LEN + plen +
1633*9e269eafSJohn Baldwin 		    (last_ghash_frag ? AES_GMAC_HASH_LEN : 0));
1634*9e269eafSJohn Baldwin 
1635*9e269eafSJohn Baldwin 		/*
1636*9e269eafSJohn Baldwin 		 * For short records, the TLS header is treated as
1637*9e269eafSJohn Baldwin 		 * header data.
1638*9e269eafSJohn Baldwin 		 */
1639*9e269eafSJohn Baldwin 		crypto_hdr_len += header_len;
1640*9e269eafSJohn Baldwin 
1641*9e269eafSJohn Baldwin 		/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
1642*9e269eafSJohn Baldwin 		sec_pdu->seqno_numivs = tlsp->scmd0_partial.seqno_numivs;
1643*9e269eafSJohn Baldwin 		sec_pdu->ivgen_hdrlen = tlsp->scmd0_partial.ivgen_hdrlen;
1644*9e269eafSJohn Baldwin 		if (last_ghash_frag)
1645*9e269eafSJohn Baldwin 			sec_pdu->ivgen_hdrlen |= V_SCMD_LAST_FRAG(1);
1646*9e269eafSJohn Baldwin 		else
1647*9e269eafSJohn Baldwin 			sec_pdu->ivgen_hdrlen |= V_SCMD_MORE_FRAGS(1);
1648*9e269eafSJohn Baldwin 		sec_pdu->ivgen_hdrlen = htobe32(sec_pdu->ivgen_hdrlen |
1649*9e269eafSJohn Baldwin 		    V_SCMD_HDR_LEN(crypto_hdr_len));
1650*9e269eafSJohn Baldwin 
1651*9e269eafSJohn Baldwin 		txq->kern_tls_partial_ghash++;
1652*9e269eafSJohn Baldwin 	} else if (short_record) {
1653*9e269eafSJohn Baldwin 		/*
1654*9e269eafSJohn Baldwin 		 * For short records without a partial hash, the TLS
1655*9e269eafSJohn Baldwin 		 * header is counted as header data in SCMD0 and the
1656*9e269eafSJohn Baldwin 		 * IV is next, followed by a cipher region for the
1657*9e269eafSJohn Baldwin 		 * payload.
1658*9e269eafSJohn Baldwin 		 */
16594f272a5eSJohn Baldwin 		aad_start = 0;
16604f272a5eSJohn Baldwin 		aad_stop = 0;
16614f272a5eSJohn Baldwin 		iv_offset = 1;
16624f272a5eSJohn Baldwin 		auth_start = 0;
16634f272a5eSJohn Baldwin 		auth_stop = 0;
16644f272a5eSJohn Baldwin 		auth_insert = 0;
16654f272a5eSJohn Baldwin 		cipher_start = AES_BLOCK_LEN + 1;
16664f272a5eSJohn Baldwin 		cipher_stop = 0;
16674f272a5eSJohn Baldwin 
16684f272a5eSJohn Baldwin 		sec_pdu->pldlen = htobe32(AES_BLOCK_LEN + plen);
16694f272a5eSJohn Baldwin 
16704f272a5eSJohn Baldwin 		/*
16714f272a5eSJohn Baldwin 		 * For short records, the TLS header is treated as
16724f272a5eSJohn Baldwin 		 * header data.
16734f272a5eSJohn Baldwin 		 */
16744f272a5eSJohn Baldwin 		crypto_hdr_len += header_len;
16754f272a5eSJohn Baldwin 
16764f272a5eSJohn Baldwin 		/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
16774f272a5eSJohn Baldwin 		sec_pdu->seqno_numivs = tlsp->scmd0_short.seqno_numivs;
16784f272a5eSJohn Baldwin 		sec_pdu->ivgen_hdrlen = htobe32(
16794f272a5eSJohn Baldwin 		    tlsp->scmd0_short.ivgen_hdrlen |
16804f272a5eSJohn Baldwin 		    V_SCMD_HDR_LEN(crypto_hdr_len));
16814f272a5eSJohn Baldwin 
16824f272a5eSJohn Baldwin 		txq->kern_tls_short++;
16834f272a5eSJohn Baldwin 	} else {
16844f272a5eSJohn Baldwin 		/*
16855b9dc22cSJohn Baldwin 		 * AAD is TLS header.  IV is after AAD for TLS < 1.3.
16865b9dc22cSJohn Baldwin 		 * For TLS 1.3, a placeholder for the TLS sequence
16875b9dc22cSJohn Baldwin 		 * number is provided as an IV before the AAD.  The
16885b9dc22cSJohn Baldwin 		 * cipher region starts after the AAD and IV.  See
16895b9dc22cSJohn Baldwin 		 * comments in ccr_authenc() and ccr_gmac() in
16905b9dc22cSJohn Baldwin 		 * t4_crypto.c regarding cipher and auth start/stop
16915b9dc22cSJohn Baldwin 		 * values.
16924f272a5eSJohn Baldwin 		 */
16935b9dc22cSJohn Baldwin 		if (tlsp->tls13) {
16945b9dc22cSJohn Baldwin 			iv_offset = 1;
16955b9dc22cSJohn Baldwin 			aad_start = 1 + sizeof(uint64_t);
16965b9dc22cSJohn Baldwin 			aad_stop = sizeof(uint64_t) + TLS_HEADER_LENGTH;
16975b9dc22cSJohn Baldwin 			cipher_start = aad_stop + 1;
16985b9dc22cSJohn Baldwin 		} else {
16994f272a5eSJohn Baldwin 			aad_start = 1;
17004f272a5eSJohn Baldwin 			aad_stop = TLS_HEADER_LENGTH;
17014f272a5eSJohn Baldwin 			iv_offset = TLS_HEADER_LENGTH + 1;
17024f272a5eSJohn Baldwin 			cipher_start = m_tls->m_epg_hdrlen + 1;
17035b9dc22cSJohn Baldwin 		}
17044f272a5eSJohn Baldwin 		if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) {
17054f272a5eSJohn Baldwin 			cipher_stop = 0;
17064f272a5eSJohn Baldwin 			auth_start = cipher_start;
17074f272a5eSJohn Baldwin 			auth_stop = 0;
17084f272a5eSJohn Baldwin 			auth_insert = 0;
17094f272a5eSJohn Baldwin 		} else {
17104f272a5eSJohn Baldwin 			cipher_stop = 0;
17114f272a5eSJohn Baldwin 			auth_start = cipher_start;
17124f272a5eSJohn Baldwin 			auth_stop = 0;
17134f272a5eSJohn Baldwin 			auth_insert = 0;
17144f272a5eSJohn Baldwin 		}
17154f272a5eSJohn Baldwin 
17165b9dc22cSJohn Baldwin 		sec_pdu->pldlen = htobe32((tlsp->tls13 ? sizeof(uint64_t) : 0) +
17175b9dc22cSJohn Baldwin 		    m_tls->m_epg_hdrlen + plen);
17184f272a5eSJohn Baldwin 
17194f272a5eSJohn Baldwin 		/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
17204f272a5eSJohn Baldwin 		sec_pdu->seqno_numivs = tlsp->scmd0.seqno_numivs;
17214f272a5eSJohn Baldwin 		sec_pdu->ivgen_hdrlen = htobe32(tlsp->scmd0.ivgen_hdrlen |
17224f272a5eSJohn Baldwin 		    V_SCMD_HDR_LEN(crypto_hdr_len));
17234f272a5eSJohn Baldwin 
17244f272a5eSJohn Baldwin 		if (split_mode)
17254f272a5eSJohn Baldwin 			txq->kern_tls_partial++;
17264f272a5eSJohn Baldwin 		else
17274f272a5eSJohn Baldwin 			txq->kern_tls_full++;
17284f272a5eSJohn Baldwin 	}
17294f272a5eSJohn Baldwin 	sec_pdu->op_ivinsrtofst = htobe32(
17304f272a5eSJohn Baldwin 	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
17314f272a5eSJohn Baldwin 	    V_CPL_TX_SEC_PDU_CPLLEN(cpl_len / 8) |
1732*9e269eafSJohn Baldwin 	    V_CPL_TX_SEC_PDU_PLACEHOLDER(send_partial_ghash ? 1 : 0) |
17334f272a5eSJohn Baldwin 	    V_CPL_TX_SEC_PDU_IVINSRTOFST(iv_offset));
17344f272a5eSJohn Baldwin 	sec_pdu->aadstart_cipherstop_hi = htobe32(
17354f272a5eSJohn Baldwin 	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
17364f272a5eSJohn Baldwin 	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
17374f272a5eSJohn Baldwin 	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
17384f272a5eSJohn Baldwin 	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4));
17394f272a5eSJohn Baldwin 	sec_pdu->cipherstop_lo_authinsert = htobe32(
17404f272a5eSJohn Baldwin 	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) |
17414f272a5eSJohn Baldwin 	    V_CPL_TX_SEC_PDU_AUTHSTART(auth_start) |
17424f272a5eSJohn Baldwin 	    V_CPL_TX_SEC_PDU_AUTHSTOP(auth_stop) |
17434f272a5eSJohn Baldwin 	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));
17444f272a5eSJohn Baldwin 
1745*9e269eafSJohn Baldwin 	if (send_partial_ghash && last_ghash_frag) {
1746*9e269eafSJohn Baldwin 		uint64_t aad_len, cipher_len;
1747*9e269eafSJohn Baldwin 
1748*9e269eafSJohn Baldwin 		aad_len = ktls_gcm_aad_len(tlsp);
1749*9e269eafSJohn Baldwin 		cipher_len = rlen - (m_tls->m_epg_hdrlen + AES_GMAC_HASH_LEN);
1750*9e269eafSJohn Baldwin 		sec_pdu->scmd1 = htobe64(aad_len << 44 | cipher_len);
1751*9e269eafSJohn Baldwin 	} else
17524f272a5eSJohn Baldwin 		sec_pdu->scmd1 = htobe64(m_tls->m_epg_seqno);
17534f272a5eSJohn Baldwin 
17544f272a5eSJohn Baldwin 	/* Key context */
17554f272a5eSJohn Baldwin 	out = (void *)(sec_pdu + 1);
1756*9e269eafSJohn Baldwin 	if (inline_key) {
17574f272a5eSJohn Baldwin 		memcpy(out, &tlsp->keyctx, tlsp->tx_key_info_size);
1758*9e269eafSJohn Baldwin 		if (send_partial_ghash) {
1759*9e269eafSJohn Baldwin 			struct tls_keyctx *keyctx = (void *)out;
1760*9e269eafSJohn Baldwin 
1761*9e269eafSJohn Baldwin 			keyctx->u.txhdr.ctxlen++;
1762*9e269eafSJohn Baldwin 			keyctx->u.txhdr.dualck_to_txvalid &= ~htobe16(
1763*9e269eafSJohn Baldwin 			    V_KEY_CONTEXT_MK_SIZE(M_KEY_CONTEXT_MK_SIZE));
1764*9e269eafSJohn Baldwin 			keyctx->u.txhdr.dualck_to_txvalid |= htobe16(
1765*9e269eafSJohn Baldwin 			    F_KEY_CONTEXT_OPAD_PRESENT |
1766*9e269eafSJohn Baldwin 			    V_KEY_CONTEXT_MK_SIZE(0));
1767*9e269eafSJohn Baldwin 		}
17684f272a5eSJohn Baldwin 		out += tlsp->tx_key_info_size;
1769*9e269eafSJohn Baldwin 		if (send_partial_ghash) {
1770*9e269eafSJohn Baldwin 			if (header_len != 0)
1771*9e269eafSJohn Baldwin 				memset(out, 0, AES_GMAC_HASH_LEN);
1772*9e269eafSJohn Baldwin 			else
1773*9e269eafSJohn Baldwin 				memcpy(out, tlsp->ghash, AES_GMAC_HASH_LEN);
1774*9e269eafSJohn Baldwin 			out += AES_GMAC_HASH_LEN;
1775*9e269eafSJohn Baldwin 		}
17764f272a5eSJohn Baldwin 	} else {
17774f272a5eSJohn Baldwin 		/* ULPTX_SC_MEMRD to read key context. */
17784f272a5eSJohn Baldwin 		memrd = (void *)out;
17794f272a5eSJohn Baldwin 		memrd->cmd_to_len = htobe32(V_ULPTX_CMD(ULP_TX_SC_MEMRD) |
17804f272a5eSJohn Baldwin 		    V_ULP_TX_SC_MORE(1) |
17814f272a5eSJohn Baldwin 		    V_ULPTX_LEN16(tlsp->tx_key_info_size >> 4));
17824f272a5eSJohn Baldwin 		memrd->addr = htobe32(tlsp->tx_key_addr >> 5);
17834f272a5eSJohn Baldwin 
17844f272a5eSJohn Baldwin 		/* ULPTX_IDATA for CPL_TX_* and headers. */
17854f272a5eSJohn Baldwin 		idata = (void *)(memrd + 1);
17864f272a5eSJohn Baldwin 		idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
17874f272a5eSJohn Baldwin 		    V_ULP_TX_SC_MORE(1));
17884f272a5eSJohn Baldwin 		idata->len = htobe32(post_key_context_len);
17894f272a5eSJohn Baldwin 
17904f272a5eSJohn Baldwin 		out = (void *)(idata + 1);
17914f272a5eSJohn Baldwin 	}
17924f272a5eSJohn Baldwin 
17934f272a5eSJohn Baldwin 	/* CPL_RX_PHYS_DSGL */
17944f272a5eSJohn Baldwin 	if (split_mode) {
1795*9e269eafSJohn Baldwin 		crypto_hdr_len = sizeof(struct cpl_tx_pkt_core);
1796*9e269eafSJohn Baldwin 		if (need_lso)
1797*9e269eafSJohn Baldwin 			crypto_hdr_len += sizeof(struct cpl_tx_pkt_lso_core);
1798*9e269eafSJohn Baldwin 		crypto_hdr_len += m->m_len;
1799*9e269eafSJohn Baldwin 		out = write_split_mode_rx_phys(out, m, m_tls, crypto_hdr_len,
18004f272a5eSJohn Baldwin 		    leading_waste, trailing_waste);
18014f272a5eSJohn Baldwin 	}
18024f272a5eSJohn Baldwin 
18034f272a5eSJohn Baldwin 	/* CPL_TX_PKT_LSO */
18044f272a5eSJohn Baldwin 	if (need_lso) {
18054f272a5eSJohn Baldwin 		out = write_lso_cpl(out, m, mss, eh_type, m->m_len +
18064f272a5eSJohn Baldwin 		    m_tls->m_len);
18074f272a5eSJohn Baldwin 		txq->tso_wrs++;
18084f272a5eSJohn Baldwin 	}
18094f272a5eSJohn Baldwin 
18104f272a5eSJohn Baldwin 	/* CPL_TX_PKT_XT */
18114f272a5eSJohn Baldwin 	tx_pkt = (void *)out;
18124f272a5eSJohn Baldwin 	tx_pkt->ctrl0 = txq->cpl_ctrl0;
18134f272a5eSJohn Baldwin 	tx_pkt->ctrl1 = htobe64(pkt_ctrl1(txq, m, eh_type));
18144f272a5eSJohn Baldwin 	tx_pkt->pack = 0;
18154f272a5eSJohn Baldwin 	tx_pkt->len = htobe16(m->m_len + m_tls->m_len);
18164f272a5eSJohn Baldwin 
18174f272a5eSJohn Baldwin 	/* Copy the packet headers. */
18184f272a5eSJohn Baldwin 	out = (void *)(tx_pkt + 1);
18194f272a5eSJohn Baldwin 	memcpy(out, mtod(m, char *), m->m_len);
18204f272a5eSJohn Baldwin 
18214f272a5eSJohn Baldwin 	/* Modify the packet length in the IP header. */
18224f272a5eSJohn Baldwin 	ip_len = m->m_len + m_tls->m_len - m->m_pkthdr.l2hlen;
18234f272a5eSJohn Baldwin 	if (eh_type == ETHERTYPE_IP) {
18244f272a5eSJohn Baldwin 		ip = (void *)(out + m->m_pkthdr.l2hlen);
18254f272a5eSJohn Baldwin 		be16enc(&ip->ip_len, ip_len);
18264f272a5eSJohn Baldwin 	} else {
18274f272a5eSJohn Baldwin 		ip6 = (void *)(out + m->m_pkthdr.l2hlen);
18284f272a5eSJohn Baldwin 		be16enc(&ip6->ip6_plen, ip_len - sizeof(*ip6));
18294f272a5eSJohn Baldwin 	}
18304f272a5eSJohn Baldwin 
18314f272a5eSJohn Baldwin 	/* Modify sequence number and flags in TCP header. */
18324f272a5eSJohn Baldwin 	newtcp = (void *)(out + m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen);
18334f272a5eSJohn Baldwin 	be32enc(&newtcp->th_seq, tcp_seqno);
18344f272a5eSJohn Baldwin 	if (!last_wr)
18354f272a5eSJohn Baldwin 		newtcp->th_flags = tcp->th_flags & ~(TH_PUSH | TH_FIN);
18364f272a5eSJohn Baldwin 	out += m->m_len;
18374f272a5eSJohn Baldwin 
18385b9dc22cSJohn Baldwin 	/*
18395b9dc22cSJohn Baldwin 	 * Insert placeholder for sequence number as IV for TLS 1.3
18405b9dc22cSJohn Baldwin 	 * non-short records.
18415b9dc22cSJohn Baldwin 	 */
18425b9dc22cSJohn Baldwin 	if (tlsp->tls13 && !short_record) {
18435b9dc22cSJohn Baldwin 		memset(out, 0, sizeof(uint64_t));
18445b9dc22cSJohn Baldwin 		out += sizeof(uint64_t);
18455b9dc22cSJohn Baldwin 	}
18465b9dc22cSJohn Baldwin 
18474f272a5eSJohn Baldwin 	/* Populate the TLS header */
18484f272a5eSJohn Baldwin 	memcpy(out, m_tls->m_epg_hdr, header_len);
18494f272a5eSJohn Baldwin 	out += header_len;
18504f272a5eSJohn Baldwin 
1851*9e269eafSJohn Baldwin 	/* TLS AAD for short records using a partial hash. */
1852*9e269eafSJohn Baldwin 	if (send_partial_ghash && header_len != 0) {
1853*9e269eafSJohn Baldwin 		if (tlsp->tls13) {
1854*9e269eafSJohn Baldwin 			struct tls_aead_data_13 ad;
1855*9e269eafSJohn Baldwin 
1856*9e269eafSJohn Baldwin 			ad.type = hdr->tls_type;
1857*9e269eafSJohn Baldwin 			ad.tls_vmajor = hdr->tls_vmajor;
1858*9e269eafSJohn Baldwin 			ad.tls_vminor = hdr->tls_vminor;
1859*9e269eafSJohn Baldwin 			ad.tls_length = hdr->tls_length;
1860*9e269eafSJohn Baldwin 			memcpy(out, &ad, sizeof(ad));
1861*9e269eafSJohn Baldwin 			out += sizeof(ad);
1862*9e269eafSJohn Baldwin 		} else {
1863*9e269eafSJohn Baldwin 			struct tls_aead_data ad;
1864*9e269eafSJohn Baldwin 			uint16_t cipher_len;
1865*9e269eafSJohn Baldwin 
1866*9e269eafSJohn Baldwin 			cipher_len = rlen -
1867*9e269eafSJohn Baldwin 			    (m_tls->m_epg_hdrlen + AES_GMAC_HASH_LEN);
1868*9e269eafSJohn Baldwin 			ad.seq = htobe64(m_tls->m_epg_seqno);
1869*9e269eafSJohn Baldwin 			ad.type = hdr->tls_type;
1870*9e269eafSJohn Baldwin 			ad.tls_vmajor = hdr->tls_vmajor;
1871*9e269eafSJohn Baldwin 			ad.tls_vminor = hdr->tls_vminor;
1872*9e269eafSJohn Baldwin 			ad.tls_length = htons(cipher_len);
1873*9e269eafSJohn Baldwin 			memcpy(out, &ad, sizeof(ad));
1874*9e269eafSJohn Baldwin 			out += sizeof(ad);
1875*9e269eafSJohn Baldwin 		}
1876*9e269eafSJohn Baldwin 	}
1877*9e269eafSJohn Baldwin 
18784f272a5eSJohn Baldwin 	/* AES IV for a short record. */
18794f272a5eSJohn Baldwin 	if (short_record) {
18804f272a5eSJohn Baldwin 		iv = out;
18814f272a5eSJohn Baldwin 		if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) {
18824f272a5eSJohn Baldwin 			memcpy(iv, tlsp->keyctx.u.txhdr.txsalt, SALT_SIZE);
18835b9dc22cSJohn Baldwin 			if (tlsp->tls13) {
18845b9dc22cSJohn Baldwin 				uint64_t value;
18855b9dc22cSJohn Baldwin 
18865b9dc22cSJohn Baldwin 				value = be64dec(tlsp->keyctx.u.txhdr.txsalt +
18875b9dc22cSJohn Baldwin 				    4);
18885b9dc22cSJohn Baldwin 				value ^= m_tls->m_epg_seqno;
18895b9dc22cSJohn Baldwin 				be64enc(iv + 4, value);
18905b9dc22cSJohn Baldwin 			} else
18914f272a5eSJohn Baldwin 				memcpy(iv + 4, hdr + 1, 8);
1892*9e269eafSJohn Baldwin 			if (send_partial_ghash)
1893*9e269eafSJohn Baldwin 				be32enc(iv + 12, 1 + offset / AES_BLOCK_LEN);
1894*9e269eafSJohn Baldwin 			else
18954f272a5eSJohn Baldwin 				be32enc(iv + 12, 2 + offset / AES_BLOCK_LEN);
18964f272a5eSJohn Baldwin 		} else
18974f272a5eSJohn Baldwin 			memcpy(iv, hdr + 1, AES_BLOCK_LEN);
18984f272a5eSJohn Baldwin 		out += AES_BLOCK_LEN;
18994f272a5eSJohn Baldwin 	}
19004f272a5eSJohn Baldwin 
19014f272a5eSJohn Baldwin 	if (imm_len % 16 != 0) {
19024f272a5eSJohn Baldwin 		if (imm_len % 8 != 0) {
19034f272a5eSJohn Baldwin 			/* Zero pad to an 8-byte boundary. */
19044f272a5eSJohn Baldwin 			memset(out, 0, 8 - (imm_len % 8));
19054f272a5eSJohn Baldwin 			out += 8 - (imm_len % 8);
19064f272a5eSJohn Baldwin 		}
19074f272a5eSJohn Baldwin 
19084f272a5eSJohn Baldwin 		/*
19094f272a5eSJohn Baldwin 		 * Insert a ULP_TX_SC_NOOP if needed so the SGL is
19104f272a5eSJohn Baldwin 		 * 16-byte aligned.
19114f272a5eSJohn Baldwin 		 */
19124f272a5eSJohn Baldwin 		if (imm_len % 16 <= 8) {
19134f272a5eSJohn Baldwin 			idata = (void *)out;
19144f272a5eSJohn Baldwin 			idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP) |
19154f272a5eSJohn Baldwin 			    V_ULP_TX_SC_MORE(1));
19164f272a5eSJohn Baldwin 			idata->len = htobe32(0);
19174f272a5eSJohn Baldwin 			out = (void *)(idata + 1);
19184f272a5eSJohn Baldwin 		}
19194f272a5eSJohn Baldwin 	}
19204f272a5eSJohn Baldwin 
19214f272a5eSJohn Baldwin 	/* SGL for record payload */
19224f272a5eSJohn Baldwin 	sglist_reset(txq->gl);
19234f272a5eSJohn Baldwin 	if (sglist_append_mbuf_epg(txq->gl, m_tls, m_tls->m_epg_hdrlen + offset,
19244f272a5eSJohn Baldwin 	    plen) != 0) {
19254f272a5eSJohn Baldwin #ifdef INVARIANTS
19264f272a5eSJohn Baldwin 		panic("%s: failed to append sglist", __func__);
19274f272a5eSJohn Baldwin #endif
19284f272a5eSJohn Baldwin 	}
1929*9e269eafSJohn Baldwin 	if (last_ghash_frag) {
1930*9e269eafSJohn Baldwin 		if (sglist_append_phys(txq->gl, zero_buffer_pa,
1931*9e269eafSJohn Baldwin 		    AES_GMAC_HASH_LEN) != 0) {
1932*9e269eafSJohn Baldwin #ifdef INVARIANTS
1933*9e269eafSJohn Baldwin 			panic("%s: failed to append sglist (2)", __func__);
1934*9e269eafSJohn Baldwin #endif
1935*9e269eafSJohn Baldwin 		}
1936*9e269eafSJohn Baldwin 	}
1937*9e269eafSJohn Baldwin 	out = write_gl_to_buf(txq->gl, out);
1938*9e269eafSJohn Baldwin 
1939*9e269eafSJohn Baldwin 	if (request_ghash) {
1940*9e269eafSJohn Baldwin 		/* ULP_TXPKT */
1941*9e269eafSJohn Baldwin 		txpkt = (void *)out;
1942*9e269eafSJohn Baldwin 		txpkt->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
1943*9e269eafSJohn Baldwin 		    V_ULP_TXPKT_DATAMODIFY(0) |
1944*9e269eafSJohn Baldwin 		    V_T7_ULP_TXPKT_CHANNELID(tlsp->vi->pi->port_id) |
1945*9e269eafSJohn Baldwin 		    V_ULP_TXPKT_DEST(0) |
1946*9e269eafSJohn Baldwin 		    V_ULP_TXPKT_FID(txq->eq.cntxt_id) | V_ULP_TXPKT_RO(1));
1947*9e269eafSJohn Baldwin 		txpkt->len = htobe32(howmany(txpkt_lens[1], 16));
1948*9e269eafSJohn Baldwin 
1949*9e269eafSJohn Baldwin 		/* ULPTX_IDATA sub-command */
1950*9e269eafSJohn Baldwin 		idata = (void *)(txpkt + 1);
1951*9e269eafSJohn Baldwin 		idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
1952*9e269eafSJohn Baldwin 		    V_ULP_TX_SC_MORE(0));
1953*9e269eafSJohn Baldwin 		idata->len = sizeof(struct cpl_tx_tls_ack);
1954*9e269eafSJohn Baldwin 		idata->len += sizeof(struct rss_header) +
1955*9e269eafSJohn Baldwin 		    sizeof(struct cpl_fw6_pld);
1956*9e269eafSJohn Baldwin 		idata->len += AES_GMAC_HASH_LEN;
1957*9e269eafSJohn Baldwin 		idata->len = htobe32(idata->len);
1958*9e269eafSJohn Baldwin 		out = (void *)(idata + 1);
1959*9e269eafSJohn Baldwin 
1960*9e269eafSJohn Baldwin 		/* CPL_TX_TLS_ACK */
1961*9e269eafSJohn Baldwin 		out = write_tx_tls_ack(out, tlsp->rx_chid, AES_GMAC_HASH_LEN,
1962*9e269eafSJohn Baldwin 		    ghash_lcb);
1963*9e269eafSJohn Baldwin 
1964*9e269eafSJohn Baldwin 		/* CPL_FW6_PLD */
1965*9e269eafSJohn Baldwin 		out = write_fw6_pld(out, tlsp->rx_chid, tlsp->rx_qid,
1966*9e269eafSJohn Baldwin 		    AES_GMAC_HASH_LEN, (uintptr_t)tlsp | CPL_FW6_COOKIE_KTLS);
1967*9e269eafSJohn Baldwin 
1968*9e269eafSJohn Baldwin 		/* Space for partial hash. */
1969*9e269eafSJohn Baldwin 		memset(out, 0, AES_GMAC_HASH_LEN);
1970*9e269eafSJohn Baldwin 		out += AES_GMAC_HASH_LEN;
1971*9e269eafSJohn Baldwin 
1972*9e269eafSJohn Baldwin 		tlsp->ghash_pending = true;
1973*9e269eafSJohn Baldwin 		tlsp->ghash_valid = false;
1974*9e269eafSJohn Baldwin 		tlsp->ghash_lcb = ghash_lcb;
1975*9e269eafSJohn Baldwin 		if (last_ghash_frag)
1976*9e269eafSJohn Baldwin 			tlsp->ghash_offset = offset + plen;
1977*9e269eafSJohn Baldwin 		else
1978*9e269eafSJohn Baldwin 			tlsp->ghash_offset = rounddown2(offset + plen,
1979*9e269eafSJohn Baldwin 			    GMAC_BLOCK_LEN);
1980*9e269eafSJohn Baldwin #ifdef VERBOSE_TRACES
1981*9e269eafSJohn Baldwin 		CTR(KTR_CXGBE, "%s: %p requesting GHASH for offset %u",
1982*9e269eafSJohn Baldwin 		    __func__, tlsp, tlsp->ghash_offset);
1983*9e269eafSJohn Baldwin #endif
1984*9e269eafSJohn Baldwin 		m_snd_tag_ref(&tlsp->com);
1985*9e269eafSJohn Baldwin 
1986*9e269eafSJohn Baldwin 		txq->kern_tls_ghash_requested++;
1987*9e269eafSJohn Baldwin 	}
19884f272a5eSJohn Baldwin 
19894f272a5eSJohn Baldwin 	if (using_scratch) {
19904f272a5eSJohn Baldwin 		out = dst;
19914f272a5eSJohn Baldwin 		copy_to_txd(eq, txq->ss, &out, wr_len);
19924f272a5eSJohn Baldwin 	}
19934f272a5eSJohn Baldwin 
19944f272a5eSJohn Baldwin 	txq->kern_tls_records++;
19954f272a5eSJohn Baldwin 	txq->kern_tls_octets += m_tls->m_len;
19964f272a5eSJohn Baldwin 	if (split_mode) {
19974f272a5eSJohn Baldwin 		txq->kern_tls_splitmode++;
19984f272a5eSJohn Baldwin 		txq->kern_tls_waste += leading_waste + trailing_waste;
19994f272a5eSJohn Baldwin 	}
20004f272a5eSJohn Baldwin 	if (need_lso)
20014f272a5eSJohn Baldwin 		txq->kern_tls_lso++;
20024f272a5eSJohn Baldwin 
20034f272a5eSJohn Baldwin 	txsd = &txq->sdesc[pidx];
20044f272a5eSJohn Baldwin 	if (last_wr)
20054f272a5eSJohn Baldwin 		txsd->m = m;
20064f272a5eSJohn Baldwin 	else
20074f272a5eSJohn Baldwin 		txsd->m = NULL;
2008*9e269eafSJohn Baldwin 	txsd->desc_used = ndesc;
20094f272a5eSJohn Baldwin 
20104f272a5eSJohn Baldwin 	return (ndesc);
20114f272a5eSJohn Baldwin }
20124f272a5eSJohn Baldwin 
20134f272a5eSJohn Baldwin int
20144f272a5eSJohn Baldwin t7_ktls_write_wr(struct sge_txq *txq, void *dst, struct mbuf *m,
20154f272a5eSJohn Baldwin     u_int available)
20164f272a5eSJohn Baldwin {
20174f272a5eSJohn Baldwin 	struct sge_eq *eq = &txq->eq;
20184f272a5eSJohn Baldwin 	struct tlspcb *tlsp;
20194f272a5eSJohn Baldwin 	struct tcphdr *tcp;
20204f272a5eSJohn Baldwin 	struct mbuf *m_tls;
20214f272a5eSJohn Baldwin 	struct ether_header *eh;
20224f272a5eSJohn Baldwin 	tcp_seq tcp_seqno;
20234f272a5eSJohn Baldwin 	u_int ndesc, pidx, totdesc;
20244f272a5eSJohn Baldwin 	uint16_t eh_type, mss;
20254f272a5eSJohn Baldwin 
2026*9e269eafSJohn Baldwin 	TXQ_LOCK_ASSERT_OWNED(txq);
20274f272a5eSJohn Baldwin 	M_ASSERTPKTHDR(m);
20284f272a5eSJohn Baldwin 	MPASS(m->m_pkthdr.snd_tag != NULL);
20294f272a5eSJohn Baldwin 	tlsp = mst_to_tls(m->m_pkthdr.snd_tag);
20304f272a5eSJohn Baldwin 
20314f272a5eSJohn Baldwin 	totdesc = 0;
20324f272a5eSJohn Baldwin 	eh = mtod(m, struct ether_header *);
20334f272a5eSJohn Baldwin 	eh_type = ntohs(eh->ether_type);
20344f272a5eSJohn Baldwin 	if (eh_type == ETHERTYPE_VLAN) {
20354f272a5eSJohn Baldwin 		struct ether_vlan_header *evh = (void *)eh;
20364f272a5eSJohn Baldwin 
20374f272a5eSJohn Baldwin 		eh_type = ntohs(evh->evl_proto);
20384f272a5eSJohn Baldwin 	}
20394f272a5eSJohn Baldwin 
20404f272a5eSJohn Baldwin 	tcp = (struct tcphdr *)((char *)eh + m->m_pkthdr.l2hlen +
20414f272a5eSJohn Baldwin 	    m->m_pkthdr.l3hlen);
20424f272a5eSJohn Baldwin 	pidx = eq->pidx;
20434f272a5eSJohn Baldwin 
20444f272a5eSJohn Baldwin 	/* Determine MSS. */
20454f272a5eSJohn Baldwin 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
20464f272a5eSJohn Baldwin 		mss = m->m_pkthdr.tso_segsz;
20474f272a5eSJohn Baldwin 		tlsp->prev_mss = mss;
20484f272a5eSJohn Baldwin 	} else if (tlsp->prev_mss != 0)
20494f272a5eSJohn Baldwin 		mss = tlsp->prev_mss;
20504f272a5eSJohn Baldwin 	else
20514f272a5eSJohn Baldwin 		mss = if_getmtu(tlsp->vi->ifp) -
20524f272a5eSJohn Baldwin 		    (m->m_pkthdr.l3hlen + m->m_pkthdr.l4hlen);
20534f272a5eSJohn Baldwin 
20544f272a5eSJohn Baldwin 	/* Fetch the starting TCP sequence number for this chain. */
20554f272a5eSJohn Baldwin 	tcp_seqno = ntohl(tcp->th_seq);
20564f272a5eSJohn Baldwin #ifdef VERBOSE_TRACES
20574f272a5eSJohn Baldwin 	CTR(KTR_CXGBE, "%s: pkt len %d TCP seq %u", __func__, m->m_pkthdr.len,
20584f272a5eSJohn Baldwin 	    tcp_seqno);
20594f272a5eSJohn Baldwin #endif
2060*9e269eafSJohn Baldwin 	KASSERT(!tlsp->ghash_pending, ("%s: GHASH pending for send", __func__));
20614f272a5eSJohn Baldwin 
20624f272a5eSJohn Baldwin 	/*
20634f272a5eSJohn Baldwin 	 * Iterate over each TLS record constructing a work request
20644f272a5eSJohn Baldwin 	 * for that record.
20654f272a5eSJohn Baldwin 	 */
20664f272a5eSJohn Baldwin 	for (m_tls = m->m_next; m_tls != NULL; m_tls = m_tls->m_next) {
20674f272a5eSJohn Baldwin 		MPASS(m_tls->m_flags & M_EXTPG);
20684f272a5eSJohn Baldwin 
20694f272a5eSJohn Baldwin 		ndesc = ktls_write_tls_wr(tlsp, txq, dst, m, tcp, m_tls,
20704f272a5eSJohn Baldwin 		    available - totdesc, tcp_seqno, pidx, eh_type, mss);
20714f272a5eSJohn Baldwin 		totdesc += ndesc;
20724f272a5eSJohn Baldwin 		IDXINCR(pidx, ndesc, eq->sidx);
20734f272a5eSJohn Baldwin 		dst = &eq->desc[pidx];
20744f272a5eSJohn Baldwin 
20754f272a5eSJohn Baldwin 		tcp_seqno += m_tls->m_len;
20764f272a5eSJohn Baldwin 	}
20774f272a5eSJohn Baldwin 
2078*9e269eafSJohn Baldwin 	/*
2079*9e269eafSJohn Baldwin 	 * Queue another packet if this was a GCM request that didn't
2080*9e269eafSJohn Baldwin 	 * request a GHASH response.
2081*9e269eafSJohn Baldwin 	 */
2082*9e269eafSJohn Baldwin 	if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM && !tlsp->ghash_pending)
2083*9e269eafSJohn Baldwin 		ktls_queue_next_packet(tlsp, true);
2084*9e269eafSJohn Baldwin 
20854f272a5eSJohn Baldwin 	MPASS(totdesc <= available);
20864f272a5eSJohn Baldwin 	return (totdesc);
20874f272a5eSJohn Baldwin }
20884f272a5eSJohn Baldwin 
20894f272a5eSJohn Baldwin static void
20904f272a5eSJohn Baldwin t7_tls_tag_free(struct m_snd_tag *mst)
20914f272a5eSJohn Baldwin {
20924f272a5eSJohn Baldwin 	struct adapter *sc;
20934f272a5eSJohn Baldwin 	struct tlspcb *tlsp;
20944f272a5eSJohn Baldwin 
20954f272a5eSJohn Baldwin 	tlsp = mst_to_tls(mst);
20964f272a5eSJohn Baldwin 	sc = tlsp->sc;
20974f272a5eSJohn Baldwin 
20984f272a5eSJohn Baldwin 	CTR2(KTR_CXGBE, "%s: %p", __func__, tlsp);
20994f272a5eSJohn Baldwin 
21004f272a5eSJohn Baldwin 	if (tlsp->tx_key_addr >= 0)
21014f272a5eSJohn Baldwin 		t4_free_tls_keyid(sc, tlsp->tx_key_addr);
21024f272a5eSJohn Baldwin 
2103*9e269eafSJohn Baldwin 	KASSERT(mbufq_len(&tlsp->pending_mbufs) == 0,
2104*9e269eafSJohn Baldwin 	    ("%s: pending mbufs", __func__));
2105*9e269eafSJohn Baldwin 
21064f272a5eSJohn Baldwin 	zfree(tlsp, M_CXGBE);
21074f272a5eSJohn Baldwin }
21084f272a5eSJohn Baldwin 
2109*9e269eafSJohn Baldwin static int
2110*9e269eafSJohn Baldwin ktls_fw6_pld(struct sge_iq *iq, const struct rss_header *rss,
2111*9e269eafSJohn Baldwin     struct mbuf *m)
2112*9e269eafSJohn Baldwin {
2113*9e269eafSJohn Baldwin 	const struct cpl_fw6_pld *cpl;
2114*9e269eafSJohn Baldwin 	struct tlspcb *tlsp;
2115*9e269eafSJohn Baldwin 	const void *ghash;
2116*9e269eafSJohn Baldwin 
2117*9e269eafSJohn Baldwin 	if (m != NULL)
2118*9e269eafSJohn Baldwin 		cpl = mtod(m, const void *);
2119*9e269eafSJohn Baldwin 	else
2120*9e269eafSJohn Baldwin 		cpl = (const void *)(rss + 1);
2121*9e269eafSJohn Baldwin 
2122*9e269eafSJohn Baldwin 	tlsp = (struct tlspcb *)(uintptr_t)CPL_FW6_PLD_COOKIE(cpl);
2123*9e269eafSJohn Baldwin 	KASSERT(cpl->data[0] == 0, ("%s: error status returned", __func__));
2124*9e269eafSJohn Baldwin 
2125*9e269eafSJohn Baldwin 	TXQ_LOCK(tlsp->txq);
2126*9e269eafSJohn Baldwin #ifdef VERBOSE_TRACES
2127*9e269eafSJohn Baldwin 	CTR(KTR_CXGBE, "%s: %p received GHASH for offset %u%s", __func__, tlsp,
2128*9e269eafSJohn Baldwin 	    tlsp->ghash_offset, tlsp->ghash_lcb ? " in LCB" : "");
2129*9e269eafSJohn Baldwin #endif
2130*9e269eafSJohn Baldwin 	if (tlsp->ghash_lcb)
2131*9e269eafSJohn Baldwin 		ghash = &cpl->data[2];
2132*9e269eafSJohn Baldwin 	else
2133*9e269eafSJohn Baldwin 		ghash = cpl + 1;
2134*9e269eafSJohn Baldwin 	memcpy(tlsp->ghash, ghash, AES_GMAC_HASH_LEN);
2135*9e269eafSJohn Baldwin 	tlsp->ghash_valid = true;
2136*9e269eafSJohn Baldwin 	tlsp->ghash_pending = false;
2137*9e269eafSJohn Baldwin 	tlsp->txq->kern_tls_ghash_received++;
2138*9e269eafSJohn Baldwin 
2139*9e269eafSJohn Baldwin 	ktls_queue_next_packet(tlsp, false);
2140*9e269eafSJohn Baldwin 	TXQ_UNLOCK(tlsp->txq);
2141*9e269eafSJohn Baldwin 
2142*9e269eafSJohn Baldwin 	m_snd_tag_rele(&tlsp->com);
2143*9e269eafSJohn Baldwin 	m_freem(m);
2144*9e269eafSJohn Baldwin 	return (0);
2145*9e269eafSJohn Baldwin }
2146*9e269eafSJohn Baldwin 
2147*9e269eafSJohn Baldwin void
2148*9e269eafSJohn Baldwin t7_ktls_modload(void)
2149*9e269eafSJohn Baldwin {
2150*9e269eafSJohn Baldwin 	zero_buffer = malloc_aligned(AES_GMAC_HASH_LEN, AES_GMAC_HASH_LEN,
2151*9e269eafSJohn Baldwin 	    M_CXGBE, M_ZERO | M_WAITOK);
2152*9e269eafSJohn Baldwin 	zero_buffer_pa = vtophys(zero_buffer);
2153*9e269eafSJohn Baldwin 	t4_register_shared_cpl_handler(CPL_FW6_PLD, ktls_fw6_pld,
2154*9e269eafSJohn Baldwin 	    CPL_FW6_COOKIE_KTLS);
2155*9e269eafSJohn Baldwin }
2156*9e269eafSJohn Baldwin 
2157*9e269eafSJohn Baldwin void
2158*9e269eafSJohn Baldwin t7_ktls_modunload(void)
2159*9e269eafSJohn Baldwin {
2160*9e269eafSJohn Baldwin 	free(zero_buffer, M_CXGBE);
2161*9e269eafSJohn Baldwin 	t4_register_shared_cpl_handler(CPL_FW6_PLD, NULL, CPL_FW6_COOKIE_KTLS);
2162*9e269eafSJohn Baldwin }
2163*9e269eafSJohn Baldwin 
21644f272a5eSJohn Baldwin #else
21654f272a5eSJohn Baldwin 
21664f272a5eSJohn Baldwin int
21674f272a5eSJohn Baldwin t7_tls_tag_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params,
21684f272a5eSJohn Baldwin     struct m_snd_tag **pt)
21694f272a5eSJohn Baldwin {
21704f272a5eSJohn Baldwin 	return (ENXIO);
21714f272a5eSJohn Baldwin }
21724f272a5eSJohn Baldwin 
21734f272a5eSJohn Baldwin int
21744f272a5eSJohn Baldwin t7_ktls_parse_pkt(struct mbuf *m)
21754f272a5eSJohn Baldwin {
21764f272a5eSJohn Baldwin 	return (EINVAL);
21774f272a5eSJohn Baldwin }
21784f272a5eSJohn Baldwin 
21794f272a5eSJohn Baldwin int
21804f272a5eSJohn Baldwin t7_ktls_write_wr(struct sge_txq *txq, void *dst, struct mbuf *m,
21814f272a5eSJohn Baldwin     u_int available)
21824f272a5eSJohn Baldwin {
21834f272a5eSJohn Baldwin 	panic("can't happen");
21844f272a5eSJohn Baldwin }
21854f272a5eSJohn Baldwin 
2186*9e269eafSJohn Baldwin void
2187*9e269eafSJohn Baldwin t7_ktls_modload(void)
2188*9e269eafSJohn Baldwin {
2189*9e269eafSJohn Baldwin }
2190*9e269eafSJohn Baldwin 
2191*9e269eafSJohn Baldwin void
2192*9e269eafSJohn Baldwin t7_ktls_modunload(void)
2193*9e269eafSJohn Baldwin {
2194*9e269eafSJohn Baldwin }
2195*9e269eafSJohn Baldwin 
21964f272a5eSJohn Baldwin #endif
2197