1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2018-2019 Chelsio Communications, Inc. 5 * All rights reserved. 6 * Written by: John Baldwin <jhb@FreeBSD.org> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 #include "opt_inet.h" 31 #include "opt_inet6.h" 32 #include "opt_kern_tls.h" 33 34 #include <sys/param.h> 35 #include <sys/ktr.h> 36 #include <sys/ktls.h> 37 #include <sys/sglist.h> 38 #include <sys/socket.h> 39 #include <sys/socketvar.h> 40 #include <sys/sockbuf.h> 41 #include <netinet/in.h> 42 #include <netinet/in_pcb.h> 43 #include <netinet/ip.h> 44 #include <netinet/ip6.h> 45 #include <netinet/tcp_var.h> 46 #include <opencrypto/cryptodev.h> 47 #include <opencrypto/xform.h> 48 49 #include "common/common.h" 50 #include "common/t4_regs.h" 51 #include "common/t4_regs_values.h" 52 #include "common/t4_tcb.h" 53 #include "t4_l2t.h" 54 #include "t4_clip.h" 55 #include "t4_mp_ring.h" 56 #include "crypto/t4_crypto.h" 57 58 #if defined(INET) || defined(INET6) 59 60 #define TLS_HEADER_LENGTH 5 61 62 struct tls_scmd { 63 __be32 seqno_numivs; 64 __be32 ivgen_hdrlen; 65 }; 66 67 struct tlspcb { 68 struct m_snd_tag com; 69 struct vi_info *vi; /* virtual interface */ 70 struct adapter *sc; 71 struct l2t_entry *l2te; /* L2 table entry used by this connection */ 72 struct sge_txq *txq; 73 int tid; /* Connection identifier */ 74 75 int tx_key_addr; 76 bool inline_key; 77 bool using_timestamps; 78 unsigned char enc_mode; 79 80 struct tls_scmd scmd0; 81 struct tls_scmd scmd0_short; 82 83 unsigned int tx_key_info_size; 84 85 uint32_t prev_seq; 86 uint32_t prev_ack; 87 uint32_t prev_tsecr; 88 uint16_t prev_win; 89 uint16_t prev_mss; 90 91 /* Only used outside of setup and teardown when using inline keys. */ 92 struct tls_keyctx keyctx; 93 94 /* Fields only used during setup and teardown. 
*/ 95 struct inpcb *inp; /* backpointer to host stack's PCB */ 96 struct sge_wrq *ctrlq; 97 struct clip_entry *ce; /* CLIP table entry used by this tid */ 98 99 bool open_pending; 100 }; 101 102 static void t6_tls_tag_free(struct m_snd_tag *mst); 103 static int ktls_setup_keys(struct tlspcb *tlsp, 104 const struct ktls_session *tls, struct sge_txq *txq); 105 106 static const struct if_snd_tag_sw t6_tls_tag_sw = { 107 .snd_tag_free = t6_tls_tag_free, 108 .type = IF_SND_TAG_TYPE_TLS 109 }; 110 111 static inline struct tlspcb * 112 mst_to_tls(struct m_snd_tag *t) 113 { 114 return (__containerof(t, struct tlspcb, com)); 115 } 116 117 static struct tlspcb * 118 alloc_tlspcb(if_t ifp, struct vi_info *vi, int flags) 119 { 120 struct port_info *pi = vi->pi; 121 struct adapter *sc = pi->adapter; 122 struct tlspcb *tlsp; 123 124 tlsp = malloc(sizeof(*tlsp), M_CXGBE, M_ZERO | flags); 125 if (tlsp == NULL) 126 return (NULL); 127 128 m_snd_tag_init(&tlsp->com, ifp, &t6_tls_tag_sw); 129 tlsp->vi = vi; 130 tlsp->sc = sc; 131 tlsp->ctrlq = &sc->sge.ctrlq[pi->port_id]; 132 tlsp->tid = -1; 133 tlsp->tx_key_addr = -1; 134 135 return (tlsp); 136 } 137 138 static int 139 ktls_act_open_cpl_size(bool isipv6) 140 { 141 142 if (isipv6) 143 return (sizeof(struct cpl_t6_act_open_req6)); 144 else 145 return (sizeof(struct cpl_t6_act_open_req)); 146 } 147 148 static void 149 mk_ktls_act_open_req(struct adapter *sc, struct vi_info *vi, struct inpcb *inp, 150 struct tlspcb *tlsp, int atid, void *dst) 151 { 152 struct tcpcb *tp = intotcpcb(inp); 153 struct cpl_t6_act_open_req *cpl6; 154 struct cpl_act_open_req *cpl; 155 uint64_t options; 156 int qid_atid; 157 158 cpl6 = dst; 159 cpl = (struct cpl_act_open_req *)cpl6; 160 INIT_TP_WR(cpl6, 0); 161 qid_atid = V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) | 162 V_TID_COOKIE(CPL_COOKIE_KERN_TLS); 163 OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, 164 qid_atid)); 165 inp_4tuple_get(inp, &cpl->local_ip, &cpl->local_port, 166 &cpl->peer_ip, &cpl->peer_port); 167 168 options = F_TCAM_BYPASS | V_ULP_MODE(ULP_MODE_NONE); 169 options |= V_SMAC_SEL(vi->smt_idx) | V_TX_CHAN(vi->pi->tx_chan); 170 options |= F_NON_OFFLOAD; 171 cpl->opt0 = htobe64(options); 172 173 options = V_TX_QUEUE(TX_MODQ(vi->pi->tx_chan)); 174 if (tp->t_flags & TF_REQ_TSTMP) 175 options |= F_TSTAMPS_EN; 176 cpl->opt2 = htobe32(options); 177 } 178 179 static void 180 mk_ktls_act_open_req6(struct adapter *sc, struct vi_info *vi, 181 struct inpcb *inp, struct tlspcb *tlsp, int atid, void *dst) 182 { 183 struct tcpcb *tp = intotcpcb(inp); 184 struct cpl_t6_act_open_req6 *cpl6; 185 struct cpl_act_open_req6 *cpl; 186 uint64_t options; 187 int qid_atid; 188 189 cpl6 = dst; 190 cpl = (struct cpl_act_open_req6 *)cpl6; 191 INIT_TP_WR(cpl6, 0); 192 qid_atid = V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) | 193 V_TID_COOKIE(CPL_COOKIE_KERN_TLS); 194 OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, 195 qid_atid)); 196 cpl->local_port = inp->inp_lport; 197 cpl->local_ip_hi = *(uint64_t *)&inp->in6p_laddr.s6_addr[0]; 198 cpl->local_ip_lo = *(uint64_t *)&inp->in6p_laddr.s6_addr[8]; 199 cpl->peer_port = inp->inp_fport; 200 cpl->peer_ip_hi = *(uint64_t *)&inp->in6p_faddr.s6_addr[0]; 201 cpl->peer_ip_lo = *(uint64_t *)&inp->in6p_faddr.s6_addr[8]; 202 203 options = F_TCAM_BYPASS | V_ULP_MODE(ULP_MODE_NONE); 204 options |= V_SMAC_SEL(vi->smt_idx) | V_TX_CHAN(vi->pi->tx_chan); 205 options |= F_NON_OFFLOAD; 206 cpl->opt0 = htobe64(options); 207 208 options = V_TX_QUEUE(TX_MODQ(vi->pi->tx_chan)); 209 if 
(tp->t_flags & TF_REQ_TSTMP) 210 options |= F_TSTAMPS_EN; 211 cpl->opt2 = htobe32(options); 212 } 213 214 static int 215 send_ktls_act_open_req(struct adapter *sc, struct vi_info *vi, 216 struct inpcb *inp, struct tlspcb *tlsp, int atid) 217 { 218 struct wrqe *wr; 219 bool isipv6; 220 221 isipv6 = (inp->inp_vflag & INP_IPV6) != 0; 222 if (isipv6) { 223 tlsp->ce = t4_get_clip_entry(sc, &inp->in6p_laddr, true); 224 if (tlsp->ce == NULL) 225 return (ENOENT); 226 } 227 228 wr = alloc_wrqe(ktls_act_open_cpl_size(isipv6), tlsp->ctrlq); 229 if (wr == NULL) { 230 CTR2(KTR_CXGBE, "%s: atid %d failed to alloc WR", __func__, 231 atid); 232 return (ENOMEM); 233 } 234 235 if (isipv6) 236 mk_ktls_act_open_req6(sc, vi, inp, tlsp, atid, wrtod(wr)); 237 else 238 mk_ktls_act_open_req(sc, vi, inp, tlsp, atid, wrtod(wr)); 239 240 tlsp->open_pending = true; 241 t4_wrq_tx(sc, wr); 242 return (0); 243 } 244 245 static int 246 ktls_act_open_rpl(struct sge_iq *iq, const struct rss_header *rss, 247 struct mbuf *m) 248 { 249 struct adapter *sc = iq->adapter; 250 const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1); 251 u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status))); 252 u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status)); 253 struct tlspcb *tlsp = lookup_atid(sc, atid); 254 struct inpcb *inp = tlsp->inp; 255 256 CTR3(KTR_CXGBE, "%s: atid %d status %d", __func__, atid, status); 257 free_atid(sc, atid); 258 if (status == 0) 259 tlsp->tid = GET_TID(cpl); 260 261 INP_WLOCK(inp); 262 tlsp->open_pending = false; 263 wakeup(tlsp); 264 INP_WUNLOCK(inp); 265 return (0); 266 } 267 268 /* SET_TCB_FIELD sent as a ULP command looks like this */ 269 #define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \ 270 sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core)) 271 272 _Static_assert((LEN__SET_TCB_FIELD_ULP + sizeof(struct ulptx_idata)) % 16 == 0, 273 "CPL_SET_TCB_FIELD ULP command not 16-byte aligned"); 274 275 static void 276 write_set_tcb_field_ulp(struct tlspcb *tlsp, void *dst, struct sge_txq *txq, 277 uint16_t word, uint64_t mask, uint64_t val) 278 { 279 struct ulp_txpkt *txpkt; 280 struct ulptx_idata *idata; 281 struct cpl_set_tcb_field_core *cpl; 282 283 /* ULP_TXPKT */ 284 txpkt = dst; 285 txpkt->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) | 286 V_ULP_TXPKT_DATAMODIFY(0) | 287 V_ULP_TXPKT_CHANNELID(tlsp->vi->pi->port_id) | V_ULP_TXPKT_DEST(0) | 288 V_ULP_TXPKT_FID(txq->eq.cntxt_id) | V_ULP_TXPKT_RO(1)); 289 txpkt->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16)); 290 291 /* ULPTX_IDATA sub-command */ 292 idata = (struct ulptx_idata *)(txpkt + 1); 293 idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM)); 294 idata->len = htobe32(sizeof(*cpl)); 295 296 /* CPL_SET_TCB_FIELD */ 297 cpl = (struct cpl_set_tcb_field_core *)(idata + 1); 298 OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tlsp->tid)); 299 cpl->reply_ctrl = htobe16(F_NO_REPLY); 300 cpl->word_cookie = htobe16(V_WORD(word)); 301 cpl->mask = htobe64(mask); 302 cpl->val = htobe64(val); 303 304 /* ULPTX_NOOP */ 305 idata = (struct ulptx_idata *)(cpl + 1); 306 idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP)); 307 idata->len = htobe32(0); 308 } 309 310 static int 311 ktls_set_tcb_fields(struct tlspcb *tlsp, struct tcpcb *tp, struct sge_txq *txq) 312 { 313 struct fw_ulptx_wr *wr; 314 struct mbuf *m; 315 char *dst; 316 void *items[1]; 317 int error, len; 318 319 len = sizeof(*wr) + 3 * roundup2(LEN__SET_TCB_FIELD_ULP, 16); 320 if (tp->t_flags & TF_REQ_TSTMP) 321 len += roundup2(LEN__SET_TCB_FIELD_ULP, 16); 
322 m = alloc_wr_mbuf(len, M_NOWAIT); 323 if (m == NULL) { 324 CTR2(KTR_CXGBE, "%s: tid %d failed to alloc WR mbuf", __func__, 325 tlsp->tid); 326 return (ENOMEM); 327 } 328 m->m_pkthdr.snd_tag = m_snd_tag_ref(&tlsp->com); 329 m->m_pkthdr.csum_flags |= CSUM_SND_TAG; 330 331 /* FW_ULPTX_WR */ 332 wr = mtod(m, void *); 333 wr->op_to_compl = htobe32(V_FW_WR_OP(FW_ULPTX_WR)); 334 wr->flowid_len16 = htobe32(F_FW_ULPTX_WR_DATA | 335 V_FW_WR_LEN16(len / 16)); 336 wr->cookie = 0; 337 dst = (char *)(wr + 1); 338 339 /* Clear TF_NON_OFFLOAD and set TF_CORE_BYPASS */ 340 write_set_tcb_field_ulp(tlsp, dst, txq, W_TCB_T_FLAGS, 341 V_TCB_T_FLAGS(V_TF_CORE_BYPASS(1) | V_TF_NON_OFFLOAD(1)), 342 V_TCB_T_FLAGS(V_TF_CORE_BYPASS(1))); 343 dst += roundup2(LEN__SET_TCB_FIELD_ULP, 16); 344 345 /* Clear the SND_UNA_RAW, SND_NXT_RAW, and SND_MAX_RAW offsets. */ 346 write_set_tcb_field_ulp(tlsp, dst, txq, W_TCB_SND_UNA_RAW, 347 V_TCB_SND_NXT_RAW(M_TCB_SND_NXT_RAW) | 348 V_TCB_SND_UNA_RAW(M_TCB_SND_UNA_RAW), 349 V_TCB_SND_NXT_RAW(0) | V_TCB_SND_UNA_RAW(0)); 350 dst += roundup2(LEN__SET_TCB_FIELD_ULP, 16); 351 352 write_set_tcb_field_ulp(tlsp, dst, txq, W_TCB_SND_MAX_RAW, 353 V_TCB_SND_MAX_RAW(M_TCB_SND_MAX_RAW), V_TCB_SND_MAX_RAW(0)); 354 dst += roundup2(LEN__SET_TCB_FIELD_ULP, 16); 355 356 if (tp->t_flags & TF_REQ_TSTMP) { 357 write_set_tcb_field_ulp(tlsp, dst, txq, W_TCB_TIMESTAMP_OFFSET, 358 V_TCB_TIMESTAMP_OFFSET(M_TCB_TIMESTAMP_OFFSET), 359 V_TCB_TIMESTAMP_OFFSET(tp->ts_offset >> 28)); 360 dst += roundup2(LEN__SET_TCB_FIELD_ULP, 16); 361 } 362 363 KASSERT(dst - (char *)wr == len, ("%s: length mismatch", __func__)); 364 365 items[0] = m; 366 error = mp_ring_enqueue(txq->r, items, 1, 1); 367 if (error) 368 m_free(m); 369 return (error); 370 } 371 372 int 373 t6_tls_tag_alloc(if_t ifp, union if_snd_tag_alloc_params *params, 374 struct m_snd_tag **pt) 375 { 376 const struct ktls_session *tls; 377 struct tlspcb *tlsp; 378 struct adapter *sc; 379 struct vi_info *vi; 380 struct inpcb *inp; 381 struct tcpcb *tp; 382 struct sge_txq *txq; 383 int atid, error, explicit_iv_size, keyid, mac_first; 384 385 tls = params->tls.tls; 386 387 /* Only TLS 1.1 and TLS 1.2 are currently supported. */ 388 if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE || 389 tls->params.tls_vminor < TLS_MINOR_VER_ONE || 390 tls->params.tls_vminor > TLS_MINOR_VER_TWO) 391 return (EPROTONOSUPPORT); 392 393 /* Sanity check values in *tls. */ 394 switch (tls->params.cipher_algorithm) { 395 case CRYPTO_AES_CBC: 396 /* XXX: Explicitly ignore any provided IV. 
*/ 397 switch (tls->params.cipher_key_len) { 398 case 128 / 8: 399 case 192 / 8: 400 case 256 / 8: 401 break; 402 default: 403 return (EINVAL); 404 } 405 switch (tls->params.auth_algorithm) { 406 case CRYPTO_SHA1_HMAC: 407 case CRYPTO_SHA2_256_HMAC: 408 case CRYPTO_SHA2_384_HMAC: 409 break; 410 default: 411 return (EPROTONOSUPPORT); 412 } 413 explicit_iv_size = AES_BLOCK_LEN; 414 mac_first = 1; 415 break; 416 case CRYPTO_AES_NIST_GCM_16: 417 if (tls->params.iv_len != SALT_SIZE) 418 return (EINVAL); 419 switch (tls->params.cipher_key_len) { 420 case 128 / 8: 421 case 192 / 8: 422 case 256 / 8: 423 break; 424 default: 425 return (EINVAL); 426 } 427 explicit_iv_size = 8; 428 mac_first = 0; 429 break; 430 default: 431 return (EPROTONOSUPPORT); 432 } 433 434 vi = if_getsoftc(ifp); 435 sc = vi->adapter; 436 437 tlsp = alloc_tlspcb(ifp, vi, M_WAITOK); 438 439 atid = alloc_atid(sc, tlsp); 440 if (atid < 0) { 441 error = ENOMEM; 442 goto failed; 443 } 444 445 if (sc->tlst.inline_keys) 446 keyid = -1; 447 else 448 keyid = t4_alloc_tls_keyid(sc); 449 if (keyid < 0) { 450 CTR2(KTR_CXGBE, "%s: atid %d using immediate key ctx", __func__, 451 atid); 452 tlsp->inline_key = true; 453 } else { 454 tlsp->tx_key_addr = keyid; 455 CTR3(KTR_CXGBE, "%s: atid %d allocated TX key addr %#x", 456 __func__, 457 atid, tlsp->tx_key_addr); 458 } 459 460 inp = params->tls.inp; 461 INP_RLOCK(inp); 462 if (inp->inp_flags & INP_DROPPED) { 463 INP_RUNLOCK(inp); 464 error = ECONNRESET; 465 goto failed; 466 } 467 tlsp->inp = inp; 468 469 tp = intotcpcb(inp); 470 if (tp->t_flags & TF_REQ_TSTMP) { 471 tlsp->using_timestamps = true; 472 if ((tp->ts_offset & 0xfffffff) != 0) { 473 INP_RUNLOCK(inp); 474 error = EINVAL; 475 goto failed; 476 } 477 } else 478 tlsp->using_timestamps = false; 479 480 error = send_ktls_act_open_req(sc, vi, inp, tlsp, atid); 481 if (error) { 482 INP_RUNLOCK(inp); 483 goto failed; 484 } 485 486 /* Wait for reply to active open. */ 487 CTR2(KTR_CXGBE, "%s: atid %d sent CPL_ACT_OPEN_REQ", __func__, 488 atid); 489 while (tlsp->open_pending) { 490 /* 491 * XXX: PCATCH? We would then have to discard the PCB 492 * when the completion CPL arrived. 493 */ 494 error = rw_sleep(tlsp, &inp->inp_lock, 0, "t6tlsop", 0); 495 } 496 497 atid = -1; 498 if (tlsp->tid < 0) { 499 INP_RUNLOCK(inp); 500 error = ENOMEM; 501 goto failed; 502 } 503 504 if (inp->inp_flags & INP_DROPPED) { 505 INP_RUNLOCK(inp); 506 error = ECONNRESET; 507 goto failed; 508 } 509 510 txq = &sc->sge.txq[vi->first_txq]; 511 if (inp->inp_flowtype != M_HASHTYPE_NONE) 512 txq += ((inp->inp_flowid % (vi->ntxq - vi->rsrv_noflowq)) + 513 vi->rsrv_noflowq); 514 tlsp->txq = txq; 515 516 error = ktls_set_tcb_fields(tlsp, tp, txq); 517 INP_RUNLOCK(inp); 518 if (error) 519 goto failed; 520 521 error = ktls_setup_keys(tlsp, tls, txq); 522 if (error) 523 goto failed; 524 525 tlsp->enc_mode = t4_tls_cipher_mode(tls); 526 tlsp->tx_key_info_size = t4_tls_key_info_size(tls); 527 528 /* The SCMD fields used when encrypting a full TLS record. 
*/ 529 tlsp->scmd0.seqno_numivs = htobe32(V_SCMD_SEQ_NO_CTRL(3) | 530 V_SCMD_PROTO_VERSION(t4_tls_proto_ver(tls)) | 531 V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) | 532 V_SCMD_CIPH_AUTH_SEQ_CTRL((mac_first == 0)) | 533 V_SCMD_CIPH_MODE(tlsp->enc_mode) | 534 V_SCMD_AUTH_MODE(t4_tls_auth_mode(tls)) | 535 V_SCMD_HMAC_CTRL(t4_tls_hmac_ctrl(tls)) | 536 V_SCMD_IV_SIZE(explicit_iv_size / 2) | V_SCMD_NUM_IVS(1)); 537 538 tlsp->scmd0.ivgen_hdrlen = V_SCMD_IV_GEN_CTRL(0) | 539 V_SCMD_TLS_FRAG_ENABLE(0); 540 if (tlsp->inline_key) 541 tlsp->scmd0.ivgen_hdrlen |= V_SCMD_KEY_CTX_INLINE(1); 542 tlsp->scmd0.ivgen_hdrlen = htobe32(tlsp->scmd0.ivgen_hdrlen); 543 544 /* 545 * The SCMD fields used when encrypting a partial TLS record 546 * (no trailer and possibly a truncated payload). 547 */ 548 tlsp->scmd0_short.seqno_numivs = V_SCMD_SEQ_NO_CTRL(0) | 549 V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) | 550 V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) | 551 V_SCMD_CIPH_AUTH_SEQ_CTRL((mac_first == 0)) | 552 V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_NOP) | 553 V_SCMD_HMAC_CTRL(SCMD_HMAC_CTRL_NOP) | 554 V_SCMD_IV_SIZE(AES_BLOCK_LEN / 2) | V_SCMD_NUM_IVS(0); 555 if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) 556 tlsp->scmd0_short.seqno_numivs |= 557 V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_CTR); 558 else 559 tlsp->scmd0_short.seqno_numivs |= 560 V_SCMD_CIPH_MODE(tlsp->enc_mode); 561 tlsp->scmd0_short.seqno_numivs = 562 htobe32(tlsp->scmd0_short.seqno_numivs); 563 564 tlsp->scmd0_short.ivgen_hdrlen = V_SCMD_IV_GEN_CTRL(0) | 565 V_SCMD_TLS_FRAG_ENABLE(0) | 566 V_SCMD_AADIVDROP(1); 567 if (tlsp->inline_key) 568 tlsp->scmd0_short.ivgen_hdrlen |= V_SCMD_KEY_CTX_INLINE(1); 569 570 TXQ_LOCK(txq); 571 if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) 572 txq->kern_tls_gcm++; 573 else 574 txq->kern_tls_cbc++; 575 TXQ_UNLOCK(txq); 576 *pt = &tlsp->com; 577 return (0); 578 579 failed: 580 if (atid >= 0) 581 free_atid(sc, atid); 582 m_snd_tag_rele(&tlsp->com); 583 return (error); 584 } 585 586 static int 587 ktls_setup_keys(struct tlspcb *tlsp, const struct ktls_session *tls, 588 struct sge_txq *txq) 589 { 590 struct tls_key_req *kwr; 591 struct tls_keyctx *kctx; 592 void *items[1]; 593 struct mbuf *m; 594 int error; 595 596 /* 597 * Store the salt and keys in the key context. For 598 * connections with an inline key, this key context is passed 599 * as immediate data in each work request. For connections 600 * storing the key in DDR, a work request is used to store a 601 * copy of the key context in DDR. 602 */ 603 t4_tls_key_ctx(tls, KTLS_TX, &tlsp->keyctx); 604 if (tlsp->inline_key) 605 return (0); 606 607 /* Populate key work request. */ 608 m = alloc_wr_mbuf(TLS_KEY_WR_SZ, M_NOWAIT); 609 if (m == NULL) { 610 CTR2(KTR_CXGBE, "%s: tid %d failed to alloc WR mbuf", __func__, 611 tlsp->tid); 612 return (ENOMEM); 613 } 614 m->m_pkthdr.snd_tag = m_snd_tag_ref(&tlsp->com); 615 m->m_pkthdr.csum_flags |= CSUM_SND_TAG; 616 kwr = mtod(m, void *); 617 memset(kwr, 0, TLS_KEY_WR_SZ); 618 619 t4_write_tlskey_wr(tls, KTLS_TX, tlsp->tid, 0, tlsp->tx_key_addr, kwr); 620 kctx = (struct tls_keyctx *)(kwr + 1); 621 memcpy(kctx, &tlsp->keyctx, sizeof(*kctx)); 622 623 /* 624 * Place the key work request in the transmit queue. It 625 * should be sent to the NIC before any TLS packets using this 626 * session. 
627 */ 628 items[0] = m; 629 error = mp_ring_enqueue(txq->r, items, 1, 1); 630 if (error) 631 m_free(m); 632 else 633 CTR2(KTR_CXGBE, "%s: tid %d sent key WR", __func__, tlsp->tid); 634 return (error); 635 } 636 637 static u_int 638 ktls_base_wr_size(struct tlspcb *tlsp) 639 { 640 u_int wr_len; 641 642 wr_len = sizeof(struct fw_ulptx_wr); // 16 643 wr_len += sizeof(struct ulp_txpkt); // 8 644 wr_len += sizeof(struct ulptx_idata); // 8 645 wr_len += sizeof(struct cpl_tx_sec_pdu);// 32 646 if (tlsp->inline_key) 647 wr_len += tlsp->tx_key_info_size; 648 else { 649 wr_len += sizeof(struct ulptx_sc_memrd);// 8 650 wr_len += sizeof(struct ulptx_idata); // 8 651 } 652 wr_len += sizeof(struct cpl_tx_data); // 16 653 return (wr_len); 654 } 655 656 /* How many bytes of TCP payload to send for a given TLS record. */ 657 static u_int 658 ktls_tcp_payload_length(struct tlspcb *tlsp, struct mbuf *m_tls) 659 { 660 struct tls_record_layer *hdr; 661 u_int plen, mlen; 662 663 M_ASSERTEXTPG(m_tls); 664 hdr = (void *)m_tls->m_epg_hdr; 665 plen = ntohs(hdr->tls_length); 666 667 /* 668 * What range of the TLS record is the mbuf requesting to be 669 * sent. 670 */ 671 mlen = mtod(m_tls, vm_offset_t) + m_tls->m_len; 672 673 /* Always send complete records. */ 674 if (mlen == TLS_HEADER_LENGTH + plen) 675 return (mlen); 676 677 /* 678 * If the host stack has asked to send part of the trailer, 679 * trim the length to avoid sending any of the trailer. There 680 * is no way to send a partial trailer currently. 681 */ 682 if (mlen > TLS_HEADER_LENGTH + plen - m_tls->m_epg_trllen) 683 mlen = TLS_HEADER_LENGTH + plen - m_tls->m_epg_trllen; 684 685 686 /* 687 * For AES-CBC adjust the ciphertext length for the block 688 * size. 689 */ 690 if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_CBC && 691 mlen > TLS_HEADER_LENGTH) { 692 mlen = TLS_HEADER_LENGTH + rounddown(mlen - TLS_HEADER_LENGTH, 693 AES_BLOCK_LEN); 694 } 695 696 #ifdef VERBOSE_TRACES 697 CTR4(KTR_CXGBE, "%s: tid %d short TLS record (%u vs %u)", 698 __func__, tlsp->tid, mlen, TLS_HEADER_LENGTH + plen); 699 #endif 700 return (mlen); 701 } 702 703 /* 704 * For a "short" TLS record, determine the offset into the TLS record 705 * payload to send. This offset does not include the TLS header, but 706 * a non-zero offset implies that a header will not be sent. 707 */ 708 static u_int 709 ktls_payload_offset(struct tlspcb *tlsp, struct mbuf *m_tls) 710 { 711 struct tls_record_layer *hdr; 712 u_int offset, plen; 713 #ifdef INVARIANTS 714 u_int mlen; 715 #endif 716 717 M_ASSERTEXTPG(m_tls); 718 hdr = (void *)m_tls->m_epg_hdr; 719 plen = ntohs(hdr->tls_length); 720 #ifdef INVARIANTS 721 mlen = mtod(m_tls, vm_offset_t) + m_tls->m_len; 722 MPASS(mlen < TLS_HEADER_LENGTH + plen); 723 #endif 724 if (mtod(m_tls, vm_offset_t) <= m_tls->m_epg_hdrlen) 725 return (0); 726 if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) { 727 /* 728 * Always send something. This function is only called 729 * if we aren't sending the tag at all, but if the 730 * request starts in the tag then we are in an odd 731 * state where would effectively send nothing. Cap 732 * the offset at the last byte of the record payload 733 * to send the last cipher block. 734 */ 735 offset = min(mtod(m_tls, vm_offset_t) - m_tls->m_epg_hdrlen, 736 (plen - TLS_HEADER_LENGTH - m_tls->m_epg_trllen) - 1); 737 return (rounddown(offset, AES_BLOCK_LEN)); 738 } 739 return (0); 740 } 741 742 static u_int 743 ktls_sgl_size(u_int nsegs) 744 { 745 u_int wr_len; 746 747 /* First segment is part of ulptx_sgl. 
*/ 748 nsegs--; 749 750 wr_len = sizeof(struct ulptx_sgl); 751 wr_len += 8 * ((3 * nsegs) / 2 + (nsegs & 1)); 752 return (wr_len); 753 } 754 755 static int 756 ktls_wr_len(struct tlspcb *tlsp, struct mbuf *m, struct mbuf *m_tls, 757 int *nsegsp) 758 { 759 struct tls_record_layer *hdr; 760 u_int imm_len, offset, plen, wr_len, tlen; 761 762 M_ASSERTEXTPG(m_tls); 763 764 /* 765 * Determine the size of the TLS record payload to send 766 * excluding header and trailer. 767 */ 768 tlen = ktls_tcp_payload_length(tlsp, m_tls); 769 if (tlen <= m_tls->m_epg_hdrlen) { 770 /* 771 * For requests that only want to send the TLS header, 772 * send a tunnelled packet as immediate data. 773 */ 774 wr_len = sizeof(struct fw_eth_tx_pkt_wr) + 775 sizeof(struct cpl_tx_pkt_core) + 776 roundup2(m->m_len + m_tls->m_len, 16); 777 if (wr_len > SGE_MAX_WR_LEN) { 778 CTR3(KTR_CXGBE, 779 "%s: tid %d TLS header-only packet too long (len %d)", 780 __func__, tlsp->tid, m->m_len + m_tls->m_len); 781 } 782 783 /* This should always be the last TLS record in a chain. */ 784 MPASS(m_tls->m_next == NULL); 785 *nsegsp = 0; 786 return (wr_len); 787 } 788 789 hdr = (void *)m_tls->m_epg_hdr; 790 plen = TLS_HEADER_LENGTH + ntohs(hdr->tls_length) - m_tls->m_epg_trllen; 791 if (tlen < plen) { 792 plen = tlen; 793 offset = ktls_payload_offset(tlsp, m_tls); 794 } else 795 offset = 0; 796 797 /* Calculate the size of the work request. */ 798 wr_len = ktls_base_wr_size(tlsp); 799 800 /* 801 * Full records and short records with an offset of 0 include 802 * the TLS header as immediate data. Short records include a 803 * raw AES IV as immediate data. 804 */ 805 imm_len = 0; 806 if (offset == 0) 807 imm_len += m_tls->m_epg_hdrlen; 808 if (plen == tlen) 809 imm_len += AES_BLOCK_LEN; 810 wr_len += roundup2(imm_len, 16); 811 812 /* TLS record payload via DSGL. */ 813 *nsegsp = sglist_count_mbuf_epg(m_tls, m_tls->m_epg_hdrlen + offset, 814 plen - (m_tls->m_epg_hdrlen + offset)); 815 wr_len += ktls_sgl_size(*nsegsp); 816 817 wr_len = roundup2(wr_len, 16); 818 return (wr_len); 819 } 820 821 /* 822 * See if we have any TCP options requiring a dedicated options-only 823 * packet. 824 */ 825 static int 826 ktls_has_tcp_options(struct tcphdr *tcp) 827 { 828 u_char *cp; 829 int cnt, opt, optlen; 830 831 cp = (u_char *)(tcp + 1); 832 cnt = tcp->th_off * 4 - sizeof(struct tcphdr); 833 for (; cnt > 0; cnt -= optlen, cp += optlen) { 834 opt = cp[0]; 835 if (opt == TCPOPT_EOL) 836 break; 837 if (opt == TCPOPT_NOP) 838 optlen = 1; 839 else { 840 if (cnt < 2) 841 break; 842 optlen = cp[1]; 843 if (optlen < 2 || optlen > cnt) 844 break; 845 } 846 switch (opt) { 847 case TCPOPT_NOP: 848 case TCPOPT_TIMESTAMP: 849 break; 850 default: 851 return (1); 852 } 853 } 854 return (0); 855 } 856 857 /* 858 * Find the TCP timestamp option. 
859 */ 860 static void * 861 ktls_find_tcp_timestamps(struct tcphdr *tcp) 862 { 863 u_char *cp; 864 int cnt, opt, optlen; 865 866 cp = (u_char *)(tcp + 1); 867 cnt = tcp->th_off * 4 - sizeof(struct tcphdr); 868 for (; cnt > 0; cnt -= optlen, cp += optlen) { 869 opt = cp[0]; 870 if (opt == TCPOPT_EOL) 871 break; 872 if (opt == TCPOPT_NOP) 873 optlen = 1; 874 else { 875 if (cnt < 2) 876 break; 877 optlen = cp[1]; 878 if (optlen < 2 || optlen > cnt) 879 break; 880 } 881 if (opt == TCPOPT_TIMESTAMP && optlen == TCPOLEN_TIMESTAMP) 882 return (cp + 2); 883 } 884 return (NULL); 885 } 886 887 int 888 t6_ktls_parse_pkt(struct mbuf *m) 889 { 890 struct tlspcb *tlsp; 891 struct ether_header *eh; 892 struct ip *ip; 893 struct ip6_hdr *ip6; 894 struct tcphdr *tcp; 895 struct mbuf *m_tls; 896 void *items[1]; 897 int nsegs; 898 u_int wr_len, tot_len; 899 900 /* 901 * Locate headers in initial mbuf. 902 * 903 * XXX: This assumes all of the headers are in the initial mbuf. 904 * Could perhaps use m_advance() like parse_pkt() if that turns 905 * out to not be true. 906 */ 907 M_ASSERTPKTHDR(m); 908 MPASS(m->m_pkthdr.snd_tag != NULL); 909 tlsp = mst_to_tls(m->m_pkthdr.snd_tag); 910 911 if (m->m_len <= sizeof(*eh) + sizeof(*ip)) { 912 CTR2(KTR_CXGBE, "%s: tid %d header mbuf too short", __func__, 913 tlsp->tid); 914 return (EINVAL); 915 } 916 eh = mtod(m, struct ether_header *); 917 if (ntohs(eh->ether_type) != ETHERTYPE_IP && 918 ntohs(eh->ether_type) != ETHERTYPE_IPV6) { 919 CTR2(KTR_CXGBE, "%s: tid %d mbuf not ETHERTYPE_IP{,V6}", 920 __func__, tlsp->tid); 921 return (EINVAL); 922 } 923 m->m_pkthdr.l2hlen = sizeof(*eh); 924 925 /* XXX: Reject unsupported IP options? */ 926 if (ntohs(eh->ether_type) == ETHERTYPE_IP) { 927 ip = (struct ip *)(eh + 1); 928 if (ip->ip_p != IPPROTO_TCP) { 929 CTR2(KTR_CXGBE, "%s: tid %d mbuf not IPPROTO_TCP", 930 __func__, tlsp->tid); 931 return (EINVAL); 932 } 933 m->m_pkthdr.l3hlen = ip->ip_hl * 4; 934 } else { 935 ip6 = (struct ip6_hdr *)(eh + 1); 936 if (ip6->ip6_nxt != IPPROTO_TCP) { 937 CTR3(KTR_CXGBE, "%s: tid %d mbuf not IPPROTO_TCP (%u)", 938 __func__, tlsp->tid, ip6->ip6_nxt); 939 return (EINVAL); 940 } 941 m->m_pkthdr.l3hlen = sizeof(struct ip6_hdr); 942 } 943 if (m->m_len < m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen + 944 sizeof(*tcp)) { 945 CTR2(KTR_CXGBE, "%s: tid %d header mbuf too short (2)", 946 __func__, tlsp->tid); 947 return (EINVAL); 948 } 949 tcp = (struct tcphdr *)((char *)(eh + 1) + m->m_pkthdr.l3hlen); 950 m->m_pkthdr.l4hlen = tcp->th_off * 4; 951 952 /* Bail if there is TCP payload before the TLS record. */ 953 if (m->m_len != m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen + 954 m->m_pkthdr.l4hlen) { 955 CTR6(KTR_CXGBE, 956 "%s: tid %d header mbuf bad length (%d + %d + %d != %d)", 957 __func__, tlsp->tid, m->m_pkthdr.l2hlen, 958 m->m_pkthdr.l3hlen, m->m_pkthdr.l4hlen, m->m_len); 959 return (EINVAL); 960 } 961 962 /* Assume all headers are in 'm' for now. */ 963 MPASS(m->m_next != NULL); 964 MPASS(m->m_next->m_flags & M_EXTPG); 965 966 tot_len = 0; 967 968 /* 969 * Each of the remaining mbufs in the chain should reference a 970 * TLS record. 
971 */ 972 for (m_tls = m->m_next; m_tls != NULL; m_tls = m_tls->m_next) { 973 MPASS(m_tls->m_flags & M_EXTPG); 974 975 wr_len = ktls_wr_len(tlsp, m, m_tls, &nsegs); 976 #ifdef VERBOSE_TRACES 977 CTR4(KTR_CXGBE, "%s: tid %d wr_len %d nsegs %d", __func__, 978 tlsp->tid, wr_len, nsegs); 979 #endif 980 if (wr_len > SGE_MAX_WR_LEN || nsegs > TX_SGL_SEGS) 981 return (EFBIG); 982 tot_len += roundup2(wr_len, EQ_ESIZE); 983 984 /* 985 * Store 'nsegs' for the first TLS record in the 986 * header mbuf's metadata. 987 */ 988 if (m_tls == m->m_next) 989 set_mbuf_nsegs(m, nsegs); 990 } 991 992 MPASS(tot_len != 0); 993 994 /* 995 * See if we have any TCP options or a FIN requiring a 996 * dedicated packet. 997 */ 998 if ((tcp_get_flags(tcp) & TH_FIN) != 0 || ktls_has_tcp_options(tcp)) { 999 wr_len = sizeof(struct fw_eth_tx_pkt_wr) + 1000 sizeof(struct cpl_tx_pkt_core) + roundup2(m->m_len, 16); 1001 if (wr_len > SGE_MAX_WR_LEN) { 1002 CTR3(KTR_CXGBE, 1003 "%s: tid %d options-only packet too long (len %d)", 1004 __func__, tlsp->tid, m->m_len); 1005 return (EINVAL); 1006 } 1007 tot_len += roundup2(wr_len, EQ_ESIZE); 1008 } 1009 1010 /* Include room for a TP work request to program an L2T entry. */ 1011 tot_len += EQ_ESIZE; 1012 1013 /* 1014 * Include room for a ULPTX work request including up to 5 1015 * CPL_SET_TCB_FIELD commands before the first TLS work 1016 * request. 1017 */ 1018 wr_len = sizeof(struct fw_ulptx_wr) + 1019 5 * roundup2(LEN__SET_TCB_FIELD_ULP, 16); 1020 1021 /* 1022 * If timestamps are present, reserve 1 more command for 1023 * setting the echoed timestamp. 1024 */ 1025 if (tlsp->using_timestamps) 1026 wr_len += roundup2(LEN__SET_TCB_FIELD_ULP, 16); 1027 1028 tot_len += roundup2(wr_len, EQ_ESIZE); 1029 1030 set_mbuf_len16(m, tot_len / 16); 1031 #ifdef VERBOSE_TRACES 1032 CTR4(KTR_CXGBE, "%s: tid %d len16 %d nsegs %d", __func__, 1033 tlsp->tid, mbuf_len16(m), mbuf_nsegs(m)); 1034 #endif 1035 items[0] = m; 1036 return (mp_ring_enqueue(tlsp->txq->r, items, 1, 256)); 1037 } 1038 1039 /* 1040 * If the SGL ends on an address that is not 16 byte aligned, this function will 1041 * add a 0 filled flit at the end. 
1042 */ 1043 static void 1044 write_gl_to_buf(struct sglist *gl, caddr_t to) 1045 { 1046 struct sglist_seg *seg; 1047 __be64 *flitp; 1048 struct ulptx_sgl *usgl; 1049 int i, nflits, nsegs; 1050 1051 KASSERT(((uintptr_t)to & 0xf) == 0, 1052 ("%s: SGL must start at a 16 byte boundary: %p", __func__, to)); 1053 1054 nsegs = gl->sg_nseg; 1055 MPASS(nsegs > 0); 1056 1057 nflits = (3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2; 1058 flitp = (__be64 *)to; 1059 seg = &gl->sg_segs[0]; 1060 usgl = (void *)flitp; 1061 1062 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | 1063 V_ULPTX_NSGE(nsegs)); 1064 usgl->len0 = htobe32(seg->ss_len); 1065 usgl->addr0 = htobe64(seg->ss_paddr); 1066 seg++; 1067 1068 for (i = 0; i < nsegs - 1; i++, seg++) { 1069 usgl->sge[i / 2].len[i & 1] = htobe32(seg->ss_len); 1070 usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ss_paddr); 1071 } 1072 if (i & 1) 1073 usgl->sge[i / 2].len[1] = htobe32(0); 1074 flitp += nflits; 1075 1076 if (nflits & 1) { 1077 MPASS(((uintptr_t)flitp) & 0xf); 1078 *flitp++ = 0; 1079 } 1080 1081 MPASS((((uintptr_t)flitp) & 0xf) == 0); 1082 } 1083 1084 static inline void 1085 copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len) 1086 { 1087 1088 MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]); 1089 MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]); 1090 1091 if (__predict_true((uintptr_t)(*to) + len <= 1092 (uintptr_t)&eq->desc[eq->sidx])) { 1093 bcopy(from, *to, len); 1094 (*to) += len; 1095 if ((uintptr_t)(*to) == (uintptr_t)&eq->desc[eq->sidx]) 1096 (*to) = (caddr_t)eq->desc; 1097 } else { 1098 int portion = (uintptr_t)&eq->desc[eq->sidx] - (uintptr_t)(*to); 1099 1100 bcopy(from, *to, portion); 1101 from += portion; 1102 portion = len - portion; /* remaining */ 1103 bcopy(from, (void *)eq->desc, portion); 1104 (*to) = (caddr_t)eq->desc + portion; 1105 } 1106 } 1107 1108 static int 1109 ktls_write_tcp_options(struct sge_txq *txq, void *dst, struct mbuf *m, 1110 u_int available, u_int pidx) 1111 { 1112 struct tx_sdesc *txsd; 1113 struct fw_eth_tx_pkt_wr *wr; 1114 struct cpl_tx_pkt_core *cpl; 1115 uint32_t ctrl; 1116 uint64_t ctrl1; 1117 int len16, ndesc, pktlen; 1118 struct ether_header *eh; 1119 struct ip *ip, newip; 1120 struct ip6_hdr *ip6, newip6; 1121 struct tcphdr *tcp, newtcp; 1122 caddr_t out; 1123 1124 TXQ_LOCK_ASSERT_OWNED(txq); 1125 M_ASSERTPKTHDR(m); 1126 1127 wr = dst; 1128 pktlen = m->m_len; 1129 ctrl = sizeof(struct cpl_tx_pkt_core) + pktlen; 1130 len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) + ctrl, 16); 1131 ndesc = tx_len16_to_desc(len16); 1132 MPASS(ndesc <= available); 1133 1134 /* Firmware work request header */ 1135 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) | 1136 V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl)); 1137 1138 ctrl = V_FW_WR_LEN16(len16); 1139 wr->equiq_to_len16 = htobe32(ctrl); 1140 wr->r3 = 0; 1141 1142 cpl = (void *)(wr + 1); 1143 1144 /* CPL header */ 1145 cpl->ctrl0 = txq->cpl_ctrl0; 1146 cpl->pack = 0; 1147 cpl->len = htobe16(pktlen); 1148 1149 out = (void *)(cpl + 1); 1150 1151 /* Copy over Ethernet header. */ 1152 eh = mtod(m, struct ether_header *); 1153 copy_to_txd(&txq->eq, (caddr_t)eh, &out, m->m_pkthdr.l2hlen); 1154 1155 /* Fixup length in IP header and copy out. 
*/ 1156 if (ntohs(eh->ether_type) == ETHERTYPE_IP) { 1157 ip = (void *)((char *)eh + m->m_pkthdr.l2hlen); 1158 newip = *ip; 1159 newip.ip_len = htons(pktlen - m->m_pkthdr.l2hlen); 1160 copy_to_txd(&txq->eq, (caddr_t)&newip, &out, sizeof(newip)); 1161 if (m->m_pkthdr.l3hlen > sizeof(*ip)) 1162 copy_to_txd(&txq->eq, (caddr_t)(ip + 1), &out, 1163 m->m_pkthdr.l3hlen - sizeof(*ip)); 1164 ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP) | 1165 V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) | 1166 V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen); 1167 } else { 1168 ip6 = (void *)((char *)eh + m->m_pkthdr.l2hlen); 1169 newip6 = *ip6; 1170 newip6.ip6_plen = htons(pktlen - m->m_pkthdr.l2hlen - 1171 sizeof(*ip6)); 1172 copy_to_txd(&txq->eq, (caddr_t)&newip6, &out, sizeof(newip6)); 1173 MPASS(m->m_pkthdr.l3hlen == sizeof(*ip6)); 1174 ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP6) | 1175 V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) | 1176 V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen); 1177 } 1178 cpl->ctrl1 = htobe64(ctrl1); 1179 txq->txcsum++; 1180 1181 /* Clear PUSH and FIN in the TCP header if present. */ 1182 tcp = (void *)((char *)eh + m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen); 1183 newtcp = *tcp; 1184 tcp_set_flags(&newtcp, tcp_get_flags(&newtcp) & ~(TH_PUSH | TH_FIN)); 1185 copy_to_txd(&txq->eq, (caddr_t)&newtcp, &out, sizeof(newtcp)); 1186 1187 /* Copy rest of packet. */ 1188 copy_to_txd(&txq->eq, (caddr_t)(tcp + 1), &out, pktlen - 1189 (m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen + sizeof(*tcp))); 1190 txq->imm_wrs++; 1191 1192 txq->txpkt_wrs++; 1193 1194 txq->kern_tls_options++; 1195 1196 txsd = &txq->sdesc[pidx]; 1197 txsd->m = NULL; 1198 txsd->desc_used = ndesc; 1199 1200 return (ndesc); 1201 } 1202 1203 static int 1204 ktls_write_tunnel_packet(struct sge_txq *txq, void *dst, struct mbuf *m, 1205 struct mbuf *m_tls, u_int available, tcp_seq tcp_seqno, u_int pidx) 1206 { 1207 struct tx_sdesc *txsd; 1208 struct fw_eth_tx_pkt_wr *wr; 1209 struct cpl_tx_pkt_core *cpl; 1210 uint32_t ctrl; 1211 uint64_t ctrl1; 1212 int len16, ndesc, pktlen; 1213 struct ether_header *eh; 1214 struct ip *ip, newip; 1215 struct ip6_hdr *ip6, newip6; 1216 struct tcphdr *tcp, newtcp; 1217 caddr_t out; 1218 1219 TXQ_LOCK_ASSERT_OWNED(txq); 1220 M_ASSERTPKTHDR(m); 1221 1222 /* Locate the template TLS header. */ 1223 M_ASSERTEXTPG(m_tls); 1224 1225 /* This should always be the last TLS record in a chain. */ 1226 MPASS(m_tls->m_next == NULL); 1227 1228 wr = dst; 1229 pktlen = m->m_len + m_tls->m_len; 1230 ctrl = sizeof(struct cpl_tx_pkt_core) + pktlen; 1231 len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) + ctrl, 16); 1232 ndesc = tx_len16_to_desc(len16); 1233 MPASS(ndesc <= available); 1234 1235 /* Firmware work request header */ 1236 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) | 1237 V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl)); 1238 1239 ctrl = V_FW_WR_LEN16(len16); 1240 wr->equiq_to_len16 = htobe32(ctrl); 1241 wr->r3 = 0; 1242 1243 cpl = (void *)(wr + 1); 1244 1245 /* CPL header */ 1246 cpl->ctrl0 = txq->cpl_ctrl0; 1247 cpl->pack = 0; 1248 cpl->len = htobe16(pktlen); 1249 1250 out = (void *)(cpl + 1); 1251 1252 /* Copy over Ethernet header. */ 1253 eh = mtod(m, struct ether_header *); 1254 copy_to_txd(&txq->eq, (caddr_t)eh, &out, m->m_pkthdr.l2hlen); 1255 1256 /* Fixup length in IP header and copy out. 
*/ 1257 if (ntohs(eh->ether_type) == ETHERTYPE_IP) { 1258 ip = (void *)((char *)eh + m->m_pkthdr.l2hlen); 1259 newip = *ip; 1260 newip.ip_len = htons(pktlen - m->m_pkthdr.l2hlen); 1261 copy_to_txd(&txq->eq, (caddr_t)&newip, &out, sizeof(newip)); 1262 if (m->m_pkthdr.l3hlen > sizeof(*ip)) 1263 copy_to_txd(&txq->eq, (caddr_t)(ip + 1), &out, 1264 m->m_pkthdr.l3hlen - sizeof(*ip)); 1265 ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP) | 1266 V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) | 1267 V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen); 1268 } else { 1269 ip6 = (void *)((char *)eh + m->m_pkthdr.l2hlen); 1270 newip6 = *ip6; 1271 newip6.ip6_plen = htons(pktlen - m->m_pkthdr.l2hlen - 1272 sizeof(*ip6)); 1273 copy_to_txd(&txq->eq, (caddr_t)&newip6, &out, sizeof(newip6)); 1274 MPASS(m->m_pkthdr.l3hlen == sizeof(*ip6)); 1275 ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP6) | 1276 V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) | 1277 V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen); 1278 } 1279 cpl->ctrl1 = htobe64(ctrl1); 1280 txq->txcsum++; 1281 1282 /* Set sequence number in TCP header. */ 1283 tcp = (void *)((char *)eh + m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen); 1284 newtcp = *tcp; 1285 newtcp.th_seq = htonl(tcp_seqno + mtod(m_tls, vm_offset_t)); 1286 copy_to_txd(&txq->eq, (caddr_t)&newtcp, &out, sizeof(newtcp)); 1287 1288 /* Copy rest of TCP header. */ 1289 copy_to_txd(&txq->eq, (caddr_t)(tcp + 1), &out, m->m_len - 1290 (m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen + sizeof(*tcp))); 1291 1292 /* Copy the subset of the TLS header requested. */ 1293 copy_to_txd(&txq->eq, (char *)m_tls->m_epg_hdr + 1294 mtod(m_tls, vm_offset_t), &out, m_tls->m_len); 1295 txq->imm_wrs++; 1296 1297 txq->txpkt_wrs++; 1298 1299 txq->kern_tls_header++; 1300 1301 txsd = &txq->sdesc[pidx]; 1302 txsd->m = m; 1303 txsd->desc_used = ndesc; 1304 1305 return (ndesc); 1306 } 1307 1308 _Static_assert(sizeof(struct cpl_set_tcb_field) <= EQ_ESIZE, 1309 "CPL_SET_TCB_FIELD must be smaller than a single TX descriptor"); 1310 _Static_assert(W_TCB_SND_UNA_RAW == W_TCB_SND_NXT_RAW, 1311 "SND_NXT_RAW and SND_UNA_RAW are in different words"); 1312 1313 static int 1314 ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq, void *dst, 1315 struct mbuf *m, struct tcphdr *tcp, struct mbuf *m_tls, u_int available, 1316 tcp_seq tcp_seqno, uint32_t *tsopt, u_int pidx, bool set_l2t_idx) 1317 { 1318 struct sge_eq *eq = &txq->eq; 1319 struct tx_sdesc *txsd; 1320 struct fw_ulptx_wr *wr; 1321 struct ulp_txpkt *txpkt; 1322 struct ulptx_sc_memrd *memrd; 1323 struct ulptx_idata *idata; 1324 struct cpl_tx_sec_pdu *sec_pdu; 1325 struct cpl_tx_data *tx_data; 1326 struct tls_record_layer *hdr; 1327 char *iv, *out; 1328 u_int aad_start, aad_stop; 1329 u_int auth_start, auth_stop, auth_insert; 1330 u_int cipher_start, cipher_stop, iv_offset; 1331 u_int imm_len, mss, ndesc, offset, plen, tlen, twr_len, wr_len; 1332 u_int fields, nsegs, tx_max_offset, tx_max; 1333 bool first_wr, last_wr, using_scratch; 1334 1335 ndesc = 0; 1336 MPASS(tlsp->txq == txq); 1337 1338 first_wr = (tlsp->prev_seq == 0 && tlsp->prev_ack == 0 && 1339 tlsp->prev_win == 0); 1340 1341 /* 1342 * Use the per-txq scratch pad if near the end of the ring to 1343 * simplify handling of wrap-around. This uses a simple but 1344 * not quite perfect test of using the scratch buffer if we 1345 * can't fit a maximal work request in without wrapping. 1346 */ 1347 using_scratch = (eq->sidx - pidx < SGE_MAX_WR_LEN / EQ_ESIZE); 1348 1349 /* Locate the TLS header. 
*/ 1350 M_ASSERTEXTPG(m_tls); 1351 hdr = (void *)m_tls->m_epg_hdr; 1352 plen = TLS_HEADER_LENGTH + ntohs(hdr->tls_length) - m_tls->m_epg_trllen; 1353 1354 /* Determine how much of the TLS record to send. */ 1355 tlen = ktls_tcp_payload_length(tlsp, m_tls); 1356 if (tlen <= m_tls->m_epg_hdrlen) { 1357 /* 1358 * For requests that only want to send the TLS header, 1359 * send a tunnelled packet as immediate data. 1360 */ 1361 #ifdef VERBOSE_TRACES 1362 CTR3(KTR_CXGBE, "%s: tid %d header-only TLS record %u", 1363 __func__, tlsp->tid, (u_int)m_tls->m_epg_seqno); 1364 #endif 1365 return (ktls_write_tunnel_packet(txq, dst, m, m_tls, available, 1366 tcp_seqno, pidx)); 1367 } 1368 if (tlen < plen) { 1369 plen = tlen; 1370 offset = ktls_payload_offset(tlsp, m_tls); 1371 #ifdef VERBOSE_TRACES 1372 CTR4(KTR_CXGBE, "%s: tid %d short TLS record %u with offset %u", 1373 __func__, tlsp->tid, (u_int)m_tls->m_epg_seqno, offset); 1374 #endif 1375 if (m_tls->m_next == NULL && (tcp_get_flags(tcp) & TH_FIN) != 0) { 1376 txq->kern_tls_fin_short++; 1377 #ifdef INVARIANTS 1378 panic("%s: FIN on short TLS record", __func__); 1379 #endif 1380 } 1381 } else 1382 offset = 0; 1383 1384 /* 1385 * This is the last work request for a given TLS mbuf chain if 1386 * it is the last mbuf in the chain and FIN is not set. If 1387 * FIN is set, then ktls_write_tcp_fin() will write out the 1388 * last work request. 1389 */ 1390 last_wr = m_tls->m_next == NULL && (tcp_get_flags(tcp) & TH_FIN) == 0; 1391 1392 /* 1393 * The host stack may ask us to not send part of the start of 1394 * a TLS record. (For example, the stack might have 1395 * previously sent a "short" TLS record and might later send 1396 * down an mbuf that requests to send the remainder of the TLS 1397 * record.) The crypto engine must process a TLS record from 1398 * the beginning if computing a GCM tag or HMAC, so we always 1399 * send the TLS record from the beginning as input to the 1400 * crypto engine and via CPL_TX_DATA to TP. However, TP will 1401 * drop individual packets after they have been chopped up 1402 * into MSS-sized chunks if the entire sequence range of those 1403 * packets is less than SND_UNA. SND_UNA is computed as 1404 * TX_MAX - SND_UNA_RAW. Thus, use the offset stored in 1405 * m_data to set TX_MAX to the first byte in the TCP sequence 1406 * space the host actually wants us to send and set 1407 * SND_UNA_RAW to 0. 1408 * 1409 * If the host sends us back to back requests that span the 1410 * trailer of a single TLS record (first request ends "in" the 1411 * trailer and second request starts at the next byte but 1412 * still "in" the trailer), the initial bytes of the trailer 1413 * that the first request drops will not be retransmitted. If 1414 * the host uses the same requests when retransmitting the 1415 * connection will hang. To handle this, always transmit the 1416 * full trailer for a request that begins "in" the trailer 1417 * (the second request in the example above). This should 1418 * also help to avoid retransmits for the common case. 1419 * 1420 * A similar condition exists when using CBC for back to back 1421 * requests that span a single AES block. The first request 1422 * will be truncated to end at the end of the previous AES 1423 * block. To handle this, always begin transmission at the 1424 * start of the current AES block. 1425 */ 1426 tx_max_offset = mtod(m_tls, vm_offset_t); 1427 if (tx_max_offset > TLS_HEADER_LENGTH + ntohs(hdr->tls_length) - 1428 m_tls->m_epg_trllen) { 1429 /* Always send the full trailer. 
*/ 1430 tx_max_offset = TLS_HEADER_LENGTH + ntohs(hdr->tls_length) - 1431 m_tls->m_epg_trllen; 1432 } 1433 if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_CBC && 1434 tx_max_offset > TLS_HEADER_LENGTH) { 1435 /* Always send all of the first AES block. */ 1436 tx_max_offset = TLS_HEADER_LENGTH + 1437 rounddown(tx_max_offset - TLS_HEADER_LENGTH, 1438 AES_BLOCK_LEN); 1439 } 1440 tx_max = tcp_seqno + tx_max_offset; 1441 1442 /* 1443 * Update TCB fields. Reserve space for the FW_ULPTX_WR header 1444 * but don't populate it until we know how many field updates 1445 * are required. 1446 */ 1447 if (using_scratch) 1448 wr = (void *)txq->ss; 1449 else 1450 wr = dst; 1451 out = (void *)(wr + 1); 1452 fields = 0; 1453 if (set_l2t_idx) { 1454 KASSERT(m->m_next == m_tls, 1455 ("trying to set L2T_IX for subsequent TLS WR")); 1456 #ifdef VERBOSE_TRACES 1457 CTR3(KTR_CXGBE, "%s: tid %d set L2T_IX to %d", __func__, 1458 tlsp->tid, tlsp->l2te->idx); 1459 #endif 1460 write_set_tcb_field_ulp(tlsp, out, txq, W_TCB_L2T_IX, 1461 V_TCB_L2T_IX(M_TCB_L2T_IX), V_TCB_L2T_IX(tlsp->l2te->idx)); 1462 out += roundup2(LEN__SET_TCB_FIELD_ULP, 16); 1463 fields++; 1464 } 1465 if (tsopt != NULL && tlsp->prev_tsecr != ntohl(tsopt[1])) { 1466 KASSERT(m->m_next == m_tls, 1467 ("trying to set T_RTSEQ_RECENT for subsequent TLS WR")); 1468 #ifdef VERBOSE_TRACES 1469 CTR2(KTR_CXGBE, "%s: tid %d wrote updated T_RTSEQ_RECENT", 1470 __func__, tlsp->tid); 1471 #endif 1472 write_set_tcb_field_ulp(tlsp, out, txq, W_TCB_T_RTSEQ_RECENT, 1473 V_TCB_T_RTSEQ_RECENT(M_TCB_T_RTSEQ_RECENT), 1474 V_TCB_T_RTSEQ_RECENT(ntohl(tsopt[1]))); 1475 out += roundup2(LEN__SET_TCB_FIELD_ULP, 16); 1476 fields++; 1477 1478 tlsp->prev_tsecr = ntohl(tsopt[1]); 1479 } 1480 1481 if (first_wr || tlsp->prev_seq != tx_max) { 1482 KASSERT(m->m_next == m_tls, 1483 ("trying to set TX_MAX for subsequent TLS WR")); 1484 #ifdef VERBOSE_TRACES 1485 CTR4(KTR_CXGBE, 1486 "%s: tid %d setting TX_MAX to %u (tcp_seqno %u)", 1487 __func__, tlsp->tid, tx_max, tcp_seqno); 1488 #endif 1489 write_set_tcb_field_ulp(tlsp, out, txq, W_TCB_TX_MAX, 1490 V_TCB_TX_MAX(M_TCB_TX_MAX), V_TCB_TX_MAX(tx_max)); 1491 out += roundup2(LEN__SET_TCB_FIELD_ULP, 16); 1492 fields++; 1493 } 1494 1495 /* 1496 * If there is data to drop at the beginning of this TLS 1497 * record or if this is a retransmit, 1498 * reset SND_UNA_RAW to 0 so that SND_UNA == TX_MAX. 1499 */ 1500 if (tlsp->prev_seq != tx_max || mtod(m_tls, vm_offset_t) != 0) { 1501 KASSERT(m->m_next == m_tls, 1502 ("trying to clear SND_UNA_RAW for subsequent TLS WR")); 1503 #ifdef VERBOSE_TRACES 1504 CTR2(KTR_CXGBE, "%s: tid %d clearing SND_UNA_RAW", __func__, 1505 tlsp->tid); 1506 #endif 1507 write_set_tcb_field_ulp(tlsp, out, txq, W_TCB_SND_UNA_RAW, 1508 V_TCB_SND_UNA_RAW(M_TCB_SND_UNA_RAW), 1509 V_TCB_SND_UNA_RAW(0)); 1510 out += roundup2(LEN__SET_TCB_FIELD_ULP, 16); 1511 fields++; 1512 } 1513 1514 /* 1515 * Store the expected sequence number of the next byte after 1516 * this record. 
1517 */ 1518 tlsp->prev_seq = tcp_seqno + tlen; 1519 1520 if (first_wr || tlsp->prev_ack != ntohl(tcp->th_ack)) { 1521 KASSERT(m->m_next == m_tls, 1522 ("trying to set RCV_NXT for subsequent TLS WR")); 1523 write_set_tcb_field_ulp(tlsp, out, txq, W_TCB_RCV_NXT, 1524 V_TCB_RCV_NXT(M_TCB_RCV_NXT), 1525 V_TCB_RCV_NXT(ntohl(tcp->th_ack))); 1526 out += roundup2(LEN__SET_TCB_FIELD_ULP, 16); 1527 fields++; 1528 1529 tlsp->prev_ack = ntohl(tcp->th_ack); 1530 } 1531 1532 if (first_wr || tlsp->prev_win != ntohs(tcp->th_win)) { 1533 KASSERT(m->m_next == m_tls, 1534 ("trying to set RCV_WND for subsequent TLS WR")); 1535 write_set_tcb_field_ulp(tlsp, out, txq, W_TCB_RCV_WND, 1536 V_TCB_RCV_WND(M_TCB_RCV_WND), 1537 V_TCB_RCV_WND(ntohs(tcp->th_win))); 1538 out += roundup2(LEN__SET_TCB_FIELD_ULP, 16); 1539 fields++; 1540 1541 tlsp->prev_win = ntohs(tcp->th_win); 1542 } 1543 1544 /* Use cached value for first record in chain. */ 1545 if (m->m_next == m_tls) 1546 nsegs = mbuf_nsegs(m); 1547 else 1548 nsegs = sglist_count_mbuf_epg(m_tls, m_tls->m_epg_hdrlen + 1549 offset, plen - (m_tls->m_epg_hdrlen + offset)); 1550 1551 /* Calculate the size of the TLS work request. */ 1552 twr_len = ktls_base_wr_size(tlsp); 1553 1554 imm_len = 0; 1555 if (offset == 0) 1556 imm_len += m_tls->m_epg_hdrlen; 1557 if (plen == tlen) 1558 imm_len += AES_BLOCK_LEN; 1559 twr_len += roundup2(imm_len, 16); 1560 twr_len += ktls_sgl_size(nsegs); 1561 1562 /* 1563 * If any field updates were required, determine if they can 1564 * be included in the TLS work request. If not, use the 1565 * FW_ULPTX_WR work request header at 'wr' as a dedicated work 1566 * request for the field updates and start a new work request 1567 * for the TLS work request afterward. 1568 */ 1569 if (fields != 0) { 1570 wr_len = fields * roundup2(LEN__SET_TCB_FIELD_ULP, 16); 1571 if (twr_len + wr_len <= SGE_MAX_WR_LEN && 1572 tlsp->sc->tlst.combo_wrs) { 1573 wr_len += twr_len; 1574 txpkt = (void *)out; 1575 } else { 1576 wr_len += sizeof(*wr); 1577 wr->op_to_compl = htobe32(V_FW_WR_OP(FW_ULPTX_WR)); 1578 wr->flowid_len16 = htobe32(F_FW_ULPTX_WR_DATA | 1579 V_FW_WR_LEN16(wr_len / 16)); 1580 wr->cookie = 0; 1581 1582 /* 1583 * If we were using scratch space, copy the 1584 * field updates work request to the ring. 1585 */ 1586 if (using_scratch) { 1587 out = dst; 1588 copy_to_txd(eq, txq->ss, &out, wr_len); 1589 } 1590 1591 ndesc = howmany(wr_len, EQ_ESIZE); 1592 MPASS(ndesc <= available); 1593 1594 txq->raw_wrs++; 1595 txsd = &txq->sdesc[pidx]; 1596 txsd->m = NULL; 1597 txsd->desc_used = ndesc; 1598 IDXINCR(pidx, ndesc, eq->sidx); 1599 dst = &eq->desc[pidx]; 1600 1601 /* 1602 * Determine if we should use scratch space 1603 * for the TLS work request based on the 1604 * available space after advancing pidx for 1605 * the field updates work request. 
1606 */ 1607 wr_len = twr_len; 1608 using_scratch = (eq->sidx - pidx < 1609 howmany(wr_len, EQ_ESIZE)); 1610 if (using_scratch) 1611 wr = (void *)txq->ss; 1612 else 1613 wr = dst; 1614 txpkt = (void *)(wr + 1); 1615 } 1616 } else { 1617 wr_len = twr_len; 1618 txpkt = (void *)out; 1619 } 1620 1621 wr_len = roundup2(wr_len, 16); 1622 MPASS(ndesc + howmany(wr_len, EQ_ESIZE) <= available); 1623 1624 /* FW_ULPTX_WR */ 1625 wr->op_to_compl = htobe32(V_FW_WR_OP(FW_ULPTX_WR)); 1626 wr->flowid_len16 = htobe32(F_FW_ULPTX_WR_DATA | 1627 V_FW_WR_LEN16(wr_len / 16)); 1628 wr->cookie = 0; 1629 1630 /* ULP_TXPKT */ 1631 txpkt->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) | 1632 V_ULP_TXPKT_DATAMODIFY(0) | 1633 V_ULP_TXPKT_CHANNELID(tlsp->vi->pi->port_id) | V_ULP_TXPKT_DEST(0) | 1634 V_ULP_TXPKT_FID(txq->eq.cntxt_id) | V_ULP_TXPKT_RO(1)); 1635 txpkt->len = htobe32(howmany(twr_len - sizeof(*wr), 16)); 1636 1637 /* ULPTX_IDATA sub-command */ 1638 idata = (void *)(txpkt + 1); 1639 idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) | 1640 V_ULP_TX_SC_MORE(1)); 1641 idata->len = sizeof(struct cpl_tx_sec_pdu); 1642 1643 /* 1644 * The key context, CPL_TX_DATA, and immediate data are part 1645 * of this ULPTX_IDATA when using an inline key. When reading 1646 * the key from memory, the CPL_TX_DATA and immediate data are 1647 * part of a separate ULPTX_IDATA. 1648 */ 1649 if (tlsp->inline_key) 1650 idata->len += tlsp->tx_key_info_size + 1651 sizeof(struct cpl_tx_data) + imm_len; 1652 idata->len = htobe32(idata->len); 1653 1654 /* CPL_TX_SEC_PDU */ 1655 sec_pdu = (void *)(idata + 1); 1656 1657 /* 1658 * For short records, AAD is counted as header data in SCMD0, 1659 * the IV is next followed by a cipher region for the payload. 1660 */ 1661 if (plen == tlen) { 1662 aad_start = 0; 1663 aad_stop = 0; 1664 iv_offset = 1; 1665 auth_start = 0; 1666 auth_stop = 0; 1667 auth_insert = 0; 1668 cipher_start = AES_BLOCK_LEN + 1; 1669 cipher_stop = 0; 1670 1671 sec_pdu->pldlen = htobe32(16 + plen - 1672 (m_tls->m_epg_hdrlen + offset)); 1673 1674 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ 1675 sec_pdu->seqno_numivs = tlsp->scmd0_short.seqno_numivs; 1676 sec_pdu->ivgen_hdrlen = htobe32( 1677 tlsp->scmd0_short.ivgen_hdrlen | 1678 V_SCMD_HDR_LEN(offset == 0 ? m_tls->m_epg_hdrlen : 0)); 1679 1680 txq->kern_tls_short++; 1681 } else { 1682 /* 1683 * AAD is TLS header. IV is after AAD. The cipher region 1684 * starts after the IV. See comments in ccr_authenc() and 1685 * ccr_gmac() in t4_crypto.c regarding cipher and auth 1686 * start/stop values. 1687 */ 1688 aad_start = 1; 1689 aad_stop = TLS_HEADER_LENGTH; 1690 iv_offset = TLS_HEADER_LENGTH + 1; 1691 cipher_start = m_tls->m_epg_hdrlen + 1; 1692 if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) { 1693 cipher_stop = 0; 1694 auth_start = cipher_start; 1695 auth_stop = 0; 1696 auth_insert = 0; 1697 } else { 1698 cipher_stop = 0; 1699 auth_start = cipher_start; 1700 auth_stop = 0; 1701 auth_insert = 0; 1702 } 1703 1704 sec_pdu->pldlen = htobe32(plen); 1705 1706 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. 
*/ 1707 sec_pdu->seqno_numivs = tlsp->scmd0.seqno_numivs; 1708 sec_pdu->ivgen_hdrlen = tlsp->scmd0.ivgen_hdrlen; 1709 1710 if (mtod(m_tls, vm_offset_t) == 0) 1711 txq->kern_tls_full++; 1712 else 1713 txq->kern_tls_partial++; 1714 } 1715 sec_pdu->op_ivinsrtofst = htobe32( 1716 V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | 1717 V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | 1718 V_CPL_TX_SEC_PDU_IVINSRTOFST(iv_offset)); 1719 sec_pdu->aadstart_cipherstop_hi = htobe32( 1720 V_CPL_TX_SEC_PDU_AADSTART(aad_start) | 1721 V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) | 1722 V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) | 1723 V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4)); 1724 sec_pdu->cipherstop_lo_authinsert = htobe32( 1725 V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) | 1726 V_CPL_TX_SEC_PDU_AUTHSTART(auth_start) | 1727 V_CPL_TX_SEC_PDU_AUTHSTOP(auth_stop) | 1728 V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert)); 1729 1730 sec_pdu->scmd1 = htobe64(m_tls->m_epg_seqno); 1731 1732 /* Key context */ 1733 out = (void *)(sec_pdu + 1); 1734 if (tlsp->inline_key) { 1735 memcpy(out, &tlsp->keyctx, tlsp->tx_key_info_size); 1736 out += tlsp->tx_key_info_size; 1737 } else { 1738 /* ULPTX_SC_MEMRD to read key context. */ 1739 memrd = (void *)out; 1740 memrd->cmd_to_len = htobe32(V_ULPTX_CMD(ULP_TX_SC_MEMRD) | 1741 V_ULP_TX_SC_MORE(1) | 1742 V_ULPTX_LEN16(tlsp->tx_key_info_size >> 4)); 1743 memrd->addr = htobe32(tlsp->tx_key_addr >> 5); 1744 1745 /* ULPTX_IDATA for CPL_TX_DATA and TLS header. */ 1746 idata = (void *)(memrd + 1); 1747 idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) | 1748 V_ULP_TX_SC_MORE(1)); 1749 idata->len = htobe32(sizeof(struct cpl_tx_data) + imm_len); 1750 1751 out = (void *)(idata + 1); 1752 } 1753 1754 /* CPL_TX_DATA */ 1755 tx_data = (void *)out; 1756 OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tlsp->tid)); 1757 if (m->m_pkthdr.csum_flags & CSUM_TSO) { 1758 mss = m->m_pkthdr.tso_segsz; 1759 tlsp->prev_mss = mss; 1760 } else if (tlsp->prev_mss != 0) 1761 mss = tlsp->prev_mss; 1762 else 1763 mss = if_getmtu(tlsp->vi->ifp) - 1764 (m->m_pkthdr.l3hlen + m->m_pkthdr.l4hlen); 1765 if (offset == 0) { 1766 tx_data->len = htobe32(V_TX_DATA_MSS(mss) | V_TX_LENGTH(tlen)); 1767 tx_data->rsvd = htobe32(tcp_seqno); 1768 } else { 1769 tx_data->len = htobe32(V_TX_DATA_MSS(mss) | 1770 V_TX_LENGTH(tlen - (m_tls->m_epg_hdrlen + offset))); 1771 tx_data->rsvd = htobe32(tcp_seqno + m_tls->m_epg_hdrlen + offset); 1772 } 1773 tx_data->flags = htobe32(F_TX_BYPASS); 1774 if (last_wr && tcp_get_flags(tcp) & TH_PUSH) 1775 tx_data->flags |= htobe32(F_TX_PUSH | F_TX_SHOVE); 1776 1777 /* Populate the TLS header */ 1778 out = (void *)(tx_data + 1); 1779 if (offset == 0) { 1780 memcpy(out, m_tls->m_epg_hdr, m_tls->m_epg_hdrlen); 1781 out += m_tls->m_epg_hdrlen; 1782 } 1783 1784 /* AES IV for a short record. */ 1785 if (plen == tlen) { 1786 iv = out; 1787 if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) { 1788 memcpy(iv, tlsp->keyctx.u.txhdr.txsalt, SALT_SIZE); 1789 memcpy(iv + 4, hdr + 1, 8); 1790 *(uint32_t *)(iv + 12) = htobe32(2 + 1791 offset / AES_BLOCK_LEN); 1792 } else 1793 memcpy(iv, hdr + 1, AES_BLOCK_LEN); 1794 out += AES_BLOCK_LEN; 1795 } 1796 1797 if (imm_len % 16 != 0) { 1798 if (imm_len % 8 != 0) { 1799 /* Zero pad to an 8-byte boundary. */ 1800 memset(out, 0, 8 - (imm_len % 8)); 1801 out += 8 - (imm_len % 8); 1802 } 1803 1804 /* 1805 * Insert a ULP_TX_SC_NOOP if needed so the SGL is 1806 * 16-byte aligned. 
1807 */ 1808 if (imm_len % 16 <= 8) { 1809 idata = (void *)out; 1810 idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP)); 1811 idata->len = htobe32(0); 1812 out = (void *)(idata + 1); 1813 } 1814 } 1815 1816 /* SGL for record payload */ 1817 sglist_reset(txq->gl); 1818 if (sglist_append_mbuf_epg(txq->gl, m_tls, m_tls->m_epg_hdrlen + offset, 1819 plen - (m_tls->m_epg_hdrlen + offset)) != 0) { 1820 #ifdef INVARIANTS 1821 panic("%s: failed to append sglist", __func__); 1822 #endif 1823 } 1824 write_gl_to_buf(txq->gl, out); 1825 1826 if (using_scratch) { 1827 out = dst; 1828 copy_to_txd(eq, txq->ss, &out, wr_len); 1829 } 1830 1831 ndesc += howmany(wr_len, EQ_ESIZE); 1832 MPASS(ndesc <= available); 1833 1834 txq->kern_tls_records++; 1835 txq->kern_tls_octets += tlen - mtod(m_tls, vm_offset_t); 1836 if (mtod(m_tls, vm_offset_t) != 0) { 1837 if (offset == 0) 1838 txq->kern_tls_waste += mtod(m_tls, vm_offset_t); 1839 else 1840 txq->kern_tls_waste += mtod(m_tls, vm_offset_t) - 1841 (m_tls->m_epg_hdrlen + offset); 1842 } 1843 1844 txsd = &txq->sdesc[pidx]; 1845 if (last_wr) 1846 txsd->m = m; 1847 else 1848 txsd->m = NULL; 1849 txsd->desc_used = howmany(wr_len, EQ_ESIZE); 1850 1851 return (ndesc); 1852 } 1853 1854 static int 1855 ktls_write_tcp_fin(struct sge_txq *txq, void *dst, struct mbuf *m, 1856 u_int available, tcp_seq tcp_seqno, u_int pidx) 1857 { 1858 struct tx_sdesc *txsd; 1859 struct fw_eth_tx_pkt_wr *wr; 1860 struct cpl_tx_pkt_core *cpl; 1861 uint32_t ctrl; 1862 uint64_t ctrl1; 1863 int len16, ndesc, pktlen; 1864 struct ether_header *eh; 1865 struct ip *ip, newip; 1866 struct ip6_hdr *ip6, newip6; 1867 struct tcphdr *tcp, newtcp; 1868 caddr_t out; 1869 1870 TXQ_LOCK_ASSERT_OWNED(txq); 1871 M_ASSERTPKTHDR(m); 1872 1873 wr = dst; 1874 pktlen = m->m_len; 1875 ctrl = sizeof(struct cpl_tx_pkt_core) + pktlen; 1876 len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) + ctrl, 16); 1877 ndesc = tx_len16_to_desc(len16); 1878 MPASS(ndesc <= available); 1879 1880 /* Firmware work request header */ 1881 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) | 1882 V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl)); 1883 1884 ctrl = V_FW_WR_LEN16(len16); 1885 wr->equiq_to_len16 = htobe32(ctrl); 1886 wr->r3 = 0; 1887 1888 cpl = (void *)(wr + 1); 1889 1890 /* CPL header */ 1891 cpl->ctrl0 = txq->cpl_ctrl0; 1892 cpl->pack = 0; 1893 cpl->len = htobe16(pktlen); 1894 1895 out = (void *)(cpl + 1); 1896 1897 /* Copy over Ethernet header. */ 1898 eh = mtod(m, struct ether_header *); 1899 copy_to_txd(&txq->eq, (caddr_t)eh, &out, m->m_pkthdr.l2hlen); 1900 1901 /* Fixup length in IP header and copy out. 
	 */
	if (ntohs(eh->ether_type) == ETHERTYPE_IP) {
		ip = (void *)((char *)eh + m->m_pkthdr.l2hlen);
		newip = *ip;
		newip.ip_len = htons(pktlen - m->m_pkthdr.l2hlen);
		copy_to_txd(&txq->eq, (caddr_t)&newip, &out, sizeof(newip));
		if (m->m_pkthdr.l3hlen > sizeof(*ip))
			copy_to_txd(&txq->eq, (caddr_t)(ip + 1), &out,
			    m->m_pkthdr.l3hlen - sizeof(*ip));
		ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP) |
		    V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) |
		    V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen);
	} else {
		ip6 = (void *)((char *)eh + m->m_pkthdr.l2hlen);
		newip6 = *ip6;
		newip6.ip6_plen = htons(pktlen - m->m_pkthdr.l2hlen -
		    sizeof(*ip6));
		copy_to_txd(&txq->eq, (caddr_t)&newip6, &out, sizeof(newip6));
		MPASS(m->m_pkthdr.l3hlen == sizeof(*ip6));
		ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP6) |
		    V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) |
		    V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen);
	}
	cpl->ctrl1 = htobe64(ctrl1);
	txq->txcsum++;

	/* Set sequence number in TCP header. */
	tcp = (void *)((char *)eh + m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen);
	newtcp = *tcp;
	newtcp.th_seq = htonl(tcp_seqno);
	copy_to_txd(&txq->eq, (caddr_t)&newtcp, &out, sizeof(newtcp));

	/* Copy rest of packet. */
	copy_to_txd(&txq->eq, (caddr_t)(tcp + 1), &out, m->m_len -
	    (m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen + sizeof(*tcp)));
	txq->imm_wrs++;

	txq->txpkt_wrs++;

	txq->kern_tls_fin++;

	txsd = &txq->sdesc[pidx];
	txsd->m = m;
	txsd->desc_used = ndesc;

	return (ndesc);
}

int
t6_ktls_write_wr(struct sge_txq *txq, void *dst, struct mbuf *m,
    u_int available)
{
	struct sge_eq *eq = &txq->eq;
	struct tx_sdesc *txsd;
	struct tlspcb *tlsp;
	struct tcphdr *tcp;
	struct mbuf *m_tls;
	struct ether_header *eh;
	tcp_seq tcp_seqno;
	u_int ndesc, pidx, totdesc;
	uint16_t vlan_tag;
	bool has_fin, set_l2t_idx;
	void *tsopt;

	M_ASSERTPKTHDR(m);
	MPASS(m->m_pkthdr.snd_tag != NULL);
	tlsp = mst_to_tls(m->m_pkthdr.snd_tag);

	totdesc = 0;
	eh = mtod(m, struct ether_header *);
	tcp = (struct tcphdr *)((char *)eh + m->m_pkthdr.l2hlen +
	    m->m_pkthdr.l3hlen);
	pidx = eq->pidx;
	has_fin = (tcp_get_flags(tcp) & TH_FIN) != 0;

	/*
	 * If this TLS record has a FIN, then we will send any
	 * requested options as part of the FIN packet.
	 */
	if (!has_fin && ktls_has_tcp_options(tcp)) {
		ndesc = ktls_write_tcp_options(txq, dst, m, available, pidx);
		totdesc += ndesc;
		IDXINCR(pidx, ndesc, eq->sidx);
		dst = &eq->desc[pidx];
#ifdef VERBOSE_TRACES
		CTR2(KTR_CXGBE, "%s: tid %d wrote TCP options packet",
		    __func__, tlsp->tid);
#endif
	}

	/*
	 * Allocate a new L2T entry if necessary.  This may write out
	 * a work request to the txq.
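	 *
	 * A new entry is needed when no entry is cached yet or when the
	 * packet's VLAN tag or destination MAC no longer matches the
	 * cached entry.  In that case set_l2t_idx is set so that the
	 * first TLS work request below also updates the connection's
	 * L2T index.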
1994 */ 1995 if (m->m_flags & M_VLANTAG) 1996 vlan_tag = m->m_pkthdr.ether_vtag; 1997 else 1998 vlan_tag = 0xfff; 1999 set_l2t_idx = false; 2000 if (tlsp->l2te == NULL || tlsp->l2te->vlan != vlan_tag || 2001 memcmp(tlsp->l2te->dmac, eh->ether_dhost, ETHER_ADDR_LEN) != 0) { 2002 set_l2t_idx = true; 2003 if (tlsp->l2te) 2004 t4_l2t_release(tlsp->l2te); 2005 tlsp->l2te = t4_l2t_alloc_tls(tlsp->sc, txq, dst, &ndesc, 2006 vlan_tag, tlsp->vi->pi->hw_port, eh->ether_dhost); 2007 if (tlsp->l2te == NULL) 2008 CXGBE_UNIMPLEMENTED("failed to allocate TLS L2TE"); 2009 if (ndesc != 0) { 2010 MPASS(ndesc <= available - totdesc); 2011 2012 txq->raw_wrs++; 2013 txsd = &txq->sdesc[pidx]; 2014 txsd->m = NULL; 2015 txsd->desc_used = ndesc; 2016 totdesc += ndesc; 2017 IDXINCR(pidx, ndesc, eq->sidx); 2018 dst = &eq->desc[pidx]; 2019 } 2020 } 2021 2022 /* 2023 * Iterate over each TLS record constructing a work request 2024 * for that record. 2025 */ 2026 for (m_tls = m->m_next; m_tls != NULL; m_tls = m_tls->m_next) { 2027 MPASS(m_tls->m_flags & M_EXTPG); 2028 2029 /* 2030 * Determine the initial TCP sequence number for this 2031 * record. 2032 */ 2033 tsopt = NULL; 2034 if (m_tls == m->m_next) { 2035 tcp_seqno = ntohl(tcp->th_seq) - 2036 mtod(m_tls, vm_offset_t); 2037 if (tlsp->using_timestamps) 2038 tsopt = ktls_find_tcp_timestamps(tcp); 2039 } else { 2040 MPASS(mtod(m_tls, vm_offset_t) == 0); 2041 tcp_seqno = tlsp->prev_seq; 2042 } 2043 2044 ndesc = ktls_write_tls_wr(tlsp, txq, dst, m, tcp, m_tls, 2045 available - totdesc, tcp_seqno, tsopt, pidx, set_l2t_idx); 2046 totdesc += ndesc; 2047 IDXINCR(pidx, ndesc, eq->sidx); 2048 dst = &eq->desc[pidx]; 2049 2050 /* Only need to set the L2T index once. */ 2051 set_l2t_idx = false; 2052 } 2053 2054 if (has_fin) { 2055 /* 2056 * If the TCP header for this chain has FIN sent, then 2057 * explicitly send a packet that has FIN set. This 2058 * will also have PUSH set if requested. This assumes 2059 * we sent at least one TLS record work request and 2060 * uses the TCP sequence number after that reqeust as 2061 * the sequence number for the FIN packet. 
2062 */ 2063 ndesc = ktls_write_tcp_fin(txq, dst, m, available, 2064 tlsp->prev_seq, pidx); 2065 totdesc += ndesc; 2066 } 2067 2068 MPASS(totdesc <= available); 2069 return (totdesc); 2070 } 2071 2072 static void 2073 t6_tls_tag_free(struct m_snd_tag *mst) 2074 { 2075 struct adapter *sc; 2076 struct tlspcb *tlsp; 2077 2078 tlsp = mst_to_tls(mst); 2079 sc = tlsp->sc; 2080 2081 CTR2(KTR_CXGBE, "%s: tid %d", __func__, tlsp->tid); 2082 2083 if (tlsp->l2te) 2084 t4_l2t_release(tlsp->l2te); 2085 if (tlsp->tid >= 0) 2086 release_tid(sc, tlsp->tid, tlsp->ctrlq); 2087 if (tlsp->ce) 2088 t4_release_clip_entry(sc, tlsp->ce); 2089 if (tlsp->tx_key_addr >= 0) 2090 t4_free_tls_keyid(sc, tlsp->tx_key_addr); 2091 2092 zfree(tlsp, M_CXGBE); 2093 } 2094 2095 void 2096 t6_ktls_modload(void) 2097 { 2098 2099 t4_register_shared_cpl_handler(CPL_ACT_OPEN_RPL, ktls_act_open_rpl, 2100 CPL_COOKIE_KERN_TLS); 2101 } 2102 2103 void 2104 t6_ktls_modunload(void) 2105 { 2106 2107 t4_register_shared_cpl_handler(CPL_ACT_OPEN_RPL, NULL, 2108 CPL_COOKIE_KERN_TLS); 2109 } 2110 2111 #else 2112 2113 int 2114 t6_tls_tag_alloc(if_t ifp, union if_snd_tag_alloc_params *params, 2115 struct m_snd_tag **pt) 2116 { 2117 return (ENXIO); 2118 } 2119 2120 int 2121 t6_ktls_parse_pkt(struct mbuf *m) 2122 { 2123 return (EINVAL); 2124 } 2125 2126 int 2127 t6_ktls_write_wr(struct sge_txq *txq, void *dst, struct mbuf *m, 2128 u_int available) 2129 { 2130 panic("can't happen"); 2131 } 2132 2133 void 2134 t6_ktls_modload(void) 2135 { 2136 } 2137 2138 void 2139 t6_ktls_modunload(void) 2140 { 2141 } 2142 2143 #endif 2144