1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2017-2018 Chelsio Communications, Inc. 5 * All rights reserved. 6 * Written by: John Baldwin <jhb@FreeBSD.org> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 #include "opt_inet.h" 31 #include "opt_kern_tls.h" 32 33 #include <sys/cdefs.h> 34 #ifdef KERN_TLS 35 #include <sys/param.h> 36 #include <sys/ktr.h> 37 #include <sys/ktls.h> 38 #include <sys/sglist.h> 39 #include <sys/socket.h> 40 #include <sys/socketvar.h> 41 #include <sys/systm.h> 42 #include <netinet/in.h> 43 #include <netinet/in_pcb.h> 44 #include <netinet/tcp_var.h> 45 #include <netinet/toecore.h> 46 #include <opencrypto/cryptodev.h> 47 #include <opencrypto/xform.h> 48 49 #ifdef TCP_OFFLOAD 50 #include "common/common.h" 51 #include "common/t4_tcb.h" 52 #include "crypto/t4_crypto.h" 53 #include "tom/t4_tom_l2t.h" 54 #include "tom/t4_tom.h" 55 56 /* 57 * The TCP sequence number of a CPL_TLS_DATA mbuf is saved here while 58 * the mbuf is in the ulp_pdu_reclaimq. 59 */ 60 #define tls_tcp_seq PH_loc.thirtytwo[0] 61 62 static void 63 t4_set_tls_tcb_field(struct toepcb *toep, uint16_t word, uint64_t mask, 64 uint64_t val) 65 { 66 struct adapter *sc = td_adapter(toep->td); 67 68 t4_set_tcb_field(sc, &toep->ofld_txq->wrq, toep, word, mask, val, 0, 0); 69 } 70 71 /* TLS and DTLS common routines */ 72 bool 73 can_tls_offload(struct adapter *sc) 74 { 75 76 return (sc->tt.tls && sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS); 77 } 78 79 int 80 tls_tx_key(struct toepcb *toep) 81 { 82 struct tls_ofld_info *tls_ofld = &toep->tls; 83 84 return (tls_ofld->tx_key_addr >= 0); 85 } 86 87 /* Set TF_RX_QUIESCE to pause receive. */ 88 static void 89 t4_set_rx_quiesce(struct toepcb *toep) 90 { 91 struct adapter *sc = td_adapter(toep->td); 92 93 t4_set_tcb_field(sc, &toep->ofld_txq->wrq, toep, W_TCB_T_FLAGS, 94 V_TF_RX_QUIESCE(1), V_TF_RX_QUIESCE(1), 1, CPL_COOKIE_TOM); 95 } 96 97 /* Clear TF_RX_QUIESCE to re-enable receive. */ 98 static void 99 t4_clear_rx_quiesce(struct toepcb *toep) 100 { 101 102 t4_set_tls_tcb_field(toep, W_TCB_T_FLAGS, V_TF_RX_QUIESCE(1), 0); 103 } 104 105 /* TLS/DTLS content type for CPL SFO */ 106 static inline unsigned char 107 tls_content_type(unsigned char content_type) 108 { 109 switch (content_type) { 110 case CONTENT_TYPE_CCS: 111 return CPL_TX_TLS_SFO_TYPE_CCS; 112 case CONTENT_TYPE_ALERT: 113 return CPL_TX_TLS_SFO_TYPE_ALERT; 114 case CONTENT_TYPE_HANDSHAKE: 115 return CPL_TX_TLS_SFO_TYPE_HANDSHAKE; 116 case CONTENT_TYPE_APP_DATA: 117 return CPL_TX_TLS_SFO_TYPE_DATA; 118 default: 119 return CPL_TX_TLS_SFO_TYPE_CUSTOM; 120 } 121 } 122 123 /* TLS Key memory management */ 124 static void 125 clear_tls_keyid(struct toepcb *toep) 126 { 127 struct tls_ofld_info *tls_ofld = &toep->tls; 128 struct adapter *sc = td_adapter(toep->td); 129 130 if (tls_ofld->rx_key_addr >= 0) { 131 t4_free_tls_keyid(sc, tls_ofld->rx_key_addr); 132 tls_ofld->rx_key_addr = -1; 133 } 134 if (tls_ofld->tx_key_addr >= 0) { 135 t4_free_tls_keyid(sc, tls_ofld->tx_key_addr); 136 tls_ofld->tx_key_addr = -1; 137 } 138 } 139 140 static int 141 get_tp_plen_max(struct ktls_session *tls) 142 { 143 int plen = ((min(3*4096, TP_TX_PG_SZ))/1448) * 1448; 144 145 return (tls->params.max_frame_len <= 8192 ? plen : FC_TP_PLEN_MAX); 146 } 147 148 /* Send request to get the key-id */ 149 static int 150 tls_program_key_id(struct toepcb *toep, struct ktls_session *tls, 151 int direction) 152 { 153 struct tls_ofld_info *tls_ofld = &toep->tls; 154 struct adapter *sc = td_adapter(toep->td); 155 struct ofld_tx_sdesc *txsd; 156 int keyid; 157 struct wrqe *wr; 158 struct tls_key_req *kwr; 159 struct tls_keyctx *kctx; 160 161 #ifdef INVARIANTS 162 int kwrlen, kctxlen, len; 163 164 kwrlen = sizeof(*kwr); 165 kctxlen = roundup2(sizeof(*kctx), 32); 166 len = roundup2(kwrlen + kctxlen, 16); 167 MPASS(TLS_KEY_WR_SZ == len); 168 #endif 169 if (toep->txsd_avail == 0) 170 return (EAGAIN); 171 172 if ((keyid = t4_alloc_tls_keyid(sc)) < 0) { 173 return (ENOSPC); 174 } 175 176 wr = alloc_wrqe(TLS_KEY_WR_SZ, &toep->ofld_txq->wrq); 177 if (wr == NULL) { 178 t4_free_tls_keyid(sc, keyid); 179 return (ENOMEM); 180 } 181 kwr = wrtod(wr); 182 memset(kwr, 0, TLS_KEY_WR_SZ); 183 184 t4_write_tlskey_wr(tls, direction, toep->tid, F_FW_WR_COMPL, keyid, 185 kwr); 186 kctx = (struct tls_keyctx *)(kwr + 1); 187 if (direction == KTLS_TX) 188 tls_ofld->tx_key_addr = keyid; 189 else 190 tls_ofld->rx_key_addr = keyid; 191 t4_tls_key_ctx(tls, direction, kctx); 192 193 txsd = &toep->txsd[toep->txsd_pidx]; 194 txsd->tx_credits = DIV_ROUND_UP(TLS_KEY_WR_SZ, 16); 195 txsd->plen = 0; 196 toep->tx_credits -= txsd->tx_credits; 197 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) 198 toep->txsd_pidx = 0; 199 toep->txsd_avail--; 200 201 t4_wrq_tx(sc, wr); 202 203 return (0); 204 } 205 206 int 207 tls_alloc_ktls(struct toepcb *toep, struct ktls_session *tls, int direction) 208 { 209 struct adapter *sc = td_adapter(toep->td); 210 int error, explicit_iv_size, mac_first; 211 212 if (!can_tls_offload(sc)) 213 return (EINVAL); 214 215 if (direction == KTLS_RX) { 216 if (ulp_mode(toep) != ULP_MODE_NONE) 217 return (EINVAL); 218 if ((toep->flags & TPF_TLS_STARTING) != 0) 219 return (EINVAL); 220 } else { 221 switch (ulp_mode(toep)) { 222 case ULP_MODE_NONE: 223 case ULP_MODE_TLS: 224 case ULP_MODE_TCPDDP: 225 break; 226 default: 227 return (EINVAL); 228 } 229 } 230 231 switch (tls->params.cipher_algorithm) { 232 case CRYPTO_AES_CBC: 233 /* XXX: Explicitly ignore any provided IV. */ 234 switch (tls->params.cipher_key_len) { 235 case 128 / 8: 236 case 192 / 8: 237 case 256 / 8: 238 break; 239 default: 240 return (EINVAL); 241 } 242 switch (tls->params.auth_algorithm) { 243 case CRYPTO_SHA1_HMAC: 244 case CRYPTO_SHA2_256_HMAC: 245 case CRYPTO_SHA2_384_HMAC: 246 break; 247 default: 248 return (EPROTONOSUPPORT); 249 } 250 explicit_iv_size = AES_BLOCK_LEN; 251 mac_first = 1; 252 break; 253 case CRYPTO_AES_NIST_GCM_16: 254 if (tls->params.iv_len != SALT_SIZE) { 255 return (EINVAL); 256 } 257 switch (tls->params.cipher_key_len) { 258 case 128 / 8: 259 case 192 / 8: 260 case 256 / 8: 261 break; 262 default: 263 return (EINVAL); 264 } 265 explicit_iv_size = 8; 266 mac_first = 0; 267 break; 268 default: 269 return (EPROTONOSUPPORT); 270 } 271 272 /* Only TLS 1.1 and TLS 1.2 are currently supported. */ 273 if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE || 274 tls->params.tls_vminor < TLS_MINOR_VER_ONE || 275 tls->params.tls_vminor > TLS_MINOR_VER_TWO) { 276 return (EPROTONOSUPPORT); 277 } 278 279 /* Bail if we already have a key. */ 280 if (direction == KTLS_TX) { 281 if (toep->tls.tx_key_addr != -1) 282 return (EOPNOTSUPP); 283 } else { 284 if (toep->tls.rx_key_addr != -1) 285 return (EOPNOTSUPP); 286 } 287 288 error = tls_program_key_id(toep, tls, direction); 289 if (error) 290 return (error); 291 292 if (direction == KTLS_TX) { 293 toep->tls.scmd0.seqno_numivs = 294 (V_SCMD_SEQ_NO_CTRL(3) | 295 V_SCMD_PROTO_VERSION(t4_tls_proto_ver(tls)) | 296 V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) | 297 V_SCMD_CIPH_AUTH_SEQ_CTRL((mac_first == 0)) | 298 V_SCMD_CIPH_MODE(t4_tls_cipher_mode(tls)) | 299 V_SCMD_AUTH_MODE(t4_tls_auth_mode(tls)) | 300 V_SCMD_HMAC_CTRL(t4_tls_hmac_ctrl(tls)) | 301 V_SCMD_IV_SIZE(explicit_iv_size / 2)); 302 303 toep->tls.scmd0.ivgen_hdrlen = 304 (V_SCMD_IV_GEN_CTRL(1) | 305 V_SCMD_KEY_CTX_INLINE(0) | 306 V_SCMD_TLS_FRAG_ENABLE(1)); 307 308 toep->tls.iv_len = explicit_iv_size; 309 toep->tls.frag_size = tls->params.max_frame_len; 310 toep->tls.fcplenmax = get_tp_plen_max(tls); 311 toep->tls.expn_per_ulp = tls->params.tls_hlen + 312 tls->params.tls_tlen; 313 toep->tls.pdus_per_ulp = 1; 314 toep->tls.adjusted_plen = toep->tls.expn_per_ulp + 315 tls->params.max_frame_len; 316 toep->tls.tx_key_info_size = t4_tls_key_info_size(tls); 317 } else { 318 toep->flags |= TPF_TLS_STARTING | TPF_TLS_RX_QUIESCED; 319 toep->tls.rx_version = tls->params.tls_vmajor << 8 | 320 tls->params.tls_vminor; 321 322 CTR2(KTR_CXGBE, "%s: tid %d setting RX_QUIESCE", __func__, 323 toep->tid); 324 t4_set_rx_quiesce(toep); 325 } 326 327 return (0); 328 } 329 330 void 331 tls_init_toep(struct toepcb *toep) 332 { 333 struct tls_ofld_info *tls_ofld = &toep->tls; 334 335 tls_ofld->rx_key_addr = -1; 336 tls_ofld->tx_key_addr = -1; 337 } 338 339 void 340 tls_uninit_toep(struct toepcb *toep) 341 { 342 343 clear_tls_keyid(toep); 344 } 345 346 #define MAX_OFLD_TX_CREDITS (SGE_MAX_WR_LEN / 16) 347 #define MIN_OFLD_TLSTX_CREDITS(toep) \ 348 (howmany(sizeof(struct fw_tlstx_data_wr) + \ 349 sizeof(struct cpl_tx_tls_sfo) + sizeof(struct ulptx_idata) + \ 350 sizeof(struct ulptx_sc_memrd) + \ 351 AES_BLOCK_LEN + 1, 16)) 352 353 static void 354 write_tlstx_wr(struct fw_tlstx_data_wr *txwr, struct toepcb *toep, 355 unsigned int plen, unsigned int expn, uint8_t credits, int shove) 356 { 357 struct tls_ofld_info *tls_ofld = &toep->tls; 358 unsigned int len = plen + expn; 359 360 txwr->op_to_immdlen = htobe32(V_WR_OP(FW_TLSTX_DATA_WR) | 361 V_FW_TLSTX_DATA_WR_COMPL(1) | 362 V_FW_TLSTX_DATA_WR_IMMDLEN(0)); 363 txwr->flowid_len16 = htobe32(V_FW_TLSTX_DATA_WR_FLOWID(toep->tid) | 364 V_FW_TLSTX_DATA_WR_LEN16(credits)); 365 txwr->plen = htobe32(len); 366 txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(ULP_MODE_TLS) | 367 V_TX_URG(0) | /* F_T6_TX_FORCE | */ V_TX_SHOVE(shove)); 368 txwr->ctxloc_to_exp = htobe32(V_FW_TLSTX_DATA_WR_NUMIVS(1) | 369 V_FW_TLSTX_DATA_WR_EXP(expn) | 370 V_FW_TLSTX_DATA_WR_CTXLOC(TLS_SFO_WR_CONTEXTLOC_DDR) | 371 V_FW_TLSTX_DATA_WR_IVDSGL(0) | 372 V_FW_TLSTX_DATA_WR_KEYSIZE(tls_ofld->tx_key_info_size >> 4)); 373 txwr->mfs = htobe16(tls_ofld->frag_size); 374 txwr->adjustedplen_pkd = htobe16( 375 V_FW_TLSTX_DATA_WR_ADJUSTEDPLEN(tls_ofld->adjusted_plen)); 376 txwr->expinplenmax_pkd = htobe16( 377 V_FW_TLSTX_DATA_WR_EXPINPLENMAX(tls_ofld->expn_per_ulp)); 378 txwr->pdusinplenmax_pkd = 379 V_FW_TLSTX_DATA_WR_PDUSINPLENMAX(tls_ofld->pdus_per_ulp); 380 } 381 382 static void 383 write_tlstx_cpl(struct cpl_tx_tls_sfo *cpl, struct toepcb *toep, 384 struct tls_hdr *tls_hdr, unsigned int plen, uint64_t seqno) 385 { 386 struct tls_ofld_info *tls_ofld = &toep->tls; 387 int data_type, seglen; 388 389 seglen = plen; 390 data_type = tls_content_type(tls_hdr->type); 391 cpl->op_to_seg_len = htobe32(V_CPL_TX_TLS_SFO_OPCODE(CPL_TX_TLS_SFO) | 392 V_CPL_TX_TLS_SFO_DATA_TYPE(data_type) | 393 V_CPL_TX_TLS_SFO_CPL_LEN(2) | V_CPL_TX_TLS_SFO_SEG_LEN(seglen)); 394 cpl->pld_len = htobe32(plen); 395 if (data_type == CPL_TX_TLS_SFO_TYPE_CUSTOM) 396 cpl->type_protover = htobe32( 397 V_CPL_TX_TLS_SFO_TYPE(tls_hdr->type)); 398 cpl->seqno_numivs = htobe32(tls_ofld->scmd0.seqno_numivs | 399 V_SCMD_NUM_IVS(1)); 400 cpl->ivgen_hdrlen = htobe32(tls_ofld->scmd0.ivgen_hdrlen); 401 cpl->scmd1 = htobe64(seqno); 402 } 403 404 static int 405 count_ext_pgs_segs(struct mbuf *m) 406 { 407 vm_paddr_t nextpa; 408 u_int i, nsegs; 409 410 MPASS(m->m_epg_npgs > 0); 411 nsegs = 1; 412 nextpa = m->m_epg_pa[0] + PAGE_SIZE; 413 for (i = 1; i < m->m_epg_npgs; i++) { 414 if (nextpa != m->m_epg_pa[i]) 415 nsegs++; 416 nextpa = m->m_epg_pa[i] + PAGE_SIZE; 417 } 418 return (nsegs); 419 } 420 421 static void 422 write_ktlstx_sgl(void *dst, struct mbuf *m, int nsegs) 423 { 424 struct ulptx_sgl *usgl = dst; 425 vm_paddr_t pa; 426 uint32_t len; 427 int i, j; 428 429 KASSERT(nsegs > 0, ("%s: nsegs 0", __func__)); 430 431 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | 432 V_ULPTX_NSGE(nsegs)); 433 434 /* Figure out the first S/G length. */ 435 pa = m->m_epg_pa[0] + m->m_epg_1st_off; 436 usgl->addr0 = htobe64(pa); 437 len = m_epg_pagelen(m, 0, m->m_epg_1st_off); 438 pa += len; 439 for (i = 1; i < m->m_epg_npgs; i++) { 440 if (m->m_epg_pa[i] != pa) 441 break; 442 len += m_epg_pagelen(m, i, 0); 443 pa += m_epg_pagelen(m, i, 0); 444 } 445 usgl->len0 = htobe32(len); 446 #ifdef INVARIANTS 447 nsegs--; 448 #endif 449 450 j = -1; 451 for (; i < m->m_epg_npgs; i++) { 452 if (j == -1 || m->m_epg_pa[i] != pa) { 453 if (j >= 0) 454 usgl->sge[j / 2].len[j & 1] = htobe32(len); 455 j++; 456 #ifdef INVARIANTS 457 nsegs--; 458 #endif 459 pa = m->m_epg_pa[i]; 460 usgl->sge[j / 2].addr[j & 1] = htobe64(pa); 461 len = m_epg_pagelen(m, i, 0); 462 pa += len; 463 } else { 464 len += m_epg_pagelen(m, i, 0); 465 pa += m_epg_pagelen(m, i, 0); 466 } 467 } 468 if (j >= 0) { 469 usgl->sge[j / 2].len[j & 1] = htobe32(len); 470 471 if ((j & 1) == 0) 472 usgl->sge[j / 2].len[1] = htobe32(0); 473 } 474 KASSERT(nsegs == 0, ("%s: nsegs %d, m %p", __func__, nsegs, m)); 475 } 476 477 /* 478 * Similar to t4_push_frames() but handles sockets that contain TLS 479 * record mbufs. 480 */ 481 void 482 t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop) 483 { 484 struct tls_hdr *thdr; 485 struct fw_tlstx_data_wr *txwr; 486 struct cpl_tx_tls_sfo *cpl; 487 struct ulptx_idata *idata; 488 struct ulptx_sc_memrd *memrd; 489 struct wrqe *wr; 490 struct mbuf *m; 491 u_int nsegs, credits, wr_len; 492 u_int expn_size; 493 struct inpcb *inp = toep->inp; 494 struct tcpcb *tp = intotcpcb(inp); 495 struct socket *so = inp->inp_socket; 496 struct sockbuf *sb = &so->so_snd; 497 int tls_size, tx_credits, shove, sowwakeup; 498 struct ofld_tx_sdesc *txsd; 499 char *buf; 500 501 INP_WLOCK_ASSERT(inp); 502 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 503 ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid)); 504 505 KASSERT(ulp_mode(toep) == ULP_MODE_NONE || 506 ulp_mode(toep) == ULP_MODE_TCPDDP || ulp_mode(toep) == ULP_MODE_TLS, 507 ("%s: ulp_mode %u for toep %p", __func__, ulp_mode(toep), toep)); 508 KASSERT(tls_tx_key(toep), 509 ("%s: TX key not set for toep %p", __func__, toep)); 510 511 #ifdef VERBOSE_TRACES 512 CTR4(KTR_CXGBE, "%s: tid %d toep flags %#x tp flags %#x drop %d", 513 __func__, toep->tid, toep->flags, tp->t_flags); 514 #endif 515 if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) 516 return; 517 518 #ifdef RATELIMIT 519 if (__predict_false(inp->inp_flags2 & INP_RATE_LIMIT_CHANGED) && 520 (update_tx_rate_limit(sc, toep, so->so_max_pacing_rate) == 0)) { 521 inp->inp_flags2 &= ~INP_RATE_LIMIT_CHANGED; 522 } 523 #endif 524 525 /* 526 * This function doesn't resume by itself. Someone else must clear the 527 * flag and call this function. 528 */ 529 if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) { 530 KASSERT(drop == 0, 531 ("%s: drop (%d) != 0 but tx is suspended", __func__, drop)); 532 return; 533 } 534 535 txsd = &toep->txsd[toep->txsd_pidx]; 536 for (;;) { 537 tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS); 538 539 SOCKBUF_LOCK(sb); 540 sowwakeup = drop; 541 if (drop) { 542 sbdrop_locked(sb, drop); 543 drop = 0; 544 } 545 546 m = sb->sb_sndptr != NULL ? sb->sb_sndptr->m_next : sb->sb_mb; 547 548 /* 549 * Send a FIN if requested, but only if there's no 550 * more data to send. 551 */ 552 if (m == NULL && toep->flags & TPF_SEND_FIN) { 553 if (sowwakeup) 554 sowwakeup_locked(so); 555 else 556 SOCKBUF_UNLOCK(sb); 557 SOCKBUF_UNLOCK_ASSERT(sb); 558 t4_close_conn(sc, toep); 559 return; 560 } 561 562 /* 563 * If there is no ready data to send, wait until more 564 * data arrives. 565 */ 566 if (m == NULL || (m->m_flags & M_NOTAVAIL) != 0) { 567 if (sowwakeup) 568 sowwakeup_locked(so); 569 else 570 SOCKBUF_UNLOCK(sb); 571 SOCKBUF_UNLOCK_ASSERT(sb); 572 #ifdef VERBOSE_TRACES 573 CTR2(KTR_CXGBE, "%s: tid %d no ready data to send", 574 __func__, toep->tid); 575 #endif 576 return; 577 } 578 579 KASSERT(m->m_flags & M_EXTPG, ("%s: mbuf %p is not NOMAP", 580 __func__, m)); 581 KASSERT(m->m_epg_tls != NULL, 582 ("%s: mbuf %p doesn't have TLS session", __func__, m)); 583 584 /* Calculate WR length. */ 585 wr_len = sizeof(struct fw_tlstx_data_wr) + 586 sizeof(struct cpl_tx_tls_sfo) + 587 sizeof(struct ulptx_idata) + sizeof(struct ulptx_sc_memrd); 588 589 /* Explicit IVs for AES-CBC and AES-GCM are <= 16. */ 590 MPASS(toep->tls.iv_len <= AES_BLOCK_LEN); 591 wr_len += AES_BLOCK_LEN; 592 593 /* Account for SGL in work request length. */ 594 nsegs = count_ext_pgs_segs(m); 595 wr_len += sizeof(struct ulptx_sgl) + 596 ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8; 597 598 /* Not enough credits for this work request. */ 599 if (howmany(wr_len, 16) > tx_credits) { 600 if (sowwakeup) 601 sowwakeup_locked(so); 602 else 603 SOCKBUF_UNLOCK(sb); 604 SOCKBUF_UNLOCK_ASSERT(sb); 605 #ifdef VERBOSE_TRACES 606 CTR5(KTR_CXGBE, 607 "%s: tid %d mbuf %p requires %d credits, but only %d available", 608 __func__, toep->tid, m, howmany(wr_len, 16), 609 tx_credits); 610 #endif 611 toep->flags |= TPF_TX_SUSPENDED; 612 return; 613 } 614 615 /* Shove if there is no additional data pending. */ 616 shove = ((m->m_next == NULL || 617 (m->m_next->m_flags & M_NOTAVAIL) != 0)) && 618 (tp->t_flags & TF_MORETOCOME) == 0; 619 620 if (sb->sb_flags & SB_AUTOSIZE && 621 V_tcp_do_autosndbuf && 622 sb->sb_hiwat < V_tcp_autosndbuf_max && 623 sbused(sb) >= sb->sb_hiwat * 7 / 8) { 624 int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc, 625 V_tcp_autosndbuf_max); 626 627 if (!sbreserve_locked(so, SO_SND, newsize, NULL)) 628 sb->sb_flags &= ~SB_AUTOSIZE; 629 else 630 sowwakeup = 1; /* room available */ 631 } 632 if (sowwakeup) 633 sowwakeup_locked(so); 634 else 635 SOCKBUF_UNLOCK(sb); 636 SOCKBUF_UNLOCK_ASSERT(sb); 637 638 if (__predict_false(toep->flags & TPF_FIN_SENT)) 639 panic("%s: excess tx.", __func__); 640 641 wr = alloc_wrqe(roundup2(wr_len, 16), &toep->ofld_txq->wrq); 642 if (wr == NULL) { 643 /* XXX: how will we recover from this? */ 644 toep->flags |= TPF_TX_SUSPENDED; 645 return; 646 } 647 648 thdr = (struct tls_hdr *)&m->m_epg_hdr; 649 #ifdef VERBOSE_TRACES 650 CTR5(KTR_CXGBE, "%s: tid %d TLS record %ju type %d len %#x", 651 __func__, toep->tid, m->m_epg_seqno, thdr->type, 652 m->m_len); 653 #endif 654 txwr = wrtod(wr); 655 cpl = (struct cpl_tx_tls_sfo *)(txwr + 1); 656 memset(txwr, 0, roundup2(wr_len, 16)); 657 credits = howmany(wr_len, 16); 658 expn_size = m->m_epg_hdrlen + 659 m->m_epg_trllen; 660 tls_size = m->m_len - expn_size; 661 write_tlstx_wr(txwr, toep, tls_size, expn_size, credits, shove); 662 write_tlstx_cpl(cpl, toep, thdr, tls_size, m->m_epg_seqno); 663 664 idata = (struct ulptx_idata *)(cpl + 1); 665 idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP)); 666 idata->len = htobe32(0); 667 memrd = (struct ulptx_sc_memrd *)(idata + 1); 668 memrd->cmd_to_len = htobe32(V_ULPTX_CMD(ULP_TX_SC_MEMRD) | 669 V_ULP_TX_SC_MORE(1) | 670 V_ULPTX_LEN16(toep->tls.tx_key_info_size >> 4)); 671 memrd->addr = htobe32(toep->tls.tx_key_addr >> 5); 672 673 /* Copy IV. */ 674 buf = (char *)(memrd + 1); 675 memcpy(buf, thdr + 1, toep->tls.iv_len); 676 buf += AES_BLOCK_LEN; 677 678 write_ktlstx_sgl(buf, m, nsegs); 679 680 KASSERT(toep->tx_credits >= credits, 681 ("%s: not enough credits", __func__)); 682 683 toep->tx_credits -= credits; 684 685 tp->snd_nxt += m->m_len; 686 tp->snd_max += m->m_len; 687 688 SOCKBUF_LOCK(sb); 689 sb->sb_sndptr = m; 690 SOCKBUF_UNLOCK(sb); 691 692 toep->flags |= TPF_TX_DATA_SENT; 693 if (toep->tx_credits < MIN_OFLD_TLSTX_CREDITS(toep)) 694 toep->flags |= TPF_TX_SUSPENDED; 695 696 KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__)); 697 txsd->plen = m->m_len; 698 txsd->tx_credits = credits; 699 txsd++; 700 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) { 701 toep->txsd_pidx = 0; 702 txsd = &toep->txsd[0]; 703 } 704 toep->txsd_avail--; 705 706 counter_u64_add(toep->ofld_txq->tx_toe_tls_records, 1); 707 counter_u64_add(toep->ofld_txq->tx_toe_tls_octets, m->m_len); 708 709 t4_l2t_send(sc, wr, toep->l2te); 710 } 711 } 712 713 /* 714 * For TLS data we place received mbufs received via CPL_TLS_DATA into 715 * an mbufq in the TLS offload state. When CPL_RX_TLS_CMP is 716 * received, the completed PDUs are placed into the socket receive 717 * buffer. 718 * 719 * The TLS code reuses the ulp_pdu_reclaimq to hold the pending mbufs. 720 */ 721 static int 722 do_tls_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 723 { 724 struct adapter *sc = iq->adapter; 725 const struct cpl_tls_data *cpl = mtod(m, const void *); 726 unsigned int tid = GET_TID(cpl); 727 struct toepcb *toep = lookup_tid(sc, tid); 728 struct inpcb *inp = toep->inp; 729 struct tcpcb *tp; 730 int len; 731 732 /* XXX: Should this match do_rx_data instead? */ 733 KASSERT(!(toep->flags & TPF_SYNQE), 734 ("%s: toep %p claims to be a synq entry", __func__, toep)); 735 736 KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__)); 737 738 /* strip off CPL header */ 739 m_adj(m, sizeof(*cpl)); 740 len = m->m_pkthdr.len; 741 742 toep->ofld_rxq->rx_toe_tls_octets += len; 743 744 KASSERT(len == G_CPL_TLS_DATA_LENGTH(be32toh(cpl->length_pkd)), 745 ("%s: payload length mismatch", __func__)); 746 747 INP_WLOCK(inp); 748 if (inp->inp_flags & INP_DROPPED) { 749 CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x", 750 __func__, tid, len, inp->inp_flags); 751 INP_WUNLOCK(inp); 752 m_freem(m); 753 return (0); 754 } 755 756 /* Save TCP sequence number. */ 757 m->m_pkthdr.tls_tcp_seq = be32toh(cpl->seq); 758 759 if (mbufq_enqueue(&toep->ulp_pdu_reclaimq, m)) { 760 #ifdef INVARIANTS 761 panic("Failed to queue TLS data packet"); 762 #else 763 printf("%s: Failed to queue TLS data packet\n", __func__); 764 INP_WUNLOCK(inp); 765 m_freem(m); 766 return (0); 767 #endif 768 } 769 770 tp = intotcpcb(inp); 771 tp->t_rcvtime = ticks; 772 773 #ifdef VERBOSE_TRACES 774 CTR4(KTR_CXGBE, "%s: tid %u len %d seq %u", __func__, tid, len, 775 be32toh(cpl->seq)); 776 #endif 777 778 INP_WUNLOCK(inp); 779 return (0); 780 } 781 782 static int 783 do_rx_tls_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 784 { 785 struct adapter *sc = iq->adapter; 786 const struct cpl_rx_tls_cmp *cpl = mtod(m, const void *); 787 struct tlsrx_hdr_pkt *tls_hdr_pkt; 788 unsigned int tid = GET_TID(cpl); 789 struct toepcb *toep = lookup_tid(sc, tid); 790 struct inpcb *inp = toep->inp; 791 struct tcpcb *tp; 792 struct socket *so; 793 struct sockbuf *sb; 794 struct mbuf *tls_data; 795 struct tls_get_record *tgr; 796 struct mbuf *control; 797 int pdu_length, rx_credits, trailer_len; 798 #if defined(KTR) || defined(INVARIANTS) 799 int len; 800 #endif 801 802 KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__)); 803 KASSERT(!(toep->flags & TPF_SYNQE), 804 ("%s: toep %p claims to be a synq entry", __func__, toep)); 805 806 /* strip off CPL header */ 807 m_adj(m, sizeof(*cpl)); 808 #if defined(KTR) || defined(INVARIANTS) 809 len = m->m_pkthdr.len; 810 #endif 811 812 toep->ofld_rxq->rx_toe_tls_records++; 813 814 KASSERT(len == G_CPL_RX_TLS_CMP_LENGTH(be32toh(cpl->pdulength_length)), 815 ("%s: payload length mismatch", __func__)); 816 817 INP_WLOCK(inp); 818 if (inp->inp_flags & INP_DROPPED) { 819 CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x", 820 __func__, tid, len, inp->inp_flags); 821 INP_WUNLOCK(inp); 822 m_freem(m); 823 return (0); 824 } 825 826 pdu_length = G_CPL_RX_TLS_CMP_PDULENGTH(be32toh(cpl->pdulength_length)); 827 828 so = inp_inpcbtosocket(inp); 829 tp = intotcpcb(inp); 830 831 #ifdef VERBOSE_TRACES 832 CTR6(KTR_CXGBE, "%s: tid %u PDU len %d len %d seq %u, rcv_nxt %u", 833 __func__, tid, pdu_length, len, be32toh(cpl->seq), tp->rcv_nxt); 834 #endif 835 836 tp->rcv_nxt += pdu_length; 837 KASSERT(tp->rcv_wnd >= pdu_length, 838 ("%s: negative window size", __func__)); 839 tp->rcv_wnd -= pdu_length; 840 841 /* XXX: Not sure what to do about urgent data. */ 842 843 /* 844 * The payload of this CPL is the TLS header followed by 845 * additional fields. 846 */ 847 KASSERT(m->m_len >= sizeof(*tls_hdr_pkt), 848 ("%s: payload too small", __func__)); 849 tls_hdr_pkt = mtod(m, void *); 850 851 tls_data = mbufq_dequeue(&toep->ulp_pdu_reclaimq); 852 if (tls_data != NULL) { 853 KASSERT(be32toh(cpl->seq) == tls_data->m_pkthdr.tls_tcp_seq, 854 ("%s: sequence mismatch", __func__)); 855 } 856 857 /* Report decryption errors as EBADMSG. */ 858 if ((tls_hdr_pkt->res_to_mac_error & M_TLSRX_HDR_PKT_ERROR) != 0) { 859 CTR4(KTR_CXGBE, "%s: tid %u TLS error %#x ddp_vld %#x", 860 __func__, toep->tid, tls_hdr_pkt->res_to_mac_error, 861 be32toh(cpl->ddp_valid)); 862 m_freem(m); 863 m_freem(tls_data); 864 865 CURVNET_SET(toep->vnet); 866 so->so_error = EBADMSG; 867 sorwakeup(so); 868 869 INP_WUNLOCK(inp); 870 CURVNET_RESTORE(); 871 872 return (0); 873 } 874 875 /* Handle data received after the socket is closed. */ 876 sb = &so->so_rcv; 877 SOCKBUF_LOCK(sb); 878 if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) { 879 struct epoch_tracker et; 880 881 CTR3(KTR_CXGBE, "%s: tid %u, excess rx (%d bytes)", 882 __func__, tid, pdu_length); 883 m_freem(m); 884 m_freem(tls_data); 885 SOCKBUF_UNLOCK(sb); 886 INP_WUNLOCK(inp); 887 888 CURVNET_SET(toep->vnet); 889 NET_EPOCH_ENTER(et); 890 INP_WLOCK(inp); 891 tp = tcp_drop(tp, ECONNRESET); 892 if (tp != NULL) 893 INP_WUNLOCK(inp); 894 NET_EPOCH_EXIT(et); 895 CURVNET_RESTORE(); 896 897 return (0); 898 } 899 900 /* 901 * If there is any data in the 'sb_mtls' chain of the socket 902 * or we aren't able to allocate the control mbuf, append the 903 * record as a CSUM_TLS_DECRYPTED packet to 'sb_mtls' rather 904 * than as a decrypted record to 'sb_m'. 905 */ 906 if (sb->sb_mtls != NULL) 907 control = NULL; 908 else 909 control = sbcreatecontrol(NULL, sizeof(*tgr), TLS_GET_RECORD, 910 IPPROTO_TCP, M_NOWAIT); 911 912 if (control != NULL) { 913 tgr = (struct tls_get_record *) 914 CMSG_DATA(mtod(control, struct cmsghdr *)); 915 memset(tgr, 0, sizeof(*tgr)); 916 tgr->tls_type = tls_hdr_pkt->type; 917 tgr->tls_vmajor = be16toh(tls_hdr_pkt->version) >> 8; 918 tgr->tls_vminor = be16toh(tls_hdr_pkt->version) & 0xff; 919 if (tls_data != NULL) { 920 m_last(tls_data)->m_flags |= M_EOR; 921 tgr->tls_length = htobe16(tls_data->m_pkthdr.len); 922 } else 923 tgr->tls_length = 0; 924 925 m_freem(m); 926 m = tls_data; 927 } else { 928 M_ASSERTPKTHDR(m); 929 930 /* It's ok that any explicit IV is missing. */ 931 m->m_len = sb->sb_tls_info->params.tls_hlen; 932 m->m_pkthdr.csum_flags |= CSUM_TLS_DECRYPTED; 933 m->m_pkthdr.len = m->m_len; 934 if (tls_data != NULL) { 935 m->m_pkthdr.len += tls_data->m_pkthdr.len; 936 m_demote_pkthdr(tls_data); 937 m->m_next = tls_data; 938 } 939 940 /* 941 * Grow the chain by the trailer, but without 942 * contents. The trailer will be thrown away by 943 * ktls_decrypt. Note that ktls_decrypt assumes the 944 * trailer is tls_tlen bytes long, so append that many 945 * bytes not the actual trailer size computed from 946 * pdu_length. 947 */ 948 trailer_len = sb->sb_tls_info->params.tls_tlen; 949 if (tls_data != NULL) { 950 m_last(tls_data)->m_len += trailer_len; 951 tls_data = NULL; 952 } else 953 m->m_len += trailer_len; 954 m->m_pkthdr.len += trailer_len; 955 tls_hdr_pkt->length = htobe16(m->m_pkthdr.len - 956 sizeof(struct tls_record_layer)); 957 } 958 959 /* receive buffer autosize */ 960 MPASS(toep->vnet == so->so_vnet); 961 CURVNET_SET(toep->vnet); 962 if (sb->sb_flags & SB_AUTOSIZE && 963 V_tcp_do_autorcvbuf && 964 sb->sb_hiwat < V_tcp_autorcvbuf_max && 965 m->m_pkthdr.len > (sbspace(sb) / 8 * 7)) { 966 unsigned int hiwat = sb->sb_hiwat; 967 unsigned int newsize = min(hiwat + sc->tt.autorcvbuf_inc, 968 V_tcp_autorcvbuf_max); 969 970 if (!sbreserve_locked(so, SO_RCV, newsize, NULL)) 971 sb->sb_flags &= ~SB_AUTOSIZE; 972 } 973 974 if (control != NULL) 975 sbappendcontrol_locked(sb, m, control, 0); 976 else 977 sbappendstream_locked(sb, m, 0); 978 rx_credits = sbspace(sb) > tp->rcv_wnd ? sbspace(sb) - tp->rcv_wnd : 0; 979 #ifdef VERBOSE_TRACES 980 CTR4(KTR_CXGBE, "%s: tid %u rx_credits %u rcv_wnd %u", 981 __func__, tid, rx_credits, tp->rcv_wnd); 982 #endif 983 if (rx_credits > 0 && sbused(sb) + tp->rcv_wnd < sb->sb_lowat) { 984 rx_credits = send_rx_credits(sc, toep, rx_credits); 985 tp->rcv_wnd += rx_credits; 986 tp->rcv_adv += rx_credits; 987 } 988 989 sorwakeup_locked(so); 990 SOCKBUF_UNLOCK_ASSERT(sb); 991 992 INP_WUNLOCK(inp); 993 CURVNET_RESTORE(); 994 return (0); 995 } 996 997 void 998 do_rx_data_tls(const struct cpl_rx_data *cpl, struct toepcb *toep, 999 struct mbuf *m) 1000 { 1001 struct inpcb *inp = toep->inp; 1002 struct tls_ofld_info *tls_ofld = &toep->tls; 1003 struct tls_hdr *hdr; 1004 struct tcpcb *tp; 1005 struct socket *so; 1006 struct sockbuf *sb; 1007 int len, rx_credits; 1008 1009 len = m->m_pkthdr.len; 1010 1011 INP_WLOCK_ASSERT(inp); 1012 1013 so = inp_inpcbtosocket(inp); 1014 tp = intotcpcb(inp); 1015 sb = &so->so_rcv; 1016 SOCKBUF_LOCK(sb); 1017 CURVNET_SET(toep->vnet); 1018 1019 tp->rcv_nxt += len; 1020 KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__)); 1021 tp->rcv_wnd -= len; 1022 1023 /* Do we have a full TLS header? */ 1024 if (len < sizeof(*hdr)) { 1025 CTR3(KTR_CXGBE, "%s: tid %u len %d: too short for a TLS header", 1026 __func__, toep->tid, len); 1027 so->so_error = EMSGSIZE; 1028 goto out; 1029 } 1030 hdr = mtod(m, struct tls_hdr *); 1031 1032 /* Is the header valid? */ 1033 if (be16toh(hdr->version) != tls_ofld->rx_version) { 1034 CTR3(KTR_CXGBE, "%s: tid %u invalid version %04x", 1035 __func__, toep->tid, be16toh(hdr->version)); 1036 so->so_error = EINVAL; 1037 goto out; 1038 } 1039 if (be16toh(hdr->length) < sizeof(*hdr)) { 1040 CTR3(KTR_CXGBE, "%s: tid %u invalid length %u", 1041 __func__, toep->tid, be16toh(hdr->length)); 1042 so->so_error = EBADMSG; 1043 goto out; 1044 } 1045 1046 /* Did we get a truncated record? */ 1047 if (len < be16toh(hdr->length)) { 1048 CTR4(KTR_CXGBE, "%s: tid %u truncated TLS record (%d vs %u)", 1049 __func__, toep->tid, len, be16toh(hdr->length)); 1050 1051 so->so_error = EMSGSIZE; 1052 goto out; 1053 } 1054 1055 /* Is the header type unknown? */ 1056 switch (hdr->type) { 1057 case CONTENT_TYPE_CCS: 1058 case CONTENT_TYPE_ALERT: 1059 case CONTENT_TYPE_APP_DATA: 1060 case CONTENT_TYPE_HANDSHAKE: 1061 break; 1062 default: 1063 CTR3(KTR_CXGBE, "%s: tid %u invalid TLS record type %u", 1064 __func__, toep->tid, hdr->type); 1065 so->so_error = EBADMSG; 1066 goto out; 1067 } 1068 1069 /* 1070 * Just punt. Although this could fall back to software 1071 * decryption, this case should never really happen. 1072 */ 1073 CTR4(KTR_CXGBE, "%s: tid %u dropping TLS record type %u, length %u", 1074 __func__, toep->tid, hdr->type, be16toh(hdr->length)); 1075 so->so_error = EBADMSG; 1076 1077 out: 1078 /* 1079 * This connection is going to die anyway, so probably don't 1080 * need to bother with returning credits. 1081 */ 1082 rx_credits = sbspace(sb) > tp->rcv_wnd ? sbspace(sb) - tp->rcv_wnd : 0; 1083 #ifdef VERBOSE_TRACES 1084 CTR4(KTR_CXGBE, "%s: tid %u rx_credits %u rcv_wnd %u", 1085 __func__, toep->tid, rx_credits, tp->rcv_wnd); 1086 #endif 1087 if (rx_credits > 0 && sbused(sb) + tp->rcv_wnd < sb->sb_lowat) { 1088 rx_credits = send_rx_credits(toep->vi->adapter, toep, 1089 rx_credits); 1090 tp->rcv_wnd += rx_credits; 1091 tp->rcv_adv += rx_credits; 1092 } 1093 1094 sorwakeup_locked(so); 1095 SOCKBUF_UNLOCK_ASSERT(sb); 1096 1097 INP_WUNLOCK(inp); 1098 CURVNET_RESTORE(); 1099 1100 m_freem(m); 1101 } 1102 1103 /* SET_TCB_FIELD sent as a ULP command looks like this */ 1104 #define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \ 1105 sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core)) 1106 1107 static inline void * 1108 mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, struct toepcb *toep, 1109 uint64_t word, uint64_t mask, uint64_t val) 1110 { 1111 struct ulptx_idata *ulpsc; 1112 struct cpl_set_tcb_field_core *req; 1113 1114 ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0)); 1115 ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16)); 1116 1117 ulpsc = (struct ulptx_idata *)(ulpmc + 1); 1118 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM)); 1119 ulpsc->len = htobe32(sizeof(*req)); 1120 1121 req = (struct cpl_set_tcb_field_core *)(ulpsc + 1); 1122 OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, toep->tid)); 1123 req->reply_ctrl = htobe16(V_NO_REPLY(1) | 1124 V_QUEUENO(toep->ofld_rxq->iq.abs_id)); 1125 req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0)); 1126 req->mask = htobe64(mask); 1127 req->val = htobe64(val); 1128 1129 ulpsc = (struct ulptx_idata *)(req + 1); 1130 if (LEN__SET_TCB_FIELD_ULP % 16) { 1131 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP)); 1132 ulpsc->len = htobe32(0); 1133 return (ulpsc + 1); 1134 } 1135 return (ulpsc); 1136 } 1137 1138 /* 1139 * Send a work request setting multiple TCB fields to enable 1140 * ULP_MODE_TLS. 1141 */ 1142 static void 1143 tls_update_tcb(struct adapter *sc, struct toepcb *toep, uint64_t seqno) 1144 { 1145 struct wrqe *wr; 1146 struct work_request_hdr *wrh; 1147 struct ulp_txpkt *ulpmc; 1148 int fields, key_offset, len; 1149 1150 KASSERT(ulp_mode(toep) == ULP_MODE_NONE, 1151 ("%s: tid %d already ULP_MODE_TLS", __func__, toep->tid)); 1152 1153 fields = 0; 1154 1155 /* 2 writes for the overlay region */ 1156 fields += 2; 1157 1158 /* W_TCB_TLS_SEQ */ 1159 fields++; 1160 1161 /* W_TCB_ULP_RAW */ 1162 fields++; 1163 1164 /* W_TCB_ULP_TYPE */ 1165 fields ++; 1166 1167 /* W_TCB_T_FLAGS */ 1168 fields++; 1169 1170 len = sizeof(*wrh) + fields * roundup2(LEN__SET_TCB_FIELD_ULP, 16); 1171 KASSERT(len <= SGE_MAX_WR_LEN, 1172 ("%s: WR with %d TCB field updates too large", __func__, fields)); 1173 1174 wr = alloc_wrqe(len, toep->ctrlq); 1175 if (wr == NULL) { 1176 /* XXX */ 1177 panic("%s: out of memory", __func__); 1178 } 1179 1180 wrh = wrtod(wr); 1181 INIT_ULPTX_WRH(wrh, len, 1, 0); /* atomic */ 1182 ulpmc = (struct ulp_txpkt *)(wrh + 1); 1183 1184 /* 1185 * Clear the TLS overlay region: 1023:832. 1186 * 1187 * Words 26/27 are always set to zero. Words 28/29 1188 * contain seqno and are set when enabling TLS 1189 * decryption. Word 30 is zero and Word 31 contains 1190 * the keyid. 1191 */ 1192 ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, 26, 1193 0xffffffffffffffff, 0); 1194 1195 /* 1196 * RX key tags are an index into the key portion of MA 1197 * memory stored as an offset from the base address in 1198 * units of 64 bytes. 1199 */ 1200 key_offset = toep->tls.rx_key_addr - sc->vres.key.start; 1201 ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, 30, 1202 0xffffffffffffffff, 1203 (uint64_t)V_TCB_RX_TLS_KEY_TAG(key_offset / 64) << 32); 1204 1205 CTR3(KTR_CXGBE, "%s: tid %d enable TLS seqno %lu", __func__, 1206 toep->tid, seqno); 1207 ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_TLS_SEQ, 1208 V_TCB_TLS_SEQ(M_TCB_TLS_SEQ), V_TCB_TLS_SEQ(seqno)); 1209 ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_ULP_RAW, 1210 V_TCB_ULP_RAW(M_TCB_ULP_RAW), 1211 V_TCB_ULP_RAW((V_TF_TLS_KEY_SIZE(3) | V_TF_TLS_CONTROL(1) | 1212 V_TF_TLS_ACTIVE(1) | V_TF_TLS_ENABLE(1)))); 1213 1214 toep->flags &= ~TPF_TLS_STARTING; 1215 toep->flags |= TPF_TLS_RECEIVE; 1216 1217 /* Set the ULP mode to ULP_MODE_TLS. */ 1218 toep->params.ulp_mode = ULP_MODE_TLS; 1219 ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_ULP_TYPE, 1220 V_TCB_ULP_TYPE(M_TCB_ULP_TYPE), 1221 V_TCB_ULP_TYPE(ULP_MODE_TLS)); 1222 1223 /* Clear TF_RX_QUIESCE. */ 1224 ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_T_FLAGS, 1225 V_TF_RX_QUIESCE(1), 0); 1226 1227 t4_wrq_tx(sc, wr); 1228 } 1229 1230 /* 1231 * Examine the pending data in the socket buffer and either enable TLS 1232 * RX or request more encrypted data. 1233 */ 1234 static void 1235 tls_check_rx_sockbuf(struct adapter *sc, struct toepcb *toep, 1236 struct sockbuf *sb) 1237 { 1238 uint64_t seqno; 1239 size_t resid; 1240 bool have_header; 1241 1242 SOCKBUF_LOCK_ASSERT(sb); 1243 MPASS(toep->tls.rx_resid == 0); 1244 1245 have_header = ktls_pending_rx_info(sb, &seqno, &resid); 1246 CTR5(KTR_CXGBE, "%s: tid %d have_header %d seqno %lu resid %zu", 1247 __func__, toep->tid, have_header, seqno, resid); 1248 1249 /* 1250 * If we have a partial header or we need fewer bytes than the 1251 * size of a TLS record, re-enable receive and pause again once 1252 * we get more data to try again. 1253 */ 1254 if (!have_header || resid != 0) { 1255 CTR(KTR_CXGBE, "%s: tid %d waiting for more data", __func__, 1256 toep->tid); 1257 toep->flags &= ~TPF_TLS_RX_QUIESCED; 1258 t4_clear_rx_quiesce(toep); 1259 return; 1260 } 1261 1262 tls_update_tcb(sc, toep, seqno); 1263 } 1264 1265 void 1266 tls_received_starting_data(struct adapter *sc, struct toepcb *toep, 1267 struct sockbuf *sb, int len) 1268 { 1269 MPASS(toep->flags & TPF_TLS_STARTING); 1270 1271 /* 1272 * A previous call to tls_check_rx_sockbuf needed more data. 1273 * Now that more data has arrived, quiesce receive again and 1274 * check the state once the quiesce has completed. 1275 */ 1276 if ((toep->flags & TPF_TLS_RX_QUIESCED) == 0) { 1277 CTR(KTR_CXGBE, "%s: tid %d quiescing", __func__, toep->tid); 1278 toep->flags |= TPF_TLS_RX_QUIESCED; 1279 t4_set_rx_quiesce(toep); 1280 return; 1281 } 1282 1283 KASSERT(len <= toep->tls.rx_resid, 1284 ("%s: received excess bytes %d (waiting for %zu)", __func__, len, 1285 toep->tls.rx_resid)); 1286 toep->tls.rx_resid -= len; 1287 if (toep->tls.rx_resid != 0) 1288 return; 1289 1290 tls_check_rx_sockbuf(sc, toep, sb); 1291 } 1292 1293 static int 1294 do_tls_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1295 { 1296 struct adapter *sc = iq->adapter; 1297 const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1); 1298 unsigned int tid = GET_TID(cpl); 1299 struct toepcb *toep; 1300 struct inpcb *inp; 1301 struct socket *so; 1302 struct sockbuf *sb; 1303 1304 if (cpl->status != CPL_ERR_NONE) 1305 panic("XXX: tcp_rpl failed: %d", cpl->status); 1306 1307 toep = lookup_tid(sc, tid); 1308 inp = toep->inp; 1309 switch (cpl->cookie) { 1310 case V_WORD(W_TCB_T_FLAGS) | V_COOKIE(CPL_COOKIE_TOM): 1311 INP_WLOCK(inp); 1312 if ((toep->flags & TPF_TLS_STARTING) == 0) 1313 panic("%s: connection is not starting TLS RX\n", 1314 __func__); 1315 1316 so = inp->inp_socket; 1317 sb = &so->so_rcv; 1318 SOCKBUF_LOCK(sb); 1319 tls_check_rx_sockbuf(sc, toep, sb); 1320 SOCKBUF_UNLOCK(sb); 1321 INP_WUNLOCK(inp); 1322 break; 1323 default: 1324 panic("XXX: unknown tcb_rpl offset %#x, cookie %#x", 1325 G_WORD(cpl->cookie), G_COOKIE(cpl->cookie)); 1326 } 1327 1328 return (0); 1329 } 1330 1331 void 1332 t4_tls_mod_load(void) 1333 { 1334 1335 t4_register_cpl_handler(CPL_TLS_DATA, do_tls_data); 1336 t4_register_cpl_handler(CPL_RX_TLS_CMP, do_rx_tls_cmp); 1337 t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, do_tls_tcb_rpl, 1338 CPL_COOKIE_TOM); 1339 } 1340 1341 void 1342 t4_tls_mod_unload(void) 1343 { 1344 1345 t4_register_cpl_handler(CPL_TLS_DATA, NULL); 1346 t4_register_cpl_handler(CPL_RX_TLS_CMP, NULL); 1347 t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, NULL, CPL_COOKIE_TOM); 1348 } 1349 #endif /* TCP_OFFLOAD */ 1350 #endif /* KERN_TLS */ 1351