/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2017-2018 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_inet.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/sglist.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/systm.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_var.h>
#include <netinet/toecore.h>

#ifdef TCP_OFFLOAD
#include "common/common.h"
#include "common/t4_tcb.h"
#include "crypto/t4_crypto.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"

/*
 * The TCP sequence number of a CPL_TLS_DATA mbuf is saved here while
 * the mbuf is in the ulp_pdu_reclaimq.
 */
#define tls_tcp_seq PH_loc.thirtytwo[0]

/*
 * Handshake lock used for the handshake timer.  Having a global lock
 * is perhaps not ideal, but it avoids having to use callout_drain()
 * in tls_uninit_toep() which can't block.  Also, the timer shouldn't
 * actually fire for most connections.
 */
static struct mtx tls_handshake_lock;

static void
t4_set_tls_tcb_field(struct toepcb *toep, uint16_t word, uint64_t mask,
    uint64_t val)
{
	struct adapter *sc = td_adapter(toep->td);

	t4_set_tcb_field(sc, toep->ofld_txq, toep, word, mask, val, 0, 0);
}

/* TLS and DTLS common routines */
bool
can_tls_offload(struct adapter *sc)
{

	return (sc->tt.tls && sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS);
}

int
tls_tx_key(struct toepcb *toep)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;

	return (tls_ofld->tx_key_addr >= 0);
}

int
tls_rx_key(struct toepcb *toep)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;

	return (tls_ofld->rx_key_addr >= 0);
}

static int
key_size(struct toepcb *toep)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;

	return ((tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_IMMEDIATE) ?
	    tls_ofld->k_ctx.tx_key_info_size : KEY_IN_DDR_SIZE);
}
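
/*
 * A note tying this to tls_copy_tx_key() below: when the key context
 * lives in adapter DDR, each work request carries only a small fixed
 * descriptor (presumably what KEY_IN_DDR_SIZE covers) that points the
 * crypto engine at the stored key, while an IMMEDIATE key context is
 * copied into the work request in full.
 */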

/* Set TLS Key-Id in TCB */
static void
t4_set_tls_keyid(struct toepcb *toep, unsigned int key_id)
{

	t4_set_tls_tcb_field(toep, W_TCB_RX_TLS_KEY_TAG,
	    V_TCB_RX_TLS_KEY_TAG(M_TCB_RX_TLS_BUF_TAG),
	    V_TCB_RX_TLS_KEY_TAG(key_id));
}

/* Clear TF_RX_QUIESCE to re-enable receive. */
static void
t4_clear_rx_quiesce(struct toepcb *toep)
{

	t4_set_tls_tcb_field(toep, W_TCB_T_FLAGS, V_TF_RX_QUIESCE(1), 0);
}

static void
tls_clr_ofld_mode(struct toepcb *toep)
{

	tls_stop_handshake_timer(toep);

	/* Operate in PDU extraction mode only. */
	t4_set_tls_tcb_field(toep, W_TCB_ULP_RAW,
	    V_TCB_ULP_RAW(M_TCB_ULP_RAW),
	    V_TCB_ULP_RAW(V_TF_TLS_ENABLE(1)));
	t4_clear_rx_quiesce(toep);
}

static void
tls_clr_quiesce(struct toepcb *toep)
{

	tls_stop_handshake_timer(toep);
	t4_clear_rx_quiesce(toep);
}

/*
 * Calculate the TLS data expansion size
 */
static int
tls_expansion_size(struct toepcb *toep, int data_len, int full_pdus_only,
    unsigned short *pdus_per_ulp)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;
	struct tls_scmd *scmd = &tls_ofld->scmd0;
	int expn_size = 0, frag_count = 0, pad_per_pdu = 0,
	    pad_last_pdu = 0, last_frag_size = 0, max_frag_size = 0;
	int exp_per_pdu = 0;
	int hdr_len = TLS_HEADER_LENGTH;

	do {
		max_frag_size = tls_ofld->k_ctx.frag_size;
		if (G_SCMD_CIPH_MODE(scmd->seqno_numivs) ==
		    SCMD_CIPH_MODE_AES_GCM) {
			frag_count = (data_len / max_frag_size);
			exp_per_pdu = GCM_TAG_SIZE + AEAD_EXPLICIT_DATA_SIZE +
			    hdr_len;
			expn_size = frag_count * exp_per_pdu;
			if (full_pdus_only) {
				*pdus_per_ulp = data_len / (exp_per_pdu +
				    max_frag_size);
				if (*pdus_per_ulp > 32)
					*pdus_per_ulp = 32;
				else if (!*pdus_per_ulp)
					*pdus_per_ulp = 1;
				expn_size = (*pdus_per_ulp) * exp_per_pdu;
				break;
			}
			if ((last_frag_size = data_len % max_frag_size) > 0) {
				frag_count += 1;
				expn_size += exp_per_pdu;
			}
			break;
		} else if (G_SCMD_CIPH_MODE(scmd->seqno_numivs) !=
		    SCMD_CIPH_MODE_NOP) {
			/* Calculate the number of fragments we can make */
			frag_count = (data_len / max_frag_size);
			if (frag_count > 0) {
				pad_per_pdu = (((howmany((max_frag_size +
				    tls_ofld->mac_length),
				    CIPHER_BLOCK_SIZE)) *
				    CIPHER_BLOCK_SIZE) -
				    (max_frag_size +
				    tls_ofld->mac_length));
				if (!pad_per_pdu)
					pad_per_pdu = CIPHER_BLOCK_SIZE;
				exp_per_pdu = pad_per_pdu +
				    tls_ofld->mac_length +
				    hdr_len + CIPHER_BLOCK_SIZE;
				expn_size = frag_count * exp_per_pdu;
			}
			if (full_pdus_only) {
				*pdus_per_ulp = data_len / (exp_per_pdu +
				    max_frag_size);
				if (*pdus_per_ulp > 32)
					*pdus_per_ulp = 32;
				else if (!*pdus_per_ulp)
					*pdus_per_ulp = 1;
				expn_size = (*pdus_per_ulp) * exp_per_pdu;
				break;
			}
			/* Consider the last fragment */
			if ((last_frag_size = data_len % max_frag_size) > 0) {
				pad_last_pdu = (((howmany((last_frag_size +
				    tls_ofld->mac_length),
				    CIPHER_BLOCK_SIZE)) *
				    CIPHER_BLOCK_SIZE) -
				    (last_frag_size +
				    tls_ofld->mac_length));
				if (!pad_last_pdu)
					pad_last_pdu = CIPHER_BLOCK_SIZE;
				expn_size += (pad_last_pdu +
				    tls_ofld->mac_length + hdr_len +
				    CIPHER_BLOCK_SIZE);
			}
		}
	} while (0);

	return (expn_size);
}
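
/*
 * A worked example of the CBC arithmetic above (my numbers, assuming
 * CIPHER_BLOCK_SIZE is the 16-byte AES block and a 20-byte SHA-1 MAC):
 * with max_frag_size = 4096, howmany(4096 + 20, 16) * 16 = 4128, so
 * pad_per_pdu = 12 and each full PDU expands by 12 (pad) + 20 (MAC) +
 * 5 (header) + 16 (explicit IV) = 53 bytes on the wire.
 */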

/* Copy Key to WR */
static void
tls_copy_tx_key(struct toepcb *toep, void *dst)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;
	struct ulptx_sc_memrd *sc_memrd;
	struct ulptx_idata *sc;

	if (tls_ofld->k_ctx.tx_key_info_size <= 0)
		return;

	if (tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_DDR) {
		sc = dst;
		sc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		sc->len = htobe32(0);
		sc_memrd = (struct ulptx_sc_memrd *)(sc + 1);
		sc_memrd->cmd_to_len = htobe32(V_ULPTX_CMD(ULP_TX_SC_MEMRD) |
		    V_ULP_TX_SC_MORE(1) |
		    V_ULPTX_LEN16(tls_ofld->k_ctx.tx_key_info_size >> 4));
		sc_memrd->addr = htobe32(tls_ofld->tx_key_addr >> 5);
	} else if (tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_IMMEDIATE) {
		memcpy(dst, &tls_ofld->k_ctx.tx,
		    tls_ofld->k_ctx.tx_key_info_size);
	}
}

/* TLS/DTLS content type for CPL SFO */
static inline unsigned char
tls_content_type(unsigned char content_type)
{
	/*
	 * XXX: Shouldn't this map CONTENT_TYPE_APP_DATA to DATA and
	 * default to "CUSTOM" for all other types including
	 * heartbeat?
	 */
	switch (content_type) {
	case CONTENT_TYPE_CCS:
		return CPL_TX_TLS_SFO_TYPE_CCS;
	case CONTENT_TYPE_ALERT:
		return CPL_TX_TLS_SFO_TYPE_ALERT;
	case CONTENT_TYPE_HANDSHAKE:
		return CPL_TX_TLS_SFO_TYPE_HANDSHAKE;
	case CONTENT_TYPE_HEARTBEAT:
		return CPL_TX_TLS_SFO_TYPE_HEARTBEAT;
	}
	return CPL_TX_TLS_SFO_TYPE_DATA;
}

static unsigned char
get_cipher_key_size(unsigned int ck_size)
{
	switch (ck_size) {
	case AES_NOP: /* NOP */
		return 15;
	case AES_128: /* AES128 */
		return CH_CK_SIZE_128;
	case AES_192: /* AES192 */
		return CH_CK_SIZE_192;
	case AES_256: /* AES256 */
		return CH_CK_SIZE_256;
	default:
		return CH_CK_SIZE_256;
	}
}

static unsigned char
get_mac_key_size(unsigned int mk_size)
{
	switch (mk_size) {
	case SHA_NOP: /* NOP */
		return CH_MK_SIZE_128;
	case SHA_GHASH: /* GHASH */
	case SHA_512: /* SHA512 */
		return CH_MK_SIZE_512;
	case SHA_224: /* SHA2-224 */
		return CH_MK_SIZE_192;
	case SHA_256: /* SHA2-256 */
		return CH_MK_SIZE_256;
	case SHA_384: /* SHA384 */
		return CH_MK_SIZE_512;
	case SHA1: /* SHA1 */
	default:
		return CH_MK_SIZE_160;
	}
}

static unsigned int
get_proto_ver(int proto_ver)
{
	switch (proto_ver) {
	case TLS1_2_VERSION:
		return TLS_1_2_VERSION;
	case TLS1_1_VERSION:
		return TLS_1_1_VERSION;
	case DTLS1_2_VERSION:
		return DTLS_1_2_VERSION;
	default:
		return TLS_VERSION_MAX;
	}
}

static void
tls_rxkey_flit1(struct tls_keyctx *kwr, struct tls_key_context *kctx)
{

	if (kctx->state.enc_mode == CH_EVP_CIPH_GCM_MODE) {
		kwr->u.rxhdr.ivinsert_to_authinsrt =
		    htobe64(V_TLS_KEYCTX_TX_WR_IVINSERT(6ULL) |
			V_TLS_KEYCTX_TX_WR_AADSTRTOFST(1ULL) |
			V_TLS_KEYCTX_TX_WR_AADSTOPOFST(5ULL) |
			V_TLS_KEYCTX_TX_WR_AUTHSRTOFST(14ULL) |
			V_TLS_KEYCTX_TX_WR_AUTHSTOPOFST(16ULL) |
			V_TLS_KEYCTX_TX_WR_CIPHERSRTOFST(14ULL) |
			V_TLS_KEYCTX_TX_WR_CIPHERSTOPOFST(0ULL) |
			V_TLS_KEYCTX_TX_WR_AUTHINSRT(16ULL));
		kwr->u.rxhdr.ivpresent_to_rxmk_size &=
		    ~(V_TLS_KEYCTX_TX_WR_RXOPAD_PRESENT(1));
		kwr->u.rxhdr.authmode_to_rxvalid &=
		    ~(V_TLS_KEYCTX_TX_WR_CIPHAUTHSEQCTRL(1));
	} else {
		kwr->u.rxhdr.ivinsert_to_authinsrt =
		    htobe64(V_TLS_KEYCTX_TX_WR_IVINSERT(6ULL) |
			V_TLS_KEYCTX_TX_WR_AADSTRTOFST(1ULL) |
			V_TLS_KEYCTX_TX_WR_AADSTOPOFST(5ULL) |
			V_TLS_KEYCTX_TX_WR_AUTHSRTOFST(22ULL) |
			V_TLS_KEYCTX_TX_WR_AUTHSTOPOFST(0ULL) |
			V_TLS_KEYCTX_TX_WR_CIPHERSRTOFST(22ULL) |
			V_TLS_KEYCTX_TX_WR_CIPHERSTOPOFST(0ULL) |
			V_TLS_KEYCTX_TX_WR_AUTHINSRT(0ULL));
	}
}
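
/*
 * A side note on the receive key material (inferred from
 * prepare_rxkey_wr() below): CBC contexts store the AES decryption key
 * schedule derived by t4_aes_getdeckey(), while GCM reuses the
 * encryption key as-is since counter mode only runs the forward cipher.
 */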

/* Rx key */
static void
prepare_rxkey_wr(struct tls_keyctx *kwr, struct tls_key_context *kctx)
{
	unsigned int ck_size = kctx->cipher_secret_size;
	unsigned int mk_size = kctx->mac_secret_size;
	int proto_ver = kctx->proto_ver;

	kwr->u.rxhdr.flitcnt_hmacctrl =
	    ((kctx->tx_key_info_size >> 4) << 3) | kctx->hmac_ctrl;

	kwr->u.rxhdr.protover_ciphmode =
	    V_TLS_KEYCTX_TX_WR_PROTOVER(get_proto_ver(proto_ver)) |
	    V_TLS_KEYCTX_TX_WR_CIPHMODE(kctx->state.enc_mode);

	kwr->u.rxhdr.authmode_to_rxvalid =
	    V_TLS_KEYCTX_TX_WR_AUTHMODE(kctx->state.auth_mode) |
	    V_TLS_KEYCTX_TX_WR_CIPHAUTHSEQCTRL(1) |
	    V_TLS_KEYCTX_TX_WR_SEQNUMCTRL(3) |
	    V_TLS_KEYCTX_TX_WR_RXVALID(1);

	kwr->u.rxhdr.ivpresent_to_rxmk_size =
	    V_TLS_KEYCTX_TX_WR_IVPRESENT(0) |
	    V_TLS_KEYCTX_TX_WR_RXOPAD_PRESENT(1) |
	    V_TLS_KEYCTX_TX_WR_RXCK_SIZE(get_cipher_key_size(ck_size)) |
	    V_TLS_KEYCTX_TX_WR_RXMK_SIZE(get_mac_key_size(mk_size));

	tls_rxkey_flit1(kwr, kctx);

	/* No key reversal for GCM */
	if (kctx->state.enc_mode != CH_EVP_CIPH_GCM_MODE) {
		t4_aes_getdeckey(kwr->keys.edkey, kctx->rx.key,
		    (kctx->cipher_secret_size << 3));
		memcpy(kwr->keys.edkey + kctx->cipher_secret_size,
		    kctx->rx.key + kctx->cipher_secret_size,
		    (IPAD_SIZE + OPAD_SIZE));
	} else {
		memcpy(kwr->keys.edkey, kctx->rx.key,
		    (kctx->tx_key_info_size - SALT_SIZE));
		memcpy(kwr->u.rxhdr.rxsalt, kctx->rx.salt, SALT_SIZE);
	}
}

/* Tx key */
static void
prepare_txkey_wr(struct tls_keyctx *kwr, struct tls_key_context *kctx)
{
	unsigned int ck_size = kctx->cipher_secret_size;
	unsigned int mk_size = kctx->mac_secret_size;

	kwr->u.txhdr.ctxlen =
	    (kctx->tx_key_info_size >> 4);
	kwr->u.txhdr.dualck_to_txvalid =
	    V_TLS_KEYCTX_TX_WR_TXOPAD_PRESENT(1) |
	    V_TLS_KEYCTX_TX_WR_SALT_PRESENT(1) |
	    V_TLS_KEYCTX_TX_WR_TXCK_SIZE(get_cipher_key_size(ck_size)) |
	    V_TLS_KEYCTX_TX_WR_TXMK_SIZE(get_mac_key_size(mk_size)) |
	    V_TLS_KEYCTX_TX_WR_TXVALID(1);

	memcpy(kwr->keys.edkey, kctx->tx.key, HDR_KCTX_SIZE);
	if (kctx->state.enc_mode == CH_EVP_CIPH_GCM_MODE) {
		memcpy(kwr->u.txhdr.txsalt, kctx->tx.salt, SALT_SIZE);
		kwr->u.txhdr.dualck_to_txvalid &=
		    ~(V_TLS_KEYCTX_TX_WR_TXOPAD_PRESENT(1));
	}
	kwr->u.txhdr.dualck_to_txvalid = htons(kwr->u.txhdr.dualck_to_txvalid);
}

/* TLS Key memory management */
static int
get_new_keyid(struct toepcb *toep, struct tls_key_context *k_ctx)
{
	struct adapter *sc = td_adapter(toep->td);
	vmem_addr_t addr;

	if (vmem_alloc(sc->key_map, TLS_KEY_CONTEXT_SZ, M_NOWAIT | M_FIRSTFIT,
	    &addr) != 0)
		return (-1);

	return (addr);
}

static void
free_keyid(struct toepcb *toep, int keyid)
{
	struct adapter *sc = td_adapter(toep->td);

	vmem_free(sc->key_map, keyid, TLS_KEY_CONTEXT_SZ);
}

static void
clear_tls_keyid(struct toepcb *toep)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;

	if (tls_ofld->rx_key_addr >= 0) {
		free_keyid(toep, tls_ofld->rx_key_addr);
		tls_ofld->rx_key_addr = -1;
	}
	if (tls_ofld->tx_key_addr >= 0) {
		free_keyid(toep, tls_ofld->tx_key_addr);
		tls_ofld->tx_key_addr = -1;
	}
}
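
/*
 * Return the key address already programmed for the requested direction
 * (the renegotiation case in tls_program_key_id()), or -1.
 */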
static int
get_keyid(struct tls_ofld_info *tls_ofld, unsigned int ops)
{
	return (ops & KEY_WRITE_RX ? tls_ofld->rx_key_addr :
	    ((ops & KEY_WRITE_TX) ? tls_ofld->tx_key_addr : -1));
}

static int
get_tp_plen_max(struct tls_ofld_info *tls_ofld)
{
	int plen = ((min(3*4096, TP_TX_PG_SZ))/1448) * 1448;

	return (tls_ofld->k_ctx.frag_size <= 8192 ? plen : FC_TP_PLEN_MAX);
}

/* Send request to get the key-id */
static int
tls_program_key_id(struct toepcb *toep, struct tls_key_context *k_ctx)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;
	struct adapter *sc = td_adapter(toep->td);
	struct ofld_tx_sdesc *txsd;
	int kwrlen, kctxlen, keyid, len;
	struct wrqe *wr;
	struct tls_key_req *kwr;
	struct tls_keyctx *kctx;

	kwrlen = sizeof(*kwr);
	kctxlen = roundup2(sizeof(*kctx), 32);
	len = roundup2(kwrlen + kctxlen, 16);

	if (toep->txsd_avail == 0)
		return (EAGAIN);

	/* Don't initialize key for re-neg */
	if (!G_KEY_CLR_LOC(k_ctx->l_p_key)) {
		if ((keyid = get_new_keyid(toep, k_ctx)) < 0) {
			return (ENOSPC);
		}
	} else {
		keyid = get_keyid(tls_ofld, k_ctx->l_p_key);
	}

	wr = alloc_wrqe(len, toep->ofld_txq);
	if (wr == NULL) {
		free_keyid(toep, keyid);
		return (ENOMEM);
	}
	kwr = wrtod(wr);
	memset(kwr, 0, kwrlen);

	kwr->wr_hi = htobe32(V_FW_WR_OP(FW_ULPTX_WR) | F_FW_WR_COMPL |
	    F_FW_WR_ATOMIC);
	kwr->wr_mid = htobe32(V_FW_WR_LEN16(DIV_ROUND_UP(len, 16)) |
	    V_FW_WR_FLOWID(toep->tid));
	kwr->protocol = get_proto_ver(k_ctx->proto_ver);
	kwr->mfs = htons(k_ctx->frag_size);
	kwr->reneg_to_write_rx = k_ctx->l_p_key;

	/* master command */
	kwr->cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
	    V_T5_ULP_MEMIO_ORDER(1) | V_T5_ULP_MEMIO_IMM(1));
	kwr->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(kctxlen >> 5));
	kwr->len16 = htobe32((toep->tid << 8) |
	    DIV_ROUND_UP(len - sizeof(struct work_request_hdr), 16));
	kwr->kaddr = htobe32(V_ULP_MEMIO_ADDR(keyid >> 5));

	/* sub command */
	kwr->sc_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	kwr->sc_len = htobe32(kctxlen);

	kctx = (struct tls_keyctx *)(kwr + 1);
	memset(kctx, 0, kctxlen);

	if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_TX) {
		tls_ofld->tx_key_addr = keyid;
		prepare_txkey_wr(kctx, k_ctx);
	} else if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) {
		tls_ofld->rx_key_addr = keyid;
		prepare_rxkey_wr(kctx, k_ctx);
	}

	txsd = &toep->txsd[toep->txsd_pidx];
	txsd->tx_credits = DIV_ROUND_UP(len, 16);
	txsd->plen = 0;
	toep->tx_credits -= txsd->tx_credits;
	if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
		toep->txsd_pidx = 0;
	toep->txsd_avail--;

	t4_wrq_tx(sc, wr);

	return (0);
}
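
/*
 * Layout note for the work request above: the key context is written
 * into adapter memory via ULP_TX_MEM_WRITE with immediate data, and
 * both the destination address (kaddr) and data length (dlen) are
 * expressed in 32-byte units (hence the >> 5), matching the
 * TLS_KEY_CONTEXT_SZ-sized vmem allocations.
 */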

/* Store a key received from SSL in DDR. */
static int
program_key_context(struct tcpcb *tp, struct toepcb *toep,
    struct tls_key_context *uk_ctx)
{
	struct adapter *sc = td_adapter(toep->td);
	struct tls_ofld_info *tls_ofld = &toep->tls;
	struct tls_key_context *k_ctx;
	int error, key_offset;

	if (tp->t_state != TCPS_ESTABLISHED) {
		/*
		 * XXX: Matches Linux driver, but not sure this is a
		 * very appropriate error.
		 */
		return (ENOENT);
	}

	/* Stop timer on handshake completion */
	tls_stop_handshake_timer(toep);

	toep->flags &= ~TPF_FORCE_CREDITS;

	CTR4(KTR_CXGBE, "%s: tid %d %s proto_ver %#x", __func__, toep->tid,
	    G_KEY_GET_LOC(uk_ctx->l_p_key) == KEY_WRITE_RX ? "KEY_WRITE_RX" :
	    "KEY_WRITE_TX", uk_ctx->proto_ver);

	if (G_KEY_GET_LOC(uk_ctx->l_p_key) == KEY_WRITE_RX &&
	    toep->ulp_mode != ULP_MODE_TLS)
		return (EOPNOTSUPP);

	/* Don't copy the 'tx' and 'rx' fields. */
	k_ctx = &tls_ofld->k_ctx;
	memcpy(&k_ctx->l_p_key, &uk_ctx->l_p_key,
	    sizeof(*k_ctx) - offsetof(struct tls_key_context, l_p_key));

	/* TLS version != 1.1 and !1.2 OR DTLS != 1.2 */
	if (get_proto_ver(k_ctx->proto_ver) > DTLS_1_2_VERSION) {
		if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) {
			tls_ofld->rx_key_addr = -1;
			t4_clear_rx_quiesce(toep);
		} else {
			tls_ofld->tx_key_addr = -1;
		}
		return (0);
	}

	if (k_ctx->state.enc_mode == CH_EVP_CIPH_GCM_MODE) {
		k_ctx->iv_size = 4;
		k_ctx->mac_first = 0;
		k_ctx->hmac_ctrl = 0;
	} else {
		k_ctx->iv_size = 8; /* for CBC, iv is 16B, unit of 2B */
		k_ctx->mac_first = 1;
	}

	tls_ofld->scmd0.seqno_numivs =
	    (V_SCMD_SEQ_NO_CTRL(3) |
	    V_SCMD_PROTO_VERSION(get_proto_ver(k_ctx->proto_ver)) |
	    V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
	    V_SCMD_CIPH_AUTH_SEQ_CTRL((k_ctx->mac_first == 0)) |
	    V_SCMD_CIPH_MODE(k_ctx->state.enc_mode) |
	    V_SCMD_AUTH_MODE(k_ctx->state.auth_mode) |
	    V_SCMD_HMAC_CTRL(k_ctx->hmac_ctrl) |
	    V_SCMD_IV_SIZE(k_ctx->iv_size));

	tls_ofld->scmd0.ivgen_hdrlen =
	    (V_SCMD_IV_GEN_CTRL(k_ctx->iv_ctrl) |
	    V_SCMD_KEY_CTX_INLINE(0) |
	    V_SCMD_TLS_FRAG_ENABLE(1));

	tls_ofld->mac_length = k_ctx->mac_secret_size;

	if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) {
		k_ctx->rx = uk_ctx->rx;
		/* Don't initialize key for re-neg */
		if (!G_KEY_CLR_LOC(k_ctx->l_p_key))
			tls_ofld->rx_key_addr = -1;
	} else {
		k_ctx->tx = uk_ctx->tx;
		/* Don't initialize key for re-neg */
		if (!G_KEY_CLR_LOC(k_ctx->l_p_key))
			tls_ofld->tx_key_addr = -1;
	}

	/* Flush pending data before new Tx key becomes active */
	if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_TX) {
		struct sockbuf *sb;

		/* XXX: This might not drain everything. */
		t4_push_frames(sc, toep, 0);
		sb = &toep->inp->inp_socket->so_snd;
		SOCKBUF_LOCK(sb);

		/* XXX: This asserts that everything has been pushed. */
		MPASS(sb->sb_sndptr == NULL || sb->sb_sndptr->m_next == NULL);
		sb->sb_sndptr = NULL;
		tls_ofld->sb_off = sbavail(sb);
		SOCKBUF_UNLOCK(sb);
		tls_ofld->tx_seq_no = 0;
	}

	if ((G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) ||
	    (tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_DDR)) {
		error = tls_program_key_id(toep, k_ctx);
		if (error) {
			/* XXX: Only clear quiesce for KEY_WRITE_RX? */
			t4_clear_rx_quiesce(toep);
			return (error);
		}
	}
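
	/*
	 * Finally, activate the new state: for Rx, point the TCB at the
	 * key and re-enable receive; for Tx, precompute the per-ULP
	 * sizing that write_tlstx_wr() later stamps into each
	 * FW_TLSTX_DATA_WR.
	 */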
	if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) {
		/*
		 * RX key tags are an index into the key portion of MA
		 * memory stored as an offset from the base address in
		 * units of 64 bytes.
		 */
		key_offset = tls_ofld->rx_key_addr - sc->vres.key.start;
		t4_set_tls_keyid(toep, key_offset / 64);
		t4_set_tls_tcb_field(toep, W_TCB_ULP_RAW,
		    V_TCB_ULP_RAW(M_TCB_ULP_RAW),
		    V_TCB_ULP_RAW((V_TF_TLS_KEY_SIZE(3) |
			V_TF_TLS_CONTROL(1) |
			V_TF_TLS_ACTIVE(1) |
			V_TF_TLS_ENABLE(1))));
		t4_set_tls_tcb_field(toep, W_TCB_TLS_SEQ,
		    V_TCB_TLS_SEQ(M_TCB_TLS_SEQ),
		    V_TCB_TLS_SEQ(0));
		t4_clear_rx_quiesce(toep);
	} else {
		unsigned short pdus_per_ulp;

		if (tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_IMMEDIATE)
			tls_ofld->tx_key_addr = 1;

		tls_ofld->fcplenmax = get_tp_plen_max(tls_ofld);
		tls_ofld->expn_per_ulp = tls_expansion_size(toep,
		    tls_ofld->fcplenmax, 1, &pdus_per_ulp);
		tls_ofld->pdus_per_ulp = pdus_per_ulp;
		tls_ofld->adjusted_plen = tls_ofld->pdus_per_ulp *
		    ((tls_ofld->expn_per_ulp / tls_ofld->pdus_per_ulp) +
		    tls_ofld->k_ctx.frag_size);
	}

	return (0);
}

/*
 * In some cases a client connection can hang without sending the
 * ServerHelloDone message from the NIC to the host.  Send a dummy
 * RX_DATA_ACK with RX_MODULATE to unstick the connection.
 */
static void
tls_send_handshake_ack(void *arg)
{
	struct toepcb *toep = arg;
	struct tls_ofld_info *tls_ofld = &toep->tls;
	struct adapter *sc = td_adapter(toep->td);

	/*
	 * XXX: Does not have the t4_get_tcb() checks to refine the
	 * workaround.
	 */
	callout_schedule(&tls_ofld->handshake_timer, TLS_SRV_HELLO_RD_TM * hz);

	CTR2(KTR_CXGBE, "%s: tid %d sending RX_DATA_ACK", __func__, toep->tid);
	send_rx_modulate(sc, toep);
}

static void
tls_start_handshake_timer(struct toepcb *toep)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;

	mtx_lock(&tls_handshake_lock);
	callout_reset(&tls_ofld->handshake_timer, TLS_SRV_HELLO_BKOFF_TM * hz,
	    tls_send_handshake_ack, toep);
	mtx_unlock(&tls_handshake_lock);
}

void
tls_stop_handshake_timer(struct toepcb *toep)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;

	mtx_lock(&tls_handshake_lock);
	callout_stop(&tls_ofld->handshake_timer);
	mtx_unlock(&tls_handshake_lock);
}
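
/*
 * Handle the TCP_TLSOM_* socket options used by a TOE-aware SSL
 * library.  Roughly (a sketch of the expected userland side, not code
 * from this driver), after completing a handshake on socket 's':
 *
 *	struct tls_key_context kctx;
 *
 *	build_key_context(&kctx);	(hypothetical helper)
 *	setsockopt(s, IPPROTO_TCP, TCP_TLSOM_SET_TLS_CONTEXT,
 *	    &kctx, sizeof(kctx));
 */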
int
t4_ctloutput_tls(struct socket *so, struct sockopt *sopt)
{
	struct tls_key_context uk_ctx;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct toepcb *toep;
	int error, optval;

	error = 0;
	if (sopt->sopt_dir == SOPT_SET &&
	    sopt->sopt_name == TCP_TLSOM_SET_TLS_CONTEXT) {
		error = sooptcopyin(sopt, &uk_ctx, sizeof(uk_ctx),
		    sizeof(uk_ctx));
		if (error)
			return (error);
	}

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("tcp_ctloutput: inp == NULL"));
	INP_WLOCK(inp);
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		INP_WUNLOCK(inp);
		return (ECONNRESET);
	}
	tp = intotcpcb(inp);
	toep = tp->t_toe;
	switch (sopt->sopt_dir) {
	case SOPT_SET:
		switch (sopt->sopt_name) {
		case TCP_TLSOM_SET_TLS_CONTEXT:
			error = program_key_context(tp, toep, &uk_ctx);
			INP_WUNLOCK(inp);
			break;
		case TCP_TLSOM_CLR_TLS_TOM:
			if (toep->ulp_mode == ULP_MODE_TLS) {
				CTR2(KTR_CXGBE, "%s: tid %d CLR_TLS_TOM",
				    __func__, toep->tid);
				tls_clr_ofld_mode(toep);
			} else
				error = EOPNOTSUPP;
			INP_WUNLOCK(inp);
			break;
		case TCP_TLSOM_CLR_QUIES:
			if (toep->ulp_mode == ULP_MODE_TLS) {
				CTR2(KTR_CXGBE, "%s: tid %d CLR_QUIES",
				    __func__, toep->tid);
				tls_clr_quiesce(toep);
			} else
				error = EOPNOTSUPP;
			INP_WUNLOCK(inp);
			break;
		default:
			INP_WUNLOCK(inp);
			error = EOPNOTSUPP;
			break;
		}
		break;
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case TCP_TLSOM_GET_TLS_TOM:
			/*
			 * TLS TX is permitted on any TOE socket, but
			 * TLS RX requires a TLS ULP mode.
			 */
			optval = TLS_TOM_NONE;
			if (can_tls_offload(td_adapter(toep->td))) {
				switch (toep->ulp_mode) {
				case ULP_MODE_NONE:
				case ULP_MODE_TCPDDP:
					optval = TLS_TOM_TXONLY;
					break;
				case ULP_MODE_TLS:
					optval = TLS_TOM_BOTH;
					break;
				}
			}
			CTR3(KTR_CXGBE, "%s: tid %d GET_TLS_TOM = %d",
			    __func__, toep->tid, optval);
			INP_WUNLOCK(inp);
			error = sooptcopyout(sopt, &optval, sizeof(optval));
			break;
		default:
			INP_WUNLOCK(inp);
			error = EOPNOTSUPP;
			break;
		}
		break;
	}
	return (error);
}

void
tls_init_toep(struct toepcb *toep)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;

	tls_ofld->key_location = TLS_SFO_WR_CONTEXTLOC_DDR;
	tls_ofld->rx_key_addr = -1;
	tls_ofld->tx_key_addr = -1;
	if (toep->ulp_mode == ULP_MODE_TLS)
		callout_init_mtx(&tls_ofld->handshake_timer,
		    &tls_handshake_lock, 0);
}

void
tls_establish(struct toepcb *toep)
{

	/*
	 * Enable PDU extraction.
	 *
	 * XXX: Supposedly this should be done by the firmware when
	 * the ULP_MODE FLOWC parameter is set in send_flowc_wr(), but
	 * in practice this seems to be required.
	 */
	CTR2(KTR_CXGBE, "%s: tid %d setting TLS_ENABLE", __func__, toep->tid);
	t4_set_tls_tcb_field(toep, W_TCB_ULP_RAW, V_TCB_ULP_RAW(M_TCB_ULP_RAW),
	    V_TCB_ULP_RAW(V_TF_TLS_ENABLE(1)));

	toep->flags |= TPF_FORCE_CREDITS;

	tls_start_handshake_timer(toep);
}

void
tls_uninit_toep(struct toepcb *toep)
{

	if (toep->ulp_mode == ULP_MODE_TLS)
		tls_stop_handshake_timer(toep);
	clear_tls_keyid(toep);
}

#define MAX_OFLD_TX_CREDITS (SGE_MAX_WR_LEN / 16)
#define MIN_OFLD_TLSTX_CREDITS(toep) \
	(howmany(sizeof(struct fw_tlstx_data_wr) + \
	    sizeof(struct cpl_tx_tls_sfo) + key_size((toep)) + \
	    CIPHER_BLOCK_SIZE + 1, 16))
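
/*
 * Credit arithmetic (each credit is 16 bytes of work-request space):
 * the minimum above is the smallest useful TLS Tx WR, i.e. the
 * FW_TLSTX_DATA_WR and CPL_TX_TLS_SFO headers, the key material (or
 * its DDR descriptor), one cipher block of IV, and at least one byte
 * of payload.
 */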
static inline u_int
max_imm_tls_space(int tx_credits)
{
	const int n = 2;	/* Use only up to 2 desc for imm. data WR */
	int space;

	KASSERT(tx_credits >= 0 &&
	    tx_credits <= MAX_OFLD_TX_CREDITS,
	    ("%s: %d credits", __func__, tx_credits));

	if (tx_credits >= (n * EQ_ESIZE) / 16)
		space = (n * EQ_ESIZE);
	else
		space = tx_credits * 16;
	return (space);
}

static int
count_mbuf_segs(struct mbuf *m, int skip, int len, int *max_nsegs_1mbufp)
{
	int max_nsegs_1mbuf, n, nsegs;

	while (skip >= m->m_len) {
		skip -= m->m_len;
		m = m->m_next;
	}

	nsegs = 0;
	max_nsegs_1mbuf = 0;
	while (len > 0) {
		n = sglist_count(mtod(m, char *) + skip, m->m_len - skip);
		if (n > max_nsegs_1mbuf)
			max_nsegs_1mbuf = n;
		nsegs += n;
		len -= m->m_len - skip;
		skip = 0;
		m = m->m_next;
	}
	*max_nsegs_1mbufp = max_nsegs_1mbuf;
	return (nsegs);
}

static void
write_tlstx_wr(struct fw_tlstx_data_wr *txwr, struct toepcb *toep,
    unsigned int immdlen, unsigned int plen, unsigned int expn,
    unsigned int pdus, uint8_t credits, int shove, int imm_ivs)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;
	unsigned int len = plen + expn;

	txwr->op_to_immdlen = htobe32(V_WR_OP(FW_TLSTX_DATA_WR) |
	    V_FW_TLSTX_DATA_WR_COMPL(1) |
	    V_FW_TLSTX_DATA_WR_IMMDLEN(immdlen));
	txwr->flowid_len16 = htobe32(V_FW_TLSTX_DATA_WR_FLOWID(toep->tid) |
	    V_FW_TLSTX_DATA_WR_LEN16(credits));
	txwr->plen = htobe32(len);
	txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(ULP_MODE_TLS) |
	    V_TX_URG(0) | /* F_T6_TX_FORCE | */ V_TX_SHOVE(shove));
	txwr->ctxloc_to_exp = htobe32(V_FW_TLSTX_DATA_WR_NUMIVS(pdus) |
	    V_FW_TLSTX_DATA_WR_EXP(expn) |
	    V_FW_TLSTX_DATA_WR_CTXLOC(tls_ofld->key_location) |
	    V_FW_TLSTX_DATA_WR_IVDSGL(!imm_ivs) |
	    V_FW_TLSTX_DATA_WR_KEYSIZE(tls_ofld->k_ctx.tx_key_info_size >> 4));
	txwr->mfs = htobe16(tls_ofld->k_ctx.frag_size);
	txwr->adjustedplen_pkd = htobe16(
	    V_FW_TLSTX_DATA_WR_ADJUSTEDPLEN(tls_ofld->adjusted_plen));
	txwr->expinplenmax_pkd = htobe16(
	    V_FW_TLSTX_DATA_WR_EXPINPLENMAX(tls_ofld->expn_per_ulp));
	txwr->pdusinplenmax_pkd = htobe16(
	    V_FW_TLSTX_DATA_WR_PDUSINPLENMAX(tls_ofld->pdus_per_ulp));
}

static void
write_tlstx_cpl(struct cpl_tx_tls_sfo *cpl, struct toepcb *toep,
    struct tls_hdr *tls_hdr, unsigned int plen, unsigned int pdus)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;
	int data_type, seglen;

	if (plen < tls_ofld->k_ctx.frag_size)
		seglen = plen;
	else
		seglen = tls_ofld->k_ctx.frag_size;
	data_type = tls_content_type(tls_hdr->type);
	cpl->op_to_seg_len = htobe32(V_CPL_TX_TLS_SFO_OPCODE(CPL_TX_TLS_SFO) |
	    V_CPL_TX_TLS_SFO_DATA_TYPE(data_type) |
	    V_CPL_TX_TLS_SFO_CPL_LEN(2) | V_CPL_TX_TLS_SFO_SEG_LEN(seglen));
	cpl->pld_len = htobe32(plen);
	if (data_type == CPL_TX_TLS_SFO_TYPE_HEARTBEAT)
		cpl->type_protover = htobe32(
		    V_CPL_TX_TLS_SFO_TYPE(tls_hdr->type));
	cpl->seqno_numivs = htobe32(tls_ofld->scmd0.seqno_numivs |
	    V_SCMD_NUM_IVS(pdus));
	cpl->ivgen_hdrlen = htobe32(tls_ofld->scmd0.ivgen_hdrlen);
	cpl->scmd1 = htobe64(tls_ofld->tx_seq_no);
	tls_ofld->tx_seq_no += pdus;
}
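
/*
 * ULPTX DSGL layout note for the routine below: the first segment's
 * length and address sit in the ulptx_sgl header itself (len0/addr0);
 * the remaining segments are packed in pairs, which is what the i / 2
 * and i & 1 indexing implements, and an odd trailing length slot is
 * zeroed at the end.
 */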
/*
 * Similar to write_tx_sgl() except that it accepts an optional
 * trailer buffer for IVs.
 */
static void
write_tlstx_sgl(void *dst, struct mbuf *start, int skip, int plen,
    void *iv_buffer, int iv_len, int nsegs, int n)
{
	struct mbuf *m;
	struct ulptx_sgl *usgl = dst;
	int i, j, rc;
	struct sglist sg;
	struct sglist_seg segs[n];

	KASSERT(nsegs > 0, ("%s: nsegs 0", __func__));

	sglist_init(&sg, n, segs);
	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
	    V_ULPTX_NSGE(nsegs));

	for (m = start; skip >= m->m_len; m = m->m_next)
		skip -= m->m_len;

	i = -1;
	for (m = start; plen > 0; m = m->m_next) {
		rc = sglist_append(&sg, mtod(m, char *) + skip,
		    m->m_len - skip);
		if (__predict_false(rc != 0))
			panic("%s: sglist_append %d", __func__, rc);
		plen -= m->m_len - skip;
		skip = 0;

		for (j = 0; j < sg.sg_nseg; i++, j++) {
			if (i < 0) {
				usgl->len0 = htobe32(segs[j].ss_len);
				usgl->addr0 = htobe64(segs[j].ss_paddr);
			} else {
				usgl->sge[i / 2].len[i & 1] =
				    htobe32(segs[j].ss_len);
				usgl->sge[i / 2].addr[i & 1] =
				    htobe64(segs[j].ss_paddr);
			}
#ifdef INVARIANTS
			nsegs--;
#endif
		}
		sglist_reset(&sg);
	}
	if (iv_buffer != NULL) {
		rc = sglist_append(&sg, iv_buffer, iv_len);
		if (__predict_false(rc != 0))
			panic("%s: sglist_append %d", __func__, rc);

		for (j = 0; j < sg.sg_nseg; i++, j++) {
			if (i < 0) {
				usgl->len0 = htobe32(segs[j].ss_len);
				usgl->addr0 = htobe64(segs[j].ss_paddr);
			} else {
				usgl->sge[i / 2].len[i & 1] =
				    htobe32(segs[j].ss_len);
				usgl->sge[i / 2].addr[i & 1] =
				    htobe64(segs[j].ss_paddr);
			}
#ifdef INVARIANTS
			nsegs--;
#endif
		}
	}
	if (i & 1)
		usgl->sge[i / 2].len[1] = htobe32(0);
	KASSERT(nsegs == 0, ("%s: nsegs %d, start %p, iv_buffer %p",
	    __func__, nsegs, start, iv_buffer));
}

/*
 * Similar to t4_push_frames() but handles TLS sockets when TLS offload
 * is enabled.  Rather than transmitting bulk data, the socket buffer
 * contains TLS records.  The work request requires a full TLS record,
 * so batch mbufs up until a full TLS record is seen.  This requires
 * reading the TLS header out of the start of each record to determine
 * its length.
 */
void
t4_push_tls_records(struct adapter *sc, struct toepcb *toep, int drop)
{
	struct tls_hdr thdr;
	struct mbuf *sndptr;
	struct fw_tlstx_data_wr *txwr;
	struct cpl_tx_tls_sfo *cpl;
	struct wrqe *wr;
	u_int plen, nsegs, credits, space, max_nsegs_1mbuf, wr_len;
	u_int expn_size, iv_len, pdus, sndptroff;
	struct tls_ofld_info *tls_ofld = &toep->tls;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);
	struct socket *so = inp->inp_socket;
	struct sockbuf *sb = &so->so_snd;
	int tls_size, tx_credits, shove, /* compl,*/ sowwakeup;
	struct ofld_tx_sdesc *txsd;
	bool imm_ivs, imm_payload;
	void *iv_buffer, *iv_dst, *buf;

	INP_WLOCK_ASSERT(inp);
	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));

	KASSERT(toep->ulp_mode == ULP_MODE_NONE ||
	    toep->ulp_mode == ULP_MODE_TCPDDP || toep->ulp_mode == ULP_MODE_TLS,
	    ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep));
	KASSERT(tls_tx_key(toep),
	    ("%s: TX key not set for toep %p", __func__, toep));

#ifdef VERBOSE_TRACES
	CTR5(KTR_CXGBE, "%s: tid %d toep flags %#x tp flags %#x drop %d",
	    __func__, toep->tid, toep->flags, tp->t_flags, drop);
#endif
	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN))
		return;

#ifdef RATELIMIT
	if (__predict_false(inp->inp_flags2 & INP_RATE_LIMIT_CHANGED) &&
	    (update_tx_rate_limit(sc, toep, so->so_max_pacing_rate) == 0)) {
		inp->inp_flags2 &= ~INP_RATE_LIMIT_CHANGED;
	}
#endif

	/*
	 * This function doesn't resume by itself.  Someone else must clear the
	 * flag and call this function.
	 */
	if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) {
		KASSERT(drop == 0,
		    ("%s: drop (%d) != 0 but tx is suspended", __func__, drop));
		return;
	}

	txsd = &toep->txsd[toep->txsd_pidx];
	for (;;) {
		tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
		space = max_imm_tls_space(tx_credits);
		wr_len = sizeof(struct fw_tlstx_data_wr) +
		    sizeof(struct cpl_tx_tls_sfo) + key_size(toep);
		if (wr_len + CIPHER_BLOCK_SIZE + 1 > space) {
#ifdef VERBOSE_TRACES
			CTR5(KTR_CXGBE,
			    "%s: tid %d tx_credits %d min_wr %d space %d",
			    __func__, toep->tid, tx_credits, wr_len +
			    CIPHER_BLOCK_SIZE + 1, space);
#endif
			return;
		}

		SOCKBUF_LOCK(sb);
		sowwakeup = drop;
		if (drop) {
			sbdrop_locked(sb, drop);
			MPASS(tls_ofld->sb_off >= drop);
			tls_ofld->sb_off -= drop;
			drop = 0;
		}

		/*
		 * Send a FIN if requested, but only if there's no
		 * more data to send.
		 */
		if (sbavail(sb) == tls_ofld->sb_off &&
		    toep->flags & TPF_SEND_FIN) {
			if (sowwakeup)
				sowwakeup_locked(so);
			else
				SOCKBUF_UNLOCK(sb);
			SOCKBUF_UNLOCK_ASSERT(sb);
			t4_close_conn(sc, toep);
			return;
		}

		if (sbavail(sb) < tls_ofld->sb_off + TLS_HEADER_LENGTH) {
			/*
			 * A full TLS header is not yet queued, stop
			 * for now until more data is added to the
			 * socket buffer.  However, if the connection
			 * has been closed, we will never get the rest
			 * of the header so just discard the partial
			 * header and close the connection.
			 */
#ifdef VERBOSE_TRACES
			CTR5(KTR_CXGBE, "%s: tid %d sbavail %d sb_off %d%s",
			    __func__, toep->tid, sbavail(sb), tls_ofld->sb_off,
			    toep->flags & TPF_SEND_FIN ? "" : " SEND_FIN");
#endif
"" : " SEND_FIN"); 1182 #endif 1183 if (sowwakeup) 1184 sowwakeup_locked(so); 1185 else 1186 SOCKBUF_UNLOCK(sb); 1187 SOCKBUF_UNLOCK_ASSERT(sb); 1188 if (toep->flags & TPF_SEND_FIN) 1189 t4_close_conn(sc, toep); 1190 return; 1191 } 1192 1193 /* Read the header of the next TLS record. */ 1194 sndptr = sbsndmbuf(sb, tls_ofld->sb_off, &sndptroff); 1195 MPASS(!IS_AIOTX_MBUF(sndptr)); 1196 m_copydata(sndptr, sndptroff, sizeof(thdr), (caddr_t)&thdr); 1197 tls_size = htons(thdr.length); 1198 plen = TLS_HEADER_LENGTH + tls_size; 1199 pdus = howmany(tls_size, tls_ofld->k_ctx.frag_size); 1200 iv_len = pdus * CIPHER_BLOCK_SIZE; 1201 1202 if (sbavail(sb) < tls_ofld->sb_off + plen) { 1203 /* 1204 * The full TLS record is not yet queued, stop 1205 * for now until more data is added to the 1206 * socket buffer. However, if the connection 1207 * has been closed, we will never get the rest 1208 * of the record so just discard the partial 1209 * record and close the connection. 1210 */ 1211 #ifdef VERBOSE_TRACES 1212 CTR6(KTR_CXGBE, 1213 "%s: tid %d sbavail %d sb_off %d plen %d%s", 1214 __func__, toep->tid, sbavail(sb), tls_ofld->sb_off, 1215 plen, toep->flags & TPF_SEND_FIN ? "" : 1216 " SEND_FIN"); 1217 #endif 1218 if (sowwakeup) 1219 sowwakeup_locked(so); 1220 else 1221 SOCKBUF_UNLOCK(sb); 1222 SOCKBUF_UNLOCK_ASSERT(sb); 1223 if (toep->flags & TPF_SEND_FIN) 1224 t4_close_conn(sc, toep); 1225 return; 1226 } 1227 1228 /* Shove if there is no additional data pending. */ 1229 shove = (sbavail(sb) == tls_ofld->sb_off + plen) && 1230 !(tp->t_flags & TF_MORETOCOME); 1231 1232 if (sb->sb_flags & SB_AUTOSIZE && 1233 V_tcp_do_autosndbuf && 1234 sb->sb_hiwat < V_tcp_autosndbuf_max && 1235 sbused(sb) >= sb->sb_hiwat * 7 / 8) { 1236 int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc, 1237 V_tcp_autosndbuf_max); 1238 1239 if (!sbreserve_locked(sb, newsize, so, NULL)) 1240 sb->sb_flags &= ~SB_AUTOSIZE; 1241 else 1242 sowwakeup = 1; /* room available */ 1243 } 1244 if (sowwakeup) 1245 sowwakeup_locked(so); 1246 else 1247 SOCKBUF_UNLOCK(sb); 1248 SOCKBUF_UNLOCK_ASSERT(sb); 1249 1250 if (__predict_false(toep->flags & TPF_FIN_SENT)) 1251 panic("%s: excess tx.", __func__); 1252 1253 /* Determine whether to use immediate vs SGL. */ 1254 imm_payload = false; 1255 imm_ivs = false; 1256 if (wr_len + iv_len <= space) { 1257 imm_ivs = true; 1258 wr_len += iv_len; 1259 if (wr_len + tls_size <= space) { 1260 wr_len += tls_size; 1261 imm_payload = true; 1262 } 1263 } 1264 1265 /* Allocate space for IVs if needed. */ 1266 if (!imm_ivs) { 1267 iv_buffer = malloc(iv_len, M_CXGBE, M_NOWAIT); 1268 if (iv_buffer == NULL) { 1269 /* 1270 * XXX: How to restart this? 1271 */ 1272 if (sowwakeup) 1273 sowwakeup_locked(so); 1274 else 1275 SOCKBUF_UNLOCK(sb); 1276 SOCKBUF_UNLOCK_ASSERT(sb); 1277 CTR3(KTR_CXGBE, 1278 "%s: tid %d failed to alloc IV space len %d", 1279 __func__, toep->tid, iv_len); 1280 return; 1281 } 1282 } else 1283 iv_buffer = NULL; 1284 1285 /* Determine size of SGL. */ 1286 nsegs = 0; 1287 max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */ 1288 if (!imm_payload) { 1289 nsegs = count_mbuf_segs(sndptr, sndptroff + 1290 TLS_HEADER_LENGTH, tls_size, &max_nsegs_1mbuf); 1291 if (!imm_ivs) { 1292 int n = sglist_count(iv_buffer, iv_len); 1293 nsegs += n; 1294 if (n > max_nsegs_1mbuf) 1295 max_nsegs_1mbuf = n; 1296 } 1297 1298 /* Account for SGL in work request length. 
		wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq);
		if (wr == NULL) {
			/* XXX: how will we recover from this? */
			toep->flags |= TPF_TX_SUSPENDED;
			return;
		}

#ifdef VERBOSE_TRACES
		CTR5(KTR_CXGBE, "%s: tid %d TLS record %d len %#x pdus %d",
		    __func__, toep->tid, thdr.type, tls_size, pdus);
#endif
		txwr = wrtod(wr);
		cpl = (struct cpl_tx_tls_sfo *)(txwr + 1);
		memset(txwr, 0, roundup2(wr_len, 16));
		credits = howmany(wr_len, 16);
		expn_size = tls_expansion_size(toep, tls_size, 0, NULL);
		write_tlstx_wr(txwr, toep, imm_payload ? tls_size : 0,
		    tls_size, expn_size, pdus, credits, shove, imm_ivs ? 1 : 0);
		write_tlstx_cpl(cpl, toep, &thdr, tls_size, pdus);
		tls_copy_tx_key(toep, cpl + 1);

		/* Generate random IVs */
		buf = (char *)(cpl + 1) + key_size(toep);
		if (imm_ivs) {
			MPASS(iv_buffer == NULL);
			iv_dst = buf;
			buf = (char *)iv_dst + iv_len;
		} else
			iv_dst = iv_buffer;
		arc4rand(iv_dst, iv_len, 0);

		if (imm_payload) {
			m_copydata(sndptr, sndptroff + TLS_HEADER_LENGTH,
			    tls_size, buf);
		} else {
			write_tlstx_sgl(buf, sndptr,
			    sndptroff + TLS_HEADER_LENGTH, tls_size, iv_buffer,
			    iv_len, nsegs, max_nsegs_1mbuf);
		}

		KASSERT(toep->tx_credits >= credits,
		    ("%s: not enough credits", __func__));

		toep->tx_credits -= credits;

		tp->snd_nxt += plen;
		tp->snd_max += plen;

		SOCKBUF_LOCK(sb);
		sbsndptr_adv(sb, sb->sb_sndptr, plen);
		tls_ofld->sb_off += plen;
		SOCKBUF_UNLOCK(sb);

		toep->flags |= TPF_TX_DATA_SENT;
		if (toep->tx_credits < MIN_OFLD_TLSTX_CREDITS(toep))
			toep->flags |= TPF_TX_SUSPENDED;

		KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
		txsd->plen = plen;
		txsd->tx_credits = credits;
		txsd->iv_buffer = iv_buffer;
		txsd++;
		if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
			toep->txsd_pidx = 0;
			txsd = &toep->txsd[0];
		}
		toep->txsd_avail--;

		atomic_add_long(&toep->vi->pi->tx_tls_records, 1);
		atomic_add_long(&toep->vi->pi->tx_tls_octets, plen);

		t4_l2t_send(sc, wr, toep->l2te);
	}
}

/*
 * For TLS data we place received mbufs received via CPL_TLS_DATA into
 * an mbufq in the TLS offload state.  When CPL_RX_TLS_CMP is
 * received, the completed PDUs are placed into the socket receive
 * buffer.
 *
 * The TLS code reuses the ulp_pdu_reclaimq to hold the pending mbufs.
 */
static int
do_tls_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_tls_data *cpl = mtod(m, const void *);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp;
	int len;

	/* XXX: Should this match do_rx_data instead? */
	KASSERT(!(toep->flags & TPF_SYNQE),
	    ("%s: toep %p claims to be a synq entry", __func__, toep));

	KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));

	/* strip off CPL header */
	m_adj(m, sizeof(*cpl));
	len = m->m_pkthdr.len;

	atomic_add_long(&toep->vi->pi->rx_tls_octets, len);

	KASSERT(len == G_CPL_TLS_DATA_LENGTH(be32toh(cpl->length_pkd)),
	    ("%s: payload length mismatch", __func__));

	INP_WLOCK(inp);
	if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) {
		CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
		    __func__, tid, len, inp->inp_flags);
		INP_WUNLOCK(inp);
		m_freem(m);
		return (0);
	}

	/* Save TCP sequence number. */
	m->m_pkthdr.tls_tcp_seq = be32toh(cpl->seq);

	if (mbufq_enqueue(&toep->ulp_pdu_reclaimq, m)) {
#ifdef INVARIANTS
		panic("Failed to queue TLS data packet");
#else
		printf("%s: Failed to queue TLS data packet\n", __func__);
		INP_WUNLOCK(inp);
		m_freem(m);
		return (0);
#endif
	}

	tp = intotcpcb(inp);
	tp->t_rcvtime = ticks;

#ifdef VERBOSE_TRACES
	CTR4(KTR_CXGBE, "%s: tid %u len %d seq %u", __func__, tid, len,
	    be32toh(cpl->seq));
#endif

	INP_WUNLOCK(inp);
	return (0);
}

static int
do_rx_tls_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_rx_tls_cmp *cpl = mtod(m, const void *);
	struct tlsrx_hdr_pkt *tls_hdr_pkt;
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp;
	struct socket *so;
	struct sockbuf *sb;
	struct mbuf *tls_data;
	int len, pdu_length, pdu_overhead, sb_length;

	KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
	KASSERT(!(toep->flags & TPF_SYNQE),
	    ("%s: toep %p claims to be a synq entry", __func__, toep));

	/* strip off CPL header */
	m_adj(m, sizeof(*cpl));
	len = m->m_pkthdr.len;

	atomic_add_long(&toep->vi->pi->rx_tls_records, 1);

	KASSERT(len == G_CPL_RX_TLS_CMP_LENGTH(be32toh(cpl->pdulength_length)),
	    ("%s: payload length mismatch", __func__));

	INP_WLOCK(inp);
	if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) {
		CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
		    __func__, tid, len, inp->inp_flags);
		INP_WUNLOCK(inp);
		m_freem(m);
		return (0);
	}

	pdu_length = G_CPL_RX_TLS_CMP_PDULENGTH(be32toh(cpl->pdulength_length));

	tp = intotcpcb(inp);

#ifdef VERBOSE_TRACES
	CTR6(KTR_CXGBE, "%s: tid %u PDU len %d len %d seq %u, rcv_nxt %u",
	    __func__, tid, pdu_length, len, be32toh(cpl->seq), tp->rcv_nxt);
#endif

	tp->rcv_nxt += pdu_length;
	if (tp->rcv_wnd < pdu_length) {
		toep->tls.rcv_over += pdu_length - tp->rcv_wnd;
		tp->rcv_wnd = 0;
	} else
		tp->rcv_wnd -= pdu_length;

	/* XXX: Not sure what to do about urgent data. */

	/*
	 * The payload of this CPL is the TLS header followed by
	 * additional fields.
	 */
	KASSERT(m->m_len >= sizeof(*tls_hdr_pkt),
	    ("%s: payload too small", __func__));
	tls_hdr_pkt = mtod(m, void *);

	/*
	 * Only the TLS header is sent to OpenSSL, so report errors by
	 * altering the record type.
	 */
	if ((tls_hdr_pkt->res_to_mac_error & M_TLSRX_HDR_PKT_ERROR) != 0)
		tls_hdr_pkt->type = CONTENT_TYPE_ERROR;

	/* Trim this CPL's mbuf to only include the TLS header. */
	KASSERT(m->m_len == len && m->m_next == NULL,
	    ("%s: CPL spans multiple mbufs", __func__));
	m->m_len = TLS_HEADER_LENGTH;
	m->m_pkthdr.len = TLS_HEADER_LENGTH;

	tls_data = mbufq_dequeue(&toep->ulp_pdu_reclaimq);
	if (tls_data != NULL) {
		KASSERT(be32toh(cpl->seq) == tls_data->m_pkthdr.tls_tcp_seq,
		    ("%s: sequence mismatch", __func__));

		/*
		 * Update the TLS header length to be the length of
		 * the payload data.
		 */
		tls_hdr_pkt->length = htobe16(tls_data->m_pkthdr.len);

		m->m_next = tls_data;
		m->m_pkthdr.len += tls_data->m_len;
	}

	so = inp_inpcbtosocket(inp);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);

	if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) {
		struct epoch_tracker et;

		CTR3(KTR_CXGBE, "%s: tid %u, excess rx (%d bytes)",
		    __func__, tid, pdu_length);
		m_freem(m);
		SOCKBUF_UNLOCK(sb);
		INP_WUNLOCK(inp);

		CURVNET_SET(toep->vnet);
		INP_INFO_RLOCK_ET(&V_tcbinfo, et);
		INP_WLOCK(inp);
		tp = tcp_drop(tp, ECONNRESET);
		if (tp)
			INP_WUNLOCK(inp);
		INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
		CURVNET_RESTORE();

		return (0);
	}

	/*
	 * Not all of the bytes on the wire are included in the socket
	 * buffer (e.g. the MAC of the TLS record).  However, those
	 * bytes are included in the TCP sequence space.  To handle
	 * this, compute the delta for this TLS record in
	 * 'pdu_overhead' and treat those bytes as having already been
	 * "read" by the application for the purposes of expanding the
	 * window.  The meat of the TLS record passed to the
	 * application ('sb_length') will still not be counted as
	 * "read" until userland actually reads the bytes.
	 *
	 * XXX: Some of the calculations below are probably still not
	 * really correct.
	 */
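	/*
	 * E.g. (my arithmetic, assuming AES-CBC with SHA-1): a record
	 * carrying 1000 bytes of plaintext arrives as 1045 wire bytes
	 * (5 header + 16 IV + 1024 ciphertext), while the socket buffer
	 * sees 1005 (header + payload), so the other 40 bytes are
	 * credited back to the window immediately.
	 */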
	sb_length = m->m_pkthdr.len;
	pdu_overhead = pdu_length - sb_length;
	toep->rx_credits += pdu_overhead;
	tp->rcv_wnd += pdu_overhead;
	tp->rcv_adv += pdu_overhead;

	/* receive buffer autosize */
	MPASS(toep->vnet == so->so_vnet);
	CURVNET_SET(toep->vnet);
	if (sb->sb_flags & SB_AUTOSIZE &&
	    V_tcp_do_autorcvbuf &&
	    sb->sb_hiwat < V_tcp_autorcvbuf_max &&
	    sb_length > (sbspace(sb) / 8 * 7)) {
		unsigned int hiwat = sb->sb_hiwat;
		unsigned int newsize = min(hiwat + V_tcp_autorcvbuf_inc,
		    V_tcp_autorcvbuf_max);

		if (!sbreserve_locked(sb, newsize, so, NULL))
			sb->sb_flags &= ~SB_AUTOSIZE;
		else
			toep->rx_credits += newsize - hiwat;
	}

	KASSERT(toep->sb_cc >= sbused(sb),
	    ("%s: sb %p has more data (%d) than last time (%d).",
	    __func__, sb, sbused(sb), toep->sb_cc));
	toep->rx_credits += toep->sb_cc - sbused(sb);
	sbappendstream_locked(sb, m, 0);
	toep->sb_cc = sbused(sb);
#ifdef VERBOSE_TRACES
	CTR5(KTR_CXGBE, "%s: tid %u PDU overhead %d rx_credits %u rcv_wnd %u",
	    __func__, tid, pdu_overhead, toep->rx_credits, tp->rcv_wnd);
#endif
	if (toep->rx_credits > 0 && toep->sb_cc + tp->rcv_wnd < sb->sb_lowat) {
		int credits;

		credits = send_rx_credits(sc, toep, toep->rx_credits);
		toep->rx_credits -= credits;
		tp->rcv_wnd += credits;
		tp->rcv_adv += credits;
	}

	sorwakeup_locked(so);
	SOCKBUF_UNLOCK_ASSERT(sb);

	INP_WUNLOCK(inp);
	CURVNET_RESTORE();
	return (0);
}

void
t4_tls_mod_load(void)
{

	mtx_init(&tls_handshake_lock, "t4tls handshake", NULL, MTX_DEF);
	t4_register_cpl_handler(CPL_TLS_DATA, do_tls_data);
	t4_register_cpl_handler(CPL_RX_TLS_CMP, do_rx_tls_cmp);
}

void
t4_tls_mod_unload(void)
{

	t4_register_cpl_handler(CPL_TLS_DATA, NULL);
	t4_register_cpl_handler(CPL_RX_TLS_CMP, NULL);
	mtx_destroy(&tls_handshake_lock);
}
#endif	/* TCP_OFFLOAD */