/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bug.h>
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/splice.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>
#include <trace/events/sock.h>

#include "tls.h"

struct tls_decrypt_arg {
	struct_group(inargs,
	bool zc;
	bool async;
	u8 tail;
	);

	struct sk_buff *skb;
};

struct tls_decrypt_ctx {
	struct sock *sk;
	u8 iv[TLS_MAX_IV_SIZE];
	u8 aad[TLS_MAX_AAD_SIZE];
	u8 tail;
	struct scatterlist sg[];
};

noinline void tls_err_abort(struct sock *sk, int err)
{
	WARN_ON_ONCE(err >= 0);
	/* sk->sk_err should contain a positive error code.
*/ 73 WRITE_ONCE(sk->sk_err, -err); 74 /* Paired with smp_rmb() in tcp_poll() */ 75 smp_wmb(); 76 sk_error_report(sk); 77 } 78 79 static int __skb_nsg(struct sk_buff *skb, int offset, int len, 80 unsigned int recursion_level) 81 { 82 int start = skb_headlen(skb); 83 int i, chunk = start - offset; 84 struct sk_buff *frag_iter; 85 int elt = 0; 86 87 if (unlikely(recursion_level >= 24)) 88 return -EMSGSIZE; 89 90 if (chunk > 0) { 91 if (chunk > len) 92 chunk = len; 93 elt++; 94 len -= chunk; 95 if (len == 0) 96 return elt; 97 offset += chunk; 98 } 99 100 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 101 int end; 102 103 WARN_ON(start > offset + len); 104 105 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 106 chunk = end - offset; 107 if (chunk > 0) { 108 if (chunk > len) 109 chunk = len; 110 elt++; 111 len -= chunk; 112 if (len == 0) 113 return elt; 114 offset += chunk; 115 } 116 start = end; 117 } 118 119 if (unlikely(skb_has_frag_list(skb))) { 120 skb_walk_frags(skb, frag_iter) { 121 int end, ret; 122 123 WARN_ON(start > offset + len); 124 125 end = start + frag_iter->len; 126 chunk = end - offset; 127 if (chunk > 0) { 128 if (chunk > len) 129 chunk = len; 130 ret = __skb_nsg(frag_iter, offset - start, chunk, 131 recursion_level + 1); 132 if (unlikely(ret < 0)) 133 return ret; 134 elt += ret; 135 len -= chunk; 136 if (len == 0) 137 return elt; 138 offset += chunk; 139 } 140 start = end; 141 } 142 } 143 BUG_ON(len); 144 return elt; 145 } 146 147 /* Return the number of scatterlist elements required to completely map the 148 * skb, or -EMSGSIZE if the recursion depth is exceeded. 149 */ 150 static int skb_nsg(struct sk_buff *skb, int offset, int len) 151 { 152 return __skb_nsg(skb, offset, len, 0); 153 } 154 155 static int tls_padding_length(struct tls_prot_info *prot, struct sk_buff *skb, 156 struct tls_decrypt_arg *darg) 157 { 158 struct strp_msg *rxm = strp_msg(skb); 159 struct tls_msg *tlm = tls_msg(skb); 160 int sub = 0; 161 162 /* Determine zero-padding length */ 163 if (prot->version == TLS_1_3_VERSION) { 164 int offset = rxm->full_len - TLS_TAG_SIZE - 1; 165 char content_type = darg->zc ? 
darg->tail : 0; 166 int err; 167 168 while (content_type == 0) { 169 if (offset < prot->prepend_size) 170 return -EBADMSG; 171 err = skb_copy_bits(skb, rxm->offset + offset, 172 &content_type, 1); 173 if (err) 174 return err; 175 if (content_type) 176 break; 177 sub++; 178 offset--; 179 } 180 tlm->control = content_type; 181 } 182 return sub; 183 } 184 185 static void tls_decrypt_done(void *data, int err) 186 { 187 struct aead_request *aead_req = data; 188 struct crypto_aead *aead = crypto_aead_reqtfm(aead_req); 189 struct scatterlist *sgout = aead_req->dst; 190 struct scatterlist *sgin = aead_req->src; 191 struct tls_sw_context_rx *ctx; 192 struct tls_decrypt_ctx *dctx; 193 struct tls_context *tls_ctx; 194 struct scatterlist *sg; 195 unsigned int pages; 196 struct sock *sk; 197 int aead_size; 198 199 aead_size = sizeof(*aead_req) + crypto_aead_reqsize(aead); 200 aead_size = ALIGN(aead_size, __alignof__(*dctx)); 201 dctx = (void *)((u8 *)aead_req + aead_size); 202 203 sk = dctx->sk; 204 tls_ctx = tls_get_ctx(sk); 205 ctx = tls_sw_ctx_rx(tls_ctx); 206 207 /* Propagate if there was an err */ 208 if (err) { 209 if (err == -EBADMSG) 210 TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR); 211 ctx->async_wait.err = err; 212 tls_err_abort(sk, err); 213 } 214 215 /* Free the destination pages if skb was not decrypted inplace */ 216 if (sgout != sgin) { 217 /* Skip the first S/G entry as it points to AAD */ 218 for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) { 219 if (!sg) 220 break; 221 put_page(sg_page(sg)); 222 } 223 } 224 225 kfree(aead_req); 226 227 spin_lock_bh(&ctx->decrypt_compl_lock); 228 if (!atomic_dec_return(&ctx->decrypt_pending)) 229 complete(&ctx->async_wait.completion); 230 spin_unlock_bh(&ctx->decrypt_compl_lock); 231 } 232 233 static int tls_do_decryption(struct sock *sk, 234 struct scatterlist *sgin, 235 struct scatterlist *sgout, 236 char *iv_recv, 237 size_t data_len, 238 struct aead_request *aead_req, 239 struct tls_decrypt_arg *darg) 240 { 241 struct tls_context *tls_ctx = tls_get_ctx(sk); 242 struct tls_prot_info *prot = &tls_ctx->prot_info; 243 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 244 int ret; 245 246 aead_request_set_tfm(aead_req, ctx->aead_recv); 247 aead_request_set_ad(aead_req, prot->aad_size); 248 aead_request_set_crypt(aead_req, sgin, sgout, 249 data_len + prot->tag_size, 250 (u8 *)iv_recv); 251 252 if (darg->async) { 253 aead_request_set_callback(aead_req, 254 CRYPTO_TFM_REQ_MAY_BACKLOG, 255 tls_decrypt_done, aead_req); 256 atomic_inc(&ctx->decrypt_pending); 257 } else { 258 aead_request_set_callback(aead_req, 259 CRYPTO_TFM_REQ_MAY_BACKLOG, 260 crypto_req_done, &ctx->async_wait); 261 } 262 263 ret = crypto_aead_decrypt(aead_req); 264 if (ret == -EINPROGRESS) { 265 if (darg->async) 266 return 0; 267 268 ret = crypto_wait_req(ret, &ctx->async_wait); 269 } 270 darg->async = false; 271 272 return ret; 273 } 274 275 static void tls_trim_both_msgs(struct sock *sk, int target_size) 276 { 277 struct tls_context *tls_ctx = tls_get_ctx(sk); 278 struct tls_prot_info *prot = &tls_ctx->prot_info; 279 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 280 struct tls_rec *rec = ctx->open_rec; 281 282 sk_msg_trim(sk, &rec->msg_plaintext, target_size); 283 if (target_size > 0) 284 target_size += prot->overhead_size; 285 sk_msg_trim(sk, &rec->msg_encrypted, target_size); 286 } 287 288 static int tls_alloc_encrypted_msg(struct sock *sk, int len) 289 { 290 struct tls_context *tls_ctx = tls_get_ctx(sk); 291 struct tls_sw_context_tx *ctx = 
tls_sw_ctx_tx(tls_ctx); 292 struct tls_rec *rec = ctx->open_rec; 293 struct sk_msg *msg_en = &rec->msg_encrypted; 294 295 return sk_msg_alloc(sk, msg_en, len, 0); 296 } 297 298 static int tls_clone_plaintext_msg(struct sock *sk, int required) 299 { 300 struct tls_context *tls_ctx = tls_get_ctx(sk); 301 struct tls_prot_info *prot = &tls_ctx->prot_info; 302 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 303 struct tls_rec *rec = ctx->open_rec; 304 struct sk_msg *msg_pl = &rec->msg_plaintext; 305 struct sk_msg *msg_en = &rec->msg_encrypted; 306 int skip, len; 307 308 /* We add page references worth len bytes from encrypted sg 309 * at the end of plaintext sg. It is guaranteed that msg_en 310 * has enough required room (ensured by caller). 311 */ 312 len = required - msg_pl->sg.size; 313 314 /* Skip initial bytes in msg_en's data to be able to use 315 * same offset of both plain and encrypted data. 316 */ 317 skip = prot->prepend_size + msg_pl->sg.size; 318 319 return sk_msg_clone(sk, msg_pl, msg_en, skip, len); 320 } 321 322 static struct tls_rec *tls_get_rec(struct sock *sk) 323 { 324 struct tls_context *tls_ctx = tls_get_ctx(sk); 325 struct tls_prot_info *prot = &tls_ctx->prot_info; 326 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 327 struct sk_msg *msg_pl, *msg_en; 328 struct tls_rec *rec; 329 int mem_size; 330 331 mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send); 332 333 rec = kzalloc(mem_size, sk->sk_allocation); 334 if (!rec) 335 return NULL; 336 337 msg_pl = &rec->msg_plaintext; 338 msg_en = &rec->msg_encrypted; 339 340 sk_msg_init(msg_pl); 341 sk_msg_init(msg_en); 342 343 sg_init_table(rec->sg_aead_in, 2); 344 sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size); 345 sg_unmark_end(&rec->sg_aead_in[1]); 346 347 sg_init_table(rec->sg_aead_out, 2); 348 sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size); 349 sg_unmark_end(&rec->sg_aead_out[1]); 350 351 rec->sk = sk; 352 353 return rec; 354 } 355 356 static void tls_free_rec(struct sock *sk, struct tls_rec *rec) 357 { 358 sk_msg_free(sk, &rec->msg_encrypted); 359 sk_msg_free(sk, &rec->msg_plaintext); 360 kfree(rec); 361 } 362 363 static void tls_free_open_rec(struct sock *sk) 364 { 365 struct tls_context *tls_ctx = tls_get_ctx(sk); 366 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 367 struct tls_rec *rec = ctx->open_rec; 368 369 if (rec) { 370 tls_free_rec(sk, rec); 371 ctx->open_rec = NULL; 372 } 373 } 374 375 int tls_tx_records(struct sock *sk, int flags) 376 { 377 struct tls_context *tls_ctx = tls_get_ctx(sk); 378 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 379 struct tls_rec *rec, *tmp; 380 struct sk_msg *msg_en; 381 int tx_flags, rc = 0; 382 383 if (tls_is_partially_sent_record(tls_ctx)) { 384 rec = list_first_entry(&ctx->tx_list, 385 struct tls_rec, list); 386 387 if (flags == -1) 388 tx_flags = rec->tx_flags; 389 else 390 tx_flags = flags; 391 392 rc = tls_push_partial_record(sk, tls_ctx, tx_flags); 393 if (rc) 394 goto tx_err; 395 396 /* Full record has been transmitted. 
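		 * (the partially sent record is the first entry on tx_list,
		 * see the list_first_entry() call above)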
397 * Remove the head of tx_list 398 */ 399 list_del(&rec->list); 400 sk_msg_free(sk, &rec->msg_plaintext); 401 kfree(rec); 402 } 403 404 /* Tx all ready records */ 405 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) { 406 if (READ_ONCE(rec->tx_ready)) { 407 if (flags == -1) 408 tx_flags = rec->tx_flags; 409 else 410 tx_flags = flags; 411 412 msg_en = &rec->msg_encrypted; 413 rc = tls_push_sg(sk, tls_ctx, 414 &msg_en->sg.data[msg_en->sg.curr], 415 0, tx_flags); 416 if (rc) 417 goto tx_err; 418 419 list_del(&rec->list); 420 sk_msg_free(sk, &rec->msg_plaintext); 421 kfree(rec); 422 } else { 423 break; 424 } 425 } 426 427 tx_err: 428 if (rc < 0 && rc != -EAGAIN) 429 tls_err_abort(sk, -EBADMSG); 430 431 return rc; 432 } 433 434 static void tls_encrypt_done(void *data, int err) 435 { 436 struct tls_sw_context_tx *ctx; 437 struct tls_context *tls_ctx; 438 struct tls_prot_info *prot; 439 struct tls_rec *rec = data; 440 struct scatterlist *sge; 441 struct sk_msg *msg_en; 442 bool ready = false; 443 struct sock *sk; 444 int pending; 445 446 msg_en = &rec->msg_encrypted; 447 448 sk = rec->sk; 449 tls_ctx = tls_get_ctx(sk); 450 prot = &tls_ctx->prot_info; 451 ctx = tls_sw_ctx_tx(tls_ctx); 452 453 sge = sk_msg_elem(msg_en, msg_en->sg.curr); 454 sge->offset -= prot->prepend_size; 455 sge->length += prot->prepend_size; 456 457 /* Check if error is previously set on socket */ 458 if (err || sk->sk_err) { 459 rec = NULL; 460 461 /* If err is already set on socket, return the same code */ 462 if (sk->sk_err) { 463 ctx->async_wait.err = -sk->sk_err; 464 } else { 465 ctx->async_wait.err = err; 466 tls_err_abort(sk, err); 467 } 468 } 469 470 if (rec) { 471 struct tls_rec *first_rec; 472 473 /* Mark the record as ready for transmission */ 474 smp_store_mb(rec->tx_ready, true); 475 476 /* If received record is at head of tx_list, schedule tx */ 477 first_rec = list_first_entry(&ctx->tx_list, 478 struct tls_rec, list); 479 if (rec == first_rec) 480 ready = true; 481 } 482 483 spin_lock_bh(&ctx->encrypt_compl_lock); 484 pending = atomic_dec_return(&ctx->encrypt_pending); 485 486 if (!pending && ctx->async_notify) 487 complete(&ctx->async_wait.completion); 488 spin_unlock_bh(&ctx->encrypt_compl_lock); 489 490 if (!ready) 491 return; 492 493 /* Schedule the transmission */ 494 if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) 495 schedule_delayed_work(&ctx->tx_work.work, 1); 496 } 497 498 static int tls_do_encryption(struct sock *sk, 499 struct tls_context *tls_ctx, 500 struct tls_sw_context_tx *ctx, 501 struct aead_request *aead_req, 502 size_t data_len, u32 start) 503 { 504 struct tls_prot_info *prot = &tls_ctx->prot_info; 505 struct tls_rec *rec = ctx->open_rec; 506 struct sk_msg *msg_en = &rec->msg_encrypted; 507 struct scatterlist *sge = sk_msg_elem(msg_en, start); 508 int rc, iv_offset = 0; 509 510 /* For CCM based ciphers, first byte of IV is a constant */ 511 switch (prot->cipher_type) { 512 case TLS_CIPHER_AES_CCM_128: 513 rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE; 514 iv_offset = 1; 515 break; 516 case TLS_CIPHER_SM4_CCM: 517 rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE; 518 iv_offset = 1; 519 break; 520 } 521 522 memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv, 523 prot->iv_size + prot->salt_size); 524 525 tls_xor_iv_with_seq(prot, rec->iv_data + iv_offset, 526 tls_ctx->tx.rec_seq); 527 528 sge->offset += prot->prepend_size; 529 sge->length -= prot->prepend_size; 530 531 msg_en->sg.curr = start; 532 533 aead_request_set_tfm(aead_req, ctx->aead_send); 534 aead_request_set_ad(aead_req, 
				 prot->aad_size);
	aead_request_set_crypt(aead_req, rec->sg_aead_in,
			       rec->sg_aead_out,
			       data_len, rec->iv_data);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tls_encrypt_done, rec);

	/* Add the record in tx_list */
	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
	atomic_inc(&ctx->encrypt_pending);

	rc = crypto_aead_encrypt(aead_req);
	if (!rc || rc != -EINPROGRESS) {
		atomic_dec(&ctx->encrypt_pending);
		sge->offset -= prot->prepend_size;
		sge->length += prot->prepend_size;
	}

	if (!rc) {
		WRITE_ONCE(rec->tx_ready, true);
	} else if (rc != -EINPROGRESS) {
		list_del(&rec->list);
		return rc;
	}

	/* Unhook the record from the context unless encryption failed */
	ctx->open_rec = NULL;
	tls_advance_record_sn(sk, prot, &tls_ctx->tx);
	return rc;
}

static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
				 struct tls_rec **to, struct sk_msg *msg_opl,
				 struct sk_msg *msg_oen, u32 split_point,
				 u32 tx_overhead_size, u32 *orig_end)
{
	u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
	struct scatterlist *sge, *osge, *nsge;
	u32 orig_size = msg_opl->sg.size;
	struct scatterlist tmp = { };
	struct sk_msg *msg_npl;
	struct tls_rec *new;
	int ret;

	new = tls_get_rec(sk);
	if (!new)
		return -ENOMEM;
	ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
			   tx_overhead_size, 0);
	if (ret < 0) {
		tls_free_rec(sk, new);
		return ret;
	}

	*orig_end = msg_opl->sg.end;
	i = msg_opl->sg.start;
	sge = sk_msg_elem(msg_opl, i);
	while (apply && sge->length) {
		if (sge->length > apply) {
			u32 len = sge->length - apply;

			get_page(sg_page(sge));
			sg_set_page(&tmp, sg_page(sge), len,
				    sge->offset + apply);
			sge->length = apply;
			bytes += apply;
			apply = 0;
		} else {
			apply -= sge->length;
			bytes += sge->length;
		}

		sk_msg_iter_var_next(i);
		if (i == msg_opl->sg.end)
			break;
		sge = sk_msg_elem(msg_opl, i);
	}

	msg_opl->sg.end = i;
	msg_opl->sg.curr = i;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = 0;
	msg_opl->sg.size = bytes;

	msg_npl = &new->msg_plaintext;
	msg_npl->apply_bytes = apply;
	msg_npl->sg.size = orig_size - bytes;

	j = msg_npl->sg.start;
	nsge = sk_msg_elem(msg_npl, j);
	if (tmp.length) {
		memcpy(nsge, &tmp, sizeof(*nsge));
		sk_msg_iter_var_next(j);
		nsge = sk_msg_elem(msg_npl, j);
	}

	osge = sk_msg_elem(msg_opl, i);
	while (osge->length) {
		memcpy(nsge, osge, sizeof(*nsge));
		sg_unmark_end(nsge);
		sk_msg_iter_var_next(i);
		sk_msg_iter_var_next(j);
		if (i == *orig_end)
			break;
		osge = sk_msg_elem(msg_opl, i);
		nsge = sk_msg_elem(msg_npl, j);
	}

	msg_npl->sg.end = j;
	msg_npl->sg.curr = j;
	msg_npl->sg.copybreak = 0;

	*to = new;
	return 0;
}

static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
				  struct tls_rec *from, u32 orig_end)
{
	struct sk_msg *msg_npl = &from->msg_plaintext;
	struct sk_msg *msg_opl = &to->msg_plaintext;
	struct scatterlist *osge, *nsge;
	u32 i, j;

	i = msg_opl->sg.end;
	sk_msg_iter_var_prev(i);
	j = msg_npl->sg.start;

	osge = sk_msg_elem(msg_opl, i);
	nsge = sk_msg_elem(msg_npl, j);

	if (sg_page(osge) == sg_page(nsge) &&
	    osge->offset + osge->length == nsge->offset) {
		osge->length +=
nsge->length; 669 put_page(sg_page(nsge)); 670 } 671 672 msg_opl->sg.end = orig_end; 673 msg_opl->sg.curr = orig_end; 674 msg_opl->sg.copybreak = 0; 675 msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size; 676 msg_opl->sg.size += msg_npl->sg.size; 677 678 sk_msg_free(sk, &to->msg_encrypted); 679 sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted); 680 681 kfree(from); 682 } 683 684 static int tls_push_record(struct sock *sk, int flags, 685 unsigned char record_type) 686 { 687 struct tls_context *tls_ctx = tls_get_ctx(sk); 688 struct tls_prot_info *prot = &tls_ctx->prot_info; 689 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 690 struct tls_rec *rec = ctx->open_rec, *tmp = NULL; 691 u32 i, split_point, orig_end; 692 struct sk_msg *msg_pl, *msg_en; 693 struct aead_request *req; 694 bool split; 695 int rc; 696 697 if (!rec) 698 return 0; 699 700 msg_pl = &rec->msg_plaintext; 701 msg_en = &rec->msg_encrypted; 702 703 split_point = msg_pl->apply_bytes; 704 split = split_point && split_point < msg_pl->sg.size; 705 if (unlikely((!split && 706 msg_pl->sg.size + 707 prot->overhead_size > msg_en->sg.size) || 708 (split && 709 split_point + 710 prot->overhead_size > msg_en->sg.size))) { 711 split = true; 712 split_point = msg_en->sg.size; 713 } 714 if (split) { 715 rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en, 716 split_point, prot->overhead_size, 717 &orig_end); 718 if (rc < 0) 719 return rc; 720 /* This can happen if above tls_split_open_record allocates 721 * a single large encryption buffer instead of two smaller 722 * ones. In this case adjust pointers and continue without 723 * split. 724 */ 725 if (!msg_pl->sg.size) { 726 tls_merge_open_record(sk, rec, tmp, orig_end); 727 msg_pl = &rec->msg_plaintext; 728 msg_en = &rec->msg_encrypted; 729 split = false; 730 } 731 sk_msg_trim(sk, msg_en, msg_pl->sg.size + 732 prot->overhead_size); 733 } 734 735 rec->tx_flags = flags; 736 req = &rec->aead_req; 737 738 i = msg_pl->sg.end; 739 sk_msg_iter_var_prev(i); 740 741 rec->content_type = record_type; 742 if (prot->version == TLS_1_3_VERSION) { 743 /* Add content type to end of message. 
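		 * TLS 1.3 carries the real content type in a byte appended
		 * after the plaintext, optionally followed by zero padding;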
No padding added */ 744 sg_set_buf(&rec->sg_content_type, &rec->content_type, 1); 745 sg_mark_end(&rec->sg_content_type); 746 sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1, 747 &rec->sg_content_type); 748 } else { 749 sg_mark_end(sk_msg_elem(msg_pl, i)); 750 } 751 752 if (msg_pl->sg.end < msg_pl->sg.start) { 753 sg_chain(&msg_pl->sg.data[msg_pl->sg.start], 754 MAX_SKB_FRAGS - msg_pl->sg.start + 1, 755 msg_pl->sg.data); 756 } 757 758 i = msg_pl->sg.start; 759 sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]); 760 761 i = msg_en->sg.end; 762 sk_msg_iter_var_prev(i); 763 sg_mark_end(sk_msg_elem(msg_en, i)); 764 765 i = msg_en->sg.start; 766 sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]); 767 768 tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size, 769 tls_ctx->tx.rec_seq, record_type, prot); 770 771 tls_fill_prepend(tls_ctx, 772 page_address(sg_page(&msg_en->sg.data[i])) + 773 msg_en->sg.data[i].offset, 774 msg_pl->sg.size + prot->tail_size, 775 record_type); 776 777 tls_ctx->pending_open_record_frags = false; 778 779 rc = tls_do_encryption(sk, tls_ctx, ctx, req, 780 msg_pl->sg.size + prot->tail_size, i); 781 if (rc < 0) { 782 if (rc != -EINPROGRESS) { 783 tls_err_abort(sk, -EBADMSG); 784 if (split) { 785 tls_ctx->pending_open_record_frags = true; 786 tls_merge_open_record(sk, rec, tmp, orig_end); 787 } 788 } 789 ctx->async_capable = 1; 790 return rc; 791 } else if (split) { 792 msg_pl = &tmp->msg_plaintext; 793 msg_en = &tmp->msg_encrypted; 794 sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size); 795 tls_ctx->pending_open_record_frags = true; 796 ctx->open_rec = tmp; 797 } 798 799 return tls_tx_records(sk, flags); 800 } 801 802 static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk, 803 bool full_record, u8 record_type, 804 ssize_t *copied, int flags) 805 { 806 struct tls_context *tls_ctx = tls_get_ctx(sk); 807 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 808 struct sk_msg msg_redir = { }; 809 struct sk_psock *psock; 810 struct sock *sk_redir; 811 struct tls_rec *rec; 812 bool enospc, policy, redir_ingress; 813 int err = 0, send; 814 u32 delta = 0; 815 816 policy = !(flags & MSG_SENDPAGE_NOPOLICY); 817 psock = sk_psock_get(sk); 818 if (!psock || !policy) { 819 err = tls_push_record(sk, flags, record_type); 820 if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) { 821 *copied -= sk_msg_free(sk, msg); 822 tls_free_open_rec(sk); 823 err = -sk->sk_err; 824 } 825 if (psock) 826 sk_psock_put(sk, psock); 827 return err; 828 } 829 more_data: 830 enospc = sk_msg_full(msg); 831 if (psock->eval == __SK_NONE) { 832 delta = msg->sg.size; 833 psock->eval = sk_psock_msg_verdict(sk, psock, msg); 834 delta -= msg->sg.size; 835 } 836 if (msg->cork_bytes && msg->cork_bytes > msg->sg.size && 837 !enospc && !full_record) { 838 err = -ENOSPC; 839 goto out_err; 840 } 841 msg->cork_bytes = 0; 842 send = msg->sg.size; 843 if (msg->apply_bytes && msg->apply_bytes < send) 844 send = msg->apply_bytes; 845 846 switch (psock->eval) { 847 case __SK_PASS: 848 err = tls_push_record(sk, flags, record_type); 849 if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) { 850 *copied -= sk_msg_free(sk, msg); 851 tls_free_open_rec(sk); 852 err = -sk->sk_err; 853 goto out_err; 854 } 855 break; 856 case __SK_REDIRECT: 857 redir_ingress = psock->redir_ingress; 858 sk_redir = psock->sk_redir; 859 memcpy(&msg_redir, msg, sizeof(*msg)); 860 if (msg->apply_bytes < send) 861 msg->apply_bytes = 0; 862 else 863 msg->apply_bytes -= send; 864 sk_msg_return_zero(sk, msg, send); 865 
msg->sg.size -= send; 866 release_sock(sk); 867 err = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress, 868 &msg_redir, send, flags); 869 lock_sock(sk); 870 if (err < 0) { 871 *copied -= sk_msg_free_nocharge(sk, &msg_redir); 872 msg->sg.size = 0; 873 } 874 if (msg->sg.size == 0) 875 tls_free_open_rec(sk); 876 break; 877 case __SK_DROP: 878 default: 879 sk_msg_free_partial(sk, msg, send); 880 if (msg->apply_bytes < send) 881 msg->apply_bytes = 0; 882 else 883 msg->apply_bytes -= send; 884 if (msg->sg.size == 0) 885 tls_free_open_rec(sk); 886 *copied -= (send + delta); 887 err = -EACCES; 888 } 889 890 if (likely(!err)) { 891 bool reset_eval = !ctx->open_rec; 892 893 rec = ctx->open_rec; 894 if (rec) { 895 msg = &rec->msg_plaintext; 896 if (!msg->apply_bytes) 897 reset_eval = true; 898 } 899 if (reset_eval) { 900 psock->eval = __SK_NONE; 901 if (psock->sk_redir) { 902 sock_put(psock->sk_redir); 903 psock->sk_redir = NULL; 904 } 905 } 906 if (rec) 907 goto more_data; 908 } 909 out_err: 910 sk_psock_put(sk, psock); 911 return err; 912 } 913 914 static int tls_sw_push_pending_record(struct sock *sk, int flags) 915 { 916 struct tls_context *tls_ctx = tls_get_ctx(sk); 917 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 918 struct tls_rec *rec = ctx->open_rec; 919 struct sk_msg *msg_pl; 920 size_t copied; 921 922 if (!rec) 923 return 0; 924 925 msg_pl = &rec->msg_plaintext; 926 copied = msg_pl->sg.size; 927 if (!copied) 928 return 0; 929 930 return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA, 931 &copied, flags); 932 } 933 934 static int tls_sw_sendmsg_splice(struct sock *sk, struct msghdr *msg, 935 struct sk_msg *msg_pl, size_t try_to_copy, 936 ssize_t *copied) 937 { 938 struct page *page = NULL, **pages = &page; 939 940 do { 941 ssize_t part; 942 size_t off; 943 944 part = iov_iter_extract_pages(&msg->msg_iter, &pages, 945 try_to_copy, 1, 0, &off); 946 if (part <= 0) 947 return part ?: -EIO; 948 949 if (WARN_ON_ONCE(!sendpage_ok(page))) { 950 iov_iter_revert(&msg->msg_iter, part); 951 return -EIO; 952 } 953 954 sk_msg_page_add(msg_pl, page, part, off); 955 sk_mem_charge(sk, part); 956 *copied += part; 957 try_to_copy -= part; 958 } while (try_to_copy && !sk_msg_full(msg_pl)); 959 960 return 0; 961 } 962 963 static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg, 964 size_t size) 965 { 966 long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 967 struct tls_context *tls_ctx = tls_get_ctx(sk); 968 struct tls_prot_info *prot = &tls_ctx->prot_info; 969 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 970 bool async_capable = ctx->async_capable; 971 unsigned char record_type = TLS_RECORD_TYPE_DATA; 972 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter); 973 bool eor = !(msg->msg_flags & MSG_MORE); 974 size_t try_to_copy; 975 ssize_t copied = 0; 976 struct sk_msg *msg_pl, *msg_en; 977 struct tls_rec *rec; 978 int required_size; 979 int num_async = 0; 980 bool full_record; 981 int record_room; 982 int num_zc = 0; 983 int orig_size; 984 int ret = 0; 985 int pending; 986 987 if (!eor && (msg->msg_flags & MSG_EOR)) 988 return -EINVAL; 989 990 if (unlikely(msg->msg_controllen)) { 991 ret = tls_process_cmsg(sk, msg, &record_type); 992 if (ret) { 993 if (ret == -EINPROGRESS) 994 num_async++; 995 else if (ret != -EAGAIN) 996 goto send_end; 997 } 998 } 999 1000 while (msg_data_left(msg)) { 1001 if (sk->sk_err) { 1002 ret = -sk->sk_err; 1003 goto send_end; 1004 } 1005 1006 if (ctx->open_rec) 1007 rec = ctx->open_rec; 1008 else 1009 rec = ctx->open_rec = 
tls_get_rec(sk); 1010 if (!rec) { 1011 ret = -ENOMEM; 1012 goto send_end; 1013 } 1014 1015 msg_pl = &rec->msg_plaintext; 1016 msg_en = &rec->msg_encrypted; 1017 1018 orig_size = msg_pl->sg.size; 1019 full_record = false; 1020 try_to_copy = msg_data_left(msg); 1021 record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size; 1022 if (try_to_copy >= record_room) { 1023 try_to_copy = record_room; 1024 full_record = true; 1025 } 1026 1027 required_size = msg_pl->sg.size + try_to_copy + 1028 prot->overhead_size; 1029 1030 if (!sk_stream_memory_free(sk)) 1031 goto wait_for_sndbuf; 1032 1033 alloc_encrypted: 1034 ret = tls_alloc_encrypted_msg(sk, required_size); 1035 if (ret) { 1036 if (ret != -ENOSPC) 1037 goto wait_for_memory; 1038 1039 /* Adjust try_to_copy according to the amount that was 1040 * actually allocated. The difference is due 1041 * to max sg elements limit 1042 */ 1043 try_to_copy -= required_size - msg_en->sg.size; 1044 full_record = true; 1045 } 1046 1047 if (try_to_copy && (msg->msg_flags & MSG_SPLICE_PAGES)) { 1048 ret = tls_sw_sendmsg_splice(sk, msg, msg_pl, 1049 try_to_copy, &copied); 1050 if (ret < 0) 1051 goto send_end; 1052 tls_ctx->pending_open_record_frags = true; 1053 if (full_record || eor || sk_msg_full(msg_pl)) 1054 goto copied; 1055 continue; 1056 } 1057 1058 if (!is_kvec && (full_record || eor) && !async_capable) { 1059 u32 first = msg_pl->sg.end; 1060 1061 ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter, 1062 msg_pl, try_to_copy); 1063 if (ret) 1064 goto fallback_to_reg_send; 1065 1066 num_zc++; 1067 copied += try_to_copy; 1068 1069 sk_msg_sg_copy_set(msg_pl, first); 1070 ret = bpf_exec_tx_verdict(msg_pl, sk, full_record, 1071 record_type, &copied, 1072 msg->msg_flags); 1073 if (ret) { 1074 if (ret == -EINPROGRESS) 1075 num_async++; 1076 else if (ret == -ENOMEM) 1077 goto wait_for_memory; 1078 else if (ctx->open_rec && ret == -ENOSPC) 1079 goto rollback_iter; 1080 else if (ret != -EAGAIN) 1081 goto send_end; 1082 } 1083 continue; 1084 rollback_iter: 1085 copied -= try_to_copy; 1086 sk_msg_sg_copy_clear(msg_pl, first); 1087 iov_iter_revert(&msg->msg_iter, 1088 msg_pl->sg.size - orig_size); 1089 fallback_to_reg_send: 1090 sk_msg_trim(sk, msg_pl, orig_size); 1091 } 1092 1093 required_size = msg_pl->sg.size + try_to_copy; 1094 1095 ret = tls_clone_plaintext_msg(sk, required_size); 1096 if (ret) { 1097 if (ret != -ENOSPC) 1098 goto send_end; 1099 1100 /* Adjust try_to_copy according to the amount that was 1101 * actually allocated. The difference is due 1102 * to max sg elements limit 1103 */ 1104 try_to_copy -= required_size - msg_pl->sg.size; 1105 full_record = true; 1106 sk_msg_trim(sk, msg_en, 1107 msg_pl->sg.size + prot->overhead_size); 1108 } 1109 1110 if (try_to_copy) { 1111 ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, 1112 msg_pl, try_to_copy); 1113 if (ret < 0) 1114 goto trim_sgl; 1115 } 1116 1117 /* Open records defined only if successfully copied, otherwise 1118 * we would trim the sg but not reset the open record frags. 
1119 */ 1120 tls_ctx->pending_open_record_frags = true; 1121 copied += try_to_copy; 1122 copied: 1123 if (full_record || eor) { 1124 ret = bpf_exec_tx_verdict(msg_pl, sk, full_record, 1125 record_type, &copied, 1126 msg->msg_flags); 1127 if (ret) { 1128 if (ret == -EINPROGRESS) 1129 num_async++; 1130 else if (ret == -ENOMEM) 1131 goto wait_for_memory; 1132 else if (ret != -EAGAIN) { 1133 if (ret == -ENOSPC) 1134 ret = 0; 1135 goto send_end; 1136 } 1137 } 1138 } 1139 1140 continue; 1141 1142 wait_for_sndbuf: 1143 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1144 wait_for_memory: 1145 ret = sk_stream_wait_memory(sk, &timeo); 1146 if (ret) { 1147 trim_sgl: 1148 if (ctx->open_rec) 1149 tls_trim_both_msgs(sk, orig_size); 1150 goto send_end; 1151 } 1152 1153 if (ctx->open_rec && msg_en->sg.size < required_size) 1154 goto alloc_encrypted; 1155 } 1156 1157 if (!num_async) { 1158 goto send_end; 1159 } else if (num_zc) { 1160 /* Wait for pending encryptions to get completed */ 1161 spin_lock_bh(&ctx->encrypt_compl_lock); 1162 ctx->async_notify = true; 1163 1164 pending = atomic_read(&ctx->encrypt_pending); 1165 spin_unlock_bh(&ctx->encrypt_compl_lock); 1166 if (pending) 1167 crypto_wait_req(-EINPROGRESS, &ctx->async_wait); 1168 else 1169 reinit_completion(&ctx->async_wait.completion); 1170 1171 /* There can be no concurrent accesses, since we have no 1172 * pending encrypt operations 1173 */ 1174 WRITE_ONCE(ctx->async_notify, false); 1175 1176 if (ctx->async_wait.err) { 1177 ret = ctx->async_wait.err; 1178 copied = 0; 1179 } 1180 } 1181 1182 /* Transmit if any encryptions have completed */ 1183 if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) { 1184 cancel_delayed_work(&ctx->tx_work.work); 1185 tls_tx_records(sk, msg->msg_flags); 1186 } 1187 1188 send_end: 1189 ret = sk_stream_error(sk, msg->msg_flags, ret); 1190 return copied > 0 ? copied : ret; 1191 } 1192 1193 int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) 1194 { 1195 struct tls_context *tls_ctx = tls_get_ctx(sk); 1196 int ret; 1197 1198 if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | 1199 MSG_CMSG_COMPAT | MSG_SPLICE_PAGES | MSG_EOR | 1200 MSG_SENDPAGE_NOPOLICY)) 1201 return -EOPNOTSUPP; 1202 1203 ret = mutex_lock_interruptible(&tls_ctx->tx_lock); 1204 if (ret) 1205 return ret; 1206 lock_sock(sk); 1207 ret = tls_sw_sendmsg_locked(sk, msg, size); 1208 release_sock(sk); 1209 mutex_unlock(&tls_ctx->tx_lock); 1210 return ret; 1211 } 1212 1213 /* 1214 * Handle unexpected EOF during splice without SPLICE_F_MORE set. 1215 */ 1216 void tls_sw_splice_eof(struct socket *sock) 1217 { 1218 struct sock *sk = sock->sk; 1219 struct tls_context *tls_ctx = tls_get_ctx(sk); 1220 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 1221 struct tls_rec *rec; 1222 struct sk_msg *msg_pl; 1223 ssize_t copied = 0; 1224 bool retrying = false; 1225 int ret = 0; 1226 int pending; 1227 1228 if (!ctx->open_rec) 1229 return; 1230 1231 mutex_lock(&tls_ctx->tx_lock); 1232 lock_sock(sk); 1233 1234 retry: 1235 /* same checks as in tls_sw_push_pending_record() */ 1236 rec = ctx->open_rec; 1237 if (!rec) 1238 goto unlock; 1239 1240 msg_pl = &rec->msg_plaintext; 1241 if (msg_pl->sg.size == 0) 1242 goto unlock; 1243 1244 /* Check the BPF advisor and perform transmission. 
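	 * bpf_exec_tx_verdict() runs the attached psock verdict program,
	 * if any, and may pass, redirect, or drop the open record.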
*/ 1245 ret = bpf_exec_tx_verdict(msg_pl, sk, false, TLS_RECORD_TYPE_DATA, 1246 &copied, 0); 1247 switch (ret) { 1248 case 0: 1249 case -EAGAIN: 1250 if (retrying) 1251 goto unlock; 1252 retrying = true; 1253 goto retry; 1254 case -EINPROGRESS: 1255 break; 1256 default: 1257 goto unlock; 1258 } 1259 1260 /* Wait for pending encryptions to get completed */ 1261 spin_lock_bh(&ctx->encrypt_compl_lock); 1262 ctx->async_notify = true; 1263 1264 pending = atomic_read(&ctx->encrypt_pending); 1265 spin_unlock_bh(&ctx->encrypt_compl_lock); 1266 if (pending) 1267 crypto_wait_req(-EINPROGRESS, &ctx->async_wait); 1268 else 1269 reinit_completion(&ctx->async_wait.completion); 1270 1271 /* There can be no concurrent accesses, since we have no pending 1272 * encrypt operations 1273 */ 1274 WRITE_ONCE(ctx->async_notify, false); 1275 1276 if (ctx->async_wait.err) 1277 goto unlock; 1278 1279 /* Transmit if any encryptions have completed */ 1280 if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) { 1281 cancel_delayed_work(&ctx->tx_work.work); 1282 tls_tx_records(sk, 0); 1283 } 1284 1285 unlock: 1286 release_sock(sk); 1287 mutex_unlock(&tls_ctx->tx_lock); 1288 } 1289 1290 static int 1291 tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock, 1292 bool released) 1293 { 1294 struct tls_context *tls_ctx = tls_get_ctx(sk); 1295 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1296 DEFINE_WAIT_FUNC(wait, woken_wake_function); 1297 int ret = 0; 1298 long timeo; 1299 1300 timeo = sock_rcvtimeo(sk, nonblock); 1301 1302 while (!tls_strp_msg_ready(ctx)) { 1303 if (!sk_psock_queue_empty(psock)) 1304 return 0; 1305 1306 if (sk->sk_err) 1307 return sock_error(sk); 1308 1309 if (ret < 0) 1310 return ret; 1311 1312 if (!skb_queue_empty(&sk->sk_receive_queue)) { 1313 tls_strp_check_rcv(&ctx->strp); 1314 if (tls_strp_msg_ready(ctx)) 1315 break; 1316 } 1317 1318 if (sk->sk_shutdown & RCV_SHUTDOWN) 1319 return 0; 1320 1321 if (sock_flag(sk, SOCK_DONE)) 1322 return 0; 1323 1324 if (!timeo) 1325 return -EAGAIN; 1326 1327 released = true; 1328 add_wait_queue(sk_sleep(sk), &wait); 1329 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); 1330 ret = sk_wait_event(sk, &timeo, 1331 tls_strp_msg_ready(ctx) || 1332 !sk_psock_queue_empty(psock), 1333 &wait); 1334 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); 1335 remove_wait_queue(sk_sleep(sk), &wait); 1336 1337 /* Handle signals */ 1338 if (signal_pending(current)) 1339 return sock_intr_errno(timeo); 1340 } 1341 1342 tls_strp_msg_load(&ctx->strp, released); 1343 1344 return 1; 1345 } 1346 1347 static int tls_setup_from_iter(struct iov_iter *from, 1348 int length, int *pages_used, 1349 struct scatterlist *to, 1350 int to_max_pages) 1351 { 1352 int rc = 0, i = 0, num_elem = *pages_used, maxpages; 1353 struct page *pages[MAX_SKB_FRAGS]; 1354 unsigned int size = 0; 1355 ssize_t copied, use; 1356 size_t offset; 1357 1358 while (length > 0) { 1359 i = 0; 1360 maxpages = to_max_pages - num_elem; 1361 if (maxpages == 0) { 1362 rc = -EFAULT; 1363 goto out; 1364 } 1365 copied = iov_iter_get_pages2(from, pages, 1366 length, 1367 maxpages, &offset); 1368 if (copied <= 0) { 1369 rc = -EFAULT; 1370 goto out; 1371 } 1372 1373 length -= copied; 1374 size += copied; 1375 while (copied) { 1376 use = min_t(int, copied, PAGE_SIZE - offset); 1377 1378 sg_set_page(&to[num_elem], 1379 pages[i], use, offset); 1380 sg_unmark_end(&to[num_elem]); 1381 /* We do not uncharge memory from this API */ 1382 1383 offset = 0; 1384 copied -= use; 1385 1386 i++; 1387 num_elem++; 1388 } 1389 } 1390 /* Mark the 
end in the last sg entry if newly added */
	if (num_elem > *pages_used)
		sg_mark_end(&to[num_elem - 1]);
out:
	if (rc)
		iov_iter_revert(from, size);
	*pages_used = num_elem;

	return rc;
}

static struct sk_buff *
tls_alloc_clrtxt_skb(struct sock *sk, struct sk_buff *skb,
		     unsigned int full_len)
{
	struct strp_msg *clr_rxm;
	struct sk_buff *clr_skb;
	int err;

	clr_skb = alloc_skb_with_frags(0, full_len, TLS_PAGE_ORDER,
				       &err, sk->sk_allocation);
	if (!clr_skb)
		return NULL;

	skb_copy_header(clr_skb, skb);
	clr_skb->len = full_len;
	clr_skb->data_len = full_len;

	clr_rxm = strp_msg(clr_skb);
	clr_rxm->offset = 0;

	return clr_skb;
}

/* Decrypt handlers
 *
 * tls_decrypt_sw() and tls_decrypt_device() are decrypt handlers.
 * They must transform the darg in/out argument as follows:
 *       |          Input            |         Output
 * -------------------------------------------------------------------
 *    zc | Zero-copy decrypt allowed | Zero-copy performed
 * async | Async decrypt allowed     | Async crypto used / in progress
 *   skb |            *              | Output skb
 *
 * If ZC decryption was performed darg.skb will point to the input skb.
 */

/* This function decrypts the input skb into either out_iov, out_sg, or
 * the skb's own buffers. The input parameter 'darg->zc' indicates whether
 * zero-copy mode should be tried. With zero-copy mode, either out_iov or
 * out_sg must be non-NULL. If both out_iov and out_sg are NULL, the
 * decryption happens inside the skb buffers themselves, i.e. zero-copy
 * gets disabled and 'darg->zc' is updated.
 */
static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
			  struct scatterlist *out_sg,
			  struct tls_decrypt_arg *darg)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	int n_sgin, n_sgout, aead_size, err, pages = 0;
	struct sk_buff *skb = tls_strp_msg(ctx);
	const struct strp_msg *rxm = strp_msg(skb);
	const struct tls_msg *tlm = tls_msg(skb);
	struct aead_request *aead_req;
	struct scatterlist *sgin = NULL;
	struct scatterlist *sgout = NULL;
	const int data_len = rxm->full_len - prot->overhead_size;
	int tail_pages = !!prot->tail_size;
	struct tls_decrypt_ctx *dctx;
	struct sk_buff *clear_skb;
	int iv_offset = 0;
	u8 *mem;

	n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
			 rxm->full_len - prot->prepend_size);
	if (n_sgin < 1)
		return n_sgin ?: -EBADMSG;

	if (darg->zc && (out_iov || out_sg)) {
		clear_skb = NULL;

		if (out_iov)
			n_sgout = 1 + tail_pages +
				iov_iter_npages_cap(out_iov, INT_MAX, data_len);
		else
			n_sgout = sg_nents(out_sg);
	} else {
		darg->zc = false;

		clear_skb = tls_alloc_clrtxt_skb(sk, skb, rxm->full_len);
		if (!clear_skb)
			return -ENOMEM;

		n_sgout = 1 + skb_shinfo(clear_skb)->nr_frags;
	}

	/* Increment to accommodate AAD */
	n_sgin = n_sgin + 1;

	/* Allocate a single block of memory which contains
	 * aead_req || tls_decrypt_ctx.
	 * Both structs are variable length.
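	 *
	 * Resulting layout of the single allocation, roughly:
	 *
	 *   | aead_request + req ctx | pad | tls_decrypt_ctx | sg[n_sgin + n_sgout] |
	 *
	 * dctx starts at the aligned offset right after the AEAD request;
	 * sgin is dctx->sg[0..n_sgin-1] and sgout starts at dctx->sg[n_sgin].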
1494 */ 1495 aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv); 1496 aead_size = ALIGN(aead_size, __alignof__(*dctx)); 1497 mem = kmalloc(aead_size + struct_size(dctx, sg, size_add(n_sgin, n_sgout)), 1498 sk->sk_allocation); 1499 if (!mem) { 1500 err = -ENOMEM; 1501 goto exit_free_skb; 1502 } 1503 1504 /* Segment the allocated memory */ 1505 aead_req = (struct aead_request *)mem; 1506 dctx = (struct tls_decrypt_ctx *)(mem + aead_size); 1507 dctx->sk = sk; 1508 sgin = &dctx->sg[0]; 1509 sgout = &dctx->sg[n_sgin]; 1510 1511 /* For CCM based ciphers, first byte of nonce+iv is a constant */ 1512 switch (prot->cipher_type) { 1513 case TLS_CIPHER_AES_CCM_128: 1514 dctx->iv[0] = TLS_AES_CCM_IV_B0_BYTE; 1515 iv_offset = 1; 1516 break; 1517 case TLS_CIPHER_SM4_CCM: 1518 dctx->iv[0] = TLS_SM4_CCM_IV_B0_BYTE; 1519 iv_offset = 1; 1520 break; 1521 } 1522 1523 /* Prepare IV */ 1524 if (prot->version == TLS_1_3_VERSION || 1525 prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) { 1526 memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv, 1527 prot->iv_size + prot->salt_size); 1528 } else { 1529 err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE, 1530 &dctx->iv[iv_offset] + prot->salt_size, 1531 prot->iv_size); 1532 if (err < 0) 1533 goto exit_free; 1534 memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv, prot->salt_size); 1535 } 1536 tls_xor_iv_with_seq(prot, &dctx->iv[iv_offset], tls_ctx->rx.rec_seq); 1537 1538 /* Prepare AAD */ 1539 tls_make_aad(dctx->aad, rxm->full_len - prot->overhead_size + 1540 prot->tail_size, 1541 tls_ctx->rx.rec_seq, tlm->control, prot); 1542 1543 /* Prepare sgin */ 1544 sg_init_table(sgin, n_sgin); 1545 sg_set_buf(&sgin[0], dctx->aad, prot->aad_size); 1546 err = skb_to_sgvec(skb, &sgin[1], 1547 rxm->offset + prot->prepend_size, 1548 rxm->full_len - prot->prepend_size); 1549 if (err < 0) 1550 goto exit_free; 1551 1552 if (clear_skb) { 1553 sg_init_table(sgout, n_sgout); 1554 sg_set_buf(&sgout[0], dctx->aad, prot->aad_size); 1555 1556 err = skb_to_sgvec(clear_skb, &sgout[1], prot->prepend_size, 1557 data_len + prot->tail_size); 1558 if (err < 0) 1559 goto exit_free; 1560 } else if (out_iov) { 1561 sg_init_table(sgout, n_sgout); 1562 sg_set_buf(&sgout[0], dctx->aad, prot->aad_size); 1563 1564 err = tls_setup_from_iter(out_iov, data_len, &pages, &sgout[1], 1565 (n_sgout - 1 - tail_pages)); 1566 if (err < 0) 1567 goto exit_free_pages; 1568 1569 if (prot->tail_size) { 1570 sg_unmark_end(&sgout[pages]); 1571 sg_set_buf(&sgout[pages + 1], &dctx->tail, 1572 prot->tail_size); 1573 sg_mark_end(&sgout[pages + 1]); 1574 } 1575 } else if (out_sg) { 1576 memcpy(sgout, out_sg, n_sgout * sizeof(*sgout)); 1577 } 1578 1579 /* Prepare and submit AEAD request */ 1580 err = tls_do_decryption(sk, sgin, sgout, dctx->iv, 1581 data_len + prot->tail_size, aead_req, darg); 1582 if (err) 1583 goto exit_free_pages; 1584 1585 darg->skb = clear_skb ?: tls_strp_msg(ctx); 1586 clear_skb = NULL; 1587 1588 if (unlikely(darg->async)) { 1589 err = tls_strp_msg_hold(&ctx->strp, &ctx->async_hold); 1590 if (err) 1591 __skb_queue_tail(&ctx->async_hold, darg->skb); 1592 return err; 1593 } 1594 1595 if (prot->tail_size) 1596 darg->tail = dctx->tail; 1597 1598 exit_free_pages: 1599 /* Release the pages in case iov was mapped to pages */ 1600 for (; pages > 0; pages--) 1601 put_page(sg_page(&sgout[pages])); 1602 exit_free: 1603 kfree(mem); 1604 exit_free_skb: 1605 consume_skb(clear_skb); 1606 return err; 1607 } 1608 1609 static int 1610 tls_decrypt_sw(struct sock *sk, struct tls_context *tls_ctx, 1611 struct msghdr 
*msg, struct tls_decrypt_arg *darg) 1612 { 1613 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1614 struct tls_prot_info *prot = &tls_ctx->prot_info; 1615 struct strp_msg *rxm; 1616 int pad, err; 1617 1618 err = tls_decrypt_sg(sk, &msg->msg_iter, NULL, darg); 1619 if (err < 0) { 1620 if (err == -EBADMSG) 1621 TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR); 1622 return err; 1623 } 1624 /* keep going even for ->async, the code below is TLS 1.3 */ 1625 1626 /* If opportunistic TLS 1.3 ZC failed retry without ZC */ 1627 if (unlikely(darg->zc && prot->version == TLS_1_3_VERSION && 1628 darg->tail != TLS_RECORD_TYPE_DATA)) { 1629 darg->zc = false; 1630 if (!darg->tail) 1631 TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXNOPADVIOL); 1632 TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTRETRY); 1633 return tls_decrypt_sw(sk, tls_ctx, msg, darg); 1634 } 1635 1636 pad = tls_padding_length(prot, darg->skb, darg); 1637 if (pad < 0) { 1638 if (darg->skb != tls_strp_msg(ctx)) 1639 consume_skb(darg->skb); 1640 return pad; 1641 } 1642 1643 rxm = strp_msg(darg->skb); 1644 rxm->full_len -= pad; 1645 1646 return 0; 1647 } 1648 1649 static int 1650 tls_decrypt_device(struct sock *sk, struct msghdr *msg, 1651 struct tls_context *tls_ctx, struct tls_decrypt_arg *darg) 1652 { 1653 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1654 struct tls_prot_info *prot = &tls_ctx->prot_info; 1655 struct strp_msg *rxm; 1656 int pad, err; 1657 1658 if (tls_ctx->rx_conf != TLS_HW) 1659 return 0; 1660 1661 err = tls_device_decrypted(sk, tls_ctx); 1662 if (err <= 0) 1663 return err; 1664 1665 pad = tls_padding_length(prot, tls_strp_msg(ctx), darg); 1666 if (pad < 0) 1667 return pad; 1668 1669 darg->async = false; 1670 darg->skb = tls_strp_msg(ctx); 1671 /* ->zc downgrade check, in case TLS 1.3 gets here */ 1672 darg->zc &= !(prot->version == TLS_1_3_VERSION && 1673 tls_msg(darg->skb)->control != TLS_RECORD_TYPE_DATA); 1674 1675 rxm = strp_msg(darg->skb); 1676 rxm->full_len -= pad; 1677 1678 if (!darg->zc) { 1679 /* Non-ZC case needs a real skb */ 1680 darg->skb = tls_strp_msg_detach(ctx); 1681 if (!darg->skb) 1682 return -ENOMEM; 1683 } else { 1684 unsigned int off, len; 1685 1686 /* In ZC case nobody cares about the output skb. 1687 * Just copy the data here. Note the skb is not fully trimmed. 
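		 * The device has already decrypted the record in place, so
		 * only the payload bytes between the record header and the
		 * authentication tag are copied into 'msg' below.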
		 */
		off = rxm->offset + prot->prepend_size;
		len = rxm->full_len - prot->overhead_size;

		err = skb_copy_datagram_msg(darg->skb, off, msg, len);
		if (err)
			return err;
	}
	return 1;
}

static int tls_rx_one_record(struct sock *sk, struct msghdr *msg,
			     struct tls_decrypt_arg *darg)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int err;

	err = tls_decrypt_device(sk, msg, tls_ctx, darg);
	if (!err)
		err = tls_decrypt_sw(sk, tls_ctx, msg, darg);
	if (err < 0)
		return err;

	rxm = strp_msg(darg->skb);
	rxm->offset += prot->prepend_size;
	rxm->full_len -= prot->overhead_size;
	tls_advance_record_sn(sk, prot, &tls_ctx->rx);

	return 0;
}

int decrypt_skb(struct sock *sk, struct scatterlist *sgout)
{
	struct tls_decrypt_arg darg = { .zc = true, };

	return tls_decrypt_sg(sk, NULL, sgout, &darg);
}

static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm,
				   u8 *control)
{
	int err;

	if (!*control) {
		*control = tlm->control;
		if (!*control)
			return -EBADMSG;

		err = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
			       sizeof(*control), control);
		if (*control != TLS_RECORD_TYPE_DATA) {
			if (err || msg->msg_flags & MSG_CTRUNC)
				return -EIO;
		}
	} else if (*control != tlm->control) {
		return 0;
	}

	return 1;
}

static void tls_rx_rec_done(struct tls_sw_context_rx *ctx)
{
	tls_strp_msg_done(&ctx->strp);
}

/* This function traverses the rx_list in the TLS receive context and copies
 * the decrypted records into the buffer provided by the caller when zero
 * copy is not in use. Records are removed from the rx_list when this is not
 * a peek and the record has been consumed completely.
 */
static int process_rx_list(struct tls_sw_context_rx *ctx,
			   struct msghdr *msg,
			   u8 *control,
			   size_t skip,
			   size_t len,
			   bool is_peek)
{
	struct sk_buff *skb = skb_peek(&ctx->rx_list);
	struct tls_msg *tlm;
	ssize_t copied = 0;
	int err;

	while (skip && skb) {
		struct strp_msg *rxm = strp_msg(skb);
		tlm = tls_msg(skb);

		err = tls_record_content_type(msg, tlm, control);
		if (err <= 0)
			goto out;

		if (skip < rxm->full_len)
			break;

		skip = skip - rxm->full_len;
		skb = skb_peek_next(skb, &ctx->rx_list);
	}

	while (len && skb) {
		struct sk_buff *next_skb;
		struct strp_msg *rxm = strp_msg(skb);
		int chunk = min_t(unsigned int, rxm->full_len - skip, len);

		tlm = tls_msg(skb);

		err = tls_record_content_type(msg, tlm, control);
		if (err <= 0)
			goto out;

		err = skb_copy_datagram_msg(skb, rxm->offset + skip,
					    msg, chunk);
		if (err < 0)
			goto out;

		len = len - chunk;
		copied = copied + chunk;

		/* Consume the data from the record in the non-peek case */
		if (!is_peek) {
			rxm->offset = rxm->offset + chunk;
			rxm->full_len = rxm->full_len - chunk;

			/* Return if there is unconsumed data in the record */
			if (rxm->full_len - skip)
				break;
		}

		/* The remaining skip-bytes must lie in 1st record in rx_list.
		 * So from the 2nd record, 'skip' should be 0.
1819 */ 1820 skip = 0; 1821 1822 if (msg) 1823 msg->msg_flags |= MSG_EOR; 1824 1825 next_skb = skb_peek_next(skb, &ctx->rx_list); 1826 1827 if (!is_peek) { 1828 __skb_unlink(skb, &ctx->rx_list); 1829 consume_skb(skb); 1830 } 1831 1832 skb = next_skb; 1833 } 1834 err = 0; 1835 1836 out: 1837 return copied ? : err; 1838 } 1839 1840 static bool 1841 tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot, 1842 size_t len_left, size_t decrypted, ssize_t done, 1843 size_t *flushed_at) 1844 { 1845 size_t max_rec; 1846 1847 if (len_left <= decrypted) 1848 return false; 1849 1850 max_rec = prot->overhead_size - prot->tail_size + TLS_MAX_PAYLOAD_SIZE; 1851 if (done - *flushed_at < SZ_128K && tcp_inq(sk) > max_rec) 1852 return false; 1853 1854 *flushed_at = done; 1855 return sk_flush_backlog(sk); 1856 } 1857 1858 static int tls_rx_reader_acquire(struct sock *sk, struct tls_sw_context_rx *ctx, 1859 bool nonblock) 1860 { 1861 long timeo; 1862 int ret; 1863 1864 timeo = sock_rcvtimeo(sk, nonblock); 1865 1866 while (unlikely(ctx->reader_present)) { 1867 DEFINE_WAIT_FUNC(wait, woken_wake_function); 1868 1869 ctx->reader_contended = 1; 1870 1871 add_wait_queue(&ctx->wq, &wait); 1872 ret = sk_wait_event(sk, &timeo, 1873 !READ_ONCE(ctx->reader_present), &wait); 1874 remove_wait_queue(&ctx->wq, &wait); 1875 1876 if (timeo <= 0) 1877 return -EAGAIN; 1878 if (signal_pending(current)) 1879 return sock_intr_errno(timeo); 1880 if (ret < 0) 1881 return ret; 1882 } 1883 1884 WRITE_ONCE(ctx->reader_present, 1); 1885 1886 return 0; 1887 } 1888 1889 static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx, 1890 bool nonblock) 1891 { 1892 int err; 1893 1894 lock_sock(sk); 1895 err = tls_rx_reader_acquire(sk, ctx, nonblock); 1896 if (err) 1897 release_sock(sk); 1898 return err; 1899 } 1900 1901 static void tls_rx_reader_release(struct sock *sk, struct tls_sw_context_rx *ctx) 1902 { 1903 if (unlikely(ctx->reader_contended)) { 1904 if (wq_has_sleeper(&ctx->wq)) 1905 wake_up(&ctx->wq); 1906 else 1907 ctx->reader_contended = 0; 1908 1909 WARN_ON_ONCE(!ctx->reader_present); 1910 } 1911 1912 WRITE_ONCE(ctx->reader_present, 0); 1913 } 1914 1915 static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx) 1916 { 1917 tls_rx_reader_release(sk, ctx); 1918 release_sock(sk); 1919 } 1920 1921 int tls_sw_recvmsg(struct sock *sk, 1922 struct msghdr *msg, 1923 size_t len, 1924 int flags, 1925 int *addr_len) 1926 { 1927 struct tls_context *tls_ctx = tls_get_ctx(sk); 1928 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1929 struct tls_prot_info *prot = &tls_ctx->prot_info; 1930 ssize_t decrypted = 0, async_copy_bytes = 0; 1931 struct sk_psock *psock; 1932 unsigned char control = 0; 1933 size_t flushed_at = 0; 1934 struct strp_msg *rxm; 1935 struct tls_msg *tlm; 1936 ssize_t copied = 0; 1937 bool async = false; 1938 int target, err; 1939 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter); 1940 bool is_peek = flags & MSG_PEEK; 1941 bool released = true; 1942 bool bpf_strp_enabled; 1943 bool zc_capable; 1944 1945 if (unlikely(flags & MSG_ERRQUEUE)) 1946 return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR); 1947 1948 psock = sk_psock_get(sk); 1949 err = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT); 1950 if (err < 0) 1951 return err; 1952 bpf_strp_enabled = sk_psock_strp_enabled(psock); 1953 1954 /* If crypto failed the connection is broken */ 1955 err = ctx->async_wait.err; 1956 if (err) 1957 goto end; 1958 1959 /* Process pending decrypted records. 
	 * These have already been decrypted into kernel buffers, so copying
	 * them out here is never zero-copy.
	 */
	err = process_rx_list(ctx, msg, &control, 0, len, is_peek);
	if (err < 0)
		goto end;

	copied = err;
	if (len <= copied)
		goto end;

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	len = len - copied;

	zc_capable = !bpf_strp_enabled && !is_kvec && !is_peek &&
		     ctx->zc_capable;
	decrypted = 0;
	while (len && (decrypted + copied < target || tls_strp_msg_ready(ctx))) {
		struct tls_decrypt_arg darg;
		int to_decrypt, chunk;

		err = tls_rx_rec_wait(sk, psock, flags & MSG_DONTWAIT,
				      released);
		if (err <= 0) {
			if (psock) {
				chunk = sk_msg_recvmsg(sk, psock, msg, len,
						       flags);
				if (chunk > 0) {
					decrypted += chunk;
					len -= chunk;
					continue;
				}
			}
			goto recv_end;
		}

		memset(&darg.inargs, 0, sizeof(darg.inargs));

		rxm = strp_msg(tls_strp_msg(ctx));
		tlm = tls_msg(tls_strp_msg(ctx));

		to_decrypt = rxm->full_len - prot->overhead_size;

		if (zc_capable && to_decrypt <= len &&
		    tlm->control == TLS_RECORD_TYPE_DATA)
			darg.zc = true;

		/* Do not use async mode if record is non-data */
		if (tlm->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
			darg.async = ctx->async_capable;
		else
			darg.async = false;

		err = tls_rx_one_record(sk, msg, &darg);
		if (err < 0) {
			tls_err_abort(sk, -EBADMSG);
			goto recv_end;
		}

		async |= darg.async;

		/* If the type of records being processed is not known yet,
		 * set it to record type just dequeued. If it is already known,
		 * but does not match the record type just dequeued, go to end.
		 * We always get record type here since for tls1.2, record type
		 * is known just after record is dequeued from stream parser.
		 * For tls1.3, we disable async.
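		 * (For TLS 1.3 the true content type is only known after
		 * decryption, from the trailing content type byte, so the
		 * record must be decrypted synchronously before this check.)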
2024 */ 2025 err = tls_record_content_type(msg, tls_msg(darg.skb), &control); 2026 if (err <= 0) { 2027 DEBUG_NET_WARN_ON_ONCE(darg.zc); 2028 tls_rx_rec_done(ctx); 2029 put_on_rx_list_err: 2030 __skb_queue_tail(&ctx->rx_list, darg.skb); 2031 goto recv_end; 2032 } 2033 2034 /* periodically flush backlog, and feed strparser */ 2035 released = tls_read_flush_backlog(sk, prot, len, to_decrypt, 2036 decrypted + copied, 2037 &flushed_at); 2038 2039 /* TLS 1.3 may have updated the length by more than overhead */ 2040 rxm = strp_msg(darg.skb); 2041 chunk = rxm->full_len; 2042 tls_rx_rec_done(ctx); 2043 2044 if (!darg.zc) { 2045 bool partially_consumed = chunk > len; 2046 struct sk_buff *skb = darg.skb; 2047 2048 DEBUG_NET_WARN_ON_ONCE(darg.skb == ctx->strp.anchor); 2049 2050 if (async) { 2051 /* TLS 1.2-only, to_decrypt must be text len */ 2052 chunk = min_t(int, to_decrypt, len); 2053 async_copy_bytes += chunk; 2054 put_on_rx_list: 2055 decrypted += chunk; 2056 len -= chunk; 2057 __skb_queue_tail(&ctx->rx_list, skb); 2058 continue; 2059 } 2060 2061 if (bpf_strp_enabled) { 2062 released = true; 2063 err = sk_psock_tls_strp_read(psock, skb); 2064 if (err != __SK_PASS) { 2065 rxm->offset = rxm->offset + rxm->full_len; 2066 rxm->full_len = 0; 2067 if (err == __SK_DROP) 2068 consume_skb(skb); 2069 continue; 2070 } 2071 } 2072 2073 if (partially_consumed) 2074 chunk = len; 2075 2076 err = skb_copy_datagram_msg(skb, rxm->offset, 2077 msg, chunk); 2078 if (err < 0) 2079 goto put_on_rx_list_err; 2080 2081 if (is_peek) 2082 goto put_on_rx_list; 2083 2084 if (partially_consumed) { 2085 rxm->offset += chunk; 2086 rxm->full_len -= chunk; 2087 goto put_on_rx_list; 2088 } 2089 2090 consume_skb(skb); 2091 } 2092 2093 decrypted += chunk; 2094 len -= chunk; 2095 2096 /* Return full control message to userspace before trying 2097 * to parse another message type 2098 */ 2099 msg->msg_flags |= MSG_EOR; 2100 if (control != TLS_RECORD_TYPE_DATA) 2101 break; 2102 } 2103 2104 recv_end: 2105 if (async) { 2106 int ret, pending; 2107 2108 /* Wait for all previously submitted records to be decrypted */ 2109 spin_lock_bh(&ctx->decrypt_compl_lock); 2110 reinit_completion(&ctx->async_wait.completion); 2111 pending = atomic_read(&ctx->decrypt_pending); 2112 spin_unlock_bh(&ctx->decrypt_compl_lock); 2113 ret = 0; 2114 if (pending) 2115 ret = crypto_wait_req(-EINPROGRESS, &ctx->async_wait); 2116 __skb_queue_purge(&ctx->async_hold); 2117 2118 if (ret) { 2119 if (err >= 0 || err == -EINPROGRESS) 2120 err = ret; 2121 decrypted = 0; 2122 goto end; 2123 } 2124 2125 /* Drain records from the rx_list & copy if required */ 2126 if (is_peek || is_kvec) 2127 err = process_rx_list(ctx, msg, &control, copied, 2128 decrypted, is_peek); 2129 else 2130 err = process_rx_list(ctx, msg, &control, 0, 2131 async_copy_bytes, is_peek); 2132 decrypted += max(err, 0); 2133 } 2134 2135 copied += decrypted; 2136 2137 end: 2138 tls_rx_reader_unlock(sk, ctx); 2139 if (psock) 2140 sk_psock_put(sk, psock); 2141 return copied ? 
: err; 2142 } 2143 2144 ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, 2145 struct pipe_inode_info *pipe, 2146 size_t len, unsigned int flags) 2147 { 2148 struct tls_context *tls_ctx = tls_get_ctx(sock->sk); 2149 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 2150 struct strp_msg *rxm = NULL; 2151 struct sock *sk = sock->sk; 2152 struct tls_msg *tlm; 2153 struct sk_buff *skb; 2154 ssize_t copied = 0; 2155 int chunk; 2156 int err; 2157 2158 err = tls_rx_reader_lock(sk, ctx, flags & SPLICE_F_NONBLOCK); 2159 if (err < 0) 2160 return err; 2161 2162 if (!skb_queue_empty(&ctx->rx_list)) { 2163 skb = __skb_dequeue(&ctx->rx_list); 2164 } else { 2165 struct tls_decrypt_arg darg; 2166 2167 err = tls_rx_rec_wait(sk, NULL, flags & SPLICE_F_NONBLOCK, 2168 true); 2169 if (err <= 0) 2170 goto splice_read_end; 2171 2172 memset(&darg.inargs, 0, sizeof(darg.inargs)); 2173 2174 err = tls_rx_one_record(sk, NULL, &darg); 2175 if (err < 0) { 2176 tls_err_abort(sk, -EBADMSG); 2177 goto splice_read_end; 2178 } 2179 2180 tls_rx_rec_done(ctx); 2181 skb = darg.skb; 2182 } 2183 2184 rxm = strp_msg(skb); 2185 tlm = tls_msg(skb); 2186 2187 /* splice does not support reading control messages */ 2188 if (tlm->control != TLS_RECORD_TYPE_DATA) { 2189 err = -EINVAL; 2190 goto splice_requeue; 2191 } 2192 2193 chunk = min_t(unsigned int, rxm->full_len, len); 2194 copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags); 2195 if (copied < 0) 2196 goto splice_requeue; 2197 2198 if (chunk < rxm->full_len) { 2199 rxm->offset += len; 2200 rxm->full_len -= len; 2201 goto splice_requeue; 2202 } 2203 2204 consume_skb(skb); 2205 2206 splice_read_end: 2207 tls_rx_reader_unlock(sk, ctx); 2208 return copied ? : err; 2209 2210 splice_requeue: 2211 __skb_queue_head(&ctx->rx_list, skb); 2212 goto splice_read_end; 2213 } 2214 2215 int tls_sw_read_sock(struct sock *sk, read_descriptor_t *desc, 2216 sk_read_actor_t read_actor) 2217 { 2218 struct tls_context *tls_ctx = tls_get_ctx(sk); 2219 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 2220 struct tls_prot_info *prot = &tls_ctx->prot_info; 2221 struct strp_msg *rxm = NULL; 2222 struct sk_buff *skb = NULL; 2223 struct sk_psock *psock; 2224 size_t flushed_at = 0; 2225 bool released = true; 2226 struct tls_msg *tlm; 2227 ssize_t copied = 0; 2228 ssize_t decrypted; 2229 int err, used; 2230 2231 psock = sk_psock_get(sk); 2232 if (psock) { 2233 sk_psock_put(sk, psock); 2234 return -EINVAL; 2235 } 2236 err = tls_rx_reader_acquire(sk, ctx, true); 2237 if (err < 0) 2238 return err; 2239 2240 /* If crypto failed the connection is broken */ 2241 err = ctx->async_wait.err; 2242 if (err) 2243 goto read_sock_end; 2244 2245 decrypted = 0; 2246 do { 2247 if (!skb_queue_empty(&ctx->rx_list)) { 2248 skb = __skb_dequeue(&ctx->rx_list); 2249 rxm = strp_msg(skb); 2250 tlm = tls_msg(skb); 2251 } else { 2252 struct tls_decrypt_arg darg; 2253 2254 err = tls_rx_rec_wait(sk, NULL, true, released); 2255 if (err <= 0) 2256 goto read_sock_end; 2257 2258 memset(&darg.inargs, 0, sizeof(darg.inargs)); 2259 2260 err = tls_rx_one_record(sk, NULL, &darg); 2261 if (err < 0) { 2262 tls_err_abort(sk, -EBADMSG); 2263 goto read_sock_end; 2264 } 2265 2266 released = tls_read_flush_backlog(sk, prot, INT_MAX, 2267 0, decrypted, 2268 &flushed_at); 2269 skb = darg.skb; 2270 rxm = strp_msg(skb); 2271 tlm = tls_msg(skb); 2272 decrypted += rxm->full_len; 2273 2274 tls_rx_rec_done(ctx); 2275 } 2276 2277 /* read_sock does not support reading control messages */ 2278 if (tlm->control != 
TLS_RECORD_TYPE_DATA) { 2279 err = -EINVAL; 2280 goto read_sock_requeue; 2281 } 2282 2283 used = read_actor(desc, skb, rxm->offset, rxm->full_len); 2284 if (used <= 0) { 2285 if (!copied) 2286 err = used; 2287 goto read_sock_requeue; 2288 } 2289 copied += used; 2290 if (used < rxm->full_len) { 2291 rxm->offset += used; 2292 rxm->full_len -= used; 2293 if (!desc->count) 2294 goto read_sock_requeue; 2295 } else { 2296 consume_skb(skb); 2297 if (!desc->count) 2298 skb = NULL; 2299 } 2300 } while (skb); 2301 2302 read_sock_end: 2303 tls_rx_reader_release(sk, ctx); 2304 return copied ? : err; 2305 2306 read_sock_requeue: 2307 __skb_queue_head(&ctx->rx_list, skb); 2308 goto read_sock_end; 2309 } 2310 2311 bool tls_sw_sock_is_readable(struct sock *sk) 2312 { 2313 struct tls_context *tls_ctx = tls_get_ctx(sk); 2314 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 2315 bool ingress_empty = true; 2316 struct sk_psock *psock; 2317 2318 rcu_read_lock(); 2319 psock = sk_psock(sk); 2320 if (psock) 2321 ingress_empty = list_empty(&psock->ingress_msg); 2322 rcu_read_unlock(); 2323 2324 return !ingress_empty || tls_strp_msg_ready(ctx) || 2325 !skb_queue_empty(&ctx->rx_list); 2326 } 2327 2328 int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb) 2329 { 2330 struct tls_context *tls_ctx = tls_get_ctx(strp->sk); 2331 struct tls_prot_info *prot = &tls_ctx->prot_info; 2332 char header[TLS_HEADER_SIZE + TLS_MAX_IV_SIZE]; 2333 size_t cipher_overhead; 2334 size_t data_len = 0; 2335 int ret; 2336 2337 /* Verify that we have a full TLS header, or wait for more data */ 2338 if (strp->stm.offset + prot->prepend_size > skb->len) 2339 return 0; 2340 2341 /* Sanity-check size of on-stack buffer. */ 2342 if (WARN_ON(prot->prepend_size > sizeof(header))) { 2343 ret = -EINVAL; 2344 goto read_failure; 2345 } 2346 2347 /* Linearize header to local buffer */ 2348 ret = skb_copy_bits(skb, strp->stm.offset, header, prot->prepend_size); 2349 if (ret < 0) 2350 goto read_failure; 2351 2352 strp->mark = header[0]; 2353 2354 data_len = ((header[4] & 0xFF) | (header[3] << 8)); 2355 2356 cipher_overhead = prot->tag_size; 2357 if (prot->version != TLS_1_3_VERSION && 2358 prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305) 2359 cipher_overhead += prot->iv_size; 2360 2361 if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead + 2362 prot->tail_size) { 2363 ret = -EMSGSIZE; 2364 goto read_failure; 2365 } 2366 if (data_len < cipher_overhead) { 2367 ret = -EBADMSG; 2368 goto read_failure; 2369 } 2370 2371 /* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */ 2372 if (header[1] != TLS_1_2_VERSION_MINOR || 2373 header[2] != TLS_1_2_VERSION_MAJOR) { 2374 ret = -EINVAL; 2375 goto read_failure; 2376 } 2377 2378 tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE, 2379 TCP_SKB_CB(skb)->seq + strp->stm.offset); 2380 return data_len + TLS_HEADER_SIZE; 2381 2382 read_failure: 2383 tls_err_abort(strp->sk, ret); 2384 2385 return ret; 2386 } 2387 2388 void tls_rx_msg_ready(struct tls_strparser *strp) 2389 { 2390 struct tls_sw_context_rx *ctx; 2391 2392 ctx = container_of(strp, struct tls_sw_context_rx, strp); 2393 ctx->saved_data_ready(strp->sk); 2394 } 2395 2396 static void tls_data_ready(struct sock *sk) 2397 { 2398 struct tls_context *tls_ctx = tls_get_ctx(sk); 2399 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 2400 struct sk_psock *psock; 2401 gfp_t alloc_save; 2402 2403 trace_sk_data_ready(sk); 2404 2405 alloc_save = sk->sk_allocation; 2406 sk->sk_allocation = GFP_ATOMIC; 2407 
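/* sk_data_ready can be invoked from softirq/BH context, so make any allocations the strparser performs on this path non-sleeping (GFP_ATOMIC). */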
tls_strp_data_ready(&ctx->strp); 2408 sk->sk_allocation = alloc_save; 2409 2410 psock = sk_psock_get(sk); 2411 if (psock) { 2412 if (!list_empty(&psock->ingress_msg)) 2413 ctx->saved_data_ready(sk); 2414 sk_psock_put(sk, psock); 2415 } 2416 } 2417 2418 void tls_sw_cancel_work_tx(struct tls_context *tls_ctx) 2419 { 2420 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 2421 2422 set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask); 2423 set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask); 2424 cancel_delayed_work_sync(&ctx->tx_work.work); 2425 } 2426 2427 void tls_sw_release_resources_tx(struct sock *sk) 2428 { 2429 struct tls_context *tls_ctx = tls_get_ctx(sk); 2430 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 2431 struct tls_rec *rec, *tmp; 2432 int pending; 2433 2434 /* Wait for any pending async encryptions to complete */ 2435 spin_lock_bh(&ctx->encrypt_compl_lock); 2436 ctx->async_notify = true; 2437 pending = atomic_read(&ctx->encrypt_pending); 2438 spin_unlock_bh(&ctx->encrypt_compl_lock); 2439 2440 if (pending) 2441 crypto_wait_req(-EINPROGRESS, &ctx->async_wait); 2442 2443 tls_tx_records(sk, -1); 2444 2445 /* Free up un-sent records in tx_list. First, free 2446 * the partially sent record if any at head of tx_list. 2447 */ 2448 if (tls_ctx->partially_sent_record) { 2449 tls_free_partial_record(sk, tls_ctx); 2450 rec = list_first_entry(&ctx->tx_list, 2451 struct tls_rec, list); 2452 list_del(&rec->list); 2453 sk_msg_free(sk, &rec->msg_plaintext); 2454 kfree(rec); 2455 } 2456 2457 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) { 2458 list_del(&rec->list); 2459 sk_msg_free(sk, &rec->msg_encrypted); 2460 sk_msg_free(sk, &rec->msg_plaintext); 2461 kfree(rec); 2462 } 2463 2464 crypto_free_aead(ctx->aead_send); 2465 tls_free_open_rec(sk); 2466 } 2467 2468 void tls_sw_free_ctx_tx(struct tls_context *tls_ctx) 2469 { 2470 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 2471 2472 kfree(ctx); 2473 } 2474 2475 void tls_sw_release_resources_rx(struct sock *sk) 2476 { 2477 struct tls_context *tls_ctx = tls_get_ctx(sk); 2478 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 2479 2480 if (ctx->aead_recv) { 2481 __skb_queue_purge(&ctx->rx_list); 2482 crypto_free_aead(ctx->aead_recv); 2483 tls_strp_stop(&ctx->strp); 2484 /* If tls_sw_strparser_arm() was not called (cleanup paths) 2485 * we still want to tls_strp_stop(), but sk->sk_data_ready was 2486 * never swapped. 
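 * (The swap is done by tls_sw_strparser_arm() under sk_callback_lock; the restore below takes the same lock.)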
2487 */ 2488 if (ctx->saved_data_ready) { 2489 write_lock_bh(&sk->sk_callback_lock); 2490 sk->sk_data_ready = ctx->saved_data_ready; 2491 write_unlock_bh(&sk->sk_callback_lock); 2492 } 2493 } 2494 } 2495 2496 void tls_sw_strparser_done(struct tls_context *tls_ctx) 2497 { 2498 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 2499 2500 tls_strp_done(&ctx->strp); 2501 } 2502 2503 void tls_sw_free_ctx_rx(struct tls_context *tls_ctx) 2504 { 2505 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 2506 2507 kfree(ctx); 2508 } 2509 2510 void tls_sw_free_resources_rx(struct sock *sk) 2511 { 2512 struct tls_context *tls_ctx = tls_get_ctx(sk); 2513 2514 tls_sw_release_resources_rx(sk); 2515 tls_sw_free_ctx_rx(tls_ctx); 2516 } 2517 2518 /* The work handler to transmit the encrypted records in tx_list */ 2519 static void tx_work_handler(struct work_struct *work) 2520 { 2521 struct delayed_work *delayed_work = to_delayed_work(work); 2522 struct tx_work *tx_work = container_of(delayed_work, 2523 struct tx_work, work); 2524 struct sock *sk = tx_work->sk; 2525 struct tls_context *tls_ctx = tls_get_ctx(sk); 2526 struct tls_sw_context_tx *ctx; 2527 2528 if (unlikely(!tls_ctx)) 2529 return; 2530 2531 ctx = tls_sw_ctx_tx(tls_ctx); 2532 if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask)) 2533 return; 2534 2535 if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) 2536 return; 2537 2538 if (mutex_trylock(&tls_ctx->tx_lock)) { 2539 lock_sock(sk); 2540 tls_tx_records(sk, -1); 2541 release_sock(sk); 2542 mutex_unlock(&tls_ctx->tx_lock); 2543 } else if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) { 2544 /* Someone is holding the tx_lock, they will likely run Tx 2545 * and cancel the work on their way out of the lock section. 2546 * Schedule a long delay just in case.
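 * (If the holder exits without flushing, the rescheduled work retries after ~10ms and takes tx_lock itself.)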
2547 */ 2548 schedule_delayed_work(&ctx->tx_work.work, msecs_to_jiffies(10)); 2549 } 2550 } 2551 2552 static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx) 2553 { 2554 struct tls_rec *rec; 2555 2556 rec = list_first_entry_or_null(&ctx->tx_list, struct tls_rec, list); 2557 if (!rec) 2558 return false; 2559 2560 return READ_ONCE(rec->tx_ready); 2561 } 2562 2563 void tls_sw_write_space(struct sock *sk, struct tls_context *ctx) 2564 { 2565 struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx); 2566 2567 /* Schedule the transmission if tx list is ready */ 2568 if (tls_is_tx_ready(tx_ctx) && 2569 !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask)) 2570 schedule_delayed_work(&tx_ctx->tx_work.work, 0); 2571 } 2572 2573 void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx) 2574 { 2575 struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx); 2576 2577 write_lock_bh(&sk->sk_callback_lock); 2578 rx_ctx->saved_data_ready = sk->sk_data_ready; 2579 sk->sk_data_ready = tls_data_ready; 2580 write_unlock_bh(&sk->sk_callback_lock); 2581 } 2582 2583 void tls_update_rx_zc_capable(struct tls_context *tls_ctx) 2584 { 2585 struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx); 2586 2587 rx_ctx->zc_capable = tls_ctx->rx_no_pad || 2588 tls_ctx->prot_info.version != TLS_1_3_VERSION; 2589 } 2590 2591 static struct tls_sw_context_tx *init_ctx_tx(struct tls_context *ctx, struct sock *sk) 2592 { 2593 struct tls_sw_context_tx *sw_ctx_tx; 2594 2595 if (!ctx->priv_ctx_tx) { 2596 sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL); 2597 if (!sw_ctx_tx) 2598 return NULL; 2599 } else { 2600 sw_ctx_tx = ctx->priv_ctx_tx; 2601 } 2602 2603 crypto_init_wait(&sw_ctx_tx->async_wait); 2604 spin_lock_init(&sw_ctx_tx->encrypt_compl_lock); 2605 INIT_LIST_HEAD(&sw_ctx_tx->tx_list); 2606 INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler); 2607 sw_ctx_tx->tx_work.sk = sk; 2608 2609 return sw_ctx_tx; 2610 } 2611 2612 static struct tls_sw_context_rx *init_ctx_rx(struct tls_context *ctx) 2613 { 2614 struct tls_sw_context_rx *sw_ctx_rx; 2615 2616 if (!ctx->priv_ctx_rx) { 2617 sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL); 2618 if (!sw_ctx_rx) 2619 return NULL; 2620 } else { 2621 sw_ctx_rx = ctx->priv_ctx_rx; 2622 } 2623 2624 crypto_init_wait(&sw_ctx_rx->async_wait); 2625 spin_lock_init(&sw_ctx_rx->decrypt_compl_lock); 2626 init_waitqueue_head(&sw_ctx_rx->wq); 2627 skb_queue_head_init(&sw_ctx_rx->rx_list); 2628 skb_queue_head_init(&sw_ctx_rx->async_hold); 2629 2630 return sw_ctx_rx; 2631 } 2632 2633 int init_prot_info(struct tls_prot_info *prot, 2634 const struct tls_crypto_info *crypto_info, 2635 const struct tls_cipher_desc *cipher_desc) 2636 { 2637 u16 nonce_size = cipher_desc->nonce; 2638 2639 if (crypto_info->version == TLS_1_3_VERSION) { 2640 nonce_size = 0; 2641 prot->aad_size = TLS_HEADER_SIZE; 2642 prot->tail_size = 1; 2643 } else { 2644 prot->aad_size = TLS_AAD_SPACE_SIZE; 2645 prot->tail_size = 0; 2646 } 2647 2648 /* Sanity-check the sizes for stack allocations. 
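 * (nonce_size feeds prot->prepend_size, which must fit fixed-size buffers such as header[] in tls_rx_msg_size(); aad_size is likewise bounded by TLS_MAX_AAD_SIZE.)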
*/ 2649 if (nonce_size > TLS_MAX_IV_SIZE || prot->aad_size > TLS_MAX_AAD_SIZE) 2650 return -EINVAL; 2651 2652 prot->version = crypto_info->version; 2653 prot->cipher_type = crypto_info->cipher_type; 2654 prot->prepend_size = TLS_HEADER_SIZE + nonce_size; 2655 prot->tag_size = cipher_desc->tag; 2656 prot->overhead_size = prot->prepend_size + prot->tag_size + prot->tail_size; 2657 prot->iv_size = cipher_desc->iv; 2658 prot->salt_size = cipher_desc->salt; 2659 prot->rec_seq_size = cipher_desc->rec_seq; 2660 2661 return 0; 2662 } 2663 2664 int tls_set_sw_offload(struct sock *sk, int tx) 2665 { 2666 struct tls_sw_context_tx *sw_ctx_tx = NULL; 2667 struct tls_sw_context_rx *sw_ctx_rx = NULL; 2668 const struct tls_cipher_desc *cipher_desc; 2669 struct tls_crypto_info *crypto_info; 2670 char *iv, *rec_seq, *key, *salt; 2671 struct cipher_context *cctx; 2672 struct tls_prot_info *prot; 2673 struct crypto_aead **aead; 2674 struct tls_context *ctx; 2675 struct crypto_tfm *tfm; 2676 int rc = 0; 2677 2678 ctx = tls_get_ctx(sk); 2679 prot = &ctx->prot_info; 2680 2681 if (tx) { 2682 ctx->priv_ctx_tx = init_ctx_tx(ctx, sk); 2683 if (!ctx->priv_ctx_tx) 2684 return -ENOMEM; 2685 2686 sw_ctx_tx = ctx->priv_ctx_tx; 2687 crypto_info = &ctx->crypto_send.info; 2688 cctx = &ctx->tx; 2689 aead = &sw_ctx_tx->aead_send; 2690 } else { 2691 ctx->priv_ctx_rx = init_ctx_rx(ctx); 2692 if (!ctx->priv_ctx_rx) 2693 return -ENOMEM; 2694 2695 sw_ctx_rx = ctx->priv_ctx_rx; 2696 crypto_info = &ctx->crypto_recv.info; 2697 cctx = &ctx->rx; 2698 aead = &sw_ctx_rx->aead_recv; 2699 } 2700 2701 cipher_desc = get_cipher_desc(crypto_info->cipher_type); 2702 if (!cipher_desc) { 2703 rc = -EINVAL; 2704 goto free_priv; 2705 } 2706 2707 rc = init_prot_info(prot, crypto_info, cipher_desc); 2708 if (rc) 2709 goto free_priv; 2710 2711 iv = crypto_info_iv(crypto_info, cipher_desc); 2712 key = crypto_info_key(crypto_info, cipher_desc); 2713 salt = crypto_info_salt(crypto_info, cipher_desc); 2714 rec_seq = crypto_info_rec_seq(crypto_info, cipher_desc); 2715 2716 memcpy(cctx->iv, salt, cipher_desc->salt); 2717 memcpy(cctx->iv + cipher_desc->salt, iv, cipher_desc->iv); 2718 memcpy(cctx->rec_seq, rec_seq, cipher_desc->rec_seq); 2719 2720 if (!*aead) { 2721 *aead = crypto_alloc_aead(cipher_desc->cipher_name, 0, 0); 2722 if (IS_ERR(*aead)) { 2723 rc = PTR_ERR(*aead); 2724 *aead = NULL; 2725 goto free_priv; 2726 } 2727 } 2728 2729 ctx->push_pending_record = tls_sw_push_pending_record; 2730 2731 rc = crypto_aead_setkey(*aead, key, cipher_desc->key); 2732 if (rc) 2733 goto free_aead; 2734 2735 rc = crypto_aead_setauthsize(*aead, prot->tag_size); 2736 if (rc) 2737 goto free_aead; 2738 2739 if (sw_ctx_rx) { 2740 tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv); 2741 2742 tls_update_rx_zc_capable(ctx); 2743 sw_ctx_rx->async_capable = 2744 crypto_info->version != TLS_1_3_VERSION && 2745 !!(tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC); 2746 2747 rc = tls_strp_init(&sw_ctx_rx->strp, sk); 2748 if (rc) 2749 goto free_aead; 2750 } 2751 2752 goto out; 2753 2754 free_aead: 2755 crypto_free_aead(*aead); 2756 *aead = NULL; 2757 free_priv: 2758 if (tx) { 2759 kfree(ctx->priv_ctx_tx); 2760 ctx->priv_ctx_tx = NULL; 2761 } else { 2762 kfree(ctx->priv_ctx_rx); 2763 ctx->priv_ctx_rx = NULL; 2764 } 2765 out: 2766 return rc; 2767 } 2768
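/*
 * Illustrative userspace sketch (not part of this build): roughly how the
 * SW RX path above is driven once a TLS 1.2 AES-128-GCM handshake has
 * completed in userspace.  The fd and the crypto_info contents are
 * placeholders that a real application fills in from its TLS library; the
 * socket options and the SOL_TLS/TLS_GET_RECORD_TYPE cmsg are the actual
 * kernel ABI (see Documentation/networking/tls.rst).
 *
 *	#include <sys/socket.h>
 *	#include <netinet/tcp.h>
 *	#include <linux/tls.h>
 *	// with older libc headers, SOL_TLS may need: #define SOL_TLS 282
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *		// .key / .iv / .salt / .rec_seq copied from handshake secrets
 *	};
 *
 *	// Attach the ULP, then hand RX key material to tls_set_sw_offload()
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	setsockopt(fd, SOL_TLS, TLS_RX, &ci, sizeof(ci));
 *
 *	// recvmsg() now returns plaintext; non-data records are flagged via a
 *	// control message, cf. tls_record_content_type() in tls_sw_recvmsg().
 *	char buf[4096], cbuf[CMSG_SPACE(1)];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	ssize_t n = recvmsg(fd, &msg, 0);
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	if (n > 0 && cmsg && cmsg->cmsg_level == SOL_TLS &&
 *	    cmsg->cmsg_type == TLS_GET_RECORD_TYPE) {
 *		unsigned char rec_type = *CMSG_DATA(cmsg);
 *		// rec_type != 23 (application_data): alert/handshake record
 *	}
 */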