/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bug.h>
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/splice.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>
#include <trace/events/sock.h>

#include "tls.h"

struct tls_decrypt_arg {
	struct_group(inargs,
	bool zc;
	bool async;
	bool async_done;
	u8 tail;
	);

	struct sk_buff *skb;
};

struct tls_decrypt_ctx {
	struct sock *sk;
	u8 iv[TLS_MAX_IV_SIZE];
	u8 aad[TLS_MAX_AAD_SIZE];
	u8 tail;
	bool free_sgout;
	struct scatterlist sg[];
};

noinline void tls_err_abort(struct sock *sk, int err)
{
	WARN_ON_ONCE(err >= 0);
	/* sk->sk_err should contain a positive error code. */
	WRITE_ONCE(sk->sk_err, -err);
	/* Paired with smp_rmb() in tcp_poll() */
	smp_wmb();
	sk_error_report(sk);
}
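
/* Count the sg entries needed to map the skb's linear area, page frags and
 * frag_list. The recursion cap of 24 mirrors the one used by
 * __skb_to_sgvec().
 */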
static int __skb_nsg(struct sk_buff *skb, int offset, int len,
		     unsigned int recursion_level)
{
	int start = skb_headlen(skb);
	int i, chunk = start - offset;
	struct sk_buff *frag_iter;
	int elt = 0;

	if (unlikely(recursion_level >= 24))
		return -EMSGSIZE;

	if (chunk > 0) {
		if (chunk > len)
			chunk = len;
		elt++;
		len -= chunk;
		if (len == 0)
			return elt;
		offset += chunk;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		chunk = end - offset;
		if (chunk > 0) {
			if (chunk > len)
				chunk = len;
			elt++;
			len -= chunk;
			if (len == 0)
				return elt;
			offset += chunk;
		}
		start = end;
	}

	if (unlikely(skb_has_frag_list(skb))) {
		skb_walk_frags(skb, frag_iter) {
			int end, ret;

			WARN_ON(start > offset + len);

			end = start + frag_iter->len;
			chunk = end - offset;
			if (chunk > 0) {
				if (chunk > len)
					chunk = len;
				ret = __skb_nsg(frag_iter, offset - start, chunk,
						recursion_level + 1);
				if (unlikely(ret < 0))
					return ret;
				elt += ret;
				len -= chunk;
				if (len == 0)
					return elt;
				offset += chunk;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}

/* Return the number of scatterlist elements required to completely map the
 * skb, or -EMSGSIZE if the recursion depth is exceeded.
 */
static int skb_nsg(struct sk_buff *skb, int offset, int len)
{
	return __skb_nsg(skb, offset, len, 0);
}

static int tls_padding_length(struct tls_prot_info *prot, struct sk_buff *skb,
			      struct tls_decrypt_arg *darg)
{
	struct strp_msg *rxm = strp_msg(skb);
	struct tls_msg *tlm = tls_msg(skb);
	int sub = 0;

	/* Determine zero-padding length */
	if (prot->version == TLS_1_3_VERSION) {
		int offset = rxm->full_len - TLS_TAG_SIZE - 1;
		char content_type = darg->zc ? darg->tail : 0;
		int err;

		while (content_type == 0) {
			if (offset < prot->prepend_size)
				return -EBADMSG;
			err = skb_copy_bits(skb, rxm->offset + offset,
					    &content_type, 1);
			if (err)
				return err;
			if (content_type)
				break;
			sub++;
			offset--;
		}
		tlm->control = content_type;
	}
	return sub;
}
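
/* Completion callback for async decrypt requests; it may run from the crypto
 * backend's completion context, so it only records errors, releases the
 * out-of-place destination pages and wakes up a waiter draining
 * decrypt_pending.
 */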
static void tls_decrypt_done(void *data, int err)
{
	struct aead_request *aead_req = data;
	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
	struct scatterlist *sgout = aead_req->dst;
	struct tls_sw_context_rx *ctx;
	struct tls_decrypt_ctx *dctx;
	struct tls_context *tls_ctx;
	struct scatterlist *sg;
	unsigned int pages;
	struct sock *sk;
	int aead_size;

	/* If requests get too backlogged crypto API returns -EBUSY and calls
	 * ->complete(-EINPROGRESS) immediately followed by ->complete(0)
	 * to make waiting for backlog to flush with crypto_wait_req() easier.
	 * First wait converts -EBUSY -> -EINPROGRESS, and the second one
	 * -EINPROGRESS -> 0.
	 * We have a single struct crypto_async_request per direction, this
	 * scheme doesn't help us, so just ignore the first ->complete().
	 */
	if (err == -EINPROGRESS)
		return;

	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(aead);
	aead_size = ALIGN(aead_size, __alignof__(*dctx));
	dctx = (void *)((u8 *)aead_req + aead_size);

	sk = dctx->sk;
	tls_ctx = tls_get_ctx(sk);
	ctx = tls_sw_ctx_rx(tls_ctx);

	/* Propagate the error if there was one */
	if (err) {
		if (err == -EBADMSG)
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
		ctx->async_wait.err = err;
		tls_err_abort(sk, err);
	}

	/* Free the destination pages if skb was not decrypted inplace */
	if (dctx->free_sgout) {
		/* Skip the first S/G entry as it points to AAD */
		for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
			if (!sg)
				break;
			put_page(sg_page(sg));
		}
	}

	kfree(aead_req);

	if (atomic_dec_and_test(&ctx->decrypt_pending))
		complete(&ctx->async_wait.completion);
}

static int tls_decrypt_async_wait(struct tls_sw_context_rx *ctx)
{
	if (!atomic_dec_and_test(&ctx->decrypt_pending))
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
	atomic_inc(&ctx->decrypt_pending);

	__skb_queue_purge(&ctx->async_hold);
	return ctx->async_wait.err;
}

static int tls_do_decryption(struct sock *sk,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct aead_request *aead_req,
			     struct tls_decrypt_arg *darg)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	int ret;

	aead_request_set_tfm(aead_req, ctx->aead_recv);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + prot->tag_size,
			       (u8 *)iv_recv);

	if (darg->async) {
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  tls_decrypt_done, aead_req);
		DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->decrypt_pending) < 1);
		atomic_inc(&ctx->decrypt_pending);
	} else {
		DECLARE_CRYPTO_WAIT(wait);

		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &wait);
		ret = crypto_aead_decrypt(aead_req);
		if (ret == -EINPROGRESS || ret == -EBUSY)
			ret = crypto_wait_req(ret, &wait);
		return ret;
	}

	ret = crypto_aead_decrypt(aead_req);
	if (ret == -EINPROGRESS)
		return 0;

	if (ret == -EBUSY) {
		ret = tls_decrypt_async_wait(ctx);
		darg->async_done = true;
		/* all completions have run, we're not doing async anymore */
		darg->async = false;
		return ret;
	}

	atomic_dec(&ctx->decrypt_pending);
	darg->async = false;

	return ret;
}

static void tls_trim_both_msgs(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	sk_msg_trim(sk, &rec->msg_plaintext, target_size);
	if (target_size > 0)
		target_size += prot->overhead_size;
	sk_msg_trim(sk, &rec->msg_encrypted, target_size);
}

static int tls_alloc_encrypted_msg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;

	return sk_msg_alloc(sk, msg_en, len, 0);
}

static int tls_clone_plaintext_msg(struct sock *sk, int required)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl = &rec->msg_plaintext;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	int skip, len;

	/* We add page references worth len bytes from encrypted sg
	 * at the end of plaintext sg. It is guaranteed that msg_en
	 * has enough required room (ensured by caller).
	 */
	len = required - msg_pl->sg.size;

	/* Skip initial bytes in msg_en's data to be able to use
	 * same offset of both plain and encrypted data.
	 */
	skip = prot->prepend_size + msg_pl->sg.size;

	return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
}

static struct tls_rec *tls_get_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int mem_size;

	mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);

	rec = kzalloc(mem_size, sk->sk_allocation);
	if (!rec)
		return NULL;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	sk_msg_init(msg_pl);
	sk_msg_init(msg_en);

	sg_init_table(rec->sg_aead_in, 2);
	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_in[1]);

	sg_init_table(rec->sg_aead_out, 2);
	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_out[1]);

	rec->sk = sk;

	return rec;
}

static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
{
	sk_msg_free(sk, &rec->msg_encrypted);
	sk_msg_free(sk, &rec->msg_plaintext);
	kfree(rec);
}

static void tls_free_open_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	if (rec) {
		tls_free_rec(sk, rec);
		ctx->open_rec = NULL;
	}
}
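
/* Push records from tx_list to the transport. A partially sent record at the
 * head of the list is finished first; after that, only records whose
 * encryption has completed (tx_ready) are transmitted, in order.
 */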
int tls_tx_records(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;
	struct sk_msg *msg_en;
	int tx_flags, rc = 0;

	if (tls_is_partially_sent_record(tls_ctx)) {
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);

		if (flags == -1)
			tx_flags = rec->tx_flags;
		else
			tx_flags = flags;

		rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
		if (rc)
			goto tx_err;

		/* Full record has been transmitted.
		 * Remove the head of tx_list
		 */
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	/* Tx all ready records */
	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		if (READ_ONCE(rec->tx_ready)) {
			if (flags == -1)
				tx_flags = rec->tx_flags;
			else
				tx_flags = flags;

			msg_en = &rec->msg_encrypted;
			rc = tls_push_sg(sk, tls_ctx,
					 &msg_en->sg.data[msg_en->sg.curr],
					 0, tx_flags);
			if (rc)
				goto tx_err;

			list_del(&rec->list);
			sk_msg_free(sk, &rec->msg_plaintext);
			kfree(rec);
		} else {
			break;
		}
	}

tx_err:
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk, rc);

	return rc;
}

static void tls_encrypt_done(void *data, int err)
{
	struct tls_sw_context_tx *ctx;
	struct tls_context *tls_ctx;
	struct tls_prot_info *prot;
	struct tls_rec *rec = data;
	struct scatterlist *sge;
	struct sk_msg *msg_en;
	struct sock *sk;

	if (err == -EINPROGRESS) /* see the comment in tls_decrypt_done() */
		return;

	msg_en = &rec->msg_encrypted;

	sk = rec->sk;
	tls_ctx = tls_get_ctx(sk);
	prot = &tls_ctx->prot_info;
	ctx = tls_sw_ctx_tx(tls_ctx);

	sge = sk_msg_elem(msg_en, msg_en->sg.curr);
	sge->offset -= prot->prepend_size;
	sge->length += prot->prepend_size;

	/* Check if an error was previously set on the socket */
	if (err || sk->sk_err) {
		rec = NULL;

		/* If err is already set on socket, return the same code */
		if (sk->sk_err) {
			ctx->async_wait.err = -sk->sk_err;
		} else {
			ctx->async_wait.err = err;
			tls_err_abort(sk, err);
		}
	}

	if (rec) {
		struct tls_rec *first_rec;

		/* Mark the record as ready for transmission */
		smp_store_mb(rec->tx_ready, true);

		/* If received record is at head of tx_list, schedule tx */
		first_rec = list_first_entry(&ctx->tx_list,
					     struct tls_rec, list);
		if (rec == first_rec) {
			/* Schedule the transmission */
			if (!test_and_set_bit(BIT_TX_SCHEDULED,
					      &ctx->tx_bitmask))
				schedule_delayed_work(&ctx->tx_work.work, 1);
		}
	}

	if (atomic_dec_and_test(&ctx->encrypt_pending))
		complete(&ctx->async_wait.completion);
}

static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx)
{
	if (!atomic_dec_and_test(&ctx->encrypt_pending))
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
	atomic_inc(&ctx->encrypt_pending);

	return ctx->async_wait.err;
}

static int tls_do_encryption(struct sock *sk,
			     struct tls_context *tls_ctx,
			     struct tls_sw_context_tx *ctx,
			     struct aead_request *aead_req,
			     size_t data_len, u32 start)
{
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	struct scatterlist *sge = sk_msg_elem(msg_en, start);
	int rc, iv_offset = 0;

	/* For CCM based ciphers, first byte of IV is a constant */
	switch (prot->cipher_type) {
	case TLS_CIPHER_AES_CCM_128:
		rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	case TLS_CIPHER_SM4_CCM:
		rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	}

	memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
	       prot->iv_size + prot->salt_size);

	tls_xor_iv_with_seq(prot, rec->iv_data + iv_offset,
			    tls_ctx->tx.rec_seq);

	sge->offset += prot->prepend_size;
	sge->length -= prot->prepend_size;
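
	/* The record header occupies the first prepend_size bytes of this sg
	 * entry (written by tls_fill_prepend() in tls_push_record()); the
	 * adjustment above makes the AEAD write the ciphertext right after
	 * the header. The entry is restored once encryption completes.
	 */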
	msg_en->sg.curr = start;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, rec->sg_aead_in,
			       rec->sg_aead_out,
			       data_len, rec->iv_data);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tls_encrypt_done, rec);

	/* Add the record in tx_list */
	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
	DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->encrypt_pending) < 1);
	atomic_inc(&ctx->encrypt_pending);

	rc = crypto_aead_encrypt(aead_req);
	if (rc == -EBUSY) {
		rc = tls_encrypt_async_wait(ctx);
		rc = rc ?: -EINPROGRESS;
		/* The async callback tls_encrypt_done() has already
		 * decremented encrypt_pending and restored the sge on
		 * both success and error. Skip the synchronous cleanup
		 * below on error, just remove the record and return.
		 */
		if (rc != -EINPROGRESS) {
			list_del(&rec->list);
			return rc;
		}
	}
	if (!rc || rc != -EINPROGRESS) {
		atomic_dec(&ctx->encrypt_pending);
		sge->offset -= prot->prepend_size;
		sge->length += prot->prepend_size;
	}

	if (!rc) {
		WRITE_ONCE(rec->tx_ready, true);
	} else if (rc != -EINPROGRESS) {
		list_del(&rec->list);
		return rc;
	}

	/* Unhook the record from the context unless encryption failed */
	ctx->open_rec = NULL;
	tls_advance_record_sn(sk, prot, &tls_ctx->tx);
	return rc;
}

static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
				 struct tls_rec **to, struct sk_msg *msg_opl,
				 struct sk_msg *msg_oen, u32 split_point,
				 u32 tx_overhead_size, u32 *orig_end)
{
	u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
	struct scatterlist *sge, *osge, *nsge;
	u32 orig_size = msg_opl->sg.size;
	struct scatterlist tmp = { };
	struct sk_msg *msg_npl;
	struct tls_rec *new;
	int ret;

	new = tls_get_rec(sk);
	if (!new)
		return -ENOMEM;
	ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
			   tx_overhead_size, 0);
	if (ret < 0) {
		tls_free_rec(sk, new);
		return ret;
	}

	*orig_end = msg_opl->sg.end;
	i = msg_opl->sg.start;
	sge = sk_msg_elem(msg_opl, i);
	while (apply && sge->length) {
		if (sge->length > apply) {
			u32 len = sge->length - apply;

			get_page(sg_page(sge));
			sg_set_page(&tmp, sg_page(sge), len,
				    sge->offset + apply);
			sge->length = apply;
			bytes += apply;
			apply = 0;
		} else {
			apply -= sge->length;
			bytes += sge->length;
		}

		sk_msg_iter_var_next(i);
		if (i == msg_opl->sg.end)
			break;
		sge = sk_msg_elem(msg_opl, i);
	}

	msg_opl->sg.end = i;
	msg_opl->sg.curr = i;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = 0;
	msg_opl->sg.size = bytes;

	msg_npl = &new->msg_plaintext;
	msg_npl->apply_bytes = apply;
	msg_npl->sg.size = orig_size - bytes;

	j = msg_npl->sg.start;
	nsge = sk_msg_elem(msg_npl, j);
	if (tmp.length) {
		memcpy(nsge, &tmp, sizeof(*nsge));
		sk_msg_iter_var_next(j);
		nsge = sk_msg_elem(msg_npl, j);
	}

	osge = sk_msg_elem(msg_opl, i);
	while (osge->length) {
		memcpy(nsge, osge, sizeof(*nsge));
		sg_unmark_end(nsge);
		sk_msg_iter_var_next(i);
		sk_msg_iter_var_next(j);
		if (i == *orig_end)
			break;
		osge = sk_msg_elem(msg_opl, i);
		nsge = sk_msg_elem(msg_npl, j);
	}

	msg_npl->sg.end = j;
	msg_npl->sg.curr = j;
	msg_npl->sg.copybreak = 0;

	*to = new;
	return 0;
}
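
/* Undo a tls_split_open_record(): fold the plaintext of 'from' back into
 * 'to', coalescing the boundary sg entries when they are contiguous within
 * the same page, and transfer the encrypted buffer back.
 */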
static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
				  struct tls_rec *from, u32 orig_end)
{
	struct sk_msg *msg_npl = &from->msg_plaintext;
	struct sk_msg *msg_opl = &to->msg_plaintext;
	struct scatterlist *osge, *nsge;
	u32 i, j;

	i = msg_opl->sg.end;
	sk_msg_iter_var_prev(i);
	j = msg_npl->sg.start;

	osge = sk_msg_elem(msg_opl, i);
	nsge = sk_msg_elem(msg_npl, j);

	if (sg_page(osge) == sg_page(nsge) &&
	    osge->offset + osge->length == nsge->offset) {
		osge->length += nsge->length;
		put_page(sg_page(nsge));
	}

	msg_opl->sg.end = orig_end;
	msg_opl->sg.curr = orig_end;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
	msg_opl->sg.size += msg_npl->sg.size;

	sk_msg_free(sk, &to->msg_encrypted);
	sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);

	kfree(from);
}
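
/* Encrypt the currently open record and queue it for transmission. When the
 * BPF apply_bytes watermark or the encrypted buffer size requires it, the
 * open record is split and the remainder stays open in ctx->open_rec.
 */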
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
	u32 i, split_point, orig_end;
	struct sk_msg *msg_pl, *msg_en;
	struct aead_request *req;
	bool split;
	int rc;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	split_point = msg_pl->apply_bytes;
	split = split_point && split_point < msg_pl->sg.size;
	if (unlikely((!split &&
		      msg_pl->sg.size +
		      prot->overhead_size > msg_en->sg.size) ||
		     (split &&
		      split_point +
		      prot->overhead_size > msg_en->sg.size))) {
		split = true;
		split_point = msg_en->sg.size;
	}
	if (split) {
		rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
					   split_point, prot->overhead_size,
					   &orig_end);
		if (rc < 0)
			return rc;
		/* This can happen if above tls_split_open_record allocates
		 * a single large encryption buffer instead of two smaller
		 * ones. In this case adjust pointers and continue without
		 * split.
		 */
		if (!msg_pl->sg.size) {
			tls_merge_open_record(sk, rec, tmp, orig_end);
			msg_pl = &rec->msg_plaintext;
			msg_en = &rec->msg_encrypted;
			split = false;
		}
		sk_msg_trim(sk, msg_en, msg_pl->sg.size +
			    prot->overhead_size);
	}

	rec->tx_flags = flags;
	req = &rec->aead_req;

	i = msg_pl->sg.end;
	sk_msg_iter_var_prev(i);

	rec->content_type = record_type;
	if (prot->version == TLS_1_3_VERSION) {
		/* Add content type to end of message.  No padding added */
		sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
		sg_mark_end(&rec->sg_content_type);
		sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
			 &rec->sg_content_type);
	} else {
		sg_mark_end(sk_msg_elem(msg_pl, i));
	}

	if (msg_pl->sg.end < msg_pl->sg.start) {
		sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
			 MAX_SKB_FRAGS - msg_pl->sg.start + 1,
			 msg_pl->sg.data);
	}

	i = msg_pl->sg.start;
	sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);

	i = msg_en->sg.end;
	sk_msg_iter_var_prev(i);
	sg_mark_end(sk_msg_elem(msg_en, i));

	i = msg_en->sg.start;
	sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);

	tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
		     tls_ctx->tx.rec_seq, record_type, prot);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&msg_en->sg.data[i])) +
			 msg_en->sg.data[i].offset,
			 msg_pl->sg.size + prot->tail_size,
			 record_type);

	tls_ctx->pending_open_record_frags = false;

	rc = tls_do_encryption(sk, tls_ctx, ctx, req,
			       msg_pl->sg.size + prot->tail_size, i);
	if (rc < 0) {
		if (rc != -EINPROGRESS) {
			tls_err_abort(sk, -EBADMSG);
			if (split) {
				tls_ctx->pending_open_record_frags = true;
				tls_merge_open_record(sk, rec, tmp, orig_end);
			}
		}
		ctx->async_capable = 1;
		return rc;
	} else if (split) {
		msg_pl = &tmp->msg_plaintext;
		msg_en = &tmp->msg_encrypted;
		sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
		tls_ctx->pending_open_record_frags = true;
		ctx->open_rec = tmp;
	}

	return tls_tx_records(sk, flags);
}
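
/* Run the sockmap msg verdict program (if any) on the open record and act on
 * the result: __SK_PASS encrypts and sends it here, __SK_REDIRECT hands the
 * plaintext to another socket, __SK_DROP frees it and returns -EACCES.
 */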
static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
			       bool full_record, u8 record_type,
			       ssize_t *copied, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg msg_redir = { };
	struct sk_psock *psock;
	struct sock *sk_redir;
	struct tls_rec *rec;
	bool enospc, policy, redir_ingress;
	int err = 0, send;
	u32 delta = 0;

	policy = !(flags & MSG_SENDPAGE_NOPOLICY);
	psock = sk_psock_get(sk);
	if (!psock || !policy) {
		err = tls_push_record(sk, flags, record_type);
		if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			err = -sk->sk_err;
		}
		if (psock)
			sk_psock_put(sk, psock);
		return err;
	}
more_data:
	enospc = sk_msg_full(msg);
	if (psock->eval == __SK_NONE) {
		delta = msg->sg.size;
		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
		delta -= msg->sg.size;

		if ((s32)delta > 0) {
			/* It indicates that we executed bpf_msg_pop_data(),
			 * causing the plaintext data size to decrease.
			 * Therefore the encrypted data size also needs to
			 * correspondingly decrease. We only need to subtract
			 * delta to calculate the new ciphertext length since
			 * ktls does not support block encryption.
			 */
			struct sk_msg *enc = &ctx->open_rec->msg_encrypted;

			sk_msg_trim(sk, enc, enc->sg.size - delta);
		}
	}
	if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
	    !enospc && !full_record) {
		err = -ENOSPC;
		goto out_err;
	}
	msg->cork_bytes = 0;
	send = msg->sg.size;
	if (msg->apply_bytes && msg->apply_bytes < send)
		send = msg->apply_bytes;

	switch (psock->eval) {
	case __SK_PASS:
		err = tls_push_record(sk, flags, record_type);
		if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			err = -sk->sk_err;
			goto out_err;
		}
		break;
	case __SK_REDIRECT:
		redir_ingress = psock->redir_ingress;
		sk_redir = psock->sk_redir;
		memcpy(&msg_redir, msg, sizeof(*msg));
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		sk_msg_return_zero(sk, msg, send);
		msg->sg.size -= send;
		release_sock(sk);
		err = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress,
					    &msg_redir, send, flags);
		lock_sock(sk);
		if (err < 0) {
			/* Regardless of whether the data represented by
			 * msg_redir is sent successfully, we have already
			 * uncharged it via sk_msg_return_zero(). The
			 * msg->sg.size represents the remaining unprocessed
			 * data, which needs to be uncharged here.
			 */
			sk_mem_uncharge(sk, msg->sg.size);
			*copied -= sk_msg_free_nocharge(sk, &msg_redir);
			msg->sg.size = 0;
		}
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		break;
	case __SK_DROP:
	default:
		sk_msg_free_partial(sk, msg, send);
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		*copied -= (send + delta);
		err = -EACCES;
	}

	if (likely(!err)) {
		bool reset_eval = !ctx->open_rec;

		rec = ctx->open_rec;
		if (rec) {
			msg = &rec->msg_plaintext;
			if (!msg->apply_bytes)
				reset_eval = true;
		}
		if (reset_eval) {
			psock->eval = __SK_NONE;
			if (psock->sk_redir) {
				sock_put(psock->sk_redir);
				psock->sk_redir = NULL;
			}
		}
		if (rec)
			goto more_data;
	}
out_err:
	sk_psock_put(sk, psock);
	return err;
}

static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl;
	size_t copied;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	copied = msg_pl->sg.size;
	if (!copied)
		return 0;

	return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
				   &copied, flags);
}
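
/* MSG_SPLICE_PAGES path: take page references straight from the iterator and
 * attach them to the plaintext message instead of copying the data.
 */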
static int tls_sw_sendmsg_splice(struct sock *sk, struct msghdr *msg,
				 struct sk_msg *msg_pl, size_t try_to_copy,
				 ssize_t *copied)
{
	struct page *page = NULL, **pages = &page;

	do {
		ssize_t part;
		size_t off;

		part = iov_iter_extract_pages(&msg->msg_iter, &pages,
					      try_to_copy, 1, 0, &off);
		if (part <= 0)
			return part ?: -EIO;

		if (WARN_ON_ONCE(!sendpage_ok(page))) {
			iov_iter_revert(&msg->msg_iter, part);
			return -EIO;
		}

		sk_msg_page_add(msg_pl, page, part, off);
		msg_pl->sg.copybreak = 0;
		msg_pl->sg.curr = msg_pl->sg.end;
		sk_mem_charge(sk, part);
		*copied += part;
		try_to_copy -= part;
	} while (try_to_copy && !sk_msg_full(msg_pl));

	return 0;
}

static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
				 size_t size)
{
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	bool async_capable = ctx->async_capable;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy;
	ssize_t copied = 0;
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int required_size;
	int num_async = 0;
	bool full_record;
	int record_room;
	int num_zc = 0;
	int orig_size;
	int ret = 0;

	if (!eor && (msg->msg_flags & MSG_EOR))
		return -EINVAL;

	if (unlikely(msg->msg_controllen)) {
		ret = tls_process_cmsg(sk, msg, &record_type);
		if (ret) {
			if (ret == -EINPROGRESS)
				num_async++;
			else if (ret != -EAGAIN)
				goto end;
		}
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto send_end;
		}

		msg_pl = &rec->msg_plaintext;
		msg_en = &rec->msg_encrypted;

		orig_size = msg_pl->sg.size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = tls_ctx->tx_max_payload_len - msg_pl->sg.size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + try_to_copy +
				prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;

alloc_encrypted:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_en->sg.size;
			full_record = true;
		}

		if (try_to_copy && (msg->msg_flags & MSG_SPLICE_PAGES)) {
			ret = tls_sw_sendmsg_splice(sk, msg, msg_pl,
						    try_to_copy, &copied);
			if (ret < 0)
				goto send_end;
			tls_ctx->pending_open_record_frags = true;

			if (sk_msg_full(msg_pl)) {
				full_record = true;
				sk_msg_trim(sk, msg_en,
					    msg_pl->sg.size + prot->overhead_size);
			}

			if (full_record || eor)
				goto copied;
			continue;
		}

		if (!is_kvec && (full_record || eor) && !async_capable) {
			u32 first = msg_pl->sg.end;

			ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
							msg_pl, try_to_copy);
			if (ret)
				goto fallback_to_reg_send;

			num_zc++;
			copied += try_to_copy;

			sk_msg_sg_copy_set(msg_pl, first);
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ctx->open_rec && ret == -ENOSPC) {
					if (msg_pl->cork_bytes) {
						ret = 0;
						goto send_end;
					}
					goto rollback_iter;
				} else if (ret != -EAGAIN)
					goto send_end;
			}

			/* Transmit if any encryptions have completed */
			if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
				cancel_delayed_work(&ctx->tx_work.work);
				tls_tx_records(sk, msg->msg_flags);
			}

			continue;
rollback_iter:
			copied -= try_to_copy;
			sk_msg_sg_copy_clear(msg_pl, first);
			iov_iter_revert(&msg->msg_iter,
					msg_pl->sg.size - orig_size);
fallback_to_reg_send:
			sk_msg_trim(sk, msg_pl, orig_size);
		}

		required_size = msg_pl->sg.size + try_to_copy;

		ret = tls_clone_plaintext_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto send_end;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_pl->sg.size;
			full_record = true;
			sk_msg_trim(sk, msg_en,
				    msg_pl->sg.size + prot->overhead_size);
		}

		if (try_to_copy) {
			ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
						       msg_pl, try_to_copy);
			if (ret < 0)
				goto trim_sgl;
		}

		/* Open records defined only if successfully copied, otherwise
		 * we would trim the sg but not reset the open record frags.
		 */
		tls_ctx->pending_open_record_frags = true;
		copied += try_to_copy;
copied:
		if (full_record || eor) {
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto send_end;
				}
			}

			/* Transmit if any encryptions have completed */
			if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
				cancel_delayed_work(&ctx->tx_work.work);
				tls_tx_records(sk, msg->msg_flags);
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			if (ctx->open_rec)
				tls_trim_both_msgs(sk, orig_size);
			goto send_end;
		}

		if (ctx->open_rec && msg_en->sg.size < required_size)
			goto alloc_encrypted;
	}

send_end:
	if (!num_async) {
		goto end;
	} else if (num_zc || eor) {
		int err;

		/* Wait for pending encryptions to get completed */
		err = tls_encrypt_async_wait(ctx);
		if (err) {
			ret = err;
			copied = 0;
		}
	}

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, msg->msg_flags);
	}

end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);
	return copied > 0 ? copied : ret;
}

int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	int ret;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
			       MSG_CMSG_COMPAT | MSG_SPLICE_PAGES | MSG_EOR |
			       MSG_SENDPAGE_NOPOLICY))
		return -EOPNOTSUPP;

	ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
	if (ret)
		return ret;
	lock_sock(sk);
	ret = tls_sw_sendmsg_locked(sk, msg, size);
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return ret;
}

/*
 * Handle unexpected EOF during splice without SPLICE_F_MORE set.
 */
void tls_sw_splice_eof(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec;
	struct sk_msg *msg_pl;
	ssize_t copied = 0;
	bool retrying = false;
	int ret = 0;

	if (!ctx->open_rec)
		return;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

retry:
	/* same checks as in tls_sw_push_pending_record() */
	rec = ctx->open_rec;
	if (!rec)
		goto unlock;

	msg_pl = &rec->msg_plaintext;
	if (msg_pl->sg.size == 0)
		goto unlock;

	/* Check the BPF advisor and perform transmission. */
	ret = bpf_exec_tx_verdict(msg_pl, sk, false, TLS_RECORD_TYPE_DATA,
				  &copied, 0);
	switch (ret) {
	case 0:
	case -EAGAIN:
		if (retrying)
			goto unlock;
		retrying = true;
		goto retry;
	case -EINPROGRESS:
		break;
	default:
		goto unlock;
	}

	/* Wait for pending encryptions to get completed */
	if (tls_encrypt_async_wait(ctx))
		goto unlock;

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, 0);
	}

unlock:
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
}

static int
tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
		bool released)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int ret = 0;
	long timeo;

	/* a rekey is pending, let userspace deal with it */
	if (unlikely(ctx->key_update_pending))
		return -EKEYEXPIRED;

	timeo = sock_rcvtimeo(sk, nonblock);

	while (!tls_strp_msg_ready(ctx)) {
		if (!sk_psock_queue_empty(psock))
			return 0;

		if (sk->sk_err)
			return sock_error(sk);

		if (ret < 0)
			return ret;

		if (!skb_queue_empty(&sk->sk_receive_queue)) {
			tls_strp_check_rcv(&ctx->strp);
			if (tls_strp_msg_ready(ctx))
				break;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;

		if (sock_flag(sk, SOCK_DONE))
			return 0;

		if (!timeo)
			return -EAGAIN;

		released = true;
		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		ret = sk_wait_event(sk, &timeo,
				    tls_strp_msg_ready(ctx) ||
				    !sk_psock_queue_empty(psock),
				    &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current))
			return sock_intr_errno(timeo);
	}

	if (unlikely(!tls_strp_msg_load(&ctx->strp, released)))
		return tls_rx_rec_wait(sk, psock, nonblock, false);

	return 1;
}

static int tls_setup_from_iter(struct iov_iter *from,
			       int length, int *pages_used,
			       struct scatterlist *to,
			       int to_max_pages)
{
	int rc = 0, i = 0, num_elem = *pages_used, maxpages;
	struct page *pages[MAX_SKB_FRAGS];
	unsigned int size = 0;
	ssize_t copied, use;
	size_t offset;

	while (length > 0) {
		i = 0;
		maxpages = to_max_pages - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages2(from, pages,
					     length,
					     maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&to[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&to[num_elem]);
			/* We do not uncharge memory from this API */

			offset = 0;
			copied -= use;

			i++;
			num_elem++;
		}
	}
	/* Mark the end in the last sg entry if newly added */
	if (num_elem > *pages_used)
		sg_mark_end(&to[num_elem - 1]);
out:
	if (rc)
		iov_iter_revert(from, size);
	*pages_used = num_elem;

	return rc;
}
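
/* Allocate an skb big enough to hold the decrypted copy of a full record.
 * The header is copied from the source skb so that the strparser message
 * state (strp_msg) stays usable on the new skb.
 */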
static struct sk_buff *
tls_alloc_clrtxt_skb(struct sock *sk, struct sk_buff *skb,
		     unsigned int full_len)
{
	struct strp_msg *clr_rxm;
	struct sk_buff *clr_skb;
	int err;

	clr_skb = alloc_skb_with_frags(0, full_len, TLS_PAGE_ORDER,
				       &err, sk->sk_allocation);
	if (!clr_skb)
		return NULL;

	skb_copy_header(clr_skb, skb);
	clr_skb->len = full_len;
	clr_skb->data_len = full_len;

	clr_rxm = strp_msg(clr_skb);
	clr_rxm->offset = 0;

	return clr_skb;
}

/* Decrypt handlers
 *
 * tls_decrypt_sw() and tls_decrypt_device() are decrypt handlers.
 * They must transform the darg in/out argument as follows:
 *       |          Input            |         Output
 * -------------------------------------------------------------------
 *    zc | Zero-copy decrypt allowed | Zero-copy performed
 * async | Async decrypt allowed     | Async crypto used / in progress
 *   skb |            *              | Output skb
 *
 * If ZC decryption was performed darg.skb will point to the input skb.
 */

/* This function decrypts the input skb into either out_iov or in out_sg
 * or in skb buffers itself. The input parameter 'darg->zc' indicates if
 * zero-copy mode needs to be tried or not. With zero-copy mode, either
 * out_iov or out_sg must be non-NULL. In case both out_iov and out_sg are
 * NULL, then the decryption happens inside skb buffers itself, i.e.
 * zero-copy gets disabled and 'darg->zc' is updated.
 */
static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
			  struct scatterlist *out_sg,
			  struct tls_decrypt_arg *darg)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	int n_sgin, n_sgout, aead_size, err, pages = 0;
	struct sk_buff *skb = tls_strp_msg(ctx);
	const struct strp_msg *rxm = strp_msg(skb);
	const struct tls_msg *tlm = tls_msg(skb);
	struct aead_request *aead_req;
	struct scatterlist *sgin = NULL;
	struct scatterlist *sgout = NULL;
	const int data_len = rxm->full_len - prot->overhead_size;
	int tail_pages = !!prot->tail_size;
	struct tls_decrypt_ctx *dctx;
	struct sk_buff *clear_skb;
	int iv_offset = 0;
	u8 *mem;

	n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
			 rxm->full_len - prot->prepend_size);
	if (n_sgin < 1)
		return n_sgin ?: -EBADMSG;

	if (darg->zc && (out_iov || out_sg)) {
		clear_skb = NULL;

		if (out_iov)
			n_sgout = 1 + tail_pages +
				iov_iter_npages_cap(out_iov, INT_MAX, data_len);
		else
			n_sgout = sg_nents(out_sg);
	} else {
		darg->zc = false;

		clear_skb = tls_alloc_clrtxt_skb(sk, skb, rxm->full_len);
		if (!clear_skb)
			return -ENOMEM;

		n_sgout = 1 + skb_shinfo(clear_skb)->nr_frags;
	}

	/* Increment to accommodate AAD */
	n_sgin = n_sgin + 1;

	/* Allocate a single block of memory which contains
	 * aead_req || tls_decrypt_ctx.
	 * Both structs are variable length.
	 */
	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
	aead_size = ALIGN(aead_size, __alignof__(*dctx));
	mem = kmalloc(aead_size + struct_size(dctx, sg, size_add(n_sgin, n_sgout)),
		      sk->sk_allocation);
	if (!mem) {
		err = -ENOMEM;
		goto exit_free_skb;
	}

	/* Segment the allocated memory */
	aead_req = (struct aead_request *)mem;
	dctx = (struct tls_decrypt_ctx *)(mem + aead_size);
	dctx->sk = sk;
	sgin = &dctx->sg[0];
	sgout = &dctx->sg[n_sgin];

	/* For CCM based ciphers, first byte of nonce+iv is a constant */
	switch (prot->cipher_type) {
	case TLS_CIPHER_AES_CCM_128:
		dctx->iv[0] = TLS_AES_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	case TLS_CIPHER_SM4_CCM:
		dctx->iv[0] = TLS_SM4_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	}

	/* Prepare IV */
	if (prot->version == TLS_1_3_VERSION ||
	    prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
		memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv,
		       prot->iv_size + prot->salt_size);
	} else {
		err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
				    &dctx->iv[iv_offset] + prot->salt_size,
				    prot->iv_size);
		if (err < 0)
			goto exit_free;
		memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv, prot->salt_size);
	}
	tls_xor_iv_with_seq(prot, &dctx->iv[iv_offset], tls_ctx->rx.rec_seq);

	/* Prepare AAD */
	tls_make_aad(dctx->aad, rxm->full_len - prot->overhead_size +
		     prot->tail_size,
		     tls_ctx->rx.rec_seq, tlm->control, prot);

	/* Prepare sgin */
	sg_init_table(sgin, n_sgin);
	sg_set_buf(&sgin[0], dctx->aad, prot->aad_size);
	err = skb_to_sgvec(skb, &sgin[1],
			   rxm->offset + prot->prepend_size,
			   rxm->full_len - prot->prepend_size);
	if (err < 0)
		goto exit_free;

	if (clear_skb) {
		sg_init_table(sgout, n_sgout);
		sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);

		err = skb_to_sgvec(clear_skb, &sgout[1], prot->prepend_size,
				   data_len + prot->tail_size);
		if (err < 0)
			goto exit_free;
	} else if (out_iov) {
		sg_init_table(sgout, n_sgout);
		sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);

		err = tls_setup_from_iter(out_iov, data_len, &pages, &sgout[1],
					  (n_sgout - 1 - tail_pages));
		if (err < 0)
			goto exit_free_pages;

		if (prot->tail_size) {
			sg_unmark_end(&sgout[pages]);
			sg_set_buf(&sgout[pages + 1], &dctx->tail,
				   prot->tail_size);
			sg_mark_end(&sgout[pages + 1]);
		}
	} else if (out_sg) {
		memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
	}
	dctx->free_sgout = !!pages;

	/* Prepare and submit AEAD request */
	err = tls_do_decryption(sk, sgin, sgout, dctx->iv,
				data_len + prot->tail_size, aead_req, darg);
	if (err) {
		if (darg->async_done)
			goto exit_free_skb;
		goto exit_free_pages;
	}

	darg->skb = clear_skb ?: tls_strp_msg(ctx);
	clear_skb = NULL;

	if (unlikely(darg->async)) {
		err = tls_strp_msg_hold(&ctx->strp, &ctx->async_hold);
		if (err) {
			err = tls_decrypt_async_wait(ctx);
			darg->async = false;
		}
		return err;
	}

	if (unlikely(darg->async_done))
		return 0;

	if (prot->tail_size)
		darg->tail = dctx->tail;

exit_free_pages:
	/* Release the pages in case iov was mapped to pages */
	for (; pages > 0; pages--)
		put_page(sg_page(&sgout[pages]));
exit_free:
	kfree(mem);
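	/* On success clear_skb was either never allocated (ZC) or was handed
	 * off via darg->skb and set to NULL above, so consuming it here only
	 * frees it on the error paths.
	 */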
exit_free_skb:
	consume_skb(clear_skb);
	return err;
}

static int
tls_decrypt_sw(struct sock *sk, struct tls_context *tls_ctx,
	       struct msghdr *msg, struct tls_decrypt_arg *darg)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int pad, err;

	err = tls_decrypt_sg(sk, &msg->msg_iter, NULL, darg);
	if (err < 0) {
		if (err == -EBADMSG)
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
		return err;
	}
	/* keep going even for ->async, the code below is TLS 1.3 */

	/* If opportunistic TLS 1.3 ZC failed retry without ZC */
	if (unlikely(darg->zc && prot->version == TLS_1_3_VERSION &&
		     darg->tail != TLS_RECORD_TYPE_DATA)) {
		darg->zc = false;
		if (!darg->tail)
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXNOPADVIOL);
		TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTRETRY);
		return tls_decrypt_sw(sk, tls_ctx, msg, darg);
	}

	pad = tls_padding_length(prot, darg->skb, darg);
	if (pad < 0) {
		if (darg->skb != tls_strp_msg(ctx))
			consume_skb(darg->skb);
		return pad;
	}

	rxm = strp_msg(darg->skb);
	rxm->full_len -= pad;

	return 0;
}

static int
tls_decrypt_device(struct sock *sk, struct msghdr *msg,
		   struct tls_context *tls_ctx, struct tls_decrypt_arg *darg)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int pad, err;

	if (tls_ctx->rx_conf != TLS_HW)
		return 0;

	err = tls_device_decrypted(sk, tls_ctx);
	if (err <= 0)
		return err;

	pad = tls_padding_length(prot, tls_strp_msg(ctx), darg);
	if (pad < 0)
		return pad;

	darg->async = false;
	darg->skb = tls_strp_msg(ctx);
	/* ->zc downgrade check, in case TLS 1.3 gets here */
	darg->zc &= !(prot->version == TLS_1_3_VERSION &&
		      tls_msg(darg->skb)->control != TLS_RECORD_TYPE_DATA);

	rxm = strp_msg(darg->skb);
	rxm->full_len -= pad;

	if (!darg->zc) {
		/* Non-ZC case needs a real skb */
		darg->skb = tls_strp_msg_detach(ctx);
		if (!darg->skb)
			return -ENOMEM;
	} else {
		unsigned int off, len;

		/* In ZC case nobody cares about the output skb.
		 * Just copy the data here. Note the skb is not fully trimmed.
		 */
		off = rxm->offset + prot->prepend_size;
		len = rxm->full_len - prot->overhead_size;

		err = skb_copy_datagram_msg(darg->skb, off, msg, len);
		if (err)
			return err;
	}
	return 1;
}
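
/* Peek at handshake records for a TLS 1.3 KeyUpdate message and latch
 * key_update_pending so that further reads fail with -EKEYEXPIRED (see
 * tls_rx_rec_wait()) until userspace installs fresh keys.
 */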
static int tls_check_pending_rekey(struct sock *sk, struct tls_context *ctx,
				   struct sk_buff *skb)
{
	const struct strp_msg *rxm = strp_msg(skb);
	const struct tls_msg *tlm = tls_msg(skb);
	char hs_type;
	int err;

	if (likely(tlm->control != TLS_RECORD_TYPE_HANDSHAKE))
		return 0;

	if (rxm->full_len < 1)
		return 0;

	err = skb_copy_bits(skb, rxm->offset, &hs_type, 1);
	if (err < 0) {
		DEBUG_NET_WARN_ON_ONCE(1);
		return err;
	}

	if (hs_type == TLS_HANDSHAKE_KEYUPDATE) {
		struct tls_sw_context_rx *rx_ctx = ctx->priv_ctx_rx;

		WRITE_ONCE(rx_ctx->key_update_pending, true);
		TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXREKEYRECEIVED);
	}

	return 0;
}

static int tls_rx_one_record(struct sock *sk, struct msghdr *msg,
			     struct tls_decrypt_arg *darg)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int err;

	err = tls_decrypt_device(sk, msg, tls_ctx, darg);
	if (!err)
		err = tls_decrypt_sw(sk, tls_ctx, msg, darg);
	if (err < 0)
		return err;

	rxm = strp_msg(darg->skb);
	rxm->offset += prot->prepend_size;
	rxm->full_len -= prot->overhead_size;
	tls_advance_record_sn(sk, prot, &tls_ctx->rx);

	return tls_check_pending_rekey(sk, tls_ctx, darg->skb);
}

int decrypt_skb(struct sock *sk, struct scatterlist *sgout)
{
	struct tls_decrypt_arg darg = { .zc = true, };

	return tls_decrypt_sg(sk, NULL, sgout, &darg);
}

/* All records returned from a recvmsg() call must have the same type.
 * 0 is not a valid content type. Use it as "no type reported, yet".
 */
static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm,
				   u8 *control)
{
	int err;

	if (!*control) {
		*control = tlm->control;
		if (!*control)
			return -EBADMSG;

		err = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
			       sizeof(*control), control);
		if (*control != TLS_RECORD_TYPE_DATA) {
			if (err || msg->msg_flags & MSG_CTRUNC)
				return -EIO;
		}
	} else if (*control != tlm->control) {
		return 0;
	}

	return 1;
}

static void tls_rx_rec_done(struct tls_sw_context_rx *ctx)
{
	tls_strp_msg_done(&ctx->strp);
}

/* This function traverses the rx_list in the TLS receive context and copies
 * the decrypted records into the buffer provided by the caller when zero-copy
 * is not in use. Further, a record is removed from the rx_list if it has been
 * consumed completely and this is not a peek operation.
 */
static int process_rx_list(struct tls_sw_context_rx *ctx,
			   struct msghdr *msg,
			   u8 *control,
			   size_t skip,
			   size_t len,
			   bool is_peek,
			   bool *more)
{
	struct sk_buff *skb = skb_peek(&ctx->rx_list);
	struct tls_msg *tlm;
	ssize_t copied = 0;
	int err;

	while (skip && skb) {
		struct strp_msg *rxm = strp_msg(skb);
		tlm = tls_msg(skb);

		err = tls_record_content_type(msg, tlm, control);
		if (err <= 0)
			goto more;

		if (skip < rxm->full_len)
			break;

		skip = skip - rxm->full_len;
		skb = skb_peek_next(skb, &ctx->rx_list);
	}

	while (len && skb) {
		struct sk_buff *next_skb;
		struct strp_msg *rxm = strp_msg(skb);
		int chunk = min_t(unsigned int, rxm->full_len - skip, len);

		tlm = tls_msg(skb);

		err = tls_record_content_type(msg, tlm, control);
		if (err <= 0)
			goto more;

		err = skb_copy_datagram_msg(skb, rxm->offset + skip,
					    msg, chunk);
		if (err < 0)
			goto more;

		len = len - chunk;
		copied = copied + chunk;

		/* Consume the data from the record in the non-peek case */
		if (!is_peek) {
			rxm->offset = rxm->offset + chunk;
			rxm->full_len = rxm->full_len - chunk;

			/* Return if there is unconsumed data in the record */
			if (rxm->full_len - skip)
				break;
		}

		/* The remaining skip-bytes must lie in 1st record in rx_list.
		 * So from the 2nd record, 'skip' should be 0.
		 */
		skip = 0;

		if (msg)
			msg->msg_flags |= MSG_EOR;

		next_skb = skb_peek_next(skb, &ctx->rx_list);

		if (!is_peek) {
			__skb_unlink(skb, &ctx->rx_list);
			consume_skb(skb);
		}

		skb = next_skb;
	}
	err = 0;

out:
	return copied ? : err;
more:
	if (more)
		*more = true;
	goto out;
}
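
/* Periodically flush the TCP backlog so the softirq path can keep feeding
 * the stream parser: skipped entirely once the request is satisfied, and
 * otherwise done after roughly 128K has been copied since the last flush,
 * or sooner once TCP has at most one maximal record left queued.
 */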
static bool
tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot,
		       size_t len_left, size_t decrypted, ssize_t done,
		       size_t *flushed_at)
{
	size_t max_rec;

	if (len_left <= decrypted)
		return false;

	max_rec = prot->overhead_size - prot->tail_size + TLS_MAX_PAYLOAD_SIZE;
	if (done - *flushed_at < SZ_128K && tcp_inq(sk) > max_rec)
		return false;

	*flushed_at = done;
	return sk_flush_backlog(sk);
}

static int tls_rx_reader_acquire(struct sock *sk, struct tls_sw_context_rx *ctx,
				 bool nonblock)
{
	long timeo;
	int ret;

	timeo = sock_rcvtimeo(sk, nonblock);

	while (unlikely(ctx->reader_present)) {
		DEFINE_WAIT_FUNC(wait, woken_wake_function);

		ctx->reader_contended = 1;

		add_wait_queue(&ctx->wq, &wait);
		ret = sk_wait_event(sk, &timeo,
				    !READ_ONCE(ctx->reader_present), &wait);
		remove_wait_queue(&ctx->wq, &wait);

		if (timeo <= 0)
			return -EAGAIN;
		if (signal_pending(current))
			return sock_intr_errno(timeo);
		if (ret < 0)
			return ret;
	}

	WRITE_ONCE(ctx->reader_present, 1);

	return 0;
}

static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
			      bool nonblock)
{
	int err;

	lock_sock(sk);
	err = tls_rx_reader_acquire(sk, ctx, nonblock);
	if (err)
		release_sock(sk);
	return err;
}

static void tls_rx_reader_release(struct sock *sk, struct tls_sw_context_rx *ctx)
{
	if (unlikely(ctx->reader_contended)) {
		if (wq_has_sleeper(&ctx->wq))
			wake_up(&ctx->wq);
		else
			ctx->reader_contended = 0;

		WARN_ON_ONCE(!ctx->reader_present);
	}

	WRITE_ONCE(ctx->reader_present, 0);
}

static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
{
	tls_rx_reader_release(sk, ctx);
	release_sock(sk);
}
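
/* Receive path entry point, serialized by the rx reader lock: first drain
 * already-decrypted records from rx_list, then decrypt new records from the
 * stream parser until @len or the rcvlowat target is met. Async decrypts
 * are reaped at recv_end.
 */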
int tls_sw_recvmsg(struct sock *sk,
		   struct msghdr *msg,
		   size_t len,
		   int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	ssize_t decrypted = 0, async_copy_bytes = 0;
	struct sk_psock *psock;
	unsigned char control = 0;
	size_t flushed_at = 0;
	struct strp_msg *rxm;
	struct tls_msg *tlm;
	ssize_t copied = 0;
	ssize_t peeked = 0;
	bool async = false;
	int target, err;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool is_peek = flags & MSG_PEEK;
	bool rx_more = false;
	bool released = true;
	bool bpf_strp_enabled;
	bool zc_capable;

	if (unlikely(flags & MSG_ERRQUEUE))
		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);

	err = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT);
	if (err < 0)
		return err;
	psock = sk_psock_get(sk);
	bpf_strp_enabled = sk_psock_strp_enabled(psock);

	/* If crypto failed the connection is broken */
	err = ctx->async_wait.err;
	if (err)
		goto end;

	/* Process pending decrypted records. They must be non-zero-copy. */
	err = process_rx_list(ctx, msg, &control, 0, len, is_peek, &rx_more);
	if (err < 0)
		goto end;

	/* process_rx_list() will set @control if it processed any records */
	copied = err;
	if (len <= copied || rx_more ||
	    (control && control != TLS_RECORD_TYPE_DATA))
		goto end;

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	len = len - copied;

	zc_capable = !bpf_strp_enabled && !is_kvec && !is_peek &&
		     ctx->zc_capable;
	decrypted = 0;
	while (len && (decrypted + copied < target || tls_strp_msg_ready(ctx))) {
		struct tls_decrypt_arg darg;
		int to_decrypt, chunk;

		err = tls_rx_rec_wait(sk, psock, flags & MSG_DONTWAIT,
				      released);
		if (err <= 0) {
			if (psock) {
				chunk = sk_msg_recvmsg(sk, psock, msg, len,
						       flags);
				if (chunk > 0) {
					decrypted += chunk;
					len -= chunk;
					continue;
				}
			}
			goto recv_end;
		}

		memset(&darg.inargs, 0, sizeof(darg.inargs));

		rxm = strp_msg(tls_strp_msg(ctx));
		tlm = tls_msg(tls_strp_msg(ctx));

		to_decrypt = rxm->full_len - prot->overhead_size;

		if (zc_capable && to_decrypt <= len &&
		    tlm->control == TLS_RECORD_TYPE_DATA)
			darg.zc = true;

		/* Do not use async mode if record is non-data */
		if (tlm->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
			darg.async = ctx->async_capable;
		else
			darg.async = false;

		err = tls_rx_one_record(sk, msg, &darg);
		if (err < 0) {
			tls_err_abort(sk, -EBADMSG);
			goto recv_end;
		}

		async |= darg.async;

		/* If the type of records being processed is not known yet,
		 * set it to the record type just dequeued. If it is already
		 * known, but does not match the record type just dequeued,
		 * go to end. We always get the record type here: for TLS 1.2
		 * it is known as soon as the record is dequeued from the
		 * stream parser, and for TLS 1.3 we disable async.
		 */
		err = tls_record_content_type(msg, tls_msg(darg.skb), &control);
		if (err <= 0) {
			DEBUG_NET_WARN_ON_ONCE(darg.zc);
			tls_rx_rec_done(ctx);
put_on_rx_list_err:
			__skb_queue_tail(&ctx->rx_list, darg.skb);
			goto recv_end;
		}

		/* periodically flush backlog, and feed strparser */
		released = tls_read_flush_backlog(sk, prot, len, to_decrypt,
						  decrypted + copied,
						  &flushed_at);

		/* TLS 1.3 may have updated the length by more than overhead */
		rxm = strp_msg(darg.skb);
		chunk = rxm->full_len;
		tls_rx_rec_done(ctx);

		if (!darg.zc) {
			bool partially_consumed = chunk > len;
			struct sk_buff *skb = darg.skb;

			DEBUG_NET_WARN_ON_ONCE(darg.skb == ctx->strp.anchor);

			if (async) {
				/* TLS 1.2-only, to_decrypt must be text len */
				chunk = min_t(int, to_decrypt, len);
				async_copy_bytes += chunk;
put_on_rx_list:
				decrypted += chunk;
				len -= chunk;
				__skb_queue_tail(&ctx->rx_list, skb);
				if (unlikely(control != TLS_RECORD_TYPE_DATA))
					break;
				continue;
			}

			if (bpf_strp_enabled) {
				released = true;
				err = sk_psock_tls_strp_read(psock, skb);
				if (err != __SK_PASS) {
					rxm->offset = rxm->offset + rxm->full_len;
					rxm->full_len = 0;
					if (err == __SK_DROP)
						consume_skb(skb);
					continue;
				}
			}

			if (partially_consumed)
				chunk = len;

			err = skb_copy_datagram_msg(skb, rxm->offset,
						    msg, chunk);
			if (err < 0)
				goto put_on_rx_list_err;

			if (is_peek) {
				peeked += chunk;
				goto put_on_rx_list;
			}

			if (partially_consumed) {
				rxm->offset += chunk;
				rxm->full_len -= chunk;
				goto put_on_rx_list;
			}

			consume_skb(skb);
		}

		decrypted += chunk;
		len -= chunk;

		/* Return full control message to userspace before trying
		 * to parse another message type
		 */
		msg->msg_flags |= MSG_EOR;
		if (control != TLS_RECORD_TYPE_DATA)
			break;
	}

recv_end:
	if (async) {
		int ret;

		/* Wait for all previously submitted records to be decrypted */
		ret = tls_decrypt_async_wait(ctx);

		if (ret) {
			if (err >= 0 || err == -EINPROGRESS)
				err = ret;
			goto end;
		}

		/* Drain records from the rx_list & copy if required */
		if (is_peek)
			err = process_rx_list(ctx, msg, &control, copied + peeked,
					      decrypted - peeked, is_peek, NULL);
		else
			err = process_rx_list(ctx, msg, &control, 0,
					      async_copy_bytes, is_peek, NULL);

		/* we could have copied less than we wanted, and possibly nothing */
		decrypted += max(err, 0) - async_copy_bytes;
	}

	copied += decrypted;

end:
	tls_rx_reader_unlock(sk, ctx);
	if (psock)
		sk_psock_put(sk, psock);
	return copied ? : err;
}
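
/* splice(2) path: only DATA records can be spliced. A control record,
 * or the unread tail of a partially spliced record, is put back on
 * rx_list so that a later recvmsg() can pick it up.
 */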
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = NULL;
	struct sock *sk = sock->sk;
	struct tls_msg *tlm;
	struct sk_buff *skb;
	ssize_t copied = 0;
	int chunk;
	int err;

	err = tls_rx_reader_lock(sk, ctx, flags & SPLICE_F_NONBLOCK);
	if (err < 0)
		return err;

	if (!skb_queue_empty(&ctx->rx_list)) {
		skb = __skb_dequeue(&ctx->rx_list);
	} else {
		struct tls_decrypt_arg darg;

		err = tls_rx_rec_wait(sk, NULL, flags & SPLICE_F_NONBLOCK,
				      true);
		if (err <= 0)
			goto splice_read_end;

		memset(&darg.inargs, 0, sizeof(darg.inargs));

		err = tls_rx_one_record(sk, NULL, &darg);
		if (err < 0) {
			tls_err_abort(sk, -EBADMSG);
			goto splice_read_end;
		}

		tls_rx_rec_done(ctx);
		skb = darg.skb;
	}

	rxm = strp_msg(skb);
	tlm = tls_msg(skb);

	/* splice does not support reading control messages */
	if (tlm->control != TLS_RECORD_TYPE_DATA) {
		err = -EINVAL;
		goto splice_requeue;
	}

	chunk = min_t(unsigned int, rxm->full_len, len);
	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
	if (copied < 0)
		goto splice_requeue;

	if (chunk < rxm->full_len) {
		rxm->offset += len;
		rxm->full_len -= len;
		goto splice_requeue;
	}

	consume_skb(skb);

splice_read_end:
	tls_rx_reader_unlock(sk, ctx);
	return copied ? : err;

splice_requeue:
	__skb_queue_head(&ctx->rx_list, skb);
	goto splice_read_end;
}
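
/* ->read_sock() implementation for in-kernel callers: decrypted records
 * are handed to @read_actor in order. It bails out with -EINVAL if a
 * psock is attached, and, like splice, only handles DATA records,
 * requeueing anything it cannot deliver onto rx_list.
 */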
int tls_sw_read_sock(struct sock *sk, read_descriptor_t *desc,
		     sk_read_actor_t read_actor)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm = NULL;
	struct sk_buff *skb = NULL;
	struct sk_psock *psock;
	size_t flushed_at = 0;
	bool released = true;
	struct tls_msg *tlm;
	ssize_t copied = 0;
	ssize_t decrypted;
	int err, used;

	psock = sk_psock_get(sk);
	if (psock) {
		sk_psock_put(sk, psock);
		return -EINVAL;
	}
	err = tls_rx_reader_acquire(sk, ctx, true);
	if (err < 0)
		return err;

	/* If crypto failed the connection is broken */
	err = ctx->async_wait.err;
	if (err)
		goto read_sock_end;

	decrypted = 0;
	do {
		if (!skb_queue_empty(&ctx->rx_list)) {
			skb = __skb_dequeue(&ctx->rx_list);
			rxm = strp_msg(skb);
			tlm = tls_msg(skb);
		} else {
			struct tls_decrypt_arg darg;

			err = tls_rx_rec_wait(sk, NULL, true, released);
			if (err <= 0)
				goto read_sock_end;

			memset(&darg.inargs, 0, sizeof(darg.inargs));

			err = tls_rx_one_record(sk, NULL, &darg);
			if (err < 0) {
				tls_err_abort(sk, -EBADMSG);
				goto read_sock_end;
			}

			released = tls_read_flush_backlog(sk, prot, INT_MAX,
							  0, decrypted,
							  &flushed_at);
			skb = darg.skb;
			rxm = strp_msg(skb);
			tlm = tls_msg(skb);
			decrypted += rxm->full_len;

			tls_rx_rec_done(ctx);
		}

		/* read_sock does not support reading control messages */
		if (tlm->control != TLS_RECORD_TYPE_DATA) {
			err = -EINVAL;
			goto read_sock_requeue;
		}

		used = read_actor(desc, skb, rxm->offset, rxm->full_len);
		if (used <= 0) {
			if (!copied)
				err = used;
			goto read_sock_requeue;
		}
		copied += used;
		if (used < rxm->full_len) {
			rxm->offset += used;
			rxm->full_len -= used;
			if (!desc->count)
				goto read_sock_requeue;
		} else {
			consume_skb(skb);
			if (!desc->count)
				skb = NULL;
		}
	} while (skb);

read_sock_end:
	tls_rx_reader_release(sk, ctx);
	return copied ? : err;

read_sock_requeue:
	__skb_queue_head(&ctx->rx_list, skb);
	goto read_sock_end;
}

bool tls_sw_sock_is_readable(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	bool ingress_empty = true;
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock)
		ingress_empty = list_empty(&psock->ingress_msg);
	rcu_read_unlock();

	return !ingress_empty || tls_strp_msg_ready(ctx) ||
	       !skb_queue_empty(&ctx->rx_list);
}

int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	char header[TLS_HEADER_SIZE + TLS_MAX_IV_SIZE];
	size_t cipher_overhead;
	size_t data_len = 0;
	int ret;

	/* Verify that we have a full TLS header, or wait for more data */
	if (strp->stm.offset + prot->prepend_size > skb->len)
		return 0;

	/* Sanity-check size of on-stack buffer. */
	if (WARN_ON(prot->prepend_size > sizeof(header))) {
		ret = -EINVAL;
		goto read_failure;
	}

	/* Linearize header to local buffer */
	ret = skb_copy_bits(skb, strp->stm.offset, header, prot->prepend_size);
	if (ret < 0)
		goto read_failure;

	strp->mark = header[0];

	data_len = ((header[4] & 0xFF) | (header[3] << 8));

	cipher_overhead = prot->tag_size;
	if (prot->version != TLS_1_3_VERSION &&
	    prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
		cipher_overhead += prot->iv_size;

	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
	    prot->tail_size) {
		ret = -EMSGSIZE;
		goto read_failure;
	}
	if (data_len < cipher_overhead) {
		ret = -EBADMSG;
		goto read_failure;
	}

	/* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
	if (header[1] != TLS_1_2_VERSION_MINOR ||
	    header[2] != TLS_1_2_VERSION_MAJOR) {
		ret = -EINVAL;
		goto read_failure;
	}

	tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
				     TCP_SKB_CB(skb)->seq + strp->stm.offset);
	return data_len + TLS_HEADER_SIZE;

read_failure:
	tls_strp_abort_strp(strp, ret);
	return ret;
}

void tls_rx_msg_ready(struct tls_strparser *strp)
{
	struct tls_sw_context_rx *ctx;

	ctx = container_of(strp, struct tls_sw_context_rx, strp);
	ctx->saved_data_ready(strp->sk);
}
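
/* ->sk_data_ready runs from the TCP receive path in BH context, so any
 * allocation done while feeding the strparser must not sleep; force
 * GFP_ATOMIC on the socket for the duration of the call.
 */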
static void tls_data_ready(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_psock *psock;
	gfp_t alloc_save;

	trace_sk_data_ready(sk);

	alloc_save = sk->sk_allocation;
	sk->sk_allocation = GFP_ATOMIC;
	tls_strp_data_ready(&ctx->strp);
	sk->sk_allocation = alloc_save;

	psock = sk_psock_get(sk);
	if (psock) {
		if (!list_empty(&psock->ingress_msg))
			ctx->saved_data_ready(sk);
		sk_psock_put(sk, psock);
	}
}

void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
	set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
	disable_delayed_work_sync(&ctx->tx_work.work);
}

void tls_sw_release_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;

	/* Wait for any pending async encryptions to complete */
	tls_encrypt_async_wait(ctx);

	tls_tx_records(sk, -1);

	/* Free up unsent records in tx_list. First, free the partially
	 * sent record, if any, at the head of tx_list.
	 */
	if (tls_ctx->partially_sent_record) {
		tls_free_partial_record(sk, tls_ctx);
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_encrypted);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	crypto_free_aead(ctx->aead_send);
	tls_free_open_rec(sk);
}

void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	kfree(ctx);
}
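
/* RX teardown: drop any records still sitting on rx_list, free the
 * AEAD, and stop the strparser before restoring the original
 * ->sk_data_ready callback (if it was ever swapped in).
 */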
void tls_sw_release_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	if (ctx->aead_recv) {
		__skb_queue_purge(&ctx->rx_list);
		crypto_free_aead(ctx->aead_recv);
		tls_strp_stop(&ctx->strp);
		/* If tls_sw_strparser_arm() was not called (cleanup paths)
		 * we still want to tls_strp_stop(), but sk->sk_data_ready was
		 * never swapped.
		 */
		if (ctx->saved_data_ready) {
			write_lock_bh(&sk->sk_callback_lock);
			sk->sk_data_ready = ctx->saved_data_ready;
			write_unlock_bh(&sk->sk_callback_lock);
		}
	}
}

void tls_sw_strparser_done(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	tls_strp_done(&ctx->strp);
}

void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	kfree(ctx);
}

void tls_sw_free_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_sw_release_resources_rx(sk);
	tls_sw_free_ctx_rx(tls_ctx);
}
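
/* BIT_TX_SCHEDULED serializes scheduling of this work: it is set by
 * whoever queues the work and cleared here before transmitting, so at
 * most one instance is queued at a time. BIT_TX_CLOSING means the
 * socket is going away and the work must not touch it.
 */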
/* The work handler to transmit the encrypted records in tx_list */
static void tx_work_handler(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct tx_work *tx_work = container_of(delayed_work,
					       struct tx_work, work);
	struct sock *sk = tx_work->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx;

	if (unlikely(!tls_ctx))
		return;

	ctx = tls_sw_ctx_tx(tls_ctx);
	if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
		return;

	if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		return;

	if (mutex_trylock(&tls_ctx->tx_lock)) {
		lock_sock(sk);
		tls_tx_records(sk, -1);
		release_sock(sk);
		mutex_unlock(&tls_ctx->tx_lock);
	} else if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		/* Someone is holding the tx_lock, they will likely run Tx
		 * and cancel the work on their way out of the lock section.
		 * Schedule a long delay just in case.
		 */
		schedule_delayed_work(&ctx->tx_work.work, msecs_to_jiffies(10));
	}
}

static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx)
{
	struct tls_rec *rec;

	rec = list_first_entry_or_null(&ctx->tx_list, struct tls_rec, list);
	if (!rec)
		return false;

	return READ_ONCE(rec->tx_ready);
}

void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
{
	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);

	/* Schedule the transmission if tx list is ready */
	if (tls_is_tx_ready(tx_ctx) &&
	    !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
		schedule_delayed_work(&tx_ctx->tx_work.work, 0);
}

void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);

	write_lock_bh(&sk->sk_callback_lock);
	rx_ctx->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = tls_data_ready;
	write_unlock_bh(&sk->sk_callback_lock);
}

void tls_update_rx_zc_capable(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);

	rx_ctx->zc_capable = tls_ctx->rx_no_pad ||
			     tls_ctx->prot_info.version != TLS_1_3_VERSION;
}

static struct tls_sw_context_tx *init_ctx_tx(struct tls_context *ctx, struct sock *sk)
{
	struct tls_sw_context_tx *sw_ctx_tx;

	if (!ctx->priv_ctx_tx) {
		sw_ctx_tx = kzalloc_obj(*sw_ctx_tx);
		if (!sw_ctx_tx)
			return NULL;
	} else {
		sw_ctx_tx = ctx->priv_ctx_tx;
	}

	crypto_init_wait(&sw_ctx_tx->async_wait);
	atomic_set(&sw_ctx_tx->encrypt_pending, 1);
	INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
	INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
	sw_ctx_tx->tx_work.sk = sk;

	return sw_ctx_tx;
}

static struct tls_sw_context_rx *init_ctx_rx(struct tls_context *ctx)
{
	struct tls_sw_context_rx *sw_ctx_rx;

	if (!ctx->priv_ctx_rx) {
		sw_ctx_rx = kzalloc_obj(*sw_ctx_rx);
		if (!sw_ctx_rx)
			return NULL;
	} else {
		sw_ctx_rx = ctx->priv_ctx_rx;
	}

	crypto_init_wait(&sw_ctx_rx->async_wait);
	atomic_set(&sw_ctx_rx->decrypt_pending, 1);
	init_waitqueue_head(&sw_ctx_rx->wq);
	skb_queue_head_init(&sw_ctx_rx->rx_list);
	skb_queue_head_init(&sw_ctx_rx->async_hold);

	return sw_ctx_rx;
}
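
/* Worked example of the bookkeeping below (sketch): for TLS 1.2 with
 * AES-GCM-128 the explicit nonce is 8 bytes, so prepend_size is
 * 5 (header) + 8 = 13, tag_size is 16 and tail_size is 0, giving
 * overhead_size = 13 + 16 + 0 = 29 bytes per record. For TLS 1.3 the
 * nonce is implicit (nonce_size = 0) and one content-type byte trails
 * the plaintext: overhead_size = 5 + 16 + 1 = 22.
 */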
int init_prot_info(struct tls_prot_info *prot,
		   const struct tls_crypto_info *crypto_info,
		   const struct tls_cipher_desc *cipher_desc)
{
	u16 nonce_size = cipher_desc->nonce;

	if (crypto_info->version == TLS_1_3_VERSION) {
		nonce_size = 0;
		prot->aad_size = TLS_HEADER_SIZE;
		prot->tail_size = 1;
	} else {
		prot->aad_size = TLS_AAD_SPACE_SIZE;
		prot->tail_size = 0;
	}

	/* Sanity-check the sizes for stack allocations. */
	if (nonce_size > TLS_MAX_IV_SIZE || prot->aad_size > TLS_MAX_AAD_SIZE)
		return -EINVAL;

	prot->version = crypto_info->version;
	prot->cipher_type = crypto_info->cipher_type;
	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
	prot->tag_size = cipher_desc->tag;
	prot->overhead_size = prot->prepend_size + prot->tag_size + prot->tail_size;
	prot->iv_size = cipher_desc->iv;
	prot->salt_size = cipher_desc->salt;
	prot->rec_seq_size = cipher_desc->rec_seq;

	return 0;
}

static void tls_finish_key_update(struct sock *sk, struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *ctx = tls_ctx->priv_ctx_rx;

	WRITE_ONCE(ctx->key_update_pending, false);
	/* wake up pre-existing poll() */
	ctx->saved_data_ready(sk);
}
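
/* Userspace view (sketch, not part of this file): after performing the
 * TLS handshake on a plain TCP socket, an application enables kTLS with
 * something like:
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *	};
 *
 *	// key/iv/salt/rec_seq copied from the handshake state
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	setsockopt(fd, SOL_TLS, TLS_RX, &ci, sizeof(ci));
 *
 * which lands here (tx == 0, new_crypto_info == NULL) to set up the
 * software crypto context; TLS_TX sets up the send side, and a later
 * setsockopt with fresh keys takes the rekey path below.
 */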
int tls_set_sw_offload(struct sock *sk, int tx,
		       struct tls_crypto_info *new_crypto_info)
{
	struct tls_crypto_info *crypto_info, *src_crypto_info;
	struct tls_sw_context_tx *sw_ctx_tx = NULL;
	struct tls_sw_context_rx *sw_ctx_rx = NULL;
	const struct tls_cipher_desc *cipher_desc;
	char *iv, *rec_seq, *key, *salt;
	struct cipher_context *cctx;
	struct tls_prot_info *prot;
	struct crypto_aead **aead;
	struct tls_context *ctx;
	struct crypto_tfm *tfm;
	int rc = 0;

	ctx = tls_get_ctx(sk);
	prot = &ctx->prot_info;

	/* new_crypto_info != NULL means rekey */
	if (!new_crypto_info) {
		if (tx) {
			ctx->priv_ctx_tx = init_ctx_tx(ctx, sk);
			if (!ctx->priv_ctx_tx)
				return -ENOMEM;
		} else {
			ctx->priv_ctx_rx = init_ctx_rx(ctx);
			if (!ctx->priv_ctx_rx)
				return -ENOMEM;
		}
	}

	if (tx) {
		sw_ctx_tx = ctx->priv_ctx_tx;
		crypto_info = &ctx->crypto_send.info;
		cctx = &ctx->tx;
		aead = &sw_ctx_tx->aead_send;
	} else {
		sw_ctx_rx = ctx->priv_ctx_rx;
		crypto_info = &ctx->crypto_recv.info;
		cctx = &ctx->rx;
		aead = &sw_ctx_rx->aead_recv;
	}

	src_crypto_info = new_crypto_info ?: crypto_info;

	cipher_desc = get_cipher_desc(src_crypto_info->cipher_type);
	if (!cipher_desc) {
		rc = -EINVAL;
		goto free_priv;
	}

	rc = init_prot_info(prot, src_crypto_info, cipher_desc);
	if (rc)
		goto free_priv;

	iv = crypto_info_iv(src_crypto_info, cipher_desc);
	key = crypto_info_key(src_crypto_info, cipher_desc);
	salt = crypto_info_salt(src_crypto_info, cipher_desc);
	rec_seq = crypto_info_rec_seq(src_crypto_info, cipher_desc);

	if (!*aead) {
		*aead = crypto_alloc_aead(cipher_desc->cipher_name, 0, 0);
		if (IS_ERR(*aead)) {
			rc = PTR_ERR(*aead);
			*aead = NULL;
			goto free_priv;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	/* setkey is the last operation that could fail during a
	 * rekey. If it succeeds, we can start modifying the
	 * context.
	 */
	rc = crypto_aead_setkey(*aead, key, cipher_desc->key);
	if (rc) {
		if (new_crypto_info)
			goto out;
		else
			goto free_aead;
	}

	if (!new_crypto_info) {
		rc = crypto_aead_setauthsize(*aead, prot->tag_size);
		if (rc)
			goto free_aead;
	}

	if (!tx && !new_crypto_info) {
		tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);

		tls_update_rx_zc_capable(ctx);
		sw_ctx_rx->async_capable =
			src_crypto_info->version != TLS_1_3_VERSION &&
			!!(tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC);

		rc = tls_strp_init(&sw_ctx_rx->strp, sk);
		if (rc)
			goto free_aead;
	}

	memcpy(cctx->iv, salt, cipher_desc->salt);
	memcpy(cctx->iv + cipher_desc->salt, iv, cipher_desc->iv);
	memcpy(cctx->rec_seq, rec_seq, cipher_desc->rec_seq);

	if (new_crypto_info) {
		unsafe_memcpy(crypto_info, new_crypto_info,
			      cipher_desc->crypto_info,
			      /* size was checked in do_tls_setsockopt_conf */);
		memzero_explicit(new_crypto_info, cipher_desc->crypto_info);
		if (!tx)
			tls_finish_key_update(sk, ctx);
	}

	goto out;

free_aead:
	crypto_free_aead(*aead);
	*aead = NULL;
free_priv:
	if (!new_crypto_info) {
		if (tx) {
			kfree(ctx->priv_ctx_tx);
			ctx->priv_ctx_tx = NULL;
		} else {
			kfree(ctx->priv_ctx_rx);
			ctx->priv_ctx_rx = NULL;
		}
	}
out:
	return rc;
}