1 /* 2 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved. 3 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved. 4 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved. 5 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved. 6 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved. 7 * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io 8 * 9 * This software is available to you under a choice of one of two 10 * licenses. You may choose to be licensed under the terms of the GNU 11 * General Public License (GPL) Version 2, available from the file 12 * COPYING in the main directory of this source tree, or the 13 * OpenIB.org BSD license below: 14 * 15 * Redistribution and use in source and binary forms, with or 16 * without modification, are permitted provided that the following 17 * conditions are met: 18 * 19 * - Redistributions of source code must retain the above 20 * copyright notice, this list of conditions and the following 21 * disclaimer. 22 * 23 * - Redistributions in binary form must reproduce the above 24 * copyright notice, this list of conditions and the following 25 * disclaimer in the documentation and/or other materials 26 * provided with the distribution. 27 * 28 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 29 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 30 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 31 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 32 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 33 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 34 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 35 * SOFTWARE. 36 */ 37 38 #include <linux/sched/signal.h> 39 #include <linux/module.h> 40 #include <crypto/aead.h> 41 42 #include <net/strparser.h> 43 #include <net/tls.h> 44 45 #define MAX_IV_SIZE TLS_CIPHER_AES_GCM_128_IV_SIZE 46 47 static int __skb_nsg(struct sk_buff *skb, int offset, int len, 48 unsigned int recursion_level) 49 { 50 int start = skb_headlen(skb); 51 int i, chunk = start - offset; 52 struct sk_buff *frag_iter; 53 int elt = 0; 54 55 if (unlikely(recursion_level >= 24)) 56 return -EMSGSIZE; 57 58 if (chunk > 0) { 59 if (chunk > len) 60 chunk = len; 61 elt++; 62 len -= chunk; 63 if (len == 0) 64 return elt; 65 offset += chunk; 66 } 67 68 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 69 int end; 70 71 WARN_ON(start > offset + len); 72 73 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 74 chunk = end - offset; 75 if (chunk > 0) { 76 if (chunk > len) 77 chunk = len; 78 elt++; 79 len -= chunk; 80 if (len == 0) 81 return elt; 82 offset += chunk; 83 } 84 start = end; 85 } 86 87 if (unlikely(skb_has_frag_list(skb))) { 88 skb_walk_frags(skb, frag_iter) { 89 int end, ret; 90 91 WARN_ON(start > offset + len); 92 93 end = start + frag_iter->len; 94 chunk = end - offset; 95 if (chunk > 0) { 96 if (chunk > len) 97 chunk = len; 98 ret = __skb_nsg(frag_iter, offset - start, chunk, 99 recursion_level + 1); 100 if (unlikely(ret < 0)) 101 return ret; 102 elt += ret; 103 len -= chunk; 104 if (len == 0) 105 return elt; 106 offset += chunk; 107 } 108 start = end; 109 } 110 } 111 BUG_ON(len); 112 return elt; 113 } 114 115 /* Return the number of scatterlist elements required to completely map the 116 * skb, or -EMSGSIZE if the recursion depth is exceeded. 
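 * Used on the receive path by decrypt_internal() to size the scatterlist
 * that skb_to_sgvec() fills with the ciphertext fragments.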
117 */ 118 static int skb_nsg(struct sk_buff *skb, int offset, int len) 119 { 120 return __skb_nsg(skb, offset, len, 0); 121 } 122 123 static int padding_length(struct tls_sw_context_rx *ctx, 124 struct tls_context *tls_ctx, struct sk_buff *skb) 125 { 126 struct strp_msg *rxm = strp_msg(skb); 127 int sub = 0; 128 129 /* Determine zero-padding length */ 130 if (tls_ctx->crypto_recv.info.version == TLS_1_3_VERSION) { 131 char content_type = 0; 132 int err; 133 int back = 17; 134 135 while (content_type == 0) { 136 if (back > rxm->full_len) 137 return -EBADMSG; 138 err = skb_copy_bits(skb, 139 rxm->offset + rxm->full_len - back, 140 &content_type, 1); 141 if (content_type) 142 break; 143 sub++; 144 back++; 145 } 146 ctx->control = content_type; 147 } 148 return sub; 149 } 150 151 static void tls_decrypt_done(struct crypto_async_request *req, int err) 152 { 153 struct aead_request *aead_req = (struct aead_request *)req; 154 struct scatterlist *sgout = aead_req->dst; 155 struct scatterlist *sgin = aead_req->src; 156 struct tls_sw_context_rx *ctx; 157 struct tls_context *tls_ctx; 158 struct scatterlist *sg; 159 struct sk_buff *skb; 160 unsigned int pages; 161 int pending; 162 163 skb = (struct sk_buff *)req->data; 164 tls_ctx = tls_get_ctx(skb->sk); 165 ctx = tls_sw_ctx_rx(tls_ctx); 166 167 /* Propagate if there was an err */ 168 if (err) { 169 ctx->async_wait.err = err; 170 tls_err_abort(skb->sk, err); 171 } else { 172 struct strp_msg *rxm = strp_msg(skb); 173 rxm->full_len -= padding_length(ctx, tls_ctx, skb); 174 rxm->offset += tls_ctx->rx.prepend_size; 175 rxm->full_len -= tls_ctx->rx.overhead_size; 176 } 177 178 /* After using skb->sk to propagate sk through crypto async callback 179 * we need to NULL it again. 180 */ 181 skb->sk = NULL; 182 183 184 /* Free the destination pages if skb was not decrypted inplace */ 185 if (sgout != sgin) { 186 /* Skip the first S/G entry as it points to AAD */ 187 for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) { 188 if (!sg) 189 break; 190 put_page(sg_page(sg)); 191 } 192 } 193 194 kfree(aead_req); 195 196 pending = atomic_dec_return(&ctx->decrypt_pending); 197 198 if (!pending && READ_ONCE(ctx->async_notify)) 199 complete(&ctx->async_wait.completion); 200 } 201 202 static int tls_do_decryption(struct sock *sk, 203 struct sk_buff *skb, 204 struct scatterlist *sgin, 205 struct scatterlist *sgout, 206 char *iv_recv, 207 size_t data_len, 208 struct aead_request *aead_req, 209 bool async) 210 { 211 struct tls_context *tls_ctx = tls_get_ctx(sk); 212 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 213 int ret; 214 215 aead_request_set_tfm(aead_req, ctx->aead_recv); 216 aead_request_set_ad(aead_req, tls_ctx->rx.aad_size); 217 aead_request_set_crypt(aead_req, sgin, sgout, 218 data_len + tls_ctx->rx.tag_size, 219 (u8 *)iv_recv); 220 221 if (async) { 222 /* Using skb->sk to push sk through to crypto async callback 223 * handler. This allows propagating errors up to the socket 224 * if needed. It _must_ be cleared in the async handler 225 * before kfree_skb is called. We _know_ skb->sk is NULL 226 * because it is a clone from strparser. 
227 */ 228 skb->sk = sk; 229 aead_request_set_callback(aead_req, 230 CRYPTO_TFM_REQ_MAY_BACKLOG, 231 tls_decrypt_done, skb); 232 atomic_inc(&ctx->decrypt_pending); 233 } else { 234 aead_request_set_callback(aead_req, 235 CRYPTO_TFM_REQ_MAY_BACKLOG, 236 crypto_req_done, &ctx->async_wait); 237 } 238 239 ret = crypto_aead_decrypt(aead_req); 240 if (ret == -EINPROGRESS) { 241 if (async) 242 return ret; 243 244 ret = crypto_wait_req(ret, &ctx->async_wait); 245 } 246 247 if (async) 248 atomic_dec(&ctx->decrypt_pending); 249 250 return ret; 251 } 252 253 static void tls_trim_both_msgs(struct sock *sk, int target_size) 254 { 255 struct tls_context *tls_ctx = tls_get_ctx(sk); 256 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 257 struct tls_rec *rec = ctx->open_rec; 258 259 sk_msg_trim(sk, &rec->msg_plaintext, target_size); 260 if (target_size > 0) 261 target_size += tls_ctx->tx.overhead_size; 262 sk_msg_trim(sk, &rec->msg_encrypted, target_size); 263 } 264 265 static int tls_alloc_encrypted_msg(struct sock *sk, int len) 266 { 267 struct tls_context *tls_ctx = tls_get_ctx(sk); 268 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 269 struct tls_rec *rec = ctx->open_rec; 270 struct sk_msg *msg_en = &rec->msg_encrypted; 271 272 return sk_msg_alloc(sk, msg_en, len, 0); 273 } 274 275 static int tls_clone_plaintext_msg(struct sock *sk, int required) 276 { 277 struct tls_context *tls_ctx = tls_get_ctx(sk); 278 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 279 struct tls_rec *rec = ctx->open_rec; 280 struct sk_msg *msg_pl = &rec->msg_plaintext; 281 struct sk_msg *msg_en = &rec->msg_encrypted; 282 int skip, len; 283 284 /* We add page references worth len bytes from encrypted sg 285 * at the end of plaintext sg. It is guaranteed that msg_en 286 * has enough required room (ensured by caller). 287 */ 288 len = required - msg_pl->sg.size; 289 290 /* Skip initial bytes in msg_en's data to be able to use 291 * same offset of both plain and encrypted data. 
292 */ 293 skip = tls_ctx->tx.prepend_size + msg_pl->sg.size; 294 295 return sk_msg_clone(sk, msg_pl, msg_en, skip, len); 296 } 297 298 static struct tls_rec *tls_get_rec(struct sock *sk) 299 { 300 struct tls_context *tls_ctx = tls_get_ctx(sk); 301 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 302 struct sk_msg *msg_pl, *msg_en; 303 struct tls_rec *rec; 304 int mem_size; 305 306 mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send); 307 308 rec = kzalloc(mem_size, sk->sk_allocation); 309 if (!rec) 310 return NULL; 311 312 msg_pl = &rec->msg_plaintext; 313 msg_en = &rec->msg_encrypted; 314 315 sk_msg_init(msg_pl); 316 sk_msg_init(msg_en); 317 318 sg_init_table(rec->sg_aead_in, 2); 319 sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, 320 tls_ctx->tx.aad_size); 321 sg_unmark_end(&rec->sg_aead_in[1]); 322 323 sg_init_table(rec->sg_aead_out, 2); 324 sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, 325 tls_ctx->tx.aad_size); 326 sg_unmark_end(&rec->sg_aead_out[1]); 327 328 return rec; 329 } 330 331 static void tls_free_rec(struct sock *sk, struct tls_rec *rec) 332 { 333 sk_msg_free(sk, &rec->msg_encrypted); 334 sk_msg_free(sk, &rec->msg_plaintext); 335 kfree(rec); 336 } 337 338 static void tls_free_open_rec(struct sock *sk) 339 { 340 struct tls_context *tls_ctx = tls_get_ctx(sk); 341 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 342 struct tls_rec *rec = ctx->open_rec; 343 344 if (rec) { 345 tls_free_rec(sk, rec); 346 ctx->open_rec = NULL; 347 } 348 } 349 350 int tls_tx_records(struct sock *sk, int flags) 351 { 352 struct tls_context *tls_ctx = tls_get_ctx(sk); 353 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 354 struct tls_rec *rec, *tmp; 355 struct sk_msg *msg_en; 356 int tx_flags, rc = 0; 357 358 if (tls_is_partially_sent_record(tls_ctx)) { 359 rec = list_first_entry(&ctx->tx_list, 360 struct tls_rec, list); 361 362 if (flags == -1) 363 tx_flags = rec->tx_flags; 364 else 365 tx_flags = flags; 366 367 rc = tls_push_partial_record(sk, tls_ctx, tx_flags); 368 if (rc) 369 goto tx_err; 370 371 /* Full record has been transmitted. 
372 * Remove the head of tx_list 373 */ 374 list_del(&rec->list); 375 sk_msg_free(sk, &rec->msg_plaintext); 376 kfree(rec); 377 } 378 379 /* Tx all ready records */ 380 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) { 381 if (READ_ONCE(rec->tx_ready)) { 382 if (flags == -1) 383 tx_flags = rec->tx_flags; 384 else 385 tx_flags = flags; 386 387 msg_en = &rec->msg_encrypted; 388 rc = tls_push_sg(sk, tls_ctx, 389 &msg_en->sg.data[msg_en->sg.curr], 390 0, tx_flags); 391 if (rc) 392 goto tx_err; 393 394 list_del(&rec->list); 395 sk_msg_free(sk, &rec->msg_plaintext); 396 kfree(rec); 397 } else { 398 break; 399 } 400 } 401 402 tx_err: 403 if (rc < 0 && rc != -EAGAIN) 404 tls_err_abort(sk, EBADMSG); 405 406 return rc; 407 } 408 409 static void tls_encrypt_done(struct crypto_async_request *req, int err) 410 { 411 struct aead_request *aead_req = (struct aead_request *)req; 412 struct sock *sk = req->data; 413 struct tls_context *tls_ctx = tls_get_ctx(sk); 414 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 415 struct scatterlist *sge; 416 struct sk_msg *msg_en; 417 struct tls_rec *rec; 418 bool ready = false; 419 int pending; 420 421 rec = container_of(aead_req, struct tls_rec, aead_req); 422 msg_en = &rec->msg_encrypted; 423 424 sge = sk_msg_elem(msg_en, msg_en->sg.curr); 425 sge->offset -= tls_ctx->tx.prepend_size; 426 sge->length += tls_ctx->tx.prepend_size; 427 428 /* Check if error is previously set on socket */ 429 if (err || sk->sk_err) { 430 rec = NULL; 431 432 /* If err is already set on socket, return the same code */ 433 if (sk->sk_err) { 434 ctx->async_wait.err = sk->sk_err; 435 } else { 436 ctx->async_wait.err = err; 437 tls_err_abort(sk, err); 438 } 439 } 440 441 if (rec) { 442 struct tls_rec *first_rec; 443 444 /* Mark the record as ready for transmission */ 445 smp_store_mb(rec->tx_ready, true); 446 447 /* If received record is at head of tx_list, schedule tx */ 448 first_rec = list_first_entry(&ctx->tx_list, 449 struct tls_rec, list); 450 if (rec == first_rec) 451 ready = true; 452 } 453 454 pending = atomic_dec_return(&ctx->encrypt_pending); 455 456 if (!pending && READ_ONCE(ctx->async_notify)) 457 complete(&ctx->async_wait.completion); 458 459 if (!ready) 460 return; 461 462 /* Schedule the transmission */ 463 if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) 464 schedule_delayed_work(&ctx->tx_work.work, 1); 465 } 466 467 static int tls_do_encryption(struct sock *sk, 468 struct tls_context *tls_ctx, 469 struct tls_sw_context_tx *ctx, 470 struct aead_request *aead_req, 471 size_t data_len, u32 start) 472 { 473 struct tls_rec *rec = ctx->open_rec; 474 struct sk_msg *msg_en = &rec->msg_encrypted; 475 struct scatterlist *sge = sk_msg_elem(msg_en, start); 476 int rc; 477 478 memcpy(rec->iv_data, tls_ctx->tx.iv, sizeof(rec->iv_data)); 479 xor_iv_with_seq(tls_ctx->crypto_send.info.version, rec->iv_data, 480 tls_ctx->tx.rec_seq); 481 482 sge->offset += tls_ctx->tx.prepend_size; 483 sge->length -= tls_ctx->tx.prepend_size; 484 485 msg_en->sg.curr = start; 486 487 aead_request_set_tfm(aead_req, ctx->aead_send); 488 aead_request_set_ad(aead_req, tls_ctx->tx.aad_size); 489 aead_request_set_crypt(aead_req, rec->sg_aead_in, 490 rec->sg_aead_out, 491 data_len, rec->iv_data); 492 493 aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG, 494 tls_encrypt_done, sk); 495 496 /* Add the record in tx_list */ 497 list_add_tail((struct list_head *)&rec->list, &ctx->tx_list); 498 atomic_inc(&ctx->encrypt_pending); 499 500 rc = crypto_aead_encrypt(aead_req); 501 if (!rc || 
rc != -EINPROGRESS) { 502 atomic_dec(&ctx->encrypt_pending); 503 sge->offset -= tls_ctx->tx.prepend_size; 504 sge->length += tls_ctx->tx.prepend_size; 505 } 506 507 if (!rc) { 508 WRITE_ONCE(rec->tx_ready, true); 509 } else if (rc != -EINPROGRESS) { 510 list_del(&rec->list); 511 return rc; 512 } 513 514 /* Unhook the record from context if encryption is not failure */ 515 ctx->open_rec = NULL; 516 tls_advance_record_sn(sk, &tls_ctx->tx, 517 tls_ctx->crypto_send.info.version); 518 return rc; 519 } 520 521 static int tls_split_open_record(struct sock *sk, struct tls_rec *from, 522 struct tls_rec **to, struct sk_msg *msg_opl, 523 struct sk_msg *msg_oen, u32 split_point, 524 u32 tx_overhead_size, u32 *orig_end) 525 { 526 u32 i, j, bytes = 0, apply = msg_opl->apply_bytes; 527 struct scatterlist *sge, *osge, *nsge; 528 u32 orig_size = msg_opl->sg.size; 529 struct scatterlist tmp = { }; 530 struct sk_msg *msg_npl; 531 struct tls_rec *new; 532 int ret; 533 534 new = tls_get_rec(sk); 535 if (!new) 536 return -ENOMEM; 537 ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size + 538 tx_overhead_size, 0); 539 if (ret < 0) { 540 tls_free_rec(sk, new); 541 return ret; 542 } 543 544 *orig_end = msg_opl->sg.end; 545 i = msg_opl->sg.start; 546 sge = sk_msg_elem(msg_opl, i); 547 while (apply && sge->length) { 548 if (sge->length > apply) { 549 u32 len = sge->length - apply; 550 551 get_page(sg_page(sge)); 552 sg_set_page(&tmp, sg_page(sge), len, 553 sge->offset + apply); 554 sge->length = apply; 555 bytes += apply; 556 apply = 0; 557 } else { 558 apply -= sge->length; 559 bytes += sge->length; 560 } 561 562 sk_msg_iter_var_next(i); 563 if (i == msg_opl->sg.end) 564 break; 565 sge = sk_msg_elem(msg_opl, i); 566 } 567 568 msg_opl->sg.end = i; 569 msg_opl->sg.curr = i; 570 msg_opl->sg.copybreak = 0; 571 msg_opl->apply_bytes = 0; 572 msg_opl->sg.size = bytes; 573 574 msg_npl = &new->msg_plaintext; 575 msg_npl->apply_bytes = apply; 576 msg_npl->sg.size = orig_size - bytes; 577 578 j = msg_npl->sg.start; 579 nsge = sk_msg_elem(msg_npl, j); 580 if (tmp.length) { 581 memcpy(nsge, &tmp, sizeof(*nsge)); 582 sk_msg_iter_var_next(j); 583 nsge = sk_msg_elem(msg_npl, j); 584 } 585 586 osge = sk_msg_elem(msg_opl, i); 587 while (osge->length) { 588 memcpy(nsge, osge, sizeof(*nsge)); 589 sg_unmark_end(nsge); 590 sk_msg_iter_var_next(i); 591 sk_msg_iter_var_next(j); 592 if (i == *orig_end) 593 break; 594 osge = sk_msg_elem(msg_opl, i); 595 nsge = sk_msg_elem(msg_npl, j); 596 } 597 598 msg_npl->sg.end = j; 599 msg_npl->sg.curr = j; 600 msg_npl->sg.copybreak = 0; 601 602 *to = new; 603 return 0; 604 } 605 606 static void tls_merge_open_record(struct sock *sk, struct tls_rec *to, 607 struct tls_rec *from, u32 orig_end) 608 { 609 struct sk_msg *msg_npl = &from->msg_plaintext; 610 struct sk_msg *msg_opl = &to->msg_plaintext; 611 struct scatterlist *osge, *nsge; 612 u32 i, j; 613 614 i = msg_opl->sg.end; 615 sk_msg_iter_var_prev(i); 616 j = msg_npl->sg.start; 617 618 osge = sk_msg_elem(msg_opl, i); 619 nsge = sk_msg_elem(msg_npl, j); 620 621 if (sg_page(osge) == sg_page(nsge) && 622 osge->offset + osge->length == nsge->offset) { 623 osge->length += nsge->length; 624 put_page(sg_page(nsge)); 625 } 626 627 msg_opl->sg.end = orig_end; 628 msg_opl->sg.curr = orig_end; 629 msg_opl->sg.copybreak = 0; 630 msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size; 631 msg_opl->sg.size += msg_npl->sg.size; 632 633 sk_msg_free(sk, &to->msg_encrypted); 634 sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted); 635 636 
kfree(from); 637 } 638 639 static int tls_push_record(struct sock *sk, int flags, 640 unsigned char record_type) 641 { 642 struct tls_context *tls_ctx = tls_get_ctx(sk); 643 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 644 struct tls_rec *rec = ctx->open_rec, *tmp = NULL; 645 u32 i, split_point, uninitialized_var(orig_end); 646 struct sk_msg *msg_pl, *msg_en; 647 struct aead_request *req; 648 bool split; 649 int rc; 650 651 if (!rec) 652 return 0; 653 654 msg_pl = &rec->msg_plaintext; 655 msg_en = &rec->msg_encrypted; 656 657 split_point = msg_pl->apply_bytes; 658 split = split_point && split_point < msg_pl->sg.size; 659 if (split) { 660 rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en, 661 split_point, tls_ctx->tx.overhead_size, 662 &orig_end); 663 if (rc < 0) 664 return rc; 665 sk_msg_trim(sk, msg_en, msg_pl->sg.size + 666 tls_ctx->tx.overhead_size); 667 } 668 669 rec->tx_flags = flags; 670 req = &rec->aead_req; 671 672 i = msg_pl->sg.end; 673 sk_msg_iter_var_prev(i); 674 675 rec->content_type = record_type; 676 if (tls_ctx->crypto_send.info.version == TLS_1_3_VERSION) { 677 /* Add content type to end of message. No padding added */ 678 sg_set_buf(&rec->sg_content_type, &rec->content_type, 1); 679 sg_mark_end(&rec->sg_content_type); 680 sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1, 681 &rec->sg_content_type); 682 } else { 683 sg_mark_end(sk_msg_elem(msg_pl, i)); 684 } 685 686 i = msg_pl->sg.start; 687 sg_chain(rec->sg_aead_in, 2, rec->inplace_crypto ? 688 &msg_en->sg.data[i] : &msg_pl->sg.data[i]); 689 690 i = msg_en->sg.end; 691 sk_msg_iter_var_prev(i); 692 sg_mark_end(sk_msg_elem(msg_en, i)); 693 694 i = msg_en->sg.start; 695 sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]); 696 697 tls_make_aad(rec->aad_space, msg_pl->sg.size + tls_ctx->tx.tail_size, 698 tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size, 699 record_type, 700 tls_ctx->crypto_send.info.version); 701 702 tls_fill_prepend(tls_ctx, 703 page_address(sg_page(&msg_en->sg.data[i])) + 704 msg_en->sg.data[i].offset, 705 msg_pl->sg.size + tls_ctx->tx.tail_size, 706 record_type, 707 tls_ctx->crypto_send.info.version); 708 709 tls_ctx->pending_open_record_frags = false; 710 711 rc = tls_do_encryption(sk, tls_ctx, ctx, req, 712 msg_pl->sg.size + tls_ctx->tx.tail_size, i); 713 if (rc < 0) { 714 if (rc != -EINPROGRESS) { 715 tls_err_abort(sk, EBADMSG); 716 if (split) { 717 tls_ctx->pending_open_record_frags = true; 718 tls_merge_open_record(sk, rec, tmp, orig_end); 719 } 720 } 721 ctx->async_capable = 1; 722 return rc; 723 } else if (split) { 724 msg_pl = &tmp->msg_plaintext; 725 msg_en = &tmp->msg_encrypted; 726 sk_msg_trim(sk, msg_en, msg_pl->sg.size + 727 tls_ctx->tx.overhead_size); 728 tls_ctx->pending_open_record_frags = true; 729 ctx->open_rec = tmp; 730 } 731 732 return tls_tx_records(sk, flags); 733 } 734 735 static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk, 736 bool full_record, u8 record_type, 737 size_t *copied, int flags) 738 { 739 struct tls_context *tls_ctx = tls_get_ctx(sk); 740 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 741 struct sk_msg msg_redir = { }; 742 struct sk_psock *psock; 743 struct sock *sk_redir; 744 struct tls_rec *rec; 745 bool enospc, policy; 746 int err = 0, send; 747 u32 delta = 0; 748 749 policy = !(flags & MSG_SENDPAGE_NOPOLICY); 750 psock = sk_psock_get(sk); 751 if (!psock || !policy) 752 return tls_push_record(sk, flags, record_type); 753 more_data: 754 enospc = sk_msg_full(msg); 755 if (psock->eval == __SK_NONE) { 756 delta = msg->sg.size; 757 
psock->eval = sk_psock_msg_verdict(sk, psock, msg); 758 if (delta < msg->sg.size) 759 delta -= msg->sg.size; 760 else 761 delta = 0; 762 } 763 if (msg->cork_bytes && msg->cork_bytes > msg->sg.size && 764 !enospc && !full_record) { 765 err = -ENOSPC; 766 goto out_err; 767 } 768 msg->cork_bytes = 0; 769 send = msg->sg.size; 770 if (msg->apply_bytes && msg->apply_bytes < send) 771 send = msg->apply_bytes; 772 773 switch (psock->eval) { 774 case __SK_PASS: 775 err = tls_push_record(sk, flags, record_type); 776 if (err < 0) { 777 *copied -= sk_msg_free(sk, msg); 778 tls_free_open_rec(sk); 779 goto out_err; 780 } 781 break; 782 case __SK_REDIRECT: 783 sk_redir = psock->sk_redir; 784 memcpy(&msg_redir, msg, sizeof(*msg)); 785 if (msg->apply_bytes < send) 786 msg->apply_bytes = 0; 787 else 788 msg->apply_bytes -= send; 789 sk_msg_return_zero(sk, msg, send); 790 msg->sg.size -= send; 791 release_sock(sk); 792 err = tcp_bpf_sendmsg_redir(sk_redir, &msg_redir, send, flags); 793 lock_sock(sk); 794 if (err < 0) { 795 *copied -= sk_msg_free_nocharge(sk, &msg_redir); 796 msg->sg.size = 0; 797 } 798 if (msg->sg.size == 0) 799 tls_free_open_rec(sk); 800 break; 801 case __SK_DROP: 802 default: 803 sk_msg_free_partial(sk, msg, send); 804 if (msg->apply_bytes < send) 805 msg->apply_bytes = 0; 806 else 807 msg->apply_bytes -= send; 808 if (msg->sg.size == 0) 809 tls_free_open_rec(sk); 810 *copied -= (send + delta); 811 err = -EACCES; 812 } 813 814 if (likely(!err)) { 815 bool reset_eval = !ctx->open_rec; 816 817 rec = ctx->open_rec; 818 if (rec) { 819 msg = &rec->msg_plaintext; 820 if (!msg->apply_bytes) 821 reset_eval = true; 822 } 823 if (reset_eval) { 824 psock->eval = __SK_NONE; 825 if (psock->sk_redir) { 826 sock_put(psock->sk_redir); 827 psock->sk_redir = NULL; 828 } 829 } 830 if (rec) 831 goto more_data; 832 } 833 out_err: 834 sk_psock_put(sk, psock); 835 return err; 836 } 837 838 static int tls_sw_push_pending_record(struct sock *sk, int flags) 839 { 840 struct tls_context *tls_ctx = tls_get_ctx(sk); 841 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 842 struct tls_rec *rec = ctx->open_rec; 843 struct sk_msg *msg_pl; 844 size_t copied; 845 846 if (!rec) 847 return 0; 848 849 msg_pl = &rec->msg_plaintext; 850 copied = msg_pl->sg.size; 851 if (!copied) 852 return 0; 853 854 return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA, 855 &copied, flags); 856 } 857 858 int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) 859 { 860 long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 861 struct tls_context *tls_ctx = tls_get_ctx(sk); 862 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 863 bool async_capable = ctx->async_capable; 864 unsigned char record_type = TLS_RECORD_TYPE_DATA; 865 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter); 866 bool eor = !(msg->msg_flags & MSG_MORE); 867 size_t try_to_copy, copied = 0; 868 struct sk_msg *msg_pl, *msg_en; 869 struct tls_rec *rec; 870 int required_size; 871 int num_async = 0; 872 bool full_record; 873 int record_room; 874 int num_zc = 0; 875 int orig_size; 876 int ret = 0; 877 878 if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL)) 879 return -ENOTSUPP; 880 881 lock_sock(sk); 882 883 /* Wait till there is any pending write on socket */ 884 if (unlikely(sk->sk_write_pending)) { 885 ret = wait_on_pending_writer(sk, &timeo); 886 if (unlikely(ret)) 887 goto send_end; 888 } 889 890 if (unlikely(msg->msg_controllen)) { 891 ret = tls_proccess_cmsg(sk, msg, &record_type); 892 if (ret) { 893 if (ret == 
-EINPROGRESS) 894 num_async++; 895 else if (ret != -EAGAIN) 896 goto send_end; 897 } 898 } 899 900 while (msg_data_left(msg)) { 901 if (sk->sk_err) { 902 ret = -sk->sk_err; 903 goto send_end; 904 } 905 906 if (ctx->open_rec) 907 rec = ctx->open_rec; 908 else 909 rec = ctx->open_rec = tls_get_rec(sk); 910 if (!rec) { 911 ret = -ENOMEM; 912 goto send_end; 913 } 914 915 msg_pl = &rec->msg_plaintext; 916 msg_en = &rec->msg_encrypted; 917 918 orig_size = msg_pl->sg.size; 919 full_record = false; 920 try_to_copy = msg_data_left(msg); 921 record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size; 922 if (try_to_copy >= record_room) { 923 try_to_copy = record_room; 924 full_record = true; 925 } 926 927 required_size = msg_pl->sg.size + try_to_copy + 928 tls_ctx->tx.overhead_size; 929 930 if (!sk_stream_memory_free(sk)) 931 goto wait_for_sndbuf; 932 933 alloc_encrypted: 934 ret = tls_alloc_encrypted_msg(sk, required_size); 935 if (ret) { 936 if (ret != -ENOSPC) 937 goto wait_for_memory; 938 939 /* Adjust try_to_copy according to the amount that was 940 * actually allocated. The difference is due 941 * to max sg elements limit 942 */ 943 try_to_copy -= required_size - msg_en->sg.size; 944 full_record = true; 945 } 946 947 if (!is_kvec && (full_record || eor) && !async_capable) { 948 u32 first = msg_pl->sg.end; 949 950 ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter, 951 msg_pl, try_to_copy); 952 if (ret) 953 goto fallback_to_reg_send; 954 955 rec->inplace_crypto = 0; 956 957 num_zc++; 958 copied += try_to_copy; 959 960 sk_msg_sg_copy_set(msg_pl, first); 961 ret = bpf_exec_tx_verdict(msg_pl, sk, full_record, 962 record_type, &copied, 963 msg->msg_flags); 964 if (ret) { 965 if (ret == -EINPROGRESS) 966 num_async++; 967 else if (ret == -ENOMEM) 968 goto wait_for_memory; 969 else if (ret == -ENOSPC) 970 goto rollback_iter; 971 else if (ret != -EAGAIN) 972 goto send_end; 973 } 974 continue; 975 rollback_iter: 976 copied -= try_to_copy; 977 sk_msg_sg_copy_clear(msg_pl, first); 978 iov_iter_revert(&msg->msg_iter, 979 msg_pl->sg.size - orig_size); 980 fallback_to_reg_send: 981 sk_msg_trim(sk, msg_pl, orig_size); 982 } 983 984 required_size = msg_pl->sg.size + try_to_copy; 985 986 ret = tls_clone_plaintext_msg(sk, required_size); 987 if (ret) { 988 if (ret != -ENOSPC) 989 goto send_end; 990 991 /* Adjust try_to_copy according to the amount that was 992 * actually allocated. The difference is due 993 * to max sg elements limit 994 */ 995 try_to_copy -= required_size - msg_pl->sg.size; 996 full_record = true; 997 sk_msg_trim(sk, msg_en, msg_pl->sg.size + 998 tls_ctx->tx.overhead_size); 999 } 1000 1001 if (try_to_copy) { 1002 ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, 1003 msg_pl, try_to_copy); 1004 if (ret < 0) 1005 goto trim_sgl; 1006 } 1007 1008 /* Open records defined only if successfully copied, otherwise 1009 * we would trim the sg but not reset the open record frags. 
1010 */ 1011 tls_ctx->pending_open_record_frags = true; 1012 copied += try_to_copy; 1013 if (full_record || eor) { 1014 ret = bpf_exec_tx_verdict(msg_pl, sk, full_record, 1015 record_type, &copied, 1016 msg->msg_flags); 1017 if (ret) { 1018 if (ret == -EINPROGRESS) 1019 num_async++; 1020 else if (ret == -ENOMEM) 1021 goto wait_for_memory; 1022 else if (ret != -EAGAIN) { 1023 if (ret == -ENOSPC) 1024 ret = 0; 1025 goto send_end; 1026 } 1027 } 1028 } 1029 1030 continue; 1031 1032 wait_for_sndbuf: 1033 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1034 wait_for_memory: 1035 ret = sk_stream_wait_memory(sk, &timeo); 1036 if (ret) { 1037 trim_sgl: 1038 tls_trim_both_msgs(sk, orig_size); 1039 goto send_end; 1040 } 1041 1042 if (msg_en->sg.size < required_size) 1043 goto alloc_encrypted; 1044 } 1045 1046 if (!num_async) { 1047 goto send_end; 1048 } else if (num_zc) { 1049 /* Wait for pending encryptions to get completed */ 1050 smp_store_mb(ctx->async_notify, true); 1051 1052 if (atomic_read(&ctx->encrypt_pending)) 1053 crypto_wait_req(-EINPROGRESS, &ctx->async_wait); 1054 else 1055 reinit_completion(&ctx->async_wait.completion); 1056 1057 WRITE_ONCE(ctx->async_notify, false); 1058 1059 if (ctx->async_wait.err) { 1060 ret = ctx->async_wait.err; 1061 copied = 0; 1062 } 1063 } 1064 1065 /* Transmit if any encryptions have completed */ 1066 if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) { 1067 cancel_delayed_work(&ctx->tx_work.work); 1068 tls_tx_records(sk, msg->msg_flags); 1069 } 1070 1071 send_end: 1072 ret = sk_stream_error(sk, msg->msg_flags, ret); 1073 1074 release_sock(sk); 1075 return copied ? copied : ret; 1076 } 1077 1078 static int tls_sw_do_sendpage(struct sock *sk, struct page *page, 1079 int offset, size_t size, int flags) 1080 { 1081 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 1082 struct tls_context *tls_ctx = tls_get_ctx(sk); 1083 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 1084 unsigned char record_type = TLS_RECORD_TYPE_DATA; 1085 struct sk_msg *msg_pl; 1086 struct tls_rec *rec; 1087 int num_async = 0; 1088 size_t copied = 0; 1089 bool full_record; 1090 int record_room; 1091 int ret = 0; 1092 bool eor; 1093 1094 eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST)); 1095 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); 1096 1097 /* Wait till there is any pending write on socket */ 1098 if (unlikely(sk->sk_write_pending)) { 1099 ret = wait_on_pending_writer(sk, &timeo); 1100 if (unlikely(ret)) 1101 goto sendpage_end; 1102 } 1103 1104 /* Call the sk_stream functions to manage the sndbuf mem. */ 1105 while (size > 0) { 1106 size_t copy, required_size; 1107 1108 if (sk->sk_err) { 1109 ret = -sk->sk_err; 1110 goto sendpage_end; 1111 } 1112 1113 if (ctx->open_rec) 1114 rec = ctx->open_rec; 1115 else 1116 rec = ctx->open_rec = tls_get_rec(sk); 1117 if (!rec) { 1118 ret = -ENOMEM; 1119 goto sendpage_end; 1120 } 1121 1122 msg_pl = &rec->msg_plaintext; 1123 1124 full_record = false; 1125 record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size; 1126 copied = 0; 1127 copy = size; 1128 if (copy >= record_room) { 1129 copy = record_room; 1130 full_record = true; 1131 } 1132 1133 required_size = msg_pl->sg.size + copy + 1134 tls_ctx->tx.overhead_size; 1135 1136 if (!sk_stream_memory_free(sk)) 1137 goto wait_for_sndbuf; 1138 alloc_payload: 1139 ret = tls_alloc_encrypted_msg(sk, required_size); 1140 if (ret) { 1141 if (ret != -ENOSPC) 1142 goto wait_for_memory; 1143 1144 /* Adjust copy according to the amount that was 1145 * actually allocated. 
The difference is due 1146 * to max sg elements limit 1147 */ 1148 copy -= required_size - msg_pl->sg.size; 1149 full_record = true; 1150 } 1151 1152 sk_msg_page_add(msg_pl, page, copy, offset); 1153 sk_mem_charge(sk, copy); 1154 1155 offset += copy; 1156 size -= copy; 1157 copied += copy; 1158 1159 tls_ctx->pending_open_record_frags = true; 1160 if (full_record || eor || sk_msg_full(msg_pl)) { 1161 rec->inplace_crypto = 0; 1162 ret = bpf_exec_tx_verdict(msg_pl, sk, full_record, 1163 record_type, &copied, flags); 1164 if (ret) { 1165 if (ret == -EINPROGRESS) 1166 num_async++; 1167 else if (ret == -ENOMEM) 1168 goto wait_for_memory; 1169 else if (ret != -EAGAIN) { 1170 if (ret == -ENOSPC) 1171 ret = 0; 1172 goto sendpage_end; 1173 } 1174 } 1175 } 1176 continue; 1177 wait_for_sndbuf: 1178 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1179 wait_for_memory: 1180 ret = sk_stream_wait_memory(sk, &timeo); 1181 if (ret) { 1182 tls_trim_both_msgs(sk, msg_pl->sg.size); 1183 goto sendpage_end; 1184 } 1185 1186 goto alloc_payload; 1187 } 1188 1189 if (num_async) { 1190 /* Transmit if any encryptions have completed */ 1191 if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) { 1192 cancel_delayed_work(&ctx->tx_work.work); 1193 tls_tx_records(sk, flags); 1194 } 1195 } 1196 sendpage_end: 1197 ret = sk_stream_error(sk, flags, ret); 1198 return copied ? copied : ret; 1199 } 1200 1201 int tls_sw_sendpage(struct sock *sk, struct page *page, 1202 int offset, size_t size, int flags) 1203 { 1204 int ret; 1205 1206 if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | 1207 MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY)) 1208 return -ENOTSUPP; 1209 1210 lock_sock(sk); 1211 ret = tls_sw_do_sendpage(sk, page, offset, size, flags); 1212 release_sock(sk); 1213 return ret; 1214 } 1215 1216 static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock, 1217 int flags, long timeo, int *err) 1218 { 1219 struct tls_context *tls_ctx = tls_get_ctx(sk); 1220 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1221 struct sk_buff *skb; 1222 DEFINE_WAIT_FUNC(wait, woken_wake_function); 1223 1224 while (!(skb = ctx->recv_pkt) && sk_psock_queue_empty(psock)) { 1225 if (sk->sk_err) { 1226 *err = sock_error(sk); 1227 return NULL; 1228 } 1229 1230 if (sk->sk_shutdown & RCV_SHUTDOWN) 1231 return NULL; 1232 1233 if (sock_flag(sk, SOCK_DONE)) 1234 return NULL; 1235 1236 if ((flags & MSG_DONTWAIT) || !timeo) { 1237 *err = -EAGAIN; 1238 return NULL; 1239 } 1240 1241 add_wait_queue(sk_sleep(sk), &wait); 1242 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); 1243 sk_wait_event(sk, &timeo, 1244 ctx->recv_pkt != skb || 1245 !sk_psock_queue_empty(psock), 1246 &wait); 1247 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); 1248 remove_wait_queue(sk_sleep(sk), &wait); 1249 1250 /* Handle signals */ 1251 if (signal_pending(current)) { 1252 *err = sock_intr_errno(timeo); 1253 return NULL; 1254 } 1255 } 1256 1257 return skb; 1258 } 1259 1260 static int tls_setup_from_iter(struct sock *sk, struct iov_iter *from, 1261 int length, int *pages_used, 1262 unsigned int *size_used, 1263 struct scatterlist *to, 1264 int to_max_pages) 1265 { 1266 int rc = 0, i = 0, num_elem = *pages_used, maxpages; 1267 struct page *pages[MAX_SKB_FRAGS]; 1268 unsigned int size = *size_used; 1269 ssize_t copied, use; 1270 size_t offset; 1271 1272 while (length > 0) { 1273 i = 0; 1274 maxpages = to_max_pages - num_elem; 1275 if (maxpages == 0) { 1276 rc = -EFAULT; 1277 goto out; 1278 } 1279 copied = iov_iter_get_pages(from, pages, 1280 length, 1281 maxpages, &offset); 
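		/* iov_iter_get_pages() takes a reference on each user page it
		 * maps and returns the number of bytes covered; those pages
		 * are wired into the scatterlist below and dropped with
		 * put_page() once decryption has used them.
		 */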
1282 if (copied <= 0) { 1283 rc = -EFAULT; 1284 goto out; 1285 } 1286 1287 iov_iter_advance(from, copied); 1288 1289 length -= copied; 1290 size += copied; 1291 while (copied) { 1292 use = min_t(int, copied, PAGE_SIZE - offset); 1293 1294 sg_set_page(&to[num_elem], 1295 pages[i], use, offset); 1296 sg_unmark_end(&to[num_elem]); 1297 /* We do not uncharge memory from this API */ 1298 1299 offset = 0; 1300 copied -= use; 1301 1302 i++; 1303 num_elem++; 1304 } 1305 } 1306 /* Mark the end in the last sg entry if newly added */ 1307 if (num_elem > *pages_used) 1308 sg_mark_end(&to[num_elem - 1]); 1309 out: 1310 if (rc) 1311 iov_iter_revert(from, size - *size_used); 1312 *size_used = size; 1313 *pages_used = num_elem; 1314 1315 return rc; 1316 } 1317 1318 /* This function decrypts the input skb into either out_iov or in out_sg 1319 * or in skb buffers itself. The input parameter 'zc' indicates if 1320 * zero-copy mode needs to be tried or not. With zero-copy mode, either 1321 * out_iov or out_sg must be non-NULL. In case both out_iov and out_sg are 1322 * NULL, then the decryption happens inside skb buffers itself, i.e. 1323 * zero-copy gets disabled and 'zc' is updated. 1324 */ 1325 1326 static int decrypt_internal(struct sock *sk, struct sk_buff *skb, 1327 struct iov_iter *out_iov, 1328 struct scatterlist *out_sg, 1329 int *chunk, bool *zc, bool async) 1330 { 1331 struct tls_context *tls_ctx = tls_get_ctx(sk); 1332 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1333 struct strp_msg *rxm = strp_msg(skb); 1334 int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0; 1335 struct aead_request *aead_req; 1336 struct sk_buff *unused; 1337 u8 *aad, *iv, *mem = NULL; 1338 struct scatterlist *sgin = NULL; 1339 struct scatterlist *sgout = NULL; 1340 const int data_len = rxm->full_len - tls_ctx->rx.overhead_size + 1341 tls_ctx->rx.tail_size; 1342 1343 if (*zc && (out_iov || out_sg)) { 1344 if (out_iov) 1345 n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1; 1346 else 1347 n_sgout = sg_nents(out_sg); 1348 n_sgin = skb_nsg(skb, rxm->offset + tls_ctx->rx.prepend_size, 1349 rxm->full_len - tls_ctx->rx.prepend_size); 1350 } else { 1351 n_sgout = 0; 1352 *zc = false; 1353 n_sgin = skb_cow_data(skb, 0, &unused); 1354 } 1355 1356 if (n_sgin < 1) 1357 return -EBADMSG; 1358 1359 /* Increment to accommodate AAD */ 1360 n_sgin = n_sgin + 1; 1361 1362 nsg = n_sgin + n_sgout; 1363 1364 aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv); 1365 mem_size = aead_size + (nsg * sizeof(struct scatterlist)); 1366 mem_size = mem_size + tls_ctx->rx.aad_size; 1367 mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv); 1368 1369 /* Allocate a single block of memory which contains 1370 * aead_req || sgin[] || sgout[] || aad || iv. 1371 * This order achieves correct alignment for aead_req, sgin, sgout. 
1372 */ 1373 mem = kmalloc(mem_size, sk->sk_allocation); 1374 if (!mem) 1375 return -ENOMEM; 1376 1377 /* Segment the allocated memory */ 1378 aead_req = (struct aead_request *)mem; 1379 sgin = (struct scatterlist *)(mem + aead_size); 1380 sgout = sgin + n_sgin; 1381 aad = (u8 *)(sgout + n_sgout); 1382 iv = aad + tls_ctx->rx.aad_size; 1383 1384 /* Prepare IV */ 1385 err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE, 1386 iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, 1387 tls_ctx->rx.iv_size); 1388 if (err < 0) { 1389 kfree(mem); 1390 return err; 1391 } 1392 if (tls_ctx->crypto_recv.info.version == TLS_1_3_VERSION) 1393 memcpy(iv, tls_ctx->rx.iv, crypto_aead_ivsize(ctx->aead_recv)); 1394 else 1395 memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE); 1396 1397 xor_iv_with_seq(tls_ctx->crypto_recv.info.version, iv, 1398 tls_ctx->rx.rec_seq); 1399 1400 /* Prepare AAD */ 1401 tls_make_aad(aad, rxm->full_len - tls_ctx->rx.overhead_size + 1402 tls_ctx->rx.tail_size, 1403 tls_ctx->rx.rec_seq, tls_ctx->rx.rec_seq_size, 1404 ctx->control, 1405 tls_ctx->crypto_recv.info.version); 1406 1407 /* Prepare sgin */ 1408 sg_init_table(sgin, n_sgin); 1409 sg_set_buf(&sgin[0], aad, tls_ctx->rx.aad_size); 1410 err = skb_to_sgvec(skb, &sgin[1], 1411 rxm->offset + tls_ctx->rx.prepend_size, 1412 rxm->full_len - tls_ctx->rx.prepend_size); 1413 if (err < 0) { 1414 kfree(mem); 1415 return err; 1416 } 1417 1418 if (n_sgout) { 1419 if (out_iov) { 1420 sg_init_table(sgout, n_sgout); 1421 sg_set_buf(&sgout[0], aad, tls_ctx->rx.aad_size); 1422 1423 *chunk = 0; 1424 err = tls_setup_from_iter(sk, out_iov, data_len, 1425 &pages, chunk, &sgout[1], 1426 (n_sgout - 1)); 1427 if (err < 0) 1428 goto fallback_to_reg_recv; 1429 } else if (out_sg) { 1430 memcpy(sgout, out_sg, n_sgout * sizeof(*sgout)); 1431 } else { 1432 goto fallback_to_reg_recv; 1433 } 1434 } else { 1435 fallback_to_reg_recv: 1436 sgout = sgin; 1437 pages = 0; 1438 *chunk = data_len; 1439 *zc = false; 1440 } 1441 1442 /* Prepare and submit AEAD request */ 1443 err = tls_do_decryption(sk, skb, sgin, sgout, iv, 1444 data_len, aead_req, async); 1445 if (err == -EINPROGRESS) 1446 return err; 1447 1448 /* Release the pages in case iov was mapped to pages */ 1449 for (; pages > 0; pages--) 1450 put_page(sg_page(&sgout[pages])); 1451 1452 kfree(mem); 1453 return err; 1454 } 1455 1456 static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb, 1457 struct iov_iter *dest, int *chunk, bool *zc, 1458 bool async) 1459 { 1460 struct tls_context *tls_ctx = tls_get_ctx(sk); 1461 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1462 int version = tls_ctx->crypto_recv.info.version; 1463 struct strp_msg *rxm = strp_msg(skb); 1464 int err = 0; 1465 1466 #ifdef CONFIG_TLS_DEVICE 1467 err = tls_device_decrypted(sk, skb); 1468 if (err < 0) 1469 return err; 1470 #endif 1471 if (!ctx->decrypted) { 1472 err = decrypt_internal(sk, skb, dest, NULL, chunk, zc, async); 1473 if (err < 0) { 1474 if (err == -EINPROGRESS) 1475 tls_advance_record_sn(sk, &tls_ctx->rx, 1476 version); 1477 1478 return err; 1479 } 1480 1481 rxm->full_len -= padding_length(ctx, tls_ctx, skb); 1482 1483 rxm->offset += tls_ctx->rx.prepend_size; 1484 rxm->full_len -= tls_ctx->rx.overhead_size; 1485 tls_advance_record_sn(sk, &tls_ctx->rx, version); 1486 ctx->decrypted = true; 1487 ctx->saved_data_ready(sk); 1488 } else { 1489 *zc = false; 1490 } 1491 1492 return err; 1493 } 1494 1495 int decrypt_skb(struct sock *sk, struct sk_buff *skb, 1496 struct scatterlist *sgout) 1497 { 1498 bool zc = true; 1499 
	int chunk;

	return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc, false);
}

static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
			       unsigned int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	if (skb) {
		struct strp_msg *rxm = strp_msg(skb);

		if (len < rxm->full_len) {
			rxm->offset += len;
			rxm->full_len -= len;
			return false;
		}
		kfree_skb(skb);
	}

	/* Finished with message */
	ctx->recv_pkt = NULL;
	__strp_unpause(&ctx->strp);

	return true;
}

/* This function traverses the rx_list in the tls receive context and copies
 * the decrypted data records into the buffer provided by the caller when
 * zero copy is not true. Further, records are removed from the rx_list if it
 * is not a peek case and the record has been consumed completely.
 */
static int process_rx_list(struct tls_sw_context_rx *ctx,
			   struct msghdr *msg,
			   size_t skip,
			   size_t len,
			   bool zc,
			   bool is_peek)
{
	struct sk_buff *skb = skb_peek(&ctx->rx_list);
	ssize_t copied = 0;

	while (skip && skb) {
		struct strp_msg *rxm = strp_msg(skb);

		if (skip < rxm->full_len)
			break;

		skip = skip - rxm->full_len;
		skb = skb_peek_next(skb, &ctx->rx_list);
	}

	while (len && skb) {
		struct sk_buff *next_skb;
		struct strp_msg *rxm = strp_msg(skb);
		int chunk = min_t(unsigned int, rxm->full_len - skip, len);

		if (!zc || (rxm->full_len - skip) > len) {
			int err = skb_copy_datagram_msg(skb, rxm->offset + skip,
							msg, chunk);
			if (err < 0)
				return err;
		}

		len = len - chunk;
		copied = copied + chunk;

		/* Consume the data from the record in the non-peek case */
		if (!is_peek) {
			rxm->offset = rxm->offset + chunk;
			rxm->full_len = rxm->full_len - chunk;

			/* Return if there is unconsumed data in the record */
			if (rxm->full_len - skip)
				break;
		}

		/* The remaining skip-bytes must lie in 1st record in rx_list.
		 * So from the 2nd record, 'skip' should be 0.
		 */
		skip = 0;

		if (msg)
			msg->msg_flags |= MSG_EOR;

		next_skb = skb_peek_next(skb, &ctx->rx_list);

		if (!is_peek) {
			skb_unlink(skb, &ctx->rx_list);
			kfree_skb(skb);
		}

		skb = next_skb;
	}

	return copied;
}

int tls_sw_recvmsg(struct sock *sk,
		   struct msghdr *msg,
		   size_t len,
		   int nonblock,
		   int flags,
		   int *addr_len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_psock *psock;
	unsigned char control = 0;
	ssize_t decrypted = 0;
	struct strp_msg *rxm;
	struct sk_buff *skb;
	ssize_t copied = 0;
	bool cmsg = false;
	int target, err = 0;
	long timeo;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool is_peek = flags & MSG_PEEK;
	int num_async = 0;

	flags |= nonblock;

	if (unlikely(flags & MSG_ERRQUEUE))
		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);

	psock = sk_psock_get(sk);
	lock_sock(sk);

	/* Process pending decrypted records.
It must be non-zero-copy */ 1630 err = process_rx_list(ctx, msg, 0, len, false, is_peek); 1631 if (err < 0) { 1632 tls_err_abort(sk, err); 1633 goto end; 1634 } else { 1635 copied = err; 1636 } 1637 1638 len = len - copied; 1639 if (len) { 1640 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); 1641 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 1642 } else { 1643 goto recv_end; 1644 } 1645 1646 do { 1647 bool retain_skb = false; 1648 bool zc = false; 1649 int to_decrypt; 1650 int chunk = 0; 1651 bool async; 1652 1653 skb = tls_wait_data(sk, psock, flags, timeo, &err); 1654 if (!skb) { 1655 if (psock) { 1656 int ret = __tcp_bpf_recvmsg(sk, psock, 1657 msg, len, flags); 1658 1659 if (ret > 0) { 1660 decrypted += ret; 1661 len -= ret; 1662 continue; 1663 } 1664 } 1665 goto recv_end; 1666 } 1667 1668 rxm = strp_msg(skb); 1669 1670 to_decrypt = rxm->full_len - tls_ctx->rx.overhead_size; 1671 1672 if (to_decrypt <= len && !is_kvec && !is_peek && 1673 ctx->control == TLS_RECORD_TYPE_DATA && 1674 tls_ctx->crypto_recv.info.version != TLS_1_3_VERSION) 1675 zc = true; 1676 1677 /* Do not use async mode if record is non-data */ 1678 if (ctx->control == TLS_RECORD_TYPE_DATA) 1679 async = ctx->async_capable; 1680 else 1681 async = false; 1682 1683 err = decrypt_skb_update(sk, skb, &msg->msg_iter, 1684 &chunk, &zc, async); 1685 if (err < 0 && err != -EINPROGRESS) { 1686 tls_err_abort(sk, EBADMSG); 1687 goto recv_end; 1688 } 1689 1690 if (err == -EINPROGRESS) 1691 num_async++; 1692 1693 if (!cmsg) { 1694 int cerr; 1695 1696 cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE, 1697 sizeof(ctx->control), &ctx->control); 1698 cmsg = true; 1699 control = ctx->control; 1700 if (ctx->control != TLS_RECORD_TYPE_DATA) { 1701 if (cerr || msg->msg_flags & MSG_CTRUNC) { 1702 err = -EIO; 1703 goto recv_end; 1704 } 1705 } 1706 } else if (control != ctx->control) { 1707 goto recv_end; 1708 } 1709 1710 if (async) 1711 goto pick_next_record; 1712 1713 if (!zc) { 1714 if (rxm->full_len > len) { 1715 retain_skb = true; 1716 chunk = len; 1717 } else { 1718 chunk = rxm->full_len; 1719 } 1720 1721 err = skb_copy_datagram_msg(skb, rxm->offset, 1722 msg, chunk); 1723 if (err < 0) 1724 goto recv_end; 1725 1726 if (!is_peek) { 1727 rxm->offset = rxm->offset + chunk; 1728 rxm->full_len = rxm->full_len - chunk; 1729 } 1730 } 1731 1732 pick_next_record: 1733 if (chunk > len) 1734 chunk = len; 1735 1736 decrypted += chunk; 1737 len -= chunk; 1738 1739 /* For async or peek case, queue the current skb */ 1740 if (async || is_peek || retain_skb) { 1741 skb_queue_tail(&ctx->rx_list, skb); 1742 skb = NULL; 1743 } 1744 1745 if (tls_sw_advance_skb(sk, skb, chunk)) { 1746 /* Return full control message to 1747 * userspace before trying to parse 1748 * another message type 1749 */ 1750 msg->msg_flags |= MSG_EOR; 1751 if (ctx->control != TLS_RECORD_TYPE_DATA) 1752 goto recv_end; 1753 } else { 1754 break; 1755 } 1756 1757 /* If we have a new message from strparser, continue now. 
*/ 1758 if (decrypted >= target && !ctx->recv_pkt) 1759 break; 1760 } while (len); 1761 1762 recv_end: 1763 if (num_async) { 1764 /* Wait for all previously submitted records to be decrypted */ 1765 smp_store_mb(ctx->async_notify, true); 1766 if (atomic_read(&ctx->decrypt_pending)) { 1767 err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait); 1768 if (err) { 1769 /* one of async decrypt failed */ 1770 tls_err_abort(sk, err); 1771 copied = 0; 1772 decrypted = 0; 1773 goto end; 1774 } 1775 } else { 1776 reinit_completion(&ctx->async_wait.completion); 1777 } 1778 WRITE_ONCE(ctx->async_notify, false); 1779 1780 /* Drain records from the rx_list & copy if required */ 1781 if (is_peek || is_kvec) 1782 err = process_rx_list(ctx, msg, copied, 1783 decrypted, false, is_peek); 1784 else 1785 err = process_rx_list(ctx, msg, 0, 1786 decrypted, true, is_peek); 1787 if (err < 0) { 1788 tls_err_abort(sk, err); 1789 copied = 0; 1790 goto end; 1791 } 1792 1793 WARN_ON(decrypted != err); 1794 } 1795 1796 copied += decrypted; 1797 1798 end: 1799 release_sock(sk); 1800 if (psock) 1801 sk_psock_put(sk, psock); 1802 return copied ? : err; 1803 } 1804 1805 ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, 1806 struct pipe_inode_info *pipe, 1807 size_t len, unsigned int flags) 1808 { 1809 struct tls_context *tls_ctx = tls_get_ctx(sock->sk); 1810 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1811 struct strp_msg *rxm = NULL; 1812 struct sock *sk = sock->sk; 1813 struct sk_buff *skb; 1814 ssize_t copied = 0; 1815 int err = 0; 1816 long timeo; 1817 int chunk; 1818 bool zc = false; 1819 1820 lock_sock(sk); 1821 1822 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 1823 1824 skb = tls_wait_data(sk, NULL, flags, timeo, &err); 1825 if (!skb) 1826 goto splice_read_end; 1827 1828 if (!ctx->decrypted) { 1829 err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false); 1830 1831 /* splice does not support reading control messages */ 1832 if (ctx->control != TLS_RECORD_TYPE_DATA) { 1833 err = -ENOTSUPP; 1834 goto splice_read_end; 1835 } 1836 1837 if (err < 0) { 1838 tls_err_abort(sk, EBADMSG); 1839 goto splice_read_end; 1840 } 1841 ctx->decrypted = true; 1842 } 1843 rxm = strp_msg(skb); 1844 1845 chunk = min_t(unsigned int, rxm->full_len, len); 1846 copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags); 1847 if (copied < 0) 1848 goto splice_read_end; 1849 1850 if (likely(!(flags & MSG_PEEK))) 1851 tls_sw_advance_skb(sk, skb, copied); 1852 1853 splice_read_end: 1854 release_sock(sk); 1855 return copied ? 
: err; 1856 } 1857 1858 bool tls_sw_stream_read(const struct sock *sk) 1859 { 1860 struct tls_context *tls_ctx = tls_get_ctx(sk); 1861 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1862 bool ingress_empty = true; 1863 struct sk_psock *psock; 1864 1865 rcu_read_lock(); 1866 psock = sk_psock(sk); 1867 if (psock) 1868 ingress_empty = list_empty(&psock->ingress_msg); 1869 rcu_read_unlock(); 1870 1871 return !ingress_empty || ctx->recv_pkt; 1872 } 1873 1874 static int tls_read_size(struct strparser *strp, struct sk_buff *skb) 1875 { 1876 struct tls_context *tls_ctx = tls_get_ctx(strp->sk); 1877 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1878 char header[TLS_HEADER_SIZE + MAX_IV_SIZE]; 1879 struct strp_msg *rxm = strp_msg(skb); 1880 size_t cipher_overhead; 1881 size_t data_len = 0; 1882 int ret; 1883 1884 /* Verify that we have a full TLS header, or wait for more data */ 1885 if (rxm->offset + tls_ctx->rx.prepend_size > skb->len) 1886 return 0; 1887 1888 /* Sanity-check size of on-stack buffer. */ 1889 if (WARN_ON(tls_ctx->rx.prepend_size > sizeof(header))) { 1890 ret = -EINVAL; 1891 goto read_failure; 1892 } 1893 1894 /* Linearize header to local buffer */ 1895 ret = skb_copy_bits(skb, rxm->offset, header, tls_ctx->rx.prepend_size); 1896 1897 if (ret < 0) 1898 goto read_failure; 1899 1900 ctx->control = header[0]; 1901 1902 data_len = ((header[4] & 0xFF) | (header[3] << 8)); 1903 1904 cipher_overhead = tls_ctx->rx.tag_size; 1905 if (tls_ctx->crypto_recv.info.version != TLS_1_3_VERSION) 1906 cipher_overhead += tls_ctx->rx.iv_size; 1907 1908 if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead + 1909 tls_ctx->rx.tail_size) { 1910 ret = -EMSGSIZE; 1911 goto read_failure; 1912 } 1913 if (data_len < cipher_overhead) { 1914 ret = -EBADMSG; 1915 goto read_failure; 1916 } 1917 1918 /* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */ 1919 if (header[1] != TLS_1_2_VERSION_MINOR || 1920 header[2] != TLS_1_2_VERSION_MAJOR) { 1921 ret = -EINVAL; 1922 goto read_failure; 1923 } 1924 #ifdef CONFIG_TLS_DEVICE 1925 handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset, 1926 *(u64*)tls_ctx->rx.rec_seq); 1927 #endif 1928 return data_len + TLS_HEADER_SIZE; 1929 1930 read_failure: 1931 tls_err_abort(strp->sk, ret); 1932 1933 return ret; 1934 } 1935 1936 static void tls_queue(struct strparser *strp, struct sk_buff *skb) 1937 { 1938 struct tls_context *tls_ctx = tls_get_ctx(strp->sk); 1939 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1940 1941 ctx->decrypted = false; 1942 1943 ctx->recv_pkt = skb; 1944 strp_pause(strp); 1945 1946 ctx->saved_data_ready(strp->sk); 1947 } 1948 1949 static void tls_data_ready(struct sock *sk) 1950 { 1951 struct tls_context *tls_ctx = tls_get_ctx(sk); 1952 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1953 struct sk_psock *psock; 1954 1955 strp_data_ready(&ctx->strp); 1956 1957 psock = sk_psock_get(sk); 1958 if (psock && !list_empty(&psock->ingress_msg)) { 1959 ctx->saved_data_ready(sk); 1960 sk_psock_put(sk, psock); 1961 } 1962 } 1963 1964 void tls_sw_free_resources_tx(struct sock *sk) 1965 { 1966 struct tls_context *tls_ctx = tls_get_ctx(sk); 1967 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 1968 struct tls_rec *rec, *tmp; 1969 1970 /* Wait for any pending async encryptions to complete */ 1971 smp_store_mb(ctx->async_notify, true); 1972 if (atomic_read(&ctx->encrypt_pending)) 1973 crypto_wait_req(-EINPROGRESS, &ctx->async_wait); 1974 1975 release_sock(sk); 1976 cancel_delayed_work_sync(&ctx->tx_work.work); 
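	/* The socket lock is dropped across cancel_delayed_work_sync() above
	 * because tx_work_handler() itself takes lock_sock(); cancelling the
	 * work synchronously while holding the lock could deadlock.
	 */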
	lock_sock(sk);

	/* Tx whatever records we can transmit and abandon the rest */
	tls_tx_records(sk, -1);

	/* Free up un-sent records in tx_list. First, free
	 * the partially sent record if any at head of tx_list.
	 */
	if (tls_ctx->partially_sent_record) {
		struct scatterlist *sg = tls_ctx->partially_sent_record;

		while (1) {
			put_page(sg_page(sg));
			sk_mem_uncharge(sk, sg->length);

			if (sg_is_last(sg))
				break;
			sg++;
		}

		tls_ctx->partially_sent_record = NULL;

		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_encrypted);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	crypto_free_aead(ctx->aead_send);
	tls_free_open_rec(sk);

	kfree(ctx);
}

void tls_sw_release_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	if (ctx->aead_recv) {
		kfree_skb(ctx->recv_pkt);
		ctx->recv_pkt = NULL;
		skb_queue_purge(&ctx->rx_list);
		crypto_free_aead(ctx->aead_recv);
		strp_stop(&ctx->strp);
		write_lock_bh(&sk->sk_callback_lock);
		sk->sk_data_ready = ctx->saved_data_ready;
		write_unlock_bh(&sk->sk_callback_lock);
		release_sock(sk);
		strp_done(&ctx->strp);
		lock_sock(sk);
	}
}

void tls_sw_free_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	tls_sw_release_resources_rx(sk);

	kfree(ctx);
}

/* The work handler to transmit the encrypted records in tx_list */
static void tx_work_handler(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct tx_work *tx_work = container_of(delayed_work,
					       struct tx_work, work);
	struct sock *sk = tx_work->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		return;

	lock_sock(sk);
	tls_tx_records(sk, -1);
	release_sock(sk);
}

int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
	struct tls_crypto_info *crypto_info;
	struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
	struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
	struct tls_sw_context_tx *sw_ctx_tx = NULL;
	struct tls_sw_context_rx *sw_ctx_rx = NULL;
	struct cipher_context *cctx;
	struct crypto_aead **aead;
	struct strp_callbacks cb;
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	struct crypto_tfm *tfm;
	char *iv, *rec_seq, *key, *salt;
	size_t keysize;
	int rc = 0;

	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	if (tx) {
		if (!ctx->priv_ctx_tx) {
			sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
			if (!sw_ctx_tx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_tx = sw_ctx_tx;
		} else {
			sw_ctx_tx =
				(struct tls_sw_context_tx *)ctx->priv_ctx_tx;
		}
	} else {
		if (!ctx->priv_ctx_rx) {
			sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
			if
(!sw_ctx_rx) { 2104 rc = -ENOMEM; 2105 goto out; 2106 } 2107 ctx->priv_ctx_rx = sw_ctx_rx; 2108 } else { 2109 sw_ctx_rx = 2110 (struct tls_sw_context_rx *)ctx->priv_ctx_rx; 2111 } 2112 } 2113 2114 if (tx) { 2115 crypto_init_wait(&sw_ctx_tx->async_wait); 2116 crypto_info = &ctx->crypto_send.info; 2117 cctx = &ctx->tx; 2118 aead = &sw_ctx_tx->aead_send; 2119 INIT_LIST_HEAD(&sw_ctx_tx->tx_list); 2120 INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler); 2121 sw_ctx_tx->tx_work.sk = sk; 2122 } else { 2123 crypto_init_wait(&sw_ctx_rx->async_wait); 2124 crypto_info = &ctx->crypto_recv.info; 2125 cctx = &ctx->rx; 2126 skb_queue_head_init(&sw_ctx_rx->rx_list); 2127 aead = &sw_ctx_rx->aead_recv; 2128 } 2129 2130 switch (crypto_info->cipher_type) { 2131 case TLS_CIPHER_AES_GCM_128: { 2132 nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE; 2133 tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE; 2134 iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE; 2135 iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv; 2136 rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE; 2137 rec_seq = 2138 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq; 2139 gcm_128_info = 2140 (struct tls12_crypto_info_aes_gcm_128 *)crypto_info; 2141 keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE; 2142 key = gcm_128_info->key; 2143 salt = gcm_128_info->salt; 2144 break; 2145 } 2146 case TLS_CIPHER_AES_GCM_256: { 2147 nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE; 2148 tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE; 2149 iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE; 2150 iv = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->iv; 2151 rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE; 2152 rec_seq = 2153 ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->rec_seq; 2154 gcm_256_info = 2155 (struct tls12_crypto_info_aes_gcm_256 *)crypto_info; 2156 keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE; 2157 key = gcm_256_info->key; 2158 salt = gcm_256_info->salt; 2159 break; 2160 } 2161 default: 2162 rc = -EINVAL; 2163 goto free_priv; 2164 } 2165 2166 /* Sanity-check the IV size for stack allocations. 
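 * prepend_size (TLS_HEADER_SIZE + nonce_size) must fit in the on-stack
 * header[TLS_HEADER_SIZE + MAX_IV_SIZE] buffer used by tls_read_size().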
*/ 2167 if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE) { 2168 rc = -EINVAL; 2169 goto free_priv; 2170 } 2171 2172 if (crypto_info->version == TLS_1_3_VERSION) { 2173 nonce_size = 0; 2174 cctx->aad_size = TLS_HEADER_SIZE; 2175 cctx->tail_size = 1; 2176 } else { 2177 cctx->aad_size = TLS_AAD_SPACE_SIZE; 2178 cctx->tail_size = 0; 2179 } 2180 2181 cctx->prepend_size = TLS_HEADER_SIZE + nonce_size; 2182 cctx->tag_size = tag_size; 2183 cctx->overhead_size = cctx->prepend_size + cctx->tag_size + 2184 cctx->tail_size; 2185 cctx->iv_size = iv_size; 2186 cctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE, 2187 GFP_KERNEL); 2188 if (!cctx->iv) { 2189 rc = -ENOMEM; 2190 goto free_priv; 2191 } 2192 /* Note: 128 & 256 bit salt are the same size */ 2193 memcpy(cctx->iv, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE); 2194 memcpy(cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size); 2195 cctx->rec_seq_size = rec_seq_size; 2196 cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL); 2197 if (!cctx->rec_seq) { 2198 rc = -ENOMEM; 2199 goto free_iv; 2200 } 2201 2202 if (!*aead) { 2203 *aead = crypto_alloc_aead("gcm(aes)", 0, 0); 2204 if (IS_ERR(*aead)) { 2205 rc = PTR_ERR(*aead); 2206 *aead = NULL; 2207 goto free_rec_seq; 2208 } 2209 } 2210 2211 ctx->push_pending_record = tls_sw_push_pending_record; 2212 2213 rc = crypto_aead_setkey(*aead, key, keysize); 2214 2215 if (rc) 2216 goto free_aead; 2217 2218 rc = crypto_aead_setauthsize(*aead, cctx->tag_size); 2219 if (rc) 2220 goto free_aead; 2221 2222 if (sw_ctx_rx) { 2223 tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv); 2224 2225 if (crypto_info->version == TLS_1_3_VERSION) 2226 sw_ctx_rx->async_capable = false; 2227 else 2228 sw_ctx_rx->async_capable = 2229 tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC; 2230 2231 /* Set up strparser */ 2232 memset(&cb, 0, sizeof(cb)); 2233 cb.rcv_msg = tls_queue; 2234 cb.parse_msg = tls_read_size; 2235 2236 strp_init(&sw_ctx_rx->strp, sk, &cb); 2237 2238 write_lock_bh(&sk->sk_callback_lock); 2239 sw_ctx_rx->saved_data_ready = sk->sk_data_ready; 2240 sk->sk_data_ready = tls_data_ready; 2241 write_unlock_bh(&sk->sk_callback_lock); 2242 2243 strp_check_rcv(&sw_ctx_rx->strp); 2244 } 2245 2246 goto out; 2247 2248 free_aead: 2249 crypto_free_aead(*aead); 2250 *aead = NULL; 2251 free_rec_seq: 2252 kfree(cctx->rec_seq); 2253 cctx->rec_seq = NULL; 2254 free_iv: 2255 kfree(cctx->iv); 2256 cctx->iv = NULL; 2257 free_priv: 2258 if (tx) { 2259 kfree(ctx->priv_ctx_tx); 2260 ctx->priv_ctx_tx = NULL; 2261 } else { 2262 kfree(ctx->priv_ctx_rx); 2263 ctx->priv_ctx_rx = NULL; 2264 } 2265 out: 2266 return rc; 2267 } 2268
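/* Illustrative usage note (not part of the kernel build): tls_set_sw_offload()
 * above is reached when userspace attaches the "tls" ULP to a connected TCP
 * socket and then installs key material via setsockopt(SOL_TLS). A minimal
 * userspace sketch for the TLS 1.2 AES-GCM-128 transmit path follows; the
 * helper name and parameter names are placeholders, it assumes the handshake
 * was completed in userspace, and it assumes the toolchain headers provide
 * TCP_ULP, SOL_TLS and <linux/tls.h>:
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *	#include <linux/tls.h>
 *
 *	static int enable_ktls_tx(int fd, const unsigned char *key,
 *				  const unsigned char *salt,
 *				  const unsigned char *iv,
 *				  const unsigned char *rec_seq)
 *	{
 *		struct tls12_crypto_info_aes_gcm_128 ci;
 *
 *		if (setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")))
 *			return -1;
 *
 *		memset(&ci, 0, sizeof(ci));
 *		ci.info.version = TLS_1_2_VERSION;
 *		ci.info.cipher_type = TLS_CIPHER_AES_GCM_128;
 *		memcpy(ci.key, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
 *		memcpy(ci.salt, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
 *		memcpy(ci.iv, iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
 *		memcpy(ci.rec_seq, rec_seq, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
 *
 *		return setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 *	}
 *
 * After a successful call, plain send()/write() on the socket is routed
 * through tls_sw_sendmsg() above and encrypted into TLS records.
 */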