/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>

#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/highmem.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/inetdevice.h>

#include <net/tls.h>

MODULE_AUTHOR("Mellanox Technologies");
MODULE_DESCRIPTION("Transport Layer Security Support");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_TCP_ULP("tls");

enum {
        TLSV4,
        TLSV6,
        TLS_NUM_PROTS,
};

static struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
static LIST_HEAD(device_list);
static DEFINE_SPINLOCK(device_spinlock);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static struct proto_ops tls_sw_proto_ops;

static void update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
        int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

        sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf];
}

int wait_on_pending_writer(struct sock *sk, long *timeo)
{
        int rc = 0;
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        add_wait_queue(sk_sleep(sk), &wait);
        while (1) {
                if (!*timeo) {
                        rc = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        rc = sock_intr_errno(*timeo);
                        break;
                }

                if (sk_wait_event(sk, timeo, !sk->sk_write_pending, &wait))
                        break;
        }
        remove_wait_queue(sk_sleep(sk), &wait);
        return rc;
}

int tls_push_sg(struct sock *sk,
                struct tls_context *ctx,
                struct scatterlist *sg,
                u16 first_offset,
                int flags)
{
        int sendpage_flags = flags | MSG_SENDPAGE_NOTLAST;
        int ret = 0;
        struct page *p;
        size_t size;
        int offset = first_offset;

        size = sg->length - offset;
        offset += sg->offset;

        ctx->in_tcp_sendpages = true;
        while (1) {
                if (sg_is_last(sg))
                        sendpage_flags = flags;

                /* is sending application-limited? */
                tcp_rate_check_app_limited(sk);
                p = sg_page(sg);
retry:
                ret = do_tcp_sendpages(sk, p, offset, size, sendpage_flags);

                if (ret != size) {
                        if (ret > 0) {
                                offset += ret;
                                size -= ret;
                                goto retry;
                        }

                        offset -= sg->offset;
                        ctx->partially_sent_offset = offset;
                        ctx->partially_sent_record = (void *)sg;
                        ctx->in_tcp_sendpages = false;
                        return ret;
                }

                put_page(p);
                sk_mem_uncharge(sk, sg->length);
                sg = sg_next(sg);
                if (!sg)
                        break;

                offset = sg->offset;
                size = sg->length;
        }

        ctx->in_tcp_sendpages = false;
        ctx->sk_write_space(sk);

        return 0;
}

static int tls_handle_open_record(struct sock *sk, int flags)
{
        struct tls_context *ctx = tls_get_ctx(sk);

        if (tls_is_pending_open_record(ctx))
                return ctx->push_pending_record(sk, flags);

        return 0;
}

int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
                      unsigned char *record_type)
{
        struct cmsghdr *cmsg;
        int rc = -EINVAL;

        for_each_cmsghdr(cmsg, msg) {
                if (!CMSG_OK(msg, cmsg))
                        return -EINVAL;
                if (cmsg->cmsg_level != SOL_TLS)
                        continue;

                switch (cmsg->cmsg_type) {
                case TLS_SET_RECORD_TYPE:
                        if (cmsg->cmsg_len < CMSG_LEN(sizeof(*record_type)))
                                return -EINVAL;

                        if (msg->msg_flags & MSG_MORE)
                                return -EINVAL;

                        rc = tls_handle_open_record(sk, msg->msg_flags);
                        if (rc)
                                return rc;

                        *record_type = *(unsigned char *)CMSG_DATA(cmsg);
                        rc = 0;
                        break;
                default:
                        return -EINVAL;
                }
        }

        return rc;
}

int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
                            int flags)
{
        struct scatterlist *sg;
        u16 offset;

        sg = ctx->partially_sent_record;
        offset = ctx->partially_sent_offset;

        ctx->partially_sent_record = NULL;
        return tls_push_sg(sk, ctx, sg, offset, flags);
}

int tls_push_pending_closed_record(struct sock *sk,
                                   struct tls_context *tls_ctx,
                                   int flags, long *timeo)
{
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

        if (tls_is_partially_sent_record(tls_ctx) ||
            !list_empty(&ctx->tx_list))
                return tls_tx_records(sk, flags);
        else
                return tls_ctx->push_pending_record(sk, flags);
}

static void tls_write_space(struct sock *sk)
{
        struct tls_context *ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);

        /* If in_tcp_sendpages, call the lower protocol write space handler
         * to ensure we wake up any waiting operations there. For example,
         * if do_tcp_sendpages were to call sk_wait_event.
         */
        if (ctx->in_tcp_sendpages) {
                ctx->sk_write_space(sk);
                return;
        }

        /* Schedule the transmission if tx list is ready */
        if (is_tx_ready(tx_ctx) && !sk->sk_write_pending) {
                /* Schedule the transmission */
                if (!test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
                        schedule_delayed_work(&tx_ctx->tx_work.work, 0);
        }

        ctx->sk_write_space(sk);
}

static void tls_ctx_free(struct tls_context *ctx)
{
        if (!ctx)
                return;

        memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
        memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
        kfree(ctx);
}

static void tls_sk_proto_close(struct sock *sk, long timeout)
{
        struct tls_context *ctx = tls_get_ctx(sk);
        long timeo = sock_sndtimeo(sk, 0);
        void (*sk_proto_close)(struct sock *sk, long timeout);
        bool free_ctx = false;

        lock_sock(sk);
        sk_proto_close = ctx->sk_proto_close;

        if ((ctx->tx_conf == TLS_HW_RECORD && ctx->rx_conf == TLS_HW_RECORD) ||
            (ctx->tx_conf == TLS_BASE && ctx->rx_conf == TLS_BASE)) {
                free_ctx = true;
                goto skip_tx_cleanup;
        }

        if (!tls_complete_pending_work(sk, ctx, 0, &timeo))
                tls_handle_open_record(sk, 0);

        /* We need these for tls_sw_fallback handling of other packets */
        if (ctx->tx_conf == TLS_SW) {
                kfree(ctx->tx.rec_seq);
                kfree(ctx->tx.iv);
                tls_sw_free_resources_tx(sk);
        }

        if (ctx->rx_conf == TLS_SW) {
                kfree(ctx->rx.rec_seq);
                kfree(ctx->rx.iv);
                tls_sw_free_resources_rx(sk);
        }

#ifdef CONFIG_TLS_DEVICE
        if (ctx->rx_conf == TLS_HW)
                tls_device_offload_cleanup_rx(sk);

        if (ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW) {
#else
        {
#endif
                tls_ctx_free(ctx);
                ctx = NULL;
        }

skip_tx_cleanup:
        release_sock(sk);
        sk_proto_close(sk, timeout);
        /* free ctx for TLS_HW_RECORD, used by tcp_set_state
         * for sk->sk_prot->unhash [tls_hw_unhash]
         */
        if (free_ctx)
                tls_ctx_free(ctx);
}

static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
                                int __user *optlen)
{
        int rc = 0;
        struct tls_context *ctx = tls_get_ctx(sk);
        struct tls_crypto_info *crypto_info;
        int len;

        if (get_user(len, optlen))
                return -EFAULT;

        if (!optval || (len < sizeof(*crypto_info))) {
                rc = -EINVAL;
                goto out;
        }

        if (!ctx) {
                rc = -EBUSY;
                goto out;
        }

        /* get user crypto info */
        crypto_info = &ctx->crypto_send.info;

        if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
                rc = -EBUSY;
                goto out;
        }

        if (len == sizeof(*crypto_info)) {
                if (copy_to_user(optval, crypto_info, sizeof(*crypto_info)))
                        rc = -EFAULT;
                goto out;
        }

        switch (crypto_info->cipher_type) {
        case TLS_CIPHER_AES_GCM_128: {
                struct tls12_crypto_info_aes_gcm_128 *
                  crypto_info_aes_gcm_128 =
                  container_of(crypto_info,
                               struct tls12_crypto_info_aes_gcm_128,
                               info);

                if (len != sizeof(*crypto_info_aes_gcm_128)) {
                        rc = -EINVAL;
                        goto out;
                }
                lock_sock(sk);
                memcpy(crypto_info_aes_gcm_128->iv,
                       ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
                       TLS_CIPHER_AES_GCM_128_IV_SIZE);
                memcpy(crypto_info_aes_gcm_128->rec_seq, ctx->tx.rec_seq,
                       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
                release_sock(sk);
                if (copy_to_user(optval,
                                 crypto_info_aes_gcm_128,
                                 sizeof(*crypto_info_aes_gcm_128)))
                        rc = -EFAULT;
                break;
        }
        default:
                rc = -EINVAL;
        }

out:
        return rc;
}

static int do_tls_getsockopt(struct sock *sk, int optname,
                             char __user *optval, int __user *optlen)
{
        int rc = 0;

        switch (optname) {
        case TLS_TX:
                rc = do_tls_getsockopt_tx(sk, optval, optlen);
                break;
        default:
                rc = -ENOPROTOOPT;
                break;
        }
        return rc;
}

static int tls_getsockopt(struct sock *sk, int level, int optname,
                          char __user *optval, int __user *optlen)
{
        struct tls_context *ctx = tls_get_ctx(sk);

        if (level != SOL_TLS)
                return ctx->getsockopt(sk, level, optname, optval, optlen);

        return do_tls_getsockopt(sk, optname, optval, optlen);
}

static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
                                  unsigned int optlen, int tx)
{
        struct tls_crypto_info *crypto_info;
        struct tls_context *ctx = tls_get_ctx(sk);
        int rc = 0;
        int conf;

        if (!optval || (optlen < sizeof(*crypto_info))) {
                rc = -EINVAL;
                goto out;
        }

        if (tx)
                crypto_info = &ctx->crypto_send.info;
        else
                crypto_info = &ctx->crypto_recv.info;

        /* Currently we don't support setting the crypto info more than once */
        if (TLS_CRYPTO_INFO_READY(crypto_info)) {
                rc = -EBUSY;
                goto out;
        }

        rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info));
        if (rc) {
                rc = -EFAULT;
                goto err_crypto_info;
        }

        /* check version */
        if (crypto_info->version != TLS_1_2_VERSION) {
                rc = -ENOTSUPP;
                goto err_crypto_info;
        }

        switch (crypto_info->cipher_type) {
        case TLS_CIPHER_AES_GCM_128: {
                if (optlen != sizeof(struct tls12_crypto_info_aes_gcm_128)) {
                        rc = -EINVAL;
                        goto err_crypto_info;
                }
                rc = copy_from_user(crypto_info + 1, optval + sizeof(*crypto_info),
                                    optlen - sizeof(*crypto_info));
                if (rc) {
                        rc = -EFAULT;
                        goto err_crypto_info;
                }
                break;
        }
        default:
                rc = -EINVAL;
                goto err_crypto_info;
        }

        if (tx) {
#ifdef CONFIG_TLS_DEVICE
                rc = tls_set_device_offload(sk, ctx);
                conf = TLS_HW;
                if (rc) {
#else
                {
#endif
                        rc = tls_set_sw_offload(sk, ctx, 1);
                        conf = TLS_SW;
                }
        } else {
#ifdef CONFIG_TLS_DEVICE
                rc = tls_set_device_offload_rx(sk, ctx);
                conf = TLS_HW;
                if (rc) {
#else
                {
#endif
                        rc = tls_set_sw_offload(sk, ctx, 0);
                        conf = TLS_SW;
                }
        }

        if (rc)
                goto err_crypto_info;

        if (tx)
                ctx->tx_conf = conf;
        else
                ctx->rx_conf = conf;
        update_sk_prot(sk, ctx);
        if (tx) {
                ctx->sk_write_space = sk->sk_write_space;
                sk->sk_write_space = tls_write_space;
        } else {
                sk->sk_socket->ops = &tls_sw_proto_ops;
        }
        goto out;

err_crypto_info:
        memzero_explicit(crypto_info, sizeof(union tls_crypto_context));
out:
        return rc;
}

static int do_tls_setsockopt(struct sock *sk, int optname,
                             char __user *optval, unsigned int optlen)
{
        int rc = 0;

        switch (optname) {
        case TLS_TX:
        case TLS_RX:
                lock_sock(sk);
                rc = do_tls_setsockopt_conf(sk, optval, optlen,
                                            optname == TLS_TX);
                release_sock(sk);
                break;
        default:
                rc = -ENOPROTOOPT;
                break;
        }
        return rc;
}

static int tls_setsockopt(struct sock *sk, int level, int optname,
                          char __user *optval, unsigned int optlen)
{
        struct tls_context *ctx = tls_get_ctx(sk);

        if (level != SOL_TLS)
                return ctx->setsockopt(sk, level, optname, optval, optlen);

        return do_tls_setsockopt(sk, optname, optval, optlen);
}

static struct tls_context *create_ctx(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tls_context *ctx;

        ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
        if (!ctx)
                return NULL;

        icsk->icsk_ulp_data = ctx;
        ctx->setsockopt = sk->sk_prot->setsockopt;
        ctx->getsockopt = sk->sk_prot->getsockopt;
        ctx->sk_proto_close = sk->sk_prot->close;
        return ctx;
}

static int tls_hw_prot(struct sock *sk)
{
        struct tls_context *ctx;
        struct tls_device *dev;
        int rc = 0;

        spin_lock_bh(&device_spinlock);
        list_for_each_entry(dev, &device_list, dev_list) {
                if (dev->feature && dev->feature(dev)) {
                        ctx = create_ctx(sk);
                        if (!ctx)
                                goto out;

                        ctx->hash = sk->sk_prot->hash;
                        ctx->unhash = sk->sk_prot->unhash;
                        ctx->sk_proto_close = sk->sk_prot->close;
                        ctx->rx_conf = TLS_HW_RECORD;
                        ctx->tx_conf = TLS_HW_RECORD;
                        update_sk_prot(sk, ctx);
                        rc = 1;
                        break;
                }
        }
out:
        spin_unlock_bh(&device_spinlock);
        return rc;
}

static void tls_hw_unhash(struct sock *sk)
{
        struct tls_context *ctx = tls_get_ctx(sk);
        struct tls_device *dev;

        spin_lock_bh(&device_spinlock);
        list_for_each_entry(dev, &device_list, dev_list) {
                if (dev->unhash) {
                        kref_get(&dev->kref);
                        spin_unlock_bh(&device_spinlock);
                        dev->unhash(dev, sk);
                        kref_put(&dev->kref, dev->release);
                        spin_lock_bh(&device_spinlock);
                }
        }
        spin_unlock_bh(&device_spinlock);
        ctx->unhash(sk);
}

static int tls_hw_hash(struct sock *sk)
{
        struct tls_context *ctx = tls_get_ctx(sk);
        struct tls_device *dev;
        int err;

        err = ctx->hash(sk);
        spin_lock_bh(&device_spinlock);
        list_for_each_entry(dev, &device_list, dev_list) {
                if (dev->hash) {
                        kref_get(&dev->kref);
                        spin_unlock_bh(&device_spinlock);
                        err |= dev->hash(dev, sk);
                        kref_put(&dev->kref, dev->release);
                        spin_lock_bh(&device_spinlock);
                }
        }
        spin_unlock_bh(&device_spinlock);

        if (err)
                tls_hw_unhash(sk);
        return err;
}

static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
                         struct proto *base)
{
        prot[TLS_BASE][TLS_BASE] = *base;
        prot[TLS_BASE][TLS_BASE].setsockopt = tls_setsockopt;
        prot[TLS_BASE][TLS_BASE].getsockopt = tls_getsockopt;
        prot[TLS_BASE][TLS_BASE].close = tls_sk_proto_close;

        prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
        prot[TLS_SW][TLS_BASE].sendmsg = tls_sw_sendmsg;
        prot[TLS_SW][TLS_BASE].sendpage = tls_sw_sendpage;

        prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
        prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg;
        prot[TLS_BASE][TLS_SW].stream_memory_read = tls_sw_stream_read;
        prot[TLS_BASE][TLS_SW].close = tls_sk_proto_close;

        prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
        prot[TLS_SW][TLS_SW].recvmsg = tls_sw_recvmsg;
        prot[TLS_SW][TLS_SW].stream_memory_read = tls_sw_stream_read;
        prot[TLS_SW][TLS_SW].close = tls_sk_proto_close;

#ifdef CONFIG_TLS_DEVICE
        prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
        prot[TLS_HW][TLS_BASE].sendmsg = tls_device_sendmsg;
        prot[TLS_HW][TLS_BASE].sendpage = tls_device_sendpage;

        prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW];
        prot[TLS_HW][TLS_SW].sendmsg = tls_device_sendmsg;
        prot[TLS_HW][TLS_SW].sendpage = tls_device_sendpage;

        prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW];

        prot[TLS_SW][TLS_HW] = prot[TLS_SW][TLS_SW];

        prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW];
#endif

        prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
        prot[TLS_HW_RECORD][TLS_HW_RECORD].hash = tls_hw_hash;
        prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash = tls_hw_unhash;
        prot[TLS_HW_RECORD][TLS_HW_RECORD].close = tls_sk_proto_close;
}

static int tls_init(struct sock *sk)
{
        int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
        struct tls_context *ctx;
        int rc = 0;

        if (tls_hw_prot(sk))
                goto out;

        /* The TLS ulp is currently supported only for TCP sockets
         * in ESTABLISHED state.
         * Supporting sockets in LISTEN state will require us
         * to modify the accept implementation to clone rather than
         * share the ulp context.
         */
        if (sk->sk_state != TCP_ESTABLISHED)
                return -ENOTSUPP;

        /* allocate tls context */
        ctx = create_ctx(sk);
        if (!ctx) {
                rc = -ENOMEM;
                goto out;
        }

        /* Rebuild IPv6 TLS protos whenever the address of tcpv6_prot changes */
        if (ip_ver == TLSV6 &&
            unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
                mutex_lock(&tcpv6_prot_mutex);
                if (likely(sk->sk_prot != saved_tcpv6_prot)) {
                        build_protos(tls_prots[TLSV6], sk->sk_prot);
                        smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
                }
                mutex_unlock(&tcpv6_prot_mutex);
        }

        ctx->tx_conf = TLS_BASE;
        ctx->rx_conf = TLS_BASE;
        update_sk_prot(sk, ctx);
out:
        return rc;
}

void tls_register_device(struct tls_device *device)
{
        spin_lock_bh(&device_spinlock);
        list_add_tail(&device->dev_list, &device_list);
        spin_unlock_bh(&device_spinlock);
}
EXPORT_SYMBOL(tls_register_device);

void tls_unregister_device(struct tls_device *device)
{
        spin_lock_bh(&device_spinlock);
        list_del(&device->dev_list);
        spin_unlock_bh(&device_spinlock);
}
EXPORT_SYMBOL(tls_unregister_device);

static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
        .name = "tls",
        .owner = THIS_MODULE,
        .init = tls_init,
};

static int __init tls_register(void)
{
        build_protos(tls_prots[TLSV4], &tcp_prot);

        tls_sw_proto_ops = inet_stream_ops;
        tls_sw_proto_ops.splice_read = tls_sw_splice_read;

#ifdef CONFIG_TLS_DEVICE
        tls_device_init();
#endif
        tcp_register_ulp(&tcp_tls_ulp_ops);

        return 0;
}

static void __exit tls_unregister(void)
{
        tcp_unregister_ulp(&tcp_tls_ulp_ops);
#ifdef CONFIG_TLS_DEVICE
        tls_device_cleanup();
#endif
}

module_init(tls_register);
module_exit(tls_unregister);
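
/*
 * Illustrative only, not part of this file: a minimal userspace sketch of how
 * the "tls" ULP registered above is attached and configured through the
 * setsockopt() path this module implements (tls_init() via TCP_ULP, then
 * do_tls_setsockopt_conf() via SOL_TLS/TLS_TX on an ESTABLISHED TCP socket).
 * It assumes headers that provide TCP_ULP, SOL_TLS and
 * struct tls12_crypto_info_aes_gcm_128; the key/IV/salt/record-sequence
 * arguments are placeholders that real code would derive from the TLS
 * handshake.
 *
 *	#include <sys/socket.h>
 *	#include <netinet/tcp.h>
 *	#include <linux/tls.h>
 *	#include <string.h>
 *
 *	static int attach_ktls_tx(int fd, const unsigned char *key,
 *				  const unsigned char *iv,
 *				  const unsigned char *salt,
 *				  const unsigned char *rec_seq)
 *	{
 *		struct tls12_crypto_info_aes_gcm_128 ci;
 *
 *		// Load the TLS ULP; this lands in tls_init() above.
 *		if (setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls")))
 *			return -1;
 *
 *		memset(&ci, 0, sizeof(ci));
 *		ci.info.version = TLS_1_2_VERSION;
 *		ci.info.cipher_type = TLS_CIPHER_AES_GCM_128;
 *		memcpy(ci.key, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
 *		memcpy(ci.iv, iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
 *		memcpy(ci.salt, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
 *		memcpy(ci.rec_seq, rec_seq, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
 *
 *		// Hand the TX crypto state to the kernel; handled by
 *		// do_tls_setsockopt_conf() above, which picks device or
 *		// software offload.
 *		return setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 *	}
 */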