/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>

#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/highmem.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/inetdevice.h>

#include <net/tls.h>

MODULE_AUTHOR("Mellanox Technologies");
MODULE_DESCRIPTION("Transport Layer Security Support");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_TCP_ULP("tls");

enum {
	TLSV4,
	TLSV6,
	TLS_NUM_PROTS,
};

static struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_mutex);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static struct proto_ops tls_sw_proto_ops;

static void update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

	sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf];
}

int wait_on_pending_writer(struct sock *sk, long *timeo)
{
	int rc = 0;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		if (!*timeo) {
			rc = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			rc = sock_intr_errno(*timeo);
			break;
		}

		if (sk_wait_event(sk, timeo, !sk->sk_write_pending, &wait))
			break;
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}
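/* Push a scatterlist describing an already-framed TLS record to the
 * transport via do_tcp_sendpages(). If the transport cannot take more
 * data (e.g. -EAGAIN), the unsent tail and its offset are parked in
 * the tls_context so that a later call, such as
 * tls_push_pending_closed_record(), can resume where TCP stopped.
 */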
int tls_push_sg(struct sock *sk,
		struct tls_context *ctx,
		struct scatterlist *sg,
		u16 first_offset,
		int flags)
{
	int sendpage_flags = flags | MSG_SENDPAGE_NOTLAST;
	int ret = 0;
	struct page *p;
	size_t size;
	int offset = first_offset;

	size = sg->length - offset;
	offset += sg->offset;

	ctx->in_tcp_sendpages = true;
	while (1) {
		if (sg_is_last(sg))
			sendpage_flags = flags;

		/* is sending application-limited? */
		tcp_rate_check_app_limited(sk);
		p = sg_page(sg);
retry:
		ret = do_tcp_sendpages(sk, p, offset, size, sendpage_flags);

		if (ret != size) {
			if (ret > 0) {
				offset += ret;
				size -= ret;
				goto retry;
			}

			offset -= sg->offset;
			ctx->partially_sent_offset = offset;
			ctx->partially_sent_record = (void *)sg;
			ctx->in_tcp_sendpages = false;
			return ret;
		}

		put_page(p);
		sk_mem_uncharge(sk, sg->length);
		sg = sg_next(sg);
		if (!sg)
			break;

		offset = sg->offset;
		size = sg->length;
	}

	clear_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags);
	ctx->in_tcp_sendpages = false;
	ctx->sk_write_space(sk);

	return 0;
}

static int tls_handle_open_record(struct sock *sk, int flags)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (tls_is_pending_open_record(ctx))
		return ctx->push_pending_record(sk, flags);

	return 0;
}

int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
		      unsigned char *record_type)
{
	struct cmsghdr *cmsg;
	int rc = -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_TLS)
			continue;

		switch (cmsg->cmsg_type) {
		case TLS_SET_RECORD_TYPE:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*record_type)))
				return -EINVAL;

			if (msg->msg_flags & MSG_MORE)
				return -EINVAL;

			rc = tls_handle_open_record(sk, msg->msg_flags);
			if (rc)
				return rc;

			*record_type = *(unsigned char *)CMSG_DATA(cmsg);
			rc = 0;
			break;
		default:
			return -EINVAL;
		}
	}

	return rc;
}

int tls_push_pending_closed_record(struct sock *sk, struct tls_context *ctx,
				   int flags, long *timeo)
{
	struct scatterlist *sg;
	u16 offset;

	if (!tls_is_partially_sent_record(ctx))
		return ctx->push_pending_record(sk, flags);

	sg = ctx->partially_sent_record;
	offset = ctx->partially_sent_offset;

	ctx->partially_sent_record = NULL;
	return tls_push_sg(sk, ctx, sg, offset, flags);
}
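/* Replacement sk_write_space callback, installed when a TX
 * configuration is set. When write space opens up, first try to flush
 * any record left pending because the transport was full (using
 * GFP_ATOMIC, since this runs from the socket wakeup path), then
 * chain to the original callback.
 */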
static void tls_write_space(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	/* If in_tcp_sendpages, call the lower protocol write space
	 * handler to ensure we wake up any waiting operations there,
	 * for example if do_tcp_sendpages() were to call sk_wait_event().
	 */
	if (ctx->in_tcp_sendpages) {
		ctx->sk_write_space(sk);
		return;
	}

	if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) {
		gfp_t sk_allocation = sk->sk_allocation;
		int rc;
		long timeo = 0;

		sk->sk_allocation = GFP_ATOMIC;
		rc = tls_push_pending_closed_record(sk, ctx,
						    MSG_DONTWAIT |
						    MSG_NOSIGNAL,
						    &timeo);
		sk->sk_allocation = sk_allocation;

		if (rc < 0)
			return;
	}

	ctx->sk_write_space(sk);
}

static void tls_ctx_free(struct tls_context *ctx)
{
	if (!ctx)
		return;

	memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
	memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
	kfree(ctx);
}

static void tls_sk_proto_close(struct sock *sk, long timeout)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	long timeo = sock_sndtimeo(sk, 0);
	void (*sk_proto_close)(struct sock *sk, long timeout);
	bool free_ctx = false;

	lock_sock(sk);
	sk_proto_close = ctx->sk_proto_close;

	if ((ctx->tx_conf == TLS_HW_RECORD && ctx->rx_conf == TLS_HW_RECORD) ||
	    (ctx->tx_conf == TLS_BASE && ctx->rx_conf == TLS_BASE)) {
		free_ctx = true;
		goto skip_tx_cleanup;
	}

	if (!tls_complete_pending_work(sk, ctx, 0, &timeo))
		tls_handle_open_record(sk, 0);

	/* Release the pages of a record that was never fully pushed out */
	if (ctx->partially_sent_record) {
		struct scatterlist *sg = ctx->partially_sent_record;

		while (1) {
			put_page(sg_page(sg));
			sk_mem_uncharge(sk, sg->length);

			if (sg_is_last(sg))
				break;
			sg++;
		}
	}

	/* We need these for tls_sw_fallback handling of other packets */
	if (ctx->tx_conf == TLS_SW) {
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
		tls_sw_free_resources_tx(sk);
	}

	if (ctx->rx_conf == TLS_SW) {
		kfree(ctx->rx.rec_seq);
		kfree(ctx->rx.iv);
		tls_sw_free_resources_rx(sk);
	}

#ifdef CONFIG_TLS_DEVICE
	if (ctx->rx_conf == TLS_HW)
		tls_device_offload_cleanup_rx(sk);

	if (ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW) {
#else
	{
#endif
		tls_ctx_free(ctx);
		ctx = NULL;
	}

skip_tx_cleanup:
	release_sock(sk);
	sk_proto_close(sk, timeout);
	/* free ctx for TLS_HW_RECORD, used by tcp_set_state
	 * for sk->sk_prot->unhash [tls_hw_unhash]
	 */
	if (free_ctx)
		tls_ctx_free(ctx);
}
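/* Return the TX crypto parameters to userspace. If only the generic
 * header is requested, just the version and cipher type are copied
 * back; for the full AES-GCM-128 structure the live IV and record
 * sequence number are read under the socket lock so the snapshot
 * stays consistent with concurrent transmits.
 */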
static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
				int __user *optlen)
{
	int rc = 0;
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_crypto_info *crypto_info;
	int len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (!optval || (len < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (!ctx) {
		rc = -EBUSY;
		goto out;
	}

	/* get user crypto info */
	crypto_info = &ctx->crypto_send.info;

	if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	if (len == sizeof(*crypto_info)) {
		if (copy_to_user(optval, crypto_info, sizeof(*crypto_info)))
			rc = -EFAULT;
		goto out;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *
		  crypto_info_aes_gcm_128 =
		  container_of(crypto_info,
			       struct tls12_crypto_info_aes_gcm_128,
			       info);

		if (len != sizeof(*crypto_info_aes_gcm_128)) {
			rc = -EINVAL;
			goto out;
		}
		lock_sock(sk);
		memcpy(crypto_info_aes_gcm_128->iv,
		       ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
		       TLS_CIPHER_AES_GCM_128_IV_SIZE);
		memcpy(crypto_info_aes_gcm_128->rec_seq, ctx->tx.rec_seq,
		       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
		release_sock(sk);
		if (copy_to_user(optval,
				 crypto_info_aes_gcm_128,
				 sizeof(*crypto_info_aes_gcm_128)))
			rc = -EFAULT;
		break;
	}
	default:
		rc = -EINVAL;
	}

out:
	return rc;
}

static int do_tls_getsockopt(struct sock *sk, int optname,
			     char __user *optval, int __user *optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
		rc = do_tls_getsockopt_tx(sk, optval, optlen);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}

static int tls_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->getsockopt(sk, level, optname, optval, optlen);

	return do_tls_getsockopt(sk, optname, optval, optlen);
}

static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
				  unsigned int optlen, int tx)
{
	struct tls_crypto_info *crypto_info;
	struct tls_context *ctx = tls_get_ctx(sk);
	int rc = 0;
	int conf;

	if (!optval || (optlen < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (tx)
		crypto_info = &ctx->crypto_send.info;
	else
		crypto_info = &ctx->crypto_recv.info;

	/* Currently we don't support setting the crypto info more than once */
	if (TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto err_crypto_info;
	}

	/* check version */
	if (crypto_info->version != TLS_1_2_VERSION) {
		rc = -ENOTSUPP;
		goto err_crypto_info;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		if (optlen != sizeof(struct tls12_crypto_info_aes_gcm_128)) {
			rc = -EINVAL;
			goto err_crypto_info;
		}
		rc = copy_from_user(crypto_info + 1, optval + sizeof(*crypto_info),
				    optlen - sizeof(*crypto_info));
		if (rc) {
			rc = -EFAULT;
			goto err_crypto_info;
		}
		break;
	}
	default:
		rc = -EINVAL;
		goto err_crypto_info;
	}

	/* Prefer device offload when available, falling back to the SW path */
	if (tx) {
#ifdef CONFIG_TLS_DEVICE
		rc = tls_set_device_offload(sk, ctx);
		conf = TLS_HW;
		if (rc) {
#else
		{
#endif
			rc = tls_set_sw_offload(sk, ctx, 1);
			conf = TLS_SW;
		}
	} else {
#ifdef CONFIG_TLS_DEVICE
		rc = tls_set_device_offload_rx(sk, ctx);
		conf = TLS_HW;
		if (rc) {
#else
		{
#endif
			rc = tls_set_sw_offload(sk, ctx, 0);
			conf = TLS_SW;
		}
	}

	if (rc)
		goto err_crypto_info;

	if (tx)
		ctx->tx_conf = conf;
	else
		ctx->rx_conf = conf;
	update_sk_prot(sk, ctx);
	if (tx) {
		ctx->sk_write_space = sk->sk_write_space;
		sk->sk_write_space = tls_write_space;
	} else {
		sk->sk_socket->ops = &tls_sw_proto_ops;
	}
	goto out;

err_crypto_info:
	memzero_explicit(crypto_info, sizeof(union tls_crypto_context));
out:
	return rc;
}
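/* Entry point for SOL_TLS setsockopt(): TLS_TX and TLS_RX both funnel
 * through do_tls_setsockopt_conf() under the socket lock; any other
 * option name is rejected with -ENOPROTOOPT.
 */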
static int do_tls_setsockopt(struct sock *sk, int optname,
			     char __user *optval, unsigned int optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
	case TLS_RX:
		lock_sock(sk);
		rc = do_tls_setsockopt_conf(sk, optval, optlen,
					    optname == TLS_TX);
		release_sock(sk);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}

static int tls_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->setsockopt(sk, level, optname, optval, optlen);

	return do_tls_setsockopt(sk, optname, optval, optlen);
}

static struct tls_context *create_ctx(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tls_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	icsk->icsk_ulp_data = ctx;
	return ctx;
}

static int tls_hw_prot(struct sock *sk)
{
	struct tls_context *ctx;
	struct tls_device *dev;
	int rc = 0;

	mutex_lock(&device_mutex);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->feature && dev->feature(dev)) {
			ctx = create_ctx(sk);
			if (!ctx)
				goto out;

			ctx->hash = sk->sk_prot->hash;
			ctx->unhash = sk->sk_prot->unhash;
			ctx->sk_proto_close = sk->sk_prot->close;
			ctx->rx_conf = TLS_HW_RECORD;
			ctx->tx_conf = TLS_HW_RECORD;
			update_sk_prot(sk, ctx);
			rc = 1;
			break;
		}
	}
out:
	mutex_unlock(&device_mutex);
	return rc;
}

static void tls_hw_unhash(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_device *dev;

	mutex_lock(&device_mutex);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->unhash)
			dev->unhash(dev, sk);
	}
	mutex_unlock(&device_mutex);
	ctx->unhash(sk);
}

static int tls_hw_hash(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_device *dev;
	int err;

	err = ctx->hash(sk);
	mutex_lock(&device_mutex);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->hash)
			err |= dev->hash(dev, sk);
	}
	mutex_unlock(&device_mutex);

	if (err)
		tls_hw_unhash(sk);
	return err;
}
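/* Fill the proto table indexed by [tx_conf][rx_conf]. Each entry
 * starts from the base TCP proto and overrides only the operations
 * its TX/RX configuration changes; TLS_HW_RECORD additionally hooks
 * hash/unhash so registered tls_device drivers are notified.
 */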
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 struct proto *base)
{
	prot[TLS_BASE][TLS_BASE] = *base;
	prot[TLS_BASE][TLS_BASE].setsockopt = tls_setsockopt;
	prot[TLS_BASE][TLS_BASE].getsockopt = tls_getsockopt;
	prot[TLS_BASE][TLS_BASE].close = tls_sk_proto_close;

	prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_SW][TLS_BASE].sendmsg = tls_sw_sendmsg;
	prot[TLS_SW][TLS_BASE].sendpage = tls_sw_sendpage;

	prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg;
	prot[TLS_BASE][TLS_SW].close = tls_sk_proto_close;

	prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
	prot[TLS_SW][TLS_SW].recvmsg = tls_sw_recvmsg;
	prot[TLS_SW][TLS_SW].close = tls_sk_proto_close;

#ifdef CONFIG_TLS_DEVICE
	prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_HW][TLS_BASE].sendmsg = tls_device_sendmsg;
	prot[TLS_HW][TLS_BASE].sendpage = tls_device_sendpage;

	prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW];
	prot[TLS_HW][TLS_SW].sendmsg = tls_device_sendmsg;
	prot[TLS_HW][TLS_SW].sendpage = tls_device_sendpage;

	prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW];

	prot[TLS_SW][TLS_HW] = prot[TLS_SW][TLS_SW];

	prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW];
#endif

	prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].hash = tls_hw_hash;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash = tls_hw_unhash;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].close = tls_sk_proto_close;
}

static int tls_init(struct sock *sk)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
	struct tls_context *ctx;
	int rc = 0;

	if (tls_hw_prot(sk))
		goto out;

	/* The TLS ulp is currently supported only for TCP sockets
	 * in ESTABLISHED state.
	 * Supporting sockets in LISTEN state will require us
	 * to modify the accept implementation to clone rather than
	 * share the ulp context.
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTSUPP;

	/* allocate tls context */
	ctx = create_ctx(sk);
	if (!ctx) {
		rc = -ENOMEM;
		goto out;
	}
	ctx->setsockopt = sk->sk_prot->setsockopt;
	ctx->getsockopt = sk->sk_prot->getsockopt;
	ctx->sk_proto_close = sk->sk_prot->close;

	/* Rebuild the IPv6 TLS protos whenever the address of tcpv6_prot changes */
	if (ip_ver == TLSV6 &&
	    unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
		mutex_lock(&tcpv6_prot_mutex);
		if (likely(sk->sk_prot != saved_tcpv6_prot)) {
			build_protos(tls_prots[TLSV6], sk->sk_prot);
			smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
		}
		mutex_unlock(&tcpv6_prot_mutex);
	}

	ctx->tx_conf = TLS_BASE;
	ctx->rx_conf = TLS_BASE;
	update_sk_prot(sk, ctx);
out:
	return rc;
}

void tls_register_device(struct tls_device *device)
{
	mutex_lock(&device_mutex);
	list_add_tail(&device->dev_list, &device_list);
	mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(tls_register_device);

void tls_unregister_device(struct tls_device *device)
{
	mutex_lock(&device_mutex);
	list_del(&device->dev_list);
	mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(tls_unregister_device);

static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
	.name = "tls",
	.uid = TCP_ULP_TLS,
	.user_visible = true,
	.owner = THIS_MODULE,
	.init = tls_init,
};

static int __init tls_register(void)
{
	build_protos(tls_prots[TLSV4], &tcp_prot);

	tls_sw_proto_ops = inet_stream_ops;
	tls_sw_proto_ops.poll = tls_sw_poll;
	tls_sw_proto_ops.splice_read = tls_sw_splice_read;

#ifdef CONFIG_TLS_DEVICE
	tls_device_init();
#endif
	tcp_register_ulp(&tcp_tls_ulp_ops);

	return 0;
}

static void __exit tls_unregister(void)
{
	tcp_unregister_ulp(&tcp_tls_ulp_ops);
#ifdef CONFIG_TLS_DEVICE
	tls_device_cleanup();
#endif
}

module_init(tls_register);
module_exit(tls_unregister);
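/* Illustrative userspace usage (a sketch, not part of this module):
 * once the TLS handshake has completed on a connected TCP socket, the
 * application attaches the "tls" ULP and hands the negotiated
 * AES-GCM-128 parameters to the kernel via setsockopt(). The key
 * material shown below is a placeholder.
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *	};
 *
 *	memcpy(ci.key, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
 *	memcpy(ci.iv, iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
 *	memcpy(ci.salt, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
 *	memcpy(ci.rec_seq, rec_seq, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
 *
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 *
 * After this, plain send()/write() on fd transmits encrypted TLS
 * records.
 */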