/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>

#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/highmem.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/inetdevice.h>

#include <net/tls.h>

MODULE_AUTHOR("Mellanox Technologies");
MODULE_DESCRIPTION("Transport Layer Security Support");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_TCP_ULP("tls");

enum {
	TLSV4,
	TLSV6,
	TLS_NUM_PROTS,
};

static struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
static struct proto *saved_tcpv4_prot;
static DEFINE_MUTEX(tcpv4_prot_mutex);
static LIST_HEAD(device_list);
static DEFINE_SPINLOCK(device_spinlock);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static struct proto_ops tls_sw_proto_ops;
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 struct proto *base);
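/* tls_prots is a matrix of struct proto templates indexed as
 * [ip_ver][tx_conf][rx_conf]: one copy of the base TCP proto per address
 * family, specialized for each combination of TX and RX configuration
 * (TLS_BASE, TLS_SW, TLS_HW, TLS_HW_RECORD). update_sk_prot() points a
 * socket's sk_prot at the entry matching its current configuration.
 */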
static void update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

	sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf];
}

int wait_on_pending_writer(struct sock *sk, long *timeo)
{
	int rc = 0;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		if (!*timeo) {
			rc = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			rc = sock_intr_errno(*timeo);
			break;
		}

		if (sk_wait_event(sk, timeo, !sk->sk_write_pending, &wait))
			break;
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}

int tls_push_sg(struct sock *sk,
		struct tls_context *ctx,
		struct scatterlist *sg,
		u16 first_offset,
		int flags)
{
	int sendpage_flags = flags | MSG_SENDPAGE_NOTLAST;
	int ret = 0;
	struct page *p;
	size_t size;
	int offset = first_offset;

	size = sg->length - offset;
	offset += sg->offset;

	ctx->in_tcp_sendpages = true;
	while (1) {
		if (sg_is_last(sg))
			sendpage_flags = flags;

		/* is sending application-limited? */
		tcp_rate_check_app_limited(sk);
		p = sg_page(sg);
retry:
		ret = do_tcp_sendpages(sk, p, offset, size, sendpage_flags);

		if (ret != size) {
			if (ret > 0) {
				offset += ret;
				size -= ret;
				goto retry;
			}

			offset -= sg->offset;
			ctx->partially_sent_offset = offset;
			ctx->partially_sent_record = (void *)sg;
			ctx->in_tcp_sendpages = false;
			return ret;
		}

		put_page(p);
		sk_mem_uncharge(sk, sg->length);
		sg = sg_next(sg);
		if (!sg)
			break;

		offset = sg->offset;
		size = sg->length;
	}

	ctx->in_tcp_sendpages = false;
	ctx->sk_write_space(sk);

	return 0;
}

static int tls_handle_open_record(struct sock *sk, int flags)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (tls_is_pending_open_record(ctx))
		return ctx->push_pending_record(sk, flags);

	return 0;
}

int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
		      unsigned char *record_type)
{
	struct cmsghdr *cmsg;
	int rc = -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_TLS)
			continue;

		switch (cmsg->cmsg_type) {
		case TLS_SET_RECORD_TYPE:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*record_type)))
				return -EINVAL;

			if (msg->msg_flags & MSG_MORE)
				return -EINVAL;

			rc = tls_handle_open_record(sk, msg->msg_flags);
			if (rc)
				return rc;

			*record_type = *(unsigned char *)CMSG_DATA(cmsg);
			rc = 0;
			break;
		default:
			return -EINVAL;
		}
	}

	return rc;
}
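/* Userspace hands tls_proccess_cmsg() a non-DATA record type through a
 * control message on sendmsg(). A minimal sketch of the sending side
 * (userspace code, not part of this file; error handling omitted):
 *
 *	char buf[CMSG_SPACE(sizeof(unsigned char))];
 *	struct msghdr msg = { 0 };
 *	struct iovec iov = { data, data_len };
 *	struct cmsghdr *cmsg;
 *	unsigned char record_type = 22;	(e.g. TLS handshake)
 *
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	msg.msg_control = buf;
 *	msg.msg_controllen = sizeof(buf);
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = SOL_TLS;
 *	cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(record_type));
 *	*CMSG_DATA(cmsg) = record_type;
 *	msg.msg_controllen = cmsg->cmsg_len;
 *	sendmsg(sock, &msg, 0);
 *
 * Note that sendmsg() rejects MSG_MORE in combination with a record type,
 * as enforced above.
 */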
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags)
{
	struct scatterlist *sg;
	u16 offset;

	sg = ctx->partially_sent_record;
	offset = ctx->partially_sent_offset;

	ctx->partially_sent_record = NULL;
	return tls_push_sg(sk, ctx, sg, offset, flags);
}

static void tls_write_space(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	/* If in_tcp_sendpages is set, call the lower protocol write space
	 * handler to ensure we wake up any waiting operations there, for
	 * example if do_tcp_sendpages were to call sk_wait_event.
	 */
	if (ctx->in_tcp_sendpages) {
		ctx->sk_write_space(sk);
		return;
	}

#ifdef CONFIG_TLS_DEVICE
	if (ctx->tx_conf == TLS_HW)
		tls_device_write_space(sk, ctx);
	else
#endif
		tls_sw_write_space(sk, ctx);
}

static void tls_ctx_free(struct tls_context *ctx)
{
	if (!ctx)
		return;

	memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
	memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
	kfree(ctx);
}

static void tls_sk_proto_close(struct sock *sk, long timeout)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	long timeo = sock_sndtimeo(sk, 0);
	void (*sk_proto_close)(struct sock *sk, long timeout);
	bool free_ctx = false;

	lock_sock(sk);
	sk_proto_close = ctx->sk_proto_close;

	if (ctx->tx_conf == TLS_HW_RECORD && ctx->rx_conf == TLS_HW_RECORD)
		goto skip_tx_cleanup;

	if (ctx->tx_conf == TLS_BASE && ctx->rx_conf == TLS_BASE) {
		free_ctx = true;
		goto skip_tx_cleanup;
	}

	if (!tls_complete_pending_work(sk, ctx, 0, &timeo))
		tls_handle_open_record(sk, 0);

	/* We need these for tls_sw_fallback handling of other packets */
	if (ctx->tx_conf == TLS_SW) {
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
		tls_sw_free_resources_tx(sk);
	}

	if (ctx->rx_conf == TLS_SW) {
		kfree(ctx->rx.rec_seq);
		kfree(ctx->rx.iv);
		tls_sw_free_resources_rx(sk);
	}

#ifdef CONFIG_TLS_DEVICE
	if (ctx->rx_conf == TLS_HW)
		tls_device_offload_cleanup_rx(sk);

	if (ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW) {
#else
	{
#endif
		tls_ctx_free(ctx);
		ctx = NULL;
	}

skip_tx_cleanup:
	release_sock(sk);
	sk_proto_close(sk, timeout);
	/* free ctx for TLS_HW_RECORD, used by tcp_set_state
	 * for sk->sk_prot->unhash [tls_hw_unhash]
	 */
	if (free_ctx)
		tls_ctx_free(ctx);
}

static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
				int __user *optlen)
{
	int rc = 0;
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_crypto_info *crypto_info;
	int len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (!optval || (len < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (!ctx) {
		rc = -EBUSY;
		goto out;
	}

	/* get user crypto info */
	crypto_info = &ctx->crypto_send.info;

	if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	if (len == sizeof(*crypto_info)) {
		if (copy_to_user(optval, crypto_info, sizeof(*crypto_info)))
			rc = -EFAULT;
		goto out;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *
		  crypto_info_aes_gcm_128 =
		  container_of(crypto_info,
			       struct tls12_crypto_info_aes_gcm_128,
			       info);

		if (len != sizeof(*crypto_info_aes_gcm_128)) {
			rc = -EINVAL;
			goto out;
		}
		lock_sock(sk);
		memcpy(crypto_info_aes_gcm_128->iv,
		       ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
		       TLS_CIPHER_AES_GCM_128_IV_SIZE);
		memcpy(crypto_info_aes_gcm_128->rec_seq, ctx->tx.rec_seq,
		       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
		release_sock(sk);
		if (copy_to_user(optval,
				 crypto_info_aes_gcm_128,
				 sizeof(*crypto_info_aes_gcm_128)))
			rc = -EFAULT;
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		struct tls12_crypto_info_aes_gcm_256 *
		  crypto_info_aes_gcm_256 =
		  container_of(crypto_info,
			       struct tls12_crypto_info_aes_gcm_256,
			       info);

		if (len != sizeof(*crypto_info_aes_gcm_256)) {
			rc = -EINVAL;
			goto out;
		}
		lock_sock(sk);
		memcpy(crypto_info_aes_gcm_256->iv,
		       ctx->tx.iv + TLS_CIPHER_AES_GCM_256_SALT_SIZE,
		       TLS_CIPHER_AES_GCM_256_IV_SIZE);
		memcpy(crypto_info_aes_gcm_256->rec_seq, ctx->tx.rec_seq,
		       TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);
		release_sock(sk);
		if (copy_to_user(optval,
				 crypto_info_aes_gcm_256,
				 sizeof(*crypto_info_aes_gcm_256)))
			rc = -EFAULT;
		break;
	}
	default:
		rc = -EINVAL;
	}

out:
	return rc;
}
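/* Once TLS_TX has been configured, the installed crypto state can be read
 * back, e.g. to snapshot the current IV and record sequence number. A
 * minimal userspace sketch (illustrative only; error handling omitted):
 *
 *	struct tls12_crypto_info_aes_gcm_128 crypto_info;
 *	socklen_t len = sizeof(crypto_info);
 *
 *	getsockopt(sock, SOL_TLS, TLS_TX, &crypto_info, &len);
 *
 * On success, crypto_info.iv and crypto_info.rec_seq hold the socket's
 * current TX IV and record sequence number, as copied above.
 */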
static int do_tls_getsockopt(struct sock *sk, int optname,
			     char __user *optval, int __user *optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
		rc = do_tls_getsockopt_tx(sk, optval, optlen);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}

static int tls_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->getsockopt(sk, level, optname, optval, optlen);

	return do_tls_getsockopt(sk, optname, optval, optlen);
}

static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
				  unsigned int optlen, int tx)
{
	struct tls_crypto_info *crypto_info;
	struct tls_crypto_info *alt_crypto_info;
	struct tls_context *ctx = tls_get_ctx(sk);
	size_t optsize;
	int rc = 0;
	int conf;

	if (!optval || (optlen < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (tx) {
		crypto_info = &ctx->crypto_send.info;
		alt_crypto_info = &ctx->crypto_recv.info;
	} else {
		crypto_info = &ctx->crypto_recv.info;
		alt_crypto_info = &ctx->crypto_send.info;
	}

	/* Currently we don't support setting crypto info more than once */
	if (TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto err_crypto_info;
	}

	/* check version */
	if (crypto_info->version != TLS_1_2_VERSION &&
	    crypto_info->version != TLS_1_3_VERSION) {
		rc = -ENOTSUPP;
		goto err_crypto_info;
	}

	/* Ensure that TLS version and ciphers are same in both directions */
	if (TLS_CRYPTO_INFO_READY(alt_crypto_info)) {
		if (alt_crypto_info->version != crypto_info->version ||
		    alt_crypto_info->cipher_type != crypto_info->cipher_type) {
			rc = -EINVAL;
			goto err_crypto_info;
		}
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
	case TLS_CIPHER_AES_GCM_256: {
		optsize = crypto_info->cipher_type == TLS_CIPHER_AES_GCM_128 ?
			sizeof(struct tls12_crypto_info_aes_gcm_128) :
			sizeof(struct tls12_crypto_info_aes_gcm_256);
		if (optlen != optsize) {
			rc = -EINVAL;
			goto err_crypto_info;
		}
		rc = copy_from_user(crypto_info + 1,
				    optval + sizeof(*crypto_info),
				    optlen - sizeof(*crypto_info));
		if (rc) {
			rc = -EFAULT;
			goto err_crypto_info;
		}
		break;
	}
	default:
		rc = -EINVAL;
		goto err_crypto_info;
	}

	if (tx) {
#ifdef CONFIG_TLS_DEVICE
		rc = tls_set_device_offload(sk, ctx);
		conf = TLS_HW;
		if (rc) {
#else
		{
#endif
			rc = tls_set_sw_offload(sk, ctx, 1);
			conf = TLS_SW;
		}
	} else {
#ifdef CONFIG_TLS_DEVICE
		rc = tls_set_device_offload_rx(sk, ctx);
		conf = TLS_HW;
		if (rc) {
#else
		{
#endif
			rc = tls_set_sw_offload(sk, ctx, 0);
			conf = TLS_SW;
		}
	}

	if (rc)
		goto err_crypto_info;

	if (tx)
		ctx->tx_conf = conf;
	else
		ctx->rx_conf = conf;
	update_sk_prot(sk, ctx);
	if (tx) {
		ctx->sk_write_space = sk->sk_write_space;
		sk->sk_write_space = tls_write_space;
	} else {
		sk->sk_socket->ops = &tls_sw_proto_ops;
	}
	goto out;

err_crypto_info:
	memzero_explicit(crypto_info, sizeof(union tls_crypto_context));
out:
	return rc;
}

static int do_tls_setsockopt(struct sock *sk, int optname,
			     char __user *optval, unsigned int optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
	case TLS_RX:
		lock_sock(sk);
		rc = do_tls_setsockopt_conf(sk, optval, optlen,
					    optname == TLS_TX);
		release_sock(sk);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}

static int tls_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->setsockopt(sk, level, optname, optval, optlen);

	return do_tls_setsockopt(sk, optname, optval, optlen);
}
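/* End-to-end setup from userspace: attach the "tls" ULP on an established
 * TCP socket, then install the negotiated secrets per direction. A minimal
 * sketch (illustrative; the key material comes from the TLS handshake, and
 * error handling is omitted):
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = { 0 };
 *
 *	setsockopt(sock, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *
 *	ci.info.version = TLS_1_2_VERSION;
 *	ci.info.cipher_type = TLS_CIPHER_AES_GCM_128;
 *	memcpy(ci.iv, iv_write, TLS_CIPHER_AES_GCM_128_IV_SIZE);
 *	memcpy(ci.rec_seq, seq_write, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
 *	memcpy(ci.key, key_write, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
 *	memcpy(ci.salt, salt_write, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
 *
 *	setsockopt(sock, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 *
 * TLS_RX is configured the same way with the read-direction secrets. The
 * socket must already be established; tls_init() below enforces this.
 */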
static struct tls_context *create_ctx(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tls_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
	if (!ctx)
		return NULL;

	icsk->icsk_ulp_data = ctx;
	ctx->setsockopt = sk->sk_prot->setsockopt;
	ctx->getsockopt = sk->sk_prot->getsockopt;
	ctx->sk_proto_close = sk->sk_prot->close;
	return ctx;
}

static void tls_build_proto(struct sock *sk)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

	/* Build IPv6 TLS whenever the address of tcpv6_prot changes */
	if (ip_ver == TLSV6 &&
	    unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
		mutex_lock(&tcpv6_prot_mutex);
		if (likely(sk->sk_prot != saved_tcpv6_prot)) {
			build_protos(tls_prots[TLSV6], sk->sk_prot);
			smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
		}
		mutex_unlock(&tcpv6_prot_mutex);
	}

	if (ip_ver == TLSV4 &&
	    unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv4_prot))) {
		mutex_lock(&tcpv4_prot_mutex);
		if (likely(sk->sk_prot != saved_tcpv4_prot)) {
			build_protos(tls_prots[TLSV4], sk->sk_prot);
			smp_store_release(&saved_tcpv4_prot, sk->sk_prot);
		}
		mutex_unlock(&tcpv4_prot_mutex);
	}
}
static void tls_hw_sk_destruct(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	ctx->sk_destruct(sk);
	/* Free ctx */
	kfree(ctx);
	icsk->icsk_ulp_data = NULL;
}

static int tls_hw_prot(struct sock *sk)
{
	struct tls_context *ctx;
	struct tls_device *dev;
	int rc = 0;

	spin_lock_bh(&device_spinlock);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->feature && dev->feature(dev)) {
			ctx = create_ctx(sk);
			if (!ctx)
				goto out;

			spin_unlock_bh(&device_spinlock);
			tls_build_proto(sk);
			ctx->hash = sk->sk_prot->hash;
			ctx->unhash = sk->sk_prot->unhash;
			ctx->sk_proto_close = sk->sk_prot->close;
			ctx->sk_destruct = sk->sk_destruct;
			sk->sk_destruct = tls_hw_sk_destruct;
			ctx->rx_conf = TLS_HW_RECORD;
			ctx->tx_conf = TLS_HW_RECORD;
			update_sk_prot(sk, ctx);
			spin_lock_bh(&device_spinlock);
			rc = 1;
			break;
		}
	}
out:
	spin_unlock_bh(&device_spinlock);
	return rc;
}

static void tls_hw_unhash(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_device *dev;

	spin_lock_bh(&device_spinlock);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->unhash) {
			kref_get(&dev->kref);
			spin_unlock_bh(&device_spinlock);
			dev->unhash(dev, sk);
			kref_put(&dev->kref, dev->release);
			spin_lock_bh(&device_spinlock);
		}
	}
	spin_unlock_bh(&device_spinlock);
	ctx->unhash(sk);
}

static int tls_hw_hash(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_device *dev;
	int err;

	err = ctx->hash(sk);
	spin_lock_bh(&device_spinlock);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->hash) {
			kref_get(&dev->kref);
			spin_unlock_bh(&device_spinlock);
			err |= dev->hash(dev, sk);
			kref_put(&dev->kref, dev->release);
			spin_lock_bh(&device_spinlock);
		}
	}
	spin_unlock_bh(&device_spinlock);

	if (err)
		tls_hw_unhash(sk);
	return err;
}
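/* The tls_hw_* callbacks above serve full record-layer offload
 * (TLS_HW_RECORD): a driver that terminates the TLS record layer in
 * hardware registers a struct tls_device on device_list via
 * tls_register_device() below, supplying a feature() probe and, if
 * needed, hash()/unhash() hooks. tls_hw_prot() then claims matching
 * sockets when the ULP is attached.
 */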
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 struct proto *base)
{
	prot[TLS_BASE][TLS_BASE] = *base;
	prot[TLS_BASE][TLS_BASE].setsockopt = tls_setsockopt;
	prot[TLS_BASE][TLS_BASE].getsockopt = tls_getsockopt;
	prot[TLS_BASE][TLS_BASE].close = tls_sk_proto_close;

	prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_SW][TLS_BASE].sendmsg = tls_sw_sendmsg;
	prot[TLS_SW][TLS_BASE].sendpage = tls_sw_sendpage;

	prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg;
	prot[TLS_BASE][TLS_SW].stream_memory_read = tls_sw_stream_read;
	prot[TLS_BASE][TLS_SW].close = tls_sk_proto_close;

	prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
	prot[TLS_SW][TLS_SW].recvmsg = tls_sw_recvmsg;
	prot[TLS_SW][TLS_SW].stream_memory_read = tls_sw_stream_read;
	prot[TLS_SW][TLS_SW].close = tls_sk_proto_close;

#ifdef CONFIG_TLS_DEVICE
	prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_HW][TLS_BASE].sendmsg = tls_device_sendmsg;
	prot[TLS_HW][TLS_BASE].sendpage = tls_device_sendpage;

	prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW];
	prot[TLS_HW][TLS_SW].sendmsg = tls_device_sendmsg;
	prot[TLS_HW][TLS_SW].sendpage = tls_device_sendpage;

	prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW];

	prot[TLS_SW][TLS_HW] = prot[TLS_SW][TLS_SW];

	prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW];
#endif

	prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].hash = tls_hw_hash;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash = tls_hw_unhash;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].close = tls_sk_proto_close;
}

static int tls_init(struct sock *sk)
{
	struct tls_context *ctx;
	int rc = 0;

	if (tls_hw_prot(sk))
		goto out;

	/* The TLS ulp is currently supported only for TCP sockets
	 * in ESTABLISHED state.
	 * Supporting sockets in LISTEN state will require us
	 * to modify the accept implementation to clone rather than
	 * share the ulp context.
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTSUPP;

	/* allocate tls context */
	ctx = create_ctx(sk);
	if (!ctx) {
		rc = -ENOMEM;
		goto out;
	}

	tls_build_proto(sk);
	ctx->tx_conf = TLS_BASE;
	ctx->rx_conf = TLS_BASE;
	update_sk_prot(sk, ctx);
out:
	return rc;
}

void tls_register_device(struct tls_device *device)
{
	spin_lock_bh(&device_spinlock);
	list_add_tail(&device->dev_list, &device_list);
	spin_unlock_bh(&device_spinlock);
}
EXPORT_SYMBOL(tls_register_device);

void tls_unregister_device(struct tls_device *device)
{
	spin_lock_bh(&device_spinlock);
	list_del(&device->dev_list);
	spin_unlock_bh(&device_spinlock);
}
EXPORT_SYMBOL(tls_unregister_device);

static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
	.name			= "tls",
	.owner			= THIS_MODULE,
	.init			= tls_init,
};

static int __init tls_register(void)
{
	tls_sw_proto_ops = inet_stream_ops;
	tls_sw_proto_ops.splice_read = tls_sw_splice_read;

#ifdef CONFIG_TLS_DEVICE
	tls_device_init();
#endif
	tcp_register_ulp(&tcp_tls_ulp_ops);

	return 0;
}

static void __exit tls_unregister(void)
{
	tcp_unregister_ulp(&tcp_tls_ulp_ops);
#ifdef CONFIG_TLS_DEVICE
	tls_device_cleanup();
#endif
}

module_init(tls_register);
module_exit(tls_unregister);