/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>

#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/highmem.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/inetdevice.h>

#include <net/tls.h>

MODULE_AUTHOR("Mellanox Technologies");
MODULE_DESCRIPTION("Transport Layer Security Support");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_TCP_ULP("tls");

enum {
	TLSV4,
	TLSV6,
	TLS_NUM_PROTS,
};

static struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
static struct proto *saved_tcpv4_prot;
static DEFINE_MUTEX(tcpv4_prot_mutex);
static LIST_HEAD(device_list);
static DEFINE_SPINLOCK(device_spinlock);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static struct proto_ops tls_sw_proto_ops;
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 struct proto *base);

static void update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

	sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf];
}

int wait_on_pending_writer(struct sock *sk, long *timeo)
{
	int rc = 0;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		if (!*timeo) {
			rc = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			rc = sock_intr_errno(*timeo);
			break;
		}

		if (sk_wait_event(sk, timeo, !sk->sk_write_pending, &wait))
			break;
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}

int tls_push_sg(struct sock *sk,
		struct tls_context *ctx,
		struct scatterlist *sg,
		u16 first_offset,
		int flags)
{
	int sendpage_flags = flags | MSG_SENDPAGE_NOTLAST;
	int ret = 0;
	struct page *p;
	size_t size;
	int offset = first_offset;

	size = sg->length - offset;
	offset += sg->offset;

	ctx->in_tcp_sendpages = true;
	while (1) {
		if (sg_is_last(sg))
			sendpage_flags = flags;

		/* is sending application-limited? */
		tcp_rate_check_app_limited(sk);
		p = sg_page(sg);
retry:
		ret = do_tcp_sendpages(sk, p, offset, size, sendpage_flags);

		if (ret != size) {
			if (ret > 0) {
				offset += ret;
				size -= ret;
				goto retry;
			}

			offset -= sg->offset;
			ctx->partially_sent_offset = offset;
			ctx->partially_sent_record = (void *)sg;
			ctx->in_tcp_sendpages = false;
			return ret;
		}

		put_page(p);
		sk_mem_uncharge(sk, sg->length);
		sg = sg_next(sg);
		if (!sg)
			break;

		offset = sg->offset;
		size = sg->length;
	}

	ctx->in_tcp_sendpages = false;

	return 0;
}

static int tls_handle_open_record(struct sock *sk, int flags)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (tls_is_pending_open_record(ctx))
		return ctx->push_pending_record(sk, flags);

	return 0;
}

int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
		      unsigned char *record_type)
{
	struct cmsghdr *cmsg;
	int rc = -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_TLS)
			continue;

		switch (cmsg->cmsg_type) {
		case TLS_SET_RECORD_TYPE:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*record_type)))
				return -EINVAL;

			if (msg->msg_flags & MSG_MORE)
				return -EINVAL;

			rc = tls_handle_open_record(sk, msg->msg_flags);
			if (rc)
				return rc;

			*record_type = *(unsigned char *)CMSG_DATA(cmsg);
			rc = 0;
			break;
		default:
			return -EINVAL;
		}
	}

	return rc;
}

int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags)
{
	struct scatterlist *sg;
	u16 offset;

	sg = ctx->partially_sent_record;
	offset = ctx->partially_sent_offset;

	ctx->partially_sent_record = NULL;
	return tls_push_sg(sk, ctx, sg, offset, flags);
}

static void tls_write_space(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	/* If in_tcp_sendpages, call the lower protocol's write space handler
	 * to ensure we wake up any waiting operations there. For example,
	 * if do_tcp_sendpages were to call sk_wait_event.
	 */
	if (ctx->in_tcp_sendpages) {
		ctx->sk_write_space(sk);
		return;
	}

#ifdef CONFIG_TLS_DEVICE
	if (ctx->tx_conf == TLS_HW)
		tls_device_write_space(sk, ctx);
	else
#endif
		tls_sw_write_space(sk, ctx);

	ctx->sk_write_space(sk);
}

static void tls_ctx_free(struct tls_context *ctx)
{
	if (!ctx)
		return;

	memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
	memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
	kfree(ctx);
}

static void tls_sk_proto_close(struct sock *sk, long timeout)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	long timeo = sock_sndtimeo(sk, 0);
	void (*sk_proto_close)(struct sock *sk, long timeout);
	bool free_ctx = false;

	lock_sock(sk);
	sk_proto_close = ctx->sk_proto_close;

	if (ctx->tx_conf == TLS_HW_RECORD && ctx->rx_conf == TLS_HW_RECORD)
		goto skip_tx_cleanup;

	if (ctx->tx_conf == TLS_BASE && ctx->rx_conf == TLS_BASE) {
		free_ctx = true;
		goto skip_tx_cleanup;
	}

	if (!tls_complete_pending_work(sk, ctx, 0, &timeo))
		tls_handle_open_record(sk, 0);

	/* We need these for tls_sw_fallback handling of other packets */
	if (ctx->tx_conf == TLS_SW) {
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
		tls_sw_free_resources_tx(sk);
	}

	if (ctx->rx_conf == TLS_SW) {
		kfree(ctx->rx.rec_seq);
		kfree(ctx->rx.iv);
		tls_sw_free_resources_rx(sk);
	}

#ifdef CONFIG_TLS_DEVICE
	if (ctx->rx_conf == TLS_HW)
		tls_device_offload_cleanup_rx(sk);

	if (ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW) {
#else
	{
#endif
		tls_ctx_free(ctx);
		ctx = NULL;
	}

skip_tx_cleanup:
	release_sock(sk);
	sk_proto_close(sk, timeout);
	/* free ctx for TLS_HW_RECORD, used by tcp_set_state
	 * for sk->sk_prot->unhash [tls_hw_unhash]
	 */
	if (free_ctx)
		tls_ctx_free(ctx);
}

static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
				int __user *optlen)
{
	int rc = 0;
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_crypto_info *crypto_info;
	int len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (!optval || (len < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (!ctx) {
		rc = -EBUSY;
		goto out;
	}

	/* get user crypto info */
	crypto_info = &ctx->crypto_send.info;

	if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	if (len == sizeof(*crypto_info)) {
		if (copy_to_user(optval, crypto_info, sizeof(*crypto_info)))
			rc = -EFAULT;
		goto out;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *
		  crypto_info_aes_gcm_128 =
		  container_of(crypto_info,
			       struct tls12_crypto_info_aes_gcm_128,
			       info);

		if (len != sizeof(*crypto_info_aes_gcm_128)) {
			rc = -EINVAL;
			goto out;
		}
		lock_sock(sk);
		memcpy(crypto_info_aes_gcm_128->iv,
		       ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
		       TLS_CIPHER_AES_GCM_128_IV_SIZE);
		memcpy(crypto_info_aes_gcm_128->rec_seq, ctx->tx.rec_seq,
		       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
		release_sock(sk);
		if (copy_to_user(optval,
				 crypto_info_aes_gcm_128,
				 sizeof(*crypto_info_aes_gcm_128)))
			rc = -EFAULT;
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		struct tls12_crypto_info_aes_gcm_256 *
		  crypto_info_aes_gcm_256 =
		  container_of(crypto_info,
			       struct tls12_crypto_info_aes_gcm_256,
			       info);

		if (len != sizeof(*crypto_info_aes_gcm_256)) {
			rc = -EINVAL;
			goto out;
		}
		lock_sock(sk);
		memcpy(crypto_info_aes_gcm_256->iv,
		       ctx->tx.iv + TLS_CIPHER_AES_GCM_256_SALT_SIZE,
		       TLS_CIPHER_AES_GCM_256_IV_SIZE);
		memcpy(crypto_info_aes_gcm_256->rec_seq, ctx->tx.rec_seq,
		       TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);
		release_sock(sk);
		if (copy_to_user(optval,
				 crypto_info_aes_gcm_256,
				 sizeof(*crypto_info_aes_gcm_256)))
			rc = -EFAULT;
		break;
	}
	default:
		rc = -EINVAL;
	}

out:
	return rc;
}

static int do_tls_getsockopt(struct sock *sk, int optname,
			     char __user *optval, int __user *optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
		rc = do_tls_getsockopt_tx(sk, optval, optlen);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}

static int tls_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->getsockopt(sk, level, optname, optval, optlen);

	return do_tls_getsockopt(sk, optname, optval, optlen);
}

static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
				  unsigned int optlen, int tx)
{
	struct tls_crypto_info *crypto_info;
	struct tls_crypto_info *alt_crypto_info;
	struct tls_context *ctx = tls_get_ctx(sk);
	size_t optsize;
	int rc = 0;
	int conf;

	if (!optval || (optlen < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (tx) {
		crypto_info = &ctx->crypto_send.info;
		alt_crypto_info = &ctx->crypto_recv.info;
	} else {
		crypto_info = &ctx->crypto_recv.info;
		alt_crypto_info = &ctx->crypto_send.info;
	}

	/* Currently we don't support setting crypto info more than once */
	if (TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto err_crypto_info;
	}

	/* check version */
	if (crypto_info->version != TLS_1_2_VERSION &&
	    crypto_info->version != TLS_1_3_VERSION) {
		rc = -ENOTSUPP;
		goto err_crypto_info;
	}

	/* Ensure that TLS version and ciphers are the same in both directions */
	if (TLS_CRYPTO_INFO_READY(alt_crypto_info)) {
		if (alt_crypto_info->version != crypto_info->version ||
		    alt_crypto_info->cipher_type != crypto_info->cipher_type) {
			rc = -EINVAL;
			goto err_crypto_info;
		}
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		optsize = sizeof(struct tls12_crypto_info_aes_gcm_128);
		break;
	case TLS_CIPHER_AES_GCM_256: {
		optsize = sizeof(struct tls12_crypto_info_aes_gcm_256);
		break;
	}
	case TLS_CIPHER_AES_CCM_128:
		optsize = sizeof(struct tls12_crypto_info_aes_ccm_128);
		break;
	default:
		rc = -EINVAL;
		goto err_crypto_info;
	}

	if (optlen != optsize) {
		rc = -EINVAL;
		goto err_crypto_info;
	}

	rc = copy_from_user(crypto_info + 1, optval + sizeof(*crypto_info),
			    optlen - sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto err_crypto_info;
	}

	if (tx) {
#ifdef CONFIG_TLS_DEVICE
		rc = tls_set_device_offload(sk, ctx);
		conf = TLS_HW;
		if (rc) {
#else
		{
#endif
			rc = tls_set_sw_offload(sk, ctx, 1);
			conf = TLS_SW;
		}
	} else {
#ifdef CONFIG_TLS_DEVICE
		rc = tls_set_device_offload_rx(sk, ctx);
		conf = TLS_HW;
		if (rc) {
#else
		{
#endif
			rc = tls_set_sw_offload(sk, ctx, 0);
			conf = TLS_SW;
		}
	}

	if (rc)
		goto err_crypto_info;

	if (tx)
		ctx->tx_conf = conf;
	else
		ctx->rx_conf = conf;
	update_sk_prot(sk, ctx);
	if (tx) {
		ctx->sk_write_space = sk->sk_write_space;
		sk->sk_write_space = tls_write_space;
	} else {
		sk->sk_socket->ops = &tls_sw_proto_ops;
	}
	goto out;

err_crypto_info:
	memzero_explicit(crypto_info, sizeof(union tls_crypto_context));
out:
	return rc;
}

static int do_tls_setsockopt(struct sock *sk, int optname,
			     char __user *optval, unsigned int optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
	case TLS_RX:
		lock_sock(sk);
		rc = do_tls_setsockopt_conf(sk, optval, optlen,
					    optname == TLS_TX);
		release_sock(sk);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}

static int tls_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->setsockopt(sk, level, optname, optval, optlen);

	return do_tls_setsockopt(sk, optname, optval, optlen);
}

static struct tls_context *create_ctx(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tls_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
	if (!ctx)
		return NULL;

	icsk->icsk_ulp_data = ctx;
	ctx->setsockopt = sk->sk_prot->setsockopt;
	ctx->getsockopt = sk->sk_prot->getsockopt;
	ctx->sk_proto_close = sk->sk_prot->close;
	return ctx;
}

static void tls_build_proto(struct sock *sk)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

	/* Build IPv6 TLS whenever the address of tcpv6_prot changes */
	if (ip_ver == TLSV6 &&
	    unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
		mutex_lock(&tcpv6_prot_mutex);
		if (likely(sk->sk_prot != saved_tcpv6_prot)) {
			build_protos(tls_prots[TLSV6], sk->sk_prot);
			smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
		}
		mutex_unlock(&tcpv6_prot_mutex);
	}

	if (ip_ver == TLSV4 &&
	    unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv4_prot))) {
		mutex_lock(&tcpv4_prot_mutex);
		if (likely(sk->sk_prot != saved_tcpv4_prot)) {
			build_protos(tls_prots[TLSV4], sk->sk_prot);
			smp_store_release(&saved_tcpv4_prot, sk->sk_prot);
		}
		mutex_unlock(&tcpv4_prot_mutex);
	}
}

static void tls_hw_sk_destruct(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	ctx->sk_destruct(sk);
	/* Free ctx */
	kfree(ctx);
	icsk->icsk_ulp_data = NULL;
}

static int tls_hw_prot(struct sock *sk)
{
	struct tls_context *ctx;
	struct tls_device *dev;
	int rc = 0;

	spin_lock_bh(&device_spinlock);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->feature && dev->feature(dev)) {
			ctx = create_ctx(sk);
			if (!ctx)
				goto out;

			spin_unlock_bh(&device_spinlock);
			tls_build_proto(sk);
			ctx->hash = sk->sk_prot->hash;
			ctx->unhash = sk->sk_prot->unhash;
			ctx->sk_proto_close = sk->sk_prot->close;
			ctx->sk_destruct = sk->sk_destruct;
			sk->sk_destruct = tls_hw_sk_destruct;
			ctx->rx_conf = TLS_HW_RECORD;
			ctx->tx_conf = TLS_HW_RECORD;
			update_sk_prot(sk, ctx);
			spin_lock_bh(&device_spinlock);
			rc = 1;
			break;
		}
	}
out:
	spin_unlock_bh(&device_spinlock);
	return rc;
}

static void tls_hw_unhash(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_device *dev;

	spin_lock_bh(&device_spinlock);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->unhash) {
			kref_get(&dev->kref);
			spin_unlock_bh(&device_spinlock);
			dev->unhash(dev, sk);
			kref_put(&dev->kref, dev->release);
			spin_lock_bh(&device_spinlock);
		}
	}
	spin_unlock_bh(&device_spinlock);
	ctx->unhash(sk);
}

static int tls_hw_hash(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_device *dev;
	int err;

	err = ctx->hash(sk);
	spin_lock_bh(&device_spinlock);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->hash) {
			kref_get(&dev->kref);
			spin_unlock_bh(&device_spinlock);
			err |= dev->hash(dev, sk);
			kref_put(&dev->kref, dev->release);
			spin_lock_bh(&device_spinlock);
		}
	}
	spin_unlock_bh(&device_spinlock);

	if (err)
		tls_hw_unhash(sk);
	return err;
}

static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 struct proto *base)
{
	prot[TLS_BASE][TLS_BASE] = *base;
	prot[TLS_BASE][TLS_BASE].setsockopt = tls_setsockopt;
	prot[TLS_BASE][TLS_BASE].getsockopt = tls_getsockopt;
	prot[TLS_BASE][TLS_BASE].close = tls_sk_proto_close;

	prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_SW][TLS_BASE].sendmsg = tls_sw_sendmsg;
	prot[TLS_SW][TLS_BASE].sendpage = tls_sw_sendpage;

	prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg;
	prot[TLS_BASE][TLS_SW].stream_memory_read = tls_sw_stream_read;
	prot[TLS_BASE][TLS_SW].close = tls_sk_proto_close;

	prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
	prot[TLS_SW][TLS_SW].recvmsg = tls_sw_recvmsg;
	prot[TLS_SW][TLS_SW].stream_memory_read = tls_sw_stream_read;
	prot[TLS_SW][TLS_SW].close = tls_sk_proto_close;

#ifdef CONFIG_TLS_DEVICE
	prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_HW][TLS_BASE].sendmsg = tls_device_sendmsg;
	prot[TLS_HW][TLS_BASE].sendpage = tls_device_sendpage;

	prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW];
	prot[TLS_HW][TLS_SW].sendmsg = tls_device_sendmsg;
	prot[TLS_HW][TLS_SW].sendpage = tls_device_sendpage;

	prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW];

	prot[TLS_SW][TLS_HW] = prot[TLS_SW][TLS_SW];

	prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW];
#endif

	prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].hash = tls_hw_hash;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash = tls_hw_unhash;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].close = tls_sk_proto_close;
}

static int tls_init(struct sock *sk)
{
	struct tls_context *ctx;
	int rc = 0;

	if (tls_hw_prot(sk))
		goto out;

	/* The TLS ulp is currently supported only for TCP sockets
	 * in ESTABLISHED state.
	 * Supporting sockets in LISTEN state will require us
	 * to modify the accept implementation to clone rather than
	 * share the ulp context.
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTSUPP;

	/* allocate tls context */
	ctx = create_ctx(sk);
	if (!ctx) {
		rc = -ENOMEM;
		goto out;
	}

	tls_build_proto(sk);
	ctx->tx_conf = TLS_BASE;
	ctx->rx_conf = TLS_BASE;
	update_sk_prot(sk, ctx);
out:
	return rc;
}

void tls_register_device(struct tls_device *device)
{
	spin_lock_bh(&device_spinlock);
	list_add_tail(&device->dev_list, &device_list);
	spin_unlock_bh(&device_spinlock);
}
EXPORT_SYMBOL(tls_register_device);

void tls_unregister_device(struct tls_device *device)
{
	spin_lock_bh(&device_spinlock);
	list_del(&device->dev_list);
	spin_unlock_bh(&device_spinlock);
}
EXPORT_SYMBOL(tls_unregister_device);

static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
	.name = "tls",
	.owner = THIS_MODULE,
	.init = tls_init,
};

static int __init tls_register(void)
{
	tls_sw_proto_ops = inet_stream_ops;
	tls_sw_proto_ops.splice_read = tls_sw_splice_read;

#ifdef CONFIG_TLS_DEVICE
	tls_device_init();
#endif
	tcp_register_ulp(&tcp_tls_ulp_ops);

	return 0;
}

static void __exit tls_unregister(void)
{
	tcp_unregister_ulp(&tcp_tls_ulp_ops);
#ifdef CONFIG_TLS_DEVICE
	tls_device_cleanup();
#endif
}

module_init(tls_register);
module_exit(tls_unregister);
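For context, the setsockopt() path implemented above (tls_init() via the "tls" ULP, then do_tls_setsockopt_conf() for TLS_TX/TLS_RX) is driven from userspace as described in the in-tree TLS documentation. The sketch below is illustrative only and not part of this module: the enable_ktls_tx() helper name is an assumption, the zeroed key/iv/salt/rec_seq values are placeholders for material a real caller takes from its handshake library, and the SOL_TLS fallback define mirrors the kernel's own value.

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <linux/tls.h>

#ifndef SOL_TLS
#define SOL_TLS 282	/* value from the kernel's include/linux/socket.h */
#endif

/* Hypothetical userspace helper: attach the "tls" ULP to an ESTABLISHED
 * TCP socket and program TLS 1.2 AES-GCM-128 TX state, i.e. exercise
 * tls_init() followed by do_tls_setsockopt_conf(..., tx = 1).
 */
static int enable_ktls_tx(int fd)
{
	struct tls12_crypto_info_aes_gcm_128 ci;

	memset(&ci, 0, sizeof(ci));	/* placeholder key/iv/salt/rec_seq */
	ci.info.version = TLS_1_2_VERSION;
	ci.info.cipher_type = TLS_CIPHER_AES_GCM_128;

	/* tls_init() returns -ENOTSUPP unless the socket is ESTABLISHED */
	if (setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")))
		return -1;

	/* installs the crypto state and switches sk->sk_prot to the TX path */
	return setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
}

Once this succeeds, plain send() data on the socket is framed and encrypted into TLS records by the TX configuration (TLS_SW, or TLS_HW when device offload is available) selected in do_tls_setsockopt_conf().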