/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>

#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/highmem.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/inetdevice.h>
#include <linux/inet_diag.h>

#include <net/snmp.h>
#include <net/tls.h>
#include <net/tls_toe.h>

MODULE_AUTHOR("Mellanox Technologies");
MODULE_DESCRIPTION("Transport Layer Security Support");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_TCP_ULP("tls");

enum {
	TLSV4,
	TLSV6,
	TLS_NUM_PROTS,
};

static struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
static struct proto *saved_tcpv4_prot;
static DEFINE_MUTEX(tcpv4_prot_mutex);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static struct proto_ops tls_sw_proto_ops;
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 struct proto *base);

void update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

	sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf];
}

int wait_on_pending_writer(struct sock *sk, long *timeo)
{
	int rc = 0;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		if (!*timeo) {
			rc = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			rc = sock_intr_errno(*timeo);
			break;
		}

		if (sk_wait_event(sk, timeo, !sk->sk_write_pending, &wait))
			break;
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}

int tls_push_sg(struct sock *sk,
		struct tls_context *ctx,
		struct scatterlist *sg,
		u16 first_offset,
		int flags)
{
	int sendpage_flags = flags | MSG_SENDPAGE_NOTLAST;
	int ret = 0;
	struct page *p;
	size_t size;
	int offset = first_offset;

	size = sg->length - offset;
	offset += sg->offset;

	ctx->in_tcp_sendpages = true;
	while (1) {
		if (sg_is_last(sg))
			sendpage_flags = flags;

		/* is sending application-limited? */
		tcp_rate_check_app_limited(sk);
		p = sg_page(sg);
retry:
		ret = do_tcp_sendpages(sk, p, offset, size, sendpage_flags);

		if (ret != size) {
			if (ret > 0) {
				offset += ret;
				size -= ret;
				goto retry;
			}

			offset -= sg->offset;
			ctx->partially_sent_offset = offset;
			ctx->partially_sent_record = (void *)sg;
			ctx->in_tcp_sendpages = false;
			return ret;
		}

		put_page(p);
		sk_mem_uncharge(sk, sg->length);
		sg = sg_next(sg);
		if (!sg)
			break;

		offset = sg->offset;
		size = sg->length;
	}

	ctx->in_tcp_sendpages = false;

	return 0;
}

static int tls_handle_open_record(struct sock *sk, int flags)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (tls_is_pending_open_record(ctx))
		return ctx->push_pending_record(sk, flags);

	return 0;
}

int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
		      unsigned char *record_type)
{
	struct cmsghdr *cmsg;
	int rc = -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_TLS)
			continue;

		switch (cmsg->cmsg_type) {
		case TLS_SET_RECORD_TYPE:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*record_type)))
				return -EINVAL;

			if (msg->msg_flags & MSG_MORE)
				return -EINVAL;

			rc = tls_handle_open_record(sk, msg->msg_flags);
			if (rc)
				return rc;

			*record_type = *(unsigned char *)CMSG_DATA(cmsg);
			rc = 0;
			break;
		default:
			return -EINVAL;
		}
	}

	return rc;
}

int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags)
{
	struct scatterlist *sg;
	u16 offset;

	sg = ctx->partially_sent_record;
	offset = ctx->partially_sent_offset;

	ctx->partially_sent_record = NULL;
	return tls_push_sg(sk, ctx, sg, offset, flags);
}

bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx)
{
	struct scatterlist *sg;

	sg = ctx->partially_sent_record;
	if (!sg)
		return false;

	while (1) {
		put_page(sg_page(sg));
		sk_mem_uncharge(sk, sg->length);

		if (sg_is_last(sg))
			break;
		sg++;
	}
	ctx->partially_sent_record = NULL;
	return true;
}

static void tls_write_space(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	/* If in_tcp_sendpages call lower protocol write space handler
	 * to ensure we wake up any waiting operations there. For example
	 * if do_tcp_sendpages were to call sk_wait_event.
	 */
	if (ctx->in_tcp_sendpages) {
		ctx->sk_write_space(sk);
		return;
	}

#ifdef CONFIG_TLS_DEVICE
	if (ctx->tx_conf == TLS_HW)
		tls_device_write_space(sk, ctx);
	else
#endif
		tls_sw_write_space(sk, ctx);

	ctx->sk_write_space(sk);
}

/**
 * tls_ctx_free() - free TLS ULP context
 * @sk: socket to which @ctx is attached
 * @ctx: TLS context structure
 *
 * Free TLS context. If @sk is %NULL caller guarantees that the socket
 * to which @ctx was attached has no outstanding references.
 */
void tls_ctx_free(struct sock *sk, struct tls_context *ctx)
{
	if (!ctx)
		return;

	memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
	memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
	mutex_destroy(&ctx->tx_lock);

	if (sk)
		kfree_rcu(ctx, rcu);
	else
		kfree(ctx);
}

static void tls_sk_proto_cleanup(struct sock *sk,
				 struct tls_context *ctx, long timeo)
{
	if (unlikely(sk->sk_write_pending) &&
	    !wait_on_pending_writer(sk, &timeo))
		tls_handle_open_record(sk, 0);

	/* We need these for tls_sw_fallback handling of other packets */
	if (ctx->tx_conf == TLS_SW) {
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
		tls_sw_release_resources_tx(sk);
		TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW);
	} else if (ctx->tx_conf == TLS_HW) {
		tls_device_free_resources_tx(sk);
		TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXDEVICE);
	}

	if (ctx->rx_conf == TLS_SW) {
		tls_sw_release_resources_rx(sk);
		TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXSW);
	} else if (ctx->rx_conf == TLS_HW) {
		tls_device_offload_cleanup_rx(sk);
		TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXDEVICE);
	}
}

static void tls_sk_proto_close(struct sock *sk, long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tls_context *ctx = tls_get_ctx(sk);
	long timeo = sock_sndtimeo(sk, 0);
	bool free_ctx;

	if (ctx->tx_conf == TLS_SW)
		tls_sw_cancel_work_tx(ctx);

	lock_sock(sk);
	free_ctx = ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW;

	if (ctx->tx_conf != TLS_BASE || ctx->rx_conf != TLS_BASE)
		tls_sk_proto_cleanup(sk, ctx, timeo);

	write_lock_bh(&sk->sk_callback_lock);
	if (free_ctx)
		rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
	sk->sk_prot = ctx->sk_proto;
	if (sk->sk_write_space == tls_write_space)
		sk->sk_write_space = ctx->sk_write_space;
	write_unlock_bh(&sk->sk_callback_lock);
	release_sock(sk);
	if (ctx->tx_conf == TLS_SW)
		tls_sw_free_ctx_tx(ctx);
	if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW)
		tls_sw_strparser_done(ctx);
	if (ctx->rx_conf == TLS_SW)
		tls_sw_free_ctx_rx(ctx);
	ctx->sk_proto->close(sk, timeout);

	if (free_ctx)
		tls_ctx_free(sk, ctx);
}

static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
				int __user *optlen)
{
	int rc = 0;
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_crypto_info *crypto_info;
	int len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (!optval || (len < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (!ctx) {
		rc = -EBUSY;
		goto out;
	}

	/* get user crypto info */
	crypto_info = &ctx->crypto_send.info;

	if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	if (len == sizeof(*crypto_info)) {
		if (copy_to_user(optval, crypto_info, sizeof(*crypto_info)))
			rc = -EFAULT;
		goto out;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *crypto_info_aes_gcm_128 =
			container_of(crypto_info,
				     struct tls12_crypto_info_aes_gcm_128,
				     info);

		if (len != sizeof(*crypto_info_aes_gcm_128)) {
			rc = -EINVAL;
			goto out;
		}
		lock_sock(sk);
		memcpy(crypto_info_aes_gcm_128->iv,
		       ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
		       TLS_CIPHER_AES_GCM_128_IV_SIZE);
		memcpy(crypto_info_aes_gcm_128->rec_seq, ctx->tx.rec_seq,
		       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
		release_sock(sk);
		if (copy_to_user(optval,
				 crypto_info_aes_gcm_128,
				 sizeof(*crypto_info_aes_gcm_128)))
			rc = -EFAULT;
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		struct tls12_crypto_info_aes_gcm_256 *crypto_info_aes_gcm_256 =
			container_of(crypto_info,
				     struct tls12_crypto_info_aes_gcm_256,
				     info);

		if (len != sizeof(*crypto_info_aes_gcm_256)) {
			rc = -EINVAL;
			goto out;
		}
		lock_sock(sk);
		memcpy(crypto_info_aes_gcm_256->iv,
		       ctx->tx.iv + TLS_CIPHER_AES_GCM_256_SALT_SIZE,
		       TLS_CIPHER_AES_GCM_256_IV_SIZE);
		memcpy(crypto_info_aes_gcm_256->rec_seq, ctx->tx.rec_seq,
		       TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);
		release_sock(sk);
		if (copy_to_user(optval,
				 crypto_info_aes_gcm_256,
				 sizeof(*crypto_info_aes_gcm_256)))
			rc = -EFAULT;
		break;
	}
	default:
		rc = -EINVAL;
	}

out:
	return rc;
}

static int do_tls_getsockopt(struct sock *sk, int optname,
			     char __user *optval, int __user *optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
		rc = do_tls_getsockopt_tx(sk, optval, optlen);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}

static int tls_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->sk_proto->getsockopt(sk, level,
						 optname, optval, optlen);

	return do_tls_getsockopt(sk, optname, optval, optlen);
}

static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
				  unsigned int optlen, int tx)
{
	struct tls_crypto_info *crypto_info;
	struct tls_crypto_info *alt_crypto_info;
	struct tls_context *ctx = tls_get_ctx(sk);
	size_t optsize;
	int rc = 0;
	int conf;

	if (!optval || (optlen < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (tx) {
		crypto_info = &ctx->crypto_send.info;
		alt_crypto_info = &ctx->crypto_recv.info;
	} else {
		crypto_info = &ctx->crypto_recv.info;
		alt_crypto_info = &ctx->crypto_send.info;
	}

	/* Currently we don't support setting crypto info more than once */
	if (TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto err_crypto_info;
	}

	/* check version */
	if (crypto_info->version != TLS_1_2_VERSION &&
	    crypto_info->version != TLS_1_3_VERSION) {
		rc = -ENOTSUPP;
		goto err_crypto_info;
	}

	/* Ensure that TLS version and ciphers are the same in both directions */
	if (TLS_CRYPTO_INFO_READY(alt_crypto_info)) {
		if (alt_crypto_info->version != crypto_info->version ||
		    alt_crypto_info->cipher_type != crypto_info->cipher_type) {
			rc = -EINVAL;
			goto err_crypto_info;
		}
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		optsize = sizeof(struct tls12_crypto_info_aes_gcm_128);
		break;
	case TLS_CIPHER_AES_GCM_256:
		optsize = sizeof(struct tls12_crypto_info_aes_gcm_256);
		break;
	case TLS_CIPHER_AES_CCM_128:
		optsize = sizeof(struct tls12_crypto_info_aes_ccm_128);
		break;
	default:
		rc = -EINVAL;
		goto err_crypto_info;
	}

	if (optlen != optsize) {
		rc = -EINVAL;
		goto err_crypto_info;
	}

	rc = copy_from_user(crypto_info + 1, optval + sizeof(*crypto_info),
			    optlen - sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto err_crypto_info;
	}

	if (tx) {
		rc = tls_set_device_offload(sk, ctx);
		conf = TLS_HW;
		if (!rc) {
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXDEVICE);
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXDEVICE);
		} else {
			rc = tls_set_sw_offload(sk, ctx, 1);
			if (rc)
				goto err_crypto_info;
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXSW);
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW);
			conf = TLS_SW;
		}
	} else {
		rc = tls_set_device_offload_rx(sk, ctx);
		conf = TLS_HW;
		if (!rc) {
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICE);
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXDEVICE);
		} else {
			rc = tls_set_sw_offload(sk, ctx, 0);
			if (rc)
				goto err_crypto_info;
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXSW);
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXSW);
			conf = TLS_SW;
		}
		tls_sw_strparser_arm(sk, ctx);
	}

	if (tx)
		ctx->tx_conf = conf;
	else
		ctx->rx_conf = conf;
	update_sk_prot(sk, ctx);
	if (tx) {
		ctx->sk_write_space = sk->sk_write_space;
		sk->sk_write_space = tls_write_space;
	} else {
		sk->sk_socket->ops = &tls_sw_proto_ops;
	}
	goto out;

err_crypto_info:
	memzero_explicit(crypto_info, sizeof(union tls_crypto_context));
out:
	return rc;
}

static int do_tls_setsockopt(struct sock *sk, int optname,
			     char __user *optval, unsigned int optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
	case TLS_RX:
		lock_sock(sk);
		rc = do_tls_setsockopt_conf(sk, optval, optlen,
					    optname == TLS_TX);
		release_sock(sk);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}

static int tls_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->sk_proto->setsockopt(sk, level, optname, optval,
						 optlen);

	return do_tls_setsockopt(sk, optname, optval, optlen);
}

struct tls_context *tls_ctx_create(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tls_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
	if (!ctx)
		return NULL;

	mutex_init(&ctx->tx_lock);
	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
	ctx->sk_proto = sk->sk_prot;
	return ctx;
}

static void tls_build_proto(struct sock *sk)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

	/* Build IPv6 TLS whenever the address of tcpv6_prot changes */
	if (ip_ver == TLSV6 &&
	    unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
		mutex_lock(&tcpv6_prot_mutex);
		if (likely(sk->sk_prot != saved_tcpv6_prot)) {
			build_protos(tls_prots[TLSV6], sk->sk_prot);
			smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
		}
		mutex_unlock(&tcpv6_prot_mutex);
	}

	if (ip_ver == TLSV4 &&
	    unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv4_prot))) {
		mutex_lock(&tcpv4_prot_mutex);
		if (likely(sk->sk_prot != saved_tcpv4_prot)) {
			build_protos(tls_prots[TLSV4], sk->sk_prot);
			smp_store_release(&saved_tcpv4_prot, sk->sk_prot);
		}
		mutex_unlock(&tcpv4_prot_mutex);
	}
}

static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 struct proto *base)
{
	prot[TLS_BASE][TLS_BASE] = *base;
	prot[TLS_BASE][TLS_BASE].setsockopt = tls_setsockopt;
	prot[TLS_BASE][TLS_BASE].getsockopt = tls_getsockopt;
	prot[TLS_BASE][TLS_BASE].close = tls_sk_proto_close;

	prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_SW][TLS_BASE].sendmsg = tls_sw_sendmsg;
	prot[TLS_SW][TLS_BASE].sendpage = tls_sw_sendpage;

	prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg;
	prot[TLS_BASE][TLS_SW].stream_memory_read = tls_sw_stream_read;
	prot[TLS_BASE][TLS_SW].close = tls_sk_proto_close;

	prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
	prot[TLS_SW][TLS_SW].recvmsg = tls_sw_recvmsg;
	prot[TLS_SW][TLS_SW].stream_memory_read = tls_sw_stream_read;
	prot[TLS_SW][TLS_SW].close = tls_sk_proto_close;

#ifdef CONFIG_TLS_DEVICE
	prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_HW][TLS_BASE].sendmsg = tls_device_sendmsg;
	prot[TLS_HW][TLS_BASE].sendpage = tls_device_sendpage;

	prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW];
	prot[TLS_HW][TLS_SW].sendmsg = tls_device_sendmsg;
	prot[TLS_HW][TLS_SW].sendpage = tls_device_sendpage;

	prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW];

	prot[TLS_SW][TLS_HW] = prot[TLS_SW][TLS_SW];

	prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW];
#endif
#ifdef CONFIG_TLS_TOE
	prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].hash = tls_toe_hash;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash = tls_toe_unhash;
#endif
}

static int tls_init(struct sock *sk)
{
	struct tls_context *ctx;
	int rc = 0;

	tls_build_proto(sk);

#ifdef CONFIG_TLS_TOE
	if (tls_toe_bypass(sk))
		return 0;
#endif

	/* The TLS ulp is currently supported only for TCP sockets
	 * in ESTABLISHED state.
	 * Supporting sockets in LISTEN state will require us
	 * to modify the accept implementation to clone rather than
	 * share the ulp context.
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTSUPP;

	/* allocate tls context */
	write_lock_bh(&sk->sk_callback_lock);
	ctx = tls_ctx_create(sk);
	if (!ctx) {
		rc = -ENOMEM;
		goto out;
	}

	ctx->tx_conf = TLS_BASE;
	ctx->rx_conf = TLS_BASE;
	update_sk_prot(sk, ctx);
out:
	write_unlock_bh(&sk->sk_callback_lock);
	return rc;
}

static void tls_update(struct sock *sk, struct proto *p)
{
	struct tls_context *ctx;

	ctx = tls_get_ctx(sk);
	if (likely(ctx))
		ctx->sk_proto = p;
	else
		sk->sk_prot = p;
}

static int tls_get_info(const struct sock *sk, struct sk_buff *skb)
{
	u16 version, cipher_type;
	struct tls_context *ctx;
	struct nlattr *start;
	int err;

	start = nla_nest_start_noflag(skb, INET_ULP_INFO_TLS);
	if (!start)
		return -EMSGSIZE;

	rcu_read_lock();
	ctx = rcu_dereference(inet_csk(sk)->icsk_ulp_data);
	if (!ctx) {
		err = 0;
		goto nla_failure;
	}
	version = ctx->prot_info.version;
	if (version) {
		err = nla_put_u16(skb, TLS_INFO_VERSION, version);
		if (err)
			goto nla_failure;
	}
	cipher_type = ctx->prot_info.cipher_type;
	if (cipher_type) {
		err = nla_put_u16(skb, TLS_INFO_CIPHER, cipher_type);
		if (err)
			goto nla_failure;
	}
	err = nla_put_u16(skb, TLS_INFO_TXCONF, tls_user_config(ctx, true));
	if (err)
		goto nla_failure;

	err = nla_put_u16(skb, TLS_INFO_RXCONF, tls_user_config(ctx, false));
	if (err)
		goto nla_failure;

	rcu_read_unlock();
	nla_nest_end(skb, start);
	return 0;

nla_failure:
	rcu_read_unlock();
	nla_nest_cancel(skb, start);
	return err;
}

static size_t tls_get_info_size(const struct sock *sk)
{
	size_t size = 0;

	size += nla_total_size(0) +		/* INET_ULP_INFO_TLS */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_VERSION */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_CIPHER */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_RXCONF */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_TXCONF */
		0;

	return size;
}

static int __net_init tls_init_net(struct net *net)
{
	int err;

	net->mib.tls_statistics = alloc_percpu(struct linux_tls_mib);
	if (!net->mib.tls_statistics)
		return -ENOMEM;

	err = tls_proc_init(net);
	if (err)
		goto err_free_stats;

	return 0;
err_free_stats:
	free_percpu(net->mib.tls_statistics);
	return err;
}

static void __net_exit tls_exit_net(struct net *net)
{
	tls_proc_fini(net);
	free_percpu(net->mib.tls_statistics);
}

static struct pernet_operations tls_proc_ops = {
	.init = tls_init_net,
	.exit = tls_exit_net,
};

static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
	.name = "tls",
	.owner = THIS_MODULE,
	.init = tls_init,
	.update = tls_update,
	.get_info = tls_get_info,
	.get_info_size = tls_get_info_size,
};

static int __init tls_register(void)
{
	int err;

	err = register_pernet_subsys(&tls_proc_ops);
	if (err)
		return err;

	tls_sw_proto_ops = inet_stream_ops;
	tls_sw_proto_ops.splice_read = tls_sw_splice_read;
	tls_sw_proto_ops.sendpage_locked = tls_sw_sendpage_locked;

	tls_device_init();
	tcp_register_ulp(&tcp_tls_ulp_ops);

	return 0;
}

static void __exit tls_unregister(void)
{
	tcp_unregister_ulp(&tcp_tls_ulp_ops);
	tls_device_cleanup();
	unregister_pernet_subsys(&tls_proc_ops);
}

module_init(tls_register);
module_exit(tls_unregister);