/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2014-2019 Netflix Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktls.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rmlock.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/refcount.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/kthread.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#if defined(__aarch64__) || defined(__amd64__) || defined(__i386__)
#include <machine/pcb.h>
#endif
#include <machine/vmparam.h>
#ifdef RSS
#include <net/netisr.h>
#include <net/rss_config.h>
#endif
#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#endif
#include <netinet/tcp_var.h>
#include <opencrypto/xform.h>
#include <vm/uma_dbg.h>
#include <vm/vm.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>

struct ktls_wq {
	struct mtx	mtx;
	STAILQ_HEAD(, mbuf_ext_pgs) head;
	bool		running;
} __aligned(CACHE_LINE_SIZE);

static struct ktls_wq *ktls_wq;
static struct proc *ktls_proc;
LIST_HEAD(, ktls_crypto_backend) ktls_backends;
static struct rmlock ktls_backends_lock;
static uma_zone_t ktls_session_zone;
static uint16_t ktls_cpuid_lookup[MAXCPU];

SYSCTL_NODE(_kern_ipc, OID_AUTO, tls, CTLFLAG_RW, 0,
    "Kernel TLS offload");
SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, stats, CTLFLAG_RW, 0,
    "Kernel TLS offload stats");

static int ktls_allow_unload;
SYSCTL_INT(_kern_ipc_tls, OID_AUTO, allow_unload, CTLFLAG_RDTUN,
    &ktls_allow_unload, 0, "Allow software crypto modules to unload");

#ifdef RSS
static int ktls_bind_threads = 1;
#else
static int ktls_bind_threads;
#endif
SYSCTL_INT(_kern_ipc_tls, OID_AUTO, bind_threads, CTLFLAG_RDTUN,
    &ktls_bind_threads, 0,
    "Bind crypto threads to cores or domains at boot");
static u_int ktls_maxlen = 16384;
SYSCTL_UINT(_kern_ipc_tls, OID_AUTO, maxlen, CTLFLAG_RWTUN,
    &ktls_maxlen, 0, "Maximum TLS record size");

static int ktls_number_threads;
SYSCTL_INT(_kern_ipc_tls_stats, OID_AUTO, threads, CTLFLAG_RD,
    &ktls_number_threads, 0,
    "Number of TLS threads in thread-pool");

static bool ktls_offload_enable;
SYSCTL_BOOL(_kern_ipc_tls, OID_AUTO, enable, CTLFLAG_RW,
    &ktls_offload_enable, 0,
    "Enable support for kernel TLS offload");

static bool ktls_cbc_enable = true;
SYSCTL_BOOL(_kern_ipc_tls, OID_AUTO, cbc_enable, CTLFLAG_RW,
    &ktls_cbc_enable, 1,
    "Enable Support of AES-CBC crypto for kernel TLS");

static counter_u64_t ktls_tasks_active;
SYSCTL_COUNTER_U64(_kern_ipc_tls, OID_AUTO, tasks_active, CTLFLAG_RD,
    &ktls_tasks_active, "Number of active tasks");

static counter_u64_t ktls_cnt_on;
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, so_inqueue, CTLFLAG_RD,
    &ktls_cnt_on, "Number of TLS records in queue to tasks for SW crypto");

static counter_u64_t ktls_offload_total;
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, offload_total,
    CTLFLAG_RD, &ktls_offload_total,
    "Total successful TLS setups (parameters set)");

static counter_u64_t ktls_offload_enable_calls;
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, enable_calls,
    CTLFLAG_RD, &ktls_offload_enable_calls,
    "Total number of TLS enable calls made");

static counter_u64_t ktls_offload_active;
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, active, CTLFLAG_RD,
    &ktls_offload_active, "Total Active TLS sessions");

static counter_u64_t ktls_offload_failed_crypto;
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, failed_crypto, CTLFLAG_RD,
    &ktls_offload_failed_crypto, "Total TLS crypto failures");

static counter_u64_t ktls_switch_to_ifnet;
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, switch_to_ifnet, CTLFLAG_RD,
    &ktls_switch_to_ifnet, "TLS sessions switched from SW to ifnet");

static counter_u64_t ktls_switch_to_sw;
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, switch_to_sw, CTLFLAG_RD,
    &ktls_switch_to_sw, "TLS sessions switched from ifnet to SW");

static counter_u64_t ktls_switch_failed;
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, switch_failed, CTLFLAG_RD,
    &ktls_switch_failed, "TLS sessions unable to switch between SW and ifnet");

SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, sw, CTLFLAG_RD, 0,
    "Software TLS session stats");
SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, ifnet, CTLFLAG_RD, 0,
    "Hardware (ifnet) TLS session stats");

static counter_u64_t ktls_sw_cbc;
SYSCTL_COUNTER_U64(_kern_ipc_tls_sw, OID_AUTO, cbc, CTLFLAG_RD, &ktls_sw_cbc,
    "Active number of software TLS sessions using AES-CBC");

static counter_u64_t ktls_sw_gcm;
SYSCTL_COUNTER_U64(_kern_ipc_tls_sw, OID_AUTO, gcm, CTLFLAG_RD, &ktls_sw_gcm,
    "Active number of software TLS sessions using AES-GCM");

static counter_u64_t ktls_ifnet_cbc;
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, cbc, CTLFLAG_RD,
    &ktls_ifnet_cbc,
    "Active number of ifnet TLS sessions using AES-CBC");

static counter_u64_t ktls_ifnet_gcm;
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, gcm, CTLFLAG_RD,
    &ktls_ifnet_gcm,
    "Active number of ifnet TLS sessions using AES-GCM");

static counter_u64_t ktls_ifnet_reset;
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, reset, CTLFLAG_RD,
    &ktls_ifnet_reset, "TLS sessions updated to a new ifnet send tag");

static counter_u64_t ktls_ifnet_reset_dropped;
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, reset_dropped, CTLFLAG_RD,
    &ktls_ifnet_reset_dropped,
    "TLS sessions dropped after failing to update ifnet send tag");

static counter_u64_t ktls_ifnet_reset_failed;
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, reset_failed, CTLFLAG_RD,
    &ktls_ifnet_reset_failed,
    "TLS sessions that failed to allocate a new ifnet send tag");

static int ktls_ifnet_permitted;
SYSCTL_UINT(_kern_ipc_tls_ifnet, OID_AUTO, permitted, CTLFLAG_RWTUN,
    &ktls_ifnet_permitted, 1,
    "Whether to permit hardware (ifnet) TLS sessions");

static MALLOC_DEFINE(M_KTLS, "ktls", "Kernel TLS");

static void ktls_cleanup(struct ktls_session *tls);
#if defined(INET) || defined(INET6)
static void ktls_reset_send_tag(void *context, int pending);
#endif
static void ktls_work_thread(void *ctx);

int
ktls_crypto_backend_register(struct ktls_crypto_backend *be)
{
	struct ktls_crypto_backend *curr_be, *tmp;

	if (be->api_version != KTLS_API_VERSION) {
		printf("KTLS: API version mismatch (%d vs %d) for %s\n",
		    be->api_version, KTLS_API_VERSION,
		    be->name);
		return (EINVAL);
	}

	rm_wlock(&ktls_backends_lock);
	printf("KTLS: Registering crypto method %s with prio %d\n",
	    be->name, be->prio);
	if (LIST_EMPTY(&ktls_backends)) {
		LIST_INSERT_HEAD(&ktls_backends, be, next);
	} else {
		LIST_FOREACH_SAFE(curr_be, &ktls_backends, next, tmp) {
			if (curr_be->prio < be->prio) {
				LIST_INSERT_BEFORE(curr_be, be, next);
				break;
			}
			if (LIST_NEXT(curr_be, next) == NULL) {
				LIST_INSERT_AFTER(curr_be, be, next);
				break;
			}
		}
	}
	rm_wunlock(&ktls_backends_lock);
	return (0);
}

int
ktls_crypto_backend_deregister(struct ktls_crypto_backend *be)
{
	struct ktls_crypto_backend *tmp;

	/*
	 * Don't error if the backend isn't registered.  This permits
	 * MOD_UNLOAD handlers to use this function unconditionally.
	 */
	rm_wlock(&ktls_backends_lock);
	LIST_FOREACH(tmp, &ktls_backends, next) {
		if (tmp == be)
			break;
	}
	if (tmp == NULL) {
		rm_wunlock(&ktls_backends_lock);
		return (0);
	}

	if (!ktls_allow_unload) {
		rm_wunlock(&ktls_backends_lock);
		printf(
		    "KTLS: Deregistering crypto method %s is not supported\n",
		    be->name);
		return (EBUSY);
	}

	if (be->use_count) {
		rm_wunlock(&ktls_backends_lock);
		return (EBUSY);
	}

	LIST_REMOVE(be, next);
	rm_wunlock(&ktls_backends_lock);
	return (0);
}

#if defined(INET) || defined(INET6)
static uint16_t
ktls_get_cpu(struct socket *so)
{
	struct inpcb *inp;
	uint16_t cpuid;

	inp = sotoinpcb(so);
#ifdef RSS
	cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
	if (cpuid != NETISR_CPUID_NONE)
		return (cpuid);
#endif
	/*
	 * Just use the flowid to shard connections in a repeatable
	 * fashion.  Note that some crypto backends rely on the
	 * serialization provided by having the same connection use
	 * the same queue.
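	 *
	 * (Illustrative example, not from the original source: with 8
	 * worker threads, a connection whose inp_flowid is 0x1234
	 * always maps to index 0x1234 % 8 == 4, so ktls_cpuid_lookup[4]
	 * names the CPU whose work queue carries every record for that
	 * connection.)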
	 */
	cpuid = ktls_cpuid_lookup[inp->inp_flowid % ktls_number_threads];
	return (cpuid);
}
#endif

static void
ktls_init(void *dummy __unused)
{
	struct thread *td;
	struct pcpu *pc;
	cpuset_t mask;
	int error, i;

	ktls_tasks_active = counter_u64_alloc(M_WAITOK);
	ktls_cnt_on = counter_u64_alloc(M_WAITOK);
	ktls_offload_total = counter_u64_alloc(M_WAITOK);
	ktls_offload_enable_calls = counter_u64_alloc(M_WAITOK);
	ktls_offload_active = counter_u64_alloc(M_WAITOK);
	ktls_offload_failed_crypto = counter_u64_alloc(M_WAITOK);
	ktls_switch_to_ifnet = counter_u64_alloc(M_WAITOK);
	ktls_switch_to_sw = counter_u64_alloc(M_WAITOK);
	ktls_switch_failed = counter_u64_alloc(M_WAITOK);
	ktls_sw_cbc = counter_u64_alloc(M_WAITOK);
	ktls_sw_gcm = counter_u64_alloc(M_WAITOK);
	ktls_ifnet_cbc = counter_u64_alloc(M_WAITOK);
	ktls_ifnet_gcm = counter_u64_alloc(M_WAITOK);
	ktls_ifnet_reset = counter_u64_alloc(M_WAITOK);
	ktls_ifnet_reset_dropped = counter_u64_alloc(M_WAITOK);
	ktls_ifnet_reset_failed = counter_u64_alloc(M_WAITOK);

	rm_init(&ktls_backends_lock, "ktls backends");
	LIST_INIT(&ktls_backends);

	ktls_wq = malloc(sizeof(*ktls_wq) * (mp_maxid + 1), M_KTLS,
	    M_WAITOK | M_ZERO);

	ktls_session_zone = uma_zcreate("ktls_session",
	    sizeof(struct ktls_session),
#ifdef INVARIANTS
	    trash_ctor, trash_dtor, trash_init, trash_fini,
#else
	    NULL, NULL, NULL, NULL,
#endif
	    UMA_ALIGN_CACHE, 0);

	/*
	 * Initialize the workqueues to run the TLS work.  We create a
	 * work queue for each CPU.
	 */
	CPU_FOREACH(i) {
		STAILQ_INIT(&ktls_wq[i].head);
		mtx_init(&ktls_wq[i].mtx, "ktls work queue", NULL, MTX_DEF);
		error = kproc_kthread_add(ktls_work_thread, &ktls_wq[i],
		    &ktls_proc, &td, 0, 0, "KTLS", "ktls_thr_%d", i);
		if (error)
			panic("Can't add KTLS thread %d error %d", i, error);

		/*
		 * Bind threads to cores.  If ktls_bind_threads is >
		 * 1, then we bind to the NUMA domain.
		 */
		if (ktls_bind_threads) {
			if (ktls_bind_threads > 1) {
				pc = pcpu_find(i);
				CPU_COPY(&cpuset_domain[pc->pc_domain], &mask);
			} else {
				CPU_SETOF(i, &mask);
			}
			error = cpuset_setthread(td->td_tid, &mask);
			if (error)
				panic(
				    "Unable to bind KTLS thread for CPU %d error %d",
				    i, error);
		}
		ktls_cpuid_lookup[ktls_number_threads] = i;
		ktls_number_threads++;
	}
	printf("KTLS: Initialized %d threads\n", ktls_number_threads);
}
SYSINIT(ktls, SI_SUB_SMP + 1, SI_ORDER_ANY, ktls_init, NULL);

#if defined(INET) || defined(INET6)
static int
ktls_create_session(struct socket *so, struct tls_enable *en,
    struct ktls_session **tlsp)
{
	struct ktls_session *tls;
	int error;

	/* Only TLS 1.0 - 1.2 are supported. */
	if (en->tls_vmajor != TLS_MAJOR_VER_ONE)
		return (EINVAL);
	if (en->tls_vminor < TLS_MINOR_VER_ZERO ||
	    en->tls_vminor > TLS_MINOR_VER_TWO)
		return (EINVAL);

	if (en->auth_key_len < 0 || en->auth_key_len > TLS_MAX_PARAM_SIZE)
		return (EINVAL);
	if (en->cipher_key_len < 0 || en->cipher_key_len > TLS_MAX_PARAM_SIZE)
		return (EINVAL);
	if (en->iv_len < 0 || en->iv_len > TLS_MAX_PARAM_SIZE)
		return (EINVAL);

	/* All supported algorithms require a cipher key. */
	if (en->cipher_key_len == 0)
		return (EINVAL);

	/* No flags are currently supported. */
	if (en->flags != 0)
		return (EINVAL);

	/* Common checks for supported algorithms. */
	switch (en->cipher_algorithm) {
	case CRYPTO_AES_NIST_GCM_16:
		/*
		 * auth_algorithm isn't used, but permit GMAC values
		 * for compatibility.
		 */
		switch (en->auth_algorithm) {
		case 0:
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
			break;
		default:
			return (EINVAL);
		}
		if (en->auth_key_len != 0)
			return (EINVAL);
		if (en->iv_len != TLS_AEAD_GCM_LEN)
			return (EINVAL);
		break;
	case CRYPTO_AES_CBC:
		switch (en->auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
			/*
			 * TLS 1.0 requires an implicit IV.  TLS 1.1+
			 * all use explicit IVs.
			 */
			if (en->tls_vminor == TLS_MINOR_VER_ZERO) {
				if (en->iv_len != TLS_CBC_IMPLICIT_IV_LEN)
					return (EINVAL);
				break;
			}

			/* FALLTHROUGH */
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
			/* Ignore any supplied IV. */
			en->iv_len = 0;
			break;
		default:
			return (EINVAL);
		}
		if (en->auth_key_len == 0)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	tls = uma_zalloc(ktls_session_zone, M_WAITOK | M_ZERO);

	counter_u64_add(ktls_offload_active, 1);

	refcount_init(&tls->refcount, 1);
	TASK_INIT(&tls->reset_tag_task, 0, ktls_reset_send_tag, tls);

	tls->wq_index = ktls_get_cpu(so);

	tls->params.cipher_algorithm = en->cipher_algorithm;
	tls->params.auth_algorithm = en->auth_algorithm;
	tls->params.tls_vmajor = en->tls_vmajor;
	tls->params.tls_vminor = en->tls_vminor;
	tls->params.flags = en->flags;
	tls->params.max_frame_len = min(TLS_MAX_MSG_SIZE_V10_2, ktls_maxlen);

	/* Set the header and trailer lengths. */
	tls->params.tls_hlen = sizeof(struct tls_record_layer);
	switch (en->cipher_algorithm) {
	case CRYPTO_AES_NIST_GCM_16:
		tls->params.tls_hlen += 8;
		tls->params.tls_tlen = AES_GMAC_HASH_LEN;
		tls->params.tls_bs = 1;
		break;
	case CRYPTO_AES_CBC:
		switch (en->auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
			if (en->tls_vminor == TLS_MINOR_VER_ZERO) {
				/*
				 * Implicit IV, no nonce.
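				 * (The header stays at the bare 5-byte
				 * record header; TLS 1.0 CBC reuses the
				 * last ciphertext block of the previous
				 * record as the IV rather than sending
				 * one per record.)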
				 */
			} else {
				tls->params.tls_hlen += AES_BLOCK_LEN;
			}
			tls->params.tls_tlen = AES_BLOCK_LEN +
			    SHA1_HASH_LEN;
			break;
		case CRYPTO_SHA2_256_HMAC:
			tls->params.tls_hlen += AES_BLOCK_LEN;
			tls->params.tls_tlen = AES_BLOCK_LEN +
			    SHA2_256_HASH_LEN;
			break;
		case CRYPTO_SHA2_384_HMAC:
			tls->params.tls_hlen += AES_BLOCK_LEN;
			tls->params.tls_tlen = AES_BLOCK_LEN +
			    SHA2_384_HASH_LEN;
			break;
		default:
			panic("invalid hmac");
		}
		tls->params.tls_bs = AES_BLOCK_LEN;
		break;
	default:
		panic("invalid cipher");
	}

	KASSERT(tls->params.tls_hlen <= MBUF_PEXT_HDR_LEN,
	    ("TLS header length too long: %d", tls->params.tls_hlen));
	KASSERT(tls->params.tls_tlen <= MBUF_PEXT_TRAIL_LEN,
	    ("TLS trailer length too long: %d", tls->params.tls_tlen));

	if (en->auth_key_len != 0) {
		tls->params.auth_key_len = en->auth_key_len;
		tls->params.auth_key = malloc(en->auth_key_len, M_KTLS,
		    M_WAITOK);
		error = copyin(en->auth_key, tls->params.auth_key,
		    en->auth_key_len);
		if (error)
			goto out;
	}

	tls->params.cipher_key_len = en->cipher_key_len;
	tls->params.cipher_key = malloc(en->cipher_key_len, M_KTLS, M_WAITOK);
	error = copyin(en->cipher_key, tls->params.cipher_key,
	    en->cipher_key_len);
	if (error)
		goto out;

	/*
	 * This holds the implicit portion of the nonce for GCM and
	 * the initial implicit IV for TLS 1.0.  The explicit portions
	 * of the IV are generated in ktls_frame() and ktls_seq().
	 */
	if (en->iv_len != 0) {
		MPASS(en->iv_len <= sizeof(tls->params.iv));
		tls->params.iv_len = en->iv_len;
		error = copyin(en->iv, tls->params.iv, en->iv_len);
		if (error)
			goto out;
	}

	*tlsp = tls;
	return (0);

out:
	ktls_cleanup(tls);
	return (error);
}

static struct ktls_session *
ktls_clone_session(struct ktls_session *tls)
{
	struct ktls_session *tls_new;

	tls_new = uma_zalloc(ktls_session_zone, M_WAITOK | M_ZERO);

	counter_u64_add(ktls_offload_active, 1);

	refcount_init(&tls_new->refcount, 1);

	/* Copy fields from existing session. */
	tls_new->params = tls->params;
	tls_new->wq_index = tls->wq_index;

	/*
	 * Deep copy keys.
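	 * (Each session owns its own key buffers; ktls_cleanup() zeroes
	 * and frees them per session, so sharing the original session's
	 * pointers would lead to a double free.)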
	 */
	if (tls_new->params.auth_key != NULL) {
		tls_new->params.auth_key = malloc(tls->params.auth_key_len,
		    M_KTLS, M_WAITOK);
		memcpy(tls_new->params.auth_key, tls->params.auth_key,
		    tls->params.auth_key_len);
	}

	tls_new->params.cipher_key = malloc(tls->params.cipher_key_len, M_KTLS,
	    M_WAITOK);
	memcpy(tls_new->params.cipher_key, tls->params.cipher_key,
	    tls->params.cipher_key_len);

	return (tls_new);
}
#endif

static void
ktls_cleanup(struct ktls_session *tls)
{

	counter_u64_add(ktls_offload_active, -1);
	if (tls->free != NULL) {
		MPASS(tls->be != NULL);
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_sw_cbc, -1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_sw_gcm, -1);
			break;
		}
		tls->free(tls);
	} else if (tls->snd_tag != NULL) {
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_ifnet_cbc, -1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_ifnet_gcm, -1);
			break;
		}
		m_snd_tag_rele(tls->snd_tag);
	}
	if (tls->params.auth_key != NULL) {
		explicit_bzero(tls->params.auth_key, tls->params.auth_key_len);
		free(tls->params.auth_key, M_KTLS);
		tls->params.auth_key = NULL;
		tls->params.auth_key_len = 0;
	}
	if (tls->params.cipher_key != NULL) {
		explicit_bzero(tls->params.cipher_key,
		    tls->params.cipher_key_len);
		free(tls->params.cipher_key, M_KTLS);
		tls->params.cipher_key = NULL;
		tls->params.cipher_key_len = 0;
	}
	explicit_bzero(tls->params.iv, sizeof(tls->params.iv));
}

#if defined(INET) || defined(INET6)
/*
 * Common code used when first enabling ifnet TLS on a connection or
 * when allocating a new ifnet TLS session due to a routing change.
 * This function allocates a new TLS send tag on whatever interface
 * the connection is currently routed over.
 */
static int
ktls_alloc_snd_tag(struct inpcb *inp, struct ktls_session *tls, bool force,
    struct m_snd_tag **mstp)
{
	union if_snd_tag_alloc_params params;
	struct ifnet *ifp;
	struct rtentry *rt;
	struct tcpcb *tp;
	int error;

	INP_RLOCK(inp);
	if (inp->inp_flags2 & INP_FREED) {
		INP_RUNLOCK(inp);
		return (ECONNRESET);
	}
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		INP_RUNLOCK(inp);
		return (ECONNRESET);
	}
	if (inp->inp_socket == NULL) {
		INP_RUNLOCK(inp);
		return (ECONNRESET);
	}
	tp = intotcpcb(inp);

	/*
	 * Check administrative controls on ifnet TLS to determine if
	 * ifnet TLS should be denied.
	 *
	 * - Always permit 'force' requests.
	 * - ktls_ifnet_permitted == 0: always deny.
	 */
	if (!force && ktls_ifnet_permitted == 0) {
		INP_RUNLOCK(inp);
		return (ENXIO);
	}

	/*
	 * XXX: Use the cached route in the inpcb to find the
	 * interface.  This should perhaps instead use
	 * rtalloc1_fib(dst, 0, 0, fibnum).  Since KTLS is only
	 * enabled after a connection has completed key negotiation in
	 * userland, the cached route will be present in practice.
	 */
	rt = inp->inp_route.ro_rt;
	if (rt == NULL || rt->rt_ifp == NULL) {
		INP_RUNLOCK(inp);
		return (ENXIO);
	}
	ifp = rt->rt_ifp;
	if_ref(ifp);

	params.hdr.type = IF_SND_TAG_TYPE_TLS;
	params.hdr.flowid = inp->inp_flowid;
	params.hdr.flowtype = inp->inp_flowtype;
	params.tls.inp = inp;
	params.tls.tls = tls;
	INP_RUNLOCK(inp);

	if (ifp->if_snd_tag_alloc == NULL) {
		error = EOPNOTSUPP;
		goto out;
	}
	if ((ifp->if_capenable & IFCAP_NOMAP) == 0) {
		error = EOPNOTSUPP;
		goto out;
	}
	if (inp->inp_vflag & INP_IPV6) {
		if ((ifp->if_capenable & IFCAP_TXTLS6) == 0) {
			error = EOPNOTSUPP;
			goto out;
		}
	} else {
		if ((ifp->if_capenable & IFCAP_TXTLS4) == 0) {
			error = EOPNOTSUPP;
			goto out;
		}
	}
	error = ifp->if_snd_tag_alloc(ifp, &params, mstp);
out:
	if_rele(ifp);
	return (error);
}

static int
ktls_try_ifnet(struct socket *so, struct ktls_session *tls, bool force)
{
	struct m_snd_tag *mst;
	int error;

	error = ktls_alloc_snd_tag(so->so_pcb, tls, force, &mst);
	if (error == 0) {
		tls->snd_tag = mst;
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_ifnet_cbc, 1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_ifnet_gcm, 1);
			break;
		}
	}
	return (error);
}

static int
ktls_try_sw(struct socket *so, struct ktls_session *tls)
{
	struct rm_priotracker prio;
	struct ktls_crypto_backend *be;

	/*
	 * Choose the best software crypto backend.  Backends are
	 * stored in sorted priority order (largest value == most
	 * important at the head of the list), so this just stops on
	 * the first backend that claims the session by returning
	 * success.
	 */
	if (ktls_allow_unload)
		rm_rlock(&ktls_backends_lock, &prio);
	LIST_FOREACH(be, &ktls_backends, next) {
		if (be->try(so, tls) == 0)
			break;
		KASSERT(tls->cipher == NULL,
		    ("ktls backend leaked a cipher pointer"));
	}
	if (be != NULL) {
		if (ktls_allow_unload)
			be->use_count++;
		tls->be = be;
	}
	if (ktls_allow_unload)
		rm_runlock(&ktls_backends_lock, &prio);
	if (be == NULL)
		return (EOPNOTSUPP);
	switch (tls->params.cipher_algorithm) {
	case CRYPTO_AES_CBC:
		counter_u64_add(ktls_sw_cbc, 1);
		break;
	case CRYPTO_AES_NIST_GCM_16:
		counter_u64_add(ktls_sw_gcm, 1);
		break;
	}
	return (0);
}

int
ktls_enable_tx(struct socket *so, struct tls_enable *en)
{
	struct ktls_session *tls;
	int error;

	if (!ktls_offload_enable)
		return (ENOTSUP);

	counter_u64_add(ktls_offload_enable_calls, 1);

	/*
	 * This should always be true since only the TCP socket option
	 * invokes this function.
	 */
	if (so->so_proto->pr_protocol != IPPROTO_TCP)
		return (EINVAL);

	/*
	 * XXX: Don't overwrite existing sessions.  We should permit
	 * this to support rekeying in the future.
	 */
	if (so->so_snd.sb_tls_info != NULL)
		return (EALREADY);

	if (en->cipher_algorithm == CRYPTO_AES_CBC && !ktls_cbc_enable)
		return (ENOTSUP);

	/* TLS requires ext pgs */
	if (mb_use_ext_pgs == 0)
		return (ENXIO);

	error = ktls_create_session(so, en, &tls);
	if (error)
		return (error);

	/*
	 * Prefer ifnet TLS over software TLS.
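	 * (If the interface the connection is currently routed over
	 * cannot offload this session, ktls_try_ifnet() fails and we
	 * fall back to the highest-priority software backend that
	 * claims the session.)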
	 */
	error = ktls_try_ifnet(so, tls, false);
	if (error)
		error = ktls_try_sw(so, tls);

	if (error) {
		ktls_cleanup(tls);
		return (error);
	}

	error = sblock(&so->so_snd, SBL_WAIT);
	if (error) {
		ktls_cleanup(tls);
		return (error);
	}

	SOCKBUF_LOCK(&so->so_snd);
	so->so_snd.sb_tls_info = tls;
	if (tls->sw_encrypt == NULL)
		so->so_snd.sb_flags |= SB_TLS_IFNET;
	SOCKBUF_UNLOCK(&so->so_snd);
	sbunlock(&so->so_snd);

	counter_u64_add(ktls_offload_total, 1);

	return (0);
}

int
ktls_get_tx_mode(struct socket *so)
{
	struct ktls_session *tls;
	struct inpcb *inp;
	int mode;

	inp = so->so_pcb;
	INP_WLOCK_ASSERT(inp);
	SOCKBUF_LOCK(&so->so_snd);
	tls = so->so_snd.sb_tls_info;
	if (tls == NULL)
		mode = TCP_TLS_MODE_NONE;
	else if (tls->sw_encrypt != NULL)
		mode = TCP_TLS_MODE_SW;
	else
		mode = TCP_TLS_MODE_IFNET;
	SOCKBUF_UNLOCK(&so->so_snd);
	return (mode);
}

/*
 * Switch between SW and ifnet TLS sessions as requested.
 */
int
ktls_set_tx_mode(struct socket *so, int mode)
{
	struct ktls_session *tls, *tls_new;
	struct inpcb *inp;
	int error;

	MPASS(mode == TCP_TLS_MODE_SW || mode == TCP_TLS_MODE_IFNET);

	inp = so->so_pcb;
	INP_WLOCK_ASSERT(inp);
	SOCKBUF_LOCK(&so->so_snd);
	tls = so->so_snd.sb_tls_info;
	if (tls == NULL) {
		SOCKBUF_UNLOCK(&so->so_snd);
		return (0);
	}

	if ((tls->sw_encrypt != NULL && mode == TCP_TLS_MODE_SW) ||
	    (tls->sw_encrypt == NULL && mode == TCP_TLS_MODE_IFNET)) {
		SOCKBUF_UNLOCK(&so->so_snd);
		return (0);
	}

	tls = ktls_hold(tls);
	SOCKBUF_UNLOCK(&so->so_snd);
	INP_WUNLOCK(inp);

	tls_new = ktls_clone_session(tls);

	if (mode == TCP_TLS_MODE_IFNET)
		error = ktls_try_ifnet(so, tls_new, true);
	else
		error = ktls_try_sw(so, tls_new);
	if (error) {
		counter_u64_add(ktls_switch_failed, 1);
		ktls_free(tls_new);
		ktls_free(tls);
		INP_WLOCK(inp);
		return (error);
	}

	error = sblock(&so->so_snd, SBL_WAIT);
	if (error) {
		counter_u64_add(ktls_switch_failed, 1);
		ktls_free(tls_new);
		ktls_free(tls);
		INP_WLOCK(inp);
		return (error);
	}

	/*
	 * If we raced with another session change, keep the existing
	 * session.
	 */
	if (tls != so->so_snd.sb_tls_info) {
		counter_u64_add(ktls_switch_failed, 1);
		sbunlock(&so->so_snd);
		ktls_free(tls_new);
		ktls_free(tls);
		INP_WLOCK(inp);
		return (EBUSY);
	}

	SOCKBUF_LOCK(&so->so_snd);
	so->so_snd.sb_tls_info = tls_new;
	if (tls_new->sw_encrypt == NULL)
		so->so_snd.sb_flags |= SB_TLS_IFNET;
	SOCKBUF_UNLOCK(&so->so_snd);
	sbunlock(&so->so_snd);

	/*
	 * Drop two references on 'tls'.  The first is for the
	 * ktls_hold() above.  The second drops the reference from the
	 * socket buffer.
	 */
	KASSERT(tls->refcount >= 2, ("too few references on old session"));
	ktls_free(tls);
	ktls_free(tls);

	if (mode == TCP_TLS_MODE_IFNET)
		counter_u64_add(ktls_switch_to_ifnet, 1);
	else
		counter_u64_add(ktls_switch_to_sw, 1);

	INP_WLOCK(inp);
	return (0);
}

/*
 * Try to allocate a new TLS send tag.  This task is scheduled when
 * ip_output detects a route change while trying to transmit a packet
 * holding a TLS record.  If a new tag is allocated, replace the tag
 * in the TLS session.  Subsequent packets on the connection will use
 * the new tag.  If a new tag cannot be allocated, drop the
 * connection.
 */
static void
ktls_reset_send_tag(void *context, int pending)
{
	struct epoch_tracker et;
	struct ktls_session *tls;
	struct m_snd_tag *old, *new;
	struct inpcb *inp;
	struct tcpcb *tp;
	int error;

	MPASS(pending == 1);

	tls = context;
	inp = tls->inp;

	/*
	 * Free the old tag first before allocating a new one.
	 * ip[6]_output_send() will treat a NULL send tag the same as
	 * an ifp mismatch and drop packets until a new tag is
	 * allocated.
	 *
	 * Write-lock the INP when changing tls->snd_tag since
	 * ip[6]_output_send() holds a read-lock when reading the
	 * pointer.
	 */
	INP_WLOCK(inp);
	old = tls->snd_tag;
	tls->snd_tag = NULL;
	INP_WUNLOCK(inp);
	if (old != NULL)
		m_snd_tag_rele(old);

	error = ktls_alloc_snd_tag(inp, tls, true, &new);

	if (error == 0) {
		INP_WLOCK(inp);
		tls->snd_tag = new;
		mtx_pool_lock(mtxpool_sleep, tls);
		tls->reset_pending = false;
		mtx_pool_unlock(mtxpool_sleep, tls);
		if (!in_pcbrele_wlocked(inp))
			INP_WUNLOCK(inp);

		counter_u64_add(ktls_ifnet_reset, 1);

		/*
		 * XXX: Should we kick tcp_output explicitly now that
		 * the send tag is fixed or just rely on timers?
		 */
	} else {
		INP_INFO_RLOCK_ET(&V_tcbinfo, et);
		INP_WLOCK(inp);
		if (!in_pcbrele_wlocked(inp)) {
			if (!(inp->inp_flags & INP_TIMEWAIT) &&
			    !(inp->inp_flags & INP_DROPPED)) {
				tp = intotcpcb(inp);
				tp = tcp_drop(tp, ECONNABORTED);
				if (tp != NULL)
					INP_WUNLOCK(inp);
				counter_u64_add(ktls_ifnet_reset_dropped, 1);
			} else
				INP_WUNLOCK(inp);
		}
		INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);

		counter_u64_add(ktls_ifnet_reset_failed, 1);

		/*
		 * Leave reset_pending true to avoid future tasks while
		 * the socket goes away.
		 */
	}

	ktls_free(tls);
}

int
ktls_output_eagain(struct inpcb *inp, struct ktls_session *tls)
{

	if (inp == NULL)
		return (ENOBUFS);

	INP_LOCK_ASSERT(inp);

	/*
	 * See if we should schedule a task to update the send tag for
	 * this session.
	 */
	mtx_pool_lock(mtxpool_sleep, tls);
	if (!tls->reset_pending) {
		(void) ktls_hold(tls);
		in_pcbref(inp);
		tls->inp = inp;
		tls->reset_pending = true;
		taskqueue_enqueue(taskqueue_thread, &tls->reset_tag_task);
	}
	mtx_pool_unlock(mtxpool_sleep, tls);
	return (ENOBUFS);
}
#endif

void
ktls_destroy(struct ktls_session *tls)
{
	struct rm_priotracker prio;

	ktls_cleanup(tls);
	if (tls->be != NULL && ktls_allow_unload) {
		rm_rlock(&ktls_backends_lock, &prio);
		tls->be->use_count--;
		rm_runlock(&ktls_backends_lock, &prio);
	}
	uma_zfree(ktls_session_zone, tls);
}

void
ktls_seq(struct sockbuf *sb, struct mbuf *m)
{
	struct mbuf_ext_pgs *pgs;
	struct tls_record_layer *tlshdr;
	uint64_t seqno;

	for (; m != NULL; m = m->m_next) {
		KASSERT((m->m_flags & M_NOMAP) != 0,
		    ("ktls_seq: mapped mbuf %p", m));

		pgs = m->m_ext.ext_pgs;
		pgs->seqno = sb->sb_tls_seqno;

		/*
		 * Store the sequence number in the TLS header as the
		 * explicit part of the IV for GCM.
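		 *
		 * (For TLS 1.2 AES-GCM the 12-byte nonce is the 4-byte
		 * implicit salt kept in params.iv followed by this
		 * 8-byte explicit value, so carrying the record
		 * sequence number here yields a unique nonce per
		 * record.)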
		 */
		if (pgs->tls->params.cipher_algorithm ==
		    CRYPTO_AES_NIST_GCM_16) {
			tlshdr = (void *)pgs->hdr;
			seqno = htobe64(pgs->seqno);
			memcpy(tlshdr + 1, &seqno, sizeof(seqno));
		}
		sb->sb_tls_seqno++;
	}
}

/*
 * Add TLS framing (headers and trailers) to a chain of mbufs.  Each
 * mbuf in the chain must be an unmapped mbuf.  The payload of the
 * mbuf must be populated with the payload of each TLS record.
 *
 * The record_type argument specifies the TLS record type used when
 * populating the TLS header.
 *
 * The enq_cnt argument on return is set to the number of pages of
 * payload data for this entire chain that need to be encrypted via SW
 * encryption.  The returned value should be passed to ktls_enqueue
 * when scheduling encryption of this chain of mbufs.
 */
int
ktls_frame(struct mbuf *top, struct ktls_session *tls, int *enq_cnt,
    uint8_t record_type)
{
	struct tls_record_layer *tlshdr;
	struct mbuf *m;
	struct mbuf_ext_pgs *pgs;
	uint16_t tls_len;
	int maxlen;

	maxlen = tls->params.max_frame_len;
	*enq_cnt = 0;
	for (m = top; m != NULL; m = m->m_next) {
		/*
		 * All mbufs in the chain should be non-empty TLS
		 * records whose payload does not exceed the maximum
		 * frame length.
		 */
		if (m->m_len > maxlen || m->m_len == 0)
			return (EINVAL);
		tls_len = m->m_len;

		/*
		 * TLS frames require unmapped mbufs to store session
		 * info.
		 */
		KASSERT((m->m_flags & M_NOMAP) != 0,
		    ("ktls_frame: mapped mbuf %p (top = %p)\n", m, top));

		pgs = m->m_ext.ext_pgs;

		/* Save a reference to the session. */
		pgs->tls = ktls_hold(tls);

		pgs->hdr_len = tls->params.tls_hlen;
		pgs->trail_len = tls->params.tls_tlen;
		if (tls->params.cipher_algorithm == CRYPTO_AES_CBC) {
			int bs, delta;

			/*
			 * AES-CBC pads messages to a multiple of the
			 * block size.  Note that the padding is
			 * applied after the digest and the encryption
			 * is done on the "plaintext || mac || padding".
			 * At least one byte of padding is always
			 * present.
			 *
			 * Compute the final trailer length assuming
			 * at most one block of padding.
			 * tls->params.tls_tlen is the maximum
			 * possible trailer length (padding + digest).
			 * delta holds the number of excess padding
			 * bytes if the maximum were used.  Those
			 * extra bytes are removed.
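			 *
			 * (Worked example, illustrative: with AES-CBC
			 * and HMAC-SHA1, tls_tlen is 16 + 20 = 36.  A
			 * 100-byte record gives
			 * delta = (100 + 36) & 15 = 8, so trail_len
			 * becomes 28 and 100 + 20 + 8 = 128 is an
			 * exact multiple of the 16-byte block size.)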
			 */
			bs = tls->params.tls_bs;
			delta = (tls_len + tls->params.tls_tlen) & (bs - 1);
			pgs->trail_len -= delta;
		}
		m->m_len += pgs->hdr_len + pgs->trail_len;

		/* Populate the TLS header. */
		tlshdr = (void *)pgs->hdr;
		tlshdr->tls_vmajor = tls->params.tls_vmajor;
		tlshdr->tls_vminor = tls->params.tls_vminor;
		tlshdr->tls_type = record_type;
		tlshdr->tls_length = htons(m->m_len - sizeof(*tlshdr));

		/*
		 * For GCM, the sequence number is stored in the
		 * header by ktls_seq().  For CBC, a random nonce is
		 * inserted for TLS 1.1+.
		 */
		if (tls->params.cipher_algorithm == CRYPTO_AES_CBC &&
		    tls->params.tls_vminor >= TLS_MINOR_VER_ONE)
			arc4rand(tlshdr + 1, AES_BLOCK_LEN, 0);

		/*
		 * When using SW encryption, mark the mbuf not ready.
		 * It will be marked ready via sbready() after the
		 * record has been encrypted.
		 *
		 * When using ifnet TLS, unencrypted TLS records are
		 * sent down the stack to the NIC.
		 */
		if (tls->sw_encrypt != NULL) {
			m->m_flags |= M_NOTREADY;
			pgs->nrdy = pgs->npgs;
			*enq_cnt += pgs->npgs;
		}
	}
	return (0);
}

void
ktls_enqueue_to_free(struct mbuf_ext_pgs *pgs)
{
	struct ktls_wq *wq;
	bool running;

	/* Mark it for freeing. */
	pgs->mbuf = NULL;
	wq = &ktls_wq[pgs->tls->wq_index];
	mtx_lock(&wq->mtx);
	STAILQ_INSERT_TAIL(&wq->head, pgs, stailq);
	running = wq->running;
	mtx_unlock(&wq->mtx);
	if (!running)
		wakeup(wq);
}

void
ktls_enqueue(struct mbuf *m, struct socket *so, int page_count)
{
	struct mbuf_ext_pgs *pgs;
	struct ktls_wq *wq;
	bool running;

	KASSERT(((m->m_flags & (M_NOMAP | M_NOTREADY)) ==
	    (M_NOMAP | M_NOTREADY)),
	    ("ktls_enqueue: %p not unready & nomap mbuf\n", m));
	KASSERT(page_count != 0, ("enqueueing TLS mbuf with zero page count"));

	pgs = m->m_ext.ext_pgs;

	KASSERT(pgs->tls->sw_encrypt != NULL, ("ifnet TLS mbuf"));

	pgs->enc_cnt = page_count;
	pgs->mbuf = m;

	/*
	 * Save a pointer to the socket.  The caller is responsible
	 * for taking an additional reference via soref().
	 */
	pgs->so = so;

	wq = &ktls_wq[pgs->tls->wq_index];
	mtx_lock(&wq->mtx);
	STAILQ_INSERT_TAIL(&wq->head, pgs, stailq);
	running = wq->running;
	mtx_unlock(&wq->mtx);
	if (!running)
		wakeup(wq);
	counter_u64_add(ktls_cnt_on, 1);
}

static __noinline void
ktls_encrypt(struct mbuf_ext_pgs *pgs)
{
	struct ktls_session *tls;
	struct socket *so;
	struct mbuf *m, *top;
	vm_paddr_t parray[1 + btoc(TLS_MAX_MSG_SIZE_V10_2)];
	struct iovec src_iov[1 + btoc(TLS_MAX_MSG_SIZE_V10_2)];
	struct iovec dst_iov[1 + btoc(TLS_MAX_MSG_SIZE_V10_2)];
	vm_page_t pg;
	int error, i, len, npages, off, total_pages;
	bool is_anon;

	so = pgs->so;
	tls = pgs->tls;
	top = pgs->mbuf;
	KASSERT(tls != NULL, ("tls = NULL, top = %p, pgs = %p\n", top, pgs));
	KASSERT(so != NULL, ("so = NULL, top = %p, pgs = %p\n", top, pgs));
#ifdef INVARIANTS
	pgs->so = NULL;
	pgs->mbuf = NULL;
#endif
	total_pages = pgs->enc_cnt;
	npages = 0;

	/*
	 * Encrypt the TLS records in the chain of mbufs starting with
	 * 'top'.  'total_pages' gives us a total count of pages and is
	 * used to know when we have finished encrypting the TLS
	 * records originally queued with 'top'.
	 *
	 * NB: These mbufs are queued in the socket buffer and
	 * 'm_next' is traversing the mbufs in the socket buffer.  The
	 * socket buffer lock is not held while traversing this chain.
	 * Since the mbufs are all marked M_NOTREADY their 'm_next'
	 * pointers should be stable.  However, the 'm_next' of the
	 * last mbuf encrypted is not necessarily NULL.  It can point
	 * to other mbufs appended while 'top' was on the TLS work
	 * queue.
	 *
	 * Each mbuf holds an entire TLS record.
1314 */ 1315 error = 0; 1316 for (m = top; npages != total_pages; m = m->m_next) { 1317 pgs = m->m_ext.ext_pgs; 1318 1319 KASSERT(pgs->tls == tls, 1320 ("different TLS sessions in a single mbuf chain: %p vs %p", 1321 tls, pgs->tls)); 1322 KASSERT((m->m_flags & (M_NOMAP | M_NOTREADY)) == 1323 (M_NOMAP | M_NOTREADY), 1324 ("%p not unready & nomap mbuf (top = %p)\n", m, top)); 1325 KASSERT(npages + pgs->npgs <= total_pages, 1326 ("page count mismatch: top %p, total_pages %d, m %p", top, 1327 total_pages, m)); 1328 1329 /* 1330 * Generate source and destination ivoecs to pass to 1331 * the SW encryption backend. For writable mbufs, the 1332 * destination iovec is a copy of the source and 1333 * encryption is done in place. For file-backed mbufs 1334 * (from sendfile), anonymous wired pages are 1335 * allocated and assigned to the destination iovec. 1336 */ 1337 is_anon = M_WRITABLE(m); 1338 1339 off = pgs->first_pg_off; 1340 for (i = 0; i < pgs->npgs; i++, off = 0) { 1341 len = mbuf_ext_pg_len(pgs, i, off); 1342 src_iov[i].iov_len = len; 1343 src_iov[i].iov_base = 1344 (char *)(void *)PHYS_TO_DMAP(pgs->pa[i]) + off; 1345 1346 if (is_anon) { 1347 dst_iov[i].iov_base = src_iov[i].iov_base; 1348 dst_iov[i].iov_len = src_iov[i].iov_len; 1349 continue; 1350 } 1351 retry_page: 1352 pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | 1353 VM_ALLOC_NOOBJ | VM_ALLOC_NODUMP | VM_ALLOC_WIRED); 1354 if (pg == NULL) { 1355 vm_wait(NULL); 1356 goto retry_page; 1357 } 1358 parray[i] = VM_PAGE_TO_PHYS(pg); 1359 dst_iov[i].iov_base = 1360 (char *)(void *)PHYS_TO_DMAP(parray[i]) + off; 1361 dst_iov[i].iov_len = len; 1362 } 1363 1364 npages += i; 1365 1366 error = (*tls->sw_encrypt)(tls, 1367 (const struct tls_record_layer *)pgs->hdr, 1368 pgs->trail, src_iov, dst_iov, i, pgs->seqno); 1369 if (error) { 1370 counter_u64_add(ktls_offload_failed_crypto, 1); 1371 break; 1372 } 1373 1374 /* 1375 * For file-backed mbufs, release the file-backed 1376 * pages and replace them in the ext_pgs array with 1377 * the anonymous wired pages allocated above. 1378 */ 1379 if (!is_anon) { 1380 /* Free the old pages. */ 1381 m->m_ext.ext_free(m); 1382 1383 /* Replace them with the new pages. */ 1384 for (i = 0; i < pgs->npgs; i++) 1385 pgs->pa[i] = parray[i]; 1386 1387 /* Use the basic free routine. */ 1388 m->m_ext.ext_free = mb_free_mext_pgs; 1389 } 1390 1391 /* 1392 * Drop a reference to the session now that it is no 1393 * longer needed. Existing code depends on encrypted 1394 * records having no associated session vs 1395 * yet-to-be-encrypted records having an associated 1396 * session. 
1397 */ 1398 pgs->tls = NULL; 1399 ktls_free(tls); 1400 } 1401 1402 CURVNET_SET(so->so_vnet); 1403 if (error == 0) { 1404 (void)(*so->so_proto->pr_usrreqs->pru_ready)(so, top, npages); 1405 } else { 1406 so->so_proto->pr_usrreqs->pru_abort(so); 1407 so->so_error = EIO; 1408 mb_free_notready(top, total_pages); 1409 } 1410 1411 SOCK_LOCK(so); 1412 sorele(so); 1413 CURVNET_RESTORE(); 1414 } 1415 1416 static void 1417 ktls_work_thread(void *ctx) 1418 { 1419 struct ktls_wq *wq = ctx; 1420 struct mbuf_ext_pgs *p, *n; 1421 struct ktls_session *tls; 1422 STAILQ_HEAD(, mbuf_ext_pgs) local_head; 1423 1424 #if defined(__aarch64__) || defined(__amd64__) || defined(__i386__) 1425 fpu_kern_thread(0); 1426 #endif 1427 for (;;) { 1428 mtx_lock(&wq->mtx); 1429 while (STAILQ_EMPTY(&wq->head)) { 1430 wq->running = false; 1431 mtx_sleep(wq, &wq->mtx, 0, "-", 0); 1432 wq->running = true; 1433 } 1434 1435 STAILQ_INIT(&local_head); 1436 STAILQ_CONCAT(&local_head, &wq->head); 1437 mtx_unlock(&wq->mtx); 1438 1439 STAILQ_FOREACH_SAFE(p, &local_head, stailq, n) { 1440 if (p->mbuf != NULL) { 1441 ktls_encrypt(p); 1442 counter_u64_add(ktls_cnt_on, -1); 1443 } else { 1444 tls = p->tls; 1445 ktls_free(tls); 1446 uma_zfree(zone_extpgs, p); 1447 } 1448 } 1449 } 1450 } 1451