/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2014-2019 Netflix Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_kern_tls.h"
#include "opt_ratelimit.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/domainset.h>
#include <sys/endian.h>
#include <sys/ktls.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rmlock.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/refcount.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/kthread.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#if defined(__aarch64__) || defined(__amd64__) || defined(__i386__)
#include <machine/pcb.h>
#endif
#include <machine/vmparam.h>
#include <net/if.h>
#include <net/if_var.h>
#ifdef RSS
#include <net/netisr.h>
#include <net/rss_config.h>
#endif
#include <net/route.h>
#include <net/route/nhop.h>
#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#endif
#include <netinet/tcp_var.h>
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#include <opencrypto/cryptodev.h>
#include <opencrypto/ktls.h>
#include <vm/uma_dbg.h>
#include <vm/vm.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_pagequeue.h>

struct ktls_wq {
	struct mtx	mtx;
	STAILQ_HEAD(, mbuf) m_head;
	STAILQ_HEAD(, socket) so_head;
	bool		running;
	int		lastallocfail;
} __aligned(CACHE_LINE_SIZE);

struct ktls_alloc_thread {
	uint64_t wakeups;
	uint64_t allocs;
	struct thread *td;
	int running;
};

struct ktls_domain_info {
	int count;
	int cpu[MAXCPU];
	struct ktls_alloc_thread alloc_td;
};

struct ktls_domain_info ktls_domains[MAXMEMDOM];
static struct ktls_wq *ktls_wq;
static struct proc *ktls_proc;
static uma_zone_t ktls_session_zone;
static uma_zone_t ktls_buffer_zone;
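/*
 * Map a KTLS worker thread index to the CPU id that thread is bound
 * to; populated by ktls_init() as the worker threads are created.
 */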
static uint16_t ktls_cpuid_lookup[MAXCPU];

SYSCTL_NODE(_kern_ipc, OID_AUTO, tls, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Kernel TLS offload");
SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Kernel TLS offload stats");

#ifdef RSS
static int ktls_bind_threads = 1;
#else
static int ktls_bind_threads;
#endif
SYSCTL_INT(_kern_ipc_tls, OID_AUTO, bind_threads, CTLFLAG_RDTUN,
    &ktls_bind_threads, 0,
    "Bind crypto threads to cores (1) or cores and domains (2) at boot");

static u_int ktls_maxlen = 16384;
SYSCTL_UINT(_kern_ipc_tls, OID_AUTO, maxlen, CTLFLAG_RDTUN,
    &ktls_maxlen, 0, "Maximum TLS record size");

static int ktls_number_threads;
SYSCTL_INT(_kern_ipc_tls_stats, OID_AUTO, threads, CTLFLAG_RD,
    &ktls_number_threads, 0,
    "Number of TLS threads in thread-pool");

unsigned int ktls_ifnet_max_rexmit_pct = 2;
SYSCTL_UINT(_kern_ipc_tls, OID_AUTO, ifnet_max_rexmit_pct, CTLFLAG_RWTUN,
    &ktls_ifnet_max_rexmit_pct, 2,
    "Max percent bytes retransmitted before ifnet TLS is disabled");

static bool ktls_offload_enable;
SYSCTL_BOOL(_kern_ipc_tls, OID_AUTO, enable, CTLFLAG_RWTUN,
    &ktls_offload_enable, 0,
    "Enable support for kernel TLS offload");

static bool ktls_cbc_enable = true;
SYSCTL_BOOL(_kern_ipc_tls, OID_AUTO, cbc_enable, CTLFLAG_RWTUN,
    &ktls_cbc_enable, 1,
    "Enable Support of AES-CBC crypto for kernel TLS");

static bool ktls_sw_buffer_cache = true;
SYSCTL_BOOL(_kern_ipc_tls, OID_AUTO, sw_buffer_cache, CTLFLAG_RDTUN,
    &ktls_sw_buffer_cache, 1,
    "Enable caching of output buffers for SW encryption");

static int ktls_max_alloc = 128;
SYSCTL_INT(_kern_ipc_tls, OID_AUTO, max_alloc, CTLFLAG_RWTUN,
    &ktls_max_alloc, 128,
    "Max number of 16k buffers to allocate in thread context");

static COUNTER_U64_DEFINE_EARLY(ktls_tasks_active);
SYSCTL_COUNTER_U64(_kern_ipc_tls, OID_AUTO, tasks_active, CTLFLAG_RD,
    &ktls_tasks_active, "Number of active tasks");

static COUNTER_U64_DEFINE_EARLY(ktls_cnt_tx_queued);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, sw_tx_inqueue, CTLFLAG_RD,
    &ktls_cnt_tx_queued,
    "Number of TLS records in queue to tasks for SW encryption");

static COUNTER_U64_DEFINE_EARLY(ktls_cnt_rx_queued);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, sw_rx_inqueue, CTLFLAG_RD,
    &ktls_cnt_rx_queued,
    "Number of TLS sockets in queue to tasks for SW decryption");

static COUNTER_U64_DEFINE_EARLY(ktls_offload_total);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, offload_total,
    CTLFLAG_RD, &ktls_offload_total,
    "Total successful TLS setups (parameters set)");

static COUNTER_U64_DEFINE_EARLY(ktls_offload_enable_calls);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, enable_calls,
    CTLFLAG_RD, &ktls_offload_enable_calls,
    "Total number of TLS enable calls made");

static COUNTER_U64_DEFINE_EARLY(ktls_offload_active);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, active, CTLFLAG_RD,
    &ktls_offload_active, "Total Active TLS sessions");

static COUNTER_U64_DEFINE_EARLY(ktls_offload_corrupted_records);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, corrupted_records, CTLFLAG_RD,
    &ktls_offload_corrupted_records, "Total corrupted TLS records received");

static COUNTER_U64_DEFINE_EARLY(ktls_offload_failed_crypto);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, failed_crypto, CTLFLAG_RD,
    &ktls_offload_failed_crypto, "Total TLS crypto failures");

static COUNTER_U64_DEFINE_EARLY(ktls_switch_to_ifnet);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, switch_to_ifnet, CTLFLAG_RD,
    &ktls_switch_to_ifnet, "TLS sessions switched from SW to ifnet");

static COUNTER_U64_DEFINE_EARLY(ktls_switch_to_sw);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, switch_to_sw, CTLFLAG_RD,
    &ktls_switch_to_sw, "TLS sessions switched from ifnet to SW");

static COUNTER_U64_DEFINE_EARLY(ktls_switch_failed);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, switch_failed, CTLFLAG_RD,
    &ktls_switch_failed, "TLS sessions unable to switch between SW and ifnet");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_disable_fail);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, ifnet_disable_failed, CTLFLAG_RD,
    &ktls_ifnet_disable_fail, "TLS sessions unable to switch to SW from ifnet");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_disable_ok);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, ifnet_disable_ok, CTLFLAG_RD,
    &ktls_ifnet_disable_ok, "TLS sessions able to switch to SW from ifnet");

SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, sw, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Software TLS session stats");
SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, ifnet, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Hardware (ifnet) TLS session stats");
#ifdef TCP_OFFLOAD
SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, toe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "TOE TLS session stats");
#endif

static COUNTER_U64_DEFINE_EARLY(ktls_sw_cbc);
SYSCTL_COUNTER_U64(_kern_ipc_tls_sw, OID_AUTO, cbc, CTLFLAG_RD, &ktls_sw_cbc,
    "Active number of software TLS sessions using AES-CBC");

static COUNTER_U64_DEFINE_EARLY(ktls_sw_gcm);
SYSCTL_COUNTER_U64(_kern_ipc_tls_sw, OID_AUTO, gcm, CTLFLAG_RD, &ktls_sw_gcm,
    "Active number of software TLS sessions using AES-GCM");

static COUNTER_U64_DEFINE_EARLY(ktls_sw_chacha20);
SYSCTL_COUNTER_U64(_kern_ipc_tls_sw, OID_AUTO, chacha20, CTLFLAG_RD,
    &ktls_sw_chacha20,
    "Active number of software TLS sessions using Chacha20-Poly1305");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_cbc);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, cbc, CTLFLAG_RD,
    &ktls_ifnet_cbc,
    "Active number of ifnet TLS sessions using AES-CBC");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_gcm);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, gcm, CTLFLAG_RD,
    &ktls_ifnet_gcm,
    "Active number of ifnet TLS sessions using AES-GCM");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_chacha20);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, chacha20, CTLFLAG_RD,
    &ktls_ifnet_chacha20,
    "Active number of ifnet TLS sessions using Chacha20-Poly1305");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_reset);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, reset, CTLFLAG_RD,
    &ktls_ifnet_reset, "TLS sessions updated to a new ifnet send tag");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_reset_dropped);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, reset_dropped, CTLFLAG_RD,
    &ktls_ifnet_reset_dropped,
    "TLS sessions dropped after failing to update ifnet send tag");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_reset_failed);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, reset_failed, CTLFLAG_RD,
    &ktls_ifnet_reset_failed,
    "TLS sessions that failed to allocate a new ifnet send tag");

static int ktls_ifnet_permitted;
SYSCTL_UINT(_kern_ipc_tls_ifnet, OID_AUTO, permitted, CTLFLAG_RWTUN,
    &ktls_ifnet_permitted, 1,
    "Whether to permit hardware (ifnet) TLS sessions");

#ifdef TCP_OFFLOAD
static COUNTER_U64_DEFINE_EARLY(ktls_toe_cbc);
SYSCTL_COUNTER_U64(_kern_ipc_tls_toe, OID_AUTO, cbc, CTLFLAG_RD,
    &ktls_toe_cbc,
    "Active number of TOE TLS sessions using AES-CBC");

static COUNTER_U64_DEFINE_EARLY(ktls_toe_gcm);
SYSCTL_COUNTER_U64(_kern_ipc_tls_toe, OID_AUTO, gcm, CTLFLAG_RD,
    &ktls_toe_gcm,
    "Active number of TOE TLS sessions using AES-GCM");

static COUNTER_U64_DEFINE_EARLY(ktls_toe_chacha20);
SYSCTL_COUNTER_U64(_kern_ipc_tls_toe, OID_AUTO, chacha20, CTLFLAG_RD,
    &ktls_toe_chacha20,
    "Active number of TOE TLS sessions using Chacha20-Poly1305");
#endif

static MALLOC_DEFINE(M_KTLS, "ktls", "Kernel TLS");

static void ktls_cleanup(struct ktls_session *tls);
#if defined(INET) || defined(INET6)
static void ktls_reset_send_tag(void *context, int pending);
#endif
static void ktls_work_thread(void *ctx);
static void ktls_alloc_thread(void *ctx);

#if defined(INET) || defined(INET6)
static u_int
ktls_get_cpu(struct socket *so)
{
	struct inpcb *inp;
#ifdef NUMA
	struct ktls_domain_info *di;
#endif
	u_int cpuid;

	inp = sotoinpcb(so);
#ifdef RSS
	cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
	if (cpuid != NETISR_CPUID_NONE)
		return (cpuid);
#endif
	/*
	 * Just use the flowid to shard connections in a repeatable
	 * fashion.  Note that TLS 1.0 sessions rely on the
	 * serialization provided by having the same connection use
	 * the same queue.
	 */
#ifdef NUMA
	if (ktls_bind_threads > 1 && inp->inp_numa_domain != M_NODOM) {
		di = &ktls_domains[inp->inp_numa_domain];
		cpuid = di->cpu[inp->inp_flowid % di->count];
	} else
#endif
		cpuid = ktls_cpuid_lookup[inp->inp_flowid % ktls_number_threads];
	return (cpuid);
}
#endif

static int
ktls_buffer_import(void *arg, void **store, int count, int domain, int flags)
{
	vm_page_t m;
	int i;

	KASSERT((ktls_maxlen & PAGE_MASK) == 0,
	    ("%s: ktls max length %d is not page size-aligned",
	    __func__, ktls_maxlen));

	for (i = 0; i < count; i++) {
		m = vm_page_alloc_contig_domain(NULL, 0, domain,
		    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
		    VM_ALLOC_NODUMP | malloc2vm_flags(flags),
		    atop(ktls_maxlen), 0, ~0ul, PAGE_SIZE, 0,
		    VM_MEMATTR_DEFAULT);
		if (m == NULL)
			break;
		store[i] = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	}
	return (i);
}

static void
ktls_buffer_release(void *arg __unused, void **store, int count)
{
	vm_page_t m;
	int i, j;

	for (i = 0; i < count; i++) {
		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)store[i]));
		for (j = 0; j < atop(ktls_maxlen); j++) {
			(void)vm_page_unwire_noq(m + j);
			vm_page_free(m + j);
		}
	}
}

static void
ktls_free_mext_contig(struct mbuf *m)
{
	M_ASSERTEXTPG(m);
	uma_zfree(ktls_buffer_zone, (void *)PHYS_TO_DMAP(m->m_epg_pa[0]));
}
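/*
 * Boot-time initialization: create the session zone and the optional
 * output buffer cache, start one worker thread per CPU to perform SW
 * crypto, and bind those threads to CPUs or NUMA domains as requested
 * by the kern.ipc.tls.bind_threads tunable.
 */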
static void
ktls_init(void *dummy __unused)
{
	struct thread *td;
	struct pcpu *pc;
	cpuset_t mask;
	int count, domain, error, i;

	ktls_wq = malloc(sizeof(*ktls_wq) * (mp_maxid + 1), M_KTLS,
	    M_WAITOK | M_ZERO);

	ktls_session_zone = uma_zcreate("ktls_session",
	    sizeof(struct ktls_session),
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_CACHE, 0);

	if (ktls_sw_buffer_cache) {
		ktls_buffer_zone = uma_zcache_create("ktls_buffers",
		    roundup2(ktls_maxlen, PAGE_SIZE), NULL, NULL, NULL, NULL,
		    ktls_buffer_import, ktls_buffer_release, NULL,
		    UMA_ZONE_FIRSTTOUCH);
	}

	/*
	 * Initialize the workqueues to run the TLS work.  We create a
	 * work queue for each CPU.
	 */
	CPU_FOREACH(i) {
		STAILQ_INIT(&ktls_wq[i].m_head);
		STAILQ_INIT(&ktls_wq[i].so_head);
		mtx_init(&ktls_wq[i].mtx, "ktls work queue", NULL, MTX_DEF);
		error = kproc_kthread_add(ktls_work_thread, &ktls_wq[i],
		    &ktls_proc, &td, 0, 0, "KTLS", "thr_%d", i);
		if (error)
			panic("Can't add KTLS thread %d error %d", i, error);

		/*
		 * Bind threads to cores.  If ktls_bind_threads is >
		 * 1, then we bind to the NUMA domain.
		 */
		if (ktls_bind_threads) {
			if (ktls_bind_threads > 1) {
				pc = pcpu_find(i);
				domain = pc->pc_domain;
				CPU_COPY(&cpuset_domain[domain], &mask);
				count = ktls_domains[domain].count;
				ktls_domains[domain].cpu[count] = i;
				ktls_domains[domain].count++;
			} else {
				CPU_SETOF(i, &mask);
			}
			error = cpuset_setthread(td->td_tid, &mask);
			if (error)
				panic(
				    "Unable to bind KTLS thread for CPU %d error %d",
				    i, error);
		}
		ktls_cpuid_lookup[ktls_number_threads] = i;
		ktls_number_threads++;
	}

	/*
	 * Start an allocation thread per-domain to perform blocking allocations
	 * of 16k physically contiguous TLS crypto destination buffers.
	 */
	if (ktls_sw_buffer_cache) {
		for (domain = 0; domain < vm_ndomains; domain++) {
			if (VM_DOMAIN_EMPTY(domain))
				continue;
			if (CPU_EMPTY(&cpuset_domain[domain]))
				continue;
			error = kproc_kthread_add(ktls_alloc_thread,
			    &ktls_domains[domain], &ktls_proc,
			    &ktls_domains[domain].alloc_td.td,
			    0, 0, "KTLS", "alloc_%d", domain);
			if (error)
				panic("Can't add KTLS alloc thread %d error %d",
				    domain, error);
			CPU_COPY(&cpuset_domain[domain], &mask);
			error = cpuset_setthread(ktls_domains[domain].alloc_td.td->td_tid,
			    &mask);
			if (error)
				panic("Unable to bind KTLS alloc %d error %d",
				    domain, error);
		}
	}

	/*
	 * If we somehow have an empty domain, fall back to choosing
	 * among all KTLS threads.
	 */
	if (ktls_bind_threads > 1) {
		for (i = 0; i < vm_ndomains; i++) {
			if (ktls_domains[i].count == 0) {
				ktls_bind_threads = 1;
				break;
			}
		}
	}

	if (bootverbose)
		printf("KTLS: Initialized %d threads\n", ktls_number_threads);
}
SYSINIT(ktls, SI_SUB_SMP + 1, SI_ORDER_ANY, ktls_init, NULL);

#if defined(INET) || defined(INET6)
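/*
 * Validate the TLS parameters in 'en', allocate a new session, and
 * copy the keys and implicit IV in from userland.  On success the new
 * session is returned in '*tlsp' with a single reference held.
 */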
static int
ktls_create_session(struct socket *so, struct tls_enable *en,
    struct ktls_session **tlsp)
{
	struct ktls_session *tls;
	int error;

	/* Only TLS 1.0 - 1.3 are supported. */
	if (en->tls_vmajor != TLS_MAJOR_VER_ONE)
		return (EINVAL);
	if (en->tls_vminor < TLS_MINOR_VER_ZERO ||
	    en->tls_vminor > TLS_MINOR_VER_THREE)
		return (EINVAL);

	if (en->auth_key_len < 0 || en->auth_key_len > TLS_MAX_PARAM_SIZE)
		return (EINVAL);
	if (en->cipher_key_len < 0 || en->cipher_key_len > TLS_MAX_PARAM_SIZE)
		return (EINVAL);
	if (en->iv_len < 0 || en->iv_len > sizeof(tls->params.iv))
		return (EINVAL);

	/* All supported algorithms require a cipher key. */
	if (en->cipher_key_len == 0)
		return (EINVAL);

	/* No flags are currently supported. */
	if (en->flags != 0)
		return (EINVAL);

	/* Common checks for supported algorithms. */
	switch (en->cipher_algorithm) {
	case CRYPTO_AES_NIST_GCM_16:
		/*
		 * auth_algorithm isn't used, but permit GMAC values
		 * for compatibility.
		 */
		switch (en->auth_algorithm) {
		case 0:
#ifdef COMPAT_FREEBSD12
		/* XXX: Really 13.0-current COMPAT. */
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
#endif
			break;
		default:
			return (EINVAL);
		}
		if (en->auth_key_len != 0)
			return (EINVAL);
		if ((en->tls_vminor == TLS_MINOR_VER_TWO &&
		    en->iv_len != TLS_AEAD_GCM_LEN) ||
		    (en->tls_vminor == TLS_MINOR_VER_THREE &&
		    en->iv_len != TLS_1_3_GCM_IV_LEN))
			return (EINVAL);
		break;
	case CRYPTO_AES_CBC:
		switch (en->auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
			/*
			 * TLS 1.0 requires an implicit IV.  TLS 1.1+
			 * all use explicit IVs.
			 */
			if (en->tls_vminor == TLS_MINOR_VER_ZERO) {
				if (en->iv_len != TLS_CBC_IMPLICIT_IV_LEN)
					return (EINVAL);
				break;
			}

			/* FALLTHROUGH */
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
			/* Ignore any supplied IV. */
			en->iv_len = 0;
			break;
		default:
			return (EINVAL);
		}
		if (en->auth_key_len == 0)
			return (EINVAL);
		break;
	case CRYPTO_CHACHA20_POLY1305:
		if (en->auth_algorithm != 0 || en->auth_key_len != 0)
			return (EINVAL);
		if (en->tls_vminor != TLS_MINOR_VER_TWO &&
		    en->tls_vminor != TLS_MINOR_VER_THREE)
			return (EINVAL);
		if (en->iv_len != TLS_CHACHA20_IV_LEN)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	tls = uma_zalloc(ktls_session_zone, M_WAITOK | M_ZERO);

	counter_u64_add(ktls_offload_active, 1);

	refcount_init(&tls->refcount, 1);
	TASK_INIT(&tls->reset_tag_task, 0, ktls_reset_send_tag, tls);

	tls->wq_index = ktls_get_cpu(so);

	tls->params.cipher_algorithm = en->cipher_algorithm;
	tls->params.auth_algorithm = en->auth_algorithm;
	tls->params.tls_vmajor = en->tls_vmajor;
	tls->params.tls_vminor = en->tls_vminor;
	tls->params.flags = en->flags;
	tls->params.max_frame_len = min(TLS_MAX_MSG_SIZE_V10_2, ktls_maxlen);

	/* Set the header and trailer lengths. */
	tls->params.tls_hlen = sizeof(struct tls_record_layer);
	switch (en->cipher_algorithm) {
	case CRYPTO_AES_NIST_GCM_16:
		/*
		 * TLS 1.2 uses a 4 byte implicit IV with an explicit 8 byte
		 * nonce.  TLS 1.3 uses a 12 byte implicit IV.
		 */
		if (en->tls_vminor < TLS_MINOR_VER_THREE)
			tls->params.tls_hlen += sizeof(uint64_t);
		tls->params.tls_tlen = AES_GMAC_HASH_LEN;
		tls->params.tls_bs = 1;
		break;
	case CRYPTO_AES_CBC:
		switch (en->auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
			if (en->tls_vminor == TLS_MINOR_VER_ZERO) {
				/* Implicit IV, no nonce. */
			} else {
				tls->params.tls_hlen += AES_BLOCK_LEN;
			}
			tls->params.tls_tlen = AES_BLOCK_LEN +
			    SHA1_HASH_LEN;
			break;
		case CRYPTO_SHA2_256_HMAC:
			tls->params.tls_hlen += AES_BLOCK_LEN;
			tls->params.tls_tlen = AES_BLOCK_LEN +
			    SHA2_256_HASH_LEN;
			break;
		case CRYPTO_SHA2_384_HMAC:
			tls->params.tls_hlen += AES_BLOCK_LEN;
			tls->params.tls_tlen = AES_BLOCK_LEN +
			    SHA2_384_HASH_LEN;
			break;
		default:
			panic("invalid hmac");
		}
		tls->params.tls_bs = AES_BLOCK_LEN;
		break;
	case CRYPTO_CHACHA20_POLY1305:
		/*
		 * Chacha20 uses a 12 byte implicit IV.
		 */
		tls->params.tls_tlen = POLY1305_HASH_LEN;
		tls->params.tls_bs = 1;
		break;
	default:
		panic("invalid cipher");
	}

	/*
	 * TLS 1.3 includes optional padding which we do not support,
	 * and also puts the "real" record type at the end of the
	 * encrypted data.
	 */
	if (en->tls_vminor == TLS_MINOR_VER_THREE)
		tls->params.tls_tlen += sizeof(uint8_t);

	KASSERT(tls->params.tls_hlen <= MBUF_PEXT_HDR_LEN,
	    ("TLS header length too long: %d", tls->params.tls_hlen));
	KASSERT(tls->params.tls_tlen <= MBUF_PEXT_TRAIL_LEN,
	    ("TLS trailer length too long: %d", tls->params.tls_tlen));

	if (en->auth_key_len != 0) {
		tls->params.auth_key_len = en->auth_key_len;
		tls->params.auth_key = malloc(en->auth_key_len, M_KTLS,
		    M_WAITOK);
		error = copyin(en->auth_key, tls->params.auth_key,
		    en->auth_key_len);
		if (error)
			goto out;
	}

	tls->params.cipher_key_len = en->cipher_key_len;
	tls->params.cipher_key = malloc(en->cipher_key_len, M_KTLS, M_WAITOK);
	error = copyin(en->cipher_key, tls->params.cipher_key,
	    en->cipher_key_len);
	if (error)
		goto out;

	/*
	 * This holds the implicit portion of the nonce for AEAD
	 * ciphers and the initial implicit IV for TLS 1.0.  The
	 * explicit portions of the IV are generated in ktls_frame().
	 */
	if (en->iv_len != 0) {
		tls->params.iv_len = en->iv_len;
		error = copyin(en->iv, tls->params.iv, en->iv_len);
		if (error)
			goto out;

		/*
		 * For TLS 1.2 with GCM, generate an 8-byte nonce as a
		 * counter to generate unique explicit IVs.
		 *
		 * Store this counter in the last 8 bytes of the IV
		 * array so that it is 8-byte aligned.
		 */
		if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16 &&
		    en->tls_vminor == TLS_MINOR_VER_TWO)
			arc4rand(tls->params.iv + 8, sizeof(uint64_t), 0);
	}

	*tlsp = tls;
	return (0);

out:
	ktls_cleanup(tls);
	return (error);
}
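/*
 * Duplicate an existing session for a mode switch.  The clone deep
 * copies the key material so that the two copies can be freed
 * independently.
 */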
static struct ktls_session *
ktls_clone_session(struct ktls_session *tls)
{
	struct ktls_session *tls_new;

	tls_new = uma_zalloc(ktls_session_zone, M_WAITOK | M_ZERO);

	counter_u64_add(ktls_offload_active, 1);

	refcount_init(&tls_new->refcount, 1);
	TASK_INIT(&tls_new->reset_tag_task, 0, ktls_reset_send_tag, tls_new);

	/* Copy fields from existing session. */
	tls_new->params = tls->params;
	tls_new->wq_index = tls->wq_index;

	/* Deep copy keys. */
	if (tls_new->params.auth_key != NULL) {
		tls_new->params.auth_key = malloc(tls->params.auth_key_len,
		    M_KTLS, M_WAITOK);
		memcpy(tls_new->params.auth_key, tls->params.auth_key,
		    tls->params.auth_key_len);
	}

	tls_new->params.cipher_key = malloc(tls->params.cipher_key_len, M_KTLS,
	    M_WAITOK);
	memcpy(tls_new->params.cipher_key, tls->params.cipher_key,
	    tls->params.cipher_key_len);

	return (tls_new);
}
#endif

static void
ktls_cleanup(struct ktls_session *tls)
{

	counter_u64_add(ktls_offload_active, -1);
	switch (tls->mode) {
	case TCP_TLS_MODE_SW:
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_sw_cbc, -1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_sw_gcm, -1);
			break;
		case CRYPTO_CHACHA20_POLY1305:
			counter_u64_add(ktls_sw_chacha20, -1);
			break;
		}
		ktls_ocf_free(tls);
		break;
	case TCP_TLS_MODE_IFNET:
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_ifnet_cbc, -1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_ifnet_gcm, -1);
			break;
		case CRYPTO_CHACHA20_POLY1305:
			counter_u64_add(ktls_ifnet_chacha20, -1);
			break;
		}
		if (tls->snd_tag != NULL)
			m_snd_tag_rele(tls->snd_tag);
		break;
#ifdef TCP_OFFLOAD
	case TCP_TLS_MODE_TOE:
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_toe_cbc, -1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_toe_gcm, -1);
			break;
		case CRYPTO_CHACHA20_POLY1305:
			counter_u64_add(ktls_toe_chacha20, -1);
			break;
		}
		break;
#endif
	}
	if (tls->params.auth_key != NULL) {
		zfree(tls->params.auth_key, M_KTLS);
		tls->params.auth_key = NULL;
		tls->params.auth_key_len = 0;
	}
	if (tls->params.cipher_key != NULL) {
		zfree(tls->params.cipher_key, M_KTLS);
		tls->params.cipher_key = NULL;
		tls->params.cipher_key_len = 0;
	}
	explicit_bzero(tls->params.iv, sizeof(tls->params.iv));
}

#if defined(INET) || defined(INET6)

#ifdef TCP_OFFLOAD
static int
ktls_try_toe(struct socket *so, struct ktls_session *tls, int direction)
{
	struct inpcb *inp;
	struct tcpcb *tp;
	int error;

	inp = so->so_pcb;
	INP_WLOCK(inp);
	if (inp->inp_flags2 & INP_FREED) {
		INP_WUNLOCK(inp);
		return (ECONNRESET);
	}
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		INP_WUNLOCK(inp);
		return (ECONNRESET);
	}
	if (inp->inp_socket == NULL) {
		INP_WUNLOCK(inp);
		return (ECONNRESET);
	}
	tp = intotcpcb(inp);
	if (!(tp->t_flags & TF_TOE)) {
		INP_WUNLOCK(inp);
		return (EOPNOTSUPP);
	}

	error = tcp_offload_alloc_tls_session(tp, tls, direction);
	INP_WUNLOCK(inp);
	if (error == 0) {
		tls->mode = TCP_TLS_MODE_TOE;
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_toe_cbc, 1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_toe_gcm, 1);
			break;
		case CRYPTO_CHACHA20_POLY1305:
			counter_u64_add(ktls_toe_chacha20, 1);
			break;
		}
	}
	return (error);
}
#endif

/*
 * Common code used when first enabling ifnet TLS on a connection or
 * when allocating a new ifnet TLS session due to a routing change.
 * This function allocates a new TLS send tag on whatever interface
 * the connection is currently routed over.
 */
static int
ktls_alloc_snd_tag(struct inpcb *inp, struct ktls_session *tls, bool force,
    struct m_snd_tag **mstp)
{
	union if_snd_tag_alloc_params params;
	struct ifnet *ifp;
	struct nhop_object *nh;
	struct tcpcb *tp;
	int error;

	INP_RLOCK(inp);
	if (inp->inp_flags2 & INP_FREED) {
		INP_RUNLOCK(inp);
		return (ECONNRESET);
	}
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		INP_RUNLOCK(inp);
		return (ECONNRESET);
	}
	if (inp->inp_socket == NULL) {
		INP_RUNLOCK(inp);
		return (ECONNRESET);
	}
	tp = intotcpcb(inp);

	/*
	 * Check administrative controls on ifnet TLS to determine if
	 * ifnet TLS should be denied.
	 *
	 * - Always permit 'force' requests.
	 * - ktls_ifnet_permitted == 0: always deny.
	 */
	if (!force && ktls_ifnet_permitted == 0) {
		INP_RUNLOCK(inp);
		return (ENXIO);
	}

	/*
	 * XXX: Use the cached route in the inpcb to find the
	 * interface.  This should perhaps instead use
	 * rtalloc1_fib(dst, 0, 0, fibnum).  Since KTLS is only
	 * enabled after a connection has completed key negotiation in
	 * userland, the cached route will be present in practice.
	 */
	nh = inp->inp_route.ro_nh;
	if (nh == NULL) {
		INP_RUNLOCK(inp);
		return (ENXIO);
	}
	ifp = nh->nh_ifp;
	if_ref(ifp);

	/*
	 * Allocate a TLS + ratelimit tag if the connection has an
	 * existing pacing rate.
	 */
	if (tp->t_pacing_rate != -1 &&
	    (ifp->if_capenable & IFCAP_TXTLS_RTLMT) != 0) {
		params.hdr.type = IF_SND_TAG_TYPE_TLS_RATE_LIMIT;
		params.tls_rate_limit.inp = inp;
		params.tls_rate_limit.tls = tls;
		params.tls_rate_limit.max_rate = tp->t_pacing_rate;
	} else {
		params.hdr.type = IF_SND_TAG_TYPE_TLS;
		params.tls.inp = inp;
		params.tls.tls = tls;
	}
	params.hdr.flowid = inp->inp_flowid;
	params.hdr.flowtype = inp->inp_flowtype;
	params.hdr.numa_domain = inp->inp_numa_domain;
	INP_RUNLOCK(inp);

	if ((ifp->if_capenable & IFCAP_MEXTPG) == 0) {
		error = EOPNOTSUPP;
		goto out;
	}
	if (inp->inp_vflag & INP_IPV6) {
		if ((ifp->if_capenable & IFCAP_TXTLS6) == 0) {
			error = EOPNOTSUPP;
			goto out;
		}
	} else {
		if ((ifp->if_capenable & IFCAP_TXTLS4) == 0) {
			error = EOPNOTSUPP;
			goto out;
		}
	}
	error = m_snd_tag_alloc(ifp, &params, mstp);
out:
	if_rele(ifp);
	return (error);
}

static int
ktls_try_ifnet(struct socket *so, struct ktls_session *tls, bool force)
{
	struct m_snd_tag *mst;
	int error;

	error = ktls_alloc_snd_tag(so->so_pcb, tls, force, &mst);
	if (error == 0) {
		tls->mode = TCP_TLS_MODE_IFNET;
		tls->snd_tag = mst;
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_ifnet_cbc, 1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_ifnet_gcm, 1);
			break;
		case CRYPTO_CHACHA20_POLY1305:
			counter_u64_add(ktls_ifnet_chacha20, 1);
			break;
		}
	}
	return (error);
}
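/*
 * Set up software crypto for a session via OCF and account for the
 * new session in the SW cipher counters.
 */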
static int
ktls_try_sw(struct socket *so, struct ktls_session *tls, int direction)
{
	int error;

	error = ktls_ocf_try(so, tls, direction);
	if (error)
		return (error);
	tls->mode = TCP_TLS_MODE_SW;
	switch (tls->params.cipher_algorithm) {
	case CRYPTO_AES_CBC:
		counter_u64_add(ktls_sw_cbc, 1);
		break;
	case CRYPTO_AES_NIST_GCM_16:
		counter_u64_add(ktls_sw_gcm, 1);
		break;
	case CRYPTO_CHACHA20_POLY1305:
		counter_u64_add(ktls_sw_chacha20, 1);
		break;
	}
	return (0);
}

/*
 * KTLS RX stores data in the socket buffer as a list of TLS records,
 * where each record is stored as a control message containing the TLS
 * header followed by data mbufs containing the decrypted data.  This
 * is different from KTLS TX which always uses an mb_ext_pgs mbuf for
 * both encrypted and decrypted data.  TLS records decrypted by a NIC
 * should be queued to the socket buffer as records, but encrypted
 * data which needs to be decrypted by software arrives as a stream of
 * regular mbufs which need to be converted.  In addition, there may
 * already be pending encrypted data in the socket buffer when KTLS RX
 * is enabled.
 *
 * To manage not-yet-decrypted data for KTLS RX, the following scheme
 * is used:
 *
 * - A single chain of NOTREADY mbufs is hung off of sb_mtls.
 *
 * - ktls_check_rx checks this chain of mbufs reading the TLS header
 *   from the first mbuf.  Once all of the data for that TLS record is
 *   queued, the socket is queued to a worker thread.
 *
 * - The worker thread calls ktls_decrypt to decrypt TLS records in
 *   the TLS chain.  Each TLS record is detached from the TLS chain,
 *   decrypted, and inserted into the regular socket buffer chain as
 *   records starting with a control message holding the TLS header and
 *   a chain of mbufs holding the decrypted data.
 */

static void
sb_mark_notready(struct sockbuf *sb)
{
	struct mbuf *m;

	m = sb->sb_mb;
	sb->sb_mtls = m;
	sb->sb_mb = NULL;
	sb->sb_mbtail = NULL;
	sb->sb_lastrecord = NULL;
	for (; m != NULL; m = m->m_next) {
		KASSERT(m->m_nextpkt == NULL, ("%s: m_nextpkt != NULL",
		    __func__));
		KASSERT((m->m_flags & M_NOTAVAIL) == 0, ("%s: mbuf not avail",
		    __func__));
		KASSERT(sb->sb_acc >= m->m_len, ("%s: sb_acc < m->m_len",
		    __func__));
		m->m_flags |= M_NOTREADY;
		sb->sb_acc -= m->m_len;
		sb->sb_tlscc += m->m_len;
		sb->sb_mtlstail = m;
	}
	KASSERT(sb->sb_acc == 0 && sb->sb_tlscc == sb->sb_ccc,
	    ("%s: acc %u tlscc %u ccc %u", __func__, sb->sb_acc, sb->sb_tlscc,
	    sb->sb_ccc));
}
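/*
 * Enable KTLS RX on a connection: validate the parameters, create a
 * session, and hand any data already queued in the receive buffer to
 * the decryption path.
 */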
int
ktls_enable_rx(struct socket *so, struct tls_enable *en)
{
	struct ktls_session *tls;
	int error;

	if (!ktls_offload_enable)
		return (ENOTSUP);
	if (SOLISTENING(so))
		return (EINVAL);

	counter_u64_add(ktls_offload_enable_calls, 1);

	/*
	 * This should always be true since only the TCP socket option
	 * invokes this function.
	 */
	if (so->so_proto->pr_protocol != IPPROTO_TCP)
		return (EINVAL);

	/*
	 * XXX: Don't overwrite existing sessions.  We should permit
	 * this to support rekeying in the future.
	 */
	if (so->so_rcv.sb_tls_info != NULL)
		return (EALREADY);

	if (en->cipher_algorithm == CRYPTO_AES_CBC && !ktls_cbc_enable)
		return (ENOTSUP);

	/* TLS 1.3 is not yet supported. */
	if (en->tls_vmajor == TLS_MAJOR_VER_ONE &&
	    en->tls_vminor == TLS_MINOR_VER_THREE)
		return (ENOTSUP);

	error = ktls_create_session(so, en, &tls);
	if (error)
		return (error);

#ifdef TCP_OFFLOAD
	error = ktls_try_toe(so, tls, KTLS_RX);
	if (error)
#endif
		error = ktls_try_sw(so, tls, KTLS_RX);

	if (error) {
		ktls_cleanup(tls);
		return (error);
	}

	/* Mark the socket as using TLS offload. */
	SOCKBUF_LOCK(&so->so_rcv);
	so->so_rcv.sb_tls_seqno = be64dec(en->rec_seq);
	so->so_rcv.sb_tls_info = tls;
	so->so_rcv.sb_flags |= SB_TLS_RX;

	/* Mark existing data as not ready until it can be decrypted. */
	if (tls->mode != TCP_TLS_MODE_TOE) {
		sb_mark_notready(&so->so_rcv);
		ktls_check_rx(&so->so_rcv);
	}
	SOCKBUF_UNLOCK(&so->so_rcv);

	counter_u64_add(ktls_offload_total, 1);

	return (0);
}
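/*
 * Enable KTLS TX on a connection, preferring TOE, then ifnet, then
 * software crypto for the new session.
 */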
int
ktls_enable_tx(struct socket *so, struct tls_enable *en)
{
	struct ktls_session *tls;
	struct inpcb *inp;
	int error;

	if (!ktls_offload_enable)
		return (ENOTSUP);
	if (SOLISTENING(so))
		return (EINVAL);

	counter_u64_add(ktls_offload_enable_calls, 1);

	/*
	 * This should always be true since only the TCP socket option
	 * invokes this function.
	 */
	if (so->so_proto->pr_protocol != IPPROTO_TCP)
		return (EINVAL);

	/*
	 * XXX: Don't overwrite existing sessions.  We should permit
	 * this to support rekeying in the future.
	 */
	if (so->so_snd.sb_tls_info != NULL)
		return (EALREADY);

	if (en->cipher_algorithm == CRYPTO_AES_CBC && !ktls_cbc_enable)
		return (ENOTSUP);

	/* TLS requires ext pgs */
	if (mb_use_ext_pgs == 0)
		return (ENXIO);

	error = ktls_create_session(so, en, &tls);
	if (error)
		return (error);

	/* Prefer TOE -> ifnet TLS -> software TLS. */
#ifdef TCP_OFFLOAD
	error = ktls_try_toe(so, tls, KTLS_TX);
	if (error)
#endif
		error = ktls_try_ifnet(so, tls, false);
	if (error)
		error = ktls_try_sw(so, tls, KTLS_TX);

	if (error) {
		ktls_cleanup(tls);
		return (error);
	}

	error = sblock(&so->so_snd, SBL_WAIT);
	if (error) {
		ktls_cleanup(tls);
		return (error);
	}

	/*
	 * Write lock the INP when setting sb_tls_info so that
	 * routines in tcp_ratelimit.c can read sb_tls_info while
	 * holding the INP lock.
	 */
	inp = so->so_pcb;
	INP_WLOCK(inp);
	SOCKBUF_LOCK(&so->so_snd);
	so->so_snd.sb_tls_seqno = be64dec(en->rec_seq);
	so->so_snd.sb_tls_info = tls;
	if (tls->mode != TCP_TLS_MODE_SW)
		so->so_snd.sb_flags |= SB_TLS_IFNET;
	SOCKBUF_UNLOCK(&so->so_snd);
	INP_WUNLOCK(inp);
	sbunlock(&so->so_snd);

	counter_u64_add(ktls_offload_total, 1);

	return (0);
}

int
ktls_get_rx_mode(struct socket *so)
{
	struct ktls_session *tls;
	struct inpcb *inp;
	int mode;

	if (SOLISTENING(so))
		return (EINVAL);
	inp = so->so_pcb;
	INP_WLOCK_ASSERT(inp);
	SOCKBUF_LOCK(&so->so_rcv);
	tls = so->so_rcv.sb_tls_info;
	if (tls == NULL)
		mode = TCP_TLS_MODE_NONE;
	else
		mode = tls->mode;
	SOCKBUF_UNLOCK(&so->so_rcv);
	return (mode);
}

int
ktls_get_tx_mode(struct socket *so)
{
	struct ktls_session *tls;
	struct inpcb *inp;
	int mode;

	if (SOLISTENING(so))
		return (EINVAL);
	inp = so->so_pcb;
	INP_WLOCK_ASSERT(inp);
	SOCKBUF_LOCK(&so->so_snd);
	tls = so->so_snd.sb_tls_info;
	if (tls == NULL)
		mode = TCP_TLS_MODE_NONE;
	else
		mode = tls->mode;
	SOCKBUF_UNLOCK(&so->so_snd);
	return (mode);
}

/*
 * Switch between SW and ifnet TLS sessions as requested.
 */
int
ktls_set_tx_mode(struct socket *so, int mode)
{
	struct ktls_session *tls, *tls_new;
	struct inpcb *inp;
	int error;

	if (SOLISTENING(so))
		return (EINVAL);
	switch (mode) {
	case TCP_TLS_MODE_SW:
	case TCP_TLS_MODE_IFNET:
		break;
	default:
		return (EINVAL);
	}

	inp = so->so_pcb;
	INP_WLOCK_ASSERT(inp);
	SOCKBUF_LOCK(&so->so_snd);
	tls = so->so_snd.sb_tls_info;
	if (tls == NULL) {
		SOCKBUF_UNLOCK(&so->so_snd);
		return (0);
	}

	if (tls->mode == mode) {
		SOCKBUF_UNLOCK(&so->so_snd);
		return (0);
	}

	tls = ktls_hold(tls);
	SOCKBUF_UNLOCK(&so->so_snd);
	INP_WUNLOCK(inp);

	tls_new = ktls_clone_session(tls);

	if (mode == TCP_TLS_MODE_IFNET)
		error = ktls_try_ifnet(so, tls_new, true);
	else
		error = ktls_try_sw(so, tls_new, KTLS_TX);
	if (error) {
		counter_u64_add(ktls_switch_failed, 1);
		ktls_free(tls_new);
		ktls_free(tls);
		INP_WLOCK(inp);
		return (error);
	}

	error = sblock(&so->so_snd, SBL_WAIT);
	if (error) {
		counter_u64_add(ktls_switch_failed, 1);
		ktls_free(tls_new);
		ktls_free(tls);
		INP_WLOCK(inp);
		return (error);
	}

	/*
	 * If we raced with another session change, keep the existing
	 * session.
	 */
	if (tls != so->so_snd.sb_tls_info) {
		counter_u64_add(ktls_switch_failed, 1);
		sbunlock(&so->so_snd);
		ktls_free(tls_new);
		ktls_free(tls);
		INP_WLOCK(inp);
		return (EBUSY);
	}

	SOCKBUF_LOCK(&so->so_snd);
	so->so_snd.sb_tls_info = tls_new;
	if (tls_new->mode != TCP_TLS_MODE_SW)
		so->so_snd.sb_flags |= SB_TLS_IFNET;
	SOCKBUF_UNLOCK(&so->so_snd);
	sbunlock(&so->so_snd);

	/*
	 * Drop two references on 'tls'.  The first is for the
	 * ktls_hold() above.  The second drops the reference from the
	 * socket buffer.
	 */
	KASSERT(tls->refcount >= 2, ("too few references on old session"));
	ktls_free(tls);
	ktls_free(tls);

	if (mode == TCP_TLS_MODE_IFNET)
		counter_u64_add(ktls_switch_to_ifnet, 1);
	else
		counter_u64_add(ktls_switch_to_sw, 1);

	INP_WLOCK(inp);
	return (0);
}

/*
 * Try to allocate a new TLS send tag.  This task is scheduled when
 * ip_output detects a route change while trying to transmit a packet
 * holding a TLS record.  If a new tag is allocated, replace the tag
 * in the TLS session.  Subsequent packets on the connection will use
 * the new tag.  If a new tag cannot be allocated, drop the
 * connection.
 */
static void
ktls_reset_send_tag(void *context, int pending)
{
	struct epoch_tracker et;
	struct ktls_session *tls;
	struct m_snd_tag *old, *new;
	struct inpcb *inp;
	struct tcpcb *tp;
	int error;

	MPASS(pending == 1);

	tls = context;
	inp = tls->inp;

	/*
	 * Free the old tag first before allocating a new one.
	 * ip[6]_output_send() will treat a NULL send tag the same as
	 * an ifp mismatch and drop packets until a new tag is
	 * allocated.
	 *
	 * Write-lock the INP when changing tls->snd_tag since
	 * ip[6]_output_send() holds a read-lock when reading the
	 * pointer.
	 */
	INP_WLOCK(inp);
	old = tls->snd_tag;
	tls->snd_tag = NULL;
	INP_WUNLOCK(inp);
	if (old != NULL)
		m_snd_tag_rele(old);

	error = ktls_alloc_snd_tag(inp, tls, true, &new);

	if (error == 0) {
		INP_WLOCK(inp);
		tls->snd_tag = new;
		mtx_pool_lock(mtxpool_sleep, tls);
		tls->reset_pending = false;
		mtx_pool_unlock(mtxpool_sleep, tls);
		if (!in_pcbrele_wlocked(inp))
			INP_WUNLOCK(inp);

		counter_u64_add(ktls_ifnet_reset, 1);

		/*
		 * XXX: Should we kick tcp_output explicitly now that
		 * the send tag is fixed or just rely on timers?
		 */
	} else {
		NET_EPOCH_ENTER(et);
		INP_WLOCK(inp);
		if (!in_pcbrele_wlocked(inp)) {
			if (!(inp->inp_flags & INP_TIMEWAIT) &&
			    !(inp->inp_flags & INP_DROPPED)) {
				tp = intotcpcb(inp);
				CURVNET_SET(tp->t_vnet);
				tp = tcp_drop(tp, ECONNABORTED);
				CURVNET_RESTORE();
				if (tp != NULL)
					INP_WUNLOCK(inp);
				counter_u64_add(ktls_ifnet_reset_dropped, 1);
			} else
				INP_WUNLOCK(inp);
		}
		NET_EPOCH_EXIT(et);

		counter_u64_add(ktls_ifnet_reset_failed, 1);

		/*
		 * Leave reset_pending true to avoid future tasks while
		 * the socket goes away.
		 */
	}

	ktls_free(tls);
}

int
ktls_output_eagain(struct inpcb *inp, struct ktls_session *tls)
{

	if (inp == NULL)
		return (ENOBUFS);

	INP_LOCK_ASSERT(inp);

	/*
	 * See if we should schedule a task to update the send tag for
	 * this session.
	 */
	mtx_pool_lock(mtxpool_sleep, tls);
	if (!tls->reset_pending) {
		(void) ktls_hold(tls);
		in_pcbref(inp);
		tls->inp = inp;
		tls->reset_pending = true;
		taskqueue_enqueue(taskqueue_thread, &tls->reset_tag_task);
	}
	mtx_pool_unlock(mtxpool_sleep, tls);
	return (ENOBUFS);
}

#ifdef RATELIMIT
int
ktls_modify_txrtlmt(struct ktls_session *tls, uint64_t max_pacing_rate)
{
	union if_snd_tag_modify_params params = {
		.rate_limit.max_rate = max_pacing_rate,
		.rate_limit.flags = M_NOWAIT,
	};
	struct m_snd_tag *mst;
	struct ifnet *ifp;

	/* Can't get to the inp, but it should be locked. */
	/* INP_LOCK_ASSERT(inp); */

	MPASS(tls->mode == TCP_TLS_MODE_IFNET);

	if (tls->snd_tag == NULL) {
		/*
		 * Resetting send tag, ignore this change.  The
		 * pending reset may or may not see this updated rate
		 * in the tcpcb.  If it doesn't, we will just lose
		 * this rate change.
		 */
		return (0);
	}

	MPASS(tls->snd_tag != NULL);
	MPASS(tls->snd_tag->type == IF_SND_TAG_TYPE_TLS_RATE_LIMIT);

	mst = tls->snd_tag;
	ifp = mst->ifp;
	return (ifp->if_snd_tag_modify(mst, &params));
}
#endif
#endif

void
ktls_destroy(struct ktls_session *tls)
{

	ktls_cleanup(tls);
	uma_zfree(ktls_session_zone, tls);
}

void
ktls_seq(struct sockbuf *sb, struct mbuf *m)
{

	for (; m != NULL; m = m->m_next) {
		KASSERT((m->m_flags & M_EXTPG) != 0,
		    ("ktls_seq: mapped mbuf %p", m));

		m->m_epg_seqno = sb->sb_tls_seqno;
		sb->sb_tls_seqno++;
	}
}

/*
 * Add TLS framing (headers and trailers) to a chain of mbufs.  Each
 * mbuf in the chain must be an unmapped mbuf.  The payload of the
 * mbuf must be populated with the payload of each TLS record.
 *
 * The record_type argument specifies the TLS record type used when
 * populating the TLS header.
 *
 * The enq_count argument on return is set to the number of pages of
 * payload data for this entire chain that need to be encrypted via SW
 * encryption.  The returned value should be passed to ktls_enqueue
 * when scheduling encryption of this chain of mbufs.  To handle the
 * special case of empty fragments for TLS 1.0 sessions, an empty
 * fragment counts as one page.
 */
void
ktls_frame(struct mbuf *top, struct ktls_session *tls, int *enq_cnt,
    uint8_t record_type)
{
	struct tls_record_layer *tlshdr;
	struct mbuf *m;
	uint64_t *noncep;
	uint16_t tls_len;
	int maxlen;

	maxlen = tls->params.max_frame_len;
	*enq_cnt = 0;
	for (m = top; m != NULL; m = m->m_next) {
		/*
		 * All mbufs in the chain should be TLS records whose
		 * payload does not exceed the maximum frame length.
		 *
		 * Empty TLS records are permitted when using CBC.
		 */
		KASSERT(m->m_len <= maxlen &&
		    (tls->params.cipher_algorithm == CRYPTO_AES_CBC ?
		    m->m_len >= 0 : m->m_len > 0),
		    ("ktls_frame: m %p len %d\n", m, m->m_len));

		/*
		 * TLS frames require unmapped mbufs to store session
		 * info.
		 */
		KASSERT((m->m_flags & M_EXTPG) != 0,
		    ("ktls_frame: mapped mbuf %p (top = %p)\n", m, top));

		tls_len = m->m_len;

		/* Save a reference to the session. */
		m->m_epg_tls = ktls_hold(tls);

		m->m_epg_hdrlen = tls->params.tls_hlen;
		m->m_epg_trllen = tls->params.tls_tlen;
		if (tls->params.cipher_algorithm == CRYPTO_AES_CBC) {
			int bs, delta;

			/*
			 * AES-CBC pads messages to a multiple of the
			 * block size.  Note that the padding is
			 * applied after the digest and the encryption
			 * is done on the "plaintext || mac || padding".
			 * At least one byte of padding is always
			 * present.
			 *
			 * Compute the final trailer length assuming
			 * at most one block of padding.
			 * tls->params.tls_tlen is the maximum
			 * possible trailer length (padding + digest).
			 * delta holds the number of excess padding
			 * bytes if the maximum were used.  Those
			 * extra bytes are removed.
			 */
			bs = tls->params.tls_bs;
			delta = (tls_len + tls->params.tls_tlen) & (bs - 1);
			m->m_epg_trllen -= delta;
		}
		m->m_len += m->m_epg_hdrlen + m->m_epg_trllen;

		/* Populate the TLS header. */
		tlshdr = (void *)m->m_epg_hdr;
		tlshdr->tls_vmajor = tls->params.tls_vmajor;

		/*
		 * TLS 1.3 masquerades as TLS 1.2 with a record type
		 * of TLS_RLTYPE_APP.
		 */
		if (tls->params.tls_vminor == TLS_MINOR_VER_THREE &&
		    tls->params.tls_vmajor == TLS_MAJOR_VER_ONE) {
			tlshdr->tls_vminor = TLS_MINOR_VER_TWO;
			tlshdr->tls_type = TLS_RLTYPE_APP;
			/* save the real record type for later */
			m->m_epg_record_type = record_type;
			m->m_epg_trail[0] = record_type;
		} else {
			tlshdr->tls_vminor = tls->params.tls_vminor;
			tlshdr->tls_type = record_type;
		}
		tlshdr->tls_length = htons(m->m_len - sizeof(*tlshdr));

		/*
		 * Store nonces / explicit IVs after the end of the
		 * TLS header.
		 *
		 * For GCM with TLS 1.2, an 8 byte nonce is copied
		 * from the end of the IV.  The nonce is then
		 * incremented for use by the next record.
		 *
		 * For CBC, a random nonce is inserted for TLS 1.1+.
		 */
		if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16 &&
		    tls->params.tls_vminor == TLS_MINOR_VER_TWO) {
			noncep = (uint64_t *)(tls->params.iv + 8);
			be64enc(tlshdr + 1, *noncep);
			(*noncep)++;
		} else if (tls->params.cipher_algorithm == CRYPTO_AES_CBC &&
		    tls->params.tls_vminor >= TLS_MINOR_VER_ONE)
			arc4rand(tlshdr + 1, AES_BLOCK_LEN, 0);

		/*
		 * When using SW encryption, mark the mbuf not ready.
		 * It will be marked ready via sbready() after the
		 * record has been encrypted.
		 *
		 * When using ifnet TLS, unencrypted TLS records are
		 * sent down the stack to the NIC.
		 */
		if (tls->mode == TCP_TLS_MODE_SW) {
			m->m_flags |= M_NOTREADY;
			if (__predict_false(tls_len == 0)) {
				/* TLS 1.0 empty fragment. */
				m->m_epg_nrdy = 1;
			} else
				m->m_epg_nrdy = m->m_epg_npgs;
			*enq_cnt += m->m_epg_nrdy;
		}
	}
}
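/*
 * Examine the pending not-yet-decrypted data on a KTLS RX socket
 * buffer and, once a complete TLS record has been queued, hand the
 * socket to a worker thread for decryption.
 */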
void
ktls_check_rx(struct sockbuf *sb)
{
	struct tls_record_layer hdr;
	struct ktls_wq *wq;
	struct socket *so;
	bool running;

	SOCKBUF_LOCK_ASSERT(sb);
	KASSERT(sb->sb_flags & SB_TLS_RX, ("%s: sockbuf %p isn't TLS RX",
	    __func__, sb));
	so = __containerof(sb, struct socket, so_rcv);

	if (sb->sb_flags & SB_TLS_RX_RUNNING)
		return;

	/* Is there enough queued for a TLS header? */
	if (sb->sb_tlscc < sizeof(hdr)) {
		if ((sb->sb_state & SBS_CANTRCVMORE) != 0 && sb->sb_tlscc != 0)
			so->so_error = EMSGSIZE;
		return;
	}

	m_copydata(sb->sb_mtls, 0, sizeof(hdr), (void *)&hdr);

	/* Is the entire record queued? */
	if (sb->sb_tlscc < sizeof(hdr) + ntohs(hdr.tls_length)) {
		if ((sb->sb_state & SBS_CANTRCVMORE) != 0)
			so->so_error = EMSGSIZE;
		return;
	}

	sb->sb_flags |= SB_TLS_RX_RUNNING;

	soref(so);
	wq = &ktls_wq[so->so_rcv.sb_tls_info->wq_index];
	mtx_lock(&wq->mtx);
	STAILQ_INSERT_TAIL(&wq->so_head, so, so_ktls_rx_list);
	running = wq->running;
	mtx_unlock(&wq->mtx);
	if (!running)
		wakeup(wq);
	counter_u64_add(ktls_cnt_rx_queued, 1);
}

static struct mbuf *
ktls_detach_record(struct sockbuf *sb, int len)
{
	struct mbuf *m, *n, *top;
	int remain;

	SOCKBUF_LOCK_ASSERT(sb);
	MPASS(len <= sb->sb_tlscc);

	/*
	 * If TLS chain is the exact size of the record,
	 * just grab the whole record.
	 */
	top = sb->sb_mtls;
	if (sb->sb_tlscc == len) {
		sb->sb_mtls = NULL;
		sb->sb_mtlstail = NULL;
		goto out;
	}

	/*
	 * While it would be nice to use m_split() here, we need
	 * to know exactly what m_split() allocates to update the
	 * accounting, so do it inline instead.
	 */
	remain = len;
	for (m = top; remain > m->m_len; m = m->m_next)
		remain -= m->m_len;

	/* Easy case: don't have to split 'm'. */
	if (remain == m->m_len) {
		sb->sb_mtls = m->m_next;
		if (sb->sb_mtls == NULL)
			sb->sb_mtlstail = NULL;
		m->m_next = NULL;
		goto out;
	}

	/*
	 * Need to allocate an mbuf to hold the remainder of 'm'.  Try
	 * with M_NOWAIT first.
	 */
	n = m_get(M_NOWAIT, MT_DATA);
	if (n == NULL) {
		/*
		 * Use M_WAITOK with socket buffer unlocked.  If
		 * 'sb_mtls' changes while the lock is dropped, return
		 * NULL to force the caller to retry.
		 */
		SOCKBUF_UNLOCK(sb);

		n = m_get(M_WAITOK, MT_DATA);

		SOCKBUF_LOCK(sb);
		if (sb->sb_mtls != top) {
			m_free(n);
			return (NULL);
		}
	}
	n->m_flags |= M_NOTREADY;

	/* Store remainder in 'n'. */
	n->m_len = m->m_len - remain;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data + remain;
		mb_dupcl(n, m);
	} else {
		bcopy(mtod(m, caddr_t) + remain, mtod(n, caddr_t), n->m_len);
	}

	/* Trim 'm' and update accounting. */
	m->m_len -= n->m_len;
	sb->sb_tlscc -= n->m_len;
	sb->sb_ccc -= n->m_len;

	/* Account for 'n'. */
	sballoc_ktls_rx(sb, n);

	/* Insert 'n' into the TLS chain. */
	sb->sb_mtls = n;
	n->m_next = m->m_next;
	if (sb->sb_mtlstail == m)
		sb->sb_mtlstail = n;

	/* Detach the record from the TLS chain. */
	m->m_next = NULL;

out:
	MPASS(m_length(top, NULL) == len);
	for (m = top; m != NULL; m = m->m_next)
		sbfree_ktls_rx(sb, m);
	sb->sb_tlsdcc = len;
	sb->sb_ccc += len;
	SBCHECK(sb);
	return (top);
}

static void
ktls_decrypt(struct socket *so)
{
	char tls_header[MBUF_PEXT_HDR_LEN];
	struct ktls_session *tls;
	struct sockbuf *sb;
	struct tls_record_layer *hdr;
	struct tls_get_record tgr;
	struct mbuf *control, *data, *m;
	uint64_t seqno;
	int error, remain, tls_len, trail_len;

	hdr = (struct tls_record_layer *)tls_header;
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	KASSERT(sb->sb_flags & SB_TLS_RX_RUNNING,
	    ("%s: socket %p not running", __func__, so));

	tls = sb->sb_tls_info;
	MPASS(tls != NULL);

	for (;;) {
		/* Is there enough queued for a TLS header? */
		if (sb->sb_tlscc < tls->params.tls_hlen)
			break;

		m_copydata(sb->sb_mtls, 0, tls->params.tls_hlen, tls_header);
		tls_len = sizeof(*hdr) + ntohs(hdr->tls_length);

		if (hdr->tls_vmajor != tls->params.tls_vmajor ||
		    hdr->tls_vminor != tls->params.tls_vminor)
			error = EINVAL;
		else if (tls_len < tls->params.tls_hlen || tls_len >
		    tls->params.tls_hlen + TLS_MAX_MSG_SIZE_V10_2 +
		    tls->params.tls_tlen)
			error = EMSGSIZE;
		else
			error = 0;
		if (__predict_false(error != 0)) {
			/*
			 * We have a corrupted record and are likely
			 * out of sync.  The connection isn't
			 * recoverable at this point, so abort it.
			 */
			SOCKBUF_UNLOCK(sb);
			counter_u64_add(ktls_offload_corrupted_records, 1);

			CURVNET_SET(so->so_vnet);
			so->so_proto->pr_usrreqs->pru_abort(so);
			so->so_error = error;
			CURVNET_RESTORE();
			goto deref;
		}

		/* Is the entire record queued? */
		if (sb->sb_tlscc < tls_len)
			break;

		/*
		 * Split out the portion of the mbuf chain containing
		 * this TLS record.
		 */
		data = ktls_detach_record(sb, tls_len);
		if (data == NULL)
			continue;
		MPASS(sb->sb_tlsdcc == tls_len);

		seqno = sb->sb_tls_seqno;
		sb->sb_tls_seqno++;
		SBCHECK(sb);
		SOCKBUF_UNLOCK(sb);

		error = tls->sw_decrypt(tls, hdr, data, seqno, &trail_len);
		if (error) {
			counter_u64_add(ktls_offload_failed_crypto, 1);

			SOCKBUF_LOCK(sb);
			if (sb->sb_tlsdcc == 0) {
				/*
				 * sbcut/drop/flush discarded these
				 * mbufs.
				 */
				m_freem(data);
				break;
			}

			/*
			 * Drop this TLS record's data, but keep
			 * decrypting subsequent records.
			 */
			sb->sb_ccc -= tls_len;
			sb->sb_tlsdcc = 0;

			CURVNET_SET(so->so_vnet);
			so->so_error = EBADMSG;
			sorwakeup_locked(so);
			CURVNET_RESTORE();

			m_freem(data);

			SOCKBUF_LOCK(sb);
			continue;
		}

		/* Allocate the control mbuf. */
		tgr.tls_type = hdr->tls_type;
		tgr.tls_vmajor = hdr->tls_vmajor;
		tgr.tls_vminor = hdr->tls_vminor;
		tgr.tls_length = htobe16(tls_len - tls->params.tls_hlen -
		    trail_len);
		control = sbcreatecontrol_how(&tgr, sizeof(tgr),
		    TLS_GET_RECORD, IPPROTO_TCP, M_WAITOK);

		SOCKBUF_LOCK(sb);
		if (sb->sb_tlsdcc == 0) {
			/* sbcut/drop/flush discarded these mbufs. */
			MPASS(sb->sb_tlscc == 0);
			m_freem(data);
			m_freem(control);
			break;
		}

		/*
		 * Clear the 'dcc' accounting in preparation for
		 * adding the decrypted record.
		 */
		sb->sb_ccc -= tls_len;
		sb->sb_tlsdcc = 0;
		SBCHECK(sb);

		/* If there is no payload, drop all of the data. */
		if (tgr.tls_length == htobe16(0)) {
			m_freem(data);
			data = NULL;
		} else {
			/* Trim header. */
			remain = tls->params.tls_hlen;
			while (remain > 0) {
				if (data->m_len > remain) {
					data->m_data += remain;
					data->m_len -= remain;
					break;
				}
				remain -= data->m_len;
				data = m_free(data);
			}

			/* Trim trailer and clear M_NOTREADY. */
			remain = be16toh(tgr.tls_length);
			m = data;
			for (m = data; remain > m->m_len; m = m->m_next) {
				m->m_flags &= ~M_NOTREADY;
				remain -= m->m_len;
			}
			m->m_len = remain;
			m_freem(m->m_next);
			m->m_next = NULL;
			m->m_flags &= ~M_NOTREADY;

			/* Set EOR on the final mbuf. */
			m->m_flags |= M_EOR;
		}

		sbappendcontrol_locked(sb, data, control, 0);
	}

	sb->sb_flags &= ~SB_TLS_RX_RUNNING;

	if ((sb->sb_state & SBS_CANTRCVMORE) != 0 && sb->sb_tlscc > 0)
		so->so_error = EMSGSIZE;

	sorwakeup_locked(so);

deref:
	SOCKBUF_UNLOCK_ASSERT(sb);

	CURVNET_SET(so->so_vnet);
	SOCK_LOCK(so);
	sorele(so);
	CURVNET_RESTORE();
}

void
ktls_enqueue_to_free(struct mbuf *m)
{
	struct ktls_wq *wq;
	bool running;

	/* Mark it for freeing. */
	m->m_epg_flags |= EPG_FLAG_2FREE;
	wq = &ktls_wq[m->m_epg_tls->wq_index];
	mtx_lock(&wq->mtx);
	STAILQ_INSERT_TAIL(&wq->m_head, m, m_epg_stailq);
	running = wq->running;
	mtx_unlock(&wq->mtx);
	if (!running)
		wakeup(wq);
}
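/*
 * Try to allocate a physically contiguous destination buffer for SW
 * encryption from the buffer zone; on failure, wake the per-domain
 * allocation thread, which performs the blocking allocations that
 * refill the cache.
 */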

static void *
ktls_buffer_alloc(struct ktls_wq *wq, struct mbuf *m)
{
	void *buf;
	int domain, running;

	if (m->m_epg_npgs <= 2)
		return (NULL);
	if (ktls_buffer_zone == NULL)
		return (NULL);
	if ((u_int)(ticks - wq->lastallocfail) < hz) {
		/*
		 * Rate-limit allocation attempts after a failure.
		 * ktls_buffer_import() will acquire a per-domain mutex to check
		 * the free page queues and may fail consistently if memory is
		 * fragmented.
		 */
		return (NULL);
	}
	buf = uma_zalloc(ktls_buffer_zone, M_NOWAIT | M_NORECLAIM);
	if (buf == NULL) {
		domain = PCPU_GET(domain);
		wq->lastallocfail = ticks;

		/*
		 * Note that this check is racy, but the races are
		 * harmless: at worst we either deliver a spurious
		 * wakeup, if multiple threads fail allocations before
		 * the alloc thread wakes, or we wait an extra second
		 * after observing a stale running == true.
		 */
		if (!VM_DOMAIN_EMPTY(domain)) {
			running = atomic_load_int(&ktls_domains[domain].alloc_td.running);
			if (!running)
				wakeup(&ktls_domains[domain].alloc_td);
		}
	}
	return (buf);
}

static int
ktls_encrypt_record(struct ktls_wq *wq, struct mbuf *m,
    struct ktls_session *tls, struct ktls_ocf_encrypt_state *state)
{
	vm_page_t pg;
	int error, i, len, off;

	KASSERT((m->m_flags & (M_EXTPG | M_NOTREADY)) == (M_EXTPG | M_NOTREADY),
	    ("%p not unready & nomap mbuf\n", m));
	KASSERT(ptoa(m->m_epg_npgs) <= ktls_maxlen,
	    ("page count %d larger than maximum frame length %d", m->m_epg_npgs,
	    ktls_maxlen));

	/* Anonymous mbufs are encrypted in place. */
	if ((m->m_epg_flags & EPG_FLAG_ANON) != 0)
		return (tls->sw_encrypt(state, tls, m, NULL, 0));

	/*
	 * For file-backed mbufs (from sendfile), anonymous wired
	 * pages are allocated and used as the encryption destination.
	 */
	if ((state->cbuf = ktls_buffer_alloc(wq, m)) != NULL) {
		len = ptoa(m->m_epg_npgs - 1) + m->m_epg_last_len -
		    m->m_epg_1st_off;
		state->dst_iov[0].iov_base = (char *)state->cbuf +
		    m->m_epg_1st_off;
		state->dst_iov[0].iov_len = len;
		state->parray[0] = DMAP_TO_PHYS((vm_offset_t)state->cbuf);
		i = 1;
	} else {
		off = m->m_epg_1st_off;
		for (i = 0; i < m->m_epg_npgs; i++, off = 0) {
			do {
				pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
				    VM_ALLOC_NOOBJ | VM_ALLOC_NODUMP |
				    VM_ALLOC_WIRED | VM_ALLOC_WAITFAIL);
			} while (pg == NULL);

			len = m_epg_pagelen(m, i, off);
			state->parray[i] = VM_PAGE_TO_PHYS(pg);
			state->dst_iov[i].iov_base =
			    (char *)PHYS_TO_DMAP(state->parray[i]) + off;
			state->dst_iov[i].iov_len = len;
		}
	}
	KASSERT(i + 1 <= nitems(state->dst_iov), ("dst_iov is too small"));
	state->dst_iov[i].iov_base = m->m_epg_trail;
	state->dst_iov[i].iov_len = m->m_epg_trllen;

	error = tls->sw_encrypt(state, tls, m, state->dst_iov, i + 1);

	if (__predict_false(error != 0)) {
		/* Free the anonymous pages. */
		if (state->cbuf != NULL)
			uma_zfree(ktls_buffer_zone, state->cbuf);
		else {
			for (i = 0; i < m->m_epg_npgs; i++) {
				pg = PHYS_TO_VM_PAGE(state->parray[i]);
				(void)vm_page_unwire_noq(pg);
				vm_page_free(pg);
			}
		}
	}
	return (error);
}
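
/*
 * Worked example for the contiguous-buffer length computed above (the
 * values are hypothetical; 4 KB pages assumed): with m_epg_npgs = 5,
 * m_epg_1st_off = 512 and m_epg_last_len = 100, the record body spans
 *
 *	len = ptoa(5 - 1) + 100 - 512 = 16384 + 100 - 512 = 15972
 *
 * bytes: four full pages beyond the first, plus the partial last page,
 * minus the unused prefix of the first page.  Writing the output at
 * cbuf + m_epg_1st_off keeps the ciphertext at the same page offsets
 * as the source, so the existing m_epg_1st_off/m_epg_last_len values
 * remain valid for the replacement pages installed later by
 * ktls_finish_nonanon().
 */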

void
ktls_enqueue(struct mbuf *m, struct socket *so, int page_count)
{
	struct ktls_wq *wq;
	bool running;

	KASSERT(((m->m_flags & (M_EXTPG | M_NOTREADY)) ==
	    (M_EXTPG | M_NOTREADY)),
	    ("ktls_enqueue: %p not unready & nomap mbuf\n", m));
	KASSERT(page_count != 0, ("enqueueing TLS mbuf with zero page count"));

	KASSERT(m->m_epg_tls->mode == TCP_TLS_MODE_SW, ("!SW TLS mbuf"));

	m->m_epg_enc_cnt = page_count;

	/*
	 * Save a pointer to the socket.  The caller is responsible
	 * for taking an additional reference via soref().
	 */
	m->m_epg_so = so;

	wq = &ktls_wq[m->m_epg_tls->wq_index];
	mtx_lock(&wq->mtx);
	STAILQ_INSERT_TAIL(&wq->m_head, m, m_epg_stailq);
	running = wq->running;
	mtx_unlock(&wq->mtx);
	if (!running)
		wakeup(wq);
	counter_u64_add(ktls_cnt_tx_queued, 1);
}

/*
 * Once a file-backed mbuf (from sendfile) has been encrypted, free
 * the pages from the file and replace them with the anonymous pages
 * allocated in ktls_encrypt_record().
 */
static void
ktls_finish_nonanon(struct mbuf *m, struct ktls_ocf_encrypt_state *state)
{
	int i;

	MPASS((m->m_epg_flags & EPG_FLAG_ANON) == 0);

	/* Free the old pages. */
	m->m_ext.ext_free(m);

	/* Replace them with the new pages. */
	if (state->cbuf != NULL) {
		for (i = 0; i < m->m_epg_npgs; i++)
			m->m_epg_pa[i] = state->parray[0] + ptoa(i);

		/* Contig pages should go back to the cache. */
		m->m_ext.ext_free = ktls_free_mext_contig;
	} else {
		for (i = 0; i < m->m_epg_npgs; i++)
			m->m_epg_pa[i] = state->parray[i];

		/* Use the basic free routine. */
		m->m_ext.ext_free = mb_free_mext_pgs;
	}

	/* Pages are now writable. */
	m->m_epg_flags |= EPG_FLAG_ANON;
}
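
/*
 * Illustrative caller pattern for ktls_enqueue() (a sketch only; the
 * real callers live in the socket send and sendfile paths, and 'npages'
 * here is a hypothetical count of the not-ready pages being queued):
 *
 *	soref(so);			(extra ref for the worker)
 *	ktls_enqueue(m, so, npages);	(hand off to the work queue)
 *
 * The worker releases that reference via sorele() in ktls_encrypt() or
 * ktls_encrypt_cb() once it has called pru_ready() or aborted the
 * socket.
 */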

static __noinline void
ktls_encrypt(struct ktls_wq *wq, struct mbuf *top)
{
	struct ktls_ocf_encrypt_state state;
	struct ktls_session *tls;
	struct socket *so;
	struct mbuf *m;
	int error, npages, total_pages;

	so = top->m_epg_so;
	tls = top->m_epg_tls;
	KASSERT(tls != NULL, ("tls = NULL, top = %p\n", top));
	KASSERT(so != NULL, ("so = NULL, top = %p\n", top));
#ifdef INVARIANTS
	top->m_epg_so = NULL;
#endif
	total_pages = top->m_epg_enc_cnt;
	npages = 0;

	/*
	 * Encrypt the TLS records in the chain of mbufs starting with
	 * 'top'.  'total_pages' gives us a total count of pages and is
	 * used to know when we have finished encrypting the TLS
	 * records originally queued with 'top'.
	 *
	 * NB: These mbufs are queued in the socket buffer and
	 * 'm_next' is traversing the mbufs in the socket buffer.  The
	 * socket buffer lock is not held while traversing this chain.
	 * Since the mbufs are all marked M_NOTREADY their 'm_next'
	 * pointers should be stable.  However, the 'm_next' of the
	 * last mbuf encrypted is not necessarily NULL.  It can point
	 * to other mbufs appended while 'top' was on the TLS work
	 * queue.
	 *
	 * Each mbuf holds an entire TLS record.
	 */
	error = 0;
	for (m = top; npages != total_pages; m = m->m_next) {
		KASSERT(m->m_epg_tls == tls,
		    ("different TLS sessions in a single mbuf chain: %p vs %p",
		    tls, m->m_epg_tls));
		KASSERT(npages + m->m_epg_npgs <= total_pages,
		    ("page count mismatch: top %p, total_pages %d, m %p", top,
		    total_pages, m));

		error = ktls_encrypt_record(wq, m, tls, &state);
		if (error) {
			counter_u64_add(ktls_offload_failed_crypto, 1);
			break;
		}

		if ((m->m_epg_flags & EPG_FLAG_ANON) == 0)
			ktls_finish_nonanon(m, &state);

		npages += m->m_epg_nrdy;

		/*
		 * Drop a reference to the session now that it is no
		 * longer needed.  Existing code depends on encrypted
		 * records having no associated session vs
		 * yet-to-be-encrypted records having an associated
		 * session.
		 */
		m->m_epg_tls = NULL;
		ktls_free(tls);
	}

	CURVNET_SET(so->so_vnet);
	if (error == 0) {
		(void)(*so->so_proto->pr_usrreqs->pru_ready)(so, top, npages);
	} else {
		so->so_proto->pr_usrreqs->pru_abort(so);
		so->so_error = EIO;
		mb_free_notready(top, total_pages);
	}

	SOCK_LOCK(so);
	sorele(so);
	CURVNET_RESTORE();
}

void
ktls_encrypt_cb(struct ktls_ocf_encrypt_state *state, int error)
{
	struct ktls_session *tls;
	struct socket *so;
	struct mbuf *m;
	int npages;

	m = state->m;

	if ((m->m_epg_flags & EPG_FLAG_ANON) == 0)
		ktls_finish_nonanon(m, state);

	so = state->so;
	free(state, M_KTLS);

	/*
	 * Drop a reference to the session now that it is no longer
	 * needed.  Existing code depends on encrypted records having
	 * no associated session vs yet-to-be-encrypted records having
	 * an associated session.
	 */
	tls = m->m_epg_tls;
	m->m_epg_tls = NULL;
	ktls_free(tls);

	if (error != 0)
		counter_u64_add(ktls_offload_failed_crypto, 1);

	CURVNET_SET(so->so_vnet);
	npages = m->m_epg_nrdy;

	if (error == 0) {
		(void)(*so->so_proto->pr_usrreqs->pru_ready)(so, m, npages);
	} else {
		so->so_proto->pr_usrreqs->pru_abort(so);
		so->so_error = EIO;
		mb_free_notready(m, npages);
	}

	SOCK_LOCK(so);
	sorele(so);
	CURVNET_RESTORE();
}

/*
 * Similar to ktls_encrypt, but used with asynchronous OCF backends
 * (coprocessors) where encryption does not use host CPU resources and
 * it can be beneficial to queue more requests than CPUs.
 */
static __noinline void
ktls_encrypt_async(struct ktls_wq *wq, struct mbuf *top)
{
	struct ktls_ocf_encrypt_state *state;
	struct ktls_session *tls;
	struct socket *so;
	struct mbuf *m, *n;
	int error, mpages, npages, total_pages;

	so = top->m_epg_so;
	tls = top->m_epg_tls;
	KASSERT(tls != NULL, ("tls = NULL, top = %p\n", top));
	KASSERT(so != NULL, ("so = NULL, top = %p\n", top));
#ifdef INVARIANTS
	top->m_epg_so = NULL;
#endif
	total_pages = top->m_epg_enc_cnt;
	npages = 0;

	error = 0;
	for (m = top; npages != total_pages; m = n) {
		KASSERT(m->m_epg_tls == tls,
		    ("different TLS sessions in a single mbuf chain: %p vs %p",
		    tls, m->m_epg_tls));
		KASSERT(npages + m->m_epg_npgs <= total_pages,
		    ("page count mismatch: top %p, total_pages %d, m %p", top,
		    total_pages, m));

		state = malloc(sizeof(*state), M_KTLS, M_WAITOK | M_ZERO);
		soref(so);
		state->so = so;
		state->m = m;

		mpages = m->m_epg_nrdy;
		n = m->m_next;

		error = ktls_encrypt_record(wq, m, tls, state);
		if (error) {
			counter_u64_add(ktls_offload_failed_crypto, 1);
			free(state, M_KTLS);
			CURVNET_SET(so->so_vnet);
			SOCK_LOCK(so);
			sorele(so);
			CURVNET_RESTORE();
			break;
		}

		npages += mpages;
	}

	CURVNET_SET(so->so_vnet);
	if (error != 0) {
		so->so_proto->pr_usrreqs->pru_abort(so);
		so->so_error = EIO;
		mb_free_notready(m, total_pages - npages);
	}

	SOCK_LOCK(so);
	sorele(so);
	CURVNET_RESTORE();
}
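
/*
 * The synchronous and asynchronous paths differ mainly in where the
 * completion work runs.  A rough sketch of the two flows (illustrative
 * summary, not code):
 *
 *	sync:	ktls_encrypt()
 *		    -> ktls_encrypt_record()	(blocks until done)
 *		    -> ktls_finish_nonanon()	(file-backed mbufs only)
 *		    -> pru_ready()
 *
 *	async:	ktls_encrypt_async()
 *		    -> ktls_encrypt_record()	(submits to OCF, returns)
 *		later, from the OCF completion context:
 *		ktls_encrypt_cb()
 *		    -> ktls_finish_nonanon()	(file-backed mbufs only)
 *		    -> pru_ready()
 *
 * This is why the async path heap-allocates its encrypt state and takes
 * a socket reference per record: each record's completion is delivered
 * independently, possibly after ktls_encrypt_async() has returned.
 */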

static void
ktls_alloc_thread(void *ctx)
{
	struct ktls_domain_info *ktls_domain = ctx;
	struct ktls_alloc_thread *sc = &ktls_domain->alloc_td;
	void **buf;
	struct sysctl_oid *oid;
	char name[80];
	int i, nbufs;

	curthread->td_domain.dr_policy =
	    DOMAINSET_PREF(PCPU_GET(domain));
	snprintf(name, sizeof(name), "domain%d", PCPU_GET(domain));
	if (bootverbose)
		printf("Starting KTLS alloc thread for domain %d\n",
		    PCPU_GET(domain));
	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(_kern_ipc_tls), OID_AUTO,
	    name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, "allocs",
	    CTLFLAG_RD, &sc->allocs, 0, "buffers allocated");
	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, "wakeups",
	    CTLFLAG_RD, &sc->wakeups, 0, "thread wakeups");
	SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, "running",
	    CTLFLAG_RD, &sc->running, 0, "thread running");

	buf = NULL;
	nbufs = 0;
	for (;;) {
		atomic_store_int(&sc->running, 0);
		tsleep(sc, PZERO | PNOLOCK, "-", 0);
		atomic_store_int(&sc->running, 1);
		sc->wakeups++;
		if (nbufs != ktls_max_alloc) {
			free(buf, M_KTLS);
			nbufs = atomic_load_int(&ktls_max_alloc);
			buf = malloc(sizeof(void *) * nbufs, M_KTLS,
			    M_WAITOK | M_ZERO);
		}
		/*
		 * Below we allocate nbufs with different allocation
		 * flags than we use when allocating normally during
		 * encryption in the ktls worker thread.  We specify
		 * M_NORECLAIM in the worker thread.  However, we omit
		 * that flag here and add M_WAITOK so that the VM
		 * system is permitted to perform expensive work to
		 * defragment memory.  We do this here, as it does not
		 * matter if this thread blocks.  If we block a ktls
		 * worker thread, we risk developing backlogs of
		 * buffers to be encrypted, leading to surges of
		 * traffic and potential NIC output drops.
		 */
		for (i = 0; i < nbufs; i++) {
			buf[i] = uma_zalloc(ktls_buffer_zone, M_WAITOK);
			sc->allocs++;
		}
		for (i = 0; i < nbufs; i++) {
			uma_zfree(ktls_buffer_zone, buf[i]);
			buf[i] = NULL;
		}
	}
}
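
/*
 * A worked example of the refill above (example figures, assuming
 * ktls_max_alloc = 128 and 16 KB ktls_buffer_zone items): one wakeup
 * allocates and immediately frees up to
 *
 *	128 * 16384 bytes = 2 MiB
 *
 * per domain.  The transient allocate-then-free cycle is the point:
 * the M_WAITOK allocations may sleep while the VM system defragments
 * memory, and the freed buffers land in UMA's caches, where the worker
 * threads' cheap M_NOWAIT | M_NORECLAIM allocations can then find
 * them.
 */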

static void
ktls_work_thread(void *ctx)
{
	struct ktls_wq *wq = ctx;
	struct mbuf *m, *n;
	struct socket *so, *son;
	STAILQ_HEAD(, mbuf) local_m_head;
	STAILQ_HEAD(, socket) local_so_head;

	if (ktls_bind_threads > 1) {
		curthread->td_domain.dr_policy =
		    DOMAINSET_PREF(PCPU_GET(domain));
	}
#if defined(__aarch64__) || defined(__amd64__) || defined(__i386__)
	fpu_kern_thread(0);
#endif
	for (;;) {
		mtx_lock(&wq->mtx);
		while (STAILQ_EMPTY(&wq->m_head) &&
		    STAILQ_EMPTY(&wq->so_head)) {
			wq->running = false;
			mtx_sleep(wq, &wq->mtx, 0, "-", 0);
			wq->running = true;
		}

		STAILQ_INIT(&local_m_head);
		STAILQ_CONCAT(&local_m_head, &wq->m_head);
		STAILQ_INIT(&local_so_head);
		STAILQ_CONCAT(&local_so_head, &wq->so_head);
		mtx_unlock(&wq->mtx);

		STAILQ_FOREACH_SAFE(m, &local_m_head, m_epg_stailq, n) {
			if (m->m_epg_flags & EPG_FLAG_2FREE) {
				ktls_free(m->m_epg_tls);
				m_free_raw(m);
			} else {
				if (m->m_epg_tls->sync_dispatch)
					ktls_encrypt(wq, m);
				else
					ktls_encrypt_async(wq, m);
				counter_u64_add(ktls_cnt_tx_queued, -1);
			}
		}

		STAILQ_FOREACH_SAFE(so, &local_so_head, so_ktls_rx_list, son) {
			ktls_decrypt(so);
			counter_u64_add(ktls_cnt_rx_queued, -1);
		}
	}
}

#if defined(INET) || defined(INET6)
static void
ktls_disable_ifnet_help(void *context, int pending __unused)
{
	struct ktls_session *tls;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct socket *so;
	int err;

	tls = context;
	inp = tls->inp;
	if (inp == NULL)
		return;
	INP_WLOCK(inp);
	so = inp->inp_socket;
	MPASS(so != NULL);
	if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) ||
	    (inp->inp_flags2 & INP_FREED)) {
		goto out;
	}

	if (so->so_snd.sb_tls_info != NULL)
		err = ktls_set_tx_mode(so, TCP_TLS_MODE_SW);
	else
		err = ENXIO;
	if (err == 0) {
		counter_u64_add(ktls_ifnet_disable_ok, 1);
		/* ktls_set_tx_mode() drops inp wlock, so recheck flags */
		if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) == 0 &&
		    (inp->inp_flags2 & INP_FREED) == 0 &&
		    (tp = intotcpcb(inp)) != NULL &&
		    tp->t_fb->tfb_hwtls_change != NULL)
			(*tp->t_fb->tfb_hwtls_change)(tp, 0);
	} else {
		counter_u64_add(ktls_ifnet_disable_fail, 1);
	}

out:
	SOCK_LOCK(so);
	sorele(so);
	if (!in_pcbrele_wlocked(inp))
		INP_WUNLOCK(inp);
	ktls_free(tls);
}

/*
 * Called when re-transmits are becoming a substantial portion of the
 * sends on this connection.  When this happens, we transition the
 * connection to software TLS.  This is needed because most inline TLS
 * NICs keep crypto state only for in-order transmits, so to handle a
 * TCP rexmit (which is out-of-order), the NIC must re-DMA the entire
 * TLS record up to and including the current segment.  When
 * re-transmitting the last ~1448 byte segment of a 16KB TLS record, we
 * could thus wind up re-DMA'ing an order of magnitude more data than
 * we are sending (16384 / 1448, roughly 11x).  This can cause the PCIe
 * link to saturate well before the network, leading to output drops
 * and a general loss of capacity.
 */
void
ktls_disable_ifnet(void *arg)
{
	struct tcpcb *tp;
	struct inpcb *inp;
	struct socket *so;
	struct ktls_session *tls;

	tp = arg;
	inp = tp->t_inpcb;
	INP_WLOCK_ASSERT(inp);
	so = inp->inp_socket;
	SOCK_LOCK(so);
	tls = so->so_snd.sb_tls_info;
	if (tls->disable_ifnet_pending) {
		SOCK_UNLOCK(so);
		return;
	}

	/*
	 * Note that disable_ifnet_pending is never cleared: ifnet TLS
	 * can be disabled at most once per session, so there is no
	 * need to do it again.
	 */
	(void)ktls_hold(tls);
	in_pcbref(inp);
	soref(so);
	tls->disable_ifnet_pending = true;
	tls->inp = inp;
	SOCK_UNLOCK(so);
	TASK_INIT(&tls->disable_ifnet_task, 0, ktls_disable_ifnet_help, tls);
	(void)taskqueue_enqueue(taskqueue_thread, &tls->disable_ifnet_task);
}
#endif
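
/*
 * Observability note (illustrative, not exhaustive): the counters
 * updated in this file are exported via sysctl under
 * kern.ipc.tls.stats, so the behavior of the paths above can be
 * watched from userland, e.g.:
 *
 *	sysctl kern.ipc.tls.stats.corrupted_records
 *	sysctl kern.ipc.tls.stats.failed_crypto
 *	sysctl kern.ipc.tls.stats.sw_tx_inqueue
 *
 * The exact set of OIDs depends on the kernel version and options.
 */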