1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2014-2019 Netflix Inc. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 #include "opt_inet.h" 32 #include "opt_inet6.h" 33 #include "opt_kern_tls.h" 34 #include "opt_ratelimit.h" 35 #include "opt_rss.h" 36 37 #include <sys/param.h> 38 #include <sys/kernel.h> 39 #include <sys/domainset.h> 40 #include <sys/endian.h> 41 #include <sys/ktls.h> 42 #include <sys/lock.h> 43 #include <sys/mbuf.h> 44 #include <sys/mutex.h> 45 #include <sys/rmlock.h> 46 #include <sys/proc.h> 47 #include <sys/protosw.h> 48 #include <sys/refcount.h> 49 #include <sys/smp.h> 50 #include <sys/socket.h> 51 #include <sys/socketvar.h> 52 #include <sys/sysctl.h> 53 #include <sys/taskqueue.h> 54 #include <sys/kthread.h> 55 #include <sys/uio.h> 56 #include <sys/vmmeter.h> 57 #if defined(__aarch64__) || defined(__amd64__) || defined(__i386__) 58 #include <machine/pcb.h> 59 #endif 60 #include <machine/vmparam.h> 61 #include <net/if.h> 62 #include <net/if_var.h> 63 #ifdef RSS 64 #include <net/netisr.h> 65 #include <net/rss_config.h> 66 #endif 67 #include <net/route.h> 68 #include <net/route/nhop.h> 69 #if defined(INET) || defined(INET6) 70 #include <netinet/in.h> 71 #include <netinet/in_pcb.h> 72 #endif 73 #include <netinet/tcp_var.h> 74 #ifdef TCP_OFFLOAD 75 #include <netinet/tcp_offload.h> 76 #endif 77 #include <opencrypto/cryptodev.h> 78 #include <opencrypto/ktls.h> 79 #include <vm/uma_dbg.h> 80 #include <vm/vm.h> 81 #include <vm/vm_pageout.h> 82 #include <vm/vm_page.h> 83 #include <vm/vm_pagequeue.h> 84 85 struct ktls_wq { 86 struct mtx mtx; 87 STAILQ_HEAD(, mbuf) m_head; 88 STAILQ_HEAD(, socket) so_head; 89 bool running; 90 int lastallocfail; 91 } __aligned(CACHE_LINE_SIZE); 92 93 struct ktls_alloc_thread { 94 uint64_t wakeups; 95 uint64_t allocs; 96 struct thread *td; 97 int running; 98 }; 99 100 struct ktls_domain_info { 101 int count; 102 int cpu[MAXCPU]; 103 struct ktls_alloc_thread alloc_td; 104 }; 105 106 struct ktls_domain_info ktls_domains[MAXMEMDOM]; 107 static struct ktls_wq *ktls_wq; 108 static struct proc *ktls_proc; 109 static uma_zone_t ktls_session_zone; 110 static uma_zone_t ktls_buffer_zone; 111 static uint16_t ktls_cpuid_lookup[MAXCPU]; 112 113 SYSCTL_NODE(_kern_ipc, OID_AUTO, tls, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 114 "Kernel TLS offload"); 115 SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 116 "Kernel TLS offload stats"); 117 118 #ifdef RSS 119 static int ktls_bind_threads = 1; 120 #else 121 static int ktls_bind_threads; 122 #endif 123 SYSCTL_INT(_kern_ipc_tls, OID_AUTO, bind_threads, CTLFLAG_RDTUN, 124 &ktls_bind_threads, 0, 125 "Bind crypto threads to cores (1) or cores and domains (2) at boot"); 126 127 static u_int ktls_maxlen = 16384; 128 SYSCTL_UINT(_kern_ipc_tls, OID_AUTO, maxlen, CTLFLAG_RDTUN, 129 &ktls_maxlen, 0, "Maximum TLS record size"); 130 131 static int ktls_number_threads; 132 SYSCTL_INT(_kern_ipc_tls_stats, OID_AUTO, threads, CTLFLAG_RD, 133 &ktls_number_threads, 0, 134 "Number of TLS threads in thread-pool"); 135 136 unsigned int ktls_ifnet_max_rexmit_pct = 2; 137 SYSCTL_UINT(_kern_ipc_tls, OID_AUTO, ifnet_max_rexmit_pct, CTLFLAG_RWTUN, 138 &ktls_ifnet_max_rexmit_pct, 2, 139 "Max percent bytes retransmitted before ifnet TLS is disabled"); 140 141 static bool ktls_offload_enable; 142 SYSCTL_BOOL(_kern_ipc_tls, OID_AUTO, enable, CTLFLAG_RWTUN, 143 &ktls_offload_enable, 0, 144 "Enable support for kernel TLS offload"); 145 146 static bool ktls_cbc_enable = true; 147 SYSCTL_BOOL(_kern_ipc_tls, OID_AUTO, cbc_enable, CTLFLAG_RWTUN, 148 &ktls_cbc_enable, 1, 149 "Enable Support of AES-CBC crypto for kernel TLS"); 150 151 static bool ktls_sw_buffer_cache = true; 152 SYSCTL_BOOL(_kern_ipc_tls, OID_AUTO, sw_buffer_cache, CTLFLAG_RDTUN, 153 &ktls_sw_buffer_cache, 1, 154 "Enable caching of output buffers for SW encryption"); 155 156 static int ktls_max_alloc = 128; 157 SYSCTL_INT(_kern_ipc_tls, OID_AUTO, max_alloc, CTLFLAG_RWTUN, 158 &ktls_max_alloc, 128, 159 "Max number of 16k buffers to allocate in thread context"); 160 161 static COUNTER_U64_DEFINE_EARLY(ktls_tasks_active); 162 SYSCTL_COUNTER_U64(_kern_ipc_tls, OID_AUTO, tasks_active, CTLFLAG_RD, 163 &ktls_tasks_active, "Number of active tasks"); 164 165 static COUNTER_U64_DEFINE_EARLY(ktls_cnt_tx_queued); 166 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, sw_tx_inqueue, CTLFLAG_RD, 167 &ktls_cnt_tx_queued, 168 "Number of TLS records in queue to tasks for SW encryption"); 169 170 static COUNTER_U64_DEFINE_EARLY(ktls_cnt_rx_queued); 171 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, sw_rx_inqueue, CTLFLAG_RD, 172 &ktls_cnt_rx_queued, 173 "Number of TLS sockets in queue to tasks for SW decryption"); 174 175 static COUNTER_U64_DEFINE_EARLY(ktls_offload_total); 176 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, offload_total, 177 CTLFLAG_RD, &ktls_offload_total, 178 "Total successful TLS setups (parameters set)"); 179 180 static COUNTER_U64_DEFINE_EARLY(ktls_offload_enable_calls); 181 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, enable_calls, 182 CTLFLAG_RD, &ktls_offload_enable_calls, 183 "Total number of TLS enable calls made"); 184 185 static COUNTER_U64_DEFINE_EARLY(ktls_offload_active); 186 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, active, CTLFLAG_RD, 187 &ktls_offload_active, "Total Active TLS sessions"); 188 189 static COUNTER_U64_DEFINE_EARLY(ktls_offload_corrupted_records); 190 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, corrupted_records, CTLFLAG_RD, 191 &ktls_offload_corrupted_records, "Total corrupted TLS records received"); 192 193 static COUNTER_U64_DEFINE_EARLY(ktls_offload_failed_crypto); 194 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, failed_crypto, CTLFLAG_RD, 195 &ktls_offload_failed_crypto, "Total TLS crypto failures"); 196 197 static COUNTER_U64_DEFINE_EARLY(ktls_switch_to_ifnet); 198 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, switch_to_ifnet, CTLFLAG_RD, 199 &ktls_switch_to_ifnet, "TLS sessions switched from SW to ifnet"); 200 201 static COUNTER_U64_DEFINE_EARLY(ktls_switch_to_sw); 202 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, switch_to_sw, CTLFLAG_RD, 203 &ktls_switch_to_sw, "TLS sessions switched from ifnet to SW"); 204 205 static COUNTER_U64_DEFINE_EARLY(ktls_switch_failed); 206 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, switch_failed, CTLFLAG_RD, 207 &ktls_switch_failed, "TLS sessions unable to switch between SW and ifnet"); 208 209 static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_disable_fail); 210 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, ifnet_disable_failed, CTLFLAG_RD, 211 &ktls_ifnet_disable_fail, "TLS sessions unable to switch to SW from ifnet"); 212 213 static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_disable_ok); 214 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, ifnet_disable_ok, CTLFLAG_RD, 215 &ktls_ifnet_disable_ok, "TLS sessions able to switch to SW from ifnet"); 216 217 SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, sw, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 218 "Software TLS session stats"); 219 SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, ifnet, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 220 "Hardware (ifnet) TLS session stats"); 221 #ifdef TCP_OFFLOAD 222 SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, toe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 223 "TOE TLS session stats"); 224 #endif 225 226 static COUNTER_U64_DEFINE_EARLY(ktls_sw_cbc); 227 SYSCTL_COUNTER_U64(_kern_ipc_tls_sw, OID_AUTO, cbc, CTLFLAG_RD, &ktls_sw_cbc, 228 "Active number of software TLS sessions using AES-CBC"); 229 230 static COUNTER_U64_DEFINE_EARLY(ktls_sw_gcm); 231 SYSCTL_COUNTER_U64(_kern_ipc_tls_sw, OID_AUTO, gcm, CTLFLAG_RD, &ktls_sw_gcm, 232 "Active number of software TLS sessions using AES-GCM"); 233 234 static COUNTER_U64_DEFINE_EARLY(ktls_sw_chacha20); 235 SYSCTL_COUNTER_U64(_kern_ipc_tls_sw, OID_AUTO, chacha20, CTLFLAG_RD, 236 &ktls_sw_chacha20, 237 "Active number of software TLS sessions using Chacha20-Poly1305"); 238 239 static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_cbc); 240 SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, cbc, CTLFLAG_RD, 241 &ktls_ifnet_cbc, 242 "Active number of ifnet TLS sessions using AES-CBC"); 243 244 static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_gcm); 245 SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, gcm, CTLFLAG_RD, 246 &ktls_ifnet_gcm, 247 "Active number of ifnet TLS sessions using AES-GCM"); 248 249 static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_chacha20); 250 SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, chacha20, CTLFLAG_RD, 251 &ktls_ifnet_chacha20, 252 "Active number of ifnet TLS sessions using Chacha20-Poly1305"); 253 254 static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_reset); 255 SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, reset, CTLFLAG_RD, 256 &ktls_ifnet_reset, "TLS sessions updated to a new ifnet send tag"); 257 258 static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_reset_dropped); 259 SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, reset_dropped, CTLFLAG_RD, 260 &ktls_ifnet_reset_dropped, 261 "TLS sessions dropped after failing to update ifnet send tag"); 262 263 static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_reset_failed); 264 SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, reset_failed, CTLFLAG_RD, 265 &ktls_ifnet_reset_failed, 266 "TLS sessions that failed to allocate a new ifnet send tag"); 267 268 static int ktls_ifnet_permitted; 269 SYSCTL_UINT(_kern_ipc_tls_ifnet, OID_AUTO, permitted, CTLFLAG_RWTUN, 270 &ktls_ifnet_permitted, 1, 271 "Whether to permit hardware (ifnet) TLS sessions"); 272 273 #ifdef TCP_OFFLOAD 274 static COUNTER_U64_DEFINE_EARLY(ktls_toe_cbc); 275 SYSCTL_COUNTER_U64(_kern_ipc_tls_toe, OID_AUTO, cbc, CTLFLAG_RD, 276 &ktls_toe_cbc, 277 "Active number of TOE TLS sessions using AES-CBC"); 278 279 static COUNTER_U64_DEFINE_EARLY(ktls_toe_gcm); 280 SYSCTL_COUNTER_U64(_kern_ipc_tls_toe, OID_AUTO, gcm, CTLFLAG_RD, 281 &ktls_toe_gcm, 282 "Active number of TOE TLS sessions using AES-GCM"); 283 284 static COUNTER_U64_DEFINE_EARLY(ktls_toe_chacha20); 285 SYSCTL_COUNTER_U64(_kern_ipc_tls_toe, OID_AUTO, chacha20, CTLFLAG_RD, 286 &ktls_toe_chacha20, 287 "Active number of TOE TLS sessions using Chacha20-Poly1305"); 288 #endif 289 290 static MALLOC_DEFINE(M_KTLS, "ktls", "Kernel TLS"); 291 292 static void ktls_cleanup(struct ktls_session *tls); 293 #if defined(INET) || defined(INET6) 294 static void ktls_reset_send_tag(void *context, int pending); 295 #endif 296 static void ktls_work_thread(void *ctx); 297 static void ktls_alloc_thread(void *ctx); 298 299 #if defined(INET) || defined(INET6) 300 static u_int 301 ktls_get_cpu(struct socket *so) 302 { 303 struct inpcb *inp; 304 #ifdef NUMA 305 struct ktls_domain_info *di; 306 #endif 307 u_int cpuid; 308 309 inp = sotoinpcb(so); 310 #ifdef RSS 311 cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype); 312 if (cpuid != NETISR_CPUID_NONE) 313 return (cpuid); 314 #endif 315 /* 316 * Just use the flowid to shard connections in a repeatable 317 * fashion. Note that TLS 1.0 sessions rely on the 318 * serialization provided by having the same connection use 319 * the same queue. 320 */ 321 #ifdef NUMA 322 if (ktls_bind_threads > 1 && inp->inp_numa_domain != M_NODOM) { 323 di = &ktls_domains[inp->inp_numa_domain]; 324 cpuid = di->cpu[inp->inp_flowid % di->count]; 325 } else 326 #endif 327 cpuid = ktls_cpuid_lookup[inp->inp_flowid % ktls_number_threads]; 328 return (cpuid); 329 } 330 #endif 331 332 static int 333 ktls_buffer_import(void *arg, void **store, int count, int domain, int flags) 334 { 335 vm_page_t m; 336 int i; 337 338 KASSERT((ktls_maxlen & PAGE_MASK) == 0, 339 ("%s: ktls max length %d is not page size-aligned", 340 __func__, ktls_maxlen)); 341 342 for (i = 0; i < count; i++) { 343 m = vm_page_alloc_contig_domain(NULL, 0, domain, 344 VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | 345 VM_ALLOC_NODUMP | malloc2vm_flags(flags), 346 atop(ktls_maxlen), 0, ~0ul, PAGE_SIZE, 0, 347 VM_MEMATTR_DEFAULT); 348 if (m == NULL) 349 break; 350 store[i] = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); 351 } 352 return (i); 353 } 354 355 static void 356 ktls_buffer_release(void *arg __unused, void **store, int count) 357 { 358 vm_page_t m; 359 int i, j; 360 361 for (i = 0; i < count; i++) { 362 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)store[i])); 363 for (j = 0; j < atop(ktls_maxlen); j++) { 364 (void)vm_page_unwire_noq(m + j); 365 vm_page_free(m + j); 366 } 367 } 368 } 369 370 static void 371 ktls_free_mext_contig(struct mbuf *m) 372 { 373 M_ASSERTEXTPG(m); 374 uma_zfree(ktls_buffer_zone, (void *)PHYS_TO_DMAP(m->m_epg_pa[0])); 375 } 376 377 static void 378 ktls_init(void *dummy __unused) 379 { 380 struct thread *td; 381 struct pcpu *pc; 382 cpuset_t mask; 383 int count, domain, error, i; 384 385 ktls_wq = malloc(sizeof(*ktls_wq) * (mp_maxid + 1), M_KTLS, 386 M_WAITOK | M_ZERO); 387 388 ktls_session_zone = uma_zcreate("ktls_session", 389 sizeof(struct ktls_session), 390 NULL, NULL, NULL, NULL, 391 UMA_ALIGN_CACHE, 0); 392 393 if (ktls_sw_buffer_cache) { 394 ktls_buffer_zone = uma_zcache_create("ktls_buffers", 395 roundup2(ktls_maxlen, PAGE_SIZE), NULL, NULL, NULL, NULL, 396 ktls_buffer_import, ktls_buffer_release, NULL, 397 UMA_ZONE_FIRSTTOUCH); 398 } 399 400 /* 401 * Initialize the workqueues to run the TLS work. We create a 402 * work queue for each CPU. 403 */ 404 CPU_FOREACH(i) { 405 STAILQ_INIT(&ktls_wq[i].m_head); 406 STAILQ_INIT(&ktls_wq[i].so_head); 407 mtx_init(&ktls_wq[i].mtx, "ktls work queue", NULL, MTX_DEF); 408 error = kproc_kthread_add(ktls_work_thread, &ktls_wq[i], 409 &ktls_proc, &td, 0, 0, "KTLS", "thr_%d", i); 410 if (error) 411 panic("Can't add KTLS thread %d error %d", i, error); 412 413 /* 414 * Bind threads to cores. If ktls_bind_threads is > 415 * 1, then we bind to the NUMA domain. 416 */ 417 if (ktls_bind_threads) { 418 if (ktls_bind_threads > 1) { 419 pc = pcpu_find(i); 420 domain = pc->pc_domain; 421 CPU_COPY(&cpuset_domain[domain], &mask); 422 count = ktls_domains[domain].count; 423 ktls_domains[domain].cpu[count] = i; 424 ktls_domains[domain].count++; 425 } else { 426 CPU_SETOF(i, &mask); 427 } 428 error = cpuset_setthread(td->td_tid, &mask); 429 if (error) 430 panic( 431 "Unable to bind KTLS thread for CPU %d error %d", 432 i, error); 433 } 434 ktls_cpuid_lookup[ktls_number_threads] = i; 435 ktls_number_threads++; 436 } 437 438 /* 439 * Start an allocation thread per-domain to perform blocking allocations 440 * of 16k physically contiguous TLS crypto destination buffers. 441 */ 442 if (ktls_sw_buffer_cache) { 443 for (domain = 0; domain < vm_ndomains; domain++) { 444 if (VM_DOMAIN_EMPTY(domain)) 445 continue; 446 if (CPU_EMPTY(&cpuset_domain[domain])) 447 continue; 448 error = kproc_kthread_add(ktls_alloc_thread, 449 &ktls_domains[domain], &ktls_proc, 450 &ktls_domains[domain].alloc_td.td, 451 0, 0, "KTLS", "alloc_%d", domain); 452 if (error) 453 panic("Can't add KTLS alloc thread %d error %d", 454 domain, error); 455 CPU_COPY(&cpuset_domain[domain], &mask); 456 error = cpuset_setthread(ktls_domains[domain].alloc_td.td->td_tid, 457 &mask); 458 if (error) 459 panic("Unable to bind KTLS alloc %d error %d", 460 domain, error); 461 } 462 } 463 464 /* 465 * If we somehow have an empty domain, fall back to choosing 466 * among all KTLS threads. 467 */ 468 if (ktls_bind_threads > 1) { 469 for (i = 0; i < vm_ndomains; i++) { 470 if (ktls_domains[i].count == 0) { 471 ktls_bind_threads = 1; 472 break; 473 } 474 } 475 } 476 477 if (bootverbose) 478 printf("KTLS: Initialized %d threads\n", ktls_number_threads); 479 } 480 SYSINIT(ktls, SI_SUB_SMP + 1, SI_ORDER_ANY, ktls_init, NULL); 481 482 #if defined(INET) || defined(INET6) 483 static int 484 ktls_create_session(struct socket *so, struct tls_enable *en, 485 struct ktls_session **tlsp) 486 { 487 struct ktls_session *tls; 488 int error; 489 490 /* Only TLS 1.0 - 1.3 are supported. */ 491 if (en->tls_vmajor != TLS_MAJOR_VER_ONE) 492 return (EINVAL); 493 if (en->tls_vminor < TLS_MINOR_VER_ZERO || 494 en->tls_vminor > TLS_MINOR_VER_THREE) 495 return (EINVAL); 496 497 if (en->auth_key_len < 0 || en->auth_key_len > TLS_MAX_PARAM_SIZE) 498 return (EINVAL); 499 if (en->cipher_key_len < 0 || en->cipher_key_len > TLS_MAX_PARAM_SIZE) 500 return (EINVAL); 501 if (en->iv_len < 0 || en->iv_len > sizeof(tls->params.iv)) 502 return (EINVAL); 503 504 /* All supported algorithms require a cipher key. */ 505 if (en->cipher_key_len == 0) 506 return (EINVAL); 507 508 /* No flags are currently supported. */ 509 if (en->flags != 0) 510 return (EINVAL); 511 512 /* Common checks for supported algorithms. */ 513 switch (en->cipher_algorithm) { 514 case CRYPTO_AES_NIST_GCM_16: 515 /* 516 * auth_algorithm isn't used, but permit GMAC values 517 * for compatibility. 518 */ 519 switch (en->auth_algorithm) { 520 case 0: 521 #ifdef COMPAT_FREEBSD12 522 /* XXX: Really 13.0-current COMPAT. */ 523 case CRYPTO_AES_128_NIST_GMAC: 524 case CRYPTO_AES_192_NIST_GMAC: 525 case CRYPTO_AES_256_NIST_GMAC: 526 #endif 527 break; 528 default: 529 return (EINVAL); 530 } 531 if (en->auth_key_len != 0) 532 return (EINVAL); 533 if ((en->tls_vminor == TLS_MINOR_VER_TWO && 534 en->iv_len != TLS_AEAD_GCM_LEN) || 535 (en->tls_vminor == TLS_MINOR_VER_THREE && 536 en->iv_len != TLS_1_3_GCM_IV_LEN)) 537 return (EINVAL); 538 break; 539 case CRYPTO_AES_CBC: 540 switch (en->auth_algorithm) { 541 case CRYPTO_SHA1_HMAC: 542 /* 543 * TLS 1.0 requires an implicit IV. TLS 1.1+ 544 * all use explicit IVs. 545 */ 546 if (en->tls_vminor == TLS_MINOR_VER_ZERO) { 547 if (en->iv_len != TLS_CBC_IMPLICIT_IV_LEN) 548 return (EINVAL); 549 break; 550 } 551 552 /* FALLTHROUGH */ 553 case CRYPTO_SHA2_256_HMAC: 554 case CRYPTO_SHA2_384_HMAC: 555 /* Ignore any supplied IV. */ 556 en->iv_len = 0; 557 break; 558 default: 559 return (EINVAL); 560 } 561 if (en->auth_key_len == 0) 562 return (EINVAL); 563 break; 564 case CRYPTO_CHACHA20_POLY1305: 565 if (en->auth_algorithm != 0 || en->auth_key_len != 0) 566 return (EINVAL); 567 if (en->tls_vminor != TLS_MINOR_VER_TWO && 568 en->tls_vminor != TLS_MINOR_VER_THREE) 569 return (EINVAL); 570 if (en->iv_len != TLS_CHACHA20_IV_LEN) 571 return (EINVAL); 572 break; 573 default: 574 return (EINVAL); 575 } 576 577 tls = uma_zalloc(ktls_session_zone, M_WAITOK | M_ZERO); 578 579 counter_u64_add(ktls_offload_active, 1); 580 581 refcount_init(&tls->refcount, 1); 582 TASK_INIT(&tls->reset_tag_task, 0, ktls_reset_send_tag, tls); 583 584 tls->wq_index = ktls_get_cpu(so); 585 586 tls->params.cipher_algorithm = en->cipher_algorithm; 587 tls->params.auth_algorithm = en->auth_algorithm; 588 tls->params.tls_vmajor = en->tls_vmajor; 589 tls->params.tls_vminor = en->tls_vminor; 590 tls->params.flags = en->flags; 591 tls->params.max_frame_len = min(TLS_MAX_MSG_SIZE_V10_2, ktls_maxlen); 592 593 /* Set the header and trailer lengths. */ 594 tls->params.tls_hlen = sizeof(struct tls_record_layer); 595 switch (en->cipher_algorithm) { 596 case CRYPTO_AES_NIST_GCM_16: 597 /* 598 * TLS 1.2 uses a 4 byte implicit IV with an explicit 8 byte 599 * nonce. TLS 1.3 uses a 12 byte implicit IV. 600 */ 601 if (en->tls_vminor < TLS_MINOR_VER_THREE) 602 tls->params.tls_hlen += sizeof(uint64_t); 603 tls->params.tls_tlen = AES_GMAC_HASH_LEN; 604 tls->params.tls_bs = 1; 605 break; 606 case CRYPTO_AES_CBC: 607 switch (en->auth_algorithm) { 608 case CRYPTO_SHA1_HMAC: 609 if (en->tls_vminor == TLS_MINOR_VER_ZERO) { 610 /* Implicit IV, no nonce. */ 611 } else { 612 tls->params.tls_hlen += AES_BLOCK_LEN; 613 } 614 tls->params.tls_tlen = AES_BLOCK_LEN + 615 SHA1_HASH_LEN; 616 break; 617 case CRYPTO_SHA2_256_HMAC: 618 tls->params.tls_hlen += AES_BLOCK_LEN; 619 tls->params.tls_tlen = AES_BLOCK_LEN + 620 SHA2_256_HASH_LEN; 621 break; 622 case CRYPTO_SHA2_384_HMAC: 623 tls->params.tls_hlen += AES_BLOCK_LEN; 624 tls->params.tls_tlen = AES_BLOCK_LEN + 625 SHA2_384_HASH_LEN; 626 break; 627 default: 628 panic("invalid hmac"); 629 } 630 tls->params.tls_bs = AES_BLOCK_LEN; 631 break; 632 case CRYPTO_CHACHA20_POLY1305: 633 /* 634 * Chacha20 uses a 12 byte implicit IV. 635 */ 636 tls->params.tls_tlen = POLY1305_HASH_LEN; 637 tls->params.tls_bs = 1; 638 break; 639 default: 640 panic("invalid cipher"); 641 } 642 643 /* 644 * TLS 1.3 includes optional padding which we do not support, 645 * and also puts the "real" record type at the end of the 646 * encrypted data. 647 */ 648 if (en->tls_vminor == TLS_MINOR_VER_THREE) 649 tls->params.tls_tlen += sizeof(uint8_t); 650 651 KASSERT(tls->params.tls_hlen <= MBUF_PEXT_HDR_LEN, 652 ("TLS header length too long: %d", tls->params.tls_hlen)); 653 KASSERT(tls->params.tls_tlen <= MBUF_PEXT_TRAIL_LEN, 654 ("TLS trailer length too long: %d", tls->params.tls_tlen)); 655 656 if (en->auth_key_len != 0) { 657 tls->params.auth_key_len = en->auth_key_len; 658 tls->params.auth_key = malloc(en->auth_key_len, M_KTLS, 659 M_WAITOK); 660 error = copyin(en->auth_key, tls->params.auth_key, 661 en->auth_key_len); 662 if (error) 663 goto out; 664 } 665 666 tls->params.cipher_key_len = en->cipher_key_len; 667 tls->params.cipher_key = malloc(en->cipher_key_len, M_KTLS, M_WAITOK); 668 error = copyin(en->cipher_key, tls->params.cipher_key, 669 en->cipher_key_len); 670 if (error) 671 goto out; 672 673 /* 674 * This holds the implicit portion of the nonce for AEAD 675 * ciphers and the initial implicit IV for TLS 1.0. The 676 * explicit portions of the IV are generated in ktls_frame(). 677 */ 678 if (en->iv_len != 0) { 679 tls->params.iv_len = en->iv_len; 680 error = copyin(en->iv, tls->params.iv, en->iv_len); 681 if (error) 682 goto out; 683 684 /* 685 * For TLS 1.2 with GCM, generate an 8-byte nonce as a 686 * counter to generate unique explicit IVs. 687 * 688 * Store this counter in the last 8 bytes of the IV 689 * array so that it is 8-byte aligned. 690 */ 691 if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16 && 692 en->tls_vminor == TLS_MINOR_VER_TWO) 693 arc4rand(tls->params.iv + 8, sizeof(uint64_t), 0); 694 } 695 696 *tlsp = tls; 697 return (0); 698 699 out: 700 ktls_cleanup(tls); 701 return (error); 702 } 703 704 static struct ktls_session * 705 ktls_clone_session(struct ktls_session *tls) 706 { 707 struct ktls_session *tls_new; 708 709 tls_new = uma_zalloc(ktls_session_zone, M_WAITOK | M_ZERO); 710 711 counter_u64_add(ktls_offload_active, 1); 712 713 refcount_init(&tls_new->refcount, 1); 714 TASK_INIT(&tls_new->reset_tag_task, 0, ktls_reset_send_tag, tls_new); 715 716 /* Copy fields from existing session. */ 717 tls_new->params = tls->params; 718 tls_new->wq_index = tls->wq_index; 719 720 /* Deep copy keys. */ 721 if (tls_new->params.auth_key != NULL) { 722 tls_new->params.auth_key = malloc(tls->params.auth_key_len, 723 M_KTLS, M_WAITOK); 724 memcpy(tls_new->params.auth_key, tls->params.auth_key, 725 tls->params.auth_key_len); 726 } 727 728 tls_new->params.cipher_key = malloc(tls->params.cipher_key_len, M_KTLS, 729 M_WAITOK); 730 memcpy(tls_new->params.cipher_key, tls->params.cipher_key, 731 tls->params.cipher_key_len); 732 733 return (tls_new); 734 } 735 #endif 736 737 static void 738 ktls_cleanup(struct ktls_session *tls) 739 { 740 741 counter_u64_add(ktls_offload_active, -1); 742 switch (tls->mode) { 743 case TCP_TLS_MODE_SW: 744 switch (tls->params.cipher_algorithm) { 745 case CRYPTO_AES_CBC: 746 counter_u64_add(ktls_sw_cbc, -1); 747 break; 748 case CRYPTO_AES_NIST_GCM_16: 749 counter_u64_add(ktls_sw_gcm, -1); 750 break; 751 case CRYPTO_CHACHA20_POLY1305: 752 counter_u64_add(ktls_sw_chacha20, -1); 753 break; 754 } 755 ktls_ocf_free(tls); 756 break; 757 case TCP_TLS_MODE_IFNET: 758 switch (tls->params.cipher_algorithm) { 759 case CRYPTO_AES_CBC: 760 counter_u64_add(ktls_ifnet_cbc, -1); 761 break; 762 case CRYPTO_AES_NIST_GCM_16: 763 counter_u64_add(ktls_ifnet_gcm, -1); 764 break; 765 case CRYPTO_CHACHA20_POLY1305: 766 counter_u64_add(ktls_ifnet_chacha20, -1); 767 break; 768 } 769 if (tls->snd_tag != NULL) 770 m_snd_tag_rele(tls->snd_tag); 771 break; 772 #ifdef TCP_OFFLOAD 773 case TCP_TLS_MODE_TOE: 774 switch (tls->params.cipher_algorithm) { 775 case CRYPTO_AES_CBC: 776 counter_u64_add(ktls_toe_cbc, -1); 777 break; 778 case CRYPTO_AES_NIST_GCM_16: 779 counter_u64_add(ktls_toe_gcm, -1); 780 break; 781 case CRYPTO_CHACHA20_POLY1305: 782 counter_u64_add(ktls_toe_chacha20, -1); 783 break; 784 } 785 break; 786 #endif 787 } 788 if (tls->params.auth_key != NULL) { 789 zfree(tls->params.auth_key, M_KTLS); 790 tls->params.auth_key = NULL; 791 tls->params.auth_key_len = 0; 792 } 793 if (tls->params.cipher_key != NULL) { 794 zfree(tls->params.cipher_key, M_KTLS); 795 tls->params.cipher_key = NULL; 796 tls->params.cipher_key_len = 0; 797 } 798 explicit_bzero(tls->params.iv, sizeof(tls->params.iv)); 799 } 800 801 #if defined(INET) || defined(INET6) 802 803 #ifdef TCP_OFFLOAD 804 static int 805 ktls_try_toe(struct socket *so, struct ktls_session *tls, int direction) 806 { 807 struct inpcb *inp; 808 struct tcpcb *tp; 809 int error; 810 811 inp = so->so_pcb; 812 INP_WLOCK(inp); 813 if (inp->inp_flags2 & INP_FREED) { 814 INP_WUNLOCK(inp); 815 return (ECONNRESET); 816 } 817 if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) { 818 INP_WUNLOCK(inp); 819 return (ECONNRESET); 820 } 821 if (inp->inp_socket == NULL) { 822 INP_WUNLOCK(inp); 823 return (ECONNRESET); 824 } 825 tp = intotcpcb(inp); 826 if (!(tp->t_flags & TF_TOE)) { 827 INP_WUNLOCK(inp); 828 return (EOPNOTSUPP); 829 } 830 831 error = tcp_offload_alloc_tls_session(tp, tls, direction); 832 INP_WUNLOCK(inp); 833 if (error == 0) { 834 tls->mode = TCP_TLS_MODE_TOE; 835 switch (tls->params.cipher_algorithm) { 836 case CRYPTO_AES_CBC: 837 counter_u64_add(ktls_toe_cbc, 1); 838 break; 839 case CRYPTO_AES_NIST_GCM_16: 840 counter_u64_add(ktls_toe_gcm, 1); 841 break; 842 case CRYPTO_CHACHA20_POLY1305: 843 counter_u64_add(ktls_toe_chacha20, 1); 844 break; 845 } 846 } 847 return (error); 848 } 849 #endif 850 851 /* 852 * Common code used when first enabling ifnet TLS on a connection or 853 * when allocating a new ifnet TLS session due to a routing change. 854 * This function allocates a new TLS send tag on whatever interface 855 * the connection is currently routed over. 856 */ 857 static int 858 ktls_alloc_snd_tag(struct inpcb *inp, struct ktls_session *tls, bool force, 859 struct m_snd_tag **mstp) 860 { 861 union if_snd_tag_alloc_params params; 862 struct ifnet *ifp; 863 struct nhop_object *nh; 864 struct tcpcb *tp; 865 int error; 866 867 INP_RLOCK(inp); 868 if (inp->inp_flags2 & INP_FREED) { 869 INP_RUNLOCK(inp); 870 return (ECONNRESET); 871 } 872 if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) { 873 INP_RUNLOCK(inp); 874 return (ECONNRESET); 875 } 876 if (inp->inp_socket == NULL) { 877 INP_RUNLOCK(inp); 878 return (ECONNRESET); 879 } 880 tp = intotcpcb(inp); 881 882 /* 883 * Check administrative controls on ifnet TLS to determine if 884 * ifnet TLS should be denied. 885 * 886 * - Always permit 'force' requests. 887 * - ktls_ifnet_permitted == 0: always deny. 888 */ 889 if (!force && ktls_ifnet_permitted == 0) { 890 INP_RUNLOCK(inp); 891 return (ENXIO); 892 } 893 894 /* 895 * XXX: Use the cached route in the inpcb to find the 896 * interface. This should perhaps instead use 897 * rtalloc1_fib(dst, 0, 0, fibnum). Since KTLS is only 898 * enabled after a connection has completed key negotiation in 899 * userland, the cached route will be present in practice. 900 */ 901 nh = inp->inp_route.ro_nh; 902 if (nh == NULL) { 903 INP_RUNLOCK(inp); 904 return (ENXIO); 905 } 906 ifp = nh->nh_ifp; 907 if_ref(ifp); 908 909 /* 910 * Allocate a TLS + ratelimit tag if the connection has an 911 * existing pacing rate. 912 */ 913 if (tp->t_pacing_rate != -1 && 914 (ifp->if_capenable & IFCAP_TXTLS_RTLMT) != 0) { 915 params.hdr.type = IF_SND_TAG_TYPE_TLS_RATE_LIMIT; 916 params.tls_rate_limit.inp = inp; 917 params.tls_rate_limit.tls = tls; 918 params.tls_rate_limit.max_rate = tp->t_pacing_rate; 919 } else { 920 params.hdr.type = IF_SND_TAG_TYPE_TLS; 921 params.tls.inp = inp; 922 params.tls.tls = tls; 923 } 924 params.hdr.flowid = inp->inp_flowid; 925 params.hdr.flowtype = inp->inp_flowtype; 926 params.hdr.numa_domain = inp->inp_numa_domain; 927 INP_RUNLOCK(inp); 928 929 if ((ifp->if_capenable & IFCAP_MEXTPG) == 0) { 930 error = EOPNOTSUPP; 931 goto out; 932 } 933 if (inp->inp_vflag & INP_IPV6) { 934 if ((ifp->if_capenable & IFCAP_TXTLS6) == 0) { 935 error = EOPNOTSUPP; 936 goto out; 937 } 938 } else { 939 if ((ifp->if_capenable & IFCAP_TXTLS4) == 0) { 940 error = EOPNOTSUPP; 941 goto out; 942 } 943 } 944 error = m_snd_tag_alloc(ifp, ¶ms, mstp); 945 out: 946 if_rele(ifp); 947 return (error); 948 } 949 950 static int 951 ktls_try_ifnet(struct socket *so, struct ktls_session *tls, bool force) 952 { 953 struct m_snd_tag *mst; 954 int error; 955 956 error = ktls_alloc_snd_tag(so->so_pcb, tls, force, &mst); 957 if (error == 0) { 958 tls->mode = TCP_TLS_MODE_IFNET; 959 tls->snd_tag = mst; 960 switch (tls->params.cipher_algorithm) { 961 case CRYPTO_AES_CBC: 962 counter_u64_add(ktls_ifnet_cbc, 1); 963 break; 964 case CRYPTO_AES_NIST_GCM_16: 965 counter_u64_add(ktls_ifnet_gcm, 1); 966 break; 967 case CRYPTO_CHACHA20_POLY1305: 968 counter_u64_add(ktls_ifnet_chacha20, 1); 969 break; 970 } 971 } 972 return (error); 973 } 974 975 static int 976 ktls_try_sw(struct socket *so, struct ktls_session *tls, int direction) 977 { 978 int error; 979 980 error = ktls_ocf_try(so, tls, direction); 981 if (error) 982 return (error); 983 tls->mode = TCP_TLS_MODE_SW; 984 switch (tls->params.cipher_algorithm) { 985 case CRYPTO_AES_CBC: 986 counter_u64_add(ktls_sw_cbc, 1); 987 break; 988 case CRYPTO_AES_NIST_GCM_16: 989 counter_u64_add(ktls_sw_gcm, 1); 990 break; 991 case CRYPTO_CHACHA20_POLY1305: 992 counter_u64_add(ktls_sw_chacha20, 1); 993 break; 994 } 995 return (0); 996 } 997 998 /* 999 * KTLS RX stores data in the socket buffer as a list of TLS records, 1000 * where each record is stored as a control message containg the TLS 1001 * header followed by data mbufs containing the decrypted data. This 1002 * is different from KTLS TX which always uses an mb_ext_pgs mbuf for 1003 * both encrypted and decrypted data. TLS records decrypted by a NIC 1004 * should be queued to the socket buffer as records, but encrypted 1005 * data which needs to be decrypted by software arrives as a stream of 1006 * regular mbufs which need to be converted. In addition, there may 1007 * already be pending encrypted data in the socket buffer when KTLS RX 1008 * is enabled. 1009 * 1010 * To manage not-yet-decrypted data for KTLS RX, the following scheme 1011 * is used: 1012 * 1013 * - A single chain of NOTREADY mbufs is hung off of sb_mtls. 1014 * 1015 * - ktls_check_rx checks this chain of mbufs reading the TLS header 1016 * from the first mbuf. Once all of the data for that TLS record is 1017 * queued, the socket is queued to a worker thread. 1018 * 1019 * - The worker thread calls ktls_decrypt to decrypt TLS records in 1020 * the TLS chain. Each TLS record is detached from the TLS chain, 1021 * decrypted, and inserted into the regular socket buffer chain as 1022 * record starting with a control message holding the TLS header and 1023 * a chain of mbufs holding the encrypted data. 1024 */ 1025 1026 static void 1027 sb_mark_notready(struct sockbuf *sb) 1028 { 1029 struct mbuf *m; 1030 1031 m = sb->sb_mb; 1032 sb->sb_mtls = m; 1033 sb->sb_mb = NULL; 1034 sb->sb_mbtail = NULL; 1035 sb->sb_lastrecord = NULL; 1036 for (; m != NULL; m = m->m_next) { 1037 KASSERT(m->m_nextpkt == NULL, ("%s: m_nextpkt != NULL", 1038 __func__)); 1039 KASSERT((m->m_flags & M_NOTAVAIL) == 0, ("%s: mbuf not avail", 1040 __func__)); 1041 KASSERT(sb->sb_acc >= m->m_len, ("%s: sb_acc < m->m_len", 1042 __func__)); 1043 m->m_flags |= M_NOTREADY; 1044 sb->sb_acc -= m->m_len; 1045 sb->sb_tlscc += m->m_len; 1046 sb->sb_mtlstail = m; 1047 } 1048 KASSERT(sb->sb_acc == 0 && sb->sb_tlscc == sb->sb_ccc, 1049 ("%s: acc %u tlscc %u ccc %u", __func__, sb->sb_acc, sb->sb_tlscc, 1050 sb->sb_ccc)); 1051 } 1052 1053 int 1054 ktls_enable_rx(struct socket *so, struct tls_enable *en) 1055 { 1056 struct ktls_session *tls; 1057 int error; 1058 1059 if (!ktls_offload_enable) 1060 return (ENOTSUP); 1061 if (SOLISTENING(so)) 1062 return (EINVAL); 1063 1064 counter_u64_add(ktls_offload_enable_calls, 1); 1065 1066 /* 1067 * This should always be true since only the TCP socket option 1068 * invokes this function. 1069 */ 1070 if (so->so_proto->pr_protocol != IPPROTO_TCP) 1071 return (EINVAL); 1072 1073 /* 1074 * XXX: Don't overwrite existing sessions. We should permit 1075 * this to support rekeying in the future. 1076 */ 1077 if (so->so_rcv.sb_tls_info != NULL) 1078 return (EALREADY); 1079 1080 if (en->cipher_algorithm == CRYPTO_AES_CBC && !ktls_cbc_enable) 1081 return (ENOTSUP); 1082 1083 /* TLS 1.3 is not yet supported. */ 1084 if (en->tls_vmajor == TLS_MAJOR_VER_ONE && 1085 en->tls_vminor == TLS_MINOR_VER_THREE) 1086 return (ENOTSUP); 1087 1088 error = ktls_create_session(so, en, &tls); 1089 if (error) 1090 return (error); 1091 1092 #ifdef TCP_OFFLOAD 1093 error = ktls_try_toe(so, tls, KTLS_RX); 1094 if (error) 1095 #endif 1096 error = ktls_try_sw(so, tls, KTLS_RX); 1097 1098 if (error) { 1099 ktls_cleanup(tls); 1100 return (error); 1101 } 1102 1103 /* Mark the socket as using TLS offload. */ 1104 SOCKBUF_LOCK(&so->so_rcv); 1105 so->so_rcv.sb_tls_seqno = be64dec(en->rec_seq); 1106 so->so_rcv.sb_tls_info = tls; 1107 so->so_rcv.sb_flags |= SB_TLS_RX; 1108 1109 /* Mark existing data as not ready until it can be decrypted. */ 1110 if (tls->mode != TCP_TLS_MODE_TOE) { 1111 sb_mark_notready(&so->so_rcv); 1112 ktls_check_rx(&so->so_rcv); 1113 } 1114 SOCKBUF_UNLOCK(&so->so_rcv); 1115 1116 counter_u64_add(ktls_offload_total, 1); 1117 1118 return (0); 1119 } 1120 1121 int 1122 ktls_enable_tx(struct socket *so, struct tls_enable *en) 1123 { 1124 struct ktls_session *tls; 1125 struct inpcb *inp; 1126 int error; 1127 1128 if (!ktls_offload_enable) 1129 return (ENOTSUP); 1130 if (SOLISTENING(so)) 1131 return (EINVAL); 1132 1133 counter_u64_add(ktls_offload_enable_calls, 1); 1134 1135 /* 1136 * This should always be true since only the TCP socket option 1137 * invokes this function. 1138 */ 1139 if (so->so_proto->pr_protocol != IPPROTO_TCP) 1140 return (EINVAL); 1141 1142 /* 1143 * XXX: Don't overwrite existing sessions. We should permit 1144 * this to support rekeying in the future. 1145 */ 1146 if (so->so_snd.sb_tls_info != NULL) 1147 return (EALREADY); 1148 1149 if (en->cipher_algorithm == CRYPTO_AES_CBC && !ktls_cbc_enable) 1150 return (ENOTSUP); 1151 1152 /* TLS requires ext pgs */ 1153 if (mb_use_ext_pgs == 0) 1154 return (ENXIO); 1155 1156 error = ktls_create_session(so, en, &tls); 1157 if (error) 1158 return (error); 1159 1160 /* Prefer TOE -> ifnet TLS -> software TLS. */ 1161 #ifdef TCP_OFFLOAD 1162 error = ktls_try_toe(so, tls, KTLS_TX); 1163 if (error) 1164 #endif 1165 error = ktls_try_ifnet(so, tls, false); 1166 if (error) 1167 error = ktls_try_sw(so, tls, KTLS_TX); 1168 1169 if (error) { 1170 ktls_cleanup(tls); 1171 return (error); 1172 } 1173 1174 error = SOCK_IO_SEND_LOCK(so, SBL_WAIT); 1175 if (error) { 1176 ktls_cleanup(tls); 1177 return (error); 1178 } 1179 1180 /* 1181 * Write lock the INP when setting sb_tls_info so that 1182 * routines in tcp_ratelimit.c can read sb_tls_info while 1183 * holding the INP lock. 1184 */ 1185 inp = so->so_pcb; 1186 INP_WLOCK(inp); 1187 SOCKBUF_LOCK(&so->so_snd); 1188 so->so_snd.sb_tls_seqno = be64dec(en->rec_seq); 1189 so->so_snd.sb_tls_info = tls; 1190 if (tls->mode != TCP_TLS_MODE_SW) 1191 so->so_snd.sb_flags |= SB_TLS_IFNET; 1192 SOCKBUF_UNLOCK(&so->so_snd); 1193 INP_WUNLOCK(inp); 1194 SOCK_IO_SEND_UNLOCK(so); 1195 1196 counter_u64_add(ktls_offload_total, 1); 1197 1198 return (0); 1199 } 1200 1201 int 1202 ktls_get_rx_mode(struct socket *so, int *modep) 1203 { 1204 struct ktls_session *tls; 1205 struct inpcb *inp; 1206 1207 if (SOLISTENING(so)) 1208 return (EINVAL); 1209 inp = so->so_pcb; 1210 INP_WLOCK_ASSERT(inp); 1211 SOCK_RECVBUF_LOCK(so); 1212 tls = so->so_rcv.sb_tls_info; 1213 if (tls == NULL) 1214 *modep = TCP_TLS_MODE_NONE; 1215 else 1216 *modep = tls->mode; 1217 SOCK_RECVBUF_UNLOCK(so); 1218 return (0); 1219 } 1220 1221 int 1222 ktls_get_tx_mode(struct socket *so, int *modep) 1223 { 1224 struct ktls_session *tls; 1225 struct inpcb *inp; 1226 1227 if (SOLISTENING(so)) 1228 return (EINVAL); 1229 inp = so->so_pcb; 1230 INP_WLOCK_ASSERT(inp); 1231 SOCK_SENDBUF_LOCK(so); 1232 tls = so->so_snd.sb_tls_info; 1233 if (tls == NULL) 1234 *modep = TCP_TLS_MODE_NONE; 1235 else 1236 *modep = tls->mode; 1237 SOCK_SENDBUF_UNLOCK(so); 1238 return (0); 1239 } 1240 1241 /* 1242 * Switch between SW and ifnet TLS sessions as requested. 1243 */ 1244 int 1245 ktls_set_tx_mode(struct socket *so, int mode) 1246 { 1247 struct ktls_session *tls, *tls_new; 1248 struct inpcb *inp; 1249 int error; 1250 1251 if (SOLISTENING(so)) 1252 return (EINVAL); 1253 switch (mode) { 1254 case TCP_TLS_MODE_SW: 1255 case TCP_TLS_MODE_IFNET: 1256 break; 1257 default: 1258 return (EINVAL); 1259 } 1260 1261 inp = so->so_pcb; 1262 INP_WLOCK_ASSERT(inp); 1263 SOCKBUF_LOCK(&so->so_snd); 1264 tls = so->so_snd.sb_tls_info; 1265 if (tls == NULL) { 1266 SOCKBUF_UNLOCK(&so->so_snd); 1267 return (0); 1268 } 1269 1270 if (tls->mode == mode) { 1271 SOCKBUF_UNLOCK(&so->so_snd); 1272 return (0); 1273 } 1274 1275 tls = ktls_hold(tls); 1276 SOCKBUF_UNLOCK(&so->so_snd); 1277 INP_WUNLOCK(inp); 1278 1279 tls_new = ktls_clone_session(tls); 1280 1281 if (mode == TCP_TLS_MODE_IFNET) 1282 error = ktls_try_ifnet(so, tls_new, true); 1283 else 1284 error = ktls_try_sw(so, tls_new, KTLS_TX); 1285 if (error) { 1286 counter_u64_add(ktls_switch_failed, 1); 1287 ktls_free(tls_new); 1288 ktls_free(tls); 1289 INP_WLOCK(inp); 1290 return (error); 1291 } 1292 1293 error = SOCK_IO_SEND_LOCK(so, SBL_WAIT); 1294 if (error) { 1295 counter_u64_add(ktls_switch_failed, 1); 1296 ktls_free(tls_new); 1297 ktls_free(tls); 1298 INP_WLOCK(inp); 1299 return (error); 1300 } 1301 1302 /* 1303 * If we raced with another session change, keep the existing 1304 * session. 1305 */ 1306 if (tls != so->so_snd.sb_tls_info) { 1307 counter_u64_add(ktls_switch_failed, 1); 1308 SOCK_IO_SEND_UNLOCK(so); 1309 ktls_free(tls_new); 1310 ktls_free(tls); 1311 INP_WLOCK(inp); 1312 return (EBUSY); 1313 } 1314 1315 SOCKBUF_LOCK(&so->so_snd); 1316 so->so_snd.sb_tls_info = tls_new; 1317 if (tls_new->mode != TCP_TLS_MODE_SW) 1318 so->so_snd.sb_flags |= SB_TLS_IFNET; 1319 SOCKBUF_UNLOCK(&so->so_snd); 1320 SOCK_IO_SEND_UNLOCK(so); 1321 1322 /* 1323 * Drop two references on 'tls'. The first is for the 1324 * ktls_hold() above. The second drops the reference from the 1325 * socket buffer. 1326 */ 1327 KASSERT(tls->refcount >= 2, ("too few references on old session")); 1328 ktls_free(tls); 1329 ktls_free(tls); 1330 1331 if (mode == TCP_TLS_MODE_IFNET) 1332 counter_u64_add(ktls_switch_to_ifnet, 1); 1333 else 1334 counter_u64_add(ktls_switch_to_sw, 1); 1335 1336 INP_WLOCK(inp); 1337 return (0); 1338 } 1339 1340 /* 1341 * Try to allocate a new TLS send tag. This task is scheduled when 1342 * ip_output detects a route change while trying to transmit a packet 1343 * holding a TLS record. If a new tag is allocated, replace the tag 1344 * in the TLS session. Subsequent packets on the connection will use 1345 * the new tag. If a new tag cannot be allocated, drop the 1346 * connection. 1347 */ 1348 static void 1349 ktls_reset_send_tag(void *context, int pending) 1350 { 1351 struct epoch_tracker et; 1352 struct ktls_session *tls; 1353 struct m_snd_tag *old, *new; 1354 struct inpcb *inp; 1355 struct tcpcb *tp; 1356 int error; 1357 1358 MPASS(pending == 1); 1359 1360 tls = context; 1361 inp = tls->inp; 1362 1363 /* 1364 * Free the old tag first before allocating a new one. 1365 * ip[6]_output_send() will treat a NULL send tag the same as 1366 * an ifp mismatch and drop packets until a new tag is 1367 * allocated. 1368 * 1369 * Write-lock the INP when changing tls->snd_tag since 1370 * ip[6]_output_send() holds a read-lock when reading the 1371 * pointer. 1372 */ 1373 INP_WLOCK(inp); 1374 old = tls->snd_tag; 1375 tls->snd_tag = NULL; 1376 INP_WUNLOCK(inp); 1377 if (old != NULL) 1378 m_snd_tag_rele(old); 1379 1380 error = ktls_alloc_snd_tag(inp, tls, true, &new); 1381 1382 if (error == 0) { 1383 INP_WLOCK(inp); 1384 tls->snd_tag = new; 1385 mtx_pool_lock(mtxpool_sleep, tls); 1386 tls->reset_pending = false; 1387 mtx_pool_unlock(mtxpool_sleep, tls); 1388 if (!in_pcbrele_wlocked(inp)) 1389 INP_WUNLOCK(inp); 1390 1391 counter_u64_add(ktls_ifnet_reset, 1); 1392 1393 /* 1394 * XXX: Should we kick tcp_output explicitly now that 1395 * the send tag is fixed or just rely on timers? 1396 */ 1397 } else { 1398 NET_EPOCH_ENTER(et); 1399 INP_WLOCK(inp); 1400 if (!in_pcbrele_wlocked(inp)) { 1401 if (!(inp->inp_flags & INP_TIMEWAIT) && 1402 !(inp->inp_flags & INP_DROPPED)) { 1403 tp = intotcpcb(inp); 1404 CURVNET_SET(tp->t_vnet); 1405 tp = tcp_drop(tp, ECONNABORTED); 1406 CURVNET_RESTORE(); 1407 if (tp != NULL) 1408 INP_WUNLOCK(inp); 1409 counter_u64_add(ktls_ifnet_reset_dropped, 1); 1410 } else 1411 INP_WUNLOCK(inp); 1412 } 1413 NET_EPOCH_EXIT(et); 1414 1415 counter_u64_add(ktls_ifnet_reset_failed, 1); 1416 1417 /* 1418 * Leave reset_pending true to avoid future tasks while 1419 * the socket goes away. 1420 */ 1421 } 1422 1423 ktls_free(tls); 1424 } 1425 1426 int 1427 ktls_output_eagain(struct inpcb *inp, struct ktls_session *tls) 1428 { 1429 1430 if (inp == NULL) 1431 return (ENOBUFS); 1432 1433 INP_LOCK_ASSERT(inp); 1434 1435 /* 1436 * See if we should schedule a task to update the send tag for 1437 * this session. 1438 */ 1439 mtx_pool_lock(mtxpool_sleep, tls); 1440 if (!tls->reset_pending) { 1441 (void) ktls_hold(tls); 1442 in_pcbref(inp); 1443 tls->inp = inp; 1444 tls->reset_pending = true; 1445 taskqueue_enqueue(taskqueue_thread, &tls->reset_tag_task); 1446 } 1447 mtx_pool_unlock(mtxpool_sleep, tls); 1448 return (ENOBUFS); 1449 } 1450 1451 #ifdef RATELIMIT 1452 int 1453 ktls_modify_txrtlmt(struct ktls_session *tls, uint64_t max_pacing_rate) 1454 { 1455 union if_snd_tag_modify_params params = { 1456 .rate_limit.max_rate = max_pacing_rate, 1457 .rate_limit.flags = M_NOWAIT, 1458 }; 1459 struct m_snd_tag *mst; 1460 1461 /* Can't get to the inp, but it should be locked. */ 1462 /* INP_LOCK_ASSERT(inp); */ 1463 1464 MPASS(tls->mode == TCP_TLS_MODE_IFNET); 1465 1466 if (tls->snd_tag == NULL) { 1467 /* 1468 * Resetting send tag, ignore this change. The 1469 * pending reset may or may not see this updated rate 1470 * in the tcpcb. If it doesn't, we will just lose 1471 * this rate change. 1472 */ 1473 return (0); 1474 } 1475 1476 MPASS(tls->snd_tag != NULL); 1477 MPASS(tls->snd_tag->sw->type == IF_SND_TAG_TYPE_TLS_RATE_LIMIT); 1478 1479 mst = tls->snd_tag; 1480 return (mst->sw->snd_tag_modify(mst, ¶ms)); 1481 } 1482 #endif 1483 #endif 1484 1485 void 1486 ktls_destroy(struct ktls_session *tls) 1487 { 1488 1489 ktls_cleanup(tls); 1490 uma_zfree(ktls_session_zone, tls); 1491 } 1492 1493 void 1494 ktls_seq(struct sockbuf *sb, struct mbuf *m) 1495 { 1496 1497 for (; m != NULL; m = m->m_next) { 1498 KASSERT((m->m_flags & M_EXTPG) != 0, 1499 ("ktls_seq: mapped mbuf %p", m)); 1500 1501 m->m_epg_seqno = sb->sb_tls_seqno; 1502 sb->sb_tls_seqno++; 1503 } 1504 } 1505 1506 /* 1507 * Add TLS framing (headers and trailers) to a chain of mbufs. Each 1508 * mbuf in the chain must be an unmapped mbuf. The payload of the 1509 * mbuf must be populated with the payload of each TLS record. 1510 * 1511 * The record_type argument specifies the TLS record type used when 1512 * populating the TLS header. 1513 * 1514 * The enq_count argument on return is set to the number of pages of 1515 * payload data for this entire chain that need to be encrypted via SW 1516 * encryption. The returned value should be passed to ktls_enqueue 1517 * when scheduling encryption of this chain of mbufs. To handle the 1518 * special case of empty fragments for TLS 1.0 sessions, an empty 1519 * fragment counts as one page. 1520 */ 1521 void 1522 ktls_frame(struct mbuf *top, struct ktls_session *tls, int *enq_cnt, 1523 uint8_t record_type) 1524 { 1525 struct tls_record_layer *tlshdr; 1526 struct mbuf *m; 1527 uint64_t *noncep; 1528 uint16_t tls_len; 1529 int maxlen; 1530 1531 maxlen = tls->params.max_frame_len; 1532 *enq_cnt = 0; 1533 for (m = top; m != NULL; m = m->m_next) { 1534 /* 1535 * All mbufs in the chain should be TLS records whose 1536 * payload does not exceed the maximum frame length. 1537 * 1538 * Empty TLS records are permitted when using CBC. 1539 */ 1540 KASSERT(m->m_len <= maxlen && 1541 (tls->params.cipher_algorithm == CRYPTO_AES_CBC ? 1542 m->m_len >= 0 : m->m_len > 0), 1543 ("ktls_frame: m %p len %d\n", m, m->m_len)); 1544 1545 /* 1546 * TLS frames require unmapped mbufs to store session 1547 * info. 1548 */ 1549 KASSERT((m->m_flags & M_EXTPG) != 0, 1550 ("ktls_frame: mapped mbuf %p (top = %p)\n", m, top)); 1551 1552 tls_len = m->m_len; 1553 1554 /* Save a reference to the session. */ 1555 m->m_epg_tls = ktls_hold(tls); 1556 1557 m->m_epg_hdrlen = tls->params.tls_hlen; 1558 m->m_epg_trllen = tls->params.tls_tlen; 1559 if (tls->params.cipher_algorithm == CRYPTO_AES_CBC) { 1560 int bs, delta; 1561 1562 /* 1563 * AES-CBC pads messages to a multiple of the 1564 * block size. Note that the padding is 1565 * applied after the digest and the encryption 1566 * is done on the "plaintext || mac || padding". 1567 * At least one byte of padding is always 1568 * present. 1569 * 1570 * Compute the final trailer length assuming 1571 * at most one block of padding. 1572 * tls->params.tls_tlen is the maximum 1573 * possible trailer length (padding + digest). 1574 * delta holds the number of excess padding 1575 * bytes if the maximum were used. Those 1576 * extra bytes are removed. 1577 */ 1578 bs = tls->params.tls_bs; 1579 delta = (tls_len + tls->params.tls_tlen) & (bs - 1); 1580 m->m_epg_trllen -= delta; 1581 } 1582 m->m_len += m->m_epg_hdrlen + m->m_epg_trllen; 1583 1584 /* Populate the TLS header. */ 1585 tlshdr = (void *)m->m_epg_hdr; 1586 tlshdr->tls_vmajor = tls->params.tls_vmajor; 1587 1588 /* 1589 * TLS 1.3 masquarades as TLS 1.2 with a record type 1590 * of TLS_RLTYPE_APP. 1591 */ 1592 if (tls->params.tls_vminor == TLS_MINOR_VER_THREE && 1593 tls->params.tls_vmajor == TLS_MAJOR_VER_ONE) { 1594 tlshdr->tls_vminor = TLS_MINOR_VER_TWO; 1595 tlshdr->tls_type = TLS_RLTYPE_APP; 1596 /* save the real record type for later */ 1597 m->m_epg_record_type = record_type; 1598 m->m_epg_trail[0] = record_type; 1599 } else { 1600 tlshdr->tls_vminor = tls->params.tls_vminor; 1601 tlshdr->tls_type = record_type; 1602 } 1603 tlshdr->tls_length = htons(m->m_len - sizeof(*tlshdr)); 1604 1605 /* 1606 * Store nonces / explicit IVs after the end of the 1607 * TLS header. 1608 * 1609 * For GCM with TLS 1.2, an 8 byte nonce is copied 1610 * from the end of the IV. The nonce is then 1611 * incremented for use by the next record. 1612 * 1613 * For CBC, a random nonce is inserted for TLS 1.1+. 1614 */ 1615 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16 && 1616 tls->params.tls_vminor == TLS_MINOR_VER_TWO) { 1617 noncep = (uint64_t *)(tls->params.iv + 8); 1618 be64enc(tlshdr + 1, *noncep); 1619 (*noncep)++; 1620 } else if (tls->params.cipher_algorithm == CRYPTO_AES_CBC && 1621 tls->params.tls_vminor >= TLS_MINOR_VER_ONE) 1622 arc4rand(tlshdr + 1, AES_BLOCK_LEN, 0); 1623 1624 /* 1625 * When using SW encryption, mark the mbuf not ready. 1626 * It will be marked ready via sbready() after the 1627 * record has been encrypted. 1628 * 1629 * When using ifnet TLS, unencrypted TLS records are 1630 * sent down the stack to the NIC. 1631 */ 1632 if (tls->mode == TCP_TLS_MODE_SW) { 1633 m->m_flags |= M_NOTREADY; 1634 if (__predict_false(tls_len == 0)) { 1635 /* TLS 1.0 empty fragment. */ 1636 m->m_epg_nrdy = 1; 1637 } else 1638 m->m_epg_nrdy = m->m_epg_npgs; 1639 *enq_cnt += m->m_epg_nrdy; 1640 } 1641 } 1642 } 1643 1644 void 1645 ktls_check_rx(struct sockbuf *sb) 1646 { 1647 struct tls_record_layer hdr; 1648 struct ktls_wq *wq; 1649 struct socket *so; 1650 bool running; 1651 1652 SOCKBUF_LOCK_ASSERT(sb); 1653 KASSERT(sb->sb_flags & SB_TLS_RX, ("%s: sockbuf %p isn't TLS RX", 1654 __func__, sb)); 1655 so = __containerof(sb, struct socket, so_rcv); 1656 1657 if (sb->sb_flags & SB_TLS_RX_RUNNING) 1658 return; 1659 1660 /* Is there enough queued for a TLS header? */ 1661 if (sb->sb_tlscc < sizeof(hdr)) { 1662 if ((sb->sb_state & SBS_CANTRCVMORE) != 0 && sb->sb_tlscc != 0) 1663 so->so_error = EMSGSIZE; 1664 return; 1665 } 1666 1667 m_copydata(sb->sb_mtls, 0, sizeof(hdr), (void *)&hdr); 1668 1669 /* Is the entire record queued? */ 1670 if (sb->sb_tlscc < sizeof(hdr) + ntohs(hdr.tls_length)) { 1671 if ((sb->sb_state & SBS_CANTRCVMORE) != 0) 1672 so->so_error = EMSGSIZE; 1673 return; 1674 } 1675 1676 sb->sb_flags |= SB_TLS_RX_RUNNING; 1677 1678 soref(so); 1679 wq = &ktls_wq[so->so_rcv.sb_tls_info->wq_index]; 1680 mtx_lock(&wq->mtx); 1681 STAILQ_INSERT_TAIL(&wq->so_head, so, so_ktls_rx_list); 1682 running = wq->running; 1683 mtx_unlock(&wq->mtx); 1684 if (!running) 1685 wakeup(wq); 1686 counter_u64_add(ktls_cnt_rx_queued, 1); 1687 } 1688 1689 static struct mbuf * 1690 ktls_detach_record(struct sockbuf *sb, int len) 1691 { 1692 struct mbuf *m, *n, *top; 1693 int remain; 1694 1695 SOCKBUF_LOCK_ASSERT(sb); 1696 MPASS(len <= sb->sb_tlscc); 1697 1698 /* 1699 * If TLS chain is the exact size of the record, 1700 * just grab the whole record. 1701 */ 1702 top = sb->sb_mtls; 1703 if (sb->sb_tlscc == len) { 1704 sb->sb_mtls = NULL; 1705 sb->sb_mtlstail = NULL; 1706 goto out; 1707 } 1708 1709 /* 1710 * While it would be nice to use m_split() here, we need 1711 * to know exactly what m_split() allocates to update the 1712 * accounting, so do it inline instead. 1713 */ 1714 remain = len; 1715 for (m = top; remain > m->m_len; m = m->m_next) 1716 remain -= m->m_len; 1717 1718 /* Easy case: don't have to split 'm'. */ 1719 if (remain == m->m_len) { 1720 sb->sb_mtls = m->m_next; 1721 if (sb->sb_mtls == NULL) 1722 sb->sb_mtlstail = NULL; 1723 m->m_next = NULL; 1724 goto out; 1725 } 1726 1727 /* 1728 * Need to allocate an mbuf to hold the remainder of 'm'. Try 1729 * with M_NOWAIT first. 1730 */ 1731 n = m_get(M_NOWAIT, MT_DATA); 1732 if (n == NULL) { 1733 /* 1734 * Use M_WAITOK with socket buffer unlocked. If 1735 * 'sb_mtls' changes while the lock is dropped, return 1736 * NULL to force the caller to retry. 1737 */ 1738 SOCKBUF_UNLOCK(sb); 1739 1740 n = m_get(M_WAITOK, MT_DATA); 1741 1742 SOCKBUF_LOCK(sb); 1743 if (sb->sb_mtls != top) { 1744 m_free(n); 1745 return (NULL); 1746 } 1747 } 1748 n->m_flags |= M_NOTREADY; 1749 1750 /* Store remainder in 'n'. */ 1751 n->m_len = m->m_len - remain; 1752 if (m->m_flags & M_EXT) { 1753 n->m_data = m->m_data + remain; 1754 mb_dupcl(n, m); 1755 } else { 1756 bcopy(mtod(m, caddr_t) + remain, mtod(n, caddr_t), n->m_len); 1757 } 1758 1759 /* Trim 'm' and update accounting. */ 1760 m->m_len -= n->m_len; 1761 sb->sb_tlscc -= n->m_len; 1762 sb->sb_ccc -= n->m_len; 1763 1764 /* Account for 'n'. */ 1765 sballoc_ktls_rx(sb, n); 1766 1767 /* Insert 'n' into the TLS chain. */ 1768 sb->sb_mtls = n; 1769 n->m_next = m->m_next; 1770 if (sb->sb_mtlstail == m) 1771 sb->sb_mtlstail = n; 1772 1773 /* Detach the record from the TLS chain. */ 1774 m->m_next = NULL; 1775 1776 out: 1777 MPASS(m_length(top, NULL) == len); 1778 for (m = top; m != NULL; m = m->m_next) 1779 sbfree_ktls_rx(sb, m); 1780 sb->sb_tlsdcc = len; 1781 sb->sb_ccc += len; 1782 SBCHECK(sb); 1783 return (top); 1784 } 1785 1786 static void 1787 ktls_decrypt(struct socket *so) 1788 { 1789 char tls_header[MBUF_PEXT_HDR_LEN]; 1790 struct ktls_session *tls; 1791 struct sockbuf *sb; 1792 struct tls_record_layer *hdr; 1793 struct tls_get_record tgr; 1794 struct mbuf *control, *data, *m; 1795 uint64_t seqno; 1796 int error, remain, tls_len, trail_len; 1797 1798 hdr = (struct tls_record_layer *)tls_header; 1799 sb = &so->so_rcv; 1800 SOCKBUF_LOCK(sb); 1801 KASSERT(sb->sb_flags & SB_TLS_RX_RUNNING, 1802 ("%s: socket %p not running", __func__, so)); 1803 1804 tls = sb->sb_tls_info; 1805 MPASS(tls != NULL); 1806 1807 for (;;) { 1808 /* Is there enough queued for a TLS header? */ 1809 if (sb->sb_tlscc < tls->params.tls_hlen) 1810 break; 1811 1812 m_copydata(sb->sb_mtls, 0, tls->params.tls_hlen, tls_header); 1813 tls_len = sizeof(*hdr) + ntohs(hdr->tls_length); 1814 1815 if (hdr->tls_vmajor != tls->params.tls_vmajor || 1816 hdr->tls_vminor != tls->params.tls_vminor) 1817 error = EINVAL; 1818 else if (tls_len < tls->params.tls_hlen || tls_len > 1819 tls->params.tls_hlen + TLS_MAX_MSG_SIZE_V10_2 + 1820 tls->params.tls_tlen) 1821 error = EMSGSIZE; 1822 else 1823 error = 0; 1824 if (__predict_false(error != 0)) { 1825 /* 1826 * We have a corrupted record and are likely 1827 * out of sync. The connection isn't 1828 * recoverable at this point, so abort it. 1829 */ 1830 SOCKBUF_UNLOCK(sb); 1831 counter_u64_add(ktls_offload_corrupted_records, 1); 1832 1833 CURVNET_SET(so->so_vnet); 1834 so->so_proto->pr_usrreqs->pru_abort(so); 1835 so->so_error = error; 1836 CURVNET_RESTORE(); 1837 goto deref; 1838 } 1839 1840 /* Is the entire record queued? */ 1841 if (sb->sb_tlscc < tls_len) 1842 break; 1843 1844 /* 1845 * Split out the portion of the mbuf chain containing 1846 * this TLS record. 1847 */ 1848 data = ktls_detach_record(sb, tls_len); 1849 if (data == NULL) 1850 continue; 1851 MPASS(sb->sb_tlsdcc == tls_len); 1852 1853 seqno = sb->sb_tls_seqno; 1854 sb->sb_tls_seqno++; 1855 SBCHECK(sb); 1856 SOCKBUF_UNLOCK(sb); 1857 1858 error = tls->sw_decrypt(tls, hdr, data, seqno, &trail_len); 1859 if (error) { 1860 counter_u64_add(ktls_offload_failed_crypto, 1); 1861 1862 SOCKBUF_LOCK(sb); 1863 if (sb->sb_tlsdcc == 0) { 1864 /* 1865 * sbcut/drop/flush discarded these 1866 * mbufs. 1867 */ 1868 m_freem(data); 1869 break; 1870 } 1871 1872 /* 1873 * Drop this TLS record's data, but keep 1874 * decrypting subsequent records. 1875 */ 1876 sb->sb_ccc -= tls_len; 1877 sb->sb_tlsdcc = 0; 1878 1879 CURVNET_SET(so->so_vnet); 1880 so->so_error = EBADMSG; 1881 sorwakeup_locked(so); 1882 CURVNET_RESTORE(); 1883 1884 m_freem(data); 1885 1886 SOCKBUF_LOCK(sb); 1887 continue; 1888 } 1889 1890 /* Allocate the control mbuf. */ 1891 tgr.tls_type = hdr->tls_type; 1892 tgr.tls_vmajor = hdr->tls_vmajor; 1893 tgr.tls_vminor = hdr->tls_vminor; 1894 tgr.tls_length = htobe16(tls_len - tls->params.tls_hlen - 1895 trail_len); 1896 control = sbcreatecontrol_how(&tgr, sizeof(tgr), 1897 TLS_GET_RECORD, IPPROTO_TCP, M_WAITOK); 1898 1899 SOCKBUF_LOCK(sb); 1900 if (sb->sb_tlsdcc == 0) { 1901 /* sbcut/drop/flush discarded these mbufs. */ 1902 MPASS(sb->sb_tlscc == 0); 1903 m_freem(data); 1904 m_freem(control); 1905 break; 1906 } 1907 1908 /* 1909 * Clear the 'dcc' accounting in preparation for 1910 * adding the decrypted record. 1911 */ 1912 sb->sb_ccc -= tls_len; 1913 sb->sb_tlsdcc = 0; 1914 SBCHECK(sb); 1915 1916 /* If there is no payload, drop all of the data. */ 1917 if (tgr.tls_length == htobe16(0)) { 1918 m_freem(data); 1919 data = NULL; 1920 } else { 1921 /* Trim header. */ 1922 remain = tls->params.tls_hlen; 1923 while (remain > 0) { 1924 if (data->m_len > remain) { 1925 data->m_data += remain; 1926 data->m_len -= remain; 1927 break; 1928 } 1929 remain -= data->m_len; 1930 data = m_free(data); 1931 } 1932 1933 /* Trim trailer and clear M_NOTREADY. */ 1934 remain = be16toh(tgr.tls_length); 1935 m = data; 1936 for (m = data; remain > m->m_len; m = m->m_next) { 1937 m->m_flags &= ~M_NOTREADY; 1938 remain -= m->m_len; 1939 } 1940 m->m_len = remain; 1941 m_freem(m->m_next); 1942 m->m_next = NULL; 1943 m->m_flags &= ~M_NOTREADY; 1944 1945 /* Set EOR on the final mbuf. */ 1946 m->m_flags |= M_EOR; 1947 } 1948 1949 sbappendcontrol_locked(sb, data, control, 0); 1950 } 1951 1952 sb->sb_flags &= ~SB_TLS_RX_RUNNING; 1953 1954 if ((sb->sb_state & SBS_CANTRCVMORE) != 0 && sb->sb_tlscc > 0) 1955 so->so_error = EMSGSIZE; 1956 1957 sorwakeup_locked(so); 1958 1959 deref: 1960 SOCKBUF_UNLOCK_ASSERT(sb); 1961 1962 CURVNET_SET(so->so_vnet); 1963 SOCK_LOCK(so); 1964 sorele(so); 1965 CURVNET_RESTORE(); 1966 } 1967 1968 void 1969 ktls_enqueue_to_free(struct mbuf *m) 1970 { 1971 struct ktls_wq *wq; 1972 bool running; 1973 1974 /* Mark it for freeing. */ 1975 m->m_epg_flags |= EPG_FLAG_2FREE; 1976 wq = &ktls_wq[m->m_epg_tls->wq_index]; 1977 mtx_lock(&wq->mtx); 1978 STAILQ_INSERT_TAIL(&wq->m_head, m, m_epg_stailq); 1979 running = wq->running; 1980 mtx_unlock(&wq->mtx); 1981 if (!running) 1982 wakeup(wq); 1983 } 1984 1985 static void * 1986 ktls_buffer_alloc(struct ktls_wq *wq, struct mbuf *m) 1987 { 1988 void *buf; 1989 int domain, running; 1990 1991 if (m->m_epg_npgs <= 2) 1992 return (NULL); 1993 if (ktls_buffer_zone == NULL) 1994 return (NULL); 1995 if ((u_int)(ticks - wq->lastallocfail) < hz) { 1996 /* 1997 * Rate-limit allocation attempts after a failure. 1998 * ktls_buffer_import() will acquire a per-domain mutex to check 1999 * the free page queues and may fail consistently if memory is 2000 * fragmented. 2001 */ 2002 return (NULL); 2003 } 2004 buf = uma_zalloc(ktls_buffer_zone, M_NOWAIT | M_NORECLAIM); 2005 if (buf == NULL) { 2006 domain = PCPU_GET(domain); 2007 wq->lastallocfail = ticks; 2008 2009 /* 2010 * Note that this check is "racy", but the races are 2011 * harmless, and are either a spurious wakeup if 2012 * multiple threads fail allocations before the alloc 2013 * thread wakes, or waiting an extra second in case we 2014 * see an old value of running == true. 2015 */ 2016 if (!VM_DOMAIN_EMPTY(domain)) { 2017 running = atomic_load_int(&ktls_domains[domain].alloc_td.running); 2018 if (!running) 2019 wakeup(&ktls_domains[domain].alloc_td); 2020 } 2021 } 2022 return (buf); 2023 } 2024 2025 static int 2026 ktls_encrypt_record(struct ktls_wq *wq, struct mbuf *m, 2027 struct ktls_session *tls, struct ktls_ocf_encrypt_state *state) 2028 { 2029 vm_page_t pg; 2030 int error, i, len, off; 2031 2032 KASSERT((m->m_flags & (M_EXTPG | M_NOTREADY)) == (M_EXTPG | M_NOTREADY), 2033 ("%p not unready & nomap mbuf\n", m)); 2034 KASSERT(ptoa(m->m_epg_npgs) <= ktls_maxlen, 2035 ("page count %d larger than maximum frame length %d", m->m_epg_npgs, 2036 ktls_maxlen)); 2037 2038 /* Anonymous mbufs are encrypted in place. */ 2039 if ((m->m_epg_flags & EPG_FLAG_ANON) != 0) 2040 return (tls->sw_encrypt(state, tls, m, NULL, 0)); 2041 2042 /* 2043 * For file-backed mbufs (from sendfile), anonymous wired 2044 * pages are allocated and used as the encryption destination. 2045 */ 2046 if ((state->cbuf = ktls_buffer_alloc(wq, m)) != NULL) { 2047 len = ptoa(m->m_epg_npgs - 1) + m->m_epg_last_len - 2048 m->m_epg_1st_off; 2049 state->dst_iov[0].iov_base = (char *)state->cbuf + 2050 m->m_epg_1st_off; 2051 state->dst_iov[0].iov_len = len; 2052 state->parray[0] = DMAP_TO_PHYS((vm_offset_t)state->cbuf); 2053 i = 1; 2054 } else { 2055 off = m->m_epg_1st_off; 2056 for (i = 0; i < m->m_epg_npgs; i++, off = 0) { 2057 do { 2058 pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | 2059 VM_ALLOC_NOOBJ | VM_ALLOC_NODUMP | 2060 VM_ALLOC_WIRED | VM_ALLOC_WAITFAIL); 2061 } while (pg == NULL); 2062 2063 len = m_epg_pagelen(m, i, off); 2064 state->parray[i] = VM_PAGE_TO_PHYS(pg); 2065 state->dst_iov[i].iov_base = 2066 (char *)PHYS_TO_DMAP(state->parray[i]) + off; 2067 state->dst_iov[i].iov_len = len; 2068 } 2069 } 2070 KASSERT(i + 1 <= nitems(state->dst_iov), ("dst_iov is too small")); 2071 state->dst_iov[i].iov_base = m->m_epg_trail; 2072 state->dst_iov[i].iov_len = m->m_epg_trllen; 2073 2074 error = tls->sw_encrypt(state, tls, m, state->dst_iov, i + 1); 2075 2076 if (__predict_false(error != 0)) { 2077 /* Free the anonymous pages. */ 2078 if (state->cbuf != NULL) 2079 uma_zfree(ktls_buffer_zone, state->cbuf); 2080 else { 2081 for (i = 0; i < m->m_epg_npgs; i++) { 2082 pg = PHYS_TO_VM_PAGE(state->parray[i]); 2083 (void)vm_page_unwire_noq(pg); 2084 vm_page_free(pg); 2085 } 2086 } 2087 } 2088 return (error); 2089 } 2090 2091 void 2092 ktls_enqueue(struct mbuf *m, struct socket *so, int page_count) 2093 { 2094 struct ktls_wq *wq; 2095 bool running; 2096 2097 KASSERT(((m->m_flags & (M_EXTPG | M_NOTREADY)) == 2098 (M_EXTPG | M_NOTREADY)), 2099 ("ktls_enqueue: %p not unready & nomap mbuf\n", m)); 2100 KASSERT(page_count != 0, ("enqueueing TLS mbuf with zero page count")); 2101 2102 KASSERT(m->m_epg_tls->mode == TCP_TLS_MODE_SW, ("!SW TLS mbuf")); 2103 2104 m->m_epg_enc_cnt = page_count; 2105 2106 /* 2107 * Save a pointer to the socket. The caller is responsible 2108 * for taking an additional reference via soref(). 2109 */ 2110 m->m_epg_so = so; 2111 2112 wq = &ktls_wq[m->m_epg_tls->wq_index]; 2113 mtx_lock(&wq->mtx); 2114 STAILQ_INSERT_TAIL(&wq->m_head, m, m_epg_stailq); 2115 running = wq->running; 2116 mtx_unlock(&wq->mtx); 2117 if (!running) 2118 wakeup(wq); 2119 counter_u64_add(ktls_cnt_tx_queued, 1); 2120 } 2121 2122 /* 2123 * Once a file-backed mbuf (from sendfile) has been encrypted, free 2124 * the pages from the file and replace them with the anonymous pages 2125 * allocated in ktls_encrypt_record(). 2126 */ 2127 static void 2128 ktls_finish_nonanon(struct mbuf *m, struct ktls_ocf_encrypt_state *state) 2129 { 2130 int i; 2131 2132 MPASS((m->m_epg_flags & EPG_FLAG_ANON) == 0); 2133 2134 /* Free the old pages. */ 2135 m->m_ext.ext_free(m); 2136 2137 /* Replace them with the new pages. */ 2138 if (state->cbuf != NULL) { 2139 for (i = 0; i < m->m_epg_npgs; i++) 2140 m->m_epg_pa[i] = state->parray[0] + ptoa(i); 2141 2142 /* Contig pages should go back to the cache. */ 2143 m->m_ext.ext_free = ktls_free_mext_contig; 2144 } else { 2145 for (i = 0; i < m->m_epg_npgs; i++) 2146 m->m_epg_pa[i] = state->parray[i]; 2147 2148 /* Use the basic free routine. */ 2149 m->m_ext.ext_free = mb_free_mext_pgs; 2150 } 2151 2152 /* Pages are now writable. */ 2153 m->m_epg_flags |= EPG_FLAG_ANON; 2154 } 2155 2156 static __noinline void 2157 ktls_encrypt(struct ktls_wq *wq, struct mbuf *top) 2158 { 2159 struct ktls_ocf_encrypt_state state; 2160 struct ktls_session *tls; 2161 struct socket *so; 2162 struct mbuf *m; 2163 int error, npages, total_pages; 2164 2165 so = top->m_epg_so; 2166 tls = top->m_epg_tls; 2167 KASSERT(tls != NULL, ("tls = NULL, top = %p\n", top)); 2168 KASSERT(so != NULL, ("so = NULL, top = %p\n", top)); 2169 #ifdef INVARIANTS 2170 top->m_epg_so = NULL; 2171 #endif 2172 total_pages = top->m_epg_enc_cnt; 2173 npages = 0; 2174 2175 /* 2176 * Encrypt the TLS records in the chain of mbufs starting with 2177 * 'top'. 'total_pages' gives us a total count of pages and is 2178 * used to know when we have finished encrypting the TLS 2179 * records originally queued with 'top'. 2180 * 2181 * NB: These mbufs are queued in the socket buffer and 2182 * 'm_next' is traversing the mbufs in the socket buffer. The 2183 * socket buffer lock is not held while traversing this chain. 2184 * Since the mbufs are all marked M_NOTREADY their 'm_next' 2185 * pointers should be stable. However, the 'm_next' of the 2186 * last mbuf encrypted is not necessarily NULL. It can point 2187 * to other mbufs appended while 'top' was on the TLS work 2188 * queue. 2189 * 2190 * Each mbuf holds an entire TLS record. 2191 */ 2192 error = 0; 2193 for (m = top; npages != total_pages; m = m->m_next) { 2194 KASSERT(m->m_epg_tls == tls, 2195 ("different TLS sessions in a single mbuf chain: %p vs %p", 2196 tls, m->m_epg_tls)); 2197 KASSERT(npages + m->m_epg_npgs <= total_pages, 2198 ("page count mismatch: top %p, total_pages %d, m %p", top, 2199 total_pages, m)); 2200 2201 error = ktls_encrypt_record(wq, m, tls, &state); 2202 if (error) { 2203 counter_u64_add(ktls_offload_failed_crypto, 1); 2204 break; 2205 } 2206 2207 if ((m->m_epg_flags & EPG_FLAG_ANON) == 0) 2208 ktls_finish_nonanon(m, &state); 2209 2210 npages += m->m_epg_nrdy; 2211 2212 /* 2213 * Drop a reference to the session now that it is no 2214 * longer needed. Existing code depends on encrypted 2215 * records having no associated session vs 2216 * yet-to-be-encrypted records having an associated 2217 * session. 2218 */ 2219 m->m_epg_tls = NULL; 2220 ktls_free(tls); 2221 } 2222 2223 CURVNET_SET(so->so_vnet); 2224 if (error == 0) { 2225 (void)(*so->so_proto->pr_usrreqs->pru_ready)(so, top, npages); 2226 } else { 2227 so->so_proto->pr_usrreqs->pru_abort(so); 2228 so->so_error = EIO; 2229 mb_free_notready(top, total_pages); 2230 } 2231 2232 SOCK_LOCK(so); 2233 sorele(so); 2234 CURVNET_RESTORE(); 2235 } 2236 2237 void 2238 ktls_encrypt_cb(struct ktls_ocf_encrypt_state *state, int error) 2239 { 2240 struct ktls_session *tls; 2241 struct socket *so; 2242 struct mbuf *m; 2243 int npages; 2244 2245 m = state->m; 2246 2247 if ((m->m_epg_flags & EPG_FLAG_ANON) == 0) 2248 ktls_finish_nonanon(m, state); 2249 2250 so = state->so; 2251 free(state, M_KTLS); 2252 2253 /* 2254 * Drop a reference to the session now that it is no longer 2255 * needed. Existing code depends on encrypted records having 2256 * no associated session vs yet-to-be-encrypted records having 2257 * an associated session. 2258 */ 2259 tls = m->m_epg_tls; 2260 m->m_epg_tls = NULL; 2261 ktls_free(tls); 2262 2263 if (error != 0) 2264 counter_u64_add(ktls_offload_failed_crypto, 1); 2265 2266 CURVNET_SET(so->so_vnet); 2267 npages = m->m_epg_nrdy; 2268 2269 if (error == 0) { 2270 (void)(*so->so_proto->pr_usrreqs->pru_ready)(so, m, npages); 2271 } else { 2272 so->so_proto->pr_usrreqs->pru_abort(so); 2273 so->so_error = EIO; 2274 mb_free_notready(m, npages); 2275 } 2276 2277 SOCK_LOCK(so); 2278 sorele(so); 2279 CURVNET_RESTORE(); 2280 } 2281 2282 /* 2283 * Similar to ktls_encrypt, but used with asynchronous OCF backends 2284 * (coprocessors) where encryption does not use host CPU resources and 2285 * it can be beneficial to queue more requests than CPUs. 2286 */ 2287 static __noinline void 2288 ktls_encrypt_async(struct ktls_wq *wq, struct mbuf *top) 2289 { 2290 struct ktls_ocf_encrypt_state *state; 2291 struct ktls_session *tls; 2292 struct socket *so; 2293 struct mbuf *m, *n; 2294 int error, mpages, npages, total_pages; 2295 2296 so = top->m_epg_so; 2297 tls = top->m_epg_tls; 2298 KASSERT(tls != NULL, ("tls = NULL, top = %p\n", top)); 2299 KASSERT(so != NULL, ("so = NULL, top = %p\n", top)); 2300 #ifdef INVARIANTS 2301 top->m_epg_so = NULL; 2302 #endif 2303 total_pages = top->m_epg_enc_cnt; 2304 npages = 0; 2305 2306 error = 0; 2307 for (m = top; npages != total_pages; m = n) { 2308 KASSERT(m->m_epg_tls == tls, 2309 ("different TLS sessions in a single mbuf chain: %p vs %p", 2310 tls, m->m_epg_tls)); 2311 KASSERT(npages + m->m_epg_npgs <= total_pages, 2312 ("page count mismatch: top %p, total_pages %d, m %p", top, 2313 total_pages, m)); 2314 2315 state = malloc(sizeof(*state), M_KTLS, M_WAITOK | M_ZERO); 2316 soref(so); 2317 state->so = so; 2318 state->m = m; 2319 2320 mpages = m->m_epg_nrdy; 2321 n = m->m_next; 2322 2323 error = ktls_encrypt_record(wq, m, tls, state); 2324 if (error) { 2325 counter_u64_add(ktls_offload_failed_crypto, 1); 2326 free(state, M_KTLS); 2327 CURVNET_SET(so->so_vnet); 2328 SOCK_LOCK(so); 2329 sorele(so); 2330 CURVNET_RESTORE(); 2331 break; 2332 } 2333 2334 npages += mpages; 2335 } 2336 2337 CURVNET_SET(so->so_vnet); 2338 if (error != 0) { 2339 so->so_proto->pr_usrreqs->pru_abort(so); 2340 so->so_error = EIO; 2341 mb_free_notready(m, total_pages - npages); 2342 } 2343 2344 SOCK_LOCK(so); 2345 sorele(so); 2346 CURVNET_RESTORE(); 2347 } 2348 2349 static void 2350 ktls_alloc_thread(void *ctx) 2351 { 2352 struct ktls_domain_info *ktls_domain = ctx; 2353 struct ktls_alloc_thread *sc = &ktls_domain->alloc_td; 2354 void **buf; 2355 struct sysctl_oid *oid; 2356 char name[80]; 2357 int i, nbufs; 2358 2359 curthread->td_domain.dr_policy = 2360 DOMAINSET_PREF(PCPU_GET(domain)); 2361 snprintf(name, sizeof(name), "domain%d", PCPU_GET(domain)); 2362 if (bootverbose) 2363 printf("Starting KTLS alloc thread for domain %d\n", 2364 PCPU_GET(domain)); 2365 oid = SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(_kern_ipc_tls), OID_AUTO, 2366 name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); 2367 SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, "allocs", 2368 CTLFLAG_RD, &sc->allocs, 0, "buffers allocated"); 2369 SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, "wakeups", 2370 CTLFLAG_RD, &sc->wakeups, 0, "thread wakeups"); 2371 SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, "running", 2372 CTLFLAG_RD, &sc->running, 0, "thread running"); 2373 2374 buf = NULL; 2375 nbufs = 0; 2376 for (;;) { 2377 atomic_store_int(&sc->running, 0); 2378 tsleep(sc, PZERO | PNOLOCK, "-", 0); 2379 atomic_store_int(&sc->running, 1); 2380 sc->wakeups++; 2381 if (nbufs != ktls_max_alloc) { 2382 free(buf, M_KTLS); 2383 nbufs = atomic_load_int(&ktls_max_alloc); 2384 buf = malloc(sizeof(void *) * nbufs, M_KTLS, 2385 M_WAITOK | M_ZERO); 2386 } 2387 /* 2388 * Below we allocate nbufs with different allocation 2389 * flags than we use when allocating normally during 2390 * encryption in the ktls worker thread. We specify 2391 * M_NORECLAIM in the worker thread. However, we omit 2392 * that flag here and add M_WAITOK so that the VM 2393 * system is permitted to perform expensive work to 2394 * defragment memory. We do this here, as it does not 2395 * matter if this thread blocks. If we block a ktls 2396 * worker thread, we risk developing backlogs of 2397 * buffers to be encrypted, leading to surges of 2398 * traffic and potential NIC output drops. 2399 */ 2400 for (i = 0; i < nbufs; i++) { 2401 buf[i] = uma_zalloc(ktls_buffer_zone, M_WAITOK); 2402 sc->allocs++; 2403 } 2404 for (i = 0; i < nbufs; i++) { 2405 uma_zfree(ktls_buffer_zone, buf[i]); 2406 buf[i] = NULL; 2407 } 2408 } 2409 } 2410 2411 static void 2412 ktls_work_thread(void *ctx) 2413 { 2414 struct ktls_wq *wq = ctx; 2415 struct mbuf *m, *n; 2416 struct socket *so, *son; 2417 STAILQ_HEAD(, mbuf) local_m_head; 2418 STAILQ_HEAD(, socket) local_so_head; 2419 2420 if (ktls_bind_threads > 1) { 2421 curthread->td_domain.dr_policy = 2422 DOMAINSET_PREF(PCPU_GET(domain)); 2423 } 2424 #if defined(__aarch64__) || defined(__amd64__) || defined(__i386__) 2425 fpu_kern_thread(0); 2426 #endif 2427 for (;;) { 2428 mtx_lock(&wq->mtx); 2429 while (STAILQ_EMPTY(&wq->m_head) && 2430 STAILQ_EMPTY(&wq->so_head)) { 2431 wq->running = false; 2432 mtx_sleep(wq, &wq->mtx, 0, "-", 0); 2433 wq->running = true; 2434 } 2435 2436 STAILQ_INIT(&local_m_head); 2437 STAILQ_CONCAT(&local_m_head, &wq->m_head); 2438 STAILQ_INIT(&local_so_head); 2439 STAILQ_CONCAT(&local_so_head, &wq->so_head); 2440 mtx_unlock(&wq->mtx); 2441 2442 STAILQ_FOREACH_SAFE(m, &local_m_head, m_epg_stailq, n) { 2443 if (m->m_epg_flags & EPG_FLAG_2FREE) { 2444 ktls_free(m->m_epg_tls); 2445 m_free_raw(m); 2446 } else { 2447 if (m->m_epg_tls->sync_dispatch) 2448 ktls_encrypt(wq, m); 2449 else 2450 ktls_encrypt_async(wq, m); 2451 counter_u64_add(ktls_cnt_tx_queued, -1); 2452 } 2453 } 2454 2455 STAILQ_FOREACH_SAFE(so, &local_so_head, so_ktls_rx_list, son) { 2456 ktls_decrypt(so); 2457 counter_u64_add(ktls_cnt_rx_queued, -1); 2458 } 2459 } 2460 } 2461 2462 #if defined(INET) || defined(INET6) 2463 static void 2464 ktls_disable_ifnet_help(void *context, int pending __unused) 2465 { 2466 struct ktls_session *tls; 2467 struct inpcb *inp; 2468 struct tcpcb *tp; 2469 struct socket *so; 2470 int err; 2471 2472 tls = context; 2473 inp = tls->inp; 2474 if (inp == NULL) 2475 return; 2476 INP_WLOCK(inp); 2477 so = inp->inp_socket; 2478 MPASS(so != NULL); 2479 if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) || 2480 (inp->inp_flags2 & INP_FREED)) { 2481 goto out; 2482 } 2483 2484 if (so->so_snd.sb_tls_info != NULL) 2485 err = ktls_set_tx_mode(so, TCP_TLS_MODE_SW); 2486 else 2487 err = ENXIO; 2488 if (err == 0) { 2489 counter_u64_add(ktls_ifnet_disable_ok, 1); 2490 /* ktls_set_tx_mode() drops inp wlock, so recheck flags */ 2491 if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) == 0 && 2492 (inp->inp_flags2 & INP_FREED) == 0 && 2493 (tp = intotcpcb(inp)) != NULL && 2494 tp->t_fb->tfb_hwtls_change != NULL) 2495 (*tp->t_fb->tfb_hwtls_change)(tp, 0); 2496 } else { 2497 counter_u64_add(ktls_ifnet_disable_fail, 1); 2498 } 2499 2500 out: 2501 SOCK_LOCK(so); 2502 sorele(so); 2503 if (!in_pcbrele_wlocked(inp)) 2504 INP_WUNLOCK(inp); 2505 ktls_free(tls); 2506 } 2507 2508 /* 2509 * Called when re-transmits are becoming a substantial portion of the 2510 * sends on this connection. When this happens, we transition the 2511 * connection to software TLS. This is needed because most inline TLS 2512 * NICs keep crypto state only for in-order transmits. This means 2513 * that to handle a TCP rexmit (which is out-of-order), the NIC must 2514 * re-DMA the entire TLS record up to and including the current 2515 * segment. This means that when re-transmitting the last ~1448 byte 2516 * segment of a 16KB TLS record, we could wind up re-DMA'ing an order 2517 * of magnitude more data than we are sending. This can cause the 2518 * PCIe link to saturate well before the network, which can cause 2519 * output drops, and a general loss of capacity. 2520 */ 2521 void 2522 ktls_disable_ifnet(void *arg) 2523 { 2524 struct tcpcb *tp; 2525 struct inpcb *inp; 2526 struct socket *so; 2527 struct ktls_session *tls; 2528 2529 tp = arg; 2530 inp = tp->t_inpcb; 2531 INP_WLOCK_ASSERT(inp); 2532 so = inp->inp_socket; 2533 SOCK_LOCK(so); 2534 tls = so->so_snd.sb_tls_info; 2535 if (tls->disable_ifnet_pending) { 2536 SOCK_UNLOCK(so); 2537 return; 2538 } 2539 2540 /* 2541 * note that disable_ifnet_pending is never cleared; disabling 2542 * ifnet can only be done once per session, so we never want 2543 * to do it again 2544 */ 2545 2546 (void)ktls_hold(tls); 2547 in_pcbref(inp); 2548 soref(so); 2549 tls->disable_ifnet_pending = true; 2550 tls->inp = inp; 2551 SOCK_UNLOCK(so); 2552 TASK_INIT(&tls->disable_ifnet_task, 0, ktls_disable_ifnet_help, tls); 2553 (void)taskqueue_enqueue(taskqueue_thread, &tls->disable_ifnet_task); 2554 } 2555 #endif 2556