xref: /freebsd/sys/kern/uipc_ktls.c (revision 85df11a1dec6eab9efbce9fd20712402a8e7ac7c)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2014-2019 Netflix Inc.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 #include "opt_inet.h"
30 #include "opt_inet6.h"
31 #include "opt_kern_tls.h"
32 #include "opt_ratelimit.h"
33 #include "opt_rss.h"
34 
35 #include <sys/param.h>
36 #include <sys/kernel.h>
37 #include <sys/domainset.h>
38 #include <sys/endian.h>
39 #include <sys/ktls.h>
40 #include <sys/lock.h>
41 #include <sys/mbuf.h>
42 #include <sys/mutex.h>
43 #include <sys/rmlock.h>
44 #include <sys/proc.h>
45 #include <sys/protosw.h>
46 #include <sys/refcount.h>
47 #include <sys/smp.h>
48 #include <sys/socket.h>
49 #include <sys/socketvar.h>
50 #include <sys/sysctl.h>
51 #include <sys/taskqueue.h>
52 #include <sys/kthread.h>
53 #include <sys/uio.h>
54 #include <sys/vmmeter.h>
55 #if defined(__aarch64__) || defined(__amd64__) || defined(__i386__)
56 #include <machine/pcb.h>
57 #endif
58 #include <machine/vmparam.h>
59 #include <net/if.h>
60 #include <net/if_var.h>
61 #ifdef RSS
62 #include <net/netisr.h>
63 #include <net/rss_config.h>
64 #endif
65 #include <net/route.h>
66 #include <net/route/nhop.h>
67 #include <netinet/in.h>
68 #include <netinet/in_pcb.h>
69 #include <netinet/tcp_var.h>
70 #ifdef TCP_OFFLOAD
71 #include <netinet/tcp_offload.h>
72 #endif
73 #include <opencrypto/cryptodev.h>
74 #include <opencrypto/ktls.h>
75 #include <vm/vm.h>
76 #include <vm/vm_pageout.h>
77 #include <vm/vm_page.h>
78 #include <vm/vm_pagequeue.h>
79 
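/*
 * Per-CPU work queue of pending software crypto work: mbufs carrying
 * TX records to be encrypted and sockets with RX data to be
 * decrypted, each serviced by a dedicated worker thread (see
 * ktls_work_thread()).
 */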
80 struct ktls_wq {
81 	struct mtx	mtx;
82 	STAILQ_HEAD(, mbuf) m_head;
83 	STAILQ_HEAD(, socket) so_head;
84 	bool		running;
85 	int		lastallocfail;
86 } __aligned(CACHE_LINE_SIZE);
87 
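/*
 * Per-domain state for the reclaim kthread started from ktls_init();
 * the thread's wakeup and reclaim counts are tracked here.
 */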
88 struct ktls_reclaim_thread {
89 	uint64_t wakeups;
90 	uint64_t reclaims;
91 	struct thread *td;
92 	int running;
93 };
94 
95 struct ktls_domain_info {
96 	int count;
97 	int cpu[MAXCPU];
98 	struct ktls_reclaim_thread reclaim_td;
99 };
100 
101 struct ktls_domain_info ktls_domains[MAXMEMDOM];
102 static struct ktls_wq *ktls_wq;
103 static struct proc *ktls_proc;
104 static uma_zone_t ktls_session_zone;
105 static uma_zone_t ktls_buffer_zone;
106 static uint16_t ktls_cpuid_lookup[MAXCPU];
107 static int ktls_init_state;
108 static struct sx ktls_init_lock;
109 SX_SYSINIT(ktls_init_lock, &ktls_init_lock, "ktls init");
110 
111 SYSCTL_NODE(_kern_ipc, OID_AUTO, tls, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
112     "Kernel TLS offload");
113 SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
114     "Kernel TLS offload stats");
115 
116 #ifdef RSS
117 static int ktls_bind_threads = 1;
118 #else
119 static int ktls_bind_threads;
120 #endif
121 SYSCTL_INT(_kern_ipc_tls, OID_AUTO, bind_threads, CTLFLAG_RDTUN,
122     &ktls_bind_threads, 0,
123     "Bind crypto threads to cores (1) or cores and domains (2) at boot");
124 
125 static u_int ktls_maxlen = 16384;
126 SYSCTL_UINT(_kern_ipc_tls, OID_AUTO, maxlen, CTLFLAG_RDTUN,
127     &ktls_maxlen, 0, "Maximum TLS record size");
128 
129 static int ktls_number_threads;
130 SYSCTL_INT(_kern_ipc_tls_stats, OID_AUTO, threads, CTLFLAG_RD,
131     &ktls_number_threads, 0,
132     "Number of TLS threads in thread-pool");
133 
134 unsigned int ktls_ifnet_max_rexmit_pct = 2;
135 SYSCTL_UINT(_kern_ipc_tls, OID_AUTO, ifnet_max_rexmit_pct, CTLFLAG_RWTUN,
136     &ktls_ifnet_max_rexmit_pct, 2,
137     "Max percent bytes retransmitted before ifnet TLS is disabled");
138 
139 static bool ktls_offload_enable;
140 SYSCTL_BOOL(_kern_ipc_tls, OID_AUTO, enable, CTLFLAG_RWTUN,
141     &ktls_offload_enable, 0,
142     "Enable support for kernel TLS offload");
143 
144 static bool ktls_cbc_enable = true;
145 SYSCTL_BOOL(_kern_ipc_tls, OID_AUTO, cbc_enable, CTLFLAG_RWTUN,
146     &ktls_cbc_enable, 1,
147     "Enable support of AES-CBC crypto for kernel TLS");
148 
149 static bool ktls_sw_buffer_cache = true;
150 SYSCTL_BOOL(_kern_ipc_tls, OID_AUTO, sw_buffer_cache, CTLFLAG_RDTUN,
151     &ktls_sw_buffer_cache, 1,
152     "Enable caching of output buffers for SW encryption");
153 
154 static int ktls_max_reclaim = 1024;
155 SYSCTL_INT(_kern_ipc_tls, OID_AUTO, max_reclaim, CTLFLAG_RWTUN,
156     &ktls_max_reclaim, 128,
157     "Max number of 16k buffers to reclaim in thread context");
158 
159 static COUNTER_U64_DEFINE_EARLY(ktls_tasks_active);
160 SYSCTL_COUNTER_U64(_kern_ipc_tls, OID_AUTO, tasks_active, CTLFLAG_RD,
161     &ktls_tasks_active, "Number of active tasks");
162 
163 static COUNTER_U64_DEFINE_EARLY(ktls_cnt_tx_pending);
164 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, sw_tx_pending, CTLFLAG_RD,
165     &ktls_cnt_tx_pending,
166     "Number of TLS 1.0 records waiting for earlier TLS records");
167 
168 static COUNTER_U64_DEFINE_EARLY(ktls_cnt_tx_queued);
169 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, sw_tx_inqueue, CTLFLAG_RD,
170     &ktls_cnt_tx_queued,
171     "Number of TLS records in queue to tasks for SW encryption");
172 
173 static COUNTER_U64_DEFINE_EARLY(ktls_cnt_rx_queued);
174 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, sw_rx_inqueue, CTLFLAG_RD,
175     &ktls_cnt_rx_queued,
176     "Number of TLS sockets in queue to tasks for SW decryption");
177 
178 static COUNTER_U64_DEFINE_EARLY(ktls_offload_total);
179 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, offload_total,
180     CTLFLAG_RD, &ktls_offload_total,
181     "Total successful TLS setups (parameters set)");
182 
183 static COUNTER_U64_DEFINE_EARLY(ktls_offload_enable_calls);
184 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, enable_calls,
185     CTLFLAG_RD, &ktls_offload_enable_calls,
186     "Total number of TLS enable calls made");
187 
188 static COUNTER_U64_DEFINE_EARLY(ktls_offload_active);
189 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, active, CTLFLAG_RD,
190     &ktls_offload_active, "Total Active TLS sessions");
191 
192 static COUNTER_U64_DEFINE_EARLY(ktls_offload_corrupted_records);
193 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, corrupted_records, CTLFLAG_RD,
194     &ktls_offload_corrupted_records, "Total corrupted TLS records received");
195 
196 static COUNTER_U64_DEFINE_EARLY(ktls_offload_failed_crypto);
197 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, failed_crypto, CTLFLAG_RD,
198     &ktls_offload_failed_crypto, "Total TLS crypto failures");
199 
200 static COUNTER_U64_DEFINE_EARLY(ktls_switch_to_ifnet);
201 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, switch_to_ifnet, CTLFLAG_RD,
202     &ktls_switch_to_ifnet, "TLS sessions switched from SW to ifnet");
203 
204 static COUNTER_U64_DEFINE_EARLY(ktls_switch_to_sw);
205 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, switch_to_sw, CTLFLAG_RD,
206     &ktls_switch_to_sw, "TLS sessions switched from ifnet to SW");
207 
208 static COUNTER_U64_DEFINE_EARLY(ktls_switch_failed);
209 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, switch_failed, CTLFLAG_RD,
210     &ktls_switch_failed, "TLS sessions unable to switch between SW and ifnet");
211 
212 static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_disable_fail);
213 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, ifnet_disable_failed, CTLFLAG_RD,
214     &ktls_ifnet_disable_fail, "TLS sessions unable to switch to SW from ifnet");
215 
216 static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_disable_ok);
217 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, ifnet_disable_ok, CTLFLAG_RD,
218     &ktls_ifnet_disable_ok, "TLS sessions able to switch to SW from ifnet");
219 
220 static COUNTER_U64_DEFINE_EARLY(ktls_destroy_task);
221 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, destroy_task, CTLFLAG_RD,
222     &ktls_destroy_task,
223     "Number of times ktls session was destroyed via taskqueue");
224 
225 SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, sw, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
226     "Software TLS session stats");
227 SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, ifnet, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
228     "Hardware (ifnet) TLS session stats");
229 #ifdef TCP_OFFLOAD
230 SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, toe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
231     "TOE TLS session stats");
232 #endif
233 
234 static COUNTER_U64_DEFINE_EARLY(ktls_sw_cbc);
235 SYSCTL_COUNTER_U64(_kern_ipc_tls_sw, OID_AUTO, cbc, CTLFLAG_RD, &ktls_sw_cbc,
236     "Active number of software TLS sessions using AES-CBC");
237 
238 static COUNTER_U64_DEFINE_EARLY(ktls_sw_gcm);
239 SYSCTL_COUNTER_U64(_kern_ipc_tls_sw, OID_AUTO, gcm, CTLFLAG_RD, &ktls_sw_gcm,
240     "Active number of software TLS sessions using AES-GCM");
241 
242 static COUNTER_U64_DEFINE_EARLY(ktls_sw_chacha20);
243 SYSCTL_COUNTER_U64(_kern_ipc_tls_sw, OID_AUTO, chacha20, CTLFLAG_RD,
244     &ktls_sw_chacha20,
245     "Active number of software TLS sessions using Chacha20-Poly1305");
246 
247 static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_cbc);
248 SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, cbc, CTLFLAG_RD,
249     &ktls_ifnet_cbc,
250     "Active number of ifnet TLS sessions using AES-CBC");
251 
252 static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_gcm);
253 SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, gcm, CTLFLAG_RD,
254     &ktls_ifnet_gcm,
255     "Active number of ifnet TLS sessions using AES-GCM");
256 
257 static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_chacha20);
258 SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, chacha20, CTLFLAG_RD,
259     &ktls_ifnet_chacha20,
260     "Active number of ifnet TLS sessions using Chacha20-Poly1305");
261 
262 static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_reset);
263 SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, reset, CTLFLAG_RD,
264     &ktls_ifnet_reset, "TLS sessions updated to a new ifnet send tag");
265 
266 static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_reset_dropped);
267 SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, reset_dropped, CTLFLAG_RD,
268     &ktls_ifnet_reset_dropped,
269     "TLS sessions dropped after failing to update ifnet send tag");
270 
271 static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_reset_failed);
272 SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, reset_failed, CTLFLAG_RD,
273     &ktls_ifnet_reset_failed,
274     "TLS sessions that failed to allocate a new ifnet send tag");
275 
276 static int ktls_ifnet_permitted = 1;
277 SYSCTL_UINT(_kern_ipc_tls_ifnet, OID_AUTO, permitted, CTLFLAG_RWTUN,
278     &ktls_ifnet_permitted, 1,
279     "Whether to permit hardware (ifnet) TLS sessions");
280 
281 #ifdef TCP_OFFLOAD
282 static COUNTER_U64_DEFINE_EARLY(ktls_toe_cbc);
283 SYSCTL_COUNTER_U64(_kern_ipc_tls_toe, OID_AUTO, cbc, CTLFLAG_RD,
284     &ktls_toe_cbc,
285     "Active number of TOE TLS sessions using AES-CBC");
286 
287 static COUNTER_U64_DEFINE_EARLY(ktls_toe_gcm);
288 SYSCTL_COUNTER_U64(_kern_ipc_tls_toe, OID_AUTO, gcm, CTLFLAG_RD,
289     &ktls_toe_gcm,
290     "Active number of TOE TLS sessions using AES-GCM");
291 
292 static COUNTER_U64_DEFINE_EARLY(ktls_toe_chacha20);
293 SYSCTL_COUNTER_U64(_kern_ipc_tls_toe, OID_AUTO, chacha20, CTLFLAG_RD,
294     &ktls_toe_chacha20,
295     "Active number of TOE TLS sessions using Chacha20-Poly1305");
296 #endif
297 
298 static MALLOC_DEFINE(M_KTLS, "ktls", "Kernel TLS");
299 
300 static void ktls_reclaim_thread(void *ctx);
301 static void ktls_reset_receive_tag(void *context, int pending);
302 static void ktls_reset_send_tag(void *context, int pending);
303 static void ktls_work_thread(void *ctx);
304 
305 int
306 ktls_copyin_tls_enable(struct sockopt *sopt, struct tls_enable *tls)
307 {
308 	struct tls_enable_v0 tls_v0;
309 	int error;
310 	uint8_t *cipher_key = NULL, *iv = NULL, *auth_key = NULL;
311 
312 	if (sopt->sopt_valsize == sizeof(tls_v0)) {
313 		error = sooptcopyin(sopt, &tls_v0, sizeof(tls_v0), sizeof(tls_v0));
314 		if (error != 0)
315 			goto done;
316 		memset(tls, 0, sizeof(*tls));
317 		tls->cipher_key = tls_v0.cipher_key;
318 		tls->iv = tls_v0.iv;
319 		tls->auth_key = tls_v0.auth_key;
320 		tls->cipher_algorithm = tls_v0.cipher_algorithm;
321 		tls->cipher_key_len = tls_v0.cipher_key_len;
322 		tls->iv_len = tls_v0.iv_len;
323 		tls->auth_algorithm = tls_v0.auth_algorithm;
324 		tls->auth_key_len = tls_v0.auth_key_len;
325 		tls->flags = tls_v0.flags;
326 		tls->tls_vmajor = tls_v0.tls_vmajor;
327 		tls->tls_vminor = tls_v0.tls_vminor;
328 	} else
329 		error = sooptcopyin(sopt, tls, sizeof(*tls), sizeof(*tls));
330 
331 	if (error != 0)
332 		goto done;
333 
334 	/*
335 	 * Now do a deep copy of the variable-length arrays in the struct, so that
336 	 * subsequent consumers of it can reliably assume kernel memory. This
337 	 * requires doing our own allocations, which we will free in the
338 	 * error paths so that our caller need only worry about outstanding
339 	 * allocations existing on successful return.
340 	 */
341 	cipher_key = malloc(tls->cipher_key_len, M_KTLS, M_WAITOK);
342 	iv = malloc(tls->iv_len, M_KTLS, M_WAITOK);
343 	auth_key = malloc(tls->auth_key_len, M_KTLS, M_WAITOK);
344 	if (sopt->sopt_td != NULL) {
345 		error = copyin(tls->cipher_key, cipher_key, tls->cipher_key_len);
346 		if (error != 0)
347 			goto done;
348 		error = copyin(tls->iv, iv, tls->iv_len);
349 		if (error != 0)
350 			goto done;
351 		error = copyin(tls->auth_key, auth_key, tls->auth_key_len);
352 		if (error != 0)
353 			goto done;
354 	} else {
355 		bcopy(tls->cipher_key, cipher_key, tls->cipher_key_len);
356 		bcopy(tls->iv, iv, tls->iv_len);
357 		bcopy(tls->auth_key, auth_key, tls->auth_key_len);
358 	}
359 	tls->cipher_key = cipher_key;
360 	tls->iv = iv;
361 	tls->auth_key = auth_key;
362 
363 done:
364 	if (error != 0) {
365 		zfree(cipher_key, M_KTLS);
366 		zfree(iv, M_KTLS);
367 		zfree(auth_key, M_KTLS);
368 	}
369 
370 	return (error);
371 }
372 
373 void
374 ktls_cleanup_tls_enable(struct tls_enable *tls)
375 {
376 	zfree(__DECONST(void *, tls->cipher_key), M_KTLS);
377 	zfree(__DECONST(void *, tls->iv), M_KTLS);
378 	zfree(__DECONST(void *, tls->auth_key), M_KTLS);
379 }
380 
381 static u_int
382 ktls_get_cpu(struct socket *so)
383 {
384 	struct inpcb *inp;
385 #ifdef NUMA
386 	struct ktls_domain_info *di;
387 #endif
388 	u_int cpuid;
389 
390 	inp = sotoinpcb(so);
391 #ifdef RSS
392 	cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
393 	if (cpuid != NETISR_CPUID_NONE)
394 		return (cpuid);
395 #endif
396 	/*
397 	 * Just use the flowid to shard connections in a repeatable
398 	 * fashion.  Note that TLS 1.0 sessions rely on the
399 	 * serialization provided by having the same connection use
400 	 * the same queue.
401 	 */
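	/*
	 * In the fallback below, ktls_cpuid_lookup maps the
	 * flowid-derived worker index to a (possibly sparse) CPU id;
	 * it is populated in ktls_init().
	 */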
402 #ifdef NUMA
403 	if (ktls_bind_threads > 1 && inp->inp_numa_domain != M_NODOM) {
404 		di = &ktls_domains[inp->inp_numa_domain];
405 		cpuid = di->cpu[inp->inp_flowid % di->count];
406 	} else
407 #endif
408 		cpuid = ktls_cpuid_lookup[inp->inp_flowid % ktls_number_threads];
409 	return (cpuid);
410 }
411 
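/*
 * UMA cache-zone import/release callbacks for ktls_buffer_zone: each
 * buffer is a run of wired, physically contiguous pages spanning
 * ktls_maxlen bytes, handed out as direct-map (DMAP) addresses.
 */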
412 static int
413 ktls_buffer_import(void *arg, void **store, int count, int domain, int flags)
414 {
415 	vm_page_t m;
416 	int i, req;
417 
418 	KASSERT((ktls_maxlen & PAGE_MASK) == 0,
419 	    ("%s: ktls max length %d is not page size-aligned",
420 	    __func__, ktls_maxlen));
421 
422 	req = VM_ALLOC_WIRED | VM_ALLOC_NODUMP | malloc2vm_flags(flags);
423 	for (i = 0; i < count; i++) {
424 		m = vm_page_alloc_noobj_contig_domain(domain, req,
425 		    atop(ktls_maxlen), 0, ~0ul, PAGE_SIZE, 0,
426 		    VM_MEMATTR_DEFAULT);
427 		if (m == NULL)
428 			break;
429 		store[i] = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
430 	}
431 	return (i);
432 }
433 
434 static void
435 ktls_buffer_release(void *arg __unused, void **store, int count)
436 {
437 	vm_page_t m;
438 	int i, j;
439 
440 	for (i = 0; i < count; i++) {
441 		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)store[i]));
442 		for (j = 0; j < atop(ktls_maxlen); j++) {
443 			(void)vm_page_unwire_noq(m + j);
444 			vm_page_free(m + j);
445 		}
446 	}
447 }
448 
449 static void
450 ktls_free_mext_contig(struct mbuf *m)
451 {
452 	M_ASSERTEXTPG(m);
453 	uma_zfree(ktls_buffer_zone, (void *)PHYS_TO_DMAP(m->m_epg_pa[0]));
454 }
455 
456 static int
457 ktls_init(void)
458 {
459 	struct thread *td;
460 	struct pcpu *pc;
461 	int count, domain, error, i;
462 
463 	ktls_wq = malloc(sizeof(*ktls_wq) * (mp_maxid + 1), M_KTLS,
464 	    M_WAITOK | M_ZERO);
465 
466 	ktls_session_zone = uma_zcreate("ktls_session",
467 	    sizeof(struct ktls_session),
468 	    NULL, NULL, NULL, NULL,
469 	    UMA_ALIGN_CACHE, 0);
470 
471 	if (ktls_sw_buffer_cache) {
472 		ktls_buffer_zone = uma_zcache_create("ktls_buffers",
473 		    roundup2(ktls_maxlen, PAGE_SIZE), NULL, NULL, NULL, NULL,
474 		    ktls_buffer_import, ktls_buffer_release, NULL,
475 		    UMA_ZONE_FIRSTTOUCH);
476 	}
477 
478 	/*
479 	 * Initialize the workqueues to run the TLS work.  We create a
480 	 * work queue for each CPU.
481 	 */
482 	CPU_FOREACH(i) {
483 		STAILQ_INIT(&ktls_wq[i].m_head);
484 		STAILQ_INIT(&ktls_wq[i].so_head);
485 		mtx_init(&ktls_wq[i].mtx, "ktls work queue", NULL, MTX_DEF);
486 		if (ktls_bind_threads > 1) {
487 			pc = pcpu_find(i);
488 			domain = pc->pc_domain;
489 			count = ktls_domains[domain].count;
490 			ktls_domains[domain].cpu[count] = i;
491 			ktls_domains[domain].count++;
492 		}
493 		ktls_cpuid_lookup[ktls_number_threads] = i;
494 		ktls_number_threads++;
495 	}
496 
497 	/*
498 	 * If we somehow have an empty domain, fall back to choosing
499 	 * among all KTLS threads.
500 	 */
501 	if (ktls_bind_threads > 1) {
502 		for (i = 0; i < vm_ndomains; i++) {
503 			if (ktls_domains[i].count == 0) {
504 				ktls_bind_threads = 1;
505 				break;
506 			}
507 		}
508 	}
509 
510 	/* Start kthreads for each workqueue. */
511 	CPU_FOREACH(i) {
512 		error = kproc_kthread_add(ktls_work_thread, &ktls_wq[i],
513 		    &ktls_proc, &td, 0, 0, "KTLS", "thr_%d", i);
514 		if (error) {
515 			printf("Can't add KTLS thread %d error %d\n", i, error);
516 			return (error);
517 		}
518 	}
519 
520 	/*
521 	 * Start a reclaim thread per-domain to perform blocking allocations
522 	 * of 16k physically contiguous TLS crypto destination buffers.
523 	 */
524 	if (ktls_sw_buffer_cache) {
525 		for (domain = 0; domain < vm_ndomains; domain++) {
526 			if (VM_DOMAIN_EMPTY(domain))
527 				continue;
528 			if (CPU_EMPTY(&cpuset_domain[domain]))
529 				continue;
530 			error = kproc_kthread_add(ktls_reclaim_thread,
531 			    &ktls_domains[domain], &ktls_proc,
532 			    &ktls_domains[domain].reclaim_td.td,
533 			    0, 0, "KTLS", "reclaim_%d", domain);
534 			if (error) {
535 				printf("Can't add KTLS reclaim thread %d error %d\n",
536 				    domain, error);
537 				return (error);
538 			}
539 		}
540 	}
541 
542 	if (bootverbose)
543 		printf("KTLS: Initialized %d threads\n", ktls_number_threads);
544 	return (0);
545 }
546 
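/*
 * Lazily initialize KTLS on first use.  ktls_init_state is a
 * tri-state flag: 0 means not yet initialized, > 0 means ready, and
 * < 0 means initialization failed; updates are serialized by
 * ktls_init_lock.
 */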
547 static int
548 ktls_start_kthreads(void)
549 {
550 	int error, state;
551 
552 start:
553 	state = atomic_load_acq_int(&ktls_init_state);
554 	if (__predict_true(state > 0))
555 		return (0);
556 	if (state < 0)
557 		return (ENXIO);
558 
559 	sx_xlock(&ktls_init_lock);
560 	if (ktls_init_state != 0) {
561 		sx_xunlock(&ktls_init_lock);
562 		goto start;
563 	}
564 
565 	error = ktls_init();
566 	if (error == 0)
567 		state = 1;
568 	else
569 		state = -1;
570 	atomic_store_rel_int(&ktls_init_state, state);
571 	sx_xunlock(&ktls_init_lock);
572 	return (error);
573 }
574 
575 static int
576 ktls_create_session(struct socket *so, struct tls_enable *en,
577     struct ktls_session **tlsp, int direction)
578 {
579 	struct ktls_session *tls;
580 	int error;
581 
582 	/* Only TLS 1.0 - 1.3 are supported. */
583 	if (en->tls_vmajor != TLS_MAJOR_VER_ONE)
584 		return (EINVAL);
585 	if (en->tls_vminor < TLS_MINOR_VER_ZERO ||
586 	    en->tls_vminor > TLS_MINOR_VER_THREE)
587 		return (EINVAL);
588 
589 	if (en->auth_key_len < 0 || en->auth_key_len > TLS_MAX_PARAM_SIZE)
590 		return (EINVAL);
591 	if (en->cipher_key_len < 0 || en->cipher_key_len > TLS_MAX_PARAM_SIZE)
592 		return (EINVAL);
593 	if (en->iv_len < 0 || en->iv_len > sizeof(tls->params.iv))
594 		return (EINVAL);
595 
596 	/* All supported algorithms require a cipher key. */
597 	if (en->cipher_key_len == 0)
598 		return (EINVAL);
599 
600 	/* No flags are currently supported. */
601 	if (en->flags != 0)
602 		return (EINVAL);
603 
604 	/* Common checks for supported algorithms. */
605 	switch (en->cipher_algorithm) {
606 	case CRYPTO_AES_NIST_GCM_16:
607 		/*
608 		 * auth_algorithm isn't used, but permit GMAC values
609 		 * for compatibility.
610 		 */
611 		switch (en->auth_algorithm) {
612 		case 0:
613 #ifdef COMPAT_FREEBSD12
614 		/* XXX: Really 13.0-current COMPAT. */
615 		case CRYPTO_AES_128_NIST_GMAC:
616 		case CRYPTO_AES_192_NIST_GMAC:
617 		case CRYPTO_AES_256_NIST_GMAC:
618 #endif
619 			break;
620 		default:
621 			return (EINVAL);
622 		}
623 		if (en->auth_key_len != 0)
624 			return (EINVAL);
625 		switch (en->tls_vminor) {
626 		case TLS_MINOR_VER_TWO:
627 			if (en->iv_len != TLS_AEAD_GCM_LEN)
628 				return (EINVAL);
629 			break;
630 		case TLS_MINOR_VER_THREE:
631 			if (en->iv_len != TLS_1_3_GCM_IV_LEN)
632 				return (EINVAL);
633 			break;
634 		default:
635 			return (EINVAL);
636 		}
637 		break;
638 	case CRYPTO_AES_CBC:
639 		switch (en->auth_algorithm) {
640 		case CRYPTO_SHA1_HMAC:
641 			break;
642 		case CRYPTO_SHA2_256_HMAC:
643 		case CRYPTO_SHA2_384_HMAC:
644 			if (en->tls_vminor != TLS_MINOR_VER_TWO)
645 				return (EINVAL);
646 			break;
647 		default:
648 			return (EINVAL);
649 		}
650 		if (en->auth_key_len == 0)
651 			return (EINVAL);
652 
653 		/*
654 		 * TLS 1.0 requires an implicit IV.  TLS 1.1 and 1.2
655 		 * use explicit IVs.
656 		 */
657 		switch (en->tls_vminor) {
658 		case TLS_MINOR_VER_ZERO:
659 			if (en->iv_len != TLS_CBC_IMPLICIT_IV_LEN)
660 				return (EINVAL);
661 			break;
662 		case TLS_MINOR_VER_ONE:
663 		case TLS_MINOR_VER_TWO:
664 			/* Ignore any supplied IV. */
665 			en->iv_len = 0;
666 			break;
667 		default:
668 			return (EINVAL);
669 		}
670 		break;
671 	case CRYPTO_CHACHA20_POLY1305:
672 		if (en->auth_algorithm != 0 || en->auth_key_len != 0)
673 			return (EINVAL);
674 		if (en->tls_vminor != TLS_MINOR_VER_TWO &&
675 		    en->tls_vminor != TLS_MINOR_VER_THREE)
676 			return (EINVAL);
677 		if (en->iv_len != TLS_CHACHA20_IV_LEN)
678 			return (EINVAL);
679 		break;
680 	default:
681 		return (EINVAL);
682 	}
683 
684 	error = ktls_start_kthreads();
685 	if (error != 0)
686 		return (error);
687 
688 	tls = uma_zalloc(ktls_session_zone, M_WAITOK | M_ZERO);
689 
690 	counter_u64_add(ktls_offload_active, 1);
691 
692 	refcount_init(&tls->refcount, 1);
693 	if (direction == KTLS_RX) {
694 		TASK_INIT(&tls->reset_tag_task, 0, ktls_reset_receive_tag, tls);
695 	} else {
696 		TASK_INIT(&tls->reset_tag_task, 0, ktls_reset_send_tag, tls);
697 		tls->inp = so->so_pcb;
698 		in_pcbref(tls->inp);
699 		tls->tx = true;
700 	}
701 
702 	tls->wq_index = ktls_get_cpu(so);
703 
704 	tls->params.cipher_algorithm = en->cipher_algorithm;
705 	tls->params.auth_algorithm = en->auth_algorithm;
706 	tls->params.tls_vmajor = en->tls_vmajor;
707 	tls->params.tls_vminor = en->tls_vminor;
708 	tls->params.flags = en->flags;
709 	tls->params.max_frame_len = min(TLS_MAX_MSG_SIZE_V10_2, ktls_maxlen);
710 
711 	/* Set the header and trailer lengths. */
712 	tls->params.tls_hlen = sizeof(struct tls_record_layer);
713 	switch (en->cipher_algorithm) {
714 	case CRYPTO_AES_NIST_GCM_16:
715 		/*
716 		 * TLS 1.2 uses a 4 byte implicit IV with an explicit 8 byte
717 		 * nonce.  TLS 1.3 uses a 12 byte implicit IV.
718 		 */
719 		if (en->tls_vminor < TLS_MINOR_VER_THREE)
720 			tls->params.tls_hlen += sizeof(uint64_t);
721 		tls->params.tls_tlen = AES_GMAC_HASH_LEN;
722 		tls->params.tls_bs = 1;
723 		break;
724 	case CRYPTO_AES_CBC:
725 		switch (en->auth_algorithm) {
726 		case CRYPTO_SHA1_HMAC:
727 			if (en->tls_vminor == TLS_MINOR_VER_ZERO) {
728 				/* Implicit IV, no nonce. */
729 				tls->sequential_records = true;
730 				tls->next_seqno = be64dec(en->rec_seq);
731 				STAILQ_INIT(&tls->pending_records);
732 			} else {
733 				tls->params.tls_hlen += AES_BLOCK_LEN;
734 			}
735 			tls->params.tls_tlen = AES_BLOCK_LEN +
736 			    SHA1_HASH_LEN;
737 			break;
738 		case CRYPTO_SHA2_256_HMAC:
739 			tls->params.tls_hlen += AES_BLOCK_LEN;
740 			tls->params.tls_tlen = AES_BLOCK_LEN +
741 			    SHA2_256_HASH_LEN;
742 			break;
743 		case CRYPTO_SHA2_384_HMAC:
744 			tls->params.tls_hlen += AES_BLOCK_LEN;
745 			tls->params.tls_tlen = AES_BLOCK_LEN +
746 			    SHA2_384_HASH_LEN;
747 			break;
748 		default:
749 			panic("invalid hmac");
750 		}
751 		tls->params.tls_bs = AES_BLOCK_LEN;
752 		break;
753 	case CRYPTO_CHACHA20_POLY1305:
754 		/*
755 		 * Chacha20 uses a 12 byte implicit IV.
756 		 */
757 		tls->params.tls_tlen = POLY1305_HASH_LEN;
758 		tls->params.tls_bs = 1;
759 		break;
760 	default:
761 		panic("invalid cipher");
762 	}
763 
764 	/*
765 	 * TLS 1.3 includes optional padding which we do not support,
766 	 * and also puts the "real" record type at the end of the
767 	 * encrypted data.
768 	 */
769 	if (en->tls_vminor == TLS_MINOR_VER_THREE)
770 		tls->params.tls_tlen += sizeof(uint8_t);
771 
772 	KASSERT(tls->params.tls_hlen <= MBUF_PEXT_HDR_LEN,
773 	    ("TLS header length too long: %d", tls->params.tls_hlen));
774 	KASSERT(tls->params.tls_tlen <= MBUF_PEXT_TRAIL_LEN,
775 	    ("TLS trailer length too long: %d", tls->params.tls_tlen));
776 
777 	if (en->auth_key_len != 0) {
778 		tls->params.auth_key_len = en->auth_key_len;
779 		tls->params.auth_key = malloc(en->auth_key_len, M_KTLS,
780 		    M_WAITOK);
781 		bcopy(en->auth_key, tls->params.auth_key, en->auth_key_len);
782 	}
783 
784 	tls->params.cipher_key_len = en->cipher_key_len;
785 	tls->params.cipher_key = malloc(en->cipher_key_len, M_KTLS, M_WAITOK);
786 	bcopy(en->cipher_key, tls->params.cipher_key, en->cipher_key_len);
787 
788 	/*
789 	 * This holds the implicit portion of the nonce for AEAD
790 	 * ciphers and the initial implicit IV for TLS 1.0.  The
791 	 * explicit portions of the IV are generated in ktls_frame().
792 	 */
793 	if (en->iv_len != 0) {
794 		tls->params.iv_len = en->iv_len;
795 		bcopy(en->iv, tls->params.iv, en->iv_len);
796 
797 		/*
798 		 * For TLS 1.2 with GCM, generate an 8-byte nonce as a
799 		 * counter to generate unique explicit IVs.
800 		 *
801 		 * Store this counter in the last 8 bytes of the IV
802 		 * array so that it is 8-byte aligned.
803 		 */
804 		if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16 &&
805 		    en->tls_vminor == TLS_MINOR_VER_TWO)
806 			arc4rand(tls->params.iv + 8, sizeof(uint64_t), 0);
807 	}
808 
809 	*tlsp = tls;
810 	return (0);
811 }
812 
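/*
 * Clone an existing session, deep-copying its keys.  This is used by
 * ktls_set_tx_mode() when switching a connection between SW and ifnet
 * TLS.
 */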
813 static struct ktls_session *
814 ktls_clone_session(struct ktls_session *tls, int direction)
815 {
816 	struct ktls_session *tls_new;
817 
818 	tls_new = uma_zalloc(ktls_session_zone, M_WAITOK | M_ZERO);
819 
820 	counter_u64_add(ktls_offload_active, 1);
821 
822 	refcount_init(&tls_new->refcount, 1);
823 	if (direction == KTLS_RX) {
824 		TASK_INIT(&tls_new->reset_tag_task, 0, ktls_reset_receive_tag,
825 		    tls_new);
826 	} else {
827 		TASK_INIT(&tls_new->reset_tag_task, 0, ktls_reset_send_tag,
828 		    tls_new);
829 		tls_new->inp = tls->inp;
830 		tls_new->tx = true;
831 		in_pcbref(tls_new->inp);
832 	}
833 
834 	/* Copy fields from existing session. */
835 	tls_new->params = tls->params;
836 	tls_new->wq_index = tls->wq_index;
837 
838 	/* Deep copy keys. */
839 	if (tls_new->params.auth_key != NULL) {
840 		tls_new->params.auth_key = malloc(tls->params.auth_key_len,
841 		    M_KTLS, M_WAITOK);
842 		memcpy(tls_new->params.auth_key, tls->params.auth_key,
843 		    tls->params.auth_key_len);
844 	}
845 
846 	tls_new->params.cipher_key = malloc(tls->params.cipher_key_len, M_KTLS,
847 	    M_WAITOK);
848 	memcpy(tls_new->params.cipher_key, tls->params.cipher_key,
849 	    tls->params.cipher_key_len);
850 
851 	return (tls_new);
852 }
853 
854 #ifdef TCP_OFFLOAD
855 static int
856 ktls_try_toe(struct socket *so, struct ktls_session *tls, int direction)
857 {
858 	struct inpcb *inp;
859 	struct tcpcb *tp;
860 	int error;
861 
862 	inp = so->so_pcb;
863 	INP_WLOCK(inp);
864 	if (inp->inp_flags & INP_DROPPED) {
865 		INP_WUNLOCK(inp);
866 		return (ECONNRESET);
867 	}
868 	if (inp->inp_socket == NULL) {
869 		INP_WUNLOCK(inp);
870 		return (ECONNRESET);
871 	}
872 	tp = intotcpcb(inp);
873 	if (!(tp->t_flags & TF_TOE)) {
874 		INP_WUNLOCK(inp);
875 		return (EOPNOTSUPP);
876 	}
877 
878 	error = tcp_offload_alloc_tls_session(tp, tls, direction);
879 	INP_WUNLOCK(inp);
880 	if (error == 0) {
881 		tls->mode = TCP_TLS_MODE_TOE;
882 		switch (tls->params.cipher_algorithm) {
883 		case CRYPTO_AES_CBC:
884 			counter_u64_add(ktls_toe_cbc, 1);
885 			break;
886 		case CRYPTO_AES_NIST_GCM_16:
887 			counter_u64_add(ktls_toe_gcm, 1);
888 			break;
889 		case CRYPTO_CHACHA20_POLY1305:
890 			counter_u64_add(ktls_toe_chacha20, 1);
891 			break;
892 		}
893 	}
894 	return (error);
895 }
896 #endif
897 
898 /*
899  * Common code used when first enabling ifnet TLS on a connection or
900  * when allocating a new ifnet TLS session due to a routing change.
901  * This function allocates a new TLS send tag on whatever interface
902  * the connection is currently routed over.
903  */
904 static int
905 ktls_alloc_snd_tag(struct inpcb *inp, struct ktls_session *tls, bool force,
906     struct m_snd_tag **mstp)
907 {
908 	union if_snd_tag_alloc_params params;
909 	struct ifnet *ifp;
910 	struct nhop_object *nh;
911 	struct tcpcb *tp;
912 	int error;
913 
914 	INP_RLOCK(inp);
915 	if (inp->inp_flags & INP_DROPPED) {
916 		INP_RUNLOCK(inp);
917 		return (ECONNRESET);
918 	}
919 	if (inp->inp_socket == NULL) {
920 		INP_RUNLOCK(inp);
921 		return (ECONNRESET);
922 	}
923 	tp = intotcpcb(inp);
924 
925 	/*
926 	 * Check administrative controls on ifnet TLS to determine if
927 	 * ifnet TLS should be denied.
928 	 *
929 	 * - Always permit 'force' requests.
930 	 * - ktls_ifnet_permitted == 0: always deny.
931 	 */
932 	if (!force && ktls_ifnet_permitted == 0) {
933 		INP_RUNLOCK(inp);
934 		return (ENXIO);
935 	}
936 
937 	/*
938 	 * XXX: Use the cached route in the inpcb to find the
939 	 * interface.  This should perhaps instead use
940 	 * rtalloc1_fib(dst, 0, 0, fibnum).  Since KTLS is only
941 	 * enabled after a connection has completed key negotiation in
942 	 * userland, the cached route will be present in practice.
943 	 */
944 	nh = inp->inp_route.ro_nh;
945 	if (nh == NULL) {
946 		INP_RUNLOCK(inp);
947 		return (ENXIO);
948 	}
949 	ifp = nh->nh_ifp;
950 	if_ref(ifp);
951 
952 	/*
953 	 * Allocate a TLS + ratelimit tag if the connection has an
954 	 * existing pacing rate.
955 	 */
956 	if (tp->t_pacing_rate != -1 &&
957 	    (if_getcapenable(ifp) & IFCAP_TXTLS_RTLMT) != 0) {
958 		params.hdr.type = IF_SND_TAG_TYPE_TLS_RATE_LIMIT;
959 		params.tls_rate_limit.inp = inp;
960 		params.tls_rate_limit.tls = tls;
961 		params.tls_rate_limit.max_rate = tp->t_pacing_rate;
962 	} else {
963 		params.hdr.type = IF_SND_TAG_TYPE_TLS;
964 		params.tls.inp = inp;
965 		params.tls.tls = tls;
966 	}
967 	params.hdr.flowid = inp->inp_flowid;
968 	params.hdr.flowtype = inp->inp_flowtype;
969 	params.hdr.numa_domain = inp->inp_numa_domain;
970 	INP_RUNLOCK(inp);
971 
972 	if ((if_getcapenable(ifp) & IFCAP_MEXTPG) == 0) {
973 		error = EOPNOTSUPP;
974 		goto out;
975 	}
976 	if (inp->inp_vflag & INP_IPV6) {
977 		if ((if_getcapenable(ifp) & IFCAP_TXTLS6) == 0) {
978 			error = EOPNOTSUPP;
979 			goto out;
980 		}
981 	} else {
982 		if ((if_getcapenable(ifp) & IFCAP_TXTLS4) == 0) {
983 			error = EOPNOTSUPP;
984 			goto out;
985 		}
986 	}
987 	error = m_snd_tag_alloc(ifp, &params, mstp);
988 out:
989 	if_rele(ifp);
990 	return (error);
991 }
992 
993 /*
994  * Allocate an initial TLS receive tag for doing HW decryption of TLS
995  * data.
996  *
997  * This function allocates a new TLS receive tag on whatever interface
998  * the connection is currently routed over.  If the connection ends up
999  * using a different interface for receive this will get fixed up via
1000  * ktls_input_ifp_mismatch as future packets arrive.
1001  */
1002 static int
1003 ktls_alloc_rcv_tag(struct inpcb *inp, struct ktls_session *tls,
1004     struct m_snd_tag **mstp)
1005 {
1006 	union if_snd_tag_alloc_params params;
1007 	struct ifnet *ifp;
1008 	struct nhop_object *nh;
1009 	int error;
1010 
1011 	if (!ktls_ocf_recrypt_supported(tls))
1012 		return (ENXIO);
1013 
1014 	INP_RLOCK(inp);
1015 	if (inp->inp_flags & INP_DROPPED) {
1016 		INP_RUNLOCK(inp);
1017 		return (ECONNRESET);
1018 	}
1019 	if (inp->inp_socket == NULL) {
1020 		INP_RUNLOCK(inp);
1021 		return (ECONNRESET);
1022 	}
1023 
1024 	/*
1025 	 * Check administrative controls on ifnet TLS to determine if
1026 	 * ifnet TLS should be denied.
1027 	 */
1028 	if (ktls_ifnet_permitted == 0) {
1029 		INP_RUNLOCK(inp);
1030 		return (ENXIO);
1031 	}
1032 
1033 	/*
1034 	 * XXX: As with ktls_alloc_snd_tag, use the cached route in
1035 	 * the inpcb to find the interface.
1036 	 */
1037 	nh = inp->inp_route.ro_nh;
1038 	if (nh == NULL) {
1039 		INP_RUNLOCK(inp);
1040 		return (ENXIO);
1041 	}
1042 	ifp = nh->nh_ifp;
1043 	if_ref(ifp);
1044 	tls->rx_ifp = ifp;
1045 
1046 	params.hdr.type = IF_SND_TAG_TYPE_TLS_RX;
1047 	params.hdr.flowid = inp->inp_flowid;
1048 	params.hdr.flowtype = inp->inp_flowtype;
1049 	params.hdr.numa_domain = inp->inp_numa_domain;
1050 	params.tls_rx.inp = inp;
1051 	params.tls_rx.tls = tls;
1052 	params.tls_rx.vlan_id = 0;
1053 
1054 	INP_RUNLOCK(inp);
1055 
1056 	if (inp->inp_vflag & INP_IPV6) {
1057 		if ((if_getcapenable2(ifp) & IFCAP2_BIT(IFCAP2_RXTLS6)) == 0) {
1058 			error = EOPNOTSUPP;
1059 			goto out;
1060 		}
1061 	} else {
1062 		if ((if_getcapenable2(ifp) & IFCAP2_BIT(IFCAP2_RXTLS4)) == 0) {
1063 			error = EOPNOTSUPP;
1064 			goto out;
1065 		}
1066 	}
1067 	error = m_snd_tag_alloc(ifp, &params, mstp);
1068 
1069 	/*
1070 	 * If this connection is over a vlan, vlan_snd_tag_alloc
1071 	 * rewrites vlan_id with the saved interface.  Save the VLAN
1072 	 * ID for use in ktls_reset_receive_tag which allocates new
1073 	 * receive tags directly from the leaf interface bypassing
1074 	 * if_vlan.
1075 	 */
1076 	if (error == 0)
1077 		tls->rx_vlan_id = params.tls_rx.vlan_id;
1078 out:
1079 	return (error);
1080 }
1081 
1082 static int
1083 ktls_try_ifnet(struct socket *so, struct ktls_session *tls, int direction,
1084     bool force)
1085 {
1086 	struct m_snd_tag *mst;
1087 	int error;
1088 
1089 	switch (direction) {
1090 	case KTLS_TX:
1091 		error = ktls_alloc_snd_tag(so->so_pcb, tls, force, &mst);
1092 		if (__predict_false(error != 0))
1093 			goto done;
1094 		break;
1095 	case KTLS_RX:
1096 		KASSERT(!force, ("%s: forced receive tag", __func__));
1097 		error = ktls_alloc_rcv_tag(so->so_pcb, tls, &mst);
1098 		if (__predict_false(error != 0))
1099 			goto done;
1100 		break;
1101 	default:
1102 		__assert_unreachable();
1103 	}
1104 
1105 	tls->mode = TCP_TLS_MODE_IFNET;
1106 	tls->snd_tag = mst;
1107 
1108 	switch (tls->params.cipher_algorithm) {
1109 	case CRYPTO_AES_CBC:
1110 		counter_u64_add(ktls_ifnet_cbc, 1);
1111 		break;
1112 	case CRYPTO_AES_NIST_GCM_16:
1113 		counter_u64_add(ktls_ifnet_gcm, 1);
1114 		break;
1115 	case CRYPTO_CHACHA20_POLY1305:
1116 		counter_u64_add(ktls_ifnet_chacha20, 1);
1117 		break;
1118 	default:
1119 		break;
1120 	}
1121 done:
1122 	return (error);
1123 }
1124 
1125 static void
1126 ktls_use_sw(struct ktls_session *tls)
1127 {
1128 	tls->mode = TCP_TLS_MODE_SW;
1129 	switch (tls->params.cipher_algorithm) {
1130 	case CRYPTO_AES_CBC:
1131 		counter_u64_add(ktls_sw_cbc, 1);
1132 		break;
1133 	case CRYPTO_AES_NIST_GCM_16:
1134 		counter_u64_add(ktls_sw_gcm, 1);
1135 		break;
1136 	case CRYPTO_CHACHA20_POLY1305:
1137 		counter_u64_add(ktls_sw_chacha20, 1);
1138 		break;
1139 	}
1140 }
1141 
1142 static int
1143 ktls_try_sw(struct socket *so, struct ktls_session *tls, int direction)
1144 {
1145 	int error;
1146 
1147 	error = ktls_ocf_try(so, tls, direction);
1148 	if (error)
1149 		return (error);
1150 	ktls_use_sw(tls);
1151 	return (0);
1152 }
1153 
1154 /*
1155  * KTLS RX stores data in the socket buffer as a list of TLS records,
1156  * where each record is stored as a control message containing the TLS
1157  * header followed by data mbufs containing the decrypted data.  This
1158  * is different from KTLS TX which always uses an mb_ext_pgs mbuf for
1159  * both encrypted and decrypted data.  TLS records decrypted by a NIC
1160  * should be queued to the socket buffer as records, but encrypted
1161  * data which needs to be decrypted by software arrives as a stream of
1162  * regular mbufs which need to be converted.  In addition, there may
1163  * already be pending encrypted data in the socket buffer when KTLS RX
1164  * is enabled.
1165  *
1166  * To manage not-yet-decrypted data for KTLS RX, the following scheme
1167  * is used:
1168  *
1169  * - A single chain of NOTREADY mbufs is hung off of sb_mtls.
1170  *
1171  * - ktls_check_rx checks this chain of mbufs reading the TLS header
1172  *   from the first mbuf.  Once all of the data for that TLS record is
1173  *   queued, the socket is queued to a worker thread.
1174  *
1175  * - The worker thread calls ktls_decrypt to decrypt TLS records in
1176  *   the TLS chain.  Each TLS record is detached from the TLS chain,
1177  *   decrypted, and inserted into the regular socket buffer chain as
1178  *   a record starting with a control message holding the TLS header and
1179  *   a chain of mbufs holding the decrypted data.
1180  */
1181 
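/*
 * Move any data already queued in the receive socket buffer onto the
 * sb_mtls chain and mark it NOTREADY, handling the "pending encrypted
 * data" case described above.
 */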
1182 static void
1183 sb_mark_notready(struct sockbuf *sb)
1184 {
1185 	struct mbuf *m;
1186 
1187 	m = sb->sb_mb;
1188 	sb->sb_mtls = m;
1189 	sb->sb_mb = NULL;
1190 	sb->sb_mbtail = NULL;
1191 	sb->sb_lastrecord = NULL;
1192 	for (; m != NULL; m = m->m_next) {
1193 		KASSERT(m->m_nextpkt == NULL, ("%s: m_nextpkt != NULL",
1194 		    __func__));
1195 		KASSERT((m->m_flags & M_NOTAVAIL) == 0, ("%s: mbuf not avail",
1196 		    __func__));
1197 		KASSERT(sb->sb_acc >= m->m_len, ("%s: sb_acc < m->m_len",
1198 		    __func__));
1199 		m->m_flags |= M_NOTREADY;
1200 		sb->sb_acc -= m->m_len;
1201 		sb->sb_tlscc += m->m_len;
1202 		sb->sb_mtlstail = m;
1203 	}
1204 	KASSERT(sb->sb_acc == 0 && sb->sb_tlscc == sb->sb_ccc,
1205 	    ("%s: acc %u tlscc %u ccc %u", __func__, sb->sb_acc, sb->sb_tlscc,
1206 	    sb->sb_ccc));
1207 }
1208 
1209 /*
1210  * Return information about the pending TLS data in a socket
1211  * buffer.  On return, 'seqno' is set to the sequence number
1212  * of the next TLS record to be received, 'resid' is set to
1213  * the number of bytes still needed for the last pending
1214  * record.  The function returns 'false' if the last pending
1215  * record contains a partial TLS header.  In that case, 'resid'
1216  * is the number of bytes needed to complete the TLS header.
1217  */
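/*
 * Worked example (illustrative): if the not-yet-decrypted data
 * consists of one complete record carrying a 5-byte header plus a
 * 95-byte payload, followed by only the first 3 bytes of the next
 * record's header, then *seqnop is advanced by two, *residp is set
 * to 2 (the header bytes still missing), and false is returned.
 */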
1218 bool
1219 ktls_pending_rx_info(struct sockbuf *sb, uint64_t *seqnop, size_t *residp)
1220 {
1221 	struct tls_record_layer hdr;
1222 	struct mbuf *m;
1223 	uint64_t seqno;
1224 	size_t resid;
1225 	u_int offset, record_len;
1226 
1227 	SOCKBUF_LOCK_ASSERT(sb);
1228 	MPASS(sb->sb_flags & SB_TLS_RX);
1229 	seqno = sb->sb_tls_seqno;
1230 	resid = sb->sb_tlscc;
1231 	m = sb->sb_mtls;
1232 	offset = 0;
1233 
1234 	if (resid == 0) {
1235 		*seqnop = seqno;
1236 		*residp = 0;
1237 		return (true);
1238 	}
1239 
1240 	for (;;) {
1241 		seqno++;
1242 
1243 		if (resid < sizeof(hdr)) {
1244 			*seqnop = seqno;
1245 			*residp = sizeof(hdr) - resid;
1246 			return (false);
1247 		}
1248 
1249 		m_copydata(m, offset, sizeof(hdr), (void *)&hdr);
1250 
1251 		record_len = sizeof(hdr) + ntohs(hdr.tls_length);
1252 		if (resid <= record_len) {
1253 			*seqnop = seqno;
1254 			*residp = record_len - resid;
1255 			return (true);
1256 		}
1257 		resid -= record_len;
1258 
1259 		while (record_len != 0) {
1260 			if (m->m_len - offset > record_len) {
1261 				offset += record_len;
1262 				break;
1263 			}
1264 
1265 			record_len -= (m->m_len - offset);
1266 			offset = 0;
1267 			m = m->m_next;
1268 		}
1269 	}
1270 }
1271 
1272 int
1273 ktls_enable_rx(struct socket *so, struct tls_enable *en)
1274 {
1275 	struct ktls_session *tls;
1276 	int error;
1277 
1278 	if (!ktls_offload_enable)
1279 		return (ENOTSUP);
1280 
1281 	counter_u64_add(ktls_offload_enable_calls, 1);
1282 
1283 	/*
1284 	 * This should always be true since only the TCP socket option
1285 	 * invokes this function.
1286 	 */
1287 	if (so->so_proto->pr_protocol != IPPROTO_TCP)
1288 		return (EINVAL);
1289 
1290 	/*
1291 	 * XXX: Don't overwrite existing sessions.  We should permit
1292 	 * this to support rekeying in the future.
1293 	 */
1294 	if (so->so_rcv.sb_tls_info != NULL)
1295 		return (EALREADY);
1296 
1297 	if (en->cipher_algorithm == CRYPTO_AES_CBC && !ktls_cbc_enable)
1298 		return (ENOTSUP);
1299 
1300 	error = ktls_create_session(so, en, &tls, KTLS_RX);
1301 	if (error)
1302 		return (error);
1303 
1304 	error = ktls_ocf_try(so, tls, KTLS_RX);
1305 	if (error) {
1306 		ktls_free(tls);
1307 		return (error);
1308 	}
1309 
1310 	/* Mark the socket as using TLS offload. */
1311 	SOCK_RECVBUF_LOCK(so);
1312 	if (SOLISTENING(so)) {
1313 		SOCK_RECVBUF_UNLOCK(so);
1314 		ktls_free(tls);
1315 		return (EINVAL);
1316 	}
1317 	so->so_rcv.sb_tls_seqno = be64dec(en->rec_seq);
1318 	so->so_rcv.sb_tls_info = tls;
1319 	so->so_rcv.sb_flags |= SB_TLS_RX;
1320 
1321 	/* Mark existing data as not ready until it can be decrypted. */
1322 	sb_mark_notready(&so->so_rcv);
1323 	ktls_check_rx(&so->so_rcv);
1324 	SOCK_RECVBUF_UNLOCK(so);
1325 
1326 	/* Prefer TOE -> ifnet TLS -> software TLS. */
1327 #ifdef TCP_OFFLOAD
1328 	error = ktls_try_toe(so, tls, KTLS_RX);
1329 	if (error)
1330 #endif
1331 		error = ktls_try_ifnet(so, tls, KTLS_RX, false);
1332 	if (error)
1333 		ktls_use_sw(tls);
1334 
1335 	counter_u64_add(ktls_offload_total, 1);
1336 
1337 	return (0);
1338 }
1339 
1340 int
1341 ktls_enable_tx(struct socket *so, struct tls_enable *en)
1342 {
1343 	struct ktls_session *tls;
1344 	struct inpcb *inp;
1345 	struct tcpcb *tp;
1346 	int error;
1347 
1348 	if (!ktls_offload_enable)
1349 		return (ENOTSUP);
1350 
1351 	counter_u64_add(ktls_offload_enable_calls, 1);
1352 
1353 	/*
1354 	 * This should always be true since only the TCP socket option
1355 	 * invokes this function.
1356 	 */
1357 	if (so->so_proto->pr_protocol != IPPROTO_TCP)
1358 		return (EINVAL);
1359 
1360 	/*
1361 	 * XXX: Don't overwrite existing sessions.  We should permit
1362 	 * this to support rekeying in the future.
1363 	 */
1364 	if (so->so_snd.sb_tls_info != NULL)
1365 		return (EALREADY);
1366 
1367 	if (en->cipher_algorithm == CRYPTO_AES_CBC && !ktls_cbc_enable)
1368 		return (ENOTSUP);
1369 
1370 	/* TLS requires ext pgs */
1371 	if (mb_use_ext_pgs == 0)
1372 		return (ENXIO);
1373 
1374 	error = ktls_create_session(so, en, &tls, KTLS_TX);
1375 	if (error)
1376 		return (error);
1377 
1378 	/* Prefer TOE -> ifnet TLS -> software TLS. */
1379 #ifdef TCP_OFFLOAD
1380 	error = ktls_try_toe(so, tls, KTLS_TX);
1381 	if (error)
1382 #endif
1383 		error = ktls_try_ifnet(so, tls, KTLS_TX, false);
1384 	if (error)
1385 		error = ktls_try_sw(so, tls, KTLS_TX);
1386 
1387 	if (error) {
1388 		ktls_free(tls);
1389 		return (error);
1390 	}
1391 
1392 	/*
1393 	 * Serialize with sosend_generic() and make sure that we're not
1394 	 * operating on a listening socket.
1395 	 */
1396 	error = SOCK_IO_SEND_LOCK(so, SBL_WAIT);
1397 	if (error) {
1398 		ktls_free(tls);
1399 		return (error);
1400 	}
1401 
1402 	/*
1403 	 * Write lock the INP when setting sb_tls_info so that
1404 	 * routines in tcp_ratelimit.c can read sb_tls_info while
1405 	 * holding the INP lock.
1406 	 */
1407 	inp = so->so_pcb;
1408 	INP_WLOCK(inp);
1409 	SOCK_SENDBUF_LOCK(so);
1410 	so->so_snd.sb_tls_seqno = be64dec(en->rec_seq);
1411 	so->so_snd.sb_tls_info = tls;
1412 	if (tls->mode != TCP_TLS_MODE_SW) {
1413 		tp = intotcpcb(inp);
1414 		MPASS(tp->t_nic_ktls_xmit == 0);
1415 		tp->t_nic_ktls_xmit = 1;
1416 		if (tp->t_fb->tfb_hwtls_change != NULL)
1417 			(*tp->t_fb->tfb_hwtls_change)(tp, 1);
1418 	}
1419 	SOCK_SENDBUF_UNLOCK(so);
1420 	INP_WUNLOCK(inp);
1421 	SOCK_IO_SEND_UNLOCK(so);
1422 
1423 	counter_u64_add(ktls_offload_total, 1);
1424 
1425 	return (0);
1426 }
1427 
1428 int
1429 ktls_get_rx_mode(struct socket *so, int *modep)
1430 {
1431 	struct ktls_session *tls;
1432 	struct inpcb *inp __diagused;
1433 
1434 	if (SOLISTENING(so))
1435 		return (EINVAL);
1436 	inp = so->so_pcb;
1437 	INP_WLOCK_ASSERT(inp);
1438 	SOCK_RECVBUF_LOCK(so);
1439 	tls = so->so_rcv.sb_tls_info;
1440 	if (tls == NULL)
1441 		*modep = TCP_TLS_MODE_NONE;
1442 	else
1443 		*modep = tls->mode;
1444 	SOCK_RECVBUF_UNLOCK(so);
1445 	return (0);
1446 }
1447 
1448 /*
1449  * ktls_get_rx_sequence - get the next TCP- and TLS- sequence number.
1450  *
1451  * This function gets information about the next TCP- and TLS-
1452  * sequence number to be processed by the TLS receive worker
1453  * thread. The information is extracted from the given "inpcb"
1454  * structure. The values are stored in host endian format at the two
1455  * given output pointer locations. The TCP sequence number points to
1456  * the beginning of the TLS header.
1457  *
1458  * This function returns zero on success, else a non-zero error code
1459  * is returned.
1460  */
1461 int
1462 ktls_get_rx_sequence(struct inpcb *inp, uint32_t *tcpseq, uint64_t *tlsseq)
1463 {
1464 	struct socket *so;
1465 	struct tcpcb *tp;
1466 
1467 	INP_RLOCK(inp);
1468 	so = inp->inp_socket;
1469 	if (__predict_false(so == NULL)) {
1470 		INP_RUNLOCK(inp);
1471 		return (EINVAL);
1472 	}
1473 	if (inp->inp_flags & INP_DROPPED) {
1474 		INP_RUNLOCK(inp);
1475 		return (ECONNRESET);
1476 	}
1477 
1478 	tp = intotcpcb(inp);
1479 	MPASS(tp != NULL);
1480 
1481 	SOCKBUF_LOCK(&so->so_rcv);
1482 	*tcpseq = tp->rcv_nxt - so->so_rcv.sb_tlscc;
1483 	*tlsseq = so->so_rcv.sb_tls_seqno;
1484 	SOCKBUF_UNLOCK(&so->so_rcv);
1485 
1486 	INP_RUNLOCK(inp);
1487 
1488 	return (0);
1489 }
1490 
1491 int
1492 ktls_get_tx_mode(struct socket *so, int *modep)
1493 {
1494 	struct ktls_session *tls;
1495 	struct inpcb *inp __diagused;
1496 
1497 	if (SOLISTENING(so))
1498 		return (EINVAL);
1499 	inp = so->so_pcb;
1500 	INP_WLOCK_ASSERT(inp);
1501 	SOCK_SENDBUF_LOCK(so);
1502 	tls = so->so_snd.sb_tls_info;
1503 	if (tls == NULL)
1504 		*modep = TCP_TLS_MODE_NONE;
1505 	else
1506 		*modep = tls->mode;
1507 	SOCK_SENDBUF_UNLOCK(so);
1508 	return (0);
1509 }
1510 
1511 /*
1512  * Switch between SW and ifnet TLS sessions as requested.
1513  */
1514 int
1515 ktls_set_tx_mode(struct socket *so, int mode)
1516 {
1517 	struct ktls_session *tls, *tls_new;
1518 	struct inpcb *inp;
1519 	struct tcpcb *tp;
1520 	int error;
1521 
1522 	if (SOLISTENING(so))
1523 		return (EINVAL);
1524 	switch (mode) {
1525 	case TCP_TLS_MODE_SW:
1526 	case TCP_TLS_MODE_IFNET:
1527 		break;
1528 	default:
1529 		return (EINVAL);
1530 	}
1531 
1532 	inp = so->so_pcb;
1533 	INP_WLOCK_ASSERT(inp);
1534 	tp = intotcpcb(inp);
1535 
1536 	if (mode == TCP_TLS_MODE_IFNET) {
1537 		/* Don't allow enabling ifnet ktls multiple times */
1538 		if (tp->t_nic_ktls_xmit)
1539 			return (EALREADY);
1540 
1541 		/*
1542 		 * Don't enable ifnet ktls if we disabled it due to an
1543 		 * excessive retransmission rate
1544 		 */
1545 		if (tp->t_nic_ktls_xmit_dis)
1546 			return (ENXIO);
1547 	}
1548 
1549 	SOCKBUF_LOCK(&so->so_snd);
1550 	tls = so->so_snd.sb_tls_info;
1551 	if (tls == NULL) {
1552 		SOCKBUF_UNLOCK(&so->so_snd);
1553 		return (0);
1554 	}
1555 
1556 	if (tls->mode == mode) {
1557 		SOCKBUF_UNLOCK(&so->so_snd);
1558 		return (0);
1559 	}
1560 
1561 	tls = ktls_hold(tls);
1562 	SOCKBUF_UNLOCK(&so->so_snd);
1563 	INP_WUNLOCK(inp);
1564 
1565 	tls_new = ktls_clone_session(tls, KTLS_TX);
1566 
1567 	if (mode == TCP_TLS_MODE_IFNET)
1568 		error = ktls_try_ifnet(so, tls_new, KTLS_TX, true);
1569 	else
1570 		error = ktls_try_sw(so, tls_new, KTLS_TX);
1571 	if (error) {
1572 		counter_u64_add(ktls_switch_failed, 1);
1573 		ktls_free(tls_new);
1574 		ktls_free(tls);
1575 		INP_WLOCK(inp);
1576 		return (error);
1577 	}
1578 
1579 	error = SOCK_IO_SEND_LOCK(so, SBL_WAIT);
1580 	if (error) {
1581 		counter_u64_add(ktls_switch_failed, 1);
1582 		ktls_free(tls_new);
1583 		ktls_free(tls);
1584 		INP_WLOCK(inp);
1585 		return (error);
1586 	}
1587 
1588 	/*
1589 	 * If we raced with another session change, keep the existing
1590 	 * session.
1591 	 */
1592 	if (tls != so->so_snd.sb_tls_info) {
1593 		counter_u64_add(ktls_switch_failed, 1);
1594 		SOCK_IO_SEND_UNLOCK(so);
1595 		ktls_free(tls_new);
1596 		ktls_free(tls);
1597 		INP_WLOCK(inp);
1598 		return (EBUSY);
1599 	}
1600 
1601 	INP_WLOCK(inp);
1602 	SOCKBUF_LOCK(&so->so_snd);
1603 	so->so_snd.sb_tls_info = tls_new;
1604 	if (tls_new->mode != TCP_TLS_MODE_SW) {
1605 		MPASS(tp->t_nic_ktls_xmit == 0);
1606 		tp->t_nic_ktls_xmit = 1;
1607 		if (tp->t_fb->tfb_hwtls_change != NULL)
1608 			(*tp->t_fb->tfb_hwtls_change)(tp, 1);
1609 	}
1610 	SOCKBUF_UNLOCK(&so->so_snd);
1611 	SOCK_IO_SEND_UNLOCK(so);
1612 
1613 	/*
1614 	 * Drop two references on 'tls'.  The first is for the
1615 	 * ktls_hold() above.  The second drops the reference from the
1616 	 * socket buffer.
1617 	 */
1618 	KASSERT(tls->refcount >= 2, ("too few references on old session"));
1619 	ktls_free(tls);
1620 	ktls_free(tls);
1621 
1622 	if (mode == TCP_TLS_MODE_IFNET)
1623 		counter_u64_add(ktls_switch_to_ifnet, 1);
1624 	else
1625 		counter_u64_add(ktls_switch_to_sw, 1);
1626 
1627 	return (0);
1628 }
1629 
1630 /*
1631  * Try to allocate a new TLS receive tag.  This task is scheduled when
1632  * sbappend_ktls_rx detects an input path change.  If a new tag is
1633  * allocated, replace the tag in the TLS session.  If a new tag cannot
1634  * be allocated, let the session fall back to software decryption.
1635  */
1636 static void
1637 ktls_reset_receive_tag(void *context, int pending)
1638 {
1639 	union if_snd_tag_alloc_params params;
1640 	struct ktls_session *tls;
1641 	struct m_snd_tag *mst;
1642 	struct inpcb *inp;
1643 	struct ifnet *ifp;
1644 	struct socket *so;
1645 	int error;
1646 
1647 	MPASS(pending == 1);
1648 
1649 	tls = context;
1650 	so = tls->so;
1651 	inp = so->so_pcb;
1652 	ifp = NULL;
1653 
1654 	INP_RLOCK(inp);
1655 	if (inp->inp_flags & INP_DROPPED) {
1656 		INP_RUNLOCK(inp);
1657 		goto out;
1658 	}
1659 
1660 	SOCKBUF_LOCK(&so->so_rcv);
1661 	mst = tls->snd_tag;
1662 	tls->snd_tag = NULL;
1663 	if (mst != NULL)
1664 		m_snd_tag_rele(mst);
1665 
1666 	ifp = tls->rx_ifp;
1667 	if_ref(ifp);
1668 	SOCKBUF_UNLOCK(&so->so_rcv);
1669 
1670 	params.hdr.type = IF_SND_TAG_TYPE_TLS_RX;
1671 	params.hdr.flowid = inp->inp_flowid;
1672 	params.hdr.flowtype = inp->inp_flowtype;
1673 	params.hdr.numa_domain = inp->inp_numa_domain;
1674 	params.tls_rx.inp = inp;
1675 	params.tls_rx.tls = tls;
1676 	params.tls_rx.vlan_id = tls->rx_vlan_id;
1677 	INP_RUNLOCK(inp);
1678 
1679 	if (inp->inp_vflag & INP_IPV6) {
1680 		if ((if_getcapenable2(ifp) & IFCAP2_BIT(IFCAP2_RXTLS6)) == 0)
1681 			goto out;
1682 	} else {
1683 		if ((if_getcapenable2(ifp) & IFCAP2_BIT(IFCAP2_RXTLS4)) == 0)
1684 			goto out;
1685 	}
1686 
1687 	error = m_snd_tag_alloc(ifp, &params, &mst);
1688 	if (error == 0) {
1689 		SOCKBUF_LOCK(&so->so_rcv);
1690 		tls->snd_tag = mst;
1691 		SOCKBUF_UNLOCK(&so->so_rcv);
1692 
1693 		counter_u64_add(ktls_ifnet_reset, 1);
1694 	} else {
1695 		/*
1696 		 * Just fall back to software decryption if a tag
1697 		 * cannot be allocated leaving the connection intact.
1698 		 * If a future input path change switches to another
1699 		 * interface this connection will resume ifnet TLS.
1700 		 */
1701 		counter_u64_add(ktls_ifnet_reset_failed, 1);
1702 	}
1703 
1704 out:
1705 	mtx_pool_lock(mtxpool_sleep, tls);
1706 	tls->reset_pending = false;
1707 	mtx_pool_unlock(mtxpool_sleep, tls);
1708 
1709 	if (ifp != NULL)
1710 		if_rele(ifp);
1711 	CURVNET_SET(so->so_vnet);
1712 	sorele(so);
1713 	CURVNET_RESTORE();
1714 	ktls_free(tls);
1715 }
1716 
1717 /*
1718  * Try to allocate a new TLS send tag.  This task is scheduled when
1719  * ip_output detects a route change while trying to transmit a packet
1720  * holding a TLS record.  If a new tag is allocated, replace the tag
1721  * in the TLS session.  Subsequent packets on the connection will use
1722  * the new tag.  If a new tag cannot be allocated, drop the
1723  * connection.
1724  */
1725 static void
1726 ktls_reset_send_tag(void *context, int pending)
1727 {
1728 	struct epoch_tracker et;
1729 	struct ktls_session *tls;
1730 	struct m_snd_tag *old, *new;
1731 	struct inpcb *inp;
1732 	struct tcpcb *tp;
1733 	int error;
1734 
1735 	MPASS(pending == 1);
1736 
1737 	tls = context;
1738 	inp = tls->inp;
1739 
1740 	/*
1741 	 * Free the old tag first before allocating a new one.
1742 	 * ip[6]_output_send() will treat a NULL send tag the same as
1743 	 * an ifp mismatch and drop packets until a new tag is
1744 	 * allocated.
1745 	 *
1746 	 * Write-lock the INP when changing tls->snd_tag since
1747 	 * ip[6]_output_send() holds a read-lock when reading the
1748 	 * pointer.
1749 	 */
1750 	INP_WLOCK(inp);
1751 	old = tls->snd_tag;
1752 	tls->snd_tag = NULL;
1753 	INP_WUNLOCK(inp);
1754 	if (old != NULL)
1755 		m_snd_tag_rele(old);
1756 
1757 	error = ktls_alloc_snd_tag(inp, tls, true, &new);
1758 
1759 	if (error == 0) {
1760 		INP_WLOCK(inp);
1761 		tls->snd_tag = new;
1762 		mtx_pool_lock(mtxpool_sleep, tls);
1763 		tls->reset_pending = false;
1764 		mtx_pool_unlock(mtxpool_sleep, tls);
1765 		INP_WUNLOCK(inp);
1766 
1767 		counter_u64_add(ktls_ifnet_reset, 1);
1768 
1769 		/*
1770 		 * XXX: Should we kick tcp_output explicitly now that
1771 		 * the send tag is fixed or just rely on timers?
1772 		 */
1773 	} else {
1774 		NET_EPOCH_ENTER(et);
1775 		INP_WLOCK(inp);
1776 		if (!(inp->inp_flags & INP_DROPPED)) {
1777 			tp = intotcpcb(inp);
1778 			CURVNET_SET(inp->inp_vnet);
1779 			tp = tcp_drop(tp, ECONNABORTED);
1780 			CURVNET_RESTORE();
1781 			if (tp != NULL) {
1782 				counter_u64_add(ktls_ifnet_reset_dropped, 1);
1783 				INP_WUNLOCK(inp);
1784 			}
1785 		} else
1786 			INP_WUNLOCK(inp);
1787 		NET_EPOCH_EXIT(et);
1788 
1789 		counter_u64_add(ktls_ifnet_reset_failed, 1);
1790 
1791 		/*
1792 		 * Leave reset_pending true to avoid future tasks while
1793 		 * the socket goes away.
1794 		 */
1795 	}
1796 
1797 	ktls_free(tls);
1798 }
1799 
1800 void
1801 ktls_input_ifp_mismatch(struct sockbuf *sb, struct ifnet *ifp)
1802 {
1803 	struct ktls_session *tls;
1804 	struct socket *so;
1805 
1806 	SOCKBUF_LOCK_ASSERT(sb);
1807 	KASSERT(sb->sb_flags & SB_TLS_RX, ("%s: sockbuf %p isn't TLS RX",
1808 	    __func__, sb));
1809 	so = __containerof(sb, struct socket, so_rcv);
1810 
1811 	tls = sb->sb_tls_info;
1812 	if_rele(tls->rx_ifp);
1813 	if_ref(ifp);
1814 	tls->rx_ifp = ifp;
1815 
1816 	/*
1817 	 * See if we should schedule a task to update the receive tag for
1818 	 * this session.
1819 	 */
1820 	mtx_pool_lock(mtxpool_sleep, tls);
1821 	if (!tls->reset_pending) {
1822 		(void) ktls_hold(tls);
1823 		soref(so);
1824 		tls->so = so;
1825 		tls->reset_pending = true;
1826 		taskqueue_enqueue(taskqueue_thread, &tls->reset_tag_task);
1827 	}
1828 	mtx_pool_unlock(mtxpool_sleep, tls);
1829 }
1830 
1831 int
1832 ktls_output_eagain(struct inpcb *inp, struct ktls_session *tls)
1833 {
1834 
1835 	if (inp == NULL)
1836 		return (ENOBUFS);
1837 
1838 	INP_LOCK_ASSERT(inp);
1839 
1840 	/*
1841 	 * See if we should schedule a task to update the send tag for
1842 	 * this session.
1843 	 */
1844 	mtx_pool_lock(mtxpool_sleep, tls);
1845 	if (!tls->reset_pending) {
1846 		(void) ktls_hold(tls);
1847 		tls->reset_pending = true;
1848 		taskqueue_enqueue(taskqueue_thread, &tls->reset_tag_task);
1849 	}
1850 	mtx_pool_unlock(mtxpool_sleep, tls);
1851 	return (ENOBUFS);
1852 }
1853 
1854 #ifdef RATELIMIT
1855 int
1856 ktls_modify_txrtlmt(struct ktls_session *tls, uint64_t max_pacing_rate)
1857 {
1858 	union if_snd_tag_modify_params params = {
1859 		.rate_limit.max_rate = max_pacing_rate,
1860 		.rate_limit.flags = M_NOWAIT,
1861 	};
1862 	struct m_snd_tag *mst;
1863 
1864 	/* Can't get to the inp, but it should be locked. */
1865 	/* INP_LOCK_ASSERT(inp); */
1866 
1867 	MPASS(tls->mode == TCP_TLS_MODE_IFNET);
1868 
1869 	if (tls->snd_tag == NULL) {
1870 		/*
1871 		 * Resetting send tag, ignore this change.  The
1872 		 * pending reset may or may not see this updated rate
1873 		 * in the tcpcb.  If it doesn't, we will just lose
1874 		 * this rate change.
1875 		 */
1876 		return (0);
1877 	}
1878 
1879 	mst = tls->snd_tag;
1880 
1881 	MPASS(mst != NULL);
1882 	MPASS(mst->sw->type == IF_SND_TAG_TYPE_TLS_RATE_LIMIT);
1883 
1884 	return (mst->sw->snd_tag_modify(mst, &params));
1885 }
1886 #endif
1887 
1888 static void
1889 ktls_destroy_help(void *context, int pending __unused)
1890 {
1891 	ktls_destroy(context);
1892 }
1893 
1894 void
1895 ktls_destroy(struct ktls_session *tls)
1896 {
1897 	struct inpcb *inp;
1898 	struct tcpcb *tp;
1899 	bool wlocked;
1900 
1901 	MPASS(tls->refcount == 0);
1902 
1903 	inp = tls->inp;
1904 	if (tls->tx) {
1905 		wlocked = INP_WLOCKED(inp);
1906 		if (!wlocked && !INP_TRY_WLOCK(inp)) {
1907 			/*
1908 			 * rwlock read locks are anonymous, and there
1909 			 * is no way to know if our current thread
1910 			 * holds an rlock on the inp.  As a rough
1911 			 * estimate, check to see if the thread holds
1912 			 * *any* rlocks at all.  If it does not, then we
1913 			 * know that we don't hold the inp rlock and
1914 			 * can safely take the wlock.
1915 			 */
1916 			if (curthread->td_rw_rlocks == 0) {
1917 				INP_WLOCK(inp);
1918 			} else {
1919 				/*
1920 				 * We might hold the rlock, so let's
1921 				 * do the destroy in a taskqueue
1922 				 * context to avoid a potential
1923 				 * deadlock.  This should be very
1924 				 * rare.
1925 				 */
1926 				counter_u64_add(ktls_destroy_task, 1);
1927 				TASK_INIT(&tls->destroy_task, 0,
1928 				    ktls_destroy_help, tls);
1929 				(void)taskqueue_enqueue(taskqueue_thread,
1930 				    &tls->destroy_task);
1931 				return;
1932 			}
1933 		}
1934 	}
1935 
1936 	if (tls->sequential_records) {
1937 		struct mbuf *m, *n;
1938 		int page_count;
1939 
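		/*
		 * Free any records still parked on pending_records.
		 * Each list entry heads a batch whose total page count
		 * is m_epg_enc_cnt; walk the chain freeing mbufs until
		 * that many pages worth of records have been released.
		 */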
1940 		STAILQ_FOREACH_SAFE(m, &tls->pending_records, m_epg_stailq, n) {
1941 			page_count = m->m_epg_enc_cnt;
1942 			while (page_count > 0) {
1943 				KASSERT(page_count >= m->m_epg_nrdy,
1944 				    ("%s: too few pages", __func__));
1945 				page_count -= m->m_epg_nrdy;
1946 				m = m_free(m);
1947 			}
1948 		}
1949 	}
1950 
1951 	counter_u64_add(ktls_offload_active, -1);
1952 	switch (tls->mode) {
1953 	case TCP_TLS_MODE_SW:
1954 		switch (tls->params.cipher_algorithm) {
1955 		case CRYPTO_AES_CBC:
1956 			counter_u64_add(ktls_sw_cbc, -1);
1957 			break;
1958 		case CRYPTO_AES_NIST_GCM_16:
1959 			counter_u64_add(ktls_sw_gcm, -1);
1960 			break;
1961 		case CRYPTO_CHACHA20_POLY1305:
1962 			counter_u64_add(ktls_sw_chacha20, -1);
1963 			break;
1964 		}
1965 		break;
1966 	case TCP_TLS_MODE_IFNET:
1967 		switch (tls->params.cipher_algorithm) {
1968 		case CRYPTO_AES_CBC:
1969 			counter_u64_add(ktls_ifnet_cbc, -1);
1970 			break;
1971 		case CRYPTO_AES_NIST_GCM_16:
1972 			counter_u64_add(ktls_ifnet_gcm, -1);
1973 			break;
1974 		case CRYPTO_CHACHA20_POLY1305:
1975 			counter_u64_add(ktls_ifnet_chacha20, -1);
1976 			break;
1977 		}
1978 		if (tls->snd_tag != NULL)
1979 			m_snd_tag_rele(tls->snd_tag);
1980 		if (tls->rx_ifp != NULL)
1981 			if_rele(tls->rx_ifp);
1982 		if (tls->tx) {
1983 			INP_WLOCK_ASSERT(inp);
1984 			tp = intotcpcb(inp);
1985 			MPASS(tp->t_nic_ktls_xmit == 1);
1986 			tp->t_nic_ktls_xmit = 0;
1987 		}
1988 		break;
1989 #ifdef TCP_OFFLOAD
1990 	case TCP_TLS_MODE_TOE:
1991 		switch (tls->params.cipher_algorithm) {
1992 		case CRYPTO_AES_CBC:
1993 			counter_u64_add(ktls_toe_cbc, -1);
1994 			break;
1995 		case CRYPTO_AES_NIST_GCM_16:
1996 			counter_u64_add(ktls_toe_gcm, -1);
1997 			break;
1998 		case CRYPTO_CHACHA20_POLY1305:
1999 			counter_u64_add(ktls_toe_chacha20, -1);
2000 			break;
2001 		}
2002 		break;
2003 #endif
2004 	}
2005 	if (tls->ocf_session != NULL)
2006 		ktls_ocf_free(tls);
2007 	if (tls->params.auth_key != NULL) {
2008 		zfree(tls->params.auth_key, M_KTLS);
2009 		tls->params.auth_key = NULL;
2010 		tls->params.auth_key_len = 0;
2011 	}
2012 	if (tls->params.cipher_key != NULL) {
2013 		zfree(tls->params.cipher_key, M_KTLS);
2014 		tls->params.cipher_key = NULL;
2015 		tls->params.cipher_key_len = 0;
2016 	}
2017 	if (tls->tx) {
2018 		INP_WLOCK_ASSERT(inp);
2019 		if (!in_pcbrele_wlocked(inp) && !wlocked)
2020 			INP_WUNLOCK(inp);
2021 	}
2022 	explicit_bzero(tls->params.iv, sizeof(tls->params.iv));
2023 
2024 	uma_zfree(ktls_session_zone, tls);
2025 }
2026 
2027 void
2028 ktls_seq(struct sockbuf *sb, struct mbuf *m)
2029 {
2030 
2031 	for (; m != NULL; m = m->m_next) {
2032 		KASSERT((m->m_flags & M_EXTPG) != 0,
2033 		    ("ktls_seq: mapped mbuf %p", m));
2034 
2035 		m->m_epg_seqno = sb->sb_tls_seqno;
2036 		sb->sb_tls_seqno++;
2037 	}
2038 }
2039 
2040 /*
2041  * Add TLS framing (headers and trailers) to a chain of mbufs.  Each
2042  * mbuf in the chain must be an unmapped mbuf.  The payload of the
2043  * mbuf must be populated with the payload of each TLS record.
2044  *
2045  * The record_type argument specifies the TLS record type used when
2046  * populating the TLS header.
2047  *
2048  * The enq_cnt argument on return is set to the number of pages of
2049  * payload data for this entire chain that need to be encrypted via SW
2050  * encryption.  The returned value should be passed to ktls_enqueue
2051  * when scheduling encryption of this chain of mbufs.  To handle the
2052  * special case of empty fragments for TLS 1.0 sessions, an empty
2053  * fragment counts as one page.
2054  */
2055 void
2056 ktls_frame(struct mbuf *top, struct ktls_session *tls, int *enq_cnt,
2057     uint8_t record_type)
2058 {
2059 	struct tls_record_layer *tlshdr;
2060 	struct mbuf *m;
2061 	uint64_t *noncep;
2062 	uint16_t tls_len;
2063 	int maxlen __diagused;
2064 
2065 	maxlen = tls->params.max_frame_len;
2066 	*enq_cnt = 0;
2067 	for (m = top; m != NULL; m = m->m_next) {
2068 		/*
2069 		 * All mbufs in the chain should be TLS records whose
2070 		 * payload does not exceed the maximum frame length.
2071 		 *
2072 		 * Empty TLS 1.0 records are permitted when using CBC.
2073 		 */
2074 		KASSERT(m->m_len <= maxlen && m->m_len >= 0 &&
2075 		    (m->m_len > 0 || ktls_permit_empty_frames(tls)),
2076 		    ("ktls_frame: m %p len %d", m, m->m_len));
2077 
2078 		/*
2079 		 * TLS frames require unmapped mbufs to store session
2080 		 * info.
2081 		 */
2082 		KASSERT((m->m_flags & M_EXTPG) != 0,
2083 		    ("ktls_frame: mapped mbuf %p (top = %p)", m, top));
2084 
2085 		tls_len = m->m_len;
2086 
2087 		/* Save a reference to the session. */
2088 		m->m_epg_tls = ktls_hold(tls);
2089 
2090 		m->m_epg_hdrlen = tls->params.tls_hlen;
2091 		m->m_epg_trllen = tls->params.tls_tlen;
2092 		if (tls->params.cipher_algorithm == CRYPTO_AES_CBC) {
2093 			int bs, delta;
2094 
2095 			/*
2096 			 * AES-CBC pads messages to a multiple of the
2097 			 * block size.  Note that the padding is
2098 			 * applied after the digest and the encryption
2099 			 * is done on the "plaintext || mac || padding".
2100 			 * At least one byte of padding is always
2101 			 * present.
2102 			 *
2103 			 * Compute the final trailer length assuming
2104 			 * at most one block of padding.
2105 			 * tls->params.tls_tlen is the maximum
2106 			 * possible trailer length (padding + digest).
2107 			 * delta holds the number of excess padding
2108 			 * bytes if the maximum were used.  Those
2109 			 * extra bytes are removed.
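			 *
			 * Illustrative arithmetic only: with a 16 byte
			 * block size and a 48 byte maximum trailer (a
			 * 32 byte digest plus a full pad block), a 100
			 * byte payload gives delta = (100 + 48) & 15 =
			 * 4, so the final trailer is 44 bytes: the
			 * digest followed by 12 bytes of padding.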
2110 			 */
2111 			bs = tls->params.tls_bs;
2112 			delta = (tls_len + tls->params.tls_tlen) & (bs - 1);
2113 			m->m_epg_trllen -= delta;
2114 		}
2115 		m->m_len += m->m_epg_hdrlen + m->m_epg_trllen;
2116 
2117 		/* Populate the TLS header. */
2118 		tlshdr = (void *)m->m_epg_hdr;
2119 		tlshdr->tls_vmajor = tls->params.tls_vmajor;
2120 
2121 		/*
2122 		 * TLS 1.3 masquerades as TLS 1.2 with a record type
2123 		 * of TLS_RLTYPE_APP.
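		 *
		 * On the wire this yields the familiar 17 03 03
		 * <length> record header; the true record type is
		 * carried as the final plaintext byte (stored in
		 * m_epg_trail[0] here) and recovered by the peer
		 * after decryption.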
2124 		 */
2125 		if (tls->params.tls_vminor == TLS_MINOR_VER_THREE &&
2126 		    tls->params.tls_vmajor == TLS_MAJOR_VER_ONE) {
2127 			tlshdr->tls_vminor = TLS_MINOR_VER_TWO;
2128 			tlshdr->tls_type = TLS_RLTYPE_APP;
2129 			/* save the real record type for later */
2130 			m->m_epg_record_type = record_type;
2131 			m->m_epg_trail[0] = record_type;
2132 		} else {
2133 			tlshdr->tls_vminor = tls->params.tls_vminor;
2134 			tlshdr->tls_type = record_type;
2135 		}
2136 		tlshdr->tls_length = htons(m->m_len - sizeof(*tlshdr));
2137 
2138 		/*
2139 		 * Store nonces / explicit IVs after the end of the
2140 		 * TLS header.
2141 		 *
2142 		 * For GCM with TLS 1.2, an 8 byte nonce is copied
2143 		 * from the end of the IV.  The nonce is then
2144 		 * incremented for use by the next record.
2145 		 *
2146 		 * For CBC, a random nonce is inserted for TLS 1.1+.
2147 		 */
2148 		if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16 &&
2149 		    tls->params.tls_vminor == TLS_MINOR_VER_TWO) {
2150 			noncep = (uint64_t *)(tls->params.iv + 8);
2151 			be64enc(tlshdr + 1, *noncep);
2152 			(*noncep)++;
2153 		} else if (tls->params.cipher_algorithm == CRYPTO_AES_CBC &&
2154 		    tls->params.tls_vminor >= TLS_MINOR_VER_ONE)
2155 			arc4rand(tlshdr + 1, AES_BLOCK_LEN, 0);
2156 
2157 		/*
2158 		 * When using SW encryption, mark the mbuf not ready.
2159 		 * It will be marked ready via sbready() after the
2160 		 * record has been encrypted.
2161 		 *
2162 		 * When using ifnet TLS, unencrypted TLS records are
2163 		 * sent down the stack to the NIC.
2164 		 */
2165 		if (tls->mode == TCP_TLS_MODE_SW) {
2166 			m->m_flags |= M_NOTREADY;
2167 			if (__predict_false(tls_len == 0)) {
2168 				/* TLS 1.0 empty fragment. */
2169 				m->m_epg_nrdy = 1;
2170 			} else
2171 				m->m_epg_nrdy = m->m_epg_npgs;
2172 			*enq_cnt += m->m_epg_nrdy;
2173 		}
2174 	}
2175 }
2176 
2177 bool
2178 ktls_permit_empty_frames(struct ktls_session *tls)
2179 {
2180 	return (tls->params.cipher_algorithm == CRYPTO_AES_CBC &&
2181 	    tls->params.tls_vminor == TLS_MINOR_VER_ZERO);
2182 }
2183 
2184 void
2185 ktls_check_rx(struct sockbuf *sb)
2186 {
2187 	struct tls_record_layer hdr;
2188 	struct ktls_wq *wq;
2189 	struct socket *so;
2190 	bool running;
2191 
2192 	SOCKBUF_LOCK_ASSERT(sb);
2193 	KASSERT(sb->sb_flags & SB_TLS_RX, ("%s: sockbuf %p isn't TLS RX",
2194 	    __func__, sb));
2195 	so = __containerof(sb, struct socket, so_rcv);
2196 
2197 	if (sb->sb_flags & SB_TLS_RX_RUNNING)
2198 		return;
2199 
2200 	/* Is there enough queued for a TLS header? */
2201 	if (sb->sb_tlscc < sizeof(hdr)) {
2202 		if ((sb->sb_state & SBS_CANTRCVMORE) != 0 && sb->sb_tlscc != 0)
2203 			so->so_error = EMSGSIZE;
2204 		return;
2205 	}
2206 
2207 	m_copydata(sb->sb_mtls, 0, sizeof(hdr), (void *)&hdr);
2208 
2209 	/* Is the entire record queued? */
2210 	if (sb->sb_tlscc < sizeof(hdr) + ntohs(hdr.tls_length)) {
2211 		if ((sb->sb_state & SBS_CANTRCVMORE) != 0)
2212 			so->so_error = EMSGSIZE;
2213 		return;
2214 	}
2215 
2216 	sb->sb_flags |= SB_TLS_RX_RUNNING;
2217 
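	/*
	 * Hand the socket off to a KTLS worker thread.  The reference
	 * taken here is released by ktls_decrypt() once the queued
	 * records have been processed.
	 */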
2218 	soref(so);
2219 	wq = &ktls_wq[so->so_rcv.sb_tls_info->wq_index];
2220 	mtx_lock(&wq->mtx);
2221 	STAILQ_INSERT_TAIL(&wq->so_head, so, so_ktls_rx_list);
2222 	running = wq->running;
2223 	mtx_unlock(&wq->mtx);
2224 	if (!running)
2225 		wakeup(wq);
2226 	counter_u64_add(ktls_cnt_rx_queued, 1);
2227 }
2228 
2229 static struct mbuf *
2230 ktls_detach_record(struct sockbuf *sb, int len)
2231 {
2232 	struct mbuf *m, *n, *top;
2233 	int remain;
2234 
2235 	SOCKBUF_LOCK_ASSERT(sb);
2236 	MPASS(len <= sb->sb_tlscc);
2237 
2238 	/*
2239 	 * If the TLS chain is the exact size of the record,
2240 	 * just grab the whole record.
2241 	 */
2242 	top = sb->sb_mtls;
2243 	if (sb->sb_tlscc == len) {
2244 		sb->sb_mtls = NULL;
2245 		sb->sb_mtlstail = NULL;
2246 		goto out;
2247 	}
2248 
2249 	/*
2250 	 * While it would be nice to use m_split() here, we need
2251 	 * to know exactly what m_split() allocates to update the
2252 	 * accounting, so do it inline instead.
2253 	 */
2254 	remain = len;
2255 	for (m = top; remain > m->m_len; m = m->m_next)
2256 		remain -= m->m_len;
2257 
2258 	/* Easy case: don't have to split 'm'. */
2259 	if (remain == m->m_len) {
2260 		sb->sb_mtls = m->m_next;
2261 		if (sb->sb_mtls == NULL)
2262 			sb->sb_mtlstail = NULL;
2263 		m->m_next = NULL;
2264 		goto out;
2265 	}
2266 
2267 	/*
2268 	 * Need to allocate an mbuf to hold the remainder of 'm'.  Try
2269 	 * with M_NOWAIT first.
2270 	 */
2271 	n = m_get(M_NOWAIT, MT_DATA);
2272 	if (n == NULL) {
2273 		/*
2274 		 * Use M_WAITOK with socket buffer unlocked.  If
2275 		 * 'sb_mtls' changes while the lock is dropped, return
2276 		 * NULL to force the caller to retry.
2277 		 */
2278 		SOCKBUF_UNLOCK(sb);
2279 
2280 		n = m_get(M_WAITOK, MT_DATA);
2281 
2282 		SOCKBUF_LOCK(sb);
2283 		if (sb->sb_mtls != top) {
2284 			m_free(n);
2285 			return (NULL);
2286 		}
2287 	}
2288 	n->m_flags |= (m->m_flags & (M_NOTREADY | M_DECRYPTED));
2289 
2290 	/* Store remainder in 'n'. */
2291 	n->m_len = m->m_len - remain;
2292 	if (m->m_flags & M_EXT) {
2293 		n->m_data = m->m_data + remain;
2294 		mb_dupcl(n, m);
2295 	} else {
2296 		bcopy(mtod(m, caddr_t) + remain, mtod(n, caddr_t), n->m_len);
2297 	}
2298 
2299 	/* Trim 'm' and update accounting. */
2300 	m->m_len -= n->m_len;
2301 	sb->sb_tlscc -= n->m_len;
2302 	sb->sb_ccc -= n->m_len;
2303 
2304 	/* Account for 'n'. */
2305 	sballoc_ktls_rx(sb, n);
2306 
2307 	/* Insert 'n' into the TLS chain. */
2308 	sb->sb_mtls = n;
2309 	n->m_next = m->m_next;
2310 	if (sb->sb_mtlstail == m)
2311 		sb->sb_mtlstail = n;
2312 
2313 	/* Detach the record from the TLS chain. */
2314 	m->m_next = NULL;
2315 
2316 out:
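	/*
	 * Transfer the record's bytes from the queued-TLS accounting
	 * to the "decrypting" count (sb_tlsdcc).  ktls_decrypt()
	 * checks sb_tlsdcc after re-acquiring the socket buffer lock
	 * to detect that sbcut/sbdrop/sbflush discarded the data in
	 * the meantime.
	 */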
2317 	MPASS(m_length(top, NULL) == len);
2318 	for (m = top; m != NULL; m = m->m_next)
2319 		sbfree_ktls_rx(sb, m);
2320 	sb->sb_tlsdcc = len;
2321 	sb->sb_ccc += len;
2322 	SBCHECK(sb);
2323 	return (top);
2324 }
2325 
2326 /*
2327  * Determine the length of the trailing zero padding and find the real
2328  * record type in the byte before the padding.
2329  *
2330  * Walking the mbuf chain backwards is clumsy, so another option would
2331  * be to scan forwards remembering the last non-zero byte before the
2332  * trailer.  However, it would be expensive to scan the entire record.
2333  * Instead, find the last non-zero byte of each mbuf in the chain
2334  * keeping track of the relative offset of that nonzero byte.
2335  *
2336  * *trailer_len is the size of the MAC/tag on input and is set to the
2337  * size of the full trailer including padding and the record type on
2338  * return.
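 *
 * Illustrative example: for a record with a 5 byte header, 70 bytes
 * of payload, a 1 byte record type, 8 bytes of zero padding, and a
 * 16 byte tag, tls_len is 100 and *trailer_len is 16 on input.  The
 * last non-zero byte before the tag is the record type at offset 75,
 * so last_offset becomes 76 and *trailer_len is set to 100 - 76 + 1 =
 * 25 (tag plus padding plus the record type byte).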
2339  */
2340 static int
2341 tls13_find_record_type(struct ktls_session *tls, struct mbuf *m, int tls_len,
2342     int *trailer_len, uint8_t *record_typep)
2343 {
2344 	char *cp;
2345 	u_int digest_start, last_offset, m_len, offset;
2346 	uint8_t record_type;
2347 
2348 	digest_start = tls_len - *trailer_len;
2349 	last_offset = 0;
2350 	offset = 0;
2351 	for (; m != NULL && offset < digest_start;
2352 	     offset += m->m_len, m = m->m_next) {
2353 		/* Don't look for padding in the tag. */
2354 		m_len = min(digest_start - offset, m->m_len);
2355 		cp = mtod(m, char *);
2356 
2357 		/* Find last non-zero byte in this mbuf. */
2358 		while (m_len > 0 && cp[m_len - 1] == 0)
2359 			m_len--;
2360 		if (m_len > 0) {
2361 			record_type = cp[m_len - 1];
2362 			last_offset = offset + m_len;
2363 		}
2364 	}
2365 	if (last_offset < tls->params.tls_hlen)
2366 		return (EBADMSG);
2367 
2368 	*record_typep = record_type;
2369 	*trailer_len = tls_len - last_offset + 1;
2370 	return (0);
2371 }
2372 
2373 /*
2374  * Check if an mbuf chain is fully decrypted at the given offset and
2375  * length.  Returns KTLS_MBUF_CRYPTO_ST_DECRYPTED if all data is
2376  * decrypted, KTLS_MBUF_CRYPTO_ST_MIXED if there is a mix of encrypted
2377  * and decrypted data, or KTLS_MBUF_CRYPTO_ST_ENCRYPTED if all data
2378  * is encrypted.
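 *
 * The OR / AND accumulation over the covered mbufs allows a single
 * pass: the XOR of the two accumulators has M_DECRYPTED set exactly
 * when some, but not all, of the mbufs carry the flag.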
2379  */
2380 ktls_mbuf_crypto_st_t
2381 ktls_mbuf_crypto_state(struct mbuf *mb, int offset, int len)
2382 {
2383 	int m_flags_ored = 0;
2384 	int m_flags_anded = -1;
2385 
2386 	for (; mb != NULL; mb = mb->m_next) {
2387 		if (offset < mb->m_len)
2388 			break;
2389 		offset -= mb->m_len;
2390 	}
2391 	offset += len;
2392 
2393 	for (; mb != NULL; mb = mb->m_next) {
2394 		m_flags_ored |= mb->m_flags;
2395 		m_flags_anded &= mb->m_flags;
2396 
2397 		if (offset <= mb->m_len)
2398 			break;
2399 		offset -= mb->m_len;
2400 	}
2401 	MPASS(mb != NULL || offset == 0);
2402 
2403 	if ((m_flags_ored ^ m_flags_anded) & M_DECRYPTED)
2404 		return (KTLS_MBUF_CRYPTO_ST_MIXED);
2405 	else
2406 		return ((m_flags_ored & M_DECRYPTED) ?
2407 		    KTLS_MBUF_CRYPTO_ST_DECRYPTED :
2408 		    KTLS_MBUF_CRYPTO_ST_ENCRYPTED);
2409 }
2410 
2411 /*
2412  * ktls_resync_ifnet - get HW TLS RX back on track after packet loss
2413  */
2414 static int
2415 ktls_resync_ifnet(struct socket *so, uint32_t tls_len, uint64_t tls_rcd_num)
2416 {
2417 	union if_snd_tag_modify_params params;
2418 	struct m_snd_tag *mst;
2419 	struct inpcb *inp;
2420 	struct tcpcb *tp;
2421 
2422 	mst = so->so_rcv.sb_tls_info->snd_tag;
2423 	if (__predict_false(mst == NULL))
2424 		return (EINVAL);
2425 
2426 	inp = sotoinpcb(so);
2427 	if (__predict_false(inp == NULL))
2428 		return (EINVAL);
2429 
2430 	INP_RLOCK(inp);
2431 	if (inp->inp_flags & INP_DROPPED) {
2432 		INP_RUNLOCK(inp);
2433 		return (ECONNRESET);
2434 	}
2435 
2436 	tp = intotcpcb(inp);
2437 	MPASS(tp != NULL);
2438 
2439 	/* Get the TCP sequence number of the next valid TLS header. */
2440 	SOCKBUF_LOCK(&so->so_rcv);
2441 	params.tls_rx.tls_hdr_tcp_sn =
2442 	    tp->rcv_nxt - so->so_rcv.sb_tlscc - tls_len;
2443 	params.tls_rx.tls_rec_length = tls_len;
2444 	params.tls_rx.tls_seq_number = tls_rcd_num;
2445 	SOCKBUF_UNLOCK(&so->so_rcv);
2446 
2447 	INP_RUNLOCK(inp);
2448 
2449 	MPASS(mst->sw->type == IF_SND_TAG_TYPE_TLS_RX);
2450 	return (mst->sw->snd_tag_modify(mst, &params));
2451 }
2452 
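/*
 * Abort the connection with the given error.  If the connection has
 * already been dropped, record the error on the socket and wake up
 * any readers so that they observe it.
 */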
2453 static void
2454 ktls_drop(struct socket *so, int error)
2455 {
2456 	struct epoch_tracker et;
2457 	struct inpcb *inp = sotoinpcb(so);
2458 	struct tcpcb *tp;
2459 
2460 	NET_EPOCH_ENTER(et);
2461 	INP_WLOCK(inp);
2462 	if (!(inp->inp_flags & INP_DROPPED)) {
2463 		tp = intotcpcb(inp);
2464 		CURVNET_SET(inp->inp_vnet);
2465 		tp = tcp_drop(tp, error);
2466 		CURVNET_RESTORE();
2467 		if (tp != NULL)
2468 			INP_WUNLOCK(inp);
2469 	} else {
2470 		so->so_error = error;
2471 		SOCK_RECVBUF_LOCK(so);
2472 		sorwakeup_locked(so);
2473 		INP_WUNLOCK(inp);
2474 	}
2475 	NET_EPOCH_EXIT(et);
2476 }
2477 
2478 static void
2479 ktls_decrypt(struct socket *so)
2480 {
2481 	char tls_header[MBUF_PEXT_HDR_LEN];
2482 	struct ktls_session *tls;
2483 	struct sockbuf *sb;
2484 	struct tls_record_layer *hdr;
2485 	struct tls_get_record tgr;
2486 	struct mbuf *control, *data, *m;
2487 	ktls_mbuf_crypto_st_t state;
2488 	uint64_t seqno;
2489 	int error, remain, tls_len, trail_len;
2490 	bool tls13;
2491 	uint8_t vminor, record_type;
2492 
2493 	hdr = (struct tls_record_layer *)tls_header;
2494 	sb = &so->so_rcv;
2495 	SOCKBUF_LOCK(sb);
2496 	KASSERT(sb->sb_flags & SB_TLS_RX_RUNNING,
2497 	    ("%s: socket %p not running", __func__, so));
2498 
2499 	tls = sb->sb_tls_info;
2500 	MPASS(tls != NULL);
2501 
2502 	tls13 = (tls->params.tls_vminor == TLS_MINOR_VER_THREE);
2503 	if (tls13)
2504 		vminor = TLS_MINOR_VER_TWO;
2505 	else
2506 		vminor = tls->params.tls_vminor;
2507 	for (;;) {
2508 		/* Is there enough queued for a TLS header? */
2509 		if (sb->sb_tlscc < tls->params.tls_hlen)
2510 			break;
2511 
2512 		m_copydata(sb->sb_mtls, 0, tls->params.tls_hlen, tls_header);
2513 		tls_len = sizeof(*hdr) + ntohs(hdr->tls_length);
2514 
2515 		if (hdr->tls_vmajor != tls->params.tls_vmajor ||
2516 		    hdr->tls_vminor != vminor)
2517 			error = EINVAL;
2518 		else if (tls13 && hdr->tls_type != TLS_RLTYPE_APP)
2519 			error = EINVAL;
2520 		else if (tls_len < tls->params.tls_hlen || tls_len >
2521 		    tls->params.tls_hlen + TLS_MAX_MSG_SIZE_V10_2 +
2522 		    tls->params.tls_tlen)
2523 			error = EMSGSIZE;
2524 		else
2525 			error = 0;
2526 		if (__predict_false(error != 0)) {
2527 			/*
2528 			 * We have a corrupted record and are likely
2529 			 * out of sync.  The connection isn't
2530 			 * recoverable at this point, so abort it.
2531 			 */
2532 			SOCKBUF_UNLOCK(sb);
2533 			counter_u64_add(ktls_offload_corrupted_records, 1);
2534 
2535 			ktls_drop(so, error);
2536 			goto deref;
2537 		}
2538 
2539 		/* Is the entire record queued? */
2540 		if (sb->sb_tlscc < tls_len)
2541 			break;
2542 
2543 		/*
2544 		 * Split out the portion of the mbuf chain containing
2545 		 * this TLS record.
2546 		 */
2547 		data = ktls_detach_record(sb, tls_len);
2548 		if (data == NULL)
2549 			continue;
2550 		MPASS(sb->sb_tlsdcc == tls_len);
2551 
2552 		seqno = sb->sb_tls_seqno;
2553 		sb->sb_tls_seqno++;
2554 		SBCHECK(sb);
2555 		SOCKBUF_UNLOCK(sb);
2556 
2557 		/* get crypto state for this TLS record */
2558 		state = ktls_mbuf_crypto_state(data, 0, tls_len);
2559 
2560 		switch (state) {
2561 		case KTLS_MBUF_CRYPTO_ST_MIXED:
2562 			error = ktls_ocf_recrypt(tls, hdr, data, seqno);
2563 			if (error)
2564 				break;
2565 			/* FALLTHROUGH */
2566 		case KTLS_MBUF_CRYPTO_ST_ENCRYPTED:
2567 			error = ktls_ocf_decrypt(tls, hdr, data, seqno,
2568 			    &trail_len);
2569 			if (__predict_true(error == 0)) {
2570 				if (tls13) {
2571 					error = tls13_find_record_type(tls, data,
2572 					    tls_len, &trail_len, &record_type);
2573 				} else {
2574 					record_type = hdr->tls_type;
2575 				}
2576 			}
2577 			break;
2578 		case KTLS_MBUF_CRYPTO_ST_DECRYPTED:
2579 			/*
2580 			 * NIC TLS is only supported for AEAD
2581 			 * ciphersuites, which use a fixed-size
2582 			 * trailer.
2583 			 */
2584 			if (tls13) {
2585 				trail_len = tls->params.tls_tlen - 1;
2586 				error = tls13_find_record_type(tls, data,
2587 				    tls_len, &trail_len, &record_type);
2588 			} else {
2589 				trail_len = tls->params.tls_tlen;
2590 				error = 0;
2591 				record_type = hdr->tls_type;
2592 			}
2593 			break;
2594 		default:
2595 			error = EINVAL;
2596 			break;
2597 		}
2598 		if (error) {
2599 			counter_u64_add(ktls_offload_failed_crypto, 1);
2600 
2601 			SOCKBUF_LOCK(sb);
2602 			if (sb->sb_tlsdcc == 0) {
2603 				/*
2604 				 * sbcut/drop/flush discarded these
2605 				 * mbufs.
2606 				 */
2607 				m_freem(data);
2608 				break;
2609 			}
2610 
2611 			/*
2612 			 * Drop this TLS record's data, but keep
2613 			 * decrypting subsequent records.
2614 			 */
2615 			sb->sb_ccc -= tls_len;
2616 			sb->sb_tlsdcc = 0;
2617 
2618 			if (error != EMSGSIZE)
2619 				error = EBADMSG;
2620 			CURVNET_SET(so->so_vnet);
2621 			so->so_error = error;
2622 			sorwakeup_locked(so);
2623 			CURVNET_RESTORE();
2624 
2625 			m_freem(data);
2626 
2627 			SOCKBUF_LOCK(sb);
2628 			continue;
2629 		}
2630 
2631 		/* Allocate the control mbuf. */
2632 		memset(&tgr, 0, sizeof(tgr));
2633 		tgr.tls_type = record_type;
2634 		tgr.tls_vmajor = hdr->tls_vmajor;
2635 		tgr.tls_vminor = hdr->tls_vminor;
2636 		tgr.tls_length = htobe16(tls_len - tls->params.tls_hlen -
2637 		    trail_len);
2638 		control = sbcreatecontrol(&tgr, sizeof(tgr),
2639 		    TLS_GET_RECORD, IPPROTO_TCP, M_WAITOK);
2640 
2641 		SOCKBUF_LOCK(sb);
2642 		if (sb->sb_tlsdcc == 0) {
2643 			/* sbcut/drop/flush discarded these mbufs. */
2644 			MPASS(sb->sb_tlscc == 0);
2645 			m_freem(data);
2646 			m_freem(control);
2647 			break;
2648 		}
2649 
2650 		/*
2651 		 * Clear the 'dcc' accounting in preparation for
2652 		 * adding the decrypted record.
2653 		 */
2654 		sb->sb_ccc -= tls_len;
2655 		sb->sb_tlsdcc = 0;
2656 		SBCHECK(sb);
2657 
2658 		/* If there is no payload, drop all of the data. */
2659 		if (tgr.tls_length == htobe16(0)) {
2660 			m_freem(data);
2661 			data = NULL;
2662 		} else {
2663 			/* Trim header. */
2664 			remain = tls->params.tls_hlen;
2665 			while (remain > 0) {
2666 				if (data->m_len > remain) {
2667 					data->m_data += remain;
2668 					data->m_len -= remain;
2669 					break;
2670 				}
2671 				remain -= data->m_len;
2672 				data = m_free(data);
2673 			}
2674 
2675 			/* Trim trailer and clear M_NOTREADY. */
2676 			remain = be16toh(tgr.tls_length);
2678 			for (m = data; remain > m->m_len; m = m->m_next) {
2679 				m->m_flags &= ~(M_NOTREADY | M_DECRYPTED);
2680 				remain -= m->m_len;
2681 			}
2682 			m->m_len = remain;
2683 			m_freem(m->m_next);
2684 			m->m_next = NULL;
2685 			m->m_flags &= ~(M_NOTREADY | M_DECRYPTED);
2686 
2687 			/* Set EOR on the final mbuf. */
2688 			m->m_flags |= M_EOR;
2689 		}
2690 
2691 		sbappendcontrol_locked(sb, data, control, 0);
2692 
2693 		if (__predict_false(state != KTLS_MBUF_CRYPTO_ST_DECRYPTED)) {
2694 			sb->sb_flags |= SB_TLS_RX_RESYNC;
2695 			SOCKBUF_UNLOCK(sb);
2696 			ktls_resync_ifnet(so, tls_len, seqno);
2697 			SOCKBUF_LOCK(sb);
2698 		} else if (__predict_false(sb->sb_flags & SB_TLS_RX_RESYNC)) {
2699 			sb->sb_flags &= ~SB_TLS_RX_RESYNC;
2700 			SOCKBUF_UNLOCK(sb);
2701 			ktls_resync_ifnet(so, 0, seqno);
2702 			SOCKBUF_LOCK(sb);
2703 		}
2704 	}
2705 
2706 	sb->sb_flags &= ~SB_TLS_RX_RUNNING;
2707 
2708 	if ((sb->sb_state & SBS_CANTRCVMORE) != 0 && sb->sb_tlscc > 0)
2709 		so->so_error = EMSGSIZE;
2710 
2711 	sorwakeup_locked(so);
2712 
2713 deref:
2714 	SOCKBUF_UNLOCK_ASSERT(sb);
2715 
2716 	CURVNET_SET(so->so_vnet);
2717 	sorele(so);
2718 	CURVNET_RESTORE();
2719 }
2720 
2721 void
2722 ktls_enqueue_to_free(struct mbuf *m)
2723 {
2724 	struct ktls_wq *wq;
2725 	bool running;
2726 
2727 	/* Mark it for freeing. */
2728 	m->m_epg_flags |= EPG_FLAG_2FREE;
2729 	wq = &ktls_wq[m->m_epg_tls->wq_index];
2730 	mtx_lock(&wq->mtx);
2731 	STAILQ_INSERT_TAIL(&wq->m_head, m, m_epg_stailq);
2732 	running = wq->running;
2733 	mtx_unlock(&wq->mtx);
2734 	if (!running)
2735 		wakeup(wq);
2736 }
2737 
2738 static void *
2739 ktls_buffer_alloc(struct ktls_wq *wq, struct mbuf *m)
2740 {
2741 	void *buf;
2742 	int domain, running;
2743 
2744 	if (m->m_epg_npgs <= 2)
2745 		return (NULL);
2746 	if (ktls_buffer_zone == NULL)
2747 		return (NULL);
2748 	if ((u_int)(ticks - wq->lastallocfail) < hz) {
2749 		/*
2750 		 * Rate-limit allocation attempts after a failure.
2751 		 * ktls_buffer_import() will acquire a per-domain mutex to check
2752 		 * the free page queues and may fail consistently if memory is
2753 		 * fragmented.
2754 		 */
2755 		return (NULL);
2756 	}
2757 	buf = uma_zalloc(ktls_buffer_zone, M_NOWAIT | M_NORECLAIM);
2758 	if (buf == NULL) {
2759 		domain = PCPU_GET(domain);
2760 		wq->lastallocfail = ticks;
2761 
2762 		/*
2763 		 * Note that this check is "racy", but the races are
2764 		 * harmless, and are either a spurious wakeup if
2765 		 * multiple threads fail allocations before the reclaim
2766 		 * thread wakes, or waiting an extra second in case we
2767 		 * see an old value of running == true.
2768 		 */
2769 		if (!VM_DOMAIN_EMPTY(domain)) {
2770 			running = atomic_load_int(&ktls_domains[domain].reclaim_td.running);
2771 			if (!running)
2772 				wakeup(&ktls_domains[domain].reclaim_td);
2773 		}
2774 	}
2775 	return (buf);
2776 }
2777 
2778 static int
2779 ktls_encrypt_record(struct ktls_wq *wq, struct mbuf *m,
2780     struct ktls_session *tls, struct ktls_ocf_encrypt_state *state)
2781 {
2782 	vm_page_t pg;
2783 	int error, i, len, off;
2784 
2785 	KASSERT((m->m_flags & (M_EXTPG | M_NOTREADY)) == (M_EXTPG | M_NOTREADY),
2786 	    ("%p not unready & nomap mbuf\n", m));
2787 	KASSERT(ptoa(m->m_epg_npgs) <= ktls_maxlen,
2788 	    ("page count %d larger than maximum frame length %d", m->m_epg_npgs,
2789 	    ktls_maxlen));
2790 
2791 	/* Anonymous mbufs are encrypted in place. */
2792 	if ((m->m_epg_flags & EPG_FLAG_ANON) != 0)
2793 		return (ktls_ocf_encrypt(state, tls, m, NULL, 0));
2794 
2795 	/*
2796 	 * For file-backed mbufs (from sendfile), anonymous wired
2797 	 * pages are allocated and used as the encryption destination.
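	 *
	 * When a contiguous buffer from ktls_buffer_zone is available,
	 * a single iovec covers the whole payload; otherwise one
	 * anonymous page is allocated per source page, each with its
	 * own iovec.  In both cases the final iovec points at the
	 * trailer storage embedded in the mbuf.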
2798 	 */
2799 	if ((state->cbuf = ktls_buffer_alloc(wq, m)) != NULL) {
2800 		len = ptoa(m->m_epg_npgs - 1) + m->m_epg_last_len -
2801 		    m->m_epg_1st_off;
2802 		state->dst_iov[0].iov_base = (char *)state->cbuf +
2803 		    m->m_epg_1st_off;
2804 		state->dst_iov[0].iov_len = len;
2805 		state->parray[0] = DMAP_TO_PHYS((vm_offset_t)state->cbuf);
2806 		i = 1;
2807 	} else {
2808 		off = m->m_epg_1st_off;
2809 		for (i = 0; i < m->m_epg_npgs; i++, off = 0) {
2810 			pg = vm_page_alloc_noobj(VM_ALLOC_NODUMP |
2811 			    VM_ALLOC_WIRED | VM_ALLOC_WAITOK);
2812 			len = m_epg_pagelen(m, i, off);
2813 			state->parray[i] = VM_PAGE_TO_PHYS(pg);
2814 			state->dst_iov[i].iov_base =
2815 			    (char *)PHYS_TO_DMAP(state->parray[i]) + off;
2816 			state->dst_iov[i].iov_len = len;
2817 		}
2818 	}
2819 	KASSERT(i + 1 <= nitems(state->dst_iov), ("dst_iov is too small"));
2820 	state->dst_iov[i].iov_base = m->m_epg_trail;
2821 	state->dst_iov[i].iov_len = m->m_epg_trllen;
2822 
2823 	error = ktls_ocf_encrypt(state, tls, m, state->dst_iov, i + 1);
2824 
2825 	if (__predict_false(error != 0)) {
2826 		/* Free the anonymous pages. */
2827 		if (state->cbuf != NULL)
2828 			uma_zfree(ktls_buffer_zone, state->cbuf);
2829 		else {
2830 			for (i = 0; i < m->m_epg_npgs; i++) {
2831 				pg = PHYS_TO_VM_PAGE(state->parray[i]);
2832 				(void)vm_page_unwire_noq(pg);
2833 				vm_page_free(pg);
2834 			}
2835 		}
2836 	}
2837 	return (error);
2838 }
2839 
2840 /* Number of TLS records in a batch passed to ktls_enqueue(). */
2841 static u_int
2842 ktls_batched_records(struct mbuf *m)
2843 {
2844 	int page_count, records;
2845 
2846 	records = 0;
2847 	page_count = m->m_epg_enc_cnt;
2848 	while (page_count > 0) {
2849 		records++;
2850 		page_count -= m->m_epg_nrdy;
2851 		m = m->m_next;
2852 	}
2853 	KASSERT(page_count == 0, ("%s: mismatched page count", __func__));
2854 	return (records);
2855 }
2856 
2857 void
2858 ktls_enqueue(struct mbuf *m, struct socket *so, int page_count)
2859 {
2860 	struct ktls_session *tls;
2861 	struct ktls_wq *wq;
2862 	int queued;
2863 	bool running;
2864 
2865 	KASSERT(((m->m_flags & (M_EXTPG | M_NOTREADY)) ==
2866 	    (M_EXTPG | M_NOTREADY)),
2867 	    ("ktls_enqueue: %p not unready & nomap mbuf\n", m));
2868 	KASSERT(page_count != 0, ("enqueueing TLS mbuf with zero page count"));
2869 
2870 	KASSERT(m->m_epg_tls->mode == TCP_TLS_MODE_SW, ("!SW TLS mbuf"));
2871 
2872 	m->m_epg_enc_cnt = page_count;
2873 
2874 	/*
2875 	 * Save a pointer to the socket.  The caller is responsible
2876 	 * for taking an additional reference via soref().
2877 	 */
2878 	m->m_epg_so = so;
2879 
2880 	queued = 1;
2881 	tls = m->m_epg_tls;
2882 	wq = &ktls_wq[tls->wq_index];
2883 	mtx_lock(&wq->mtx);
2884 	if (__predict_false(tls->sequential_records)) {
2885 		/*
2886 		 * For TLS 1.0, records must be encrypted
2887 		 * sequentially.  For a given connection, all records
2888 		 * queued to the associated work queue are processed
2889 		 * sequentially.  However, sendfile(2) might complete
2890 		 * I/O requests spanning multiple TLS records out of
2891 		 * order.  Here we ensure TLS records are enqueued to
2892 		 * the work queue in FIFO order.
2893 		 *
2894 		 * tls->next_seqno holds the sequence number of the
2895 		 * next TLS record that should be enqueued to the work
2896 		 * queue.  If this record's sequence number is not
2897 		 * tls->next_seqno, it is a future record; insert it, sorted by
2898 		 * TLS sequence number, into tls->pending_records and
2899 		 * return.
2900 		 *
2901 		 * If this TLS record matches tls->next_seqno, place
2902 		 * it in the work queue and then check
2903 		 * tls->pending_records to see if any
2904 		 * previously-queued records are now ready for
2905 		 * encryption.
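		 *
		 * For example, if sendfile completions enqueue records
		 * 7, 5, and then 6 while next_seqno is 5 (one record
		 * per chain), record 7 is parked on pending_records,
		 * record 5 is queued and advances next_seqno to 6, and
		 * record 6 is then queued followed immediately by the
		 * pending record 7.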
2906 		 */
2907 		if (m->m_epg_seqno != tls->next_seqno) {
2908 			struct mbuf *n, *p;
2909 
2910 			p = NULL;
2911 			STAILQ_FOREACH(n, &tls->pending_records, m_epg_stailq) {
2912 				if (n->m_epg_seqno > m->m_epg_seqno)
2913 					break;
2914 				p = n;
2915 			}
2916 			if (n == NULL)
2917 				STAILQ_INSERT_TAIL(&tls->pending_records, m,
2918 				    m_epg_stailq);
2919 			else if (p == NULL)
2920 				STAILQ_INSERT_HEAD(&tls->pending_records, m,
2921 				    m_epg_stailq);
2922 			else
2923 				STAILQ_INSERT_AFTER(&tls->pending_records, p, m,
2924 				    m_epg_stailq);
2925 			mtx_unlock(&wq->mtx);
2926 			counter_u64_add(ktls_cnt_tx_pending, 1);
2927 			return;
2928 		}
2929 
2930 		tls->next_seqno += ktls_batched_records(m);
2931 		STAILQ_INSERT_TAIL(&wq->m_head, m, m_epg_stailq);
2932 
2933 		while (!STAILQ_EMPTY(&tls->pending_records)) {
2934 			struct mbuf *n;
2935 
2936 			n = STAILQ_FIRST(&tls->pending_records);
2937 			if (n->m_epg_seqno != tls->next_seqno)
2938 				break;
2939 
2940 			queued++;
2941 			STAILQ_REMOVE_HEAD(&tls->pending_records, m_epg_stailq);
2942 			tls->next_seqno += ktls_batched_records(n);
2943 			STAILQ_INSERT_TAIL(&wq->m_head, n, m_epg_stailq);
2944 		}
2945 		counter_u64_add(ktls_cnt_tx_pending, -(queued - 1));
2946 	} else
2947 		STAILQ_INSERT_TAIL(&wq->m_head, m, m_epg_stailq);
2948 
2949 	running = wq->running;
2950 	mtx_unlock(&wq->mtx);
2951 	if (!running)
2952 		wakeup(wq);
2953 	counter_u64_add(ktls_cnt_tx_queued, queued);
2954 }
2955 
2956 /*
2957  * Once a file-backed mbuf (from sendfile) has been encrypted, free
2958  * the pages from the file and replace them with the anonymous pages
2959  * allocated in ktls_encrypt_record().
2960  */
2961 static void
2962 ktls_finish_nonanon(struct mbuf *m, struct ktls_ocf_encrypt_state *state)
2963 {
2964 	int i;
2965 
2966 	MPASS((m->m_epg_flags & EPG_FLAG_ANON) == 0);
2967 
2968 	/* Free the old pages. */
2969 	m->m_ext.ext_free(m);
2970 
2971 	/* Replace them with the new pages. */
2972 	if (state->cbuf != NULL) {
2973 		for (i = 0; i < m->m_epg_npgs; i++)
2974 			m->m_epg_pa[i] = state->parray[0] + ptoa(i);
2975 
2976 		/* Contig pages should go back to the cache. */
2977 		m->m_ext.ext_free = ktls_free_mext_contig;
2978 	} else {
2979 		for (i = 0; i < m->m_epg_npgs; i++)
2980 			m->m_epg_pa[i] = state->parray[i];
2981 
2982 		/* Use the basic free routine. */
2983 		m->m_ext.ext_free = mb_free_mext_pgs;
2984 	}
2985 
2986 	/* Pages are now writable. */
2987 	m->m_epg_flags |= EPG_FLAG_ANON;
2988 }
2989 
2990 static __noinline void
2991 ktls_encrypt(struct ktls_wq *wq, struct mbuf *top)
2992 {
2993 	struct ktls_ocf_encrypt_state state;
2994 	struct ktls_session *tls;
2995 	struct socket *so;
2996 	struct mbuf *m;
2997 	int error, npages, total_pages;
2998 
2999 	so = top->m_epg_so;
3000 	tls = top->m_epg_tls;
3001 	KASSERT(tls != NULL, ("tls = NULL, top = %p\n", top));
3002 	KASSERT(so != NULL, ("so = NULL, top = %p\n", top));
3003 #ifdef INVARIANTS
3004 	top->m_epg_so = NULL;
3005 #endif
3006 	total_pages = top->m_epg_enc_cnt;
3007 	npages = 0;
3008 
3009 	/*
3010 	 * Encrypt the TLS records in the chain of mbufs starting with
3011 	 * 'top'.  'total_pages' gives us a total count of pages and is
3012 	 * used to know when we have finished encrypting the TLS
3013 	 * records originally queued with 'top'.
3014 	 *
3015 	 * NB: These mbufs are queued in the socket buffer and
3016 	 * 'm_next' is traversing the mbufs in the socket buffer.  The
3017 	 * socket buffer lock is not held while traversing this chain.
3018 	 * Since the mbufs are all marked M_NOTREADY their 'm_next'
3019 	 * pointers should be stable.  However, the 'm_next' of the
3020 	 * last mbuf encrypted is not necessarily NULL.  It can point
3021 	 * to other mbufs appended while 'top' was on the TLS work
3022 	 * queue.
3023 	 *
3024 	 * Each mbuf holds an entire TLS record.
3025 	 */
3026 	error = 0;
3027 	for (m = top; npages != total_pages; m = m->m_next) {
3028 		KASSERT(m->m_epg_tls == tls,
3029 		    ("different TLS sessions in a single mbuf chain: %p vs %p",
3030 		    tls, m->m_epg_tls));
3031 		KASSERT(npages + m->m_epg_npgs <= total_pages,
3032 		    ("page count mismatch: top %p, total_pages %d, m %p", top,
3033 		    total_pages, m));
3034 
3035 		error = ktls_encrypt_record(wq, m, tls, &state);
3036 		if (error) {
3037 			counter_u64_add(ktls_offload_failed_crypto, 1);
3038 			break;
3039 		}
3040 
3041 		if ((m->m_epg_flags & EPG_FLAG_ANON) == 0)
3042 			ktls_finish_nonanon(m, &state);
3043 
3044 		npages += m->m_epg_nrdy;
3045 
3046 		/*
3047 		 * Drop a reference to the session now that it is no
3048 		 * longer needed.  Existing code depends on encrypted
3049 		 * records having no associated session vs
3050 		 * yet-to-be-encrypted records having an associated
3051 		 * session.
3052 		 */
3053 		m->m_epg_tls = NULL;
3054 		ktls_free(tls);
3055 	}
3056 
3057 	CURVNET_SET(so->so_vnet);
3058 	if (error == 0) {
3059 		(void)so->so_proto->pr_ready(so, top, npages);
3060 	} else {
3061 		ktls_drop(so, EIO);
3062 		mb_free_notready(top, total_pages);
3063 	}
3064 
3065 	sorele(so);
3066 	CURVNET_RESTORE();
3067 }
3068 
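/*
 * Completion handler for asynchronous encryption requests queued by
 * ktls_encrypt_async(); the OCF backend invokes this once the record
 * has been processed.
 */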
3069 void
3070 ktls_encrypt_cb(struct ktls_ocf_encrypt_state *state, int error)
3071 {
3072 	struct ktls_session *tls;
3073 	struct socket *so;
3074 	struct mbuf *m;
3075 	int npages;
3076 
3077 	m = state->m;
3078 
3079 	if ((m->m_epg_flags & EPG_FLAG_ANON) == 0)
3080 		ktls_finish_nonanon(m, state);
3081 
3082 	so = state->so;
3083 	free(state, M_KTLS);
3084 
3085 	/*
3086 	 * Drop a reference to the session now that it is no longer
3087 	 * needed.  Existing code depends on encrypted records having
3088 	 * no associated session vs yet-to-be-encrypted records having
3089 	 * an associated session.
3090 	 */
3091 	tls = m->m_epg_tls;
3092 	m->m_epg_tls = NULL;
3093 	ktls_free(tls);
3094 
3095 	if (error != 0)
3096 		counter_u64_add(ktls_offload_failed_crypto, 1);
3097 
3098 	CURVNET_SET(so->so_vnet);
3099 	npages = m->m_epg_nrdy;
3100 
3101 	if (error == 0) {
3102 		(void)so->so_proto->pr_ready(so, m, npages);
3103 	} else {
3104 		ktls_drop(so, EIO);
3105 		mb_free_notready(m, npages);
3106 	}
3107 
3108 	sorele(so);
3109 	CURVNET_RESTORE();
3110 }
3111 
3112 /*
3113  * Similar to ktls_encrypt, but used with asynchronous OCF backends
3114  * (coprocessors) where encryption does not use host CPU resources and
3115  * it can be beneficial to queue more requests than CPUs.
3116  */
3117 static __noinline void
3118 ktls_encrypt_async(struct ktls_wq *wq, struct mbuf *top)
3119 {
3120 	struct ktls_ocf_encrypt_state *state;
3121 	struct ktls_session *tls;
3122 	struct socket *so;
3123 	struct mbuf *m, *n;
3124 	int error, mpages, npages, total_pages;
3125 
3126 	so = top->m_epg_so;
3127 	tls = top->m_epg_tls;
3128 	KASSERT(tls != NULL, ("tls = NULL, top = %p\n", top));
3129 	KASSERT(so != NULL, ("so = NULL, top = %p\n", top));
3130 #ifdef INVARIANTS
3131 	top->m_epg_so = NULL;
3132 #endif
3133 	total_pages = top->m_epg_enc_cnt;
3134 	npages = 0;
3135 
3136 	error = 0;
3137 	for (m = top; npages != total_pages; m = n) {
3138 		KASSERT(m->m_epg_tls == tls,
3139 		    ("different TLS sessions in a single mbuf chain: %p vs %p",
3140 		    tls, m->m_epg_tls));
3141 		KASSERT(npages + m->m_epg_npgs <= total_pages,
3142 		    ("page count mismatch: top %p, total_pages %d, m %p", top,
3143 		    total_pages, m));
3144 
3145 		state = malloc(sizeof(*state), M_KTLS, M_WAITOK | M_ZERO);
3146 		soref(so);
3147 		state->so = so;
3148 		state->m = m;
3149 
3150 		mpages = m->m_epg_nrdy;
3151 		n = m->m_next;
3152 
3153 		error = ktls_encrypt_record(wq, m, tls, state);
3154 		if (error) {
3155 			counter_u64_add(ktls_offload_failed_crypto, 1);
3156 			free(state, M_KTLS);
3157 			CURVNET_SET(so->so_vnet);
3158 			sorele(so);
3159 			CURVNET_RESTORE();
3160 			break;
3161 		}
3162 
3163 		npages += mpages;
3164 	}
3165 
3166 	CURVNET_SET(so->so_vnet);
3167 	if (error != 0) {
3168 		ktls_drop(so, EIO);
3169 		mb_free_notready(m, total_pages - npages);
3170 	}
3171 
3172 	sorele(so);
3173 	CURVNET_RESTORE();
3174 }
3175 
3176 static int
3177 ktls_bind_domain(int domain)
3178 {
3179 	int error;
3180 
3181 	error = cpuset_setthread(curthread->td_tid, &cpuset_domain[domain]);
3182 	if (error != 0)
3183 		return (error);
3184 	curthread->td_domain.dr_policy = DOMAINSET_PREF(domain);
3185 	return (0);
3186 }
3187 
3188 static void
3189 ktls_reclaim_thread(void *ctx)
3190 {
3191 	struct ktls_domain_info *ktls_domain = ctx;
3192 	struct ktls_reclaim_thread *sc = &ktls_domain->reclaim_td;
3193 	struct sysctl_oid *oid;
3194 	char name[80];
3195 	int error, domain;
3196 
3197 	domain = ktls_domain - ktls_domains;
3198 	if (bootverbose)
3199 		printf("Starting KTLS reclaim thread for domain %d\n", domain);
3200 	error = ktls_bind_domain(domain);
3201 	if (error)
3202 		printf("Unable to bind KTLS reclaim thread for domain %d: error %d\n",
3203 		    domain, error);
3204 	snprintf(name, sizeof(name), "domain%d", domain);
3205 	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(_kern_ipc_tls), OID_AUTO,
3206 	    name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
3207 	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, "reclaims",
3208 	    CTLFLAG_RD,  &sc->reclaims, 0, "buffers reclaimed");
3209 	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, "wakeups",
3210 	    CTLFLAG_RD,  &sc->wakeups, 0, "thread wakeups");
3211 	SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, "running",
3212 	    CTLFLAG_RD,  &sc->running, 0, "thread running");
3213 
3214 	for (;;) {
3215 		atomic_store_int(&sc->running, 0);
3216 		tsleep(sc, PZERO | PNOLOCK, "-",  0);
3217 		atomic_store_int(&sc->running, 1);
3218 		sc->wakeups++;
3219 		/*
3220 		 * Below we attempt to reclaim ktls_max_reclaim
3221 		 * buffers using vm_page_reclaim_contig_domain_ext().
3222 		 * We do this here, as this function can take several
3223 		 * seconds to scan all of memory and it does not
3224 		 * matter if this thread pauses for a while.  If we
3225 		 * block a ktls worker thread, we risk developing
3226 		 * backlogs of buffers to be encrypted, leading to
3227 		 * surges of traffic and potential NIC output drops.
3228 		 */
3229 		if (vm_page_reclaim_contig_domain_ext(domain, VM_ALLOC_NORMAL,
3230 		    atop(ktls_maxlen), 0, ~0ul, PAGE_SIZE, 0,
3231 		    ktls_max_reclaim) != 0) {
3232 			vm_wait_domain(domain);
3233 		} else {
3234 			sc->reclaims += ktls_max_reclaim;
3235 		}
3236 	}
3237 }
3238 
3239 static void
3240 ktls_work_thread(void *ctx)
3241 {
3242 	struct ktls_wq *wq = ctx;
3243 	struct mbuf *m, *n;
3244 	struct socket *so, *son;
3245 	STAILQ_HEAD(, mbuf) local_m_head;
3246 	STAILQ_HEAD(, socket) local_so_head;
3247 	int cpu;
3248 
3249 	cpu = wq - ktls_wq;
3250 	if (bootverbose)
3251 		printf("Starting KTLS worker thread for CPU %d\n", cpu);
3252 
3253 	/*
3254 	 * Bind to a core.  If ktls_bind_threads is > 1, then
3255 	 * we bind to the NUMA domain instead.
3256 	 */
3257 	if (ktls_bind_threads) {
3258 		int error;
3259 
3260 		if (ktls_bind_threads > 1) {
3261 			struct pcpu *pc = pcpu_find(cpu);
3262 
3263 			error = ktls_bind_domain(pc->pc_domain);
3264 		} else {
3265 			cpuset_t mask;
3266 
3267 			CPU_SETOF(cpu, &mask);
3268 			error = cpuset_setthread(curthread->td_tid, &mask);
3269 		}
3270 		if (error)
3271 			printf("Unable to bind KTLS worker thread for CPU %d: error %d\n",
3272 				cpu, error);
3273 	}
3274 #if defined(__aarch64__) || defined(__amd64__) || defined(__i386__)
3275 	fpu_kern_thread(0);
3276 #endif
3277 	for (;;) {
3278 		mtx_lock(&wq->mtx);
3279 		while (STAILQ_EMPTY(&wq->m_head) &&
3280 		    STAILQ_EMPTY(&wq->so_head)) {
3281 			wq->running = false;
3282 			mtx_sleep(wq, &wq->mtx, 0, "-", 0);
3283 			wq->running = true;
3284 		}
3285 
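		/*
		 * Splice all queued work onto local lists so that the
		 * work queue lock can be dropped while records are
		 * encrypted and sockets are decrypted.
		 */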
3286 		STAILQ_INIT(&local_m_head);
3287 		STAILQ_CONCAT(&local_m_head, &wq->m_head);
3288 		STAILQ_INIT(&local_so_head);
3289 		STAILQ_CONCAT(&local_so_head, &wq->so_head);
3290 		mtx_unlock(&wq->mtx);
3291 
3292 		STAILQ_FOREACH_SAFE(m, &local_m_head, m_epg_stailq, n) {
3293 			if (m->m_epg_flags & EPG_FLAG_2FREE) {
3294 				ktls_free(m->m_epg_tls);
3295 				m_free_raw(m);
3296 			} else {
3297 				if (m->m_epg_tls->sync_dispatch)
3298 					ktls_encrypt(wq, m);
3299 				else
3300 					ktls_encrypt_async(wq, m);
3301 				counter_u64_add(ktls_cnt_tx_queued, -1);
3302 			}
3303 		}
3304 
3305 		STAILQ_FOREACH_SAFE(so, &local_so_head, so_ktls_rx_list, son) {
3306 			ktls_decrypt(so);
3307 			counter_u64_add(ktls_cnt_rx_queued, -1);
3308 		}
3309 	}
3310 }
3311 
3312 static void
3313 ktls_disable_ifnet_help(void *context, int pending __unused)
3314 {
3315 	struct ktls_session *tls;
3316 	struct inpcb *inp;
3317 	struct tcpcb *tp;
3318 	struct socket *so;
3319 	int err;
3320 
3321 	tls = context;
3322 	inp = tls->inp;
3323 	if (inp == NULL)
3324 		return;
3325 	INP_WLOCK(inp);
3326 	so = inp->inp_socket;
3327 	MPASS(so != NULL);
3328 	if (inp->inp_flags & INP_DROPPED) {
3329 		goto out;
3330 	}
3331 
3332 	if (so->so_snd.sb_tls_info != NULL)
3333 		err = ktls_set_tx_mode(so, TCP_TLS_MODE_SW);
3334 	else
3335 		err = ENXIO;
3336 	if (err == 0) {
3337 		counter_u64_add(ktls_ifnet_disable_ok, 1);
3338 		/* ktls_set_tx_mode() drops inp wlock, so recheck flags */
3339 		if ((inp->inp_flags & INP_DROPPED) == 0 &&
3340 		    (tp = intotcpcb(inp)) != NULL &&
3341 		    tp->t_fb->tfb_hwtls_change != NULL)
3342 			(*tp->t_fb->tfb_hwtls_change)(tp, 0);
3343 	} else {
3344 		counter_u64_add(ktls_ifnet_disable_fail, 1);
3345 	}
3346 
3347 out:
3348 	CURVNET_SET(so->so_vnet);
3349 	sorele(so);
3350 	CURVNET_RESTORE();
3351 	INP_WUNLOCK(inp);
3352 	ktls_free(tls);
3353 }
3354 
3355 /*
3356  * Called when re-transmits are becoming a substantial portion of the
3357  * sends on this connection.  When this happens, we transition the
3358  * connection to software TLS.  This is needed because most inline TLS
3359  * NICs keep crypto state only for in-order transmits.  This means
3360  * that to handle a TCP rexmit (which is out-of-order), the NIC must
3361  * re-DMA the entire TLS record up to and including the current
3362  * segment.  This means that when re-transmitting the last ~1448 byte
3363  * segment of a 16KB TLS record, we could wind up re-DMA'ing an order
3364  * of magnitude more data than we are sending.  This can cause the
3365  * PCIe link to saturate well before the network, which can cause
3366  * output drops, and a general loss of capacity.
3367  */
3368 void
3369 ktls_disable_ifnet(void *arg)
3370 {
3371 	struct tcpcb *tp;
3372 	struct inpcb *inp;
3373 	struct socket *so;
3374 	struct ktls_session *tls;
3375 
3376 	tp = arg;
3377 	inp = tptoinpcb(tp);
3378 	INP_WLOCK_ASSERT(inp);
3379 	so = inp->inp_socket;
3380 	SOCK_LOCK(so);
3381 	tls = so->so_snd.sb_tls_info;
3382 	if (tp->t_nic_ktls_xmit_dis == 1) {
3383 		SOCK_UNLOCK(so);
3384 		return;
3385 	}
3386 
3387 	/*
3388 	 * Note that t_nic_ktls_xmit_dis is never cleared; disabling
3389 	 * ifnet TLS can only be done once per connection, so we never
3390 	 * want to do it again.
3391 	 */
3392 
3393 	(void)ktls_hold(tls);
3394 	soref(so);
3395 	tp->t_nic_ktls_xmit_dis = 1;
3396 	SOCK_UNLOCK(so);
3397 	TASK_INIT(&tls->disable_ifnet_task, 0, ktls_disable_ifnet_help, tls);
3398 	(void)taskqueue_enqueue(taskqueue_thread, &tls->disable_ifnet_task);
3399 }
3400