xref: /freebsd/sys/opencrypto/ktls_ocf.c (revision 349cc55c9796c4596a5b9904cd3281af295f878f)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Netflix Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/ktls.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <netinet/in.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/ktls.h>

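/*
 * Per-cipher-suite operation vector.  ktls_ocf_try() selects one of
 * these based on the negotiated TLS version and cipher; recrypt and
 * decrypt are only provided by suites that are supported for receive.
 */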
struct ktls_ocf_sw {
	/* Encrypt a single outbound TLS record. */
	int	(*encrypt)(struct ktls_ocf_encrypt_state *state,
	    struct ktls_session *tls, struct mbuf *m,
	    struct iovec *outiov, int outiovcnt);

	/* Re-encrypt a received TLS record that is partially decrypted. */
	int	(*recrypt)(struct ktls_session *tls,
	    const struct tls_record_layer *hdr, struct mbuf *m,
	    uint64_t seqno);

	/* Decrypt a received TLS record. */
	int	(*decrypt)(struct ktls_session *tls,
	    const struct tls_record_layer *hdr, struct mbuf *m,
	    uint64_t seqno, int *trailer_len);
};

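/*
 * Per-session OCF state.  'sid' is the main cipher (or AEAD) session,
 * 'mac_sid' is a separate digest session used only by MAC-then-encrypt
 * CBC suites, and 'recrypt_sid' is an AES-CTR session used only when
 * re-encrypting partially decrypted GCM records.  The lock serializes
 * the sleep/wakeup handshake with asynchronous crypto drivers.
 */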
struct ktls_ocf_session {
	const struct ktls_ocf_sw *sw;
	crypto_session_t sid;
	crypto_session_t mac_sid;
	crypto_session_t recrypt_sid;
	struct mtx lock;
	int mac_len;
	bool implicit_iv;

	/* Only used for TLS 1.0 with the implicit IV. */
#ifdef INVARIANTS
	bool in_progress;
	uint64_t next_seqno;
#endif
	char iv[AES_BLOCK_LEN];
};

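/*
 * Completion cookie passed to the asynchronous callback; the
 * dispatching thread sleeps on it until 'done' is set.
 */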
struct ocf_operation {
	struct ktls_ocf_session *os;
	bool done;
};

static MALLOC_DEFINE(M_KTLS_OCF, "ktls_ocf", "OCF KTLS");

SYSCTL_DECL(_kern_ipc_tls);
SYSCTL_DECL(_kern_ipc_tls_stats);

static SYSCTL_NODE(_kern_ipc_tls_stats, OID_AUTO, ocf,
    CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Kernel TLS offload via OCF stats");

static COUNTER_U64_DEFINE_EARLY(ocf_tls10_cbc_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls10_cbc_encrypts,
    CTLFLAG_RD, &ocf_tls10_cbc_encrypts,
    "Total number of OCF TLS 1.0 CBC encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls11_cbc_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls11_cbc_encrypts,
    CTLFLAG_RD, &ocf_tls11_cbc_encrypts,
    "Total number of OCF TLS 1.1/1.2 CBC encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_decrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_decrypts,
    CTLFLAG_RD, &ocf_tls12_gcm_decrypts,
    "Total number of OCF TLS 1.2 GCM decryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_encrypts,
    CTLFLAG_RD, &ocf_tls12_gcm_encrypts,
    "Total number of OCF TLS 1.2 GCM encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_recrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_recrypts,
    CTLFLAG_RD, &ocf_tls12_gcm_recrypts,
    "Total number of OCF TLS 1.2 GCM re-encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_chacha20_decrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_chacha20_decrypts,
    CTLFLAG_RD, &ocf_tls12_chacha20_decrypts,
    "Total number of OCF TLS 1.2 Chacha20-Poly1305 decryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_chacha20_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_chacha20_encrypts,
    CTLFLAG_RD, &ocf_tls12_chacha20_encrypts,
    "Total number of OCF TLS 1.2 Chacha20-Poly1305 encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls13_gcm_decrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_decrypts,
    CTLFLAG_RD, &ocf_tls13_gcm_decrypts,
    "Total number of OCF TLS 1.3 GCM decryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls13_gcm_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_encrypts,
    CTLFLAG_RD, &ocf_tls13_gcm_encrypts,
    "Total number of OCF TLS 1.3 GCM encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls13_gcm_recrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_recrypts,
    CTLFLAG_RD, &ocf_tls13_gcm_recrypts,
    "Total number of OCF TLS 1.3 GCM re-encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls13_chacha20_decrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_chacha20_decrypts,
    CTLFLAG_RD, &ocf_tls13_chacha20_decrypts,
    "Total number of OCF TLS 1.3 Chacha20-Poly1305 decryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls13_chacha20_encrypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_chacha20_encrypts,
    CTLFLAG_RD, &ocf_tls13_chacha20_encrypts,
    "Total number of OCF TLS 1.3 Chacha20-Poly1305 encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_inplace);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, inplace,
    CTLFLAG_RD, &ocf_inplace,
    "Total number of OCF in-place operations");

static COUNTER_U64_DEFINE_EARLY(ocf_separate_output);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, separate_output,
    CTLFLAG_RD, &ocf_separate_output,
    "Total number of OCF operations with a separate output buffer");

static COUNTER_U64_DEFINE_EARLY(ocf_retries);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, retries, CTLFLAG_RD,
    &ocf_retries,
    "Number of OCF encryption operation retries");

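/*
 * Completion callbacks for ktls_ocf_dispatch().  Synchronous OCF
 * sessions complete before crypto_dispatch() returns, so the sync
 * callback has nothing to do; asynchronous sessions mark the operation
 * done and wake the thread sleeping in ktls_ocf_dispatch().
 */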
static int
ktls_ocf_callback_sync(struct cryptop *crp __unused)
{
	return (0);
}

static int
ktls_ocf_callback_async(struct cryptop *crp)
{
	struct ocf_operation *oo;

	oo = crp->crp_opaque;
	mtx_lock(&oo->os->lock);
	oo->done = true;
	mtx_unlock(&oo->os->lock);
	wakeup(oo);
	return (0);
}

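/*
 * Dispatch a request and wait for it to complete, sleeping if the
 * session is asynchronous.  A completion status of EAGAIN (e.g. when
 * the request must be retried on a new session) causes the request to
 * be reset and resubmitted until it finishes with some other status.
 */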
static int
ktls_ocf_dispatch(struct ktls_ocf_session *os, struct cryptop *crp)
{
	struct ocf_operation oo;
	int error;
	bool async;

	oo.os = os;
	oo.done = false;

	crp->crp_opaque = &oo;
	for (;;) {
		async = !CRYPTO_SESS_SYNC(crp->crp_session);
		crp->crp_callback = async ? ktls_ocf_callback_async :
		    ktls_ocf_callback_sync;

		error = crypto_dispatch(crp);
		if (error)
			break;
		if (async) {
			mtx_lock(&os->lock);
			while (!oo.done)
				mtx_sleep(&oo, &os->lock, 0, "ocfktls", 0);
			mtx_unlock(&os->lock);
		}

		if (crp->crp_etype != EAGAIN) {
			error = crp->crp_etype;
			break;
		}

		crp->crp_etype = 0;
		crp->crp_flags &= ~CRYPTO_F_DONE;
		oo.done = false;
		counter_u64_add(ocf_retries, 1);
	}
	return (error);
}

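/*
 * Fully asynchronous encryption path: instead of sleeping, completion
 * (or an EAGAIN-driven resubmit) is handled in the crypto callback,
 * which hands the result back to the ktls layer via ktls_encrypt_cb().
 */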
static int
ktls_ocf_dispatch_async_cb(struct cryptop *crp)
{
	struct ktls_ocf_encrypt_state *state;
	int error;

	state = crp->crp_opaque;
	if (crp->crp_etype == EAGAIN) {
		crp->crp_etype = 0;
		crp->crp_flags &= ~CRYPTO_F_DONE;
		counter_u64_add(ocf_retries, 1);
		error = crypto_dispatch(crp);
		if (error != 0) {
			crypto_destroyreq(crp);
			ktls_encrypt_cb(state, error);
		}
		return (0);
	}

	error = crp->crp_etype;
	crypto_destroyreq(crp);
	ktls_encrypt_cb(state, error);
	return (0);
}

static int
ktls_ocf_dispatch_async(struct ktls_ocf_encrypt_state *state,
    struct cryptop *crp)
{
	int error;

	crp->crp_opaque = state;
	crp->crp_callback = ktls_ocf_dispatch_async_cb;
	error = crypto_dispatch(crp);
	if (error != 0)
		crypto_destroyreq(crp);
	return (error);
}

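/*
 * MAC-then-encrypt for AES-CBC suites.  The record is processed in two
 * passes: first an HMAC is computed over the MAC pseudo-header and the
 * plaintext and written into the trailer, then padding is appended and
 * the payload, MAC, and padding are encrypted.  TLS 1.0 chains IVs
 * across records (the implicit IV), so those records must be encrypted
 * strictly in sequence; the INVARIANTS checks below assert this.
 */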
static int
ktls_ocf_tls_cbc_encrypt(struct ktls_ocf_encrypt_state *state,
    struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
    int outiovcnt)
{
	const struct tls_record_layer *hdr;
	struct uio *uio;
	struct tls_mac_data *ad;
	struct cryptop *crp;
	struct ktls_ocf_session *os;
	struct iovec iov[m->m_epg_npgs + 2];
	u_int pgoff;
	int i, error;
	uint16_t tls_comp_len;
	uint8_t pad;

	MPASS(outiovcnt + 1 <= nitems(iov));

	os = tls->ocf_session;
	hdr = (const struct tls_record_layer *)m->m_epg_hdr;
	crp = &state->crp;
	uio = &state->uio;
	MPASS(tls->sync_dispatch);

#ifdef INVARIANTS
	if (os->implicit_iv) {
		mtx_lock(&os->lock);
		KASSERT(!os->in_progress,
		    ("concurrent implicit IV encryptions"));
		if (os->next_seqno != m->m_epg_seqno) {
			printf("KTLS CBC: TLS records out of order.  "
			    "Expected %ju, got %ju\n",
			    (uintmax_t)os->next_seqno,
			    (uintmax_t)m->m_epg_seqno);
			mtx_unlock(&os->lock);
			return (EINVAL);
		}
		os->in_progress = true;
		mtx_unlock(&os->lock);
	}
#endif

	/* Payload length. */
	tls_comp_len = m->m_len - (m->m_epg_hdrlen + m->m_epg_trllen);

	/* Initialize the AAD. */
	ad = &state->mac;
	ad->seq = htobe64(m->m_epg_seqno);
	ad->type = hdr->tls_type;
	ad->tls_vmajor = hdr->tls_vmajor;
	ad->tls_vminor = hdr->tls_vminor;
	ad->tls_length = htons(tls_comp_len);

	/* First, compute the MAC. */
	iov[0].iov_base = ad;
	iov[0].iov_len = sizeof(*ad);
	pgoff = m->m_epg_1st_off;
	for (i = 0; i < m->m_epg_npgs; i++, pgoff = 0) {
		iov[i + 1].iov_base = (void *)PHYS_TO_DMAP(m->m_epg_pa[i] +
		    pgoff);
		iov[i + 1].iov_len = m_epg_pagelen(m, i, pgoff);
	}
	iov[m->m_epg_npgs + 1].iov_base = m->m_epg_trail;
	iov[m->m_epg_npgs + 1].iov_len = os->mac_len;
	uio->uio_iov = iov;
	uio->uio_iovcnt = m->m_epg_npgs + 2;
	uio->uio_offset = 0;
	uio->uio_segflg = UIO_SYSSPACE;
	uio->uio_td = curthread;
	uio->uio_resid = sizeof(*ad) + tls_comp_len + os->mac_len;

	crypto_initreq(crp, os->mac_sid);
	crp->crp_payload_start = 0;
	crp->crp_payload_length = sizeof(*ad) + tls_comp_len;
	crp->crp_digest_start = crp->crp_payload_length;
	crp->crp_op = CRYPTO_OP_COMPUTE_DIGEST;
	crp->crp_flags = CRYPTO_F_CBIMM;
	crypto_use_uio(crp, uio);
	error = ktls_ocf_dispatch(os, crp);

	crypto_destroyreq(crp);
	if (error) {
#ifdef INVARIANTS
		if (os->implicit_iv) {
			mtx_lock(&os->lock);
			os->in_progress = false;
			mtx_unlock(&os->lock);
		}
#endif
		return (error);
	}

	/* Second, add the padding. */
	pad = m->m_epg_trllen - os->mac_len - 1;
	for (i = 0; i < pad + 1; i++)
		m->m_epg_trail[os->mac_len + i] = pad;

	/* Finally, encrypt the record. */
	crypto_initreq(crp, os->sid);
	crp->crp_payload_start = m->m_epg_hdrlen;
	crp->crp_payload_length = tls_comp_len + m->m_epg_trllen;
	KASSERT(crp->crp_payload_length % AES_BLOCK_LEN == 0,
	    ("invalid encryption size"));
	crypto_use_single_mbuf(crp, m);
	crp->crp_op = CRYPTO_OP_ENCRYPT;
	crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	if (os->implicit_iv)
		memcpy(crp->crp_iv, os->iv, AES_BLOCK_LEN);
	else
		memcpy(crp->crp_iv, hdr + 1, AES_BLOCK_LEN);

	if (outiov != NULL) {
		uio->uio_iov = outiov;
		uio->uio_iovcnt = outiovcnt;
		uio->uio_offset = 0;
		uio->uio_segflg = UIO_SYSSPACE;
		uio->uio_td = curthread;
		uio->uio_resid = crp->crp_payload_length;
		crypto_use_output_uio(crp, uio);
	}

	if (os->implicit_iv)
		counter_u64_add(ocf_tls10_cbc_encrypts, 1);
	else
		counter_u64_add(ocf_tls11_cbc_encrypts, 1);
	if (outiov != NULL)
		counter_u64_add(ocf_separate_output, 1);
	else
		counter_u64_add(ocf_inplace, 1);
	error = ktls_ocf_dispatch(os, crp);

	crypto_destroyreq(crp);

	if (os->implicit_iv) {
		KASSERT(os->mac_len + pad + 1 >= AES_BLOCK_LEN,
		    ("trailer too short to read IV"));
		memcpy(os->iv, m->m_epg_trail + m->m_epg_trllen - AES_BLOCK_LEN,
		    AES_BLOCK_LEN);
#ifdef INVARIANTS
		mtx_lock(&os->lock);
		os->next_seqno = m->m_epg_seqno + 1;
		os->in_progress = false;
		mtx_unlock(&os->lock);
#endif
	}
	return (error);
}

static const struct ktls_ocf_sw ktls_ocf_tls_cbc_sw = {
	.encrypt = ktls_ocf_tls_cbc_encrypt
};

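/*
 * TLS 1.2 AEAD encryption.  For AES-GCM the nonce is the 4-byte salt
 * from the key block followed by the 8-byte explicit nonce that
 * follows the record header; for Chacha20-Poly1305 the record sequence
 * number is XORed into the last 8 bytes of the 12-byte IV, as in TLS
 * 1.3.  The AAD covers the sequence number and the record header
 * fields with the compressed payload length.
 */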
static int
ktls_ocf_tls12_aead_encrypt(struct ktls_ocf_encrypt_state *state,
    struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
    int outiovcnt)
{
	const struct tls_record_layer *hdr;
	struct uio *uio;
	struct tls_aead_data *ad;
	struct cryptop *crp;
	struct ktls_ocf_session *os;
	int error;
	uint16_t tls_comp_len;

	os = tls->ocf_session;
	hdr = (const struct tls_record_layer *)m->m_epg_hdr;
	crp = &state->crp;
	uio = &state->uio;

	crypto_initreq(crp, os->sid);

	/* Setup the IV. */
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
		memcpy(crp->crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
		memcpy(crp->crp_iv + TLS_AEAD_GCM_LEN, hdr + 1,
		    sizeof(uint64_t));
	} else {
		/*
		 * Chacha20-Poly1305 constructs the IV for TLS 1.2
		 * identically to constructing the IV for AEAD in TLS
		 * 1.3.
		 */
		memcpy(crp->crp_iv, tls->params.iv, tls->params.iv_len);
		*(uint64_t *)(crp->crp_iv + 4) ^= htobe64(m->m_epg_seqno);
	}

	/* Setup the AAD. */
	ad = &state->aead;
	tls_comp_len = m->m_len - (m->m_epg_hdrlen + m->m_epg_trllen);
	ad->seq = htobe64(m->m_epg_seqno);
	ad->type = hdr->tls_type;
	ad->tls_vmajor = hdr->tls_vmajor;
	ad->tls_vminor = hdr->tls_vminor;
	ad->tls_length = htons(tls_comp_len);
	crp->crp_aad = ad;
	crp->crp_aad_length = sizeof(*ad);

	/* Set fields for input payload. */
	crypto_use_single_mbuf(crp, m);
	crp->crp_payload_start = m->m_epg_hdrlen;
	crp->crp_payload_length = tls_comp_len;

	if (outiov != NULL) {
		crp->crp_digest_start = crp->crp_payload_length;

		uio->uio_iov = outiov;
		uio->uio_iovcnt = outiovcnt;
		uio->uio_offset = 0;
		uio->uio_segflg = UIO_SYSSPACE;
		uio->uio_td = curthread;
		uio->uio_resid = crp->crp_payload_length + tls->params.tls_tlen;
		crypto_use_output_uio(crp, uio);
	} else
		crp->crp_digest_start = crp->crp_payload_start +
		    crp->crp_payload_length;

	crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
	crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		counter_u64_add(ocf_tls12_gcm_encrypts, 1);
	else
		counter_u64_add(ocf_tls12_chacha20_encrypts, 1);
	if (outiov != NULL)
		counter_u64_add(ocf_separate_output, 1);
	else
		counter_u64_add(ocf_inplace, 1);
	if (tls->sync_dispatch) {
		error = ktls_ocf_dispatch(os, crp);
		crypto_destroyreq(crp);
	} else
		error = ktls_ocf_dispatch_async(state, crp);
	return (error);
}

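/*
 * TLS 1.2 AEAD decryption.  The record is decrypted and its tag
 * verified in place in the receive mbuf chain; the AAD is rebuilt from
 * the received header with the length reduced to the compressed
 * payload length (the wire length minus the explicit nonce and/or tag).
 */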
static int
ktls_ocf_tls12_aead_decrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno,
    int *trailer_len)
{
	struct tls_aead_data ad;
	struct cryptop crp;
	struct ktls_ocf_session *os;
	int error;
	uint16_t tls_comp_len;

	os = tls->ocf_session;

	crypto_initreq(&crp, os->sid);

	/* Setup the IV. */
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
		memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
		memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1,
		    sizeof(uint64_t));
	} else {
		/*
		 * Chacha20-Poly1305 constructs the IV for TLS 1.2
		 * identically to constructing the IV for AEAD in TLS
		 * 1.3.
		 */
		memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
		*(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);
	}

	/* Setup the AAD. */
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		tls_comp_len = ntohs(hdr->tls_length) -
		    (AES_GMAC_HASH_LEN + sizeof(uint64_t));
	else
		tls_comp_len = ntohs(hdr->tls_length) - POLY1305_HASH_LEN;
	ad.seq = htobe64(seqno);
	ad.type = hdr->tls_type;
	ad.tls_vmajor = hdr->tls_vmajor;
	ad.tls_vminor = hdr->tls_vminor;
	ad.tls_length = htons(tls_comp_len);
	crp.crp_aad = &ad;
	crp.crp_aad_length = sizeof(ad);

	crp.crp_payload_start = tls->params.tls_hlen;
	crp.crp_payload_length = tls_comp_len;
	crp.crp_digest_start = crp.crp_payload_start + crp.crp_payload_length;

	crp.crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST;
	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	crypto_use_mbuf(&crp, m);

	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		counter_u64_add(ocf_tls12_gcm_decrypts, 1);
	else
		counter_u64_add(ocf_tls12_chacha20_decrypts, 1);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);
	*trailer_len = tls->params.tls_tlen;
	return (error);
}

/*
 * Copy regenerated ciphertext from 'buf' back into the input mbuf
 * chain, but only into mbufs marked M_DECRYPTED; mbufs still holding
 * the original ciphertext are left untouched.
 */
static void
ktls_ocf_recrypt_fixup(struct mbuf *m, u_int skip, u_int len, char *buf)
{
	const char *src = buf;
	u_int todo;

	while (skip >= m->m_len) {
		skip -= m->m_len;
		m = m->m_next;
	}

	while (len > 0) {
		todo = m->m_len - skip;
		if (todo > len)
			todo = len;

		if (m->m_flags & M_DECRYPTED)
			memcpy(mtod(m, char *) + skip, src, todo);
		src += todo;
		len -= todo;
		skip = 0;
		m = m->m_next;
	}
}

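/*
 * Re-encrypt a TLS 1.2 GCM record that was only partially decrypted
 * (e.g. by a NIC receive offload).  AES-GCM encrypts the payload with
 * AES-CTR starting at block counter 2, so the separate AES-ICM session
 * can regenerate the original ciphertext; ktls_ocf_recrypt_fixup()
 * then copies it back over only those mbufs marked M_DECRYPTED so the
 * record can be decrypted and authenticated normally afterwards.
 */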
static int
ktls_ocf_tls12_aead_recrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, struct mbuf *m,
    uint64_t seqno)
{
	struct cryptop crp;
	struct ktls_ocf_session *os;
	char *buf;
	u_int payload_len;
	int error;

	os = tls->ocf_session;

	crypto_initreq(&crp, os->recrypt_sid);

	KASSERT(tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16,
	    ("%s: only AES-GCM is supported", __func__));

	/* Setup the IV. */
	memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
	memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1, sizeof(uint64_t));
	be32enc(crp.crp_iv + AES_GCM_IV_LEN, 2);

	payload_len = ntohs(hdr->tls_length) -
	    (AES_GMAC_HASH_LEN + sizeof(uint64_t));
	crp.crp_op = CRYPTO_OP_ENCRYPT;
	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	crypto_use_mbuf(&crp, m);
	crp.crp_payload_start = tls->params.tls_hlen;
	crp.crp_payload_length = payload_len;

	buf = malloc(payload_len, M_KTLS_OCF, M_WAITOK);
	crypto_use_output_buf(&crp, buf, payload_len);

	counter_u64_add(ocf_tls12_gcm_recrypts, 1);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);

	if (error == 0)
		ktls_ocf_recrypt_fixup(m, tls->params.tls_hlen, payload_len,
		    buf);

	free(buf, M_KTLS_OCF);
	return (error);
}

static const struct ktls_ocf_sw ktls_ocf_tls12_aead_sw = {
	.encrypt = ktls_ocf_tls12_aead_encrypt,
	.recrypt = ktls_ocf_tls12_aead_recrypt,
	.decrypt = ktls_ocf_tls12_aead_decrypt,
};

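/*
 * TLS 1.3 AEAD encryption.  The nonce is formed by XORing the record
 * sequence number into the session IV, and the AAD is just the outer
 * record header.  The real record type is stashed as the first trailer
 * byte so that it is encrypted as the final byte of the inner
 * plaintext.
 */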
static int
ktls_ocf_tls13_aead_encrypt(struct ktls_ocf_encrypt_state *state,
    struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
    int outiovcnt)
{
	const struct tls_record_layer *hdr;
	struct uio *uio;
	struct tls_aead_data_13 *ad;
	struct cryptop *crp;
	struct ktls_ocf_session *os;
	int error;

	os = tls->ocf_session;
	hdr = (const struct tls_record_layer *)m->m_epg_hdr;
	crp = &state->crp;
	uio = &state->uio;

	crypto_initreq(crp, os->sid);

	/* Setup the nonce. */
	memcpy(crp->crp_iv, tls->params.iv, tls->params.iv_len);
	*(uint64_t *)(crp->crp_iv + 4) ^= htobe64(m->m_epg_seqno);

	/* Setup the AAD. */
	ad = &state->aead13;
	ad->type = hdr->tls_type;
	ad->tls_vmajor = hdr->tls_vmajor;
	ad->tls_vminor = hdr->tls_vminor;
	ad->tls_length = hdr->tls_length;
	crp->crp_aad = ad;
	crp->crp_aad_length = sizeof(*ad);

	/* Set fields for input payload. */
	crypto_use_single_mbuf(crp, m);
	crp->crp_payload_start = m->m_epg_hdrlen;
	crp->crp_payload_length = m->m_len -
	    (m->m_epg_hdrlen + m->m_epg_trllen);

	/* Store the record type as the first byte of the trailer. */
	m->m_epg_trail[0] = m->m_epg_record_type;
	crp->crp_payload_length++;

	if (outiov != NULL) {
		crp->crp_digest_start = crp->crp_payload_length;

		uio->uio_iov = outiov;
		uio->uio_iovcnt = outiovcnt;
		uio->uio_offset = 0;
		uio->uio_segflg = UIO_SYSSPACE;
		uio->uio_td = curthread;
		uio->uio_resid = m->m_len - m->m_epg_hdrlen;
		crypto_use_output_uio(crp, uio);
	} else
		crp->crp_digest_start = crp->crp_payload_start +
		    crp->crp_payload_length;

	crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
	crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;

	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		counter_u64_add(ocf_tls13_gcm_encrypts, 1);
	else
		counter_u64_add(ocf_tls13_chacha20_encrypts, 1);
	if (outiov != NULL)
		counter_u64_add(ocf_separate_output, 1);
	else
		counter_u64_add(ocf_inplace, 1);
	if (tls->sync_dispatch) {
		error = ktls_ocf_dispatch(os, crp);
		crypto_destroyreq(crp);
	} else
		error = ktls_ocf_dispatch_async(state, crp);
	return (error);
}

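/*
 * TLS 1.3 AEAD decryption.  The tag length excludes the one-byte inner
 * record type; that byte is decrypted along with the payload in place
 * and is left for the caller to locate in the plaintext.
 */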
static int
ktls_ocf_tls13_aead_decrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno,
    int *trailer_len)
{
	struct tls_aead_data_13 ad;
	struct cryptop crp;
	struct ktls_ocf_session *os;
	int error;
	u_int tag_len;

	os = tls->ocf_session;

	tag_len = tls->params.tls_tlen - 1;

	/* Payload must contain at least one byte for the record type. */
	if (ntohs(hdr->tls_length) < tag_len + 1)
		return (EBADMSG);

	crypto_initreq(&crp, os->sid);

	/* Setup the nonce. */
	memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
	*(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);

	/* Setup the AAD. */
	ad.type = hdr->tls_type;
	ad.tls_vmajor = hdr->tls_vmajor;
	ad.tls_vminor = hdr->tls_vminor;
	ad.tls_length = hdr->tls_length;
	crp.crp_aad = &ad;
	crp.crp_aad_length = sizeof(ad);

	crp.crp_payload_start = tls->params.tls_hlen;
	crp.crp_payload_length = ntohs(hdr->tls_length) - tag_len;
	crp.crp_digest_start = crp.crp_payload_start + crp.crp_payload_length;

	crp.crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST;
	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	crypto_use_mbuf(&crp, m);

	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		counter_u64_add(ocf_tls13_gcm_decrypts, 1);
	else
		counter_u64_add(ocf_tls13_chacha20_decrypts, 1);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);
	*trailer_len = tag_len;
	return (error);
}

static int
ktls_ocf_tls13_aead_recrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, struct mbuf *m,
    uint64_t seqno)
{
	struct cryptop crp;
	struct ktls_ocf_session *os;
	char *buf;
	u_int payload_len;
	int error;

	os = tls->ocf_session;

	crypto_initreq(&crp, os->recrypt_sid);

	KASSERT(tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16,
	    ("%s: only AES-GCM is supported", __func__));

	/* Setup the IV. */
	memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
	*(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);
	be32enc(crp.crp_iv + 12, 2);

	payload_len = ntohs(hdr->tls_length) - AES_GMAC_HASH_LEN;
	crp.crp_op = CRYPTO_OP_ENCRYPT;
	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	crypto_use_mbuf(&crp, m);
	crp.crp_payload_start = tls->params.tls_hlen;
	crp.crp_payload_length = payload_len;

	buf = malloc(payload_len, M_KTLS_OCF, M_WAITOK);
	crypto_use_output_buf(&crp, buf, payload_len);

	counter_u64_add(ocf_tls13_gcm_recrypts, 1);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);

	if (error == 0)
		ktls_ocf_recrypt_fixup(m, tls->params.tls_hlen, payload_len,
		    buf);

	free(buf, M_KTLS_OCF);
	return (error);
}

static const struct ktls_ocf_sw ktls_ocf_tls13_aead_sw = {
	.encrypt = ktls_ocf_tls13_aead_encrypt,
	.recrypt = ktls_ocf_tls13_aead_recrypt,
	.decrypt = ktls_ocf_tls13_aead_decrypt,
};

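/*
 * The auxiliary sessions (mac_sid, recrypt_sid) are not created for
 * every cipher suite; since the session structure is zeroed on
 * allocation, crypto_freesession() is called unconditionally and must
 * tolerate the never-created handles.
 */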
void
ktls_ocf_free(struct ktls_session *tls)
{
	struct ktls_ocf_session *os;

	os = tls->ocf_session;
	crypto_freesession(os->sid);
	crypto_freesession(os->mac_sid);
	crypto_freesession(os->recrypt_sid);
	mtx_destroy(&os->lock);
	zfree(os, M_KTLS_OCF);
}

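/*
 * Session setup.  Validates the cipher/TLS-version combination, builds
 * the OCF session parameters (an AEAD session for GCM and
 * Chacha20-Poly1305, a cipher plus a separate digest session for CBC
 * suites, and an extra AES-ICM session for GCM receive re-encryption),
 * then selects the operation vector and decides whether records are
 * dispatched synchronously or asynchronously.
 */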
int
ktls_ocf_try(struct socket *so, struct ktls_session *tls, int direction)
{
	struct crypto_session_params csp, mac_csp, recrypt_csp;
	struct ktls_ocf_session *os;
	int error, mac_len;

	memset(&csp, 0, sizeof(csp));
	memset(&mac_csp, 0, sizeof(mac_csp));
	mac_csp.csp_mode = CSP_MODE_NONE;
	mac_len = 0;
	memset(&recrypt_csp, 0, sizeof(recrypt_csp));
	recrypt_csp.csp_mode = CSP_MODE_NONE;

	switch (tls->params.cipher_algorithm) {
	case CRYPTO_AES_NIST_GCM_16:
		switch (tls->params.cipher_key_len) {
		case 128 / 8:
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}

		/* Only TLS 1.2 and 1.3 are supported. */
		if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
		    tls->params.tls_vminor < TLS_MINOR_VER_TWO ||
		    tls->params.tls_vminor > TLS_MINOR_VER_THREE)
			return (EPROTONOSUPPORT);

		csp.csp_flags |= CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD;
		csp.csp_mode = CSP_MODE_AEAD;
		csp.csp_cipher_alg = CRYPTO_AES_NIST_GCM_16;
		csp.csp_cipher_key = tls->params.cipher_key;
		csp.csp_cipher_klen = tls->params.cipher_key_len;
		csp.csp_ivlen = AES_GCM_IV_LEN;

		recrypt_csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
		recrypt_csp.csp_mode = CSP_MODE_CIPHER;
		recrypt_csp.csp_cipher_alg = CRYPTO_AES_ICM;
		recrypt_csp.csp_cipher_key = tls->params.cipher_key;
		recrypt_csp.csp_cipher_klen = tls->params.cipher_key_len;
		recrypt_csp.csp_ivlen = AES_BLOCK_LEN;
		break;
	case CRYPTO_AES_CBC:
		switch (tls->params.cipher_key_len) {
		case 128 / 8:
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}

		switch (tls->params.auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
			mac_len = SHA1_HASH_LEN;
			break;
		case CRYPTO_SHA2_256_HMAC:
			mac_len = SHA2_256_HASH_LEN;
			break;
		case CRYPTO_SHA2_384_HMAC:
			mac_len = SHA2_384_HASH_LEN;
			break;
		default:
			return (EINVAL);
		}

		/* Only TLS 1.0-1.2 are supported. */
		if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
		    tls->params.tls_vminor < TLS_MINOR_VER_ZERO ||
		    tls->params.tls_vminor > TLS_MINOR_VER_TWO)
			return (EPROTONOSUPPORT);

		/* AES-CBC is not supported for receive. */
		if (direction == KTLS_RX)
			return (EPROTONOSUPPORT);

		csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
		csp.csp_mode = CSP_MODE_CIPHER;
		csp.csp_cipher_alg = CRYPTO_AES_CBC;
		csp.csp_cipher_key = tls->params.cipher_key;
		csp.csp_cipher_klen = tls->params.cipher_key_len;
		csp.csp_ivlen = AES_BLOCK_LEN;

		mac_csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
		mac_csp.csp_mode = CSP_MODE_DIGEST;
		mac_csp.csp_auth_alg = tls->params.auth_algorithm;
		mac_csp.csp_auth_key = tls->params.auth_key;
		mac_csp.csp_auth_klen = tls->params.auth_key_len;
		break;
	case CRYPTO_CHACHA20_POLY1305:
		switch (tls->params.cipher_key_len) {
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}

		/* Only TLS 1.2 and 1.3 are supported. */
		if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
		    tls->params.tls_vminor < TLS_MINOR_VER_TWO ||
		    tls->params.tls_vminor > TLS_MINOR_VER_THREE)
			return (EPROTONOSUPPORT);

		csp.csp_flags |= CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD;
		csp.csp_mode = CSP_MODE_AEAD;
		csp.csp_cipher_alg = CRYPTO_CHACHA20_POLY1305;
		csp.csp_cipher_key = tls->params.cipher_key;
		csp.csp_cipher_klen = tls->params.cipher_key_len;
		csp.csp_ivlen = CHACHA20_POLY1305_IV_LEN;
		break;
	default:
		return (EPROTONOSUPPORT);
	}

	os = malloc(sizeof(*os), M_KTLS_OCF, M_NOWAIT | M_ZERO);
	if (os == NULL)
		return (ENOMEM);

	error = crypto_newsession(&os->sid, &csp,
	    CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
	if (error) {
		free(os, M_KTLS_OCF);
		return (error);
	}

	if (mac_csp.csp_mode != CSP_MODE_NONE) {
		error = crypto_newsession(&os->mac_sid, &mac_csp,
		    CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
		if (error) {
			crypto_freesession(os->sid);
			free(os, M_KTLS_OCF);
			return (error);
		}
		os->mac_len = mac_len;
	}

	if (recrypt_csp.csp_mode != CSP_MODE_NONE) {
		error = crypto_newsession(&os->recrypt_sid, &recrypt_csp,
		    CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
		if (error) {
			crypto_freesession(os->sid);
			free(os, M_KTLS_OCF);
			return (error);
		}
	}

	mtx_init(&os->lock, "ktls_ocf", NULL, MTX_DEF);
	tls->ocf_session = os;
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16 ||
	    tls->params.cipher_algorithm == CRYPTO_CHACHA20_POLY1305) {
		if (tls->params.tls_vminor == TLS_MINOR_VER_THREE)
			os->sw = &ktls_ocf_tls13_aead_sw;
		else
			os->sw = &ktls_ocf_tls12_aead_sw;
	} else {
		os->sw = &ktls_ocf_tls_cbc_sw;
		if (tls->params.tls_vminor == TLS_MINOR_VER_ZERO) {
			os->implicit_iv = true;
			memcpy(os->iv, tls->params.iv, AES_BLOCK_LEN);
#ifdef INVARIANTS
			os->next_seqno = tls->next_seqno;
#endif
		}
	}

	/*
	 * AES-CBC is always synchronous currently.  Asynchronous
	 * operation would require multiple callbacks and an additional
	 * iovec array in ktls_ocf_encrypt_state.
	 */
	tls->sync_dispatch = CRYPTO_SESS_SYNC(os->sid) ||
	    tls->params.cipher_algorithm == CRYPTO_AES_CBC;
	return (0);
}

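/*
 * Public entry points used by the generic ktls code; each simply
 * forwards to the operation vector chosen in ktls_ocf_try().
 */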
int
ktls_ocf_encrypt(struct ktls_ocf_encrypt_state *state,
    struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
    int outiovcnt)
{
	return (tls->ocf_session->sw->encrypt(state, tls, m, outiov,
	    outiovcnt));
}

int
ktls_ocf_decrypt(struct ktls_session *tls, const struct tls_record_layer *hdr,
    struct mbuf *m, uint64_t seqno, int *trailer_len)
{
	return (tls->ocf_session->sw->decrypt(tls, hdr, m, seqno, trailer_len));
}

int
ktls_ocf_recrypt(struct ktls_session *tls, const struct tls_record_layer *hdr,
    struct mbuf *m, uint64_t seqno)
{
	return (tls->ocf_session->sw->recrypt(tls, hdr, m, seqno));
}

bool
ktls_ocf_recrypt_supported(struct ktls_session *tls)
{
	return (tls->ocf_session->sw->recrypt != NULL &&
	    tls->ocf_session->recrypt_sid != NULL);
}