xref: /freebsd/sys/opencrypto/ktls_ocf.c (revision a96ef4501919d7ac08e94e98dc34b0bdd744802b)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Netflix Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/ktls.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <opencrypto/cryptodev.h>

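/*
 * Per-session OCF state hung off of a KTLS session.  "sid" is the
 * cipher (or AEAD) session; for CBC MAC-then-encrypt suites a second,
 * digest-only session ("mac_sid") computes the HMAC.  "lock" is used
 * to sleep on asynchronous request completion.  For TLS 1.0 the CBC IV
 * is chained across records, so the last ciphertext block of the
 * previous record is saved in "iv".
 */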
struct ocf_session {
	crypto_session_t sid;
	crypto_session_t mac_sid;
	int mac_len;
	struct mtx lock;
	bool implicit_iv;

	/* Only used for TLS 1.0 with the implicit IV. */
#ifdef INVARIANTS
	bool in_progress;
	uint64_t next_seqno;
#endif
	char iv[AES_BLOCK_LEN];
};

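/*
 * Completion state for a single crypto request.  Asynchronous drivers
 * mark "done" from their callback and wake up the dispatching thread,
 * which sleeps on this structure under os->lock.
 */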
struct ocf_operation {
	struct ocf_session *os;
	bool done;
};

static MALLOC_DEFINE(M_KTLS_OCF, "ktls_ocf", "OCF KTLS");

SYSCTL_DECL(_kern_ipc_tls);
SYSCTL_DECL(_kern_ipc_tls_stats);

static SYSCTL_NODE(_kern_ipc_tls_stats, OID_AUTO, ocf,
    CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Kernel TLS offload via OCF stats");

static COUNTER_U64_DEFINE_EARLY(ocf_tls10_cbc_crypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls10_cbc_crypts,
    CTLFLAG_RD, &ocf_tls10_cbc_crypts,
    "Total number of OCF TLS 1.0 CBC encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls11_cbc_crypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls11_cbc_crypts,
    CTLFLAG_RD, &ocf_tls11_cbc_crypts,
    "Total number of OCF TLS 1.1/1.2 CBC encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_crypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_crypts,
    CTLFLAG_RD, &ocf_tls12_gcm_crypts,
    "Total number of OCF TLS 1.2 GCM encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_chacha20_crypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_chacha20_crypts,
    CTLFLAG_RD, &ocf_tls12_chacha20_crypts,
    "Total number of OCF TLS 1.2 Chacha20-Poly1305 encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls13_gcm_crypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_crypts,
    CTLFLAG_RD, &ocf_tls13_gcm_crypts,
    "Total number of OCF TLS 1.3 GCM encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls13_chacha20_crypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_chacha20_crypts,
    CTLFLAG_RD, &ocf_tls13_chacha20_crypts,
    "Total number of OCF TLS 1.3 Chacha20-Poly1305 encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_inplace);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, inplace,
    CTLFLAG_RD, &ocf_inplace,
    "Total number of OCF in-place operations");

static COUNTER_U64_DEFINE_EARLY(ocf_separate_output);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, separate_output,
    CTLFLAG_RD, &ocf_separate_output,
    "Total number of OCF operations with a separate output buffer");

static COUNTER_U64_DEFINE_EARLY(ocf_retries);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, retries, CTLFLAG_RD,
    &ocf_retries,
    "Number of OCF encryption operation retries");

static int
ktls_ocf_callback_sync(struct cryptop *crp __unused)
{
	return (0);
}

static int
ktls_ocf_callback_async(struct cryptop *crp)
{
	struct ocf_operation *oo;

	oo = crp->crp_opaque;
	mtx_lock(&oo->os->lock);
	oo->done = true;
	mtx_unlock(&oo->os->lock);
	wakeup(oo);
	return (0);
}

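/*
 * Submit a request to OCF and wait for it to complete.  Synchronous
 * sessions finish inside crypto_dispatch(); asynchronous sessions sleep
 * on the ocf_operation until the driver's callback fires.  A completion
 * status of EAGAIN means the request should be resubmitted (e.g., the
 * session was migrated to a different driver), so the request is reset
 * and retried, and the driver type is re-checked on each pass.
 */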
static int
ktls_ocf_dispatch(struct ocf_session *os, struct cryptop *crp)
{
	struct ocf_operation oo;
	int error;
	bool async;

	oo.os = os;
	oo.done = false;

	crp->crp_opaque = &oo;
	for (;;) {
		async = !CRYPTO_SESS_SYNC(crp->crp_session);
		crp->crp_callback = async ? ktls_ocf_callback_async :
		    ktls_ocf_callback_sync;

		error = crypto_dispatch(crp);
		if (error)
			break;
		if (async) {
			mtx_lock(&os->lock);
			while (!oo.done)
				mtx_sleep(&oo, &os->lock, 0, "ocfktls", 0);
			mtx_unlock(&os->lock);
		}

		if (crp->crp_etype != EAGAIN) {
			error = crp->crp_etype;
			break;
		}

		crp->crp_etype = 0;
		crp->crp_flags &= ~CRYPTO_F_DONE;
		oo.done = false;
		counter_u64_add(ocf_retries, 1);
	}
	return (error);
}

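/*
 * Encrypt a TLS 1.0-1.2 CBC (MAC-then-encrypt) record using two OCF
 * requests.  The first request computes the HMAC over the MAC
 * pseudo-header and the plaintext and stores it in the trailer.  CBC
 * padding is then appended, and a second request CBC-encrypts the
 * plaintext, MAC, and padding.  TLS 1.0 chains the IV from the
 * previous record (os->iv); TLS 1.1 and 1.2 use the explicit IV that
 * follows the record header.
 */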
static int
ktls_ocf_tls_cbc_encrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, uint8_t *trailer, struct iovec *iniov,
    struct iovec *outiov, int iovcnt, uint64_t seqno,
    uint8_t record_type __unused)
{
	struct uio uio, out_uio;
	struct tls_mac_data ad;
	struct cryptop crp;
	struct ocf_session *os;
	struct iovec iov[iovcnt + 2];
	struct iovec out_iov[iovcnt + 1];
	int i, error;
	uint16_t tls_comp_len;
	uint8_t pad;
	bool inplace;

	os = tls->cipher;

#ifdef INVARIANTS
	if (os->implicit_iv) {
		mtx_lock(&os->lock);
		KASSERT(!os->in_progress,
		    ("concurrent implicit IV encryptions"));
		if (os->next_seqno != seqno) {
			printf("KTLS CBC: TLS records out of order.  "
			    "Expected %ju, got %ju\n",
			    (uintmax_t)os->next_seqno, (uintmax_t)seqno);
			mtx_unlock(&os->lock);
			return (EINVAL);
		}
		os->in_progress = true;
		mtx_unlock(&os->lock);
	}
#endif

	/*
	 * Compute the payload length.
	 *
	 * XXX: This could easily be computed O(1) from the mbuf
	 * fields, but those are not accessible here.  We can at
	 * least compute "inplace" in the same pass.
	 */
	tls_comp_len = 0;
	inplace = true;
	for (i = 0; i < iovcnt; i++) {
		tls_comp_len += iniov[i].iov_len;
		if (iniov[i].iov_base != outiov[i].iov_base)
			inplace = false;
	}

	/* Initialize the AAD. */
	ad.seq = htobe64(seqno);
	ad.type = hdr->tls_type;
	ad.tls_vmajor = hdr->tls_vmajor;
	ad.tls_vminor = hdr->tls_vminor;
	ad.tls_length = htons(tls_comp_len);

	/* First, compute the MAC. */
	iov[0].iov_base = &ad;
	iov[0].iov_len = sizeof(ad);
	memcpy(&iov[1], iniov, sizeof(*iniov) * iovcnt);
	iov[iovcnt + 1].iov_base = trailer;
	iov[iovcnt + 1].iov_len = os->mac_len;
	uio.uio_iov = iov;
	uio.uio_iovcnt = iovcnt + 2;
	uio.uio_offset = 0;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_td = curthread;
	uio.uio_resid = sizeof(ad) + tls_comp_len + os->mac_len;

	crypto_initreq(&crp, os->mac_sid);
	crp.crp_payload_start = 0;
	crp.crp_payload_length = sizeof(ad) + tls_comp_len;
	crp.crp_digest_start = crp.crp_payload_length;
	crp.crp_op = CRYPTO_OP_COMPUTE_DIGEST;
	crp.crp_flags = CRYPTO_F_CBIMM;
	crypto_use_uio(&crp, &uio);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);
	if (error) {
#ifdef INVARIANTS
		if (os->implicit_iv) {
			mtx_lock(&os->lock);
			os->in_progress = false;
			mtx_unlock(&os->lock);
		}
#endif
		return (error);
	}

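	/*
	 * TLS CBC padding: pad + 1 bytes, each holding the value "pad",
	 * are appended after the MAC so that the encrypted length is a
	 * multiple of AES_BLOCK_LEN.  For example, a 100-byte payload
	 * with a 20-byte SHA-1 MAC gives pad = (16 - 121) % 16 = 7, so
	 * 8 bytes of value 7 bring the total to 128 bytes.
	 */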
	/* Second, add the padding. */
	pad = (unsigned)(AES_BLOCK_LEN - (tls_comp_len + os->mac_len + 1)) %
	    AES_BLOCK_LEN;
	for (i = 0; i < pad + 1; i++)
		trailer[os->mac_len + i] = pad;

	/* Finally, encrypt the record. */

	/*
	 * Don't recopy the input iovec, instead just adjust the
	 * trailer length and skip over the AAD vector in the uio.
	 */
	iov[iovcnt + 1].iov_len += pad + 1;
	uio.uio_iov = iov + 1;
	uio.uio_iovcnt = iovcnt + 1;
	uio.uio_resid = tls_comp_len + iov[iovcnt + 1].iov_len;
	KASSERT(uio.uio_resid % AES_BLOCK_LEN == 0,
	    ("invalid encryption size"));

	crypto_initreq(&crp, os->sid);
	crp.crp_payload_start = 0;
	crp.crp_payload_length = uio.uio_resid;
	crp.crp_op = CRYPTO_OP_ENCRYPT;
	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	if (os->implicit_iv)
		memcpy(crp.crp_iv, os->iv, AES_BLOCK_LEN);
	else
		memcpy(crp.crp_iv, hdr + 1, AES_BLOCK_LEN);
	crypto_use_uio(&crp, &uio);
	if (!inplace) {
		memcpy(out_iov, outiov, sizeof(*iniov) * iovcnt);
		out_iov[iovcnt] = iov[iovcnt + 1];
		out_uio.uio_iov = out_iov;
		out_uio.uio_iovcnt = iovcnt + 1;
		out_uio.uio_offset = 0;
		out_uio.uio_segflg = UIO_SYSSPACE;
		out_uio.uio_td = curthread;
		out_uio.uio_resid = uio.uio_resid;
		crypto_use_output_uio(&crp, &out_uio);
	}

	if (os->implicit_iv)
		counter_u64_add(ocf_tls10_cbc_crypts, 1);
	else
		counter_u64_add(ocf_tls11_cbc_crypts, 1);
	if (inplace)
		counter_u64_add(ocf_inplace, 1);
	else
		counter_u64_add(ocf_separate_output, 1);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);

	if (os->implicit_iv) {
		KASSERT(os->mac_len + pad + 1 >= AES_BLOCK_LEN,
		    ("trailer too short to read IV"));
		memcpy(os->iv, trailer + os->mac_len + pad + 1 - AES_BLOCK_LEN,
		    AES_BLOCK_LEN);
#ifdef INVARIANTS
		mtx_lock(&os->lock);
		os->next_seqno = seqno + 1;
		os->in_progress = false;
		mtx_unlock(&os->lock);
#endif
	}
	return (error);
}

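/*
 * Encrypt a TLS 1.2 AEAD record in a single OCF request.  For AES-GCM
 * the nonce is the 4-byte implicit salt from the key block followed by
 * the 8-byte explicit nonce carried after the record header.  For
 * Chacha20-Poly1305 the 12-byte static IV is XORed with the sequence
 * number, as in TLS 1.3.  The AAD is the sequence number plus the
 * record header with the plaintext length, and the authentication tag
 * is written to the trailer.
 */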
static int
ktls_ocf_tls12_aead_encrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, uint8_t *trailer, struct iovec *iniov,
    struct iovec *outiov, int iovcnt, uint64_t seqno,
    uint8_t record_type __unused)
{
	struct uio uio, out_uio, *tag_uio;
	struct tls_aead_data ad;
	struct cryptop crp;
	struct ocf_session *os;
	struct iovec iov[iovcnt + 1];
	int i, error;
	uint16_t tls_comp_len;
	bool inplace;

	os = tls->cipher;

	uio.uio_iov = iniov;
	uio.uio_iovcnt = iovcnt;
	uio.uio_offset = 0;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_td = curthread;

	out_uio.uio_iov = outiov;
	out_uio.uio_iovcnt = iovcnt;
	out_uio.uio_offset = 0;
	out_uio.uio_segflg = UIO_SYSSPACE;
	out_uio.uio_td = curthread;

	crypto_initreq(&crp, os->sid);

	/* Setup the IV. */
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
		memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
		memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1,
		    sizeof(uint64_t));
	} else {
		/*
		 * Chacha20-Poly1305 constructs the IV for TLS 1.2
		 * identically to constructing the IV for AEAD in TLS
		 * 1.3.
		 */
		memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
		*(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);
	}

	/* Setup the AAD. */
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		tls_comp_len = ntohs(hdr->tls_length) -
		    (AES_GMAC_HASH_LEN + sizeof(uint64_t));
	else
		tls_comp_len = ntohs(hdr->tls_length) - POLY1305_HASH_LEN;
	ad.seq = htobe64(seqno);
	ad.type = hdr->tls_type;
	ad.tls_vmajor = hdr->tls_vmajor;
	ad.tls_vminor = hdr->tls_vminor;
	ad.tls_length = htons(tls_comp_len);
	crp.crp_aad = &ad;
	crp.crp_aad_length = sizeof(ad);

	/* Compute payload length and determine if encryption is in place. */
	inplace = true;
	crp.crp_payload_start = 0;
	for (i = 0; i < iovcnt; i++) {
		if (iniov[i].iov_base != outiov[i].iov_base)
			inplace = false;
		crp.crp_payload_length += iniov[i].iov_len;
	}
	uio.uio_resid = crp.crp_payload_length;
	out_uio.uio_resid = crp.crp_payload_length;

	if (inplace)
		tag_uio = &uio;
	else
		tag_uio = &out_uio;

	/* Duplicate iovec and append vector for tag. */
	memcpy(iov, tag_uio->uio_iov, iovcnt * sizeof(struct iovec));
	iov[iovcnt].iov_base = trailer;
	iov[iovcnt].iov_len = AES_GMAC_HASH_LEN;
	tag_uio->uio_iov = iov;
	tag_uio->uio_iovcnt++;
	crp.crp_digest_start = tag_uio->uio_resid;
	tag_uio->uio_resid += AES_GMAC_HASH_LEN;

	crp.crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	crypto_use_uio(&crp, &uio);
	if (!inplace)
		crypto_use_output_uio(&crp, &out_uio);

	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		counter_u64_add(ocf_tls12_gcm_crypts, 1);
	else
		counter_u64_add(ocf_tls12_chacha20_crypts, 1);
	if (inplace)
		counter_u64_add(ocf_inplace, 1);
	else
		counter_u64_add(ocf_separate_output, 1);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);
	return (error);
}

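/*
 * Decrypt and verify a received TLS 1.2 AEAD record in place in the
 * mbuf chain.  The nonce and AAD are reconstructed exactly as on the
 * transmit side, and OCF verifies the authentication tag that sits at
 * the end of the record.
 */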
static int
ktls_ocf_tls12_aead_decrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno,
    int *trailer_len)
{
	struct tls_aead_data ad;
	struct cryptop crp;
	struct ocf_session *os;
	struct ocf_operation oo;
	int error;
	uint16_t tls_comp_len;

	os = tls->cipher;

	oo.os = os;
	oo.done = false;

	crypto_initreq(&crp, os->sid);

	/* Setup the IV. */
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
		memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
		memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1,
		    sizeof(uint64_t));
	} else {
		/*
		 * Chacha20-Poly1305 constructs the IV for TLS 1.2
		 * identically to constructing the IV for AEAD in TLS
		 * 1.3.
		 */
		memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
		*(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);
	}

	/* Setup the AAD. */
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		tls_comp_len = ntohs(hdr->tls_length) -
		    (AES_GMAC_HASH_LEN + sizeof(uint64_t));
	else
		tls_comp_len = ntohs(hdr->tls_length) - POLY1305_HASH_LEN;
	ad.seq = htobe64(seqno);
	ad.type = hdr->tls_type;
	ad.tls_vmajor = hdr->tls_vmajor;
	ad.tls_vminor = hdr->tls_vminor;
	ad.tls_length = htons(tls_comp_len);
	crp.crp_aad = &ad;
	crp.crp_aad_length = sizeof(ad);

	crp.crp_payload_start = tls->params.tls_hlen;
	crp.crp_payload_length = tls_comp_len;
	crp.crp_digest_start = crp.crp_payload_start + crp.crp_payload_length;

	crp.crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST;
	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	crypto_use_mbuf(&crp, m);

	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		counter_u64_add(ocf_tls12_gcm_crypts, 1);
	else
		counter_u64_add(ocf_tls12_chacha20_crypts, 1);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);
	*trailer_len = AES_GMAC_HASH_LEN;
	return (error);
}

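/*
 * Encrypt a TLS 1.3 AEAD record.  The nonce is the static IV XORed
 * with the sequence number for both AES-GCM and Chacha20-Poly1305.
 * The AAD is the record header as it appears on the wire, and the
 * real record type is appended to the plaintext as the first byte of
 * the trailer before encryption.
 */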
static int
ktls_ocf_tls13_aead_encrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, uint8_t *trailer, struct iovec *iniov,
    struct iovec *outiov, int iovcnt, uint64_t seqno, uint8_t record_type)
{
	struct uio uio, out_uio;
	struct tls_aead_data_13 ad;
	char nonce[12];
	struct cryptop crp;
	struct ocf_session *os;
	struct iovec iov[iovcnt + 1], out_iov[iovcnt + 1];
	int i, error;
	bool inplace;

	os = tls->cipher;

	crypto_initreq(&crp, os->sid);

	/* Setup the nonce. */
	memcpy(nonce, tls->params.iv, tls->params.iv_len);
	*(uint64_t *)(nonce + 4) ^= htobe64(seqno);

	/* Setup the AAD. */
	ad.type = hdr->tls_type;
	ad.tls_vmajor = hdr->tls_vmajor;
	ad.tls_vminor = hdr->tls_vminor;
	ad.tls_length = hdr->tls_length;
	crp.crp_aad = &ad;
	crp.crp_aad_length = sizeof(ad);

	/* Compute payload length and determine if encryption is in place. */
	inplace = true;
	crp.crp_payload_start = 0;
	for (i = 0; i < iovcnt; i++) {
		if (iniov[i].iov_base != outiov[i].iov_base)
			inplace = false;
		crp.crp_payload_length += iniov[i].iov_len;
	}

	/* Store the record type as the first byte of the trailer. */
	trailer[0] = record_type;
	crp.crp_payload_length++;
	crp.crp_digest_start = crp.crp_payload_length;

	/*
	 * Duplicate the input iov to append the trailer.  Always
	 * include the full trailer as input to get the record_type
	 * even if only the first byte is used.
	 */
	memcpy(iov, iniov, iovcnt * sizeof(*iov));
	iov[iovcnt].iov_base = trailer;
	iov[iovcnt].iov_len = tls->params.tls_tlen;
	uio.uio_iov = iov;
	uio.uio_iovcnt = iovcnt + 1;
	uio.uio_offset = 0;
	uio.uio_resid = crp.crp_payload_length + tls->params.tls_tlen - 1;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_td = curthread;
	crypto_use_uio(&crp, &uio);

	if (!inplace) {
		/* Duplicate the output iov to append the trailer. */
		memcpy(out_iov, outiov, iovcnt * sizeof(*out_iov));
		out_iov[iovcnt] = iov[iovcnt];

		out_uio.uio_iov = out_iov;
		out_uio.uio_iovcnt = iovcnt + 1;
		out_uio.uio_offset = 0;
		out_uio.uio_resid = crp.crp_payload_length +
		    tls->params.tls_tlen - 1;
		out_uio.uio_segflg = UIO_SYSSPACE;
		out_uio.uio_td = curthread;
		crypto_use_output_uio(&crp, &out_uio);
	}

	crp.crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;

	memcpy(crp.crp_iv, nonce, sizeof(nonce));

	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		counter_u64_add(ocf_tls13_gcm_crypts, 1);
	else
		counter_u64_add(ocf_tls13_chacha20_crypts, 1);
	if (inplace)
		counter_u64_add(ocf_inplace, 1);
	else
		counter_u64_add(ocf_separate_output, 1);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);
	return (error);
}

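/*
 * Tear down the OCF state for a KTLS session.  zfree() zeroes the
 * structure before releasing it.
 */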
static void
ktls_ocf_free(struct ktls_session *tls)
{
	struct ocf_session *os;

	os = tls->cipher;
	crypto_freesession(os->sid);
	mtx_destroy(&os->lock);
	zfree(os, M_KTLS_OCF);
}

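/*
 * Attempt to attach this backend to a new KTLS session.  The cipher
 * suite and TLS version are validated, one OCF session is created for
 * the cipher or AEAD (plus a second digest session for CBC suites),
 * and the appropriate encrypt/decrypt hooks are installed on the KTLS
 * session.  Returning an error lets other backends be tried instead.
 */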
static int
ktls_ocf_try(struct socket *so, struct ktls_session *tls, int direction)
{
	struct crypto_session_params csp, mac_csp;
	struct ocf_session *os;
	int error, mac_len;

	memset(&csp, 0, sizeof(csp));
	memset(&mac_csp, 0, sizeof(mac_csp));
	mac_csp.csp_mode = CSP_MODE_NONE;
	mac_len = 0;

	switch (tls->params.cipher_algorithm) {
	case CRYPTO_AES_NIST_GCM_16:
		switch (tls->params.cipher_key_len) {
		case 128 / 8:
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}

		/* Only TLS 1.2 and 1.3 are supported. */
		if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
		    tls->params.tls_vminor < TLS_MINOR_VER_TWO ||
		    tls->params.tls_vminor > TLS_MINOR_VER_THREE)
			return (EPROTONOSUPPORT);

		/* TLS 1.3 is not yet supported for receive. */
		if (direction == KTLS_RX &&
		    tls->params.tls_vminor == TLS_MINOR_VER_THREE)
			return (EPROTONOSUPPORT);

		csp.csp_flags |= CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD;
		csp.csp_mode = CSP_MODE_AEAD;
		csp.csp_cipher_alg = CRYPTO_AES_NIST_GCM_16;
		csp.csp_cipher_key = tls->params.cipher_key;
		csp.csp_cipher_klen = tls->params.cipher_key_len;
		csp.csp_ivlen = AES_GCM_IV_LEN;
		break;
	case CRYPTO_AES_CBC:
		switch (tls->params.cipher_key_len) {
		case 128 / 8:
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}

		switch (tls->params.auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
			mac_len = SHA1_HASH_LEN;
			break;
		case CRYPTO_SHA2_256_HMAC:
			mac_len = SHA2_256_HASH_LEN;
			break;
		case CRYPTO_SHA2_384_HMAC:
			mac_len = SHA2_384_HASH_LEN;
			break;
		default:
			return (EINVAL);
		}

		/* Only TLS 1.0-1.2 are supported. */
		if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
		    tls->params.tls_vminor < TLS_MINOR_VER_ZERO ||
		    tls->params.tls_vminor > TLS_MINOR_VER_TWO)
			return (EPROTONOSUPPORT);

		/* AES-CBC is not supported for receive. */
		if (direction == KTLS_RX)
			return (EPROTONOSUPPORT);

		csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
		csp.csp_mode = CSP_MODE_CIPHER;
		csp.csp_cipher_alg = CRYPTO_AES_CBC;
		csp.csp_cipher_key = tls->params.cipher_key;
		csp.csp_cipher_klen = tls->params.cipher_key_len;
		csp.csp_ivlen = AES_BLOCK_LEN;

		mac_csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
		mac_csp.csp_mode = CSP_MODE_DIGEST;
		mac_csp.csp_auth_alg = tls->params.auth_algorithm;
		mac_csp.csp_auth_key = tls->params.auth_key;
		mac_csp.csp_auth_klen = tls->params.auth_key_len;
		break;
	case CRYPTO_CHACHA20_POLY1305:
		switch (tls->params.cipher_key_len) {
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}

		/* Only TLS 1.2 and 1.3 are supported. */
		if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
		    tls->params.tls_vminor < TLS_MINOR_VER_TWO ||
		    tls->params.tls_vminor > TLS_MINOR_VER_THREE)
			return (EPROTONOSUPPORT);

		/* TLS 1.3 is not yet supported for receive. */
		if (direction == KTLS_RX &&
		    tls->params.tls_vminor == TLS_MINOR_VER_THREE)
			return (EPROTONOSUPPORT);

		csp.csp_flags |= CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD;
		csp.csp_mode = CSP_MODE_AEAD;
		csp.csp_cipher_alg = CRYPTO_CHACHA20_POLY1305;
		csp.csp_cipher_key = tls->params.cipher_key;
		csp.csp_cipher_klen = tls->params.cipher_key_len;
		csp.csp_ivlen = CHACHA20_POLY1305_IV_LEN;
		break;
	default:
		return (EPROTONOSUPPORT);
	}

	os = malloc(sizeof(*os), M_KTLS_OCF, M_NOWAIT | M_ZERO);
	if (os == NULL)
		return (ENOMEM);

	error = crypto_newsession(&os->sid, &csp,
	    CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
	if (error) {
		free(os, M_KTLS_OCF);
		return (error);
	}

	if (mac_csp.csp_mode != CSP_MODE_NONE) {
		error = crypto_newsession(&os->mac_sid, &mac_csp,
		    CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
		if (error) {
			crypto_freesession(os->sid);
			free(os, M_KTLS_OCF);
			return (error);
		}
		os->mac_len = mac_len;
	}

	mtx_init(&os->lock, "ktls_ocf", NULL, MTX_DEF);
	tls->cipher = os;
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16 ||
	    tls->params.cipher_algorithm == CRYPTO_CHACHA20_POLY1305) {
		if (direction == KTLS_TX) {
			if (tls->params.tls_vminor == TLS_MINOR_VER_THREE)
				tls->sw_encrypt = ktls_ocf_tls13_aead_encrypt;
			else
				tls->sw_encrypt = ktls_ocf_tls12_aead_encrypt;
		} else {
			tls->sw_decrypt = ktls_ocf_tls12_aead_decrypt;
		}
	} else {
		tls->sw_encrypt = ktls_ocf_tls_cbc_encrypt;
		if (tls->params.tls_vminor == TLS_MINOR_VER_ZERO) {
			os->implicit_iv = true;
			memcpy(os->iv, tls->params.iv, AES_BLOCK_LEN);
		}
	}
	tls->free = ktls_ocf_free;
	return (0);
}

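/*
 * Descriptor for this software KTLS backend.  ktls(4) walks the list
 * of registered backends and uses the first one whose try() routine
 * accepts the new session.
 */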
struct ktls_crypto_backend ocf_backend = {
	.name = "OCF",
	.prio = 5,
	.api_version = KTLS_API_VERSION,
	.try = ktls_ocf_try,
};

static int
ktls_ocf_modevent(module_t mod, int what, void *arg)
{
	switch (what) {
	case MOD_LOAD:
		return (ktls_crypto_backend_register(&ocf_backend));
	case MOD_UNLOAD:
		return (ktls_crypto_backend_deregister(&ocf_backend));
	default:
		return (EOPNOTSUPP);
	}
}

static moduledata_t ktls_ocf_moduledata = {
	"ktls_ocf",
	ktls_ocf_modevent,
	NULL
};

DECLARE_MODULE(ktls_ocf, ktls_ocf_moduledata, SI_SUB_PROTO_END, SI_ORDER_ANY);